Я новичок, и я хочу прочитать видео и определить лица, разрезав его на кадры, но это не сработало, и я не понял, в чем проблема. не могли бы вы помочь мне исправить это, пожалуйста. NB :: Я работаю с Google Colaboraty и обнаруживаю лица с помощью библиотеки face_recognition, там мой код:
import face_recognition
import cv2
from google.colab.patches import cv2_imshow
input_video = cv2.VideoCapture('/content/My Drive/video-3.mp4')
# Metadata from the input video
frames_per_second = int(input_video.get(cv2.CAP_PROP_FPS))
frame_width = int(input_video.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(input_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
print('Metadata from input video:',
'\nFrames per second:', frames_per_second,
'\nFrame width:', frame_width,
'\nFrame height:', frame_height)
codec = cv2.VideoWriter.fourcc(*'XVID')
video_writer = cv2.VideoWriter('output_video.mp4',
codec,
frames_per_second,
(frame_width, frame_height))
# An array to hold the locations of faces that are detected on individual frames
face_locations = []
# A counter to keep track of the number of frames processed
count = 1
# Loop through all the frames in the video
while (count != no_of_frames):
# Read the video to retrieve individual frames. 'frame' will reference the inidivdual frames read from the video.
ret, frame = input_video.read()
# Check the 'ret' (return value) to see if we have read all the frames in the video to exit the loop
if not ret:
print('Processed all frames')
break
# Convert the image (frame) to RGB format as by default Open CV uses BGR format.
# This conversion is done as face_recognition and other libraries usually use RGB format.
rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Get the coordinates in the image where a face is detected. Use the model 'cnn' after greater accuracy.
face_locations = face_recognition.face_locations(rgb_frame, model='cnn')
# Loop through the face locations array and draw a rectangle around each face that is detected in the frame
for top, right, bottom, left in face_locations:
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Write the frame to the output vide0
video_writer.write(frame)
# Print for every 50 frames processed
if(count % 50 == 0):
print('Processed', count, 'frames')
count += 1
# Release to close all the resources that we have opened for reading and writing video
input_video.release()
video_writer.release()
cv2.destroyAllWindows()
Результат: в строке 7: имя 'no_of_frames' не определено
Ее это полный код
import face_recognition
import cv2
from google.colab.patches import cv2_imshow
input_video = cv2.VideoCapture('/content/My Drive/video-3.mp4')
# Metadata from the input video
frames_per_second = int(input_video.get(cv2.CAP_PROP_FPS))
frame_width = int(input_video.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(input_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
print('Metadata from input video:',
'\nFrames per second:', frames_per_second,
'\nFrame width:', frame_width,
'\nFrame height:', frame_height)
codec = cv2.VideoWriter.fourcc(*'XVID')
video_writer = cv2.VideoWriter('output_video.mp4',
codec,
frames_per_second,
(frame_width, frame_height))
# An array to hold the locations of faces that are detected on individual frames
face_locations = []
# A counter to keep track of the number of frames processed
count = 1
# Loop through all the frames in the video
while (count != no_of_frames):
# Read the video to retrieve individual frames. 'frame' will reference the inidivdual frames read from the video.
ret, frame = input_video.read()
# Check the 'ret' (return value) to see if we have read all the frames in the video to exit the loop
if not ret:
print('Processed all frames')
break
# Convert the image (frame) to RGB format as by default Open CV uses BGR format.
# This conversion is done as face_recognition and other libraries usually use RGB format.
rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Get the coordinates in the image where a face is detected. Use the model 'cnn' after greater accuracy.
face_locations = face_recognition.face_locations(rgb_frame, model='cnn')
# Loop through the face locations array and draw a rectangle around each face that is detected in the frame
for top, right, bottom, left in face_locations:
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Write the frame to the output vide0
video_writer.write(frame)
# Print for every 50 frames processed
if(count % 50 == 0):
print('Processed', count, 'frames')
count += 1
# Release to close all the resources that we have opened for reading and writing video
input_video.release()
video_writer.release()
cv2.destroyAllWindows()