I'm stuck on how to get a return value out of my multiprocessing code. I can't run my program in a single thread or process because I have two infinite while loops: one stitches the two video streams from my webcam and Raspberry Pi camera, and the other runs face recognition on that stitched stream. So I'm trying multithreading or multiprocessing, but I don't know how to return the video stream from one process to the other. I thought about using a queue, or threads later if this doesn't work.
Any ideas? The functions work fine when run separately.
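The queue idea I had in mind looks roughly like this (just a sketch, not my actual code; produce_frames/consume_frames and the dummy frames only stand in for my stitching and face-recognition loops):

from multiprocessing import Process, Queue
import numpy as np

def produce_frames(q):
    # stand-in for VideoStitching: push each stitched frame into the queue
    for _ in range(100):
        frame = np.zeros((400, 800, 3), dtype=np.uint8)  # dummy "stitched" frame
        q.put(frame)
    q.put(None)  # sentinel so the consumer knows we are done

def consume_frames(q):
    # stand-in for FaceRecog: pull frames from the queue until the sentinel
    while True:
        frame = q.get()
        if frame is None:
            break
        # ... face detection/recognition on `frame` would go here ...

if __name__ == '__main__':
    q = Queue(maxsize=2)  # small buffer so stale frames don't pile up
    producer = Process(target=produce_frames, args=(q,))
    consumer = Process(target=consume_frames, args=(q,))
    producer.start()
    consumer.start()
    producer.join()
    consumer.join()

My actual code: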
# imports used by the full script
from multiprocessing import Pool
from imutils.video import VideoStream
from imutils.video import FPS
import face_recognition
import imutils
import time
import cv2
# Stitcher, detector (Haar cascade), and data (known encodings) are set up
# elsewhere in my script; both functions run fine on their own.

# Process #1
def VideoStitching():
    # initialize the video streams and allow them to warm up
    print("[INFO] starting cameras for video stitching...")
    leftStream = VideoStream(src=0).start()
    rightStream = VideoStream(usePiCamera=True).start()
    time.sleep(2.0)
    # initialize the image stitcher
    stitcher = Stitcher()
    # loop over frames from the video streams
    while True:
        # grab the frames from their respective video streams
        left = leftStream.read()
        right = rightStream.read()
        # resize the frames
        left = imutils.resize(left, width=400)
        right = imutils.resize(right, width=400)
        # stitch the frames together to form the panorama
        # IMPORTANT: you might have to change this line of code
        # depending on how your cameras are oriented; frames
        # should be supplied in left-to-right order
        result = stitcher.stitch([left, right])
        # no homography could be computed
        if result is None:
            print("[INFO] homography could not be computed")
            break
        # show the output images
        #cv2.imshow("Result", result)
        return result
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    # do a bit of cleanup
    leftStream.stop()
    rightStream.stop()
# Process #2
def FaceRecog(vs):
    # start the FPS counter
    fps = FPS().start()
    # loop over frames from the video file stream
    while True:
        # grab the frame from the threaded video stream and resize it
        # (to speed up processing)
        #frame = result.read()
        #frame = imutils.resize(frame, width=800)
        frame = vs
        # convert the input frame from (1) BGR to grayscale (for face
        # detection) and (2) from BGR to RGB (for face recognition)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # detect faces in the grayscale frame
        rects = detector.detectMultiScale(gray, scaleFactor=1.1,
            minNeighbors=5, minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE)
        # OpenCV returns bounding box coordinates in (x, y, w, h) order
        # but we need them in (top, right, bottom, left) order, so we
        # need to do a bit of reordering
        boxes = [(y, x + w, y + h, x) for (x, y, w, h) in rects]
        # compute the facial embeddings for each face bounding box
        encodings = face_recognition.face_encodings(rgb, boxes)
        names = []
        # loop over the facial embeddings
        for encoding in encodings:
            # attempt to match each face in the input image to our known
            # encodings
            matches = face_recognition.compare_faces(data["encodings"],
                encoding)
            name = "Unknown"
            # check to see if we have found a match
            if True in matches:
                # find the indexes of all matched faces, then initialize a
                # dictionary to count the total number of times each face
                # was matched
                matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                counts = {}
                # loop over the matched indexes and maintain a count for
                # each recognized face
                for i in matchedIdxs:
                    name = data["names"][i]
                    counts[name] = counts.get(name, 0) + 1
                # determine the recognized face with the largest number
                # of votes (note: in the event of an unlikely tie Python
                # will select the first entry in the dictionary)
                name = max(counts, key=counts.get)
            # update the list of names
            names.append(name)
        # loop over the recognized faces
        for ((top, right, bottom, left), name) in zip(boxes, names):
            # draw the predicted face name on the image
            cv2.rectangle(frame, (left, top), (right, bottom),
                (0, 255, 0), 2)
            y = top - 15 if top - 15 > 15 else top + 15
            cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
                0.75, (0, 255, 0), 2)
        # display the image to our screen
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
if __name__ == '__main__':
    p = Pool(processes=2)
    vs = p.map(VideoStitching)
    frame = p.map(FaceRecog, vs)
    p.close()
    p.join()