Run multiple functions on a single video stream with multiprocessing - python

Hey, I am trying to run different face detection models simultaneously. I am using the OpenCV library to open a video stream and created different process objects for the different face detection models. When I run the program, the first method runs successfully, but the second method exits with an error saying it can't receive a frame.
The major challenge is the while loop for reading the capture source (cap), which makes this different from questions posted on Stack Overflow before.
The code is as follows:
import cv2
import dlib
from multiprocessing import Process

def haar_cascade():
    while True:
        ret, frame = cap.read()
        cv2.imshow('input', frame)
        if not ret:
            print("Can't receive frame (stream end?). Exiting ...")
            break
        classifier = cv2.CascadeClassifier('haarcascade_frontalface2.xml')
        faces = classifier.detectMultiScale(frame)
        for result in faces:
            x, y, w, h = result
            x1, y1 = x + w, y + h
            cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
        if cv2.waitKey(1) == ord('q'):
            break
        cv2.imshow('harr-cascade', frame)

def dlib_hog():
    while True:
        ret, frame = cap.read()
        if not ret:
            print("Can't receive frame (stream end?). Exiting ...")
            break
        detector = dlib.get_frontal_face_detector()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = detector(gray, 1)  # result
        # to draw faces on image
        for result in faces:
            x = result.left()
            y = result.top()
            x1 = result.right()
            y1 = result.bottom()
            cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
        if cv2.waitKey(1) == ord('q'):
            break
        cv2.imshow('dlib-hog', frame)

if __name__ == "__main__":
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Cannot open camera")
        exit()
    harrProcess = Process(target=haar_cascade)
    harrProcess.start()
    dlibProcess = Process(target=dlib_hog)
    dlibProcess.start()
    # When everything done, release the capture
    harrProcess.join()
    dlibProcess.join()
    cap.release()
    cv2.destroyAllWindows()
How can I create a multiprocessing model that reads frames from a single source and performs independent operations on them?

I have made various attempts:
I tried using multiprocessing with a producer process and two consumer processes. The frame created by the producer must be converted to a shared-memory array and then converted back to a numpy array when retrieved by a consumer (a short sketch of this conversion follows these notes). There is enough overhead in these operations that I was finding frames were being lost.
I tried using multithreading with a producer thread and two consumer threads. This has less overhead with regard to passing frames between the producer and the consumers. The problem, of course, with multithreading is that due to contention for the Global Interpreter Lock, any CPU-intensive processing required by one consumer cannot run in parallel with CPU-intensive processing required by the other consumer, and could even cause the producer to miss frames. Unfortunately, I don't know whether, when using a camera for input, there is a way for the producer to detect missed frames. To remediate these problems I pass a multiprocessing pool to the consumer threads, to which they can submit tasks that perform the CPU-intensive processing on the frames. Here, too, there is enough overhead in passing frames from one process to another that frames are lost.
As in bullet point 2 above, I use multithreading, but instead of submitting CPU-intensive work to the multiprocessing pool, I perform it within the consumer thread. This seems to cause fewer missed frames for the consumer. But I can't tell whether it is causing the producer to miss frames it would not otherwise miss, so using a multiprocessing pool for the CPU-intensive work seems to be the wiser approach. Of course, if your CPU is fast enough, neither the consumer nor the producer should miss frames. But option 1 (see the second code example), i.e. using just multiprocessing, is probably best.
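As a minimal sketch of the shared-memory round trip described in the first attempt (the frame shape below is a made-up placeholder; in the real code it comes from the first cap.read()):
import ctypes
import multiprocessing

import numpy as np

# Hypothetical frame shape; a real one comes from the first captured frame
shape = (480, 640, 3)
shared = multiprocessing.RawArray(ctypes.c_ubyte, shape[0] * shape[1] * shape[2])

frame = np.zeros(shape, dtype=np.uint8)  # stand-in for a captured frame

# Producer side: copy the numpy frame into the shared buffer
np.frombuffer(shared, dtype=np.uint8)[:] = frame.ravel()

# Consumer side: view the shared buffer as a numpy array again
restored = np.ctypeslib.as_array(shared).reshape(shape)
assert restored.shape == shape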
In the following demos, since I don't have access to your XML file, I have dummied out the processing for one of your consumers. You terminate the program by just hitting the enter key:
Using Multithreading
Set USE_POOL_FOR_COMPUTATION = False to perform CPU-intensive processing by direct call instead of submitting the work to a multiprocessing pool:
#!/usr/bin/env python3
import threading
import multiprocessing
import cv2
import dlib

USE_POOL_FOR_COMPUTATION = True

class Producer:
    def __init__(self):
        # Create shared memory version of a numpy array:
        self._frame = None
        self._condition = threading.Condition()
        self._running = True
        # The latest frame number retrieved
        self._latest_frame_number = 0

    def run(self, cap):
        while self._running:
            ret, self._frame = cap.read()
            if not ret:
                self._running = False
            else:
                self._latest_frame_number += 1
            with self._condition:
                self._condition.notify_all()

    def stop(self):
        self._running = False

    def get_frame(self, sequence_number):
        with self._condition:
            # We block until we find a frame sequence number >= sequence_number.
            self._condition.wait_for(lambda: not self._running or self._latest_frame_number >= sequence_number)
            # Even after the stop method has been called and we are no longer running,
            # there could still be an unprocessed frame. But when we are called again, the current
            # frame number will be < the expected frame number:
            return (self._latest_frame_number, None if self._latest_frame_number < sequence_number else self._frame)

def process_haar_cascade(frame):
    classifier = cv2.CascadeClassifier('haarcascade_frontalface2.xml')
    faces = classifier.detectMultiScale(frame)
    for result in faces:
        x, y, w, h = result
        x1, y1 = x + w, y + h
        cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
    return frame

def haar_cascade(producer, pool):
    last_sequence_number = 0
    while True:
        expected = last_sequence_number + 1
        sequence_number, frame = producer.get_frame(expected)
        if frame is None:
            break
        cv2.waitKey(1)  # allow window to update
        if sequence_number != expected:
            print(f'haar_cascade missed frames {expected} to {sequence_number-1}', flush=True)
        last_sequence_number = sequence_number
        cv2.imshow('input', frame)  # Unmodified frame
        # Since I don't have required xml file, just skip processing:
        """
        if USE_POOL_FOR_COMPUTATION:
            frame = pool.apply(process_haar_cascade, args=(frame,))
        else:
            frame = process_haar_cascade(frame)
        """
        cv2.imshow('harr-cascade', frame)

def process_dlib_hog(frame):
    detector = dlib.get_frontal_face_detector()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray, 1)  # result
    # to draw faces on image
    for result in faces:
        x = result.left()
        y = result.top()
        x1 = result.right()
        y1 = result.bottom()
        cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
    return frame

def dlib_hog(producer, pool):
    last_sequence_number = 0
    while True:
        expected = last_sequence_number + 1
        sequence_number, frame = producer.get_frame(expected)
        if frame is None:
            break
        cv2.waitKey(1)  # allow window to update
        if sequence_number != expected:
            print(f'dlib_hog missed frames {expected} to {sequence_number-1}', flush=True)
        last_sequence_number = sequence_number
        if USE_POOL_FOR_COMPUTATION:
            frame = pool.apply(process_dlib_hog, args=(frame,))
        else:
            frame = process_dlib_hog(frame)
        cv2.imshow('dlib-hog', frame)

def main():
    producer = Producer()
    pool = multiprocessing.Pool(2) if USE_POOL_FOR_COMPUTATION else None
    # Pass pool for CPU-intensive work:
    consumer1_thread = threading.Thread(target=haar_cascade, args=(producer, pool))
    consumer1_thread.start()
    consumer2_thread = threading.Thread(target=dlib_hog, args=(producer, pool))
    consumer2_thread.start()
    cap = cv2.VideoCapture(0)
    producer_thread = threading.Thread(target=producer.run, args=(cap,))
    producer_thread.start()
    input('Hit enter to terminate:\n')
    producer.stop()
    producer_thread.join()
    consumer1_thread.join()
    consumer2_thread.join()
    cap.release()
    cv2.destroyAllWindows()
    if USE_POOL_FOR_COMPUTATION:
        pool.close()
        pool.join()

if __name__ == '__main__':
    main()
Using Multiprocessing
The multiprocessing.RawArray that is used to hold the sharable frame must be allocated before the consumer process is run so that all processes have access to this array. This requires knowing in advance how large an array to create:
#!/usr/bin/env python3
import multiprocessing
import ctypes
import cv2
import numpy as np
import dlib

class Producer:
    def __init__(self):
        # Discover how large a frame is by getting the first frame
        cap = cv2.VideoCapture(0)
        ret, frame = cap.read()
        if ret:
            self._shape = frame.shape
            frame_size = self._shape[0] * self._shape[1] * self._shape[2]
            self._shared_array = multiprocessing.RawArray(ctypes.c_ubyte, frame_size)
        else:
            self._arr = None
        cap.release()
        self._condition = multiprocessing.Condition()
        self._running = multiprocessing.RawValue('i', 1)
        # The latest frame number retrieved
        self._latest_frame_number = multiprocessing.RawValue('i', 0)
        self._lock = multiprocessing.Lock()

    def run(self):
        cap = cv2.VideoCapture(0)
        while self._running.value:
            ret, frame = cap.read()
            if not ret:
                self._running.value = 0
                with self._condition:
                    self._condition.notify_all()
                cap.release()
                break
            with self._lock:
                self._latest_frame_number.value += 1
                # np array to shared_array
                temp = np.frombuffer(self._shared_array, dtype=frame.dtype)
                temp[:] = frame.flatten(order='C')
            with self._condition:
                self._condition.notify_all()

    def stop(self):
        self._running.value = 0

    def get_frame(self, sequence_number):
        with self._condition:
            # We block until we find a frame sequence number >= sequence_number.
            self._condition.wait_for(lambda: not self._running.value or self._latest_frame_number.value >= sequence_number)
            # Even after the stop method has been called and we are no longer running,
            # there could still be an unprocessed frame. But when we are called again, the current
            # frame number will be < the expected frame number:
            if self._latest_frame_number.value < sequence_number:
                return (self._latest_frame_number.value, None)
            with self._lock:
                # Convert to np array:
                return self._latest_frame_number.value, np.ctypeslib.as_array(self._shared_array).reshape(self._shape)

def process_haar_cascade(frame):
    classifier = cv2.CascadeClassifier('haarcascade_frontalface2.xml')
    faces = classifier.detectMultiScale(frame)
    for result in faces:
        x, y, w, h = result
        x1, y1 = x + w, y + h
        cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
    return frame

def haar_cascade(producer):
    last_sequence_number = 0
    while True:
        expected = last_sequence_number + 1
        sequence_number, frame = producer.get_frame(expected)
        if frame is None:
            break
        cv2.waitKey(1)  # allow window to update
        if sequence_number != expected:
            print(f'haar_cascade missed frames {expected} to {sequence_number-1}', flush=True)
        last_sequence_number = sequence_number
        cv2.imshow('input', frame)  # Unmodified frame
        # Since I don't have required xml file, just skip processing:
        #frame = process_haar_cascade(frame)
        cv2.imshow('harr-cascade', frame)

def process_dlib_hog(frame):
    detector = dlib.get_frontal_face_detector()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray, 1)  # result
    # to draw faces on image
    for result in faces:
        x = result.left()
        y = result.top()
        x1 = result.right()
        y1 = result.bottom()
        cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
    return frame

def dlib_hog(producer):
    last_sequence_number = 0
    while True:
        expected = last_sequence_number + 1
        sequence_number, frame = producer.get_frame(expected)
        if frame is None:
            break
        cv2.waitKey(1)  # allow window to update
        if sequence_number != expected:
            print(f'dlib_hog missed frames {expected} to {sequence_number-1}', flush=True)
        last_sequence_number = sequence_number
        frame = process_dlib_hog(frame)
        cv2.imshow('dlib-hog', frame)

def main():
    producer = Producer()
    consumer1_process = multiprocessing.Process(target=haar_cascade, args=(producer,))
    consumer1_process.start()
    consumer2_process = multiprocessing.Process(target=dlib_hog, args=(producer,))
    consumer2_process.start()
    producer_process = multiprocessing.Process(target=producer.run)
    producer_process.start()
    input('Hit enter to terminate:\n')
    producer.stop()
    producer_process.join()
    consumer1_process.join()
    consumer2_process.join()

if __name__ == '__main__':
    main()

Related

Why does the cropped video not get saved?

Here, I have code that takes a video as input and lets the user draw an ROI. Later the cropped video is displayed. This code was originally taken from the question "I would like to define a Region of Interest in a video and only process that area".
I have provided the code below.
Question: I would like to save the cropped video in .mp4 format. The output video is only a 1 KB file. Can you go through the code and suggest a solution?
NB: I went through the answer provided at "OpenCV video not getting saved", but I still haven't been able to figure out the error.
import numpy as np
import cv2
import matplotlib.pyplot as plt

ORIGINAL_WINDOW_TITLE = 'Original'
FIRST_FRAME_WINDOW_TITLE = 'First Frame'
DIFFERENCE_WINDOW_TITLE = 'Difference'

canvas = None
drawing = False  # true if mouse is pressed

# Retrieve first frame
def initialize_camera(cap):
    _, frame = cap.read()
    return frame

# mouse callback function
def mouse_draw_rect(event, x, y, flags, params):
    global drawing, canvas
    if drawing:
        canvas = params[0].copy()
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        params.append((x, y))  # Save first point
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing:
            cv2.rectangle(canvas, params[1], (x, y), (0, 255, 0), 2)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        params.append((x, y))  # Save second point
        cv2.rectangle(canvas, params[1], params[2], (0, 255, 0), 2)

def select_roi(frame):
    global canvas
    # here, it is the copy of the first frame.
    canvas = frame.copy()
    params = [frame]
    ROI_SELECTION_WINDOW = 'Select ROI'
    cv2.namedWindow(ROI_SELECTION_WINDOW)
    cv2.setMouseCallback(ROI_SELECTION_WINDOW, mouse_draw_rect, params)
    roi_selected = False
    while True:
        cv2.imshow(ROI_SELECTION_WINDOW, canvas)
        key = cv2.waitKey(10)
        # Press Enter to break the loop
        if key == 13:
            break
    cv2.destroyWindow(ROI_SELECTION_WINDOW)
    roi_selected = (3 == len(params))
    print(len(params))
    if roi_selected:
        p1 = params[1]
        p2 = params[2]
        if (p1[0] == p2[0]) and (p1[1] == p2[1]):
            roi_selected = False
    # Use whole frame if ROI has not been selected
    if not roi_selected:
        print('ROI Not Selected. Using Full Frame')
        p1 = (0, 0)
        p2 = (frame.shape[1] - 1, frame.shape[0] - 1)
    return roi_selected, p1, p2

if __name__ == '__main__':
    cap = cv2.VideoCapture(r'E:\cardiovascular\brad1low.mp4')
    # Grab first frame
    first_frame = initialize_camera(cap)
    # Select ROI for processing. Hit Enter after drawing the rectangle to finalize selection
    roi_selected, point1, point2 = select_roi(first_frame)
    # Grab ROI of first frame
    first_frame_roi = first_frame[point1[1]:point2[1], point1[0]:point2[0]]
    print(f'first frame roi is {first_frame_roi}')
    # An empty image of full size just for visualization of difference
    difference_image_canvas = np.zeros_like(first_frame)
    out = cv2.VideoWriter(r'E:\cardiovascular\filename2.mp4', cv2.VideoWriter_fourcc(*'MP4V'), int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), (int(first_frame_roi.shape[0]), (int(first_frame_roi.shape[1]))))
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            # ROI of current frame
            roi = frame[point1[1]:point2[1], point1[0]:point2[0]]
            print(f'roi is {roi}')
            cv2.imshow(DIFFERENCE_WINDOW_TITLE, roi)
            out.write(roi)
            key = cv2.waitKey(30) & 0xff
            if key == 27:
                break
        else:
            break
    cap.release()
    out.release()
    cv2.destroyAllWindows()
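As a hedged pointer (not a verified fix): the most likely problem is in the cv2.VideoWriter() call, whose third argument must be the playback FPS (not the total frame count) and whose frameSize must be (width, height) matching every frame passed to out.write(). A minimal sketch of that construction, reusing the question's variable names:
# Sketch only: assumes cap, point1 and point2 from the code above are in scope
fps = cap.get(cv2.CAP_PROP_FPS) or 30.0  # fall back to 30 if the file reports 0
roi_w = point2[0] - point1[0]            # width  = x2 - x1
roi_h = point2[1] - point1[1]            # height = y2 - y1
out = cv2.VideoWriter(r'E:\cardiovascular\filename2.mp4',
                      cv2.VideoWriter_fourcc(*'mp4v'),
                      fps,
                      (roi_w, roi_h))    # frameSize is (width, height)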

Python's OpenCV running out of frames when using webcam

I am trying to set up OpenCV to make a heat map. I have it working for a saved .mp4 file. However, when I try to make it work with a live feed from my webcam, it doesn't seem to like it. The problem is that it says "index is out of range" for the following lines of code:
make_video('./frames/', './output.avi')
frame = cv2.imread(os.path.join(image_folder, images[0]))
The .py file is as follows:
import numpy as np
import cv2
import copy
from make_video import make_video
from progress.bar import Bar

def main():
    capture = cv2.VideoCapture(0)
    background_subtractor = cv2.bgsegm.createBackgroundSubtractorMOG()
    length = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    bar = Bar('Processing Frames', max=length)
    print(length)
    short = length
    first_iteration_indicator = 1
    for i in range(0, short):
        print(i)
        ret, frame = capture.read()
        # If first frame
        if first_iteration_indicator == 1:
            first_frame = copy.deepcopy(frame)
            height, width = frame.shape[:2]
            accum_image = np.zeros((height, width), np.uint8)
            first_iteration_indicator = 0
        else:
            filter = background_subtractor.apply(frame)  # remove the background
            cv2.imwrite('./frame.jpg', frame)
            cv2.imwrite('./diff-bkgnd-frame.jpg', filter)
            threshold = 2
            maxValue = 2
            ret, th1 = cv2.threshold(filter, threshold, maxValue, cv2.THRESH_BINARY)
            # add to the accumulated image
            accum_image = cv2.add(accum_image, th1)
            cv2.imwrite('./mask.jpg', accum_image)
            color_image_video = cv2.applyColorMap(accum_image, cv2.COLORMAP_HOT)
            video_frame = cv2.addWeighted(frame, 0.7, color_image_video, 0.7, 0)
            name = "./frames/frame%d.jpg" % i
            cv2.imwrite(name, video_frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        bar.next()
    bar.finish()
    make_video('./frames/', './output.avi')
    color_image = cv2.applyColorMap(accum_image, cv2.COLORMAP_HOT)
    result_overlay = cv2.addWeighted(first_frame, 0.7, color_image, 0.7, 0)
    # save the final heatmap
    cv2.imwrite('diff-overlay.jpg', result_overlay)
    # cleanup
    capture.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
In order to use the webcam I set capture = cv2.VideoCapture(0), which causes the error above; however, if I use a pre-saved .mp4 file, e.g. capture = cv2.VideoCapture('vid.mp4'), it works fine. Any ideas?
Problem:
When using your webcam with capture = cv2.VideoCapture(0), the line length = int(capture.get(cv2.CAP_PROP_FRAME_COUNT)) is not helpful, because capture.get(cv2.CAP_PROP_FRAME_COUNT) returns -1.0 for a webcam. In other words, the number of frames is unknown for a live stream. It only makes sense when reading a video file, since the total number of frames in the file can be determined.
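A quick way to see this for yourself (a throwaway check; device index 0 is assumed):
import cv2

cap = cv2.VideoCapture(0)
print(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # typically -1.0 for a live webcam
cap.release()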
Solution:
I modified the existing code to use the webcam and removed the for loop. I tested this code without make_video. The results were as expected when analyzing them with cv2.imshow (uncomment those lines to see for yourself).
import cv2
from make_video import make_video  # helper module from the question (the rest was tested without it)

capture = cv2.VideoCapture(0)
background_subtractor = cv2.bgsegm.createBackgroundSubtractorMOG()

threshold = 100
maxValue = 255

# for counting frames
i = 0

while True:
    i = i + 1
    ret, frame = capture.read()
    if frame is None:
        break
    fgMask = background_subtractor.apply(frame)
    ret, th1 = cv2.threshold(fgMask, threshold, maxValue, cv2.THRESH_BINARY)
    blur = cv2.GaussianBlur(th1, (11, 11), 9)
    heat_map = cv2.applyColorMap(blur, cv2.COLORMAP_HOT)
    video_hm = cv2.addWeighted(frame, 0.7, heat_map, 0.7, 0)
    name = "./frames/frame%d.jpg" % i
    cv2.imwrite(name, video_hm)
    #cv2.imshow('Frame', frame)
    #cv2.imshow('FG Mask', fgMask)
    #cv2.imshow('Video OP', video_hm)
    #cv2.imshow('Blur', blur)
    keyboard = cv2.waitKey(30)
    if keyboard == ord('q') or keyboard == 27:  # waitKey returns an int, so compare with ord('q'), not 'q'
        break

capture.release()
make_video('./frames/', './output.avi')
cv2.destroyAllWindows()

Killing a child thread running a multiprocessing operation

I'm new to multiprocessing. I was designing an app where the user can select a video and the app does facial recognition on it and displays it. I used concurrent.futures.ProcessPoolExecutor to process the frames in parallel to improve the total time required. My problem is that when I integrate the face recognition multiprocessing part into the app, I need to run the whole thing on a separate thread to keep the app responsive.
Usually, when the stand-alone multiprocessing part runs, it can be stopped by raising an exception. But while running on a separate thread, it has its own stack. I have seen people suggest using a message (flag) to check whether the code should keep running or not, but my caller function is not built around a "while loop" to check that.
All I'm asking is whether there is any way I can raise an exception to stop both threads, or any other way to stop that child thread from the main thread. My code is attached below. sub() is the caller function and process() is the function used as the multiprocessing target.
Thanks in advance.
import cv2
import face_recognition
import numpy as np
import time
import pickle
import concurrent.futures
import queue
import file_video_stream

def process(sframe, facesencoded, knownfacenames, process_this_frame):
    if sframe is not None:
        if process_this_frame:
            small_frame = cv2.resize(sframe, (0, 0), fx=0.5, fy=0.5)
            rgb_small_frame = small_frame[:, :, ::-1]
            face_locations = face_recognition.face_locations(rgb_small_frame)
            unknown_face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            face_names = []
            for face_encoding in unknown_face_encodings:
                matches = face_recognition.compare_faces(facesencoded, face_encoding)
                name = "Unknown"
                face_distances = face_recognition.face_distance(facesencoded, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = knownfacenames[best_match_index]
                face_names.append(name)
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                # Scale back up face locations since the frame we detected in was scaled to 1/4 size
                top *= 2
                right *= 2
                bottom *= 2
                left *= 2
                # Draw a box around the face
                cv2.rectangle(sframe, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)
                # Draw a label with a name below the face
                cv2.rectangle(sframe, (left-20, bottom-15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(sframe, name, (left-20, bottom+15), font, 1.0, (255, 255, 255), 2)
            show_frame = cv2.resize(sframe, (1080, 720))
            return show_frame
        else:
            show_frame = cv2.resize(sframe, (1080, 720))
            return show_frame

def sub():
    list_original = []
    frame_number = 0
    alt_frame_number = 0
    proc_frame = 0
    q1 = queue.Queue()
    with open("known.pickle", 'rb') as ki:
        faces = pickle.load(ki)
    faces_encoded = list(faces.values())
    known_face_names = list(faces.keys())
    fvs = file_video_stream.FileVideoStream('Attendance.mp4')
    fvs.start()
    time.sleep(1.0)
    process_this_frame = True
    with concurrent.futures.ProcessPoolExecutor() as executor:
        while fvs.more():
            #for _ in range(5):
            frame = fvs.read()
            time.sleep(0.1)
            list_element = executor.submit(process, frame, faces_encoded, known_face_names, process_this_frame)
            time.sleep(0.1)
            if list_element is not None:
                frame_number += 1
                #print(frame_number)
                list_original.append(list_element)
            else:
                fvs.stop()
                break
            process_this_frame = not process_this_frame
        print("Total number of frames read:", frame_number)
        #print("Total number of frames processed:", alt_frame_number)
        fvs.stop()
    for res in list_original:
        q1.put(res.result())
    while not q1.empty():
        dump = q1.get()
        cv2.imshow('Video', dump)
        time.sleep(0.01)
        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()

if __name__ == '__main__':
    sub()
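One common way to get the cooperative stop described above (a minimal, hypothetical sketch, not taken from the original thread) is to pass a threading.Event into the worker thread and poll it inside the submission loop so the thread winds down on its own; worker and stop_event below are illustrative stand-ins for sub() and its while fvs.more() loop:
import threading
import concurrent.futures
import time

def worker(stop_event):
    # Stand-in for the frame-processing loop in sub(); the structure is hypothetical.
    with concurrent.futures.ProcessPoolExecutor() as executor:
        futures = []
        for i in range(1000):                            # imagine: while fvs.more():
            if stop_event.is_set():                      # cooperative cancellation point
                break
            futures.append(executor.submit(pow, i, 2))   # imagine: executor.submit(process, frame, ...)
            time.sleep(0.01)
    print('worker stopped after', len(futures), 'submissions')

if __name__ == '__main__':
    stop_event = threading.Event()
    t = threading.Thread(target=worker, args=(stop_event,), daemon=True)
    t.start()
    time.sleep(0.5)    # the GUI would normally decide when to stop
    stop_event.set()   # signal the worker thread from the main thread
    t.join()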

How can I control frames in OpenCV?

Here you insert the URL of a video on the internet (or a local path) and it streams it to you, but the audio and video are not coordinated: the video runs faster than the audio. :/
import cv2
import numpy as np
from ffpyplayer.player import MediaPlayer

def getVideoSource(source, width, height):
    cap = cv2.VideoCapture(source)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return cap

def main():
    url = input('enter url: ')
    sourcePath = url
    camera = getVideoSource(sourcePath, 720, 480)
    player = MediaPlayer(sourcePath)
    while True:
        ret, frame = camera.read()
        audio_frame, val = player.get_frame()
        if ret == 0:
            print("End of video")
            break
        frame = cv2.resize(frame, (720, 480))
        cv2.imshow('Camera', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if val != 'eof' and audio_frame is not None:
            frame, t = audio_frame
            print("Frame:" + str(frame) + " T: " + str(t))
    camera.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
A slight modification of the answer by @furas:
player = MediaPlayer(video_path)

while True:
    frame, val = player.get_frame()
    if val == 'eof':
        break  # this is the difference
    if frame is not None:
        image, pts = frame
        w, h = image.get_size()
        # convert to array width, height
        img = np.asarray(image.to_bytearray()[0]).reshape(h, w, 3)
        # convert RGB to BGR because `cv2` needs it to display it
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        time.sleep(val)
        cv2.imshow('video', img)
        if cv2.waitKey(1) & 0xff == ord('q'):
            break

cv2.destroyAllWindows()
player.close_player()
The difference is the explicit break once EOF is reached, which causes the program to terminate. To me, that's expected behavior, so I wanted to post this code in case someone wants that behavior as well.
It seems you can't control when the audio plays, because ffpyplayer uses SDL to play it in a separate thread. But get_frame() gives the tuple (frame, val), where frame is (image, time_when_to_display_image), and you should use this time_when_to_display_image to control when to display the image.
None of this code needs cv2.VideoCapture() to get the frame.
I use cv2 only to display it, but you can use any GUI toolkit to create a window and display it.
I use current_time to decide when to get the next frame instead of using time.sleep(), because with sleep the video wasn't smooth.
BTW: You can use player.set_size(720, 480) to resize frames.
from ffpyplayer.player import MediaPlayer
import cv2
import numpy as np
import time

filename = 'video.mp4'

player = MediaPlayer(filename)
player.set_size(720, 480)  # resize it
#player.set_size(400, 300)

start_time = time.time()
frame_time = start_time + 0

while True:
    current_time = time.time()
    # check if it is time to get next frame
    if current_time >= frame_time:
        # get next frame
        frame, val = player.get_frame()
        if val != 'eof' and frame is not None:
            image, pts = frame
            w, h = image.get_size()
            # convert to array width, height
            img = np.asarray(image.to_bytearray()[0]).reshape(h, w, 3)
            # convert RGB to BGR because `cv2` needs it to display it
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            cv2.imshow('video', img)
            frame_time = start_time + pts
    if cv2.waitKey(1) & 0xff == ord('q'):
        break

cv2.destroyAllWindows()
player.close_player()
EDIT: I found that get_frame() gives (frame, val) and this val can be used in time.sleep(val). And probably it should sleep before displaying frame, not after displaying it.
from ffpyplayer.player import MediaPlayer
import cv2
import numpy as np
import time

filename = 'video.mp4'

player = MediaPlayer(filename)
player.set_size(720, 480)
#player.set_size(400, 300)

while True:
    frame, val = player.get_frame()
    if val != 'eof' and frame is not None:
        image, pts = frame
        w, h = image.get_size()
        # convert to array width, height
        img = np.asarray(image.to_bytearray()[0]).reshape(h, w, 3)
        # convert RGB to BGR because `cv2` needs it to display it
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        time.sleep(val)
        cv2.imshow('video', img)
    if cv2.waitKey(1) & 0xff == ord('q'):
        break

cv2.destroyAllWindows()
player.close_player()
EDIT: Code using tkinter to display it.
from ffpyplayer.player import MediaPlayer
import tkinter as tk
from PIL import Image, ImageTk
import time

# --- functions ---

def update_frame():
    global photo  # solution for BUG in PhotoImage

    frame, val = player.get_frame()
    if val != 'eof' and frame is not None:
        image, pts = frame
        w, h = image.get_size()
        data = image.to_bytearray()[0]
        img = Image.frombytes("RGB", (w, h), bytes(data))
        photo = ImageTk.PhotoImage(img)
        time.sleep(val)
        label['image'] = photo
    root.after(1, update_frame)  # again after `1ms` without blocking `mainloop()`

# --- main ---

filename = 'video.mp4'

player = MediaPlayer(filename)
player.set_size(720, 480)
#player.set_size(400, 300)

root = tk.Tk()

label = tk.Label(root)
label.pack()

root.bind('q', lambda event: root.destroy())

update_frame()

root.mainloop()

player.close_player()

Python, drawing a polygon over webcam video using mouse clicks to detect points

I'm using Python 3 and OpenCV (4.1.0) to write a script that:
displays the contents of the webcam;
records the coordinates of mouse clicks over video;
after pressing a certain button ('p' in my example), draws a polyline between the points identified by previous mouse clicks;
So far, I'm trying:
import numpy as np
import cv2

def main():
    cap = cv2.VideoCapture("files/long_video.mp4")  # Open video file
    points = []
    while cap.isOpened():
        ret, frame = cap.read()  # read a frame
        try:
            cv2.imshow('Frame', frame)
        except:
            print('EOF')
            break
        cv2.setMouseCallback('Frame', left_click_detect, points)
        # Abort and exit with 'Q'
        key = cv2.waitKey(25)
        if key == ord('q'):
            break
        elif key == ord('p'):  # HERE, IT SHOULD DRAW POLYLINE OVER VIDEO!!!
            pts_array = np.array([[x, y] for (x, y) in points], np.int0)
            frame = cv2.polylines(frame, np.int32(np.array(points)), False, (255, 0, 0), thickness=5)
            points = []
            cv2.imshow('Frame', frame)
    cap.release()  # release video file
    cv2.destroyAllWindows()  # close all openCV windows

def left_click(event, x, y, flags, points):
    if event == cv2.EVENT_LBUTTONDOWN:
        print(f"\tClick on {x}, {y}")
        points.append([x, y])
It kinda works, but after pressing 'p' it doesn't draw the polyline over the video.
Any suggestions?
There are 2 problems with your code:
1. cv2.polylines() accepts a list of arrays. So here:
frame = cv2.polylines(frame, np.int32(np.array(points)), False, (255, 0, 0), thickness=5)
replace np.int32(np.array(points)) with [np.int32(points)] to fix the exception (you also don't need np.array() here).
2. After you draw the polygon on the frame you call cv2.imshow(), but almost immediately afterwards you show the next frame without the polygon on it, so you never have time to see the polygon. To fix it, you need to draw the polygon again on each frame, and to do that you need to keep it until you press 'p' again (to show another polygon).
This will work:
import numpy as np
import cv2

def main():
    cap = cv2.VideoCapture("files/long_video.mp4")  # Open video file
    polygon = []
    points = []
    while cap.isOpened():
        ret, frame = cap.read()  # read a frame
        if not ret:
            print('EOF')
            break
        frame = cv2.polylines(frame, polygon, False, (255, 0, 0), thickness=5)
        cv2.imshow('Frame', frame)
        # Abort and exit with 'Q'
        key = cv2.waitKey(25)
        if key == ord('q'):
            break
        elif key == ord('p'):
            polygon = [np.int32(points)]
            points = []
        cv2.setMouseCallback('Frame', left_click_detect, points)
    cap.release()  # release video file
    cv2.destroyAllWindows()  # close all openCV windows

def left_click_detect(event, x, y, flags, points):
    if event == cv2.EVENT_LBUTTONDOWN:
        print(f"\tClick on {x}, {y}")
        points.append([x, y])
        print(points)