Constant camera grabbing with OpenCV & Python multiprocessing - Follow up - python

This is a follow-up to this question, where @Aaron helped quite a lot.
Context:
OpenCV, Python, a USB webcam and multiprocessing: grabbing continuously from the camera in a subprocess and returning a single frame to the main process upon request.
@Aaron did most of the heavy lifting, if not all of it :). What I am missing is how to turn this into a setup where the main process gets a single frame upon request.
Help, please?
import numpy as np
import cv2
from multiprocessing import Process, Queue
from multiprocessing.shared_memory import SharedMemory

def produce_frames(q):
    # get the first frame to calculate size of buffer
    cap = cv2.VideoCapture(0)
    success, frame = cap.read()
    shm = SharedMemory(create=True, size=frame.nbytes)
    framebuffer = np.ndarray(frame.shape, frame.dtype, buffer=shm.buf)  # could also maybe use array.array instead of numpy, but I'm familiar with numpy
    framebuffer[:] = frame  # in case you need to send the first frame to the main process
    q.put(shm)  # send the buffer back to main
    q.put(frame.shape)  # send the array details
    q.put(frame.dtype)
    try:
        while True:
            cap.read(framebuffer)
    except KeyboardInterrupt:
        pass
    finally:
        shm.close()  # call this in all processes where the shm exists
        shm.unlink()  # call this in at least one process

def consume_frames(q):
    shm = q.get()  # get the shared buffer
    shape = q.get()
    dtype = q.get()
    framebuffer = np.ndarray(shape, dtype, buffer=shm.buf)  # reconstruct the array
    try:
        while True:
            cv2.imshow("window title", framebuffer)
            cv2.waitKey(100)
    except KeyboardInterrupt:
        pass
    finally:
        shm.close()

if __name__ == "__main__":
    q = Queue()
    producer = Process(target=produce_frames, args=(q,))
    producer.start()
    consume_frames(q)
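
What follows is a minimal sketch of one way to do the "single frame upon request" part on top of the code above. It is my own addition, not @Aaron's: the multiprocessing.Lock and the get_frame helper are illustrative, and the cleanup is simplified (the producer runs as a daemon and is killed when the main process exits). The producer keeps overwriting the shared buffer, and the main process copies the buffer whenever it wants a frame, with the lock preventing a read from overlapping a write:
import numpy as np
import cv2
from multiprocessing import Process, Queue, Lock
from multiprocessing.shared_memory import SharedMemory

def produce_frames(q, lock):
    # grab continuously into shared memory, exactly as above, but under a lock
    cap = cv2.VideoCapture(0)
    success, frame = cap.read()
    shm = SharedMemory(create=True, size=frame.nbytes)
    framebuffer = np.ndarray(frame.shape, frame.dtype, buffer=shm.buf)
    framebuffer[:] = frame
    q.put((shm, frame.shape, frame.dtype))
    try:
        while True:
            with lock:  # don't overwrite while the main process is copying
                cap.read(framebuffer)
    finally:
        shm.close()
        shm.unlink()

def get_frame(framebuffer, lock):
    # "single frame upon request": return a private copy of the latest frame
    with lock:
        return framebuffer.copy()

if __name__ == "__main__":
    q = Queue()
    lock = Lock()
    producer = Process(target=produce_frames, args=(q, lock), daemon=True)
    producer.start()
    shm, shape, dtype = q.get()
    framebuffer = np.ndarray(shape, dtype, buffer=shm.buf)
    try:
        while True:
            frame = get_frame(framebuffer, lock)  # request one frame
            cv2.imshow("latest frame", frame)
            if cv2.waitKey(100) == 27:  # Esc quits
                break
    finally:
        shm.close()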

Related

Python multiprocessing queue using a lot of resources with OpenCV

I am using multiprocessing to get frames of a video using OpenCV in Python.
My class looks like this:
import cv2
from multiprocessing import Process, Queue

class StreamVideos:
    def __init__(self):
        self.image_data = Queue()

    def start_proces(self):
        p = Process(target=self.echo)
        p.start()

    def echo(self):
        cap = cv2.VideoCapture('videoplayback.mp4')
        while cap.isOpened():
            ret, frame = cap.read()
            self.image_data.put(frame)
            # print("frame")
I start the "echo" process using:
p = Process(target=self.echo)
p.start()
The echo function looks like this:
def echo(self):
    cap = cv2.VideoCapture('videoplayback.mp4')
    while cap.isOpened():
        ret, frame = cap.read()
        self.image_data.put(frame)
in which I am using a queue to put these frames:
self.image_data.put(frame)
and then in another process I start retrieving these frames:
self.obj = StreamVideos()

def start_process(self):
    self.obj.start_proces()
    p = Process(target=self.stream_videos)
    p.start()

def stream_videos(self):
    while True:
        self.img = self.obj.image_data.get()
        print(self.img)
But as soon as I start putting frames into the queue, the RAM fills up very quickly and the system gets stuck. The video I am using is just 25 fps and 39 MB in size, so it does not make any sense.
One thing I noticed is that the "echo" process puts a lot of frames into the queue before the "stream_videos" process retrieves them.
What could be the root of this problem?
Thanks in advance.
Expectations:
Able to retrieve the frames continuously.
Tried:
Not putting frames in the queue, in which case the RAM does not fill up.
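
One likely root cause is that multiprocessing.Queue is unbounded, so the "echo" producer decodes and enqueues frames far faster than "stream_videos" takes them out, and every queued frame is a full uncompressed image. A minimal mitigation (a sketch of mine, not part of the answer that follows; the maxsize value is illustrative) is to bound the queue so put() blocks once it is full and the consumer paces the producer:
import cv2
from multiprocessing import Process, Queue

def echo(image_data):
    cap = cv2.VideoCapture('videoplayback.mp4')
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        image_data.put(frame)  # blocks once the queue already holds 16 frames

if __name__ == '__main__':
    image_data = Queue(maxsize=16)  # bounded queue keeps memory flat
    Process(target=echo, args=(image_data,), daemon=True).start()
    while True:
        frame = image_data.get()  # consumer sets the pace
        print(frame.shape)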
The following is a general purpose single producer/multiple consumer implementation. The producer (class StreamVideos) creates a shared memory array whose size is the number of bytes in the video frame. One or more consumers (you specify the number of consumers to StreamVideos) can then call StreamVideos.get_next_frame() to retrieve the next frame. This method converts the shared array back into a numpy array for subsequent processing. The producer will only read the next frame into the shared array after all consumers have called get_next_frame:
#!/usr/bin/env python3

import multiprocessing
import numpy as np
import ctypes
import cv2

class StreamVideos:
    def __init__(self, path, n_consumers):
        """
        path is the path to the video;
        n_consumers is the number of tasks to which we will be streaming this.
        """
        self._path = path
        self._event = multiprocessing.Event()
        self._barrier = multiprocessing.Barrier(n_consumers + 1, self._reset_event)
        # Discover how large a frame is by getting the first frame
        cap = cv2.VideoCapture(self._path)
        ret, frame = cap.read()
        if ret:
            self._shape = frame.shape
            frame_size = self._shape[0] * self._shape[1] * self._shape[2]
            self._arr = multiprocessing.RawArray(ctypes.c_ubyte, frame_size)
        else:
            self._arr = None
        cap.release()

    def _reset_event(self):
        self._event.clear()

    def start_streaming(self):
        cap = cv2.VideoCapture(self._path)
        while True:
            self._barrier.wait()
            ret, frame = cap.read()
            if not ret:
                # No more readable frames:
                break
            # Store frame into shared array:
            temp = np.frombuffer(self._arr, dtype=frame.dtype)
            temp[:] = frame.flatten(order='C')
            self._event.set()
        cap.release()
        self._arr = None
        self._event.set()

    def get_next_frame(self):
        # Tell producer that this consumer is through with the previous frame:
        self._barrier.wait()
        # Wait for next frame to be read by the producer:
        self._event.wait()
        if self._arr is None:
            return None
        # Return shared array as a numpy array:
        return np.ctypeslib.as_array(self._arr).reshape(self._shape)

def consumer(producer, id):
    frame_name = f'Frame - {id}'
    while True:
        frame = producer.get_next_frame()
        if frame is None:
            break
        cv2.imshow(frame_name, frame)
        cv2.waitKey(1)
    cv2.destroyAllWindows()

def main():
    producer = StreamVideos('videoplayback.mp4', 2)
    consumer1 = multiprocessing.Process(target=consumer, args=(producer, 1))
    consumer1.start()
    consumer2 = multiprocessing.Process(target=consumer, args=(producer, 2))
    consumer2.start()
    """
    # Run as a child process:
    producer_process = multiprocessing.Process(target=producer.start_streaming)
    producer_process.start()
    producer_process.join()
    """
    # Run in the main process:
    producer.start_streaming()
    consumer1.join()
    consumer2.join()

if __name__ == '__main__':
    main()

How to convert PyTorch model() output to something cv2.imshow() can display

Recently, I have wanted to show a Tello stream with image detection. My first thought was to save the model's output locally with the save() method and show it with cv2.imshow(). It works, but the stream with object detection has a delay of about 4-5 seconds.
My code:
from threading import Thread
from djitellopy import Tello
import cv2, math, time
import torch
import os
import numpy as np
import asyncio
import imutils
from PIL import Image

path = r'C:\yolov5-master'
model = torch.hub.load(path, 'yolov5s', source='local', pretrained=True)

tello = Tello()
tello.connect()
tello.streamon()
frame_read = tello.get_frame_read()

class VideoStreamWidget(object):
    def __init__(self, src=0):
        # Start the thread to read frames from the video stream
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()

    def update(self):
        # Read the next frame from the stream
        global frame
        while True:
            self.frame = cv2.cvtColor(frame_read.frame, cv2.COLOR_RGB2BGR)
            time.sleep(.01)

    def show_frame(self):
        # Display frames in main program
        wee = model(self.frame)
        arr = wee.datah().cpu().numpy()
        img = Image.fromarray.fromarray(arr, 'RGB')
        result = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        cv2.imshow('frame', result)
        key = cv2.waitKey(1)

if __name__ == '__main__':
    video_stream_widget = VideoStreamWidget()
    time.sleep(1)
    while True:
        try:
            video_stream_widget.show_frame()
        except AttributeError:
            pass
I'm wondering what data type the output of model() is.
And I tried:
wee = model(self.frame)
print( type( wee ) )
output:
<class 'models.common.Detections'>
How can I convert this kind of data to something that the cv2.imshow() method accepts? Or is there any way to show a real-time stream with object detection without the delay?
Any help is appreciated.
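
For reference, a hedged sketch of my own (not from the question): the models.common.Detections object returned by a YOLOv5 hub model has a render() method that draws the boxes and returns a list of plain NumPy arrays, one per input image, which cv2.imshow() accepts directly with no round trip through PIL or save(). The hub path and the colour handling below are assumptions; adapt them to the local model and the Tello frame format:
import cv2
import torch

# assumption: loading from the hub; the question loads the same weights from a local path
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)

def show_detections(frame_rgb):
    results = model(frame_rgb)        # models.common.Detections
    annotated = results.render()[0]   # NumPy array with boxes drawn, same colour order as the input
    cv2.imshow('frame', cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))
    cv2.waitKey(1)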

Multithreading face recognition in Python

I am trying to use 2 threads in Python to solve a face-recognition task. The first thread captures frames and appends them to a deque, and the second thread reads frames from the deque and runs the face recognition; the deque is a global variable. When I start the 2 threads, it shows the result below.
This is the result of the program. In the second thread, the process function doesn't execute.
And here is my code. How can I make it so that when a new frame is appended to the deque in the first thread, the second thread detects and recognizes the face in that new frame?
import face_recognition
import cv2
from threading import Thread
from collections import deque
import pickle
import time  # needed for time.sleep below

svm = pickle.load(open('svm.pkl', 'rb'))
deque = deque()

def open_camera(url):
    print('start camera')
    # capture = cv2.VideoCapture('rtsp://' + str(url))
    capture = cv2.VideoCapture(0)
    while capture.isOpened():
        _, frame = capture.read()
        frame_resize = cv2.resize(frame, (640, 480))
        cv2.imshow('Video', frame_resize)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # cvtColor
        frame_resize = frame_resize[:, :, ::-1]
        deque.append(frame_resize)
    capture.release()
    cv2.destroyAllWindows()

def spin(seconds):
    time.sleep(seconds)

def process_camera():
    print("start process")
    global deque
    if deque:
        time.sleep(0.1)
        print(deque[-1])
        for i in deque:
            process(frame=i)
    else:
        for i in deque:
            process(frame=i)

def process(frame):
    print('process')
    frame = cv2.imread(frame)
    face_locations = face_recognition.face_locations(frame)
    face_encodings = face_recognition.face_encodings(frame, face_locations)
    face_names = []
    for face_encoding in face_encodings:
        name = svm.predict(face_encoding.reshape(1, -1))
        face_names.append(name)
    print(face_names)

thread1 = Thread(target=open_camera, args=(0,))
thread2 = Thread(target=process_camera, args=())
thread1.start()
thread2.start()
print('\texit main')
Thank you so much!
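
One way to get the second thread to wait for frames instead of returning immediately (a sketch of my own, not the asker's code; the processing body is a placeholder for the face_recognition/svm calls) is to swap the global deque for a queue.Queue, whose get() blocks until the capture thread has put a new frame:
import queue
import threading
import cv2

frames = queue.Queue(maxsize=2)  # small buffer: drop old frames, stay live

def open_camera():
    capture = cv2.VideoCapture(0)
    while capture.isOpened():
        ok, frame = capture.read()
        if not ok:
            break
        if frames.full():  # keep only the newest frames
            try:
                frames.get_nowait()
            except queue.Empty:
                pass
        frames.put(frame)
    capture.release()

def process_camera():
    while True:
        frame = frames.get()  # blocks until a new frame arrives
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # run face_recognition.face_locations / face_encodings and svm.predict on `rgb` here
        print('processing frame', rgb.shape)

threading.Thread(target=open_camera, daemon=True).start()
process_camera()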

Problems with serial communication and queues

I have some problems creating a multi-process serial logger.
The plan: have a separate process read from the serial port and put the data into a queue. The main process reads the entire queue after some time and processes the data.
But I'm not sure this is the right way to do it, because sometimes the data is not in the right order. It works well for slow communication.
Do I have to lock something? Is there a smarter way to do this?
import time
import serial
from multiprocessing import Process, Queue

def myProcess(q):
    with serial.Serial("COM2", 115200, 8, "E", 1, timeout=None) as ser:
        while True:
            q.put("%02X" % ser.read(1)[0])

if __name__ == '__main__':
    try:
        q = Queue()
        p = Process(target=myProcess, args=(q,))
        p.daemon = True
        p.start()
        data = []
        while True:
            print(q.qsize())  # !debug
            while not q.empty():  # get all data from queue
                data.append(q.get())
            # proc_data(data)  # data processing
            time.sleep(1)  # emulate data processing
            del data[:]  # clear buffer
    except KeyboardInterrupt:
        print("clean-up")  # !debug
        p.join()
Update:
I tried another version based on threads (see the code below), but with the same effect/problem. The carry-over works fine, but one byte 'between' the carry-over and the new data is always gone. Does the script miss that byte while the main thread reads the queue?
import time, serial, threading, queue

def read_port(q):
    with serial.Serial("COM2", 19200, 8, "E", 1, timeout=None) as ser:
        while t.is_alive():
            q.put("%02X" % ser.read(1)[0])

def proc_data(data, crc=None):
    # processing data here
    carry = data[len(data) // 2:]  # DEBUG: emulate result (return last half of data)
    return carry

if __name__ == '__main__':
    try:
        q = queue.Queue()
        t = threading.Thread(target=read_port, args=(q,))
        t.daemon = True
        t.start()
        data = []
        while True:
            try:
                while True:
                    data.append(q.get_nowait())  # get all data from queue
            except queue.Empty:
                pass
            print(data)  # DEBUG: show carry-over + new data
            data = proc_data(data)  # process data and store carry-over
            print(data)  # DEBUG: show new carry-over
            time.sleep(1)  # DEBUG: emulate processing time
    except KeyboardInterrupt:
        print("clean-up")
        t.join(0)
Consider the following code.
1) The two processes are siblings; the parent just sets them up, then waits for Ctrl-C to interrupt everything.
2) One process puts raw bytes on the shared queue.
3) The other process blocks for the first byte of data. When it gets the first byte, it grabs the rest of the available data, outputs it in hex, then continues.
4) The parent process just sets up the others, then waits for an interrupt using signal.pause().
Note that with multiprocessing, the qsize() (and probably empty()) functions are unreliable; the code below avoids them, so it will reliably grab your data.
source
import signal, time
import serial
from queue import Empty
from multiprocessing import Process, Queue

def read_port(q):
    with serial.Serial("COM2", 115200, 8, "E", 1, timeout=None) as ser:
        while True:
            q.put(ser.read(1)[0])

def show_data(q):
    while True:
        # block for the first byte of data
        data = [q.get()]
        # consume more data if available
        try:
            while True:
                data.append(q.get_nowait())
        except Empty:
            pass
        print('got:', ":".join("{:02x}".format(c) for c in data))

if __name__ == '__main__':
    try:
        q = Queue()
        Process(target=read_port, args=(q,)).start()
        Process(target=show_data, args=(q,)).start()
        signal.pause()  # wait for interrupt
    except KeyboardInterrupt:
        print("clean-up")  # !debug

OpenCV multiprocessing in Python - queue sync

I am a beginner with multiprocessing in Python. I am developing a multiprocessing script for OpenCV, since my computer cannot keep up with real-time processing of OpenCV frames on its own.
I aim to load and process frames in the main process and display them using a child process. My problem is that I do not understand how to build the display loop from the queued frames. Can someone please help?
My code:
#!/usr/bin/env python

from multiprocessing import Process, Queue
from Queue import Empty
from PIL import Image
import cv2
import cv2.cv as cv
import numpy as np

def image_display(taskqueue):
    cv2.namedWindow('image_display', cv2.CV_WINDOW_AUTOSIZE)
    while True:
        if taskqueue.get() == None:
            continue
        else:
            image = taskqueue.get()
            im = Image.fromstring(image['mode'], image['size'], image['pixels'])
            num_im = np.asarray(im)
            cv2.imshow('image_display', num_im)

if __name__ == '__main__':
    taskqueue = Queue()
    vidFile = cv2.VideoCapture('doppler.wmv')
    p = Process(target=image_display, args=(taskqueue,))
    p.start()
    while True:
        flag, image = vidFile.read()
        if flag == 0:
            break
        im = Image.fromarray(image)
        im_dict = {
            'pixels': im.tostring(),
            'size': im.size,
            'mode': im.mode,
        }
        taskqueue.put(im_dict)
    p.join()
    cv.DestroyAllWindows()
EDIT
Thanks to the answers, I was able to find the problem. Below is a modified script in which I slowed my loops on purpose and added an outqueue for debugging. It appears that although the frames captured with vidFile.read() are indeed passed as numpy arrays through the queue and are then passed unmodified as an argument to cv2.imshow(), cv2.imshow() refuses to display the image for an unknown reason. Any help to fix that issue would be immensely appreciated!
Modified code:
#!/usr/bin/env python

from multiprocessing import Process, Queue
from Queue import Empty
import cv2
import cv2.cv as cv
import numpy as np
import time

def image_display(taskqueue, outqueue):
    cv2.namedWindow('image_display', cv2.CV_WINDOW_AUTOSIZE)
    while True:
        try:
            outqueue.put('trying')
            time.sleep(1)
            image = taskqueue.get()
            outqueue.put(image)
            cv2.imshow('image_display', image)
        except:
            continue

if __name__ == '__main__':
    taskqueue = Queue()
    outqueue = Queue()
    vidFile = cv2.VideoCapture('doppler.wmv')
    p = Process(target=image_display, args=(taskqueue, outqueue))
    p.start()
    while True:
        print outqueue.get()
        flag, image = vidFile.read()
        if flag == 0:
            break
        taskqueue.put(image)
        time.sleep(0.010)
    p.join()
    cv.DestroyAllWindows()
This should work (explanation of changes below):
#!/usr/bin/env python

from multiprocessing import Process, Queue
from Queue import Empty
from PIL import Image
import cv2
import cv2.cv as cv
import numpy as np

def image_display(taskqueue):
    cv2.namedWindow('image_display', cv2.CV_WINDOW_AUTOSIZE)
    while True:
        image = taskqueue.get()              # Added
        if image is None: break              # Added
        cv2.imshow('image_display', image)   # Added
        cv2.waitKey(10)                      # Added
        continue                             # Added
        if taskqueue.get() == None:
            continue
        else:
            image = taskqueue.get()
            im = Image.fromstring(image['mode'], image['size'], image['pixels'])
            num_im = np.asarray(im)
            cv2.imshow('image_display', num_im)

if __name__ == '__main__':
    taskqueue = Queue()
    vidFile = cv2.VideoCapture('doppler.wmv')
    p = Process(target=image_display, args=(taskqueue,))
    p.start()
    while True:
        flag, image = vidFile.read()
        taskqueue.put(image)                 # Added
        import time                          # Added
        time.sleep(0.010)                    # Added
        continue                             # Added
        if flag == 0:
            break
        im = Image.fromarray(image)
        im_dict = {
            'pixels': im.tostring(),
            'size': im.size,
            'mode': im.mode,
        }
        taskqueue.put(im_dict)
    taskqueue.put(None)
    p.join()
    cv.DestroyAllWindows()
I tried to make minimal changes to your code by just adding lines (lines containing comments # Added):
1) Just put the image itself (the original NumPy array) on the queue.
2) Pause a little bit in the master process before reading another frame. You need this so as not to overrun the queue, because imshow() in the spawned process may take a bit longer since it's calling X. You might need to increase this value (in seconds) depending on your system.
3) Spawned process has to do the waitKey() after every imshow().
4) Master process puts the special None image on the queue when it's done.
