OpenCV RTSP camera buffer lag - Python

I'm struggling to understand why I can't get a "LIVE" feed from my IP camera.
There appears to be a buffer, and frames build up in it if they aren't read; since each iteration of my code takes some time, a backlog forms and the feed ends up running in near slow motion compared to what is actually happening.
I found the code below, which starts a thread that reads from the camera in a loop to try to avoid this. But now I get a "LIVE" feed for around 5 frames, and then it stalls and shows the same image for another few.
##camera class - this stops the RTSP feed getting caught in the buffer
import cv2
import threading
import time
from threading import Lock

class Camera:
    def __init__(self, rtsp_link):
        #init last ready and last frame
        self.last_frame = None
        self.last_ready = None
        self.lock = Lock()

        #set capture device
        capture = cv2.VideoCapture(rtsp_link, apiPreference=cv2.CAP_FFMPEG)

        #set thread to clear buffer
        thread = threading.Thread(target=self.rtsp_cam_buffer, args=(capture,), name="rtsp_read_thread")
        thread.daemon = True
        thread.start()

        #delay start of next step to avoid errors
        time.sleep(2)

    def rtsp_cam_buffer(self, capture):
        #loop forever
        while True:
            with self.lock:
                capture.grab()
                self.last_ready, self.last_frame = capture.retrieve()

    def getFrame(self):
        #get last frame
        if (self.last_ready is not None) and (self.last_frame is not None):
            return self.last_frame.copy()
        else:
            return None
What's the correct thing to do in this situation? Is there a way around this?
OR
Should I use something like GStreamer or FFmpeg to get the camera feed? If so, which is better and why? Any advice or pages that give me some Python examples of getting it working? I couldn't find much about it that made sense to me.
thanks
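(For reference, here is a minimal, untested sketch of the GStreamer route asked about above. It assumes OpenCV was built with GStreamer support and that the camera streams H.264; the URL and credentials are placeholders. The drop=true and max-buffers=1 settings on the appsink discard stale frames instead of letting them queue up.)

import cv2

# Placeholder URL/credentials; element names assume an H.264 RTSP camera.
pipeline = (
    "rtspsrc location=rtsp://user:password@192.168.0.40/h264Preview_01_main latency=0 ! "
    "rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! "
    "appsink drop=true max-buffers=1 sync=false"
)

cap = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow("Feed", frame)
    if cv2.waitKey(1) == 27:  # Esc to quit
        break

cap.release()
cv2.destroyAllWindows()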

After searching online through multiple resources, the suggestion to use threads to remove frames from the buffer came up a lot. Although it seemed to work for a while, it caused me issues with duplicate frames being displayed, for a reason I could not work out.
I then tried to build OpenCV from source with GStreamer support, but even once it was compiled correctly it still didn't seem to interface with GStreamer properly.
Eventually I thought the best bet was to go back down the threading route, but again I couldn't get it working. So I gave multiprocessing a shot.
I wrote the class below to handle the camera connection:
import cv2
import time
import multiprocessing as mp

class Camera():

    def __init__(self, rtsp_url):
        #load pipe for data transmission to the process
        self.parent_conn, child_conn = mp.Pipe()
        #load process
        self.p = mp.Process(target=self.update, args=(child_conn, rtsp_url))
        #start process
        self.p.daemon = True
        self.p.start()

    def end(self):
        #send closure request to process
        self.parent_conn.send(2)

    def update(self, conn, rtsp_url):
        #load cam into separate process
        print("Cam Loading...")
        cap = cv2.VideoCapture(rtsp_url, cv2.CAP_FFMPEG)
        print("Cam Loaded...")
        run = True

        while run:
            #grab frames from the buffer
            cap.grab()

            #receive input data
            rec_dat = conn.recv()

            if rec_dat == 1:
                #if frame requested
                ret, frame = cap.read()
                conn.send(frame)

            elif rec_dat == 2:
                #if close requested
                cap.release()
                run = False

        print("Camera Connection Closed")
        conn.close()

    def get_frame(self, resize=None):
        ###used to grab frames from the cam connection process
        ##[resize] param : % of size reduction or increase i.e 0.65 for 35% reduction or 1.5 for a 50% increase
        #send request
        self.parent_conn.send(1)
        frame = self.parent_conn.recv()

        #reset request
        self.parent_conn.send(0)

        #resize if needed
        if resize == None:
            return frame
        else:
            return self.rescale_frame(frame, resize)

    def rescale_frame(self, frame, percent=0.65):
        #percent is a scale factor, e.g. 0.65 shrinks the frame to 65% of its original size
        return cv2.resize(frame, None, fx=percent, fy=percent)
Displaying the frames can be done as below
cam = Camera("rtsp://admin:[somepassword]@192.168.0.40/h264Preview_01_main")

print(f"Camera is alive?: {cam.p.is_alive()}")

while(1):
    frame = cam.get_frame(0.65)

    cv2.imshow("Feed", frame)

    key = cv2.waitKey(1)
    if key == 13: #13 is the Enter Key
        break

cv2.destroyAllWindows()
cam.end()
This solution has resolved all my issues with buffer lag and repeated frames.
Hopefully it will help anyone else in the same situation.

Lewis's solution was helpful in reducing the lag, but there was still some lag in my case, and I found this gist, which is a bit faster:
import os
import sys
import time
import threading
import numpy as np
import cv2 as cv

# also acts (partly) like a cv.VideoCapture
class FreshestFrame(threading.Thread):
    def __init__(self, capture, name='FreshestFrame'):
        self.capture = capture
        assert self.capture.isOpened()

        # this lets the read() method block until there's a new frame
        self.cond = threading.Condition()

        # this allows us to stop the thread gracefully
        self.running = False

        # keeping the newest frame around
        self.frame = None

        # passing a sequence number allows read() to NOT block
        # if the currently available one is exactly the one you ask for
        self.latestnum = 0

        # this is just for demo purposes
        self.callback = None

        super().__init__(name=name)
        self.start()

    def start(self):
        self.running = True
        super().start()

    def release(self, timeout=None):
        self.running = False
        self.join(timeout=timeout)
        self.capture.release()

    def run(self):
        counter = 0
        while self.running:
            # block for fresh frame
            (rv, img) = self.capture.read()
            assert rv
            counter += 1

            # publish the frame
            with self.cond: # lock the condition for this operation
                self.frame = img if rv else None
                self.latestnum = counter
                self.cond.notify_all()

            if self.callback:
                self.callback(img)

    def read(self, wait=True, seqnumber=None, timeout=None):
        # with no arguments (wait=True), it always blocks for a fresh frame
        # with wait=False it returns the current frame immediately (polling)
        # with a seqnumber, it blocks until that frame is available (or no wait at all)
        # with timeout argument, may return an earlier frame;
        #   may even be (0, None) if nothing was received yet

        with self.cond:
            if wait:
                if seqnumber is None:
                    seqnumber = self.latestnum + 1
                if seqnumber < 1:
                    seqnumber = 1

                rv = self.cond.wait_for(lambda: self.latestnum >= seqnumber, timeout=timeout)
                if not rv:
                    return (self.latestnum, self.frame)

            return (self.latestnum, self.frame)
And then you use it like:
# open some camera
cap = cv.VideoCapture('rtsp://URL')
cap.set(cv.CAP_PROP_FPS, 30)
# wrap it
fresh = FreshestFrame(cap)
Use fresh to deal with the open camera
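A minimal consumer loop (my own sketch, not part of the gist) could look like this; each read() call blocks until a frame newer than the last one processed is available, so slow processing simply skips intermediate frames instead of building a backlog:

seq = 0
while True:
    # ask for the first frame newer than the one we last processed
    seq, frame = fresh.read(seqnumber=seq + 1)
    if frame is None:
        break

    cv.imshow("Feed", frame)
    if cv.waitKey(1) == 27:  # Esc to quit
        break

fresh.release()
cv.destroyAllWindows()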

Related

How to synchronously sample two systems using python

I need to acquire samples from two systems:
Video capturer (for example, OpenCV)
Electromagnetic tracking system.
The samples must be synchronized, the sampling time must be as stable as possible, and the total capture time is limited.
For this, I have developed two functions (update, update2), and I have tried to execute them using threads, taking as a starting point one of the examples shown in Run certain code every n seconds.
Code:
from threading import Timer, Thread, Event
import polhemus # Package for Tracker system
import cv2, time
import numpy as np
import matplotlib.pyplot as plt

class InfiniteTimer():
    """A Timer class that does not stop, unless you want it to."""

    def __init__(self, seconds, target):
        self._should_continue = False
        self.is_running = False
        self.seconds = seconds
        self.target = target
        self.thread = None

    def _handle_target(self):
        self.is_running = True
        self.target()
        self.is_running = False
        self._start_timer()

    def _start_timer(self):
        if self._should_continue: # Code could have been running when cancel was called.
            self.thread = Timer(self.seconds, self._handle_target)
            self.thread.start()

    def start(self):
        if not self._should_continue and not self.is_running:
            self._should_continue = True
            self._start_timer()
        else:
            print("Timer already started or running, please wait if you're restarting.")

    def cancel(self):
        if self.thread is not None:
            self._should_continue = False # Just in case thread is running and cancel fails.
            self.thread.cancel()
        else:
            print("Timer never started or failed to initialize.")

src = 0 # Computer camera
track = polhemus.polhemus() # Tracker system
check = track.Initialize()

capture = cv2.VideoCapture(src) # Video capture system
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
capture.read()

frames = []
pos = []

def update(): # Function for image data capturing
    t11 = time.time()
    (_, frame) = capture.read() # get one sample from OpenCV
    t21 = time.time()
    frames.append((frame, [t11, t21])) # save sample

def update2(): # Function for pose data capturing
    if check:
        t1 = time.time()
        track.Run() # get one sample from Tracker
        t2 = time.time()
        pose = [track.PositionTooltipX1, track.PositionTooltipY1, track.PositionTooltipZ1,
                track.AngleX1, track.AngleY1, track.AngleZ1]
        pos.append((pose, [t1, t2])) # save sample
    else:
        print('Tracker not connected')

def mainUpdate(): # main function to sample the two systems at the same time
    Thread(target=update).start()
    Thread(target=update2).start()

print('Recording starts at', time.time())
tObject = InfiniteTimer(0.04, mainUpdate) # Execute mainUpdate every 0.04 seconds (25 samples per second)
tObject.start()

time.sleep(5) # Total Capture time

tObject.cancel()
print('Recording ends at', time.time())
Results
I set a sample period of 0.04 seconds and a total capture time of 5 seconds; this should generate approximately 25 samples per second, that is, 125 samples during the entire capture.
However, I only obtained 102 samples with a sampling period that varies between 0.045 and 0.065 seconds.
Is there a way to sample both systems synchronously with a stable sampling period?
Thanks in advance.

Why is multiprocessing giving me EOFError: Ran out of input?

videoPlayerThreading is a library I wrote myself; it provides two classes, each using threading, to get and show frames. objDect is also my own library; it returns a frame after object detection. I get EOFError: Ran out of input, and from the traceback I think it is caused by the multiprocessing itself, so I haven't posted my libraries in full because they are long. Can anyone help me with what is wrong? Thank you.
from multiprocessing import Process
import sys
import videoPlayerThreading as vpt
from objDect import objDect as od

def main(videoSource):
    obd = od(videoSources=videoSource)
    getFrame = vpt.getFrames(videoSource).start()
    showFrame = vpt.showFrames(videoSource).start()
    while True:
        frame = getFrame.frame
        frame = Process(target=obd.predictYolo, args=(frame,)).start()
        showFrame.frame = frame
        if getFrame.doVideo == False or showFrame.doVideo == False:
            getFrame.stop()
            showFrame.stop()
            sys.exit()

if __name__=="__main__":
    main(0)
Edit:
Here are the getFrames and showFrames classes; they basically only get and show frames using threading.
from typing import Union
from threading import Thread
import cv2

class getFrames():
    def __init__(self,
                 videoSource: Union[int, str] = 0):
        self.stream = self.videoInit(videoSource)
        self.hasFrame, self.frame = self.stream.read()
        self.doVideo = True

    def videoInit(self,
                  videoSource: Union[int, str]):
        try:
            cap = cv2.VideoCapture(videoSource)
        except Exception as e:
            raise Exception(f"Video source error: {e}")
        return cap

    def start(self):
        Thread(target=self.getFrames, args=()).start()
        return self

    def getFrames(self):
        while self.doVideo:
            if not self.hasFrame:
                self.stop()
            else:
                (self.hasFrame, self.frame) = self.stream.read()

    def stop(self):
        self.doVideo = False
        self.stream.release()

class showFrames():
    def __init__(self,
                 frame: cv2 = None):
        self.frame = frame
        self.doVideo = True

    def start(self):
        Thread(target=self.showFrame, args=()).start()
        return self

    def showFrame(self):
        while self.doVideo:
            cv2.imshow("Video", self.frame)
            if cv2.waitKey(1) == ord("q"):
                self.doVideo = False

    def stop(self):
        self.doVideo = False
As best I can understand your program logic, you need something like the following. The generator function read_frames (which may or may not need correction) reads the frames one by one, yielding each frame. The main process creates a multiprocessing pool and passes each input frame to it to be processed by obd.predictYolo, then sets showFrame.frame to the returned frame. This continues until either there are no more frames to process or showFrame.doVideo is False. In short, I have done away with your getFrames class, which is not needed here.
I do not have OpenCV installed and do not really know the package, nor do I have your video file, so consider this a starting point for your further investigation.
from multiprocessing.pool import Pool
from typing import Union
import sys
import cv2
import videoPlayerThreading as vpt
from objDect import objDect as od

def read_frames(videoSource: Union[int, str] = 0):
    try:
        stream = cv2.VideoCapture(videoSource)
    except Exception as e:
        raise Exception(f"Video source error: {e}")
    while True:
        hasFrame, frame = stream.read()
        if not hasFrame:
            break
        yield frame

def main(videoSource):
    obd = od(videoSources=videoSource)
    showFrame = vpt.showFrames(videoSource).start()
    with Pool() as pool:
        for frame in pool.imap(obd.predictYolo, read_frames(videoSource)):
            showFrame.frame = frame
            if showFrame.doVideo is False:
                showFrame.stop()
                break

if __name__=="__main__":
    main(0)

How to record audio each time user presses a key?

How can I record the user's audio indefinitely, but only while the user holds the ctrl key, and shut down the recording loop when the user presses ctrl+c? So far, based on some online examples, I have built this script:
from pynput import keyboard
import time, os
import pyaudio
import wave
import sched
import sys
from playsound import playsound

CHUNK = 8192
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
WAVE_OUTPUT_FILENAME = "mic.wav"

p = pyaudio.PyAudio()
frames = []

def callback(in_data, frame_count, time_info, status):
    frames.append(in_data)
    return (in_data, pyaudio.paContinue)

class MyListener(keyboard.Listener):
    def __init__(self):
        super(MyListener, self).__init__(self.on_press, self.on_release)
        self.key_pressed = None
        self.wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
        self.wf.setnchannels(CHANNELS)
        self.wf.setsampwidth(p.get_sample_size(FORMAT))
        self.wf.setframerate(RATE)

    def on_press(self, key):
        try:
            if key.ctrl:
                self.key_pressed = True
            return True
        except AttributeError:
            sys.exit()

    def on_release(self, key):
        if key.ctrl:
            self.key_pressed = False
        return True

listener = MyListener()
listener.start()
started = False
stream = None

def recorder():
    global started, p, stream, frames

    while True:
        try:
            if listener.key_pressed and not started:
                # Start the recording
                try:
                    stream = p.open(format=FORMAT,
                                    channels=CHANNELS,
                                    rate=RATE,
                                    input=True,
                                    frames_per_buffer=CHUNK,
                                    stream_callback=callback)
                    print("Stream active:", stream.is_active())
                    started = True
                    print("start Stream")
                except KeyboardInterrupt:
                    print('\nRecording finished: ' + repr(WAVE_OUTPUT_FILENAME))
                    quit()

            elif not listener.key_pressed and started:
                print("Stop recording")
                listener.wf.writeframes(b''.join(frames))
                listener.wf.close()
                print("You should have a wav file in the current directory")
                print('-> Playing recorded sound...')
                playsound(str(os.getcwd())+'/mic.wav')
                os.system('python "/Users/user/rec.py"')

        except KeyboardInterrupt:
            print('\nRecording finished: ' + repr(WAVE_OUTPUT_FILENAME))
            quit()
        except AttributeError:
            quit()

print("-> Press and hold the 'ctrl' key to record your audio")
print("-> Release the 'ctrl' key to end recording")
recorder()
The problem is that this is really inefficient; for example, the computer starts heating up. The only way I found to make the program keep running and recording different audio samples was with os.system('python "/Users/user/rec.py"'). To finish the program I tried either to catch the exception with:
except AttributeError:
    sys.exit()
or with the user input:
if key.ctrl_c:
    sys.exit()
Based on the pynput docs, I tried to make effective use of the listeners. However, for this specific scenario, what is the recommended way of using them?
As to the primary concern of your computer seeming to work terribly hard, that's because you use a while loop to constantly check for when the record key is released. Within this loop, the computer will loop around as fast as it can without ever taking a break.
A better solution is to use event driven programming where you let the OS inform you of events periodically, and check if you want to do anything when they happen. This may sound complicated, but fortunately pynput does most of the hard work for you.
If you keep track of the state of the recording or playback, it is also fairly simple to start a new recording the next time a ctrl key-down event happens, without needing the "hack" of calling an entire new process recursively for each new recording. The event loop inside the keyboard listener will keep going until one of the callback functions returns False or raises self.StopException().
I have created a simple listener class, similar to your initial attempt, that calls on a recorder or player instance (which I'll get to later) to start and stop. I also have to agree with Anwarvic that ctrl+c is supposed to be reserved as an emergency way of stopping a script, so I have changed the stop command to the letter q.
class listener(keyboard.Listener):
    def __init__(self, recorder, player):
        super().__init__(on_press=self.on_press, on_release=self.on_release)
        self.recorder = recorder
        self.player = player

    def on_press(self, key):
        if key is None: #unknown event
            pass
        elif isinstance(key, keyboard.Key): #special key event
            if key.ctrl and self.player.playing == 0:
                self.recorder.start()
        elif isinstance(key, keyboard.KeyCode): #alphanumeric key event
            if key.char == 'q': #press q to quit
                if self.recorder.recording:
                    self.recorder.stop()
                return False #this is how you stop the listener thread
            if key.char == 'p' and not self.recorder.recording:
                self.player.start()

    def on_release(self, key):
        if key is None: #unknown event
            pass
        elif isinstance(key, keyboard.Key): #special key event
            if key.ctrl:
                self.recorder.stop()
        elif isinstance(key, keyboard.KeyCode): #alphanumeric key event
            pass

if __name__ == '__main__':
    r = recorder("mic.wav")
    p = player("mic.wav")
    l = listener(r, p)
    print('hold ctrl to record, press p to playback, press q to quit')
    l.start() #keyboard listener is a thread so we start it here
    l.join() #wait for the thread to terminate so the program doesn't instantly close
With that structure, we then need a recorder class with start and stop functions that do not block the listener thread (i.e. they are asynchronous), so it can continue to receive key events. The documentation for PyAudio gives a pretty good example of asynchronous output, so I simply applied it to an input. A little bit of rearranging, plus a flag to let our listener know when we're recording, and we have a recorder class:
class recorder:
    def __init__(self,
                 wavfile,
                 chunksize=8192,
                 dataformat=pyaudio.paInt16,
                 channels=2,
                 rate=44100):
        self.filename = wavfile
        self.chunksize = chunksize
        self.dataformat = dataformat
        self.channels = channels
        self.rate = rate
        self.recording = False
        self.pa = pyaudio.PyAudio()

    def start(self):
        #we call start and stop from the keyboard listener, so we use the asynchronous
        # version of pyaudio streaming. The keyboard listener must regain control to
        # begin listening again for the key release.
        if not self.recording:
            self.wf = wave.open(self.filename, 'wb')
            self.wf.setnchannels(self.channels)
            self.wf.setsampwidth(self.pa.get_sample_size(self.dataformat))
            self.wf.setframerate(self.rate)

            def callback(in_data, frame_count, time_info, status):
                #file write should be able to keep up with audio data stream (about 1378 Kbps)
                self.wf.writeframes(in_data)
                return (in_data, pyaudio.paContinue)

            self.stream = self.pa.open(format=self.dataformat,
                                       channels=self.channels,
                                       rate=self.rate,
                                       input=True,
                                       stream_callback=callback)
            self.stream.start_stream()
            self.recording = True
            print('recording started')

    def stop(self):
        if self.recording:
            self.stream.stop_stream()
            self.stream.close()
            self.wf.close()

            self.recording = False
            print('recording finished')
Finally, we create an audio player for playback when you press p. I threw the PyAudio example into a thread which is created every time you press the button, so that multiple players can be created which overlap each other. We also keep track of how many players are playing so we don't try to record while the file is already in use by a player. (I have also included my imports at the top.)
from threading import Thread, Lock
from pynput import keyboard
import pyaudio
import wave

class player:
    def __init__(self, wavfile):
        self.wavfile = wavfile
        self.playing = 0 #flag so we don't try to record while the wav file is in use
        self.lock = Lock() #mutex so incrementing and decrementing self.playing is safe

    #contents of the run function are processed in another thread so we use the blocking
    # version of pyaudio play file example: http://people.csail.mit.edu/hubert/pyaudio/#play-wave-example
    def run(self):
        with self.lock:
            self.playing += 1
        with wave.open(self.wavfile, 'rb') as wf:
            p = pyaudio.PyAudio()
            stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                            channels=wf.getnchannels(),
                            rate=wf.getframerate(),
                            output=True)

            data = wf.readframes(8192)
            while data != b'':
                stream.write(data)
                data = wf.readframes(8192)

            stream.stop_stream()
            stream.close()
            p.terminate()
            wf.close()
        with self.lock:
            self.playing -= 1

    def start(self):
        Thread(target=self.run).start()
I can't guarantee this is perfectly free of bugs, but if you have any questions on how it works / how to get it working, feel free to comment.
I really don't recommend using ctrl + c for anything other than interrupting. Also, I don't recommend holding down a button to keep recording. What I suggest is to use one key to start recording and another to stop; that's why in the following code I've used s to start recording and q to quit. It's totally configurable and you can change it based on your preference:
Install Dependencies
pip install pyaudio numpy scipy
sudo pip install keyboard
Recorder
A simple class to record from your mic:
import pyaudio
import keyboard
import numpy as np
from scipy.io import wavfile

class Recorder():
    def __init__(self, filename):
        self.audio_format = pyaudio.paInt16
        self.channels = 1
        self.sample_rate = 44100
        self.chunk = int(0.03*self.sample_rate)
        self.filename = filename
        self.START_KEY = 's'
        self.STOP_KEY = 'q'

    def record(self):
        recorded_data = []
        p = pyaudio.PyAudio()
        stream = p.open(format=self.audio_format, channels=self.channels,
                        rate=self.sample_rate, input=True,
                        frames_per_buffer=self.chunk)
        while(True):
            data = stream.read(self.chunk)
            recorded_data.append(data)
            if keyboard.is_pressed(self.STOP_KEY):
                print("Stop recording")
                # stop and close the stream
                stream.stop_stream()
                stream.close()
                p.terminate()
                #convert recorded data to numpy array
                recorded_data = [np.frombuffer(frame, dtype=np.int16) for frame in recorded_data]
                wav = np.concatenate(recorded_data, axis=0)
                wavfile.write(self.filename, self.sample_rate, wav)
                print("You should have a wav file in the current directory")
                break

    def listen(self):
        print(f"Press `{self.START_KEY}` to start and `{self.STOP_KEY}` to quit!")
        while True:
            if keyboard.is_pressed(self.START_KEY):
                self.record()
                break
To use this class, simply call the listen() method like so:
recorder = Recorder("mic.wav") #name of output file
recorder.listen()
Try this:
import sys

try:
    #Your code here
except KeyboardInterrupt:
    sys.exit()

My process finishes its `run` function, but it doesn't die

I'm subclassing multiprocessing.Process to create a class that will asynchronously grab images from a camera and push them to some queues for display and saving to disk.
The problem I'm having is that when I issue a stop command using a multiprocessing.Event object that belongs to the Process-descendant-object, the process successfully completes the last line of the run function, but then it doesn't die. The process just continues to exist and continues to return true from the is_alive function. I don't understand how this could be possible. What would cause a process to complete its run function but not die?
Maddeningly, when I separated this object from the larger context I'm using it in (which includes several other Process subclasses also running simultaneously), I can't reproduce the problem, which tends to make me think it has something to do with the rest of the code, but I don't understand how that could be - if it executed the last line of the run function, shouldn't it die regardless of what else is going on? I must be misunderstanding something about how a Process object works.
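One documented behaviour worth ruling out (this is a guess on my part, not something confirmed by the code below): a process that has put items on a multiprocessing.Queue will not terminate until all of those items have been flushed to the underlying pipe, so if imageQueue or monitorImageQueue is not being drained by the rest of the program, run() can return while is_alive() stays True. A self-contained sketch of that effect, with item sizes chosen just to exceed the pipe buffer:

import multiprocessing as mp
import time

def producer(q):
    # put more data than the underlying pipe can buffer
    for _ in range(100):
        q.put(bytes(1_000_000))
    print("target function finished")

if __name__ == '__main__':
    q = mp.Queue()
    p = mp.Process(target=producer, args=(q,), daemon=True)
    p.start()
    time.sleep(2)
    print("alive before draining:", p.is_alive())   # True: feeder thread still flushing
    for _ in range(100):
        q.get()                                     # drain the queue
    p.join(timeout=5)
    print("alive after draining:", p.is_alive())    # False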
Here's the code below. When I run it, I see the message "Video acquire process STOPPED" printed out, but the process doesn't die.
class VideoAcquirer(mp.Process):
    def __init__(self, camSerial, imageQueue, monitorImageQueue, acquireSettings={}, monitorFrameRate=15):
        mp.Process.__init__(self, daemon=True)
        self.camSerial = camSerial
        self.acquireSettings = acquireSettings
        self.imageQueue = imageQueue
        self.monitorImageQueue = monitorImageQueue
        self.monitorFrameRate = monitorFrameRate
        self.stop = mp.Event()

    def stopProcess(self):
        print('Stopping video acquire process')
        self.stop.set()

    def run(self):
        system = PySpin.System.GetInstance()
        camList = system.GetCameras()
        cam = camList.GetBySerial(self.camSerial)
        cam.Init()
        nodemap = cam.GetNodeMap()
        setCameraAttributes(nodemap, self.acquireSettings)
        cam.BeginAcquisition()

        monitorFramePeriod = 1.0/self.monitorFrameRate
        print("Video monitor frame period:", monitorFramePeriod)
        lastTime = time.time()
        k = 0
        im = imp = imageResult = None
        print("Image acquisition begins now!")
        while not self.stop.is_set():
            try:
                # Retrieve next received image
                print(1)
                imageResult = cam.GetNextImage(100)  # Timeout of 100 ms to allow for stopping process
                print(2)

                # Ensure image completion
                if imageResult.IsIncomplete():
                    print('Image incomplete with image status %d...' % imageResult.GetImageStatus())
                else:
                    # Print image information; height and width recorded in pixels
                    width = imageResult.GetWidth()
                    height = imageResult.GetHeight()
                    k = k + 1
                    print('Grabbed Image %d, width = %d, height = %d' % (k, width, height))

                    im = imageResult.Convert(PySpin.PixelFormat_Mono8, PySpin.HQ_LINEAR)
                    imp = PickleableImage(im.GetWidth(), im.GetHeight(), 0, 0, im.GetPixelFormat(), im.GetData())
                    self.imageQueue.put(imp)

                    # Put the occasional image in the monitor queue for the UI
                    thisTime = time.time()
                    if (thisTime - lastTime) >= monitorFramePeriod:
                        # print("Sent frame for monitoring")
                        self.monitorImageQueue.put((self.camSerial, imp))
                        lastTime = thisTime

                imageResult.Release()
                print(3)
            except PySpin.SpinnakerException as ex:
                pass  # Hopefully this is just because there was no image in camera buffer
                # print('Error: %s' % ex)
                # traceback.print_exc()
                # return False

        # Send stop signal to write process
        print(4)
        self.imageQueue.put(None)

        camList.Clear()
        cam.EndAcquisition()
        cam.DeInit()
        print(5)
        del cam
        system.ReleaseInstance()
        del nodemap
        del imageResult
        del im
        del imp
        del camList
        del system
        print("Video acquire process STOPPED")
I start the process from a tkinter GUI thread roughly like this:
import multiprocessing as mp
camSerial = '2318921'
queue = mp.Queue()
videoMonitorQueue = mp.Queue()
acquireSettings = [('AcquisitionMode', 'Continuous'), ('TriggerMode', 'Off'), ('TriggerSource', 'Line0'), ('TriggerMode', 'On')]
v = VideoAcquirer(camSerial, queue, videoMonitorQueue, acquireSettings=acquireSettings, monitorFrameRate=15)
v.start()
And here's roughly how I stop the process, also from the tkinter GUI thread:
v.stopProcess()
Thanks for your help.

Handle blocking operations efficiently in python

I'm using python and OpenCV to get video from a rtsp stream. I'm getting single frames from the stream and saving them to the file system.
I wrote a StreamingWorker which handles frame grabbing and saving. Additionally, there is a StreamPool that holds all the streaming objects. Since a StreamingWorker is always running, I figured there should only be one per core in order to use as much of the CPU as possible. The StreamPool then provides the VideoCapture objects to the available StreamingWorkers.
The problem is that, for most of the time the script is running, it is blocking:
import os
import time
import threading
import cv2 as cv

class StreamingWorker(object):
    def __init__(self, stream_pool):
        self.stream_pool = stream_pool
        self.start_loop()

    def start_loop(self):
        while True:
            try:
                # getting a stream from the read_strategy
                stream_object = self.stream_pool.next()

                # getting an image from the stream
                _, frame = stream_object['stream'].read()

                # saving image to file system
                cv.imwrite(os.path.join('result', stream_object['feed'], '{}.jpg'.format(time.time())), frame)

            except ValueError as e:
                print('[error] {}'.format(e))

class StreamPool(object):
    def __init__(self, streams):
        self.streams = [{'feed': stream, 'stream': cv.VideoCapture(stream)} for stream in streams]
        self.current_stream = 0
        self.lock = threading.RLock()

    def next(self):
        self.lock.acquire()
        if(self.current_stream + 1 >= len(self.streams)):
            self.current_stream = 0
        else:
            self.current_stream += 1
        result = self.streams[self.current_stream]
        self.lock.release()
        return result

def get_cores():
    # This function returns the number of available cores
    import multiprocessing
    return multiprocessing.cpu_count()

def start(stream_pool):
    StreamingWorker(stream_pool)

def divide_list(input_list, amount):
    # This function divides the whole list into list of lists
    result = [[] for _ in range(amount)]
    for i in range(len(input_list)):
        result[i % len(result)].append(input_list[i])
    return result

if __name__ == '__main__':
    stream_list = ['rtsp://some/stream1', 'rtsp://some/stream2', 'rtsp://some/stream3']
    num_cores = get_cores()
    divided_streams = divide_list(stream_list, num_cores)

    for streams in divided_streams:
        stream_pool = StreamPool(streams)
        thread = threading.Thread(target=start, args=(stream_pool,))
        thread.start()
When I thought of this, I didn't take into account that most of the operations will be blocking operations like:
# Getting a frame blocks
_, frame = stream_object['stream'].read()

# Writing to the file system blocks
cv.imwrite(os.path.join('result', stream_object['feed'], '{}.jpg'.format(time.time())), frame)
The problem with spending too much time blocking is that most of the processing power is wasted. I thought of using futures with a ThreadPoolExecutor, but I can't seem to reach my goal of using as many processing cores as possible. Maybe I'm not setting enough threads.
Is there a standard way of handling blocking operations, in order to make the best use of the cores' processing power? I'm fine having a language-agnostic answer.
I ended up using a ThreadPoolExecutor with the add_done_callback(fn) function.
from concurrent.futures import ThreadPoolExecutor

class StreamingWorker(object):
    def __init__(self, stream_pool):
        self.stream_pool = stream_pool
        self.thread_pool = ThreadPoolExecutor(10)
        self.start_loop()

    def start_loop(self):
        def done(fn):
            print('[info] future done')

        def save_image(stream):
            # getting an image from the stream
            _, frame = stream['stream'].read()

            # saving image to file system
            cv.imwrite(os.path.join('result', stream['feed'], '{}.jpg'.format(time.time())), frame)

        while True:
            try:
                # getting a stream from the read_strategy
                stream_object = self.stream_pool.next()

                # Scheduling the work to the thread pool
                self.thread_pool.submit(save_image, (stream_object)).add_done_callback(done)

            except ValueError as e:
                print('[error] {}'.format(e))
I didn't actually want to do anything after the future finished, but if I used result() the while True loop would block waiting on each result, which would also defeat the whole purpose of using the thread pool.
Side note: I had to add a threading.RLock() around calls to self.stream_pool.next() because apparently OpenCV can't handle calls from multiple threads.
