pickle error with multiprocessing and cv2 - python

I'm trying to write a program that takes a video, splits it into frames, and in parallel filters the frames and saves them to a different directory.
I am running into an error that says:
TypeError: cannot pickle 'cv2.VideoCapture' object
I have tried to understand why this problem occurs, but I'm still unsure why.
Here is my code:
import cv2
import os
import time
import matplotlib.image as pltim
from multiprocessing import Process, Lock
import matplotlib.pyplot as plt

def saveFramesUnfiltered(vid, lock):
    currentFrame = 0  # counter for frames (to organize them by order)
    while True:
        success, frame = vid.read()
        lock.acquire()
        cv2.imwrite("./framesBefore/frame" + str(currentFrame) + '.jpg',
                    frame)  # save unfiltered frame to folder and show the video (using the frames)
        lock.release()
        currentFrame = currentFrame + 1
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break  # end loop when finished
        time.sleep(0.01)

def saveFramesFiltered(lock):
    currentFrame = 0
    framemax = 215
    while currentFrame < framemax:
        if os.path.exists("framesBefore/frame" + str(currentFrame) + '.jpg'):
            lock.acquire()
            image = pltim.imread("./framesBefore/frame" + str(currentFrame) + '.jpg')
            lock.release()
            r, g, b = image[:, :, 0], image[:, :, 1], image[:, :, 2]
            grayImage = 0.299 * r + 0.587 * g + 0.114 * b
            plt.imshow(grayImage, cmap="gray")
            plt.axis("off")
            lock.acquire()
            plt.savefig("./framesAfter/grayImage" + str(currentFrame) + ".jpg", bbox_inches='tight', pad_inches=0)
            lock.release()
            currentFrame += 1  # advance to the next frame
        time.sleep(0.01)

def main():
    if not os.path.exists('framesBefore'):
        os.makedirs('framesBefore')  # create a folder for the unfiltered frames
    if not os.path.exists('framesAfter'):
        os.makedirs('framesAfter')  # create a folder for the filtered frames
    lock = Lock()
    vid = cv2.VideoCapture("maxwell cat.mp4")  # getting the video
    unfiltered_process = Process(target=saveFramesUnfiltered, args=(vid, lock))
    filtered_process = Process(target=saveFramesFiltered, args=(lock,))
    unfiltered_process.start()
    filtered_process.start()
    unfiltered_process.join()
    filtered_process.join()
    vid.release()
    cv2.destroyAllWindows()  # clear memory

if __name__ == '__main__':
    main()
I am also new to using processes and threads in Python (and in general), so I would like to know whether the way I implemented this is correct.
Thanks!
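cv2.VideoCapture wraps a native OS handle, which is why Python cannot pickle it: when multiprocessing starts a child, everything in args must be pickled and sent to the new process. The usual workaround is to pass the video's file path and open the capture inside the worker. A minimal sketch of that idea (same file name as above; the end-of-video check replaces the waitKey call, which has no effect in a process without a window):

import cv2
from multiprocessing import Process, Lock

def saveFramesUnfiltered(video_path, lock):
    vid = cv2.VideoCapture(video_path)  # opened in the child; only the string was pickled
    currentFrame = 0
    while True:
        success, frame = vid.read()
        if not success:
            break  # end of video
        with lock:  # equivalent to acquire()/release()
            cv2.imwrite("./framesBefore/frame" + str(currentFrame) + ".jpg", frame)
        currentFrame += 1
    vid.release()

if __name__ == '__main__':
    lock = Lock()
    p = Process(target=saveFramesUnfiltered, args=("maxwell cat.mp4", lock))
    p.start()
    p.join()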

Related

How to capture video after video from one RTSP URL using OpenCV?

The server sends videos one after another over the same RTSP URL (rtsp://192.168.0.2:8554/).
I can capture and display a single video using OpenCV:
import os
import numpy as np
import cv2 as cv

os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;udp"
cap = cv.VideoCapture('rtsp://192.168.0.2:8554/')
while cap.isOpened():
    ret, frame = cap.read()
    # if frame is read correctly ret is True
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    cv.imshow('frame', frame)
    if cv.waitKey(1) == ord('q'):
        break
cap.release()
cv.destroyAllWindows()
This program returns an error when the server moves on to the next video.
I tried the following, but it didn't work either:
import cv2 as cv
import os
import time

os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;udp"
cap = cv.VideoCapture('rtsp://192.168.0.26:8554/')
if not cap.isOpened():
    print("Cannot open camera")
    exit()
while True:
    try:
        time.sleep(2)
        # Capture frame-by-frame
        ret, frame = cap.read()
        # if frame is read correctly ret is True
        # Our operations on the frame come here
        # Display the resulting frame
        cv.imshow('frame', frame)
        if cv.waitKey(1) == ord('q'):
            break
    except:
        print("Exception!!")
# When everything done, release the capture
cap.release()
cv.destroyAllWindows()
Can I get some help?
Thanks in advance!
I solved this by using a multi-threaded program.
Main file:
from datasets import LoadStreams
import threading
import os
import logging
import cv2
import torch
import time

logger = logging.getLogger(__name__)

def select_device(device='', batch_size=None):
    # device = 'cpu' or '0' or '0,1,2,3'
    cpu_request = device.lower() == 'cpu'
    if device and not cpu_request:  # if device requested other than 'cpu'
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'  # check availability
    cuda = False if cpu_request else torch.cuda.is_available()
    if cuda:
        c = 1024 ** 2  # bytes to MB
        ng = torch.cuda.device_count()
        if ng > 1 and batch_size:  # check that batch_size is compatible with device_count
            assert batch_size % ng == 0, f'batch-size {batch_size} not multiple of GPU count {ng}'
        x = [torch.cuda.get_device_properties(i) for i in range(ng)]
        s = f'Using torch {torch.__version__} '
        for i, d in enumerate((device or '0').split(',')):
            if i == 1:
                s = ' ' * len(s)
            logger.info(f"{s}CUDA:{d} ({x[i].name}, {x[i].total_memory / c}MB)")
    else:
        logger.info(f'Using torch {torch.__version__} CPU')
    logger.info('')  # skip a line
    return torch.device('cuda:0' if cuda else 'cpu')

def detect(rtsp_url):
    dataset = LoadStreams(rtsp_url)
    device = select_device('')
    count = 0
    view_img = True
    # img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    try:
        for frame_idx, (path, img, im0s, vid_cap) in enumerate(dataset):  # for every frame
            count += 1
            im0 = im0s[0].copy()
            if view_img:
                cv2.imshow(str(path), im0)
            # if cv2.waitKey(1) == ord('q'):  # q to quit
            #     raise StopIteration
    except:
        print("finish exception")
        dataset.stop()
    return "good"

if __name__ == '__main__':
    rtsp_url = "rtsp://192.168.0.26:8554/"
    while True:
        for thread in threading.enumerate():
            print(thread.name)
        print(detect(rtsp_url))
Dataset class file:
import glob
import logging
import math
import os
import random
import shutil
import time
import re
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch

class LoadStreams:  # multiple IP or RTSP cameras
    def __init__(self, sources='streams.txt', img_size=640):
        self.mode = 'stream'
        self.img_size = img_size
        self.capture = None
        self.my_thread = None
        self.stopFlag = False
        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
        else:
            sources = [sources]
        n = len(sources)
        self.imgs = [None] * n
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        s = sources[0]
        # for i, s in enumerate(sources):
        #     Start the thread to read frames from the video stream
        #     print('%g/%g: %s... ' % (i + 1, n, s), end='')
        cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
        assert cap.isOpened(), 'Failed to open %s' % s
        w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS) % 100
        self.ret, self.imgs[0] = cap.read()  # guarantee first frame
        thread = Thread(target=self.update, args=([0, cap]), daemon=True)
        print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
        thread.start()
        self.capture = cap
        self.my_thread = thread
        print('')  # newline
        # check for common shapes
        s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0)  # inference shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened() and not self.stopFlag:
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                _, self.imgs[index] = cap.retrieve()
                n = 0
            time.sleep(0.01)  # wait time

    def stop(self):
        self.stopFlag = True
        try:
            # self.capture.release()
            # self.my_thread.join()
            print("stop thread!!")
        except:
            print("ERROR stopping thread!!")

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration
        if not self.ret:
            print("error!!!")
            self.stop()
        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
        # Stack
        img = np.stack(img, 0)
        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)
        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years

def clean_str(s):
    # Cleans a string by replacing special characters with underscore _
    return re.sub(pattern="[|##!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)

def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)
    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 32), np.mod(dh, 32)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
    dw /= 2  # divide padding into 2 sides
    dh /= 2
    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
The line
    while cap.isOpened() and not self.stopFlag:
is especially important: without the stopFlag check the reader threads never exit, so they pile up across calls to detect() and eventually cause a memory error.
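Distilled to its essentials, the pattern is a daemon reader thread guarded by a stop flag. A minimal self-contained sketch (class and attribute names are illustrative, not from the answer above):

import threading
import time
import cv2

class StreamReader:
    def __init__(self, url):
        self.cap = cv2.VideoCapture(url)
        self.frame = None
        self.stop_flag = False
        self.thread = threading.Thread(target=self._update, daemon=True)
        self.thread.start()

    def _update(self):
        # The stop_flag check is what lets the thread terminate cleanly.
        while self.cap.isOpened() and not self.stop_flag:
            ok, frame = self.cap.read()
            if ok:
                self.frame = frame
            time.sleep(0.01)

    def stop(self):
        self.stop_flag = True
        self.thread.join()
        self.cap.release()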

Detecting water in a plant stem in real time using computer vision (OpenCV and Python)

I am working on an experiment with plants in a pressure chamber. I need a computer vision algorithm to identify the exact moment when water starts to appear at the cut end of the stem. In the case of this video, taken with a USB microscope, that is approximately the interval between 0:30 and 0:34.
I tried using MOG, MOG2 and GMG as background subtractors and comparing the histograms of each frame (chi-squared, Bhattacharyya, correlation), looking for changes that could be significant, but still without success. Is there a better alternative for this type of work?
Below is some code (written with the help of a friend):
import numpy as np
import sys
import time
import cv2
from matplotlib import pyplot as plt

video_filename = 'M20201022_004.mov'
capture = cv2.VideoCapture(video_filename)
#fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
fgbg = cv2.createBackgroundSubtractorMOG2()
#kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
#fgbg = cv2.bgsegm.createBackgroundSubtractorGMG()
width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
frames_per_second = capture.get(cv2.CAP_PROP_FPS)
num_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
print(' height: {}\n width: {}\n fps: {}\n num_frames: {}\n'.format(height, width, frames_per_second, num_frames))
frameCounter = 0
t = time.process_time()
dist_hist = 0  # distance between histograms
frame_hist = 0
time_hist = 0
# write file
file1 = open("resultado.txt", "w")
if not capture.isOpened():
    print("Could not open video")
    print('frameCounter: {}'.format(frameCounter))
    sys.exit(1)
while capture.isOpened():
    success, frame = capture.read()
    frameCounter += 1
    # Test for read error
    if not success:
        print('Failed to read video - Video Capture EOF or Error')
        print('frameCounter:{}'.format(frameCounter))
        if frameCounter == num_frames + 1:
            print('EOF found')
        else:
            print('error')
        break
        # sys.exit(1)
    else:
        if frameCounter % 1000 == 0:
            print('type:{} size:{} dtype:{} len(shape):{} contador:{}'.format(type(frame), frame.size, frame.dtype, len(frame.shape), frameCounter))
            if len(frame.shape) < 3:  # grayscale
                h, w = frame.shape
                print('h:{} w:{}'.format(h, w))
            else:  # color image
                h, w, ch = frame.shape
                print('h:{} w:{} ch:{}'.format(h, w, ch))
        fgmask = fgbg.apply(frame)
        #fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        # Initial histogram Test
        if frameCounter == 1:
            hist_initial = cv2.calcHist([fgmask], [0], None, [16], [0, 256])
            # print('hist_initial:{}'.format(hist_initial))
        # elapsed_time = time.process_time() - t
        elapsed_time = frameCounter / frames_per_second
        # Process Histogram
        hist_process = cv2.calcHist([fgmask], [0], None, [16], [0, 256])
        dist = cv2.compareHist(hist_initial, hist_process, cv2.HISTCMP_CHISQR)
        str1 = str(frameCounter) + "," + str(dist) + "," + str(dist_hist) + "," + str(elapsed_time)
        file1.write(str1)
        file1.write("\n")
        if dist > dist_hist:  # Depending on compareHist method
            dist_hist = dist
            time_hist = elapsed_time
            frame_hist = frameCounter
        # Print line at image
        strfmt = 'frame: {} elapsed_time: {:7.2f}'.format(frameCounter, elapsed_time)
        cv2.putText(frame, strfmt, (0, 50), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1, cv2.LINE_AA)
        cv2.imshow('frame', frame)
        cv2.imshow('fgmask', fgmask)
        if cv2.waitKey(1) & 0xff == 27:  # ESC pressed
            break
print('---> frame:{} dist:{:10.6f} time:{:7.2f}'.format(frame_hist, dist_hist, time_hist))
capture.release()
cv2.destroyAllWindows()
file1.close()
Any help appreciated!
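One further direction that may be worth trying (a rough sketch under stated assumptions, not a validated solution): restrict the analysis to a region of interest around the cut surface and track a simple change score against the first frame, instead of whole-frame histograms. The ROI coordinates and the threshold below are placeholders that would have to be tuned on the real footage:

import cv2
import numpy as np

cap = cv2.VideoCapture('M20201022_004.mov')  # same file as above
fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
ok, first = cap.read()
x, y, w, h = 100, 100, 200, 200  # hypothetical ROI around the cut end of the stem
baseline = cv2.cvtColor(first[y:y+h, x:x+w], cv2.COLOR_BGR2GRAY).astype(np.float32)
frame_idx = 0
while True:
    ok, frame = cap.read()
    if not ok:
        break
    frame_idx += 1
    roi = cv2.cvtColor(frame[y:y+h, x:x+w], cv2.COLOR_BGR2GRAY).astype(np.float32)
    score = np.mean(np.abs(roi - baseline))  # mean absolute change against the first frame
    if score > 10.0:  # threshold is an assumption; calibrate on footage with a known onset
        print('change at frame {} (~{:.2f} s), score {:.2f}'.format(frame_idx, frame_idx / fps, score))
        break
cap.release()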

How do you perform Multithreaded Frame Extraction in Python with OpenCV?

Hi Stackoverflow users,
I'm currently trying to perform multithreaded frame extraction using OpenCV. I've done multithreaded operations like this before on other tasks, but for some reason it doesn't seem to work for frame extraction. This is my code:
import cv2
import os
import face_recognition
from PIL import Image
import multiprocessing

try:
    if not os.path.exists('frames'):
        os.makedirs('frames')
except OSError:
    print('Error: Creating directory of frames')

try:
    if not os.path.exists('faces'):
        os.makedirs('faces')
except OSError:
    print('Error: Creating directory of faces')

def frame_extract_1():
    currentFrame_extract = 1
    video_capture = cv2.VideoCapture("DOLFACE_1.mp4")
    while True:
        ret1, frame = video_capture.read()
        if ret1 == False:
            break
        name = 'frames/frame_' + str(currentFrame_extract) + '.jpg'
        print(f"Processor 1 extracted Frame {currentFrame_extract}, saving it as Frame_{currentFrame_extract}.jpg")
        cv2.imwrite(name, frame)
        currentFrame_extract += 4
    video_capture.release()
    cv2.destroyAllWindows()

def frame_extract_2():
    currentFrame_extract = 2
    video_capture = cv2.VideoCapture("DOLFACE_1.mp4")
    while True:
        ret2, frame = video_capture.read()
        if ret2 == False:
            break
        name = 'frames/frame_' + str(currentFrame_extract) + '.jpg'
        print(f"Processor 2 extracted Frame {currentFrame_extract}, saving it as Frame_{currentFrame_extract}.jpg")
        cv2.imwrite(name, frame)
        currentFrame_extract += 4
    video_capture.release()
    cv2.destroyAllWindows()

def frame_extract_3():
    currentFrame_extract = 3
    video_capture = cv2.VideoCapture("DOLFACE_1.mp4")
    while True:
        ret3, frame = video_capture.read()
        if ret3 == False:
            break
        name = 'frames/frame_' + str(currentFrame_extract) + '.jpg'
        print(f"Processor 3 extracted Frame {currentFrame_extract}, saving it as Frame_{currentFrame_extract}.jpg")
        cv2.imwrite(name, frame)
        currentFrame_extract += 4
    video_capture.release()
    cv2.destroyAllWindows()

def frame_extract_4():
    currentFrame_extract = 4
    video_capture = cv2.VideoCapture("DOLFACE_1.mp4")
    while True:
        ret4, frame = video_capture.read()
        if ret4 == False:
            break
        name = 'frames/frame_' + str(currentFrame_extract) + '.jpg'
        print(f"Processor 4 extracted Frame {currentFrame_extract}, saving it as Frame_{currentFrame_extract}.jpg")
        cv2.imwrite(name, frame)
        currentFrame_extract += 4
    video_capture.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    video_file_path = "DOLFACE_1.mp4"
    frame_extractor_1 = multiprocessing.Process(target=frame_extract_1)
    frame_extractor_2 = multiprocessing.Process(target=frame_extract_2)
    frame_extractor_3 = multiprocessing.Process(target=frame_extract_3)
    frame_extractor_4 = multiprocessing.Process(target=frame_extract_4)
    frame_extractor_1.start()
    frame_extractor_2.start()
    frame_extractor_3.start()
    frame_extractor_4.start()
    frame_extractor_1.join()
    frame_extractor_2.join()
    frame_extractor_3.join()
    frame_extractor_4.join()
Do you know what I am doing wrong? Every "Processor" extracts the full video, without skipping 4 frames and letting the other Processors do the remaining 3.
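A likely cause (my reading of the code, not from the original thread): currentFrame_extract += 4 only changes the output file name; video_capture.read() still advances one frame at a time, so every process decodes and saves every frame. One way to make each worker actually own every 4th frame is to grab() all frames but retrieve() only the worker's share, sketched below with the same DOLFACE_1.mp4 input:

import cv2
import multiprocessing

def frame_extract(offset, step=4, path="DOLFACE_1.mp4"):
    cap = cv2.VideoCapture(path)  # each process needs its own capture
    index = 0
    while True:
        ret = cap.grab()  # grab() advances the stream without fully decoding
        if not ret:
            break
        if index % step == offset:
            ret, frame = cap.retrieve()  # decode only the frames this worker owns
            if ret:
                cv2.imwrite(f'frames/frame_{index}.jpg', frame)
        index += 1
    cap.release()

if __name__ == "__main__":
    workers = [multiprocessing.Process(target=frame_extract, args=(i,)) for i in range(4)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()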

OpenCV / Python : multi-threading for live facial recognition

I'm using OpenCV and dlib to perform facial recognition with landmarks, live from the webcam stream. The language is Python. It works fine on my MacBook laptop, but I need it to run from a desktop computer 24/7. The computer is a PC with an Intel® Core™2 Quad CPU Q6600 @ 2.40GHz, 32-bit, running Debian Jessie. The drop in performance is drastic: there is a 10-second delay due to processing!
I therefore looked into multi-threading to gain performance:
I first tried the sample code by OpenCV, and the result is great! All four cores hit 100%, and the performance is much better.
I then replaced the frame-processing code with my own, and it doesn't improve performance at all! Only one core hits 100%; the others stay very low. I even think it's worse with multi-threading on.
I got the facial landmark code from the dlib sample code. I know it can probably be optimized, but I want to understand why I am not able to use my (old) computer's full power with multi-threading.
I'll drop my code below, thanks a lot for reading :)
from __future__ import print_function
import numpy as np
import cv2
import dlib
from multiprocessing.pool import ThreadPool
from collections import deque
from common import clock, draw_str, StatValue
import video

class DummyTask:
    def __init__(self, data):
        self.data = data
    def ready(self):
        return True
    def get(self):
        return self.data

if __name__ == '__main__':
    import sys
    print(__doc__)
    try:
        fn = sys.argv[1]
    except:
        fn = 0
    cap = video.create_capture(fn)

    # Face detector
    detector = dlib.get_frontal_face_detector()
    # Landmarks shape predictor
    predictor = dlib.shape_predictor("landmarks/shape_predictor_68_face_landmarks.dat")

    # This is where the facial detection takes place
    def process_frame(frame, t0, detector, predictor):
        # some intensive computation...
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        clahe_image = clahe.apply(gray)
        detections = detector(clahe_image, 1)
        for k, d in enumerate(detections):
            shape = predictor(clahe_image, d)
            for i in range(1, 68):  # There are 68 landmark points on each face
                cv2.circle(frame, (shape.part(i).x, shape.part(i).y), 1, (0, 0, 255), thickness=2)
        return frame, t0

    threadn = cv2.getNumberOfCPUs()
    pool = ThreadPool(processes=threadn)
    pending = deque()
    threaded_mode = True
    latency = StatValue()
    frame_interval = StatValue()
    last_frame_time = clock()
    while True:
        while len(pending) > 0 and pending[0].ready():
            res, t0 = pending.popleft().get()
            latency.update(clock() - t0)
            draw_str(res, (20, 20), "threaded : " + str(threaded_mode))
            draw_str(res, (20, 40), "latency : %.1f ms" % (latency.value * 1000))
            draw_str(res, (20, 60), "frame interval : %.1f ms" % (frame_interval.value * 1000))
            cv2.imshow('threaded video', res)
        if len(pending) < threadn:
            ret, frame = cap.read()
            t = clock()
            frame_interval.update(t - last_frame_time)
            last_frame_time = t
            if threaded_mode:
                task = pool.apply_async(process_frame, (frame.copy(), t, detector, predictor))
            else:
                task = DummyTask(process_frame(frame, t, detector, predictor))
            pending.append(task)
        ch = cv2.waitKey(1)
        if ch == ord(' '):
            threaded_mode = not threaded_mode
        if ch == 27:
            break
    cv2.destroyAllWindows()
The performance issue was due to a bad compilation of dlib. Do not use pip install dlib, which for some reason runs very, very slowly compared to a proper compilation from source. I went from an almost 10-second lag to about 2 seconds this way. So in the end I didn't need multi-threading/processing, but I'm working on it to improve the speed even more. Thanks for the help :)
I tried a simplified approach like P.Ro mentioned in his answer, with processes writing to an output queue, but somehow the queue got locked most of the time because all the processes wrote to it at the same time (just my guess; I probably did something wrong).
In the end I ended up using pipes.
The code is nasty, but if I were me a few hours ago, I would still be glad to find an example that actually runs without effort.
from multiprocessing import Process, Queue, Manager, Pipe
import multiprocessing
import face_recognition as fik
import cv2
import time

video_input = 0
obama_image = fik.load_image_file("obama.png")
obama_face_encoding = fik.face_encodings(obama_image)[0]
quality = 0.7

def f(id, fi, fl):
    import face_recognition as fok
    while True:
        small_frame = fi.get()
        print("running thread" + str(id))
        face_locations = fok.face_locations(small_frame)
        if len(face_locations) > 0:
            print(face_locations)
            for (top7, right7, bottom7, left7) in face_locations:
                small_frame_c = small_frame[top7:bottom7, left7:right7]
                fl.send(small_frame_c)

fps_var = 0
if __name__ == '__main__':
    multiprocessing.set_start_method('spawn')
    # global megaman
    with Manager() as manager:
        video_capture = cv2.VideoCapture(video_input)
        fi = Queue(maxsize=14)
        threads = 8
        proc = []
        parent_p = []
        thread_p = []
        # procids = range(0, threads)
        for t in range(0, threads):
            p_t, c_t = Pipe()
            parent_p.append(p_t)
            thread_p.append(c_t)
            print(t)
            proc.append(Process(target=f, args=(t, fi, thread_p[t])))
            proc[t].start()
        useframe = False
        frame_id = 0
        while True:
            # Grab a single frame of video
            ret, frame = video_capture.read()
            effheight, effwidth = frame.shape[:2]
            if effwidth < 20:
                break
            # Resize frame of video to 1/4 size for faster face recognition processing
            xxx = 930
            yyy = 10 / 16  # 0.4234375
            small_frame = cv2.resize(frame, (xxx, int(xxx * yyy)))
            if frame_id % 2 == 0:
                if not fi.full():
                    fi.put(small_frame)
                    print(frame_id)
            cv2.imshow('Video', small_frame)
            print("FPS: ", int(1.0 / (time.time() - fps_var)))
            fps_var = time.time()
            # GET ALL DETECTIONS
            for t in range(0, threads):
                if parent_p[t].poll():
                    small_frame_c = parent_p[t].recv()
                    cv2.imshow('recc', small_frame_c)
                    height34, width34 = small_frame_c.shape[:2]
                    # print fsizeee
                    if width34 < 20:
                        print("face 2 small")
                        print(width34)
                        break
                    face_encodings_cam = fik.face_encodings(small_frame_c, [(0, width34, height34, 0)])
                    match = fik.compare_faces([obama_face_encoding], face_encodings_cam[0])
                    name = "Unknown"
                    if match[0]:
                        name = "Barack"
                    print(name)
                    break
            frame_id += 1
            # Hit 'q' on the keyboard to quit!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
I do not have much experience with using ThreadPool, but I always just use Process, as shown below. You should be able to easily edit this code to fit your needs. I wrote this with your implementation in mind.
This code will get the number of cores and start however many worker processes, all implementing the desired function in parallel. They all share a Queue of frames for input and all put to the same output Queue for the main process to get and show. Each Queue has a maximum size, in this case 5. This ensures that, despite the CPU time it takes to process, the output will always be relatively live.
import numpy as np
import cv2
from multiprocessing import Process, Queue
import time
#from common import clock, draw_str, StatValue
#import video

class Canny_Process(Process):

    def __init__(self, frame_queue, output_queue):
        Process.__init__(self)
        self.frame_queue = frame_queue
        self.output_queue = output_queue
        self.stop = False
        # Initialize your face detectors here

    def get_frame(self):
        if not self.frame_queue.empty():
            return True, self.frame_queue.get()
        else:
            return False, None

    def stopProcess(self):
        self.stop = True

    def canny_frame(self, frame):
        # some intensive computation...
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 50, 100)

        # To simulate CPU Time
        #############################
        for i in range(1000000):
            x = 546 * 546
            res = x / (i + 1)
        #############################
        'REPLACE WITH FACE DETECT CODE HERE'

        if self.output_queue.full():
            self.output_queue.get_nowait()
        self.output_queue.put(edges)

    def run(self):
        while not self.stop:
            ret, frame = self.get_frame()
            if ret:
                self.canny_frame(frame)

if __name__ == '__main__':
    frame_sum = 0
    init_time = time.time()

    def put_frame(frame):
        if Input_Queue.full():
            Input_Queue.get_nowait()
        Input_Queue.put(frame)

    def cap_read(cv2_cap):
        ret, frame = cv2_cap.read()
        if ret:
            put_frame(frame)

    cap = cv2.VideoCapture(0)
    threadn = cv2.getNumberOfCPUs()
    threaded_mode = True
    process_list = []
    Input_Queue = Queue(maxsize=5)
    Output_Queue = Queue(maxsize=5)
    for x in range(threadn - 1):
        canny_process = Canny_Process(frame_queue=Input_Queue, output_queue=Output_Queue)
        canny_process.daemon = True
        canny_process.start()
        process_list.append(canny_process)

    ch = cv2.waitKey(1)
    cv2.namedWindow('Threaded Video', cv2.WINDOW_NORMAL)
    while True:
        cap_read(cap)
        if not Output_Queue.empty():
            result = Output_Queue.get()
            cv2.imshow('Threaded Video', result)
        ch = cv2.waitKey(5)
        if ch == ord(' '):
            threaded_mode = not threaded_mode
        if ch == 27:
            break
    cv2.destroyAllWindows()
This should do the trick; just change my canny function to do your face detection. I wrote this from your code and compared the two: this version is significantly faster. I am using multiprocessing.Process here. In Python, processes are truly parallel and threads are not quite, because of the GIL. I am using two queues to send data back and forth between the main process and the workers. Queues are both thread- and process-safe.
You may use this, multithreaded:

from imutils.video import VideoStream

# Initialize the multithreaded video stream.
videostream = "rtsp://192.168.x.y/user=admin=xxxxxxx_channel=vvvv=1.sdp?params"
frameSize = (640, 480)  # frameSize was undefined in the original; this value is an assumption
vs = VideoStream(src=videostream, resolution=frameSize, framerate=32).start()
frame = vs.read()

How do I terminate processes in an infinite loop in python?

I have to write code that converts video files from RGB to black and white, using an equation that converts every frame to black and white.
I have to do this in parallel with multiprocessing and a queue, with the help of OpenCV.
I wrote the code, but I have a problem with terminating the processes in the infinite loop. How can I terminate the processes when I am finished reading the frames? The parent is waiting for the children to finish, and they never do.
This is my code:
#! /usr/bin/python
import numpy as np
import cv2
import multiprocessing as mp
import time

def read_frames(q1, q2):
    while True:
        NumAndFrame = q1.get()
        frame = NumAndFrame[1]
        if frame == 'Done':
            # Here is my problem, this is not working!!!
            processes.terminate()
            break
        j = NumAndFrame[0]
        R = frame[:, :, 0]
        G = frame[:, :, 1]
        B = frame[:, :, 2]
        y = (np.uint8)((0.299 * R) + (0.587 * G) + (0.114 * B))
        q2.put((j, y))

if __name__ == '__main__':
    start = time.time()
    q1 = mp.Queue()
    q2 = mp.Queue()
    processes = []
    for i in range(4):
        processes.append(mp.Process(target=read_frames, args=(q1, q2)))
    for p in processes:
        p.start()
    # feed the processes
    # read input file and send to the processes the frames:
    cap = cv2.VideoCapture('gou.avi')
    lines = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cols = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    fourcc_ver = int(cap.get(cv2.CAP_PROP_FOURCC))
    out = cv2.VideoWriter('output.avi', fourcc_ver, fps, (cols, lines), False)
    j = 1
    while cap.isOpened():
        ret, frame = cap.read()
        # as long as new frames are there
        if ret == True:
            q1.put((j, frame))
            j += 1
            # if cv2.waitKey(1) & 0xFF == ord('q'):
            #     break
        else:
            break
    q1.put((1, 'Done'))
    for p in processes:
        p.join()
    result = []
    for p in processes:
        result.append(q2.get())
    result.sort()
    result = []
    for r in result:
        result.append(r[1])
    for i in result:
        out.write(i)
        print i
    # Release everything if job is finished
    print 'final finish'
    cap.release()
    out.release()
    cv2.destroyAllWindows()
You might want to try to pare your question down to a smaller example, but if you're just interested in stopping a computation in the middle of a loop that is running indefinitely, you can spam Ctrl-C until it halts. Alternatively, you can just close the shell window.
Without having tested it, and for the same reason others gave in their comments: you should call terminate() on each process from the main part, rather than in the child function:
...
for p in processes:
    p.terminate()
    p.join()
Consider using multiprocessing.Pool because it does most of the heavy lifting for you.
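For illustration, a minimal sketch of the Pool route (it reads all frames up front for simplicity, which trades memory for code size; pool.map preserves input order, so no numbering or 'Done' protocol is needed):

import cv2
import numpy as np
from multiprocessing import Pool

def to_gray(frame):
    # same weighted-sum conversion as the code below
    # (note: OpenCV frames are BGR, so channel 0 is actually blue)
    R, G, B = frame[:, :, 2], frame[:, :, 1], frame[:, :, 0]
    return np.uint8(0.299 * R + 0.587 * G + 0.114 * B)

if __name__ == '__main__':
    cap = cv2.VideoCapture('gou.avi')
    frames = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frames.append(frame)
    cap.release()
    with Pool(processes=4) as pool:
        gray_frames = pool.map(to_gray, frames)  # results come back in frame order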
You need a 'Done' message for each child process. The child should send some sort of acknowledgement back to the parent and then terminate. You also need some sort of error-handling policy in the worker so that an exception doesn't just silently exit the worker.
You have other problems, such as this code, which confuses the number of processes with the number of messages processed:
for p in processes:
    result.append(q2.get())
Instead you should read all messages, counting the number of termination acknowledgements you get along the way so that you know when to stop reading.
Your script is long and I'm not going to pretend that I've gotten it all right (please be friendly and post smaller examples in the future!), but here is a first go at cleaning it up.
#! /usr/bin/python
import numpy as np
import cv2
import multiprocessing as mp
import time

def read_frames(q1, q2):
    while True:
        try:
            NumAndFrame = q1.get()
            frame = NumAndFrame[1]
            if frame == 'Done':
                q2.put('Done')
                break
            j = NumAndFrame[0]
            R = frame[:, :, 0]
            G = frame[:, :, 1]
            B = frame[:, :, 2]
            y = (np.uint8)((0.299 * R) + (0.587 * G) + (0.114 * B))
            q2.put((j, y))
        except Exception, e:
            q2.put('Error: ' + str(e))

if __name__ == '__main__':
    start = time.time()
    q1 = mp.Queue()
    q2 = mp.Queue()
    processes = []
    for i in range(4):
        processes.append(mp.Process(target=read_frames, args=(q1, q2)))
    for p in processes:
        p.start()
    # feed the processes
    # read input file and send to the processes the frames:
    cap = cv2.VideoCapture('gou.avi')
    lines = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cols = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    fourcc_ver = int(cap.get(cv2.CAP_PROP_FOURCC))
    out = cv2.VideoWriter('output.avi', fourcc_ver, fps, (cols, lines), False)
    j = 1
    while cap.isOpened():
        ret, frame = cap.read()
        # as long as new frames are there
        if ret == True:
            q1.put((j, frame))
            j += 1
            # if cv2.waitKey(1) & 0xFF == ord('q'):
            #     break
        else:
            break
    for _ in range(len(processes)):
        q1.put((1, 'Done'))
    for p in processes:
        p.join()
    result = []
    done_count = 0
    while done_count < len(processes):
        data = q2.get()
        if isinstance(data, basestring) and data == 'Done':
            done_count += 1
        else:
            result.append(data)
    result.sort()
    # What??? don't overwrite result here!
    result = []
    for r in result:
        result.append(r[1])
    for i in result:
        out.write(i)
        print i
    # Release everything if job is finished
    print 'final finish'
    cap.release()
    out.release()
    cv2.destroyAllWindows()
You end up holding the entire returned dataset in the parent, so you may hit memory problems. And since (1) you have a large data payload being copied from parent to child and back, and (2) numpy releases the GIL, you may find that threads perform better than processes. You can check rather quickly by just substituting Thread for Process when you create the workers.
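As a quick illustration of that substitution (a hypothetical toggle reusing read_frames, q1 and q2 from the code above; multiprocessing.Queue also works between threads within a single process):

from threading import Thread
from multiprocessing import Process

use_threads = True  # flip this to compare; numpy releases the GIL during the conversion
worker_cls = Thread if use_threads else Process
processes = [worker_cls(target=read_frames, args=(q1, q2)) for _ in range(4)]
for p in processes:
    p.start()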
