I was trying to modify a yolov5 script. Here I'm trying to pass an array between threads.
import threading
from queue import Queue

import cv2

# cam (a cv2.VideoCapture) and model (the loaded yolov5 model) are
# created elsewhere in the script.

def detection(out_q):
    while cam.isOpened():
        ref, img = cam.read()
        img = cv2.resize(img, (640, 320))
        result = model(img)
        yoloBbox = result.xywh[0].numpy()  # yolo format
        bbox = result.xyxy[0].numpy()      # pascal format
        for i in bbox:
            out_q.put(i)  # 'i' is a list of length 6

def resultant(in_q):
    while cam.isOpened():
        ref, img = cam.read()
        img = cv2.resize(img, (640, 320))
        qbbox = in_q.get()
        print(qbbox)

if __name__ == '__main__':
    q = Queue(maxsize=10)
    t1 = threading.Thread(target=detection, args=(q,))
    t2 = threading.Thread(target=resultant, args=(q,))
    t1.start()
    t2.start()
    t1.join()
    t2.join()
I tried this, but it's giving me errors like:
Assertion fctx->async_lock failed at libavcodec/pthread_frame.c:155
So is there any other method to pass the array? Any kind of tutorial or solution is appreciated. If there is any misunderstanding in my question, please let me know.
Thanks a lot!
Update:
I was trying it like this:
import multiprocessing
import numpy as np
# cam, model and cv2 are as in the first snippet

def detection(ns, event):
    # a = np.array([1, 2, 3])
    # a = list(a)      # this (sharing a plain list) is working
    # ns.value = a
    # event.set()
    while cam.isOpened():
        ref, img = cam.read()
        img = cv2.resize(img, (640, 320))
        result = model(img)
        yoloBbox = result.xywh[0].numpy()  # yolo format
        bbox = result.xyxy[0].numpy()      # pascal format
        for i in bbox:
            arr = np.squeeze(np.array(i))
            print("bef: ", arr)
            ns.value = arr  # this (sharing the numpy array) is not working
            event.set()
def transfer(ns, event):
    event.wait()
    print(ns.value)

if __name__ == '__main__':
    # detection()
    manager = multiprocessing.Manager()
    namespace = manager.Namespace()
    event = multiprocessing.Event()
    p1 = multiprocessing.Process(target=detection, args=(namespace, event))
    p2 = multiprocessing.Process(target=transfer, args=(namespace, event))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
The output from the above shows "arr" = [0 1.8232 407.98 316.46 0.92648 0], but all I got from transfer was blank: no error, no warning, only blank. I verified that arr has a value, and I tested that the list and numpy array in the commented-out block (marked as working) do share their data. So why is the data from the "arr" array blank after sharing? What should I do?
So is there any other method to pass the array?
Yes, you could use multiprocessing.shared_memory. It has been part of the standard library since Python 3.8, and PyPI has a backport that allows using it on Python 3.6 and 3.7. See the example in the linked docs to learn how to use multiprocessing.shared_memory with numpy.ndarray.
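A minimal sketch of that approach (the values are just the example row from the question; in real code the consumer would receive shm.name through a queue, pipe, or other channel):

import numpy as np
from multiprocessing import shared_memory

# Producer: create a shared block and back a numpy array with it
a = np.array([0, 1.8232, 407.98, 316.46, 0.92648, 0])
shm = shared_memory.SharedMemory(create=True, size=a.nbytes)
b = np.ndarray(a.shape, dtype=a.dtype, buffer=shm.buf)
b[:] = a[:]  # copy the detection row into shared memory

# Consumer (typically another process): attach to the block by name
existing = shared_memory.SharedMemory(name=shm.name)
c = np.ndarray(a.shape, dtype=a.dtype, buffer=existing.buf)
print(c)

# Clean up once both sides are done
existing.close()
shm.close()
shm.unlink()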
The answer provided by @Daweo suggesting the use of shared memory is correct.
However, it's also worth considering using a lock to 'protect' access to the numpy array (which is not thread-safe).
See: this.
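A minimal sketch of that idea, assuming the array is shared between threads of one process (all names here are illustrative):

import threading
import numpy as np

arr = np.zeros(6)            # the shared detection row
arr_lock = threading.Lock()

def writer(new_row):
    with arr_lock:           # guard the write
        arr[:] = new_row

def reader():
    with arr_lock:           # guard the read
        return arr.copy()    # hand back a copy so callers need no lock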
Okay guys, thanks for the help. I used a multiprocessing queue to share data, and then I moved my program from multiprocessing to threading.
import cv2
import torch

def capture(q):
    cap = cv2.VideoCapture(0)
    while True:
        ref, frame = cap.read()
        frame = cv2.resize(frame, (640, 480))
        q.put(frame)

def det(q):
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s', device='cpu')
    model.conf = 0.30    # model confidence level
    model.classes = [0]  # model classes (where 0 = person, 2 = car)
    model.iou = 0.55     # bounding box IoU threshold for NMS
    while True:
        mat = q.get()
        det = model(mat)
        bbox = det.xyxy[0].numpy()
        for i in bbox:
            print(i)
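The post stops short of the main block; a minimal sketch of how these two functions could be wired together with a thread-safe queue (the queue size is an assumption):

import threading
from queue import Queue

if __name__ == '__main__':
    q = Queue(maxsize=10)  # bounded so capture can't run far ahead of det
    t1 = threading.Thread(target=capture, args=(q,))
    t2 = threading.Thread(target=det, args=(q,))
    t1.start()
    t2.start()
    t1.join()
    t2.join()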
Related
I have been trying to solve this one for quite a while now and cannot figure it out; I would appreciate some help with it. I have a FastAPI server in which I have deployed a Drowsiness Detection Model/Script (dlib, opencv2, scipy). What I am trying to achieve is to start and stop the DDM via API endpoints. The problem is that the uvicorn server is single-threaded, so the DDM runs in the same thread, and when I try to stop the DDM it stops the entire server process (which is not something I want). I have tried forking the process and running the DDM on that process, but it gives an error and crashes. I think using multithreading might help; I am not sure, and even if it does solve my issue I don't know exactly how to approach it. Relevant code:
# Drowsiness Detection Script
import os
import signal

import cv2
import dlib
from fastapi import BackgroundTasks
from imutils import face_utils
from scipy.spatial import distance

# router (an APIRouter) and remove_file() are defined elsewhere in the project.

def eye_aspect_ratio(eye):
    A = distance.euclidean(eye[1], eye[5])
    B = distance.euclidean(eye[2], eye[4])
    C = distance.euclidean(eye[0], eye[3])
    ear = (A + B) / (2.0 * C)
    return ear

def detect_drowsiness(monitor: bool):
    pid_file = open("intelligence/drowsiness_detection/dataset/pid.txt", "w")
    pid_str = str(os.getpid())
    pid_file.write(pid_str)
    pid_file.close()
    thresh = 0.25
    frame_check = 18
    detect = dlib.get_frontal_face_detector()
    # Dat file is the crux of the code
    predict = dlib.shape_predictor(
        "intelligence/drowsiness_detection/dataset/shape_predictor_68_face_landmarks.dat")
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["right_eye"]
    cap = cv2.VideoCapture(0)
    flag = 0
    while monitor:
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        subjects = detect(gray, 0)
        for subject in subjects:
            shape = predict(gray, subject)
            shape = face_utils.shape_to_np(shape)  # converting to a NumPy array
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            ear = (leftEAR + rightEAR) / 2.0
            if ear < thresh:
                flag += 1
                print("Detecting,{}".format(flag))
                if flag >= frame_check:
                    print("ALERT - Drowsy")
            else:
                flag = 0
    cap.release()

# Drowsiness detection for a user
@router.get("/face/drowsy/start", response_description="Drowsiness monitoring for the user")
async def start_drowsiness_detection(background_tasks: BackgroundTasks):
    background_tasks.add_task(detect_drowsiness, True)
    return "Drowsiness monitoring ON"

@router.get("/face/drowsy/stop", response_description="Drowsiness monitoring for the user")
async def stop_drowsiness_detection():
    pid_file_path = "intelligence/drowsiness_detection/dataset/pid.txt"
    if not os.path.exists(pid_file_path):
        return "Please start monitoring first"
    pid_file = open(pid_file_path, "r")
    pid_str = pid_file.read()
    pid_file.close()
    remove_file(pid_file_path)
    os.kill(int(pid_str), signal.SIGKILL)
    return "Drowsiness monitoring OFF"
Possible workaround:
# Drowsiness Detection Script
# (imports and router as in the original script above)

def eye_aspect_ratio(eye):
    A = distance.euclidean(eye[1], eye[5])
    B = distance.euclidean(eye[2], eye[4])
    C = distance.euclidean(eye[0], eye[3])
    ear = (A + B) / (2.0 * C)
    return ear

class DrowsinessDetector(Process):
    running = Event()

    def stop_monitoring(self):
        if self.running.is_set():
            self.running.clear()

    def start_monitoring(self):
        if self.running.is_set():
            return
        self.running.set()
        self.detect_drowsiness()

    def detect_drowsiness(self):
        thresh = 0.25
        frame_check = 18
        detect = dlib.get_frontal_face_detector()
        # Dat file is the crux of the code
        predict = dlib.shape_predictor("./shape_predictor_68_face_landmarks.dat")
        (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["left_eye"]
        (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["right_eye"]
        cap = cv2.VideoCapture(0)
        flag = 0
        while self.running.is_set():
            ret, frame = cap.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            subjects = detect(gray, 0)
            for subject in subjects:
                shape = predict(gray, subject)
                shape = face_utils.shape_to_np(shape)  # converting to a NumPy array
                leftEye = shape[lStart:lEnd]
                rightEye = shape[rStart:rEnd]
                leftEAR = eye_aspect_ratio(leftEye)
                rightEAR = eye_aspect_ratio(rightEye)
                ear = (leftEAR + rightEAR) / 2.0
                if ear < thresh:
                    flag += 1
                    print("Detecting - {}".format(flag))
                    if flag >= frame_check:
                        print("ALERT - Drowsy")
                else:
                    flag = 0
        cap.release()

# Drowsiness detection for a user
drowsy = DrowsinessDetector()

@router.get("/face/drowsy/start", response_description="Drowsiness monitoring for the user")
async def start_drowsiness_detection(background_tasks: BackgroundTasks):
    background_tasks.add_task(drowsy.start_monitoring())
    return "Drowsiness monitoring ON"

@router.get("/face/drowsy/stop", response_description="Drowsiness monitoring for the user")
async def stop_drowsiness_detection(background_tasks: BackgroundTasks):
    background_tasks.add_task(drowsy.stop_monitoring())
    return "Drowsiness monitoring OFF"
I got this solution from Reddit, but for some reason it doesn't work. Any help would be much appreciated.
You can also just put your non-async code into a standard sync route def (this is actually the encouraged approach from FastAPI). FastAPI will then run that code in an external threadpool and manage it all for you. From there you can simply check the status of literally anything (files, Redis, an in-memory dict, pub/sub) from within your while loop to stop the drowsiness detector.
https://fastapi.tiangolo.com/async/#path-operation-functions
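A minimal sketch of that approach, with an illustrative flag and route paths rather than the poster's code. Because the start route is a plain def, FastAPI runs it in its threadpool, so the request stays open while the loop runs and returns once the flag is cleared:

from fastapi import FastAPI

app = FastAPI()
state = {"monitor": False}  # could just as well be a file, Redis key, etc.

@app.get("/face/drowsy/start")
def start_drowsiness_detection():  # plain def -> runs in the threadpool
    state["monitor"] = True
    while state["monitor"]:
        pass  # one iteration of the detection loop would go here
    return "Drowsiness monitoring stopped"

@app.get("/face/drowsy/stop")
async def stop_drowsiness_detection():
    state["monitor"] = False  # the sync loop sees this and exits
    return "Drowsiness monitoring OFF"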
While not explicitly mentioned in the FastAPI documentation, BackgroundTasks will run your task in a new thread on the same process.
Using the first code you posted: when you store the PID (process ID) in a file inside detect_drowsiness(), and then kill that PID in the stop_drowsiness_detection() route, you are effectively killing the very process that is running FastAPI.
In the background tasks section of the FastAPI docs, under the caveat, they mention:
If you need to perform heavy background computation and you don't necessarily need it to be run by the same process (for example, you don't need to share memory, variables, etc), you might benefit from using other bigger tools like Celery.
Regarding the second code you posted, the use of multiprocessing there seems to be in the right direction. Without more details on why that specific implementation is not working, it's hard to help you further.
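For what it's worth, one visible pitfall in the posted workaround is that background_tasks.add_task(drowsy.start_monitoring()) calls the method immediately and passes its return value, rather than handing FastAPI the callable (add_task(drowsy.start_monitoring)). Below is a minimal sketch of the multiprocessing direction, with illustrative names rather than the poster's exact fix: the Event is created per instance, and run() holds the loop so it executes in the child process while the parent only toggles the event.

from multiprocessing import Process, Event
import time

class Detector(Process):
    def __init__(self):
        super().__init__()
        self.running = Event()  # created per instance, shared with the child

    def run(self):  # executed in the child process after start()
        while self.running.is_set():
            time.sleep(0.1)  # one detection iteration would go here

detector = Detector()
detector.running.set()
detector.start()           # spawns the child, which calls run()
# ... later, e.g. from the stop route:
detector.running.clear()   # the child sees the cleared event and exits
detector.join()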
I need a function that resizes a given set of photos, using all available computing power. I found that I can use the fastai function parallel for this, but I don't know how to use it properly.
Here is my code:
from random import randint
from fastai.core import parallel
import cv2
def image_conversion(list, size, directory, conversion):
    if conversion == '--resize':
        for img in list:
            imageAsNumpy = cv2.imread(img)
            dim = (size[1], size[0])
            resized = cv2.resize(imageAsNumpy, dim, interpolation=cv2.INTER_AREA)
            writeStatus = cv2.imwrite(directory + img, resized)
            if writeStatus is True:
                print('Image written successfully!')
            else:
                print('Something went wrong!')
    elif conversion == '--random crop':
        for img in list:
            image = cv2.imread(img)
            height, width = image.shape[:2]
            h = randint(0, height)
            w = randint(0, width)
            cropped = image[h:h+width, w:w+height]
            status = cv2.imwrite(directory + img, cropped)
            if status is True:
                print('Image cropped')
            else:
                print('Problem')
But when I try to call the parallel function it fails every time; for example, the progress bar goes to 100%, but none of the work is completed. Could someone please help me understand what I am doing wrong?
I am trying to get my script to run over multiple cores. When I start the script it works, but it only seems to run on one core. I am running this on a Raspberry Pi 3 B+ with 4 cores, but only one is being used when I check with top.
I have tried both Pool and Process from the multiprocessing module. I do not have a programming background, but I have written basic scripts with Python before, though this is the first time I have tried to use multiprocessing.
Sorry for sharing all this code, but I think it's the best way to show what I have tried.
import multiprocessing as mp
from multiprocessing import Process
import cv2
import face_pi_helper as fph

def put_frame_on_q(q_vid_cap, vid_cap):
    for i in range(5):
        frame = fph.fetch_frame(vid_cap)
        q_vid_cap.put(frame)

def detect_from_q(q_vid_cap, q_detections, net):
    print(fph.time_stamp(), "[INFO] starting detect from q...")
    while True:
        frame = q_vid_cap.get()
        if frame is None:
            print(fph.time_stamp(), "[WARNING] No frame")
        else:
            detections = tuple(fph.detect(frame, net))
            d = detections[1]
            for f in d:
                conf = float(f[2])
                if conf > 0.9:
                    print(fph.time_stamp(), "[INFO] face found")
                    q_detections.put(detections)

def identify_from_q(q_detections, vid_cap, known_face_encodings,
                    known_face_names, q_vid_cap):
    print(fph.time_stamp(), "[INFO] starting identify from q...")
    while True:
        detections = q_detections.get()
        if detections is None:
            print(fph.time_stamp(), "[WARNING] No detections")
        else:
            frame = detections[0]
            fph.identify(frame, detections[1], known_face_encodings,
                         known_face_names)
            put_frame_on_q(q_vid_cap, vid_cap)

if __name__ == "__main__":
    topology = "models/intel/face-detection-retail-0004.xml"
    weights = "models/intel/face-detection-retail-0004.bin"
    image = "images/adam.jpg"
    db_file = "data/allstars_faces_plus.json"
    print(fph.time_stamp(), "[INFO] starting app...")
    net = fph.load_model(topology, weights)
    known_face_encodings, known_face_names = fph.load_face_db(db_file)
    q_vid_cap = mp.Queue(1)
    q_detections = mp.Queue(1)
    vid_cap = cv2.VideoCapture(0)
    put_frame_on_q(q_vid_cap, vid_cap)
    p_detect = Process(target=detect_from_q,
                       args=(q_vid_cap, q_detections, net))
    print(fph.time_stamp(), "[INFO] starting detect from q process...")
    p_detect.start()
    p_identify = Process(target=identify_from_q,
                         args=(q_detections, vid_cap, known_face_encodings,
                               known_face_names, q_vid_cap))
    print(fph.time_stamp(), "[INFO] starting identify from q process...")
    p_identify.start()
    p_detect.join()
    p_identify.join()
I would expect this to run on 3 cores but it only runs on 1.
I have an API that takes img_path as input, since it uses cv2.imread() to load the image.
def detect(net, classes, img_path, CONF_THRESH=0.8, NMS_THRESH=0.3):
    outputs = []
    im = cv2.imread(img_path)
    ...
Now I want to create a small tool that loads a video directly and calls the API function. However, once cap.read() has produced a frame, I can't directly pass the image object into the API function.
def detect_video(net, classes, video, CONF_THRESH=0.8, NMS_THRESH=0.3):
    """detect an input video"""
    try:
        cap = cv2.VideoCapture(video)
    except Exception as e:
        print(e)
        return None
    while True:
        ret, frame = cap.read()
        # try to call the api function
What is the best way to do this so that I don't have to change the API function? My idea is to imwrite the captured frame and reload it again, but this seems stupid and slow.
If you want to pass a frame as an argument to your API detect function, save it first and then call the API function as usual:
img_path = 'image.jpg'
...
ret, frame = cap.read()
cv2.imwrite(img_path, frame)
# and now try to call the api function
detect(net, classes, img_path, CONF_THRESH = 0.8, NMS_THRESH = 0.3)
I am attempting to write a function which imports a specified CDF datafile, formats the image as a numpy array and does some slight refinement on the image to remove the background. This works fine; however, I encounter an error when I try to pass the name of the output array as one of my function's parameters:
from netCDF4 import Dataset
import numpy as np
import scipy.signal

def importfunction(datafile, imagelabel):
    f = Dataset(datafile)
    locationfloatfield = f.variables['FloatField']
    floatfield = locationfloatfield[:]
    img = floatfield.flatten()
    img = scipy.signal.detrend(img)
    imagelabel = np.reshape(img, (256, 256))
    imagelabel += abs(imagelabel.min())
    imagelabel *= (1.0 / imagelabel.max())
I intend the array to be named by imagelabel, which is specified when I call the function. However, when I call the function:

importfunction('..../CS191mk2153-M-Xp-Topo.nc', label)

I get this error:

NameError: name 'label' is not defined

I am unsure how to fix this.
The NameError happens because label has to exist before Python can pass it as an argument, and rebinding the imagelabel parameter inside the function would not affect the caller's name anyway. Maybe what you want to do is return the array and assign it at the call site:
def importfunction(datafile):
    f = Dataset(datafile)
    locationfloatfield = f.variables['FloatField']
    floatfield = locationfloatfield[:]
    img = floatfield.flatten()
    img = scipy.signal.detrend(img)
    imagelabel = np.reshape(img, (256, 256))
    imagelabel += abs(imagelabel.min())
    imagelabel *= (1.0 / imagelabel.max())
    return imagelabel
then call:
label = importfunction('..../CS191mk2153-M-Xp-Topo.nc')