CV2 error in face detection program via Python

So, I was making a face detection program and everything was going right, but as soon as I ran it, it showed me an error saying:
line 40, in <module>
faceNet=cv2.dnn.readNet(faceModel, faceProto)
cv2.error: OpenCV(4.5.4-dev) D:\a\opencv-python\opencv-python\opencv\modules\dnn\src\caffe\caffe_io.cpp:1138: error: (-2:Unspecified error) FAILED: fs.is_open(). Can't open "opencv_face_detector_uint8.pb" in function 'cv::dnn::ReadProtoFromBinaryFile'
The code is:
import cv2
import math
import argparse

def highlightFace(net, frame, conf_threshold=0.7):
    frameOpencvDnn = frame.copy()
    frameHeight = frameOpencvDnn.shape[0]
    frameWidth = frameOpencvDnn.shape[1]
    blob = cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False)
    net.setInput(blob)
    detections = net.forward()
    faceBoxes = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > conf_threshold:
            x1 = int(detections[0, 0, i, 3] * frameWidth)
            y1 = int(detections[0, 0, i, 4] * frameHeight)
            x2 = int(detections[0, 0, i, 5] * frameWidth)
            y2 = int(detections[0, 0, i, 6] * frameHeight)
            faceBoxes.append([x1, y1, x2, y2])
            cv2.rectangle(frameOpencvDnn, (x1, y1), (x2, y2), (0, 255, 0), int(round(frameHeight / 150)), 8)
    return frameOpencvDnn, faceBoxes

parser = argparse.ArgumentParser()
parser.add_argument('--image')
args = parser.parse_args()

faceProto = "opencv_face_detector.pbtxt"
faceModel = "opencv_face_detector_uint8.pb"
ageProto = "age_deploy.prototxt"
ageModel = "age_net.caffemodel"
genderProto = "gender_deploy.prototxt"
genderModel = "gender_net.caffemodel"

MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
ageList = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
genderList = ['Male', 'Female']

faceNet = cv2.dnn.readNet(faceModel, faceProto)
ageNet = cv2.dnn.readNet(ageModel, ageProto)
genderNet = cv2.dnn.readNet(genderModel, genderProto)

video = cv2.VideoCapture(args.image if args.image else 0)
padding = 20
while cv2.waitKey(1) < 0:
    hasFrame, frame = video.read()
    if not hasFrame:
        cv2.waitKey()
        break
    resultImg, faceBoxes = highlightFace(faceNet, frame)
    if not faceBoxes:
        print("No Face is being Detected")
    for faceBox in faceBoxes:
        face = frame[max(0, faceBox[1] - padding):
                     min(faceBox[3] + padding, frame.shape[0] - 1),
                     max(0, faceBox[0] - padding):
                     min(faceBox[2] + padding, frame.shape[1] - 1)]
        blob = cv2.dnn.blobFromImage(face, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
        genderNet.setInput(blob)
        genderPreds = genderNet.forward()
        gender = genderList[genderPreds[0].argmax()]
        print(f'Gender: {gender}')
        ageNet.setInput(blob)
        agePreds = ageNet.forward()
        age = ageList[agePreds[0].argmax()]
        print(f'Age: {age[1:-1]} years')
        cv2.putText(resultImg, f'{gender}, {age}', (faceBox[0], faceBox[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2, cv2.LINE_AA)
    cv2.imshow("Detecting age and gender", resultImg)

The code is a little dodgy, mate; it does not display the video. Try this:
import cv2
import math
import argparse

""" Identification """
faceProto = "opencv_face_detector.pbtxt"
faceModel = "opencv_face_detector_uint8.pb"
ageProto = "age_deploy.prototxt"
ageModel = "age_net.caffemodel"
genderProto = "gender_deploy.prototxt"
genderModel = "gender_net.caffemodel"

faceNet = cv2.dnn.readNet(faceModel, faceProto)
ageNet = cv2.dnn.readNet(ageModel, ageProto)
genderNet = cv2.dnn.readNet(genderModel, genderProto)

MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
ageList = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
genderList = ['Male', 'Female']
padding = 20

""" Face highlighting """
def faceBox(faceNet, frames):
    frameHeight = frames.shape[0]
    frameWidth = frames.shape[1]
    blob = cv2.dnn.blobFromImage(frames, 1.0, (300, 300), [104, 117, 123], swapRB=False)
    faceNet.setInput(blob)
    detection = faceNet.forward()
    bboxs = []
    for i in range(detection.shape[2]):
        confidence = detection[0, 0, i, 2]
        if confidence > 0.7:
            x1 = int(detection[0, 0, i, 3] * frameWidth)
            y1 = int(detection[0, 0, i, 4] * frameHeight)
            x2 = int(detection[0, 0, i, 5] * frameWidth)
            y2 = int(detection[0, 0, i, 6] * frameHeight)
            bboxs.append([x1, y1, x2, y2])
            cv2.rectangle(frames, (x1, y1), (x2, y2), (0, 255, 0), 1)
    return frames, bboxs

""" Video display """
def DisplayVid():
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    if not cap.isOpened():
        print("Could not open video device")
        return
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    # Record at the size the camera actually delivers so the writer
    # receives frames of a matching size.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('testvideo.avi', fourcc, 20.0, (width, height))
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frameFace, bboxes = faceBox(faceNet, frame)
        out.write(frame)
        for bbox in bboxes:
            face = frame[bbox[1]:bbox[3], bbox[0]:bbox[2]]
            blob = cv2.dnn.blobFromImage(face, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
            genderNet.setInput(blob)
            genderPreds = genderNet.forward()
            gender = genderList[genderPreds[0].argmax()]
            ageNet.setInput(blob)
            agePreds = ageNet.forward()
            age = ageList[agePreds[0].argmax()]
            label = "{},{}".format(gender, age)
            cv2.rectangle(frameFace, (bbox[0], bbox[1] - 30), (bbox[2], bbox[1]), (0, 255, 0), -1)
            cv2.putText(frameFace, label, (bbox[0], bbox[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                        (255, 255, 255), 2, cv2.LINE_AA)
        cv2.imshow("Age-Gender", frameFace)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    out.release()
    cv2.destroyAllWindows()

DisplayVid()
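Note that the answer above does not touch the error that was actually posted: readNet failed simply because opencv_face_detector_uint8.pb could not be found in the directory the script was run from. A minimal sketch (assuming the model files sit in the same folder as the script) that resolves the paths relative to the script and fails fast with a clearer message:

import os
import cv2

# Resolve model paths relative to this script instead of the current
# working directory -- a common cause of "Can't open ..." in readNet.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
faceProto = os.path.join(BASE_DIR, "opencv_face_detector.pbtxt")
faceModel = os.path.join(BASE_DIR, "opencv_face_detector_uint8.pb")

for path in (faceProto, faceModel):
    if not os.path.isfile(path):
        raise FileNotFoundError(f"Model file not found: {path}")

faceNet = cv2.dnn.readNet(faceModel, faceProto)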

Related

OpenCV in Python showing an error: can't read video frame

I'm reading a VideoCapture frame using cv2, but it keeps showing an error. The error is:
Traceback (most recent call last):
File "d:\pythonprojects\gym\demo.py", line 33, in <module>
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.error: OpenCV(4.7.0) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
The code is:
import cv2
import numpy as np
import os
from PIL import Image
from Attendance import attendance
from datetime import datetime
from database import *

def getProfile(Id):
    query = "SELECT * FROM users WHERE id=" + str(Id)
    cursor = mycursor.execute(query)
    profile = mycursor.fetchone()
    return profile

detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("face-trainner.yml")

cap = cv2.VideoCapture(0)  # Get video feed from the camera
cap.set(3, 640)
cap.set(4, 480)
font = cv2.FONT_HERSHEY_COMPLEX

while True:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        nbr_predicted, conf = recognizer.predict(gray[y:y + h, x:x + w])
        print(nbr_predicted, conf)
        if conf < 80:
            profile = getProfile(nbr_predicted)
            if profile is not None:
                time_now = datetime.now()
                newdate = time_now.strftime('%Y-%m-%d')
                newtime = time_now.strftime('%H:%M:%S')
                attendance(nbr_predicted, newtime, newdate)
                cv2.putText(img, "Name: " + str(profile[4]), (x, y + h + 30), font, 0.4, (0, 0, 255), 1)
                cv2.putText(img, "Gender: " + str(profile[7]), (x, y + h + 50), font, 0.4, (0, 0, 255), 1)
        else:
            cv2.putText(img, "Name: Unknown", (x, y + h + 30), font, 0.4, (0, 0, 255), 1)
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('Preview', img)  # Display the video
    cv2.waitKey(1)

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
This error seems to be due to the img variable being None or empty. I'm not sure why it is None in your case; it could be for various reasons: the camera is not connected properly, the camera driver is not installed, or there are permission issues.
You can add a condition after the cap.read() call:
...
while True:
    ret, img = cap.read()
    if img is None:
        print("Could not read frame from the source.")
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
...
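As a further guard, it can help to verify the camera opened at all before entering the loop, so a missing or busy device is reported immediately rather than as an empty frame. A minimal sketch, assuming device index 0:

import cv2

cap = cv2.VideoCapture(0)
if not cap.isOpened():
    # Wrong index, missing driver, or the camera is held by another app.
    raise RuntimeError("Could not open video device 0")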

"can't open camera by index" using cv2 on Linux

I am trying to make a motion detector (using the internal camera) in Python 3 on Linux (Debian), and I keep getting this error:
[ WARN:0#0.724] global /io/opencv/modules/videoio/src/cap_v4l.cpp (889) open VIDEOIO(V4L2:/dev/video0): can't open camera by index
Here's the code I'm using:
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())

if args.get("video", None) is None:
    vs = VideoStream(src=0).start()
    time.sleep(2.0)
else:
    vs = cv2.VideoCapture(args["video"])

firstFrame = None
while True:
    frame = vs.read()
    frame = frame if args.get("video", None) is None else frame[1]
    text = "Muon is stuck in helium"
    if frame is None:
        break
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if firstFrame is None:
        firstFrame = gray
        continue
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    for c in cnts:
        if cv2.contourArea(c) < args["min_area"]:
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Muon is fusing hydrogen"
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
    cv2.imshow("Security Feed", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

vs.stop() if args.get("video", None) is None else vs.release()
cv2.destroyAllWindows()
Personally, I think the problem is that Linux is having trouble using the internal camera, but of course I have been wrong before. If that is the problem, can somebody please help me fix it? And if it isn't, can somebody tell me what I need to fix?
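One way to narrow this down is to check whether any V4L2 device can be opened at all. On Debian the cameras appear as /dev/video0, /dev/video1, ..., and your user normally needs to be in the video group to read them. A minimal probing sketch (assuming OpenCV was built with V4L2 support):

import cv2

# Probe the first few camera indices and report which ones open.
for index in range(4):
    cap = cv2.VideoCapture(index)
    if cap.isOpened():
        print(f"Camera found at index {index}")
        cap.release()
    else:
        print(f"No camera at index {index}")

If nothing opens, check that /dev/video0 exists (ls /dev/video*) and that your user has permission to read it.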

how to stop the execution of a cv2 program

I am currently working on a project which requires body detection, so I created a file for it. When I tried to use it in my main file, the body detection keeps running and never stops, making the rest of my program unable to execute. I know it is because of the infinite loop, but is there another way I could use it in my main file?
I have attached the body detection program below.
Kindly help.
import cv2
import mediapipe as mp
import time

mpDraw = mp.solutions.drawing_utils
mppose = mp.solutions.pose
pose = mppose.Pose()

cap = cv2.VideoCapture('open cv/squidgamee/3.mp4')
cap.set(3, 400)
cap.set(4, 800)
ptime = 0

while True:
    succ, img = cap.read()
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = pose.process(imgRGB)
    # print(results.pose_landmarks)
    if results.pose_landmarks:
        mpDraw.draw_landmarks(img, results.pose_landmarks, mppose.POSE_CONNECTIONS)
    lmlist = []
    if results.pose_landmarks:
        for id, lm in enumerate(results.pose_landmarks.landmark):
            h, w, c = img.shape
            cx, cy = int(lm.x * w), int(lm.y * h)
            lmlist.append([id, cx, cy])
        cv2.circle(img, (lmlist[0][1], lmlist[0][2]), 15, (255, 0, 255), cv2.FILLED)
        print(lmlist[0])
    ctime = time.time()
    fps = 1 / (ctime - ptime)
    ptime = ctime
    cv2.putText(img, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_COMPLEX, 3, (255, 255, 0), 3)
    cv2.imshow("image", img)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
You can put the content of the file in a function and call it on a separate thread:
import cv2
import mediapipe as mp
import time

def func():
    mpDraw = mp.solutions.drawing_utils
    mppose = mp.solutions.pose
    pose = mppose.Pose()
    cap = cv2.VideoCapture('open cv/squidgamee/3.mp4')
    cap.set(3, 400)
    cap.set(4, 800)
    ptime = 0
    while True:
        succ, img = cap.read()
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        results = pose.process(imgRGB)
        if results.pose_landmarks:
            mpDraw.draw_landmarks(img, results.pose_landmarks, mppose.POSE_CONNECTIONS)
        lmlist = []
        if results.pose_landmarks:
            for id, lm in enumerate(results.pose_landmarks.landmark):
                h, w, c = img.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                lmlist.append([id, cx, cy])
            cv2.circle(img, (lmlist[0][1], lmlist[0][2]), 15, (255, 0, 255), cv2.FILLED)
            print(lmlist[0])
        ctime = time.time()
        fps = 1 / (ctime - ptime)
        ptime = ctime
        cv2.putText(img, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_COMPLEX, 3, (255, 255, 0), 3)
        cv2.imshow("image", img)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
Main:
import threading
from file import func
x = threading.Thread(target=func, args=())
x.start()
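If you also want the main file to be able to stop the detection (rather than relying on pressing 'q' in the OpenCV window), one option is to pass a threading.Event into the function and check it in the loop. This is a sketch under the assumption that func is modified to accept the event and to loop on while not stop_event.is_set(): instead of while True::

import threading
from file import func  # hypothetical: func(stop_event) checks the event each loop

stop_event = threading.Event()
x = threading.Thread(target=func, args=(stop_event,))
x.start()

# ... the rest of the main program runs here ...

stop_event.set()  # ask the detection loop to exit
x.join()          # wait for the thread to finish cleanly

Note that cv2.imshow from a background thread can be unreliable on some platforms; if the window misbehaves, keep the display in the main thread instead.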

Converting an OpenCV program into a Module reduces frame rate drastically

I wrote code for pose estimation using OpenCV and the mediapipe library. The program was working well and I was getting around 30-35 fps. When I tried to convert the same program into a module, so that I can easily reuse it in future projects, the fps of the new code (module) dropped drastically to 3-4 fps.
My original Program:
import cv2
import mediapipe as mp
import time

cap = cv2.VideoCapture(1)
pTime = 0
cTime = 0
mpDraw = mp.solutions.drawing_utils
mpPose = mp.solutions.pose
pose = mpPose.Pose()

while True:
    success, img1 = cap.read()
    img = cv2.flip(img1, 1)
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = pose.process(imgRGB)
    if results.pose_landmarks:
        mpDraw.draw_landmarks(img, results.pose_landmarks, mpPose.POSE_CONNECTIONS)
        for id, lm in enumerate(results.pose_landmarks.landmark):
            h, w, c = img.shape
            cx, cy = int(lm.x * w), int(lm.y * h)
            cv2.circle(img, (cx, cy), 5, (255, 0, 0), cv2.FILLED)
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    cv2.putText(img, "FPS : " + str(int(fps)), (10, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 8), 2)
    cv2.imshow("Live Feed", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
My attempt at converting it into a module:
import cv2
import mediapipe as mp
import time

class poseDetector():
    def __init__(self, mode=False, upBody=False, smooth=True, detectionCon=0.5, trackingCon=0.5):
        self.mode = mode
        self.upBody = upBody
        self.smooth = smooth
        self.detectionCon = detectionCon
        self.trackingCon = trackingCon
        self.mpDraw = mp.solutions.drawing_utils
        self.mpPose = mp.solutions.pose
        self.pose = self.mpPose.Pose(self.mode, self.upBody, self.smooth, self.detectionCon, self.trackingCon)

    def findPose(self, img, draw=True):
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.pose.process(imgRGB)
        if self.results.pose_landmarks:
            if draw:
                self.mpDraw.draw_landmarks(img, self.results.pose_landmarks, self.mpPose.POSE_CONNECTIONS)
        return img

    def findPosition(self, img, draw=True):
        lmList = []
        if self.results.pose_landmarks:
            for id, lm in enumerate(self.results.pose_landmarks.landmark):
                h, w, c = img.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 5, (255, 0, 0), cv2.FILLED)
        return lmList

def main():
    cap = cv2.VideoCapture(1)
    pTime = 0
    cTime = 0
    while True:
        success, img1 = cap.read()
        img = cv2.flip(img1, 1)
        detector = poseDetector()
        img = detector.findPose(img)
        lmList = detector.findPosition(img)
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, "FPS : " + str(int(fps)), (10, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 8), 2)
        cv2.imshow("Live Feed", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

if __name__ == '__main__':
    main()
As far as I can tell, both versions should work the same way, but they do not. Can anyone tell me where I am making a mistake?
You need to place detector = poseDetector() before the while True::
detector = poseDetector()
while True:
    success, img1 = cap.read()
    ...
Your "module" implementation creates a new poseDetector object every iteration of the main loop.
Each execution of detector = poseDetector() includes a call to poseDetector.__init__ that calls self.pose =self.mpPose.Pose...
There is a lot of overhead...
while True:
    success, img1 = cap.read()
    img = cv2.flip(img1, 1)
    detector = poseDetector()
    ...
In your original ("non-module") implementation, you are executing pose = mpPose.Pose() only once (before the loop).
pose = mpPose.Pose()
while True:
    success, img1 = cap.read()
    ...
I have tested your code before and after moving detector = poseDetector() outside the loop.
After moving the line above the loop, the frame rate is the same as the "non-module" implementation.
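The overhead is easy to see directly: constructing mediapipe's Pose loads the model each time, which costs far more than a single frame's budget. A quick timing sketch (assuming mediapipe is installed; exact numbers will vary by machine):

import time
import mediapipe as mp

start = time.time()
for _ in range(5):
    pose = mp.solutions.pose.Pose()  # loads the pose model on every call
print("Average construction time: {:.2f} s".format((time.time() - start) / 5))

At 30 fps the loop has roughly 33 ms per frame, so paying this construction cost on every iteration easily explains the drop to 3-4 fps.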

YOLO object detection in windows

I want to change the code I got from the YouTuber Mark Jay, which can detect objects in front of a webcam, so that it detects objects in windows (like pygta5).
(I changed the code a bit to something I, a noob, thought could work.)
import cv2
from darkflow.net.build import TFNet
import numpy as np
import time
from PIL import ImageGrab

options = {
    'model': 'cfg/yolo.cfg',
    'load': 'bin/yolo.weights',
    'threshold': 0.2,
    'gpu': 1.0
}

tfnet = TFNet(options)
colors = [tuple(255 * np.random.rand(3)) for _ in range(10)]

#capture = cv2.VideoCapture(0)
#capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
#capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

while True:
    stime = time.time()
    screen = np.array(ImageGrab.grab(bbox=(0, 0, 1920, 1080)))
    ret, frame = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)
    results = tfnet.return_predict(frame)
    if ret:
        for color, result in zip(colors, results):
            tl = (result['topleft']['x'], result['topleft']['y'])
            br = (result['bottomright']['x'], result['bottomright']['y'])
            label = result['label']
            confidence = result['confidence']
            text = '{}: {:.0f}%'.format(label, confidence * 100)
            frame = cv2.rectangle(frame, tl, br, color, 5)
            frame = cv2.putText(frame, text, tl, cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)
        cv2.imshow('frame', frame)
        print('FPS {:.1f}'.format(1 / (time.time() - stime)))
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
This code returns this error:
Traceback (most recent call last):
File "D:\Python_Object_analyzis\YOLO Version\darkflow-master\Person_detection.py", line 23, in <module>
ret, frame = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)
ValueError: too many values to unpack (expected 2)
What do I have to change to get the code working?
(Sorry for the bad English.)
Thanks in advance,
Tobias
For those who are interested: I got my code working with Python mss, which is also faster. (The original error came from cv2.cvtColor returning a single array rather than a (ret, frame) pair, so there was nothing to unpack into two variables.)
import cv2
from darkflow.net.build import TFNet
import numpy as np
import mss

options = {
    'model': 'cfg/tiny-yolo-voc.cfg',
    'load': 'bin/tiny-yolo-voc.weights',
    'threshold': 0.23,
    'gpu': 0.26
}

tfnet = TFNet(options)
colors = [tuple(255 * np.random.rand(3)) for _ in range(10)]

with mss.mss() as sct:
    monitor = {'top': 0, 'left': 0, 'width': 1920, 'height': 1080}
    while True:
        screen = np.array(sct.grab(monitor))
        ret, frame = True, cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)
        results = tfnet.return_predict(frame)
        if ret:
            for color, result in zip(colors, results):
                tl = (result['topleft']['x'], result['topleft']['y'])
                br = (result['bottomright']['x'], result['bottomright']['y'])
                label = result['label']
                confidence = result['confidence']
                text = '{}: {:.0f}%'.format(label, confidence * 100)
                frame = cv2.rectangle(frame, tl, br, color, 5)
                frame = cv2.putText(frame, text, tl, cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)
            cv2.imshow('frame', cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
Have a good time
Tobias
