I am using OpenCV for face recognition, and while executing the code I am getting this error:
File "<ipython-input-2-b8cd1a5a0b77>", line 44, in <module>
canvas = detect(gray, frame)
File "<ipython-input-2-b8cd1a5a0b77>", line 23, in detect
id_, conf = recognizer.predict(roi_gray)
TypeError: 'int' object is not iterable
After that, the kernel dies.
Please help.
def detect(gray, frame):
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        id_, conf = recognizer.predict(roi_gray)
        if conf >= 45:
            font = cv2.FONT_HERSHEY_COMPLEX
            name = labels[id_]
            color = (255, 255, 0)
            cv2.putText(frame, name, (x, y), font, 1, color, 2, cv2.LINE_AA)
        eyes = eye_cascade.detectMultiScale(roi_gray, 1.1, 3)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
    return frame

video_capture = cv2.VideoCapture(0)
while True:
    ret, frame = video_capture.read()
    if ret is True:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    else:
        continue
    canvas = detect(gray, frame)
    cv2.imshow('Video', canvas)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
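For reference, a minimal sketch of a more defensive call, assuming recognizer is an LBPH-style face recognizer that has already been trained: the TypeError itself means predict returned a single int rather than a (label, confidence) pair, so unpacking it into two names fails; checking the return value makes that explicit.

result = recognizer.predict(roi_gray)
if isinstance(result, tuple):
    # (label, confidence), as returned by the cv2.face recognizers
    id_, conf = result
else:
    # a bare int label; there is no confidence value to threshold in that case
    id_, conf = result, 0.0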
I'm using cv2 to try to take a picture. Here is my code:
import numpy as np
import cv2

face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')
eye_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_eye.xml')

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    for (x, y, w, h) in faces:
        #print(x, y, w, h)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        img_item = "my-image.png"
        cv2.imwrite(img_item, roi_color)
        stroke = 2
        color = (0, 255, 0)
        end_cord_x = x + w
        end_cord_y = y + h
        ecolor = (50, 50, 50)
        cv2.rectangle(frame, (x, y), (end_cord_x, end_cord_y), color, stroke)
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), ecolor, 1)
    cv2.imshow('frame', frame)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
I get the error:
File "c:NothereForPrivacyReasons\imagerecognition\face\picture.py", line 3, in
face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')
AttributeError: module 'cv2' has no attribute 'CascadeClassifier'
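For what it's worth, that AttributeError usually means the name cv2 is not resolving to a complete OpenCV installation, for example because a local file named cv2.py shadows the real package or the install is broken. A small diagnostic sketch (nothing here is specific to this project):

import cv2

# If this prints a path inside your own project instead of site-packages,
# a file named cv2.py in the project is shadowing the real OpenCV module.
print(cv2.__file__)
# A complete install exposes a version string and CascadeClassifier.
print(getattr(cv2, '__version__', 'no __version__ attribute'))
print(hasattr(cv2, 'CascadeClassifier'))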
I have tried to use cv2.putText, and it appears to place the text based on the top right of the window and not the actual center of the face. It is probably an obvious fix, since I just started using OpenCV.
import os
import numpy as np
import cv2

font = cv2.FONT_HERSHEY_SIMPLEX
org = (50, 50)
fontScale = 1
color = (255, 0, 0)
radius = 3
thickness = 2

cascPath = os.path.dirname(cv2.__file__) + "/data/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)

while True:
    ret, frames = video_capture.read()
    gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frames, (x, y), (x+w, y+h), (0, 255, 0), 2)
        text = (x + w//2), (y + h//2)      # coordinates of the face center
        cx, cy = x + w//2, y + h//2        # center point for the marker
        cv2.circle(frames, (cx, cy), radius, (255, 0, 0), -1)
        cv2.putText(frames, str(text), org, font, fontScale, color, thickness)
    cv2.imshow('Video', frames)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
With the cv2.getTextSize() function, you can calculate the pixel size of the text you are about to draw and offset the text's position by half of that size. That way the text sits right on the center.
text = (x+w//2), (y+h//2)
text_size,t = cv2.getTextSize(text=str(text), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=1, thickness=1)
text_size_x,text_size_y = text_size
text_pos = (x+w//2)-(text_size_x//2), (y+h//2)+(text_size_y//2)
Here is a working version of the code:
import os
import numpy as np
import cv2

font = cv2.FONT_HERSHEY_SIMPLEX
org = (50, 50)
fontScale = 1
color = (255, 0, 0)
radius = 3
thickness = 2

cascPath = os.path.dirname(cv2.__file__) + "/data/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)

while True:
    ret, frames = video_capture.read()
    gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frames, (x, y), (x+w, y+h), (0, 255, 0), 2)
        text = (x + w//2), (y + h//2)
        text_size, t = cv2.getTextSize(text=str(text), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=1, thickness=1)
        text_size_x, text_size_y = text_size
        text_pos = (x + w//2) - (text_size_x//2), (y + h//2) + (text_size_y//2)
        #cv2.circle(frames, (cx, cy), radius, (255, 0, 0), -1)
        cv2.putText(frames, str(text), text_pos, font, fontScale, color, thickness)
    cv2.imshow('Video', frames)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
My face recognition code is getting an error. The error is as follows:
Traceback (most recent call last):
File "C:/Users/Harsh/Desktop/python codes/main.py", line 19, in <module>
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
cv2.error: OpenCV(4.5.1) C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-xeqjxthj\opencv\modules\objdetect\src\cascadedetect.cpp:1689: error: (-215:Assertion failed) !empty() in function 'cv::CascadeClassifier::detectMultiScale'
[ WARN:0] global C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-xeqjxthj\opencv\modules\videoio\src\cap_msmf.cpp (434) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
The code is as follows:
import cv2

face_cascade = \
    cv2.CascadeClassifier('/C:/User/Harsh/Downloads/haarcascade_frontalface_alt.xml')
eye_cascade = \
    cv2.CascadeClassifier('/C:/User/Harsh/Downloads/haarcascade_eye.xml')

cap = cv2.VideoCapture(0)

while 1:
    ret, img = cap.read()
    if ret is True:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        continue
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xFF
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
That error means that your XML file could not be found.
You have typos here:
face_cascade = \
    cv2.CascadeClassifier('/C:/User/Harsh/Downloads/haarcascade_frontalface_alt.xml')
eye_cascade = \
    cv2.CascadeClassifier('/C:/User/Harsh/Downloads/haarcascade_eye.xml')
Notice the slash (/) at the beginning of the path. Correct that and it should work.
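As a minimal sketch of the corrected loading (the same paths as in the question, minus the leading slash, assuming the XML files really are in that Downloads folder), it is also worth checking empty(): CascadeClassifier does not raise an error on a bad path, it just stays empty, and then detectMultiScale fails with exactly the assertion shown above.

import cv2

face_cascade = cv2.CascadeClassifier('C:/User/Harsh/Downloads/haarcascade_frontalface_alt.xml')
eye_cascade = cv2.CascadeClassifier('C:/User/Harsh/Downloads/haarcascade_eye.xml')

# An empty cascade means the XML file was not found or could not be parsed.
if face_cascade.empty() or eye_cascade.empty():
    raise IOError('Could not load one of the Haar cascade XML files')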
I am trying to create a program that can detect faces and predict their gender on a live stream, but I get this error message when running the program:
Traceback (most recent call last):
File "text.py", line 54, in <module>
cus_ana.prediction()
File "text.py", line 31, in prediction
gender = gender_classifier()
TypeError: __call__() missing 1 required positional argument: 'inputs'
This is the code so far:
class CusAnalytics():
    def __init__(self, cascade, gender):
        self.cascade = cascade
        self.gender = gender

    def gender_classifier(self):
        classifier = self.gender
        gender = classifier.predict(np.expand_dims(cv2.resize(cropped, (198, 198)), axis=0))
        gender = np.where(gender.flatten() < 0.5, "Female", "Male")

    def prediction(self):
        cam = cv2.VideoCapture(0)
        while True:
            ret, frame = cam.read()
            frame = cv2.flip(frame, 1)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = self.cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5, minSize=(150, 150))
            for (x, y, w, h) in faces:
                cropped = cv2.resize(frame[y:y+h, x:x+w], (198, 198))
                gender = gender_classifier()
                if gender[0] == 'Male':
                    cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
                    cv2.rectangle(frame, (x-1, y+h), (x+w+1, y+h+50), (255, 0, 0), -1)
                    cv2.rectangle(frame, (x-1, y), (x+w+1, y-50), (255, 0, 0), -1)
                else:
                    cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
                    cv2.rectangle(frame, (x-1, y+h), (x+w+1, y+h+50), (0, 0, 255), -1)
                    cv2.rectangle(frame, (x-1, y), (x+w+1, y-50), (0, 0, 255), -1)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            cv2.imshow('Stream', frame)

cascadePath = "../../../MODEL/TRAINED MODELS/FACE_DETECTION/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
gender_classifier_path = '../../../MODEL/TRAINED MODELS/GENDER_CLASSIFICATION/best_gender_classifier.h5'
gender_classifier = load_model(gender_classifier_path)

cus_ana = CusAnalytics(faceCascade, gender_classifier)
cus_ana.prediction()
The code works fine without the OOP structure. I am new to object-oriented programming and am trying to learn it with this project.
Thank you in advance.
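Not a definitive fix, just a minimal sketch of how the call could be wired through the class (the class name here is hypothetical, and it assumes self.gender holds the loaded Keras model): the method has to be reached via self and given the cropped face; otherwise the bare name gender_classifier resolves to the module-level Keras model, and calling a Keras model object directly requires an inputs argument, which is exactly the TypeError above.

import cv2
import numpy as np

class CusAnalyticsSketch:
    def __init__(self, cascade, gender):
        self.cascade = cascade
        self.gender = gender

    def gender_classifier(self, cropped):
        # self.gender is the loaded Keras model; predict expects a batch dimension
        pred = self.gender.predict(np.expand_dims(cv2.resize(cropped, (198, 198)), axis=0))
        return np.where(pred.flatten() < 0.5, "Female", "Male")

    # inside prediction(), for each detected face:
    #     cropped = cv2.resize(frame[y:y+h, x:x+w], (198, 198))
    #     gender = self.gender_classifier(cropped)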
I have a Viola-Jones algorithm in Python. I'm using Haar cascade XML files, which I load from the OpenCV data folder. But there weren't any XML files for the mouth and nose in OpenCV, so I downloaded those files from EmguCV. The result for face detection is OK, but eye detection isn't good, and nose and mouth detection is very bad. I tried to change the parameters in face_cascade.detectMultiScale, but it didn't help at all.
My code:
import cv2
import sys

def facedet(img):
    face_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_eye.xml')
    mouth_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_mcs_mouth.xml')
    nose_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_mcs_nose.xml')

    img = cv2.imread(img)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        nose = nose_cascade.detectMultiScale(roi_gray)
        mouth = mouth_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
        for (nx, ny, nw, nh) in nose:
            cv2.rectangle(roi_color, (nx, ny), (nx+nw, ny+nh), (0, 0, 255), 2)
        for (mx, my, mw, mh) in mouth:
            cv2.rectangle(roi_color, (mx, my), (mx+mw, my+mh), (0, 0, 0), 2)

    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

if __name__ == '__main__':
    img = sys.argv[1]
    facedet(img)
My question: what am I doing wrong? Is there a simple solution that will give me a better result?
Haar cascades perform alright for faces but not so well for smaller individual parts. A better solution is to detect all the face landmarks together. A good algorithm for that is "One Millisecond Face Alignment with an Ensemble of Regression Trees" by Vahid Kazemi and Josephine Sullivan, CVPR 2014, which is implemented in Dlib (http://dlib.net/face_landmark_detection.py.html).
This works really well for me.
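If it helps, here is a minimal sketch of that dlib approach (assuming dlib is installed and the shape_predictor_68_face_landmarks.dat model has been downloaded from the dlib site; the image path is only an example):

import cv2
import dlib

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

img = cv2.imread('face.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

for rect in detector(gray, 1):
    shape = predictor(gray, rect)
    # The 68-point model covers the eyes, nose and mouth, so drawing every
    # landmark marks the individual parts without separate cascades.
    for i in range(68):
        p = shape.part(i)
        cv2.circle(img, (p.x, p.y), 2, (0, 255, 0), -1)

cv2.imshow('landmarks', img)
cv2.waitKey(0)
cv2.destroyAllWindows()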
I found that if you divide the face into two sections and look for eyes in the top section and the mouth in the lower section, it works really well.
face
--------
| eyes |
|------|
|mouth |
--------
This is a rough illustration of what I did with the code below.
I am aware that the cascade I use is the smile one, but the mouth cascade doesn't seem to work.
import cv2
import sys

mouthCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_smile.xml')
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
eyeCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')

video_capture = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    mouth = mouthCascade.detectMultiScale(gray, 1.3, 5)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
    )
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # Split the face ROI: lower half for the mouth, upper part for the eyes
        roi_gray_mouth = gray[y+(int(h/2)):y+h, x:x+w]
        roi_color_mouth = frame[y+(int(h/2)):y+h, x:x+w]
        roi_gray_eye = gray[y-(int(h/2)):y+h, x:x+w]
        roi_color_eye = frame[y-(int(h/2)):y+h, x:x+w]
        mouth = mouthCascade.detectMultiScale(roi_gray_mouth)
        eyes = eyeCascade.detectMultiScale(roi_gray_eye)
        for (ex, ey, ew, eh) in mouth:
            cv2.rectangle(roi_color_mouth, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
        for (eex, eey, eew, eeh) in eyes:
            d = int(eew / 2)
            cv2.circle(roi_color_eye, (int(eex + eew / 4) + int(d / 2), int(eey + eeh / 4) + int(d / 2)), int(d), (0, 0, 255), 2)
    # Display the resulting frame
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
import cv2
import sys

face_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_eye.xml')
mouth_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_mcs_mouth.xml')
nose_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_mcs_nose.xml')

img = cv2.imread(img)  # img should hold the path to the input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)

for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
    roi_gray = gray[y:y+h, x:x+w]
    roi_color = img[y:y+h, x:x+w]
    eyes = eye_cascade.detectMultiScale(gray, 1.3, 5)
    nose = nose_cascade.detectMultiScale(gray, 1.3, 5)
    mouth = mouth_cascade.detectMultiScale(gray, 1.7, 11)
    for (ex, ey, ew, eh) in eyes:
        cv2.rectangle(img, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
    for (nx, ny, nw, nh) in nose:
        cv2.rectangle(img, (nx, ny), (nx+nw, ny+nh), (0, 0, 255), 2)
    for (mx, my, mw, mh) in mouth:
        cv2.rectangle(img, (mx, my), (mx+mw, my+mh), (0, 0, 0), 2)

cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
You can try this code. It worked for me.