Trying to print the center position of a square - python

I have tried to use cv2.putText, and it appears to show the position relative to the top right of the window and not the actual center of the image. It is probably an obvious fix, since I just started using OpenCV.
import os
import numpy as np
import cv2

font = cv2.FONT_HERSHEY_SIMPLEX
org = (50, 50)
fontScale = 1
color = (255, 0, 0)
radius = 3
thickness = 2

cascPath = os.path.dirname(cv2.__file__) + "/data/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)

while True:
    ret, frames = video_capture.read()
    gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frames, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # center of the detected face
        cx, cy = x + w // 2, y + h // 2
        text = (cx, cy)
        cv2.circle(frames, (cx, cy), radius, (255, 0, 0), -1)
        cv2.putText(frames, str(text), org, font, fontScale, color, thickness)
    cv2.imshow('Video', frames)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()

With the cv2.getTextSize() function you can measure the pixel size of the text you are about to draw and subtract half of it from the text's position. That way the text lands right on the center.
text = (x + w // 2), (y + h // 2)
text_size, baseline = cv2.getTextSize(text=str(text), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=1, thickness=1)
text_size_x, text_size_y = text_size
text_pos = (x + w // 2) - (text_size_x // 2), (y + h // 2) + (text_size_y // 2)
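If you want to check the centering math without a webcam, here is a quick self-contained sketch of the same idea on a blank image (the 200x150 box is just an arbitrary example, not from the question):

import cv2
import numpy as np

# Demo: center a label inside an arbitrary box drawn on a blank image.
img = np.zeros((300, 300, 3), dtype=np.uint8)
x, y, w, h = 50, 50, 200, 150  # example box chosen for illustration
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

cx, cy = x + w // 2, y + h // 2
label = str((cx, cy))
(text_w, text_h), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
# shift left by half the text width and down by half the text height
text_pos = (cx - text_w // 2, cy + text_h // 2)
cv2.putText(img, label, text_pos, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)

cv2.imshow('centered text', img)
cv2.waitKey(0)
cv2.destroyAllWindows()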
Here is the full working code:
import os
import numpy as np
import cv2

font = cv2.FONT_HERSHEY_SIMPLEX
org = (50, 50)
fontScale = 1
color = (255, 0, 0)
radius = 3
thickness = 2

cascPath = os.path.dirname(cv2.__file__) + "/data/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)

while True:
    ret, frames = video_capture.read()
    gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frames, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = (x + w // 2), (y + h // 2)
        text_size, baseline = cv2.getTextSize(text=str(text), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=1, thickness=1)
        text_size_x, text_size_y = text_size
        text_pos = (x + w // 2) - (text_size_x // 2), (y + h // 2) + (text_size_y // 2)
        # cv2.circle(frames, (cx, cy), radius, (255, 0, 0), -1)
        cv2.putText(frames, str(text), text_pos, font, fontScale, color, thickness)
    cv2.imshow('Video', frames)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()

Related

How to run a command once when a face was detected in OpenCV

I am trying to make it so that when a face is detected, it runs a Python command once instead of spamming it, for example print().
try:
    import cv2
    import os
    import time
    cap = cv2.VideoCapture(0)
    pTime = 0
    cascPath = os.path.dirname(cv2.__file__) + "/data/haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascPath)
    face_detected = True
    while True:
        success, img = cap.read()
        img = cv2.flip(img, 1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        face_count = 0
        cv2.putText(img, f'FPS:{int(fps)}', (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            face_count = face_count + 1
            cv2.putText(img, 'Face num ' + str(face_count), (x - 10, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(img, f'Faces Detected: {face_count}', (20, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        if face_count > 0:  # If one or more faces were detected
            print("a face was detected")
        elif face_count == 0:  # If no faces were detected
            print("the face magically disappeared")
        cv2.imshow("Face Recognition", img)
        if cv2.waitKey(1) & 0xFF == ord(' '):
            cv2.destroyAllWindows()
            break
except KeyboardInterrupt:
    print("[KeyboardInterrupt] Exiting...")
    time.sleep(2)
    exit()
How can I make it print only once while the face is detected, instead of spamming (without using something like \r)?
You need to introduce state and react only when it changes.
Without copy-pasting your entire code, I'll just show you the idea.
# the state variable
seeing_faces = False

while True:
    frame = get_frame()
    faces = detect_faces(frame)
    there_are_faces_now = (len(faces) > 0)

    # compare state to current detection; if different, the state CHANGES
    if there_are_faces_now and not seeing_faces:
        print("NOW I see faces")
    elif seeing_faces and not there_are_faces_now:
        print("no longer seeing faces")
    else:
        pass  # no change

    # update state
    seeing_faces = there_are_faces_now
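To see the idea in isolation, here is a toy run of the same logic with a hard-coded detection sequence instead of the camera (the numbers are made up purely for illustration); the prints fire only when the state flips:

# Toy example: 0 means no face in that frame, 1 means at least one face.
detections = [0, 0, 1, 1, 1, 0, 0, 1]
seeing_faces = False
for n in detections:
    there_are_faces_now = n > 0
    if there_are_faces_now and not seeing_faces:
        print("NOW I see faces")          # fires once when faces appear
    elif seeing_faces and not there_are_faces_now:
        print("no longer seeing faces")   # fires once when faces disappear
    seeing_faces = there_are_faces_now
# prints three lines in total, not one line per frame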
You can track the number of detected faces every frame and execute the command you want when the number of faces increases:
try:
    import cv2
    import os
    import time
    cap = cv2.VideoCapture(0)
    pTime = 0
    cascPath = os.path.dirname(cv2.__file__) + "/data/haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascPath)
    face_detected = True
    currentFaces = 0
    while True:
        success, img = cap.read()
        img = cv2.flip(img, 1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        face_count = 0
        cv2.putText(img, f'FPS:{int(fps)}', (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            face_count = face_count + 1
            cv2.putText(img, 'Face num ' + str(face_count), (x - 10, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(img, f'Faces Detected: {face_count}', (20, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        print(f"Faces Detected: {face_count}", end="\r")
        # if face_count > 0:  # If one or more faces were detected
        #     print("a face was detected")
        # elif face_count == 0:  # If no faces were detected
        #     print("the face magically disappeared")
        if face_count > currentFaces:
            print("Command to execute, face detected")
        currentFaces = face_count
        cv2.imshow("Face Recognition", img)
        if cv2.waitKey(1) & 0xFF == ord(' '):
            cv2.destroyAllWindows()
            break
except KeyboardInterrupt:
    print("[KeyboardInterrupt] Exiting...")
    time.sleep(2)
    exit()

How to fix Python error: 'int' object is not iterable

I am using OpenCV for face recognition, and while executing the code I am getting this error:
File "<ipython-input-2-b8cd1a5a0b77>", line 44, in <module>
canvas = detect(gray, frame)
File "<ipython-input-2-b8cd1a5a0b77>", line 23, in detect
id_, conf = recognizer.predict(roi_gray)
TypeError: 'int' object is not iterable
And then after that, the kernel dies. Please help.
def detect(gray, frame):
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        id_, conf = recognizer.predict(roi_gray)
        if conf >= 45:
            font = cv2.FONT_HERSHEY_COMPLEX
            name = labels[id_]
            color = (255, 255, 0)
            cv2.putText(frame, name, (x, y), font, 1, color, 2, cv2.LINE_AA)
        eyes = eye_cascade.detectMultiScale(roi_gray, 1.1, 3)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
    return frame

video_capture = cv2.VideoCapture(0)
while True:
    ret, frame = video_capture.read()
    if ret is True:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    else:
        continue
    canvas = detect(gray, frame)
    cv2.imshow('Video', canvas)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()

Viola-Jones in Python with OpenCV, mouth and nose detection

I have a Viola-Jones algorithm in Python. I'm using the Haar cascade XML files that I load from the OpenCV root folder. But there were no XML files for the mouth and nose in OpenCV, so I downloaded those from EmguCV. The result for face detection is OK, but eye detection isn't good, and nose and mouth detection are very bad. I tried changing the parameters of face_cascade.detectMultiScale, but it didn't help at all.
My code:
import cv2
import sys

def facedet(img):
    face_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_eye.xml')
    mouth_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_mcs_mouth.xml')
    nose_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_mcs_nose.xml')
    img = cv2.imread(img)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        nose = nose_cascade.detectMultiScale(roi_gray)
        mouth = mouth_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
        for (nx, ny, nw, nh) in nose:
            cv2.rectangle(roi_color, (nx, ny), (nx + nw, ny + nh), (0, 0, 255), 2)
        for (mx, my, mw, mh) in mouth:
            cv2.rectangle(roi_color, (mx, my), (mx + mw, my + mh), (0, 0, 0), 2)
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

if __name__ == '__main__':
    img = sys.argv[1]  # path to the input image
    facedet(img)
My question: What am I doing wrong? Is there a simple solution that will give me a better result?
Output: (screenshot of the detection result omitted)
Haar cascades perform alright for faces but not so well for smaller individual parts. A better solution is to detect all the face landmarks together. A good algorithm for that is "One Millisecond Face Alignment with an Ensemble of Regression Trees" by Vahid Kazemi and Josephine Sullivan (CVPR 2014), which is implemented in Dlib (http://dlib.net/face_landmark_detection.py.html).
This works really well for me.
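If you want to try the landmark route, here is a minimal sketch with dlib; it assumes you have installed dlib and downloaded the pretrained shape_predictor_68_face_landmarks.dat model separately, and the image path is only a placeholder:

import cv2
import dlib

# Assumes shape_predictor_68_face_landmarks.dat was downloaded from the dlib site.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

img = cv2.imread("face.jpg")  # placeholder path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

for rect in detector(gray, 1):  # upsample once to help with small faces
    shape = predictor(gray, rect)
    # The 68 points cover the jaw, eyebrows, eyes, nose and mouth.
    for i in range(68):
        p = shape.part(i)
        cv2.circle(img, (p.x, p.y), 2, (0, 255, 0), -1)

cv2.imshow("landmarks", img)
cv2.waitKey(0)
cv2.destroyAllWindows()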
I found that if you divide the face into two sections and look for the eyes only in the top section and the mouth only in the lower section, it works really well.
face
--------
| eyes |
|------|
|mouth |
--------
This is a rough illustration of what I did with the code below.
I am aware that the cascade I use is the smile one, but the mouth cascade doesn't seem to work for me.
import cv2
import sys

mouthCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_smile.xml')
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
eyeCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
video_capture = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    mouth = mouthCascade.detectMultiScale(gray, 1.3, 5)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
    )
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # Split the face: lower half for the mouth, upper part for the eyes
        roi_gray_mouth = gray[y + (int(h / 2)):y + h, x:x + w]
        roi_color_mouth = frame[y + (int(h / 2)):y + h, x:x + w]
        roi_gray_eye = gray[y - (int(h / 2)):y + h, x:x + w]
        roi_color_eye = frame[y - (int(h / 2)):y + h, x:x + w]
        mouth = mouthCascade.detectMultiScale(roi_gray_mouth)
        eyes = eyeCascade.detectMultiScale(roi_gray_eye)
        for (ex, ey, ew, eh) in mouth:
            cv2.rectangle(roi_color_mouth, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
        for (eex, eey, eew, eeh) in eyes:
            d = int(eew / 2)
            cv2.circle(roi_color_eye, (int(eex + eew / 4) + int(d / 2), int(eey + eeh / 4) + int(d / 2)), int(d), (0, 0, 255), 2)
    # Display the resulting frame
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
import cv2
import sys

face_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_eye.xml')
mouth_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_mcs_mouth.xml')
nose_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_mcs_nose.xml')

img = sys.argv[1]  # path to the input image, as in the question's script
img = cv2.imread(img)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    roi_gray = gray[y:y + h, x:x + w]
    roi_color = img[y:y + h, x:x + w]
    eyes = eye_cascade.detectMultiScale(gray, 1.3, 5)
    nose = nose_cascade.detectMultiScale(gray, 1.3, 5)
    mouth = mouth_cascade.detectMultiScale(gray, 1.7, 11)
    for (ex, ey, ew, eh) in eyes:
        cv2.rectangle(img, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
    for (nx, ny, nw, nh) in nose:
        cv2.rectangle(img, (nx, ny), (nx + nw, ny + nh), (0, 0, 255), 2)
    for (mx, my, mw, mh) in mouth:
        cv2.rectangle(img, (mx, my), (mx + mw, my + mh), (0, 0, 0), 2)

cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
You can try this code; it worked for me.

Haar cascade face detection with OpenCV

I used the following code to detect a face using the Haar cascade classifiers provided by OpenCV Python, but faces are not detected and the square around the face is not drawn. How can I solve this?
import cv2

index = raw_input("Enter the index No. : ")
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
cap = cv2.VideoCapture(0)
cont = 0

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=10,
        minSize=(30, 30),
        flags=cv2.cv.CV_HAAR_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        #cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # Display the resulting frame
    cv2.imshow('frame', frame)
    inpt = cv2.waitKey(1)
    if inpt & 0xFF == ord('q'):
        break
    elif inpt & 0xFF == ord('s'):
        #name='G:\XCODRA\Integrated_v_01\EigenFaceRecognizer\img2'+index+"."+(str(cont))+".png"
        name = 'IC_image\\' + index + "." + (str(cont)) + ".png"
        resized = cv2.resize(gray, None, fx=200, fy=200, interpolation=cv2.INTER_AREA)
        img = cv2.equalizeHist(resized)
        cv2.imwrite(name, img)
        print cont
        cont += 1
Use the full path for the classifier.
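For example, recent opencv-python builds expose the install location as cv2.data.haarcascades, so you can build an absolute path from it (this is a sketch assuming a reasonably new OpenCV; on an old 2.x install you would hard-code the full path to the xml yourself):

import cv2

# Build an absolute path to the cascade that ships with opencv-python.
cascPath = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)

# empty() is True when the xml could not be loaded, which is the usual
# reason detectMultiScale silently finds nothing.
if faceCascade.empty():
    raise IOError("Could not load cascade from " + cascPath)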

OpenCV + Python take samples automatically

I want to take some sample face images from the webcam, about 8 samples. So when I press the "s" key, it should take 8 samples, and I tried to make a loop like this:
faces = faceCascade.detectMultiScale(
    gray,
    scaleFactor=1.1,
    minNeighbors=5,
    minSize=(30, 30),
    flags=cv2.cv.CV_HAAR_SCALE_IMAGE
)
for (x, y, w, h) in faces:
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    if cv2.waitKey(1) & 0xFF == ord('s'):
        ROI = frame[y:y + h, x:x + w]
        nameUser = raw_input("Input yourname: ")
        for i in range(0, 8):
            i_string = str(i)
            cv2.imwrite(nameUser + i_string + '.jpg', ROI)
            time.sleep(3)
But the 8 images I got are all exactly the same. Is there a way to make the program take face samples that are not exactly the same?
I think it's just a logic problem; try to reorganize it:
recording = 0  # use it as a flag/counter

while True:
    ok, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.cv.CV_HAAR_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    if cv2.waitKey(1) & 0xFF == ord('s'):
        recording = 8
        nameUser = raw_input("Input yourname: ")
    if recording > 0 and len(faces) > 0:
        # save one sample per frame until the 8 samples have been written
        ROI = frame[y:y + h, x:x + w]
        i_string = str(recording)
        cv2.imwrite(nameUser + i_string + '.jpg', ROI)
        recording -= 1
        time.sleep(3)
