OpenCV + Python: take face samples automatically

I want to capture some face image samples from a webcam, about 8 of them. When I press the "s" key, the program should take 8 samples. So I tried a loop like this:
faces = faceCascade.detectMultiScale(
    gray,
    scaleFactor=1.1,
    minNeighbors=5,
    minSize=(30, 30),
    flags=cv2.cv.CV_HAAR_SCALE_IMAGE
)
for (x, y, w, h) in faces:
    cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    if cv2.waitKey(1) & 0xFF == ord('s'):
        ROI = frame[y:y+h, x:x+w]
        nameUser = raw_input("Input your name: ")
        for i in range(0, 8):
            i_string = str(i)
            cv2.imwrite(nameUser + i_string + '.jpg', ROI)
            time.sleep(3)
But the 8 images I get are all exactly the same. Is there a way to make the program take face samples that are not identical?

I think it's just a logic problem; try to reorganize it:
recording = 0  # use it as a flag/counter
while True:
    ok, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.cv.CV_HAAR_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    if cv2.waitKey(1) & 0xFF == ord('s'):
        recording = 8
        nameUser = raw_input("Input your name: ")
    if recording > 0 and len(faces) > 0:   # 'and', not '&&'; stop after 8 saves
        ROI = frame[y:y+h, x:x+w]          # crop of the last detected face
        cv2.imwrite(nameUser + str(recording) + '.jpg', ROI)
        recording -= 1
        time.sleep(3)
Because the ROI is now cropped from a freshly read frame on every pass through the loop, the 8 saved images are no longer identical.

Related

Trying to print the center position of a square

I have tried to use cv2.putText and it appears to show the position based on the top right of the window and not the actual center of the image. It will probably be an obvious fix since I just started using OpenCV.
import os
import numpy as np
import cv2

font = cv2.FONT_HERSHEY_SIMPLEX
org = (50, 50)
fontScale = 1
color = (255, 0, 0)
radius = 3
thickness = 2

cascPath = os.path.dirname(cv2.__file__) + "/data/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)

while True:
    ret, frames = video_capture.read()
    gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frames, (x, y), (x+w, y+h), (0, 255, 0), 2)
        text = (x + w//2), (y + h//2)   # center of the detected face
        cx, cy = text                   # coordinates for the center dot
        cv2.circle(frames, (cx, cy), radius, (255, 0, 0), -1)
        cv2.putText(frames, str(text), org, font, fontScale, color, thickness)
    cv2.imshow('Video', frames)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
With the cv2.getTextSize() function you can measure the pixel size of the text you are about to draw. Subtract half of its width from the x coordinate and add half of its height to the y coordinate of the face center, and the text will sit right on the center.
text = (x+w//2), (y+h//2)
text_size,t = cv2.getTextSize(text=str(text), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=1, thickness=1)
text_size_x,text_size_y = text_size
text_pos = (x+w//2)-(text_size_x//2), (y+h//2)+(text_size_y//2)
Here is working code:
import os
import numpy as np
import cv2

font = cv2.FONT_HERSHEY_SIMPLEX
org = (50, 50)
fontScale = 1
color = (255, 0, 0)
radius = 3
thickness = 2

cascPath = os.path.dirname(cv2.__file__) + "/data/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)

while True:
    ret, frames = video_capture.read()
    gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frames, (x, y), (x+w, y+h), (0, 255, 0), 2)
        text = (x + w//2), (y + h//2)
        # measure the rendered text so it can be centered on the face
        text_size, t = cv2.getTextSize(text=str(text), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=1, thickness=1)
        text_size_x, text_size_y = text_size
        text_pos = (x + w//2) - (text_size_x//2), (y + h//2) + (text_size_y//2)
        #cv2.circle(frames, (cx, cy), radius, (255, 0, 0), -1)
        cv2.putText(frames, str(text), text_pos, font, fontScale, color, thickness)
    cv2.imshow('Video', frames)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()

Webcam window always crashes while trying to detect a face

I'm trying to make a simple haar cascade program to detect a face.
import cv2

faceCascade = cv2.CascadeClassifier('D:\\Python\\Python37\\Lib\\site-packages\\cv2\\data\\haarcascade_frontalcatface.xml')
body_cascade = cv2.CascadeClassifier('haarcascade_upperbody.xml')
video_capture = cv2.VideoCapture(0)
img_counter = 0

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.5,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('FaceDetection', frame)
    k = input()
    # ESC pressed
    if k % 256 == 27:
        break

video_capture.release()
cv2.destroyAllWindows()
But every time I launch it, the webcam window just freezes and crashes :(
My PC is definitely powerful enough, so why could this happen?
I have made a few changes to your code. For a human face use haarcascade_frontalface_default.xml, and for a cat face use haarcascade_frontalcatface.xml. Try the code below and it will work like a charm :)
import cv2

#faceCascade = cv2.CascadeClassifier('D:\\Python\\Python37\\Lib\\site-packages\\cv2\\data\\haarcascade_frontalcatface.xml')
faceCascade = cv2.CascadeClassifier('D:\\Python\\Python37\\Lib\\site-packages\\cv2\\data\\haarcascade_frontalface_default.xml')
body_cascade = cv2.CascadeClassifier('haarcascade_upperbody.xml')
video_capture = cv2.VideoCapture(0)
img_counter = 0

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.5,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('FaceDetection', frame)
    #k = input()
    k = cv2.waitKey(1) & 0xFF
    if k == 27:   # ESC pressed
        break
    # if k % 256 == 27:
    #     break

video_capture.release()
cv2.destroyAllWindows()

How to show a live video feed using OpenCV?

While attempting face detection that displays coordinates and their average on a Raspberry Pi 3, the PiCamera video stream only shows a static image, whereas I am trying to get a live video feed.
vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)

while True:
    frame = vs.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.3, 5)
    detected = False
    xcoords.clear()
    for (x, y, w, h) in faces:
        detected = True
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        coordinates = (x)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        eyes = eyeCascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
    cv2.imshow('Frame', frame)
    k = cv2.waitKey(0) & 0xff
    if k == 27:
        break

vs.stop()
cv2.destroyAllWindows()
Does anyone have a solution?
Change k = cv2.waitKey(0) & 0xff to k = cv2.waitKey(1) & 0xff. With an argument of 0, waitKey blocks until a key is pressed, so the window keeps showing the same frame; with a 1 ms timeout the loop moves on and reads a new frame, giving you a live feed.
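For reference, here is a minimal sketch of the corrected display loop (assuming the VideoStream setup and faceCascade from the question; the eye detection is omitted for brevity):
while True:
    frame = vs.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
    cv2.imshow('Frame', frame)
    # waitKey(1) returns after about 1 ms, so the loop immediately reads the next frame
    if cv2.waitKey(1) & 0xFF == 27:   # ESC
        break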

Viola-Jones in Python with openCV, detection mouth and nose

I have a Viola-Jones algorithm in Python. I'm using the Haar cascade XML files, which I load from the OpenCV data folder. But there was no XML file for the mouth and nose in OpenCV, so I downloaded those files from EmguCV. The result for face detection is OK, but eye detection isn't good, and nose and mouth detection are very bad. I tried changing the parameters in face_cascade.detectMultiScale, but it didn't help at all.
My code:
import cv2
import sys

def facedet(img):
    face_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_eye.xml')
    mouth_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_mcs_mouth.xml')
    nose_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_mcs_nose.xml')
    img = cv2.imread(img)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        nose = nose_cascade.detectMultiScale(roi_gray)
        mouth = mouth_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
        for (nx, ny, nw, nh) in nose:
            cv2.rectangle(roi_color, (nx, ny), (nx + nw, ny + nh), (0, 0, 255), 2)
        for (mx, my, mw, mh) in mouth:
            cv2.rectangle(roi_color, (mx, my), (mx + mw, my + mh), (0, 0, 0), 2)
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

if __name__ == '__main__':
    img = sys.argv[1]
    facedet(img)
My question: What am I doing wrong? Is there any simple solution that will give me a better result?
Haar cascades perform alright for faces but not so well for smaller individual parts. A better solution is to detect all the face landmarks together. A good algorithm for that is "One Millisecond Face Alignment with an Ensemble of Regression Trees by Vahid Kazemi and Josephine Sullivan, CVPR 2014" which is implemented in Dlib (http://dlib.net/face_landmark_detection.py.html).
This works really well for me.
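For reference, a minimal sketch of that Dlib approach, assuming dlib is installed and the shape_predictor_68_face_landmarks.dat model from dlib.net has been downloaded next to the script (the file name face.jpg is just a placeholder):
import cv2
import dlib

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

img = cv2.imread("face.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

for rect in detector(gray, 1):                 # upsample once to help with small faces
    shape = predictor(gray, rect)
    # in the 68-point model: points 27-35 are the nose, 36-47 the eyes, 48-67 the mouth
    for i in range(68):
        p = shape.part(i)
        cv2.circle(img, (p.x, p.y), 2, (0, 255, 0), -1)

cv2.imshow('landmarks', img)
cv2.waitKey(0)
cv2.destroyAllWindows()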
I found that if you divide the face into two sections and look for eyes in the top section and the mouth in the lower section, it works really well.
face
--------
| eyes |
|------|
|mouth |
--------
This is a rough illustration of what I did with the code below.
I am aware that the cascade I use is the smile one, but the mouth cascade doesn't seem to work.
import cv2
import sys

mouthCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_smile.xml')
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
eyeCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')

video_capture = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    mouth = mouthCascade.detectMultiScale(gray, 1.3, 5)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
    )
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # Split the face into a lower (mouth) and an upper (eye) region
        roi_gray_mouth = gray[y+(int(h/2)):y+h, x:x+w]
        roi_color_mouth = frame[y+(int(h/2)):y+h, x:x+w]
        roi_gray_eye = gray[y-(int(h/2)):y+h, x:x+w]
        roi_color_eye = frame[y-(int(h/2)):y+h, x:x+w]
        mouth = mouthCascade.detectMultiScale(roi_gray_mouth)
        eyes = eyeCascade.detectMultiScale(roi_gray_eye)
        for (ex, ey, ew, eh) in mouth:
            cv2.rectangle(roi_color_mouth, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
        for (eex, eey, eew, eeh) in eyes:
            d = int(eew / 2)
            cv2.circle(roi_color_eye, (int(eex + eew / 4) + int(d / 2), int(eey + eeh / 4) + int(d / 2)), int(d), (0, 0, 255), 2)
    # Display the resulting frame
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
import cv2
import sys

face_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_eye.xml')
mouth_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_mcs_mouth.xml')
nose_cascade = cv2.CascadeClassifier('/home/kattynka/opencv/data/haarcascades/haarcascade_mcs_nose.xml')

img = cv2.imread(sys.argv[1])   # image path passed on the command line
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)

for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
    roi_gray = gray[y:y+h, x:x+w]
    roi_color = img[y:y+h, x:x+w]
    # detect on the whole grayscale image, with stricter parameters for the mouth
    eyes = eye_cascade.detectMultiScale(gray, 1.3, 5)
    nose = nose_cascade.detectMultiScale(gray, 1.3, 5)
    mouth = mouth_cascade.detectMultiScale(gray, 1.7, 11)
    for (ex, ey, ew, eh) in eyes:
        cv2.rectangle(img, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
    for (nx, ny, nw, nh) in nose:
        cv2.rectangle(img, (nx, ny), (nx + nw, ny + nh), (0, 0, 255), 2)
    for (mx, my, mw, mh) in mouth:
        cv2.rectangle(img, (mx, my), (mx + mw, my + mh), (0, 0, 0), 2)

cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
You can try this code; it worked for me.

Haar cascade face detection in OpenCV

I used the following code to detect a face using the Haar cascade classifiers provided by OpenCV Python. But faces are not detected and the square around the face is not drawn. How can I solve this?
import cv2

index = raw_input("Enter the index No. : ")
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
cap = cv2.VideoCapture(0)
cont = 0

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=10,
        minSize=(30, 30),
        flags=cv2.cv.CV_HAAR_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    # Display the resulting frame
    cv2.imshow('frame', frame)
    inpt = cv2.waitKey(1)
    if inpt & 0xFF == ord('q'):
        break
    elif inpt & 0xFF == ord('s'):
        #name = 'G:\XCODRA\Integrated_v_01\EigenFaceRecognizer\img2' + index + "." + (str(cont)) + ".png"
        name = 'IC_image\\' + index + "." + (str(cont)) + ".png"
        resized = cv2.resize(gray, None, fx=200, fy=200, interpolation=cv2.INTER_AREA)
        img = cv2.equalizeHist(resized)
        cv2.imwrite(name, img)
        print cont
        cont += 1
Use the full path for the classifier. When OpenCV cannot find haarcascade_frontalface_default.xml relative to the current working directory, the cascade loads empty and no faces will ever be detected.
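As a sketch, one way to build that path with a recent opencv-python package (which exposes the bundled cascade folder as cv2.data.haarcascades; older installs may need a hard-coded absolute path instead):
import os
import cv2

cascPath = os.path.join(cv2.data.haarcascades, "haarcascade_frontalface_default.xml")
faceCascade = cv2.CascadeClassifier(cascPath)

# empty() is True when the XML could not be loaded, the usual reason no faces are ever detected
if faceCascade.empty():
    raise IOError("Could not load cascade: " + cascPath)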
