How to detect only the left eye in a video with opencv? - python

I want to detect only the left eye in a video stream, but I can't. When I run this code, it detects both eyes (right and left), and sometimes it also detects the mouth or the nose as if it were an eye.
Follow the code below:
import cv2
import numpy as np

faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_lefteye_2splits.xml')

video_capture = cv2.VideoCapture(0)

while True:
    ret, img = video_capture.read()
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray_img,
        scaleFactor=1.98,
        minNeighbors=8,
        minSize=(80, 80),
        flags=cv2.cv.CV_HAAR_SCALE_IMAGE
    )
    #print 'faces: ', faces
    for (p, q, r, s) in faces:
        #cv2.rectangle(img, (p, q), (p+r, q+s), (255, 0, 0), 3)
        face_gray = gray_img[q:q+s, p:p+r]
        face_color = img[q:q+s, p:p+r]
        eyes = eye_cascade.detectMultiScale(face_gray)
        for (ep, eq, er, es) in eyes:
            cv2.rectangle(face_color, (ep, eq), (ep+er, eq+es), (0, 255, 0), 3)
    rimg = cv2.flip(img, 1)  # mirror the frame horizontally
    cv2.imshow("Video", rimg)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
So what should I do to detect only the left eye?
PS: I'm using Python 2.7.13, NumPy 1.10.0 and OpenCV 2.4.13.3 in this program.

You can use a simple for loop to iterate through the eyes returned by the eye cascade and find the index of the detection with the minimum x-coordinate. That will give you the index of the left-most eye.
def getleftmosteye(eyes):
    leftmost = 9999999
    leftmostindex = -1
    for i in range(len(eyes)):  # loop over however many eyes were detected
        if eyes[i][0] < leftmost:
            leftmost = eyes[i][0]
            leftmostindex = i
    return eyes[leftmostindex]
Now you can get the coordinates of the left eye once you have its index.
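For example, inside the face loop of your code (a minimal sketch that also guards against frames where no eyes are detected):

eyes = eye_cascade.detectMultiScale(face_gray)
if len(eyes) > 0:
    # draw only the left-most detection
    (ep, eq, er, es) = getleftmosteye(eyes)
    cv2.rectangle(face_color, (ep, eq), (ep+er, eq+es), (0, 255, 0), 3)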

Related

Segment black AND moving Pixels

I'm trying to segment the moving propeller in this video. My approach is to detect all black and moving pixels in order to separate the propeller from the rest.
Here is what I tried so far:
import numpy as np
import cv2

x, y, h, w = 350, 100, 420, 500  # cropping values
cap = cv2.VideoCapture('Video Path')

while True:
    _, frame = cap.read()
    frame = frame[y:y+h, x:x+w]  # crop video
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_black = np.array([0, 0, 0])
    upper_black = np.array([360, 255, 90])
    mask = cv2.inRange(hsv, lower_black, upper_black)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    nz = np.argwhere(mask)
    cv2.imshow('Original', frame)
    cv2.imshow('Propeller Segmentation', mask)
    k = cv2.waitKey(30) & 0xff  # press Esc to exit
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
Screenshot from the video
Result of the segmentation
Result with the function cv2.createBackgroundSubtractorMOG2()
I think you should have a look at background subtraction. It should be the right approach for your problem.
OpenCV provides a good tutorial on this: Link
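A minimal sketch of that approach, combining the MOG2 foreground mask with your black-pixel range so that only pixels which are both dark and moving survive (the crop and threshold values are taken from your code and may need tuning):

import cv2
import numpy as np

cap = cv2.VideoCapture('Video Path')
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    if frame is None:
        break
    frame = frame[100:100+420, 350:350+500]  # same crop as above

    # black pixels: low value in HSV (OpenCV hue only goes up to 179)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    black_mask = cv2.inRange(hsv, np.array([0, 0, 0]), np.array([180, 255, 90]))

    # moving pixels: confident foreground only (MOG2 marks shadows as 127)
    motion_mask = fgbg.apply(frame)
    _, motion_mask = cv2.threshold(motion_mask, 200, 255, cv2.THRESH_BINARY)

    # keep only pixels that are black AND moving
    propeller = cv2.bitwise_and(black_mask, motion_mask)

    cv2.imshow('Propeller', propeller)
    if cv2.waitKey(30) & 0xff == 27:  # press Esc to exit
        break

cap.release()
cv2.destroyAllWindows()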

How to rotate camera recorded video?

I am trying to detect faces in a camera recorded video. When I did it with webcam video, it worked fine. But with the camera recorded video, the video gets rotated by -90 degrees. Please suggest how I can get the correctly oriented video output for face detection.
import cv2
import sys

cascPath = sys.argv[1]
faceCascade = cv2.CascadeClassifier('C:/Users/HP/Anaconda2/pkgs/opencv-3.2.0-np112py27_204/Library/etc/haarcascades/haarcascade_frontalface_default.xml')

#video_capture = cv2.VideoCapture(0)
video_capture = cv2.VideoCapture('C:/Users/HP/sample1.mp4')
w = int(video_capture.get(3))
h = int(video_capture.get(4))
#output = cv2.VideoWriter('output_1.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 60, frameSize=(w, h))

while True:
    ret, frame = video_capture.read()
    frame = rotateImage(frame, 90)  # note: rotateImage is not defined anywhere in this script
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.3, 5)
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        #cv2.imshow('face', i)
    #output.write(frame)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
output.release()  # note: output is commented out above, so this line raises a NameError
cv2.destroyAllWindows()
In cv2 you can use the cv2.rotate function to rotate an image as per your requirement:
rotated = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
Note that cv2.flip() mirrors an image rather than rotating it; it takes the image and a flip code (0, 1 or -1). You can check this link for more details:
https://www.geeksforgeeks.org/python-opencv-cv2-flip-method/
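In your loop, the rotation can replace the undefined rotateImage call (a minimal sketch; whether you need ROTATE_90_CLOCKWISE or ROTATE_90_COUNTERCLOCKWISE depends on which way the camera recorded):

import cv2

video_capture = cv2.VideoCapture('C:/Users/HP/sample1.mp4')

while True:
    ret, frame = video_capture.read()
    if frame is None:
        break
    # undo the -90 degree rotation before running detection
    frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()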

OpenCV slow face detection with CCTV Or IP Cam

When I try to detect faces using my laptop's webcam it works fine, but when I try to detect using an IP cam it takes too much time to process a single frame. Is there any solution for this? I also tried YOLO, and it takes even more time than the OpenCV Haar cascade.
Here is the simple code that detects a face and crops that part of the frame.
import cv2
from datetime import datetime
from PIL import Image

# imports and classifier load added for completeness; not shown in the original snippet
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

cap = cv2.VideoCapture("web_Cam_IP")
cropScal = 25

while True:
    # Capture frame-by-frame
    for i in range(10):  # skip 10 frames; without this the stream seems to get stuck
        ret, frame = cap.read()
    frame = cv2.resize(frame, (0, 0), fx=0.70, fy=0.70)
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.02, minNeighbors=5, minSize=(30, 30))
    for (x, y, w, h) in faces:
        if len(faces) > 0:
            try:
                img = gray[y-cropScal:y+h+cropScal, x-cropScal:x+w+cropScal]
                img = cv2.resize(img, (200, 200))
                img = Image.fromarray(img)
                img.save('images/' + datetime.now().strftime("%d_%m_%Y_%I_%M_%S_%p") + '.png')
            except Exception as e:
                pass
        cv2.rectangle(gray, (x-cropScal, y-cropScal), (x+w+cropScal, y+h+cropScal), (0, 255, 0), 2)
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
You're only scaling the input frames by a factor of 0.70, not to an absolute resolution. It's possible that your IP cam has a higher resolution than your webcam, so the detection needs more time to analyze the larger frame.
Try rescaling the frames to a definite size (e.g. 800x600) before the face detection, as sketched below.
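A minimal sketch of that change inside the capture loop (800x600 is just an assumed target; tune it to your setup):

# resize to a fixed resolution so detection cost is independent
# of the camera's native frame size
frame = cv2.resize(frame, (800, 600))
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, scaleFactor=1.02,
                                     minNeighbors=5, minSize=(30, 30))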

Understanding how to deploy python code to pop up balloons

I'm a newbie in programming and I need to write code to detect a balloon on a fixed background using NumPy and OpenCV in live video, and to return the centre of the object [balloon].
Sorry about the ignorance of the questions.
Since I'm new, I had trouble working out the logic of doing it. I don't have the resources to "teach the machine" and create a cascade XML to detect balloons, so I thought of one possible solution:
use cv2.createBackgroundSubtractorMOG2() to detect motion against the fixed background, and once some object [balloon] appears, count all the white pixels in the live video and return its centre, given the right threshold number of white pixels.
The problem is that I don't know how to read a pixel's value on the 0-255 scale to tell whether it's white or black while showing the video at the same time. I think there is a much easier way that I couldn't find guides for.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)
    img_arr = np.array(fgmask)
    cv2.imshow('frame', fgmask)
    for i in fgmask:
        for j in i:
            print(fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
I'm getting gray video on the output and lots of values that I don't know how to interpret.
I would use
changes = (fgmask > 200).sum()
to compare all pixels against an almost-white value (>200) and count those pixels.
Then I can compare the result with some threshold value to treat it as movement.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    if frame is None:
        break

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)

    #changes = sum(sum(fgmask > 200))
    changes = (fgmask > 200).sum()
    is_moving = (changes > 10000)
    print(changes, is_moving)

    cv2.imshow('frame', fgmask)

    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
print() needs some time to display text, so printing all the pixels (many times in a loop) can slow the program down. So I skip this; I don't need to know the values of all pixels.
EDIT: Using the answer in how to detect region of large # of white pixels using opencv?, I added code which finds white regions and draws rectangles around them. The program opens two windows, one with the grayscale fgmask and one with the RGB frame, and one can hide behind the other, so you may have to move a window to see the other.
EDIT: I added code which uses cv2.contourArea(cnt) and (x, y, w, h) = cv2.boundingRect(cnt) to create a list of items (area, x, y, w, h) for all contours, and then max(items) to get the contour with the biggest area. It then uses (x + w//2, y + h//2) as the centre of a red circle.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    if frame is None:
        break

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)

    #changes = sum(sum(fgmask > 200))
    changes = (fgmask > 200).sum()
    is_moving = (changes > 10000)
    print(changes, is_moving)

    items = []
    contours, hier = cv2.findContours(fgmask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if 200 < area:
            (x, y, w, h) = cv2.boundingRect(cnt)
            cv2.rectangle(fgmask, (x, y), (x+w, y+h), 255, 2)
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            items.append((area, x, y, w, h))

    if items:
        main_item = max(items)
        area, x, y, w, h = main_item
        if w > h:
            r = w//2
        else:
            r = h//2
        cv2.circle(frame, (x + w//2, y + h//2), r, (0, 0, 255), 2)

    cv2.imshow('fgmask', fgmask)
    cv2.imshow('frame', frame)

    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()

Python Opencv2 + webcam facial dection, no face being detected no error

I'm using a guide provided online with opencv2.4 that shows you how to detect faces with opencv2 and python. I followed the guide and understand what it says. However, I can't seem to find the issue with my program: the video shows, but no face is detected, even though the video is very clear. There are no errors. I ran it in debug mode and the variable faces remains an empty tuple, so I'm assuming that means it's not finding the face. What I don't understand is why, and I think it has something to do with the hash table.
By hash table I mean the cascade xml file. I understand cascades are basically the guidelines for detecting facial features, correct?
Links to the guides. The hash table, i.e. the xml file, is on the GitHub linked.
https://github.com/shantnu/FaceDetect/blob/master/haarcascade_frontalface_default.xml
https://realpython.com/blog/python/face-detection-in-python-using-a-webcam/
import cv2
import sys
import os

#cascPath = sys.argv[1]
cascPath = os.getcwd() + 'facehash.xml'
faceCascade = cv2.CascadeClassifier(cascPath)
print faceCascade

video_capture = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.cv.CV_HAAR_SCALE_IMAGE
    )
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    # Display the resulting frame
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
You have a wrong path to your xml classifier. (I guess you've changed the name to get a shorter form.)
Your cascPath:
cascPath = os.getcwd() + 'facehash.xml'
also lacks a path separator between the directory and the file name. Instead, try this:
cascPath = "{base_path}/folder_with_your_xml/haarcascade_frontalface_default.xml".format(
    base_path=os.path.abspath(os.path.dirname(__file__)))
And now it should work.
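Alternatively, os.path.join() sidesteps the missing-separator problem entirely (a minimal sketch, assuming the xml file sits next to the script):

import os
import cv2

# build the path with an explicit separator
cascPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                        'haarcascade_frontalface_default.xml')
faceCascade = cv2.CascadeClassifier(cascPath)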
