Find if a drawn rectangle is visible on screen in OpenCV Python

I have some code that draws a rectangle using the cv2.rectangle function based on other parameters; it is part of a face detection program. The rectangle is shown on screen within an image using imshow. However, because the rectangle's dimensions vary, there are situations where it is not visible. After the line of code that draws the rectangle in the image, how do I detect whether the rectangle is visible or not? I want this so that I can tell when a face is not detected. This is my code:
import numpy as np
import cv2

# multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades
# https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

cap = cv2.VideoCapture(0)

while 1:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        # integer division (//) keeps the coordinates ints, as cv2.rectangle requires
        cv2.rectangle(img, (x + w // 4, y + h // 4), (x + 3 * w // 4, y + 3 * h // 4), (255, 0, 0), 2)
    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # Esc key
        break

cap.release()
cv2.destroyAllWindows()
Thanks!

Add print(len(faces)) in the while loop. If it's zero, then no face was detected, so no rectangle was drawn.
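A minimal sketch of that check inside the question's while loop (the putText warning is just one illustrative way to react):

# inside the while loop, right after detection:
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
print(len(faces))  # 0 means no face was found, hence no rectangle this frame
if len(faces) == 0:
    # no detection: the rectangle cannot be visible, so react here
    cv2.putText(img, 'No face detected', (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)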

Related

Auto zooming while a face is detected

Hi guys, I want to implement zoom in and zoom out, like a digital camera, on the detected faces while capturing in real time using OpenCV. Is there any way I can do it other than just cropping the frame and then displaying it?
Here is my code:
import cv2

# Load the cascade
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# To capture video from webcam.
cap = cv2.VideoCapture(0)
# To use a video file as input
# cap = cv2.VideoCapture('filename.mp4')

while True:
    # Read the frame
    _, img = cap.read()
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect the faces
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    # Draw the rectangle around each face
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
    # Display
    cv2.imshow('img', img)
    # Stop if escape key is pressed
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

# Release the VideoCapture object
cap.release()
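One way to approach this is a digital zoom: crop a region of the frame and resize it back to full size, easing the zoom factor between frames so the effect is gradual rather than a hard crop. Below is a minimal sketch under that assumption; the 3x cap, the 0.1 easing constant, and the "face fills half the frame" target are arbitrary choices, not an established API:

import cv2

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
zoom = 1.0  # current zoom factor, eased between frames

while True:
    ok, img = cap.read()
    if not ok:
        break
    h, w = img.shape[:2]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)

    # target zoom: make the first face fill roughly half the frame height, capped at 3x
    if len(faces) > 0:
        target = max(1.0, min(3.0, h / (2.0 * faces[0][3])))
    else:
        target = 1.0  # no face: ease back out to the full view
    zoom += 0.1 * (target - zoom)  # ease toward the target for a smooth effect

    # centered crop at the current zoom, resized back to the full frame size
    cw, ch = int(w / zoom), int(h / zoom)
    x0, y0 = (w - cw) // 2, (h - ch) // 2
    img = cv2.resize(img[y0:y0 + ch, x0:x0 + cw], (w, h),
                     interpolation=cv2.INTER_LINEAR)

    cv2.imshow('img', img)
    if cv2.waitKey(30) & 0xff == 27:  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()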

Face Detection - OpenCV can't find the face

I am learning OpenCV. Here is my code:
import cv2

face_patterns = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
sample_image = cv2.imread('1.jpg')
# cv2.imread returns BGR, so use COLOR_BGR2GRAY (not RGB2GRAY) here
gray = cv2.cvtColor(sample_image, cv2.COLOR_BGR2GRAY)
faces = face_patterns.detectMultiScale(gray, 1.3, 5)
print(len(faces))
for (x, y, w, h) in faces:
    cv2.rectangle(sample_image, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imwrite('result.jpg', sample_image)
If I use picture A, I get a lot of faces; if I use picture B, I get none. I changed the arguments to detectMultiScale(gray, 1.3, 5) many times and it still doesn't work.
[Picture A]
[Picture A result]
[Picture B - no face detected]
I see this more as a limitation of the cv2 Haar cascade approach itself; there are better models for detecting faces. The face_recognition library is also very useful for detecting and recognizing faces. It uses HOG as the default model. You can also use CNN for better accuracy, but the detection process will be slow.
Find more here.
import cv2
import face_recognition as fr

sample_image = fr.load_image_file("1.jpg")  # loads the image as RGB
unknown_face_loc = fr.face_locations(sample_image, model="hog")
print(len(unknown_face_loc))  # detected face count
for faceloc in unknown_face_loc:
    y1, x2, y2, x1 = faceloc  # face_locations returns (top, right, bottom, left)
    cv2.rectangle(sample_image, (x1, y1), (x2, y2), (0, 0, 255), 2)
sample_image = sample_image[:, :, ::-1]  # convert RGB to BGR so cv2.imwrite saves correct colors
cv2.imwrite("result.jpg", sample_image)
Instead of:
faces = face_patterns.detectMultiScale(gray, 1.3, 5)
try using:
faces = face_patterns.detectMultiScale(blackandwhite, 1.3, 5)
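The answer does not define blackandwhite; presumably a binarized image is meant. A minimal sketch of producing one with cv2.threshold (the 127 cutoff is an assumed value):

gray = cv2.cvtColor(sample_image, cv2.COLOR_BGR2GRAY)
# binarize: pixels above 127 become 255 (white), the rest become 0 (black)
_, blackandwhite = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)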
If the problem occurs even after this, check out my code for face detection:
import cv2

# note: haarcascade_eye.xml detects eyes; swap in haarcascade_frontalface_default.xml for faces
cascade_classifier = cv2.CascadeClassifier('haarcascades/haarcascade_eye.xml')
cap = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    # cv2.COLOR_BGR2GRAY is needed here (the bare flag 0 means BGR2BGRA, not grayscale)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detections = cascade_classifier.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    if len(detections) > 0:
        (x, y, w, h) = detections[0]
        frame = cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
    # for (x, y, w, h) in detections:
    #     frame = cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
    # Display the resulting frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()

How to rotate a camera-recorded video?

I am trying to detect faces in a camera-recorded video. When I did it with webcam video, it worked fine. But with the camera-recorded video, the video gets rotated by -90 degrees. Please suggest how I can get the correctly oriented video output for face detection.
import cv2
import sys

cascPath = sys.argv[1]
faceCascade = cv2.CascadeClassifier('C:/Users/HP/Anaconda2/pkgs/opencv-3.2.0-np112py27_204/Library/etc/haarcascades/haarcascade_frontalface_default.xml')

#video_capture = cv2.VideoCapture(0)
video_capture = cv2.VideoCapture('C:/Users/HP/sample1.mp4')
w = int(video_capture.get(3))  # frame width
h = int(video_capture.get(4))  # frame height
#output = cv2.VideoWriter('output_1.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 60, frameSize=(w, h))

while True:
    ret, frame = video_capture.read()
    frame = rotateImage(frame, 90)  # rotateImage is not defined anywhere - this is the missing piece
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.3, 5)
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        #cv2.imshow('face', i)
    #output.write(frame)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
#output.release()  # output is commented out above, so releasing it would raise a NameError
cv2.destroyAllWindows()
In cv2 you can use the cv2.rotate function to rotate an image as required:
rotated = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
For mirroring frames you can use cv2.flip(); it takes the image and a flip code (0 = around the x-axis, 1 = around the y-axis, -1 = both). You can check this link for more details:
https://www.geeksforgeeks.org/python-opencv-cv2-flip-method/
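Applied to the question's loop, the undefined rotateImage call can be replaced directly. This is a sketch: whether ROTATE_90_CLOCKWISE or ROTATE_90_COUNTERCLOCKWISE is right depends on how the footage was recorded.

while True:
    ret, frame = video_capture.read()
    if not ret:
        break
    # undo the -90 degree rotation; pick the constant that matches your footage
    frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break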

Understanding how to deploy python code to pop up balloons

I'm a newbie in programming, and I need to write code that detects a balloon against a fixed background using numpy and OpenCV in live video, and returns the centre of the object [the balloon].
Sorry about the ignorance of the question.
Since I'm new, I had trouble thinking through the logic. I don't have the resources to train a model and create a cascade XML for detecting balloons, so I thought of one possible solution:
Use cv2.createBackgroundSubtractorMOG2() to detect motion against the fixed background; once there is some object [a balloon], count all the white pixels in the live video and return its centre, given the right threshold number of white pixels.
The problem is, I don't know how to read a pixel's value (0-255) to tell whether it's white or black while showing the video at the same time. I suspect there is a much easier way that I couldn't find a guide for.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while(1):
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)
    img_arr = np.array(fgmask)
    cv2.imshow('frame', fgmask)
    for i in fgmask:
        for j in i:
            print(fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
I'm getting a gray video on the output, and lots of values that I don't know how to interpret.
I would use
changes = (fgmask > 200).sum()
to select all pixels with an almost-white value (> 200) and count them.
Then I can compare the result with some threshold to decide whether to treat it as movement.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    if frame is None:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)
    #changes = sum(sum(fgmask > 200))
    changes = (fgmask > 200).sum()
    is_moving = (changes > 10000)
    print(changes, is_moving)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
print() needs some time to display text, so printing all pixels (many times in a loop) can slow the program down; that is why I skip it. I don't need to know the values of all the pixels.
EDIT: Using the answer from how to detect region of large # of white pixels using opencv?, I added code that finds white regions and draws rectangles around them. The program opens two windows - one with the grayscale fgmask and one with the RGB frame - and they can hide behind one another, so you may have to move one window to see the other.
EDIT: I added code that uses cv2.contourArea(cnt) and (x, y, w, h) = cv2.boundingRect(cnt) to build a list of items (area, x, y, w, h) for all contours, then takes max(items) to get the contour with the biggest area. It then uses (x + w//2, y + h//2) as the centre of a red circle.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    if frame is None:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)

    #changes = sum(sum(fgmask > 200))
    changes = (fgmask > 200).sum()
    is_moving = (changes > 10000)
    print(changes, is_moving)

    items = []
    contours, hier = cv2.findContours(fgmask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if 200 < area:
            (x, y, w, h) = cv2.boundingRect(cnt)
            cv2.rectangle(fgmask, (x, y), (x+w, y+h), 255, 2)
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            items.append((area, x, y, w, h))

    if items:
        main_item = max(items)
        area, x, y, w, h = main_item
        if w > h:
            r = w//2
        else:
            r = h//2
        cv2.circle(frame, (x + w//2, y + h//2), r, (0, 0, 255), 2)

    cv2.imshow('fgmask', fgmask)
    cv2.imshow('frame', frame)

    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()

Python dlib - Read image instead of webcam

I am working with this example Python script from an article:
from imutils import face_utils
import dlib
import cv2

# Let's initialize a face detector (HOG) and then detect the
# landmarks on each detected face.
# p = our pre-trained model; in my case it's in the same directory as the script.
p = "shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(p)

cap = cv2.VideoCapture(0)

while True:
    # Get an image from the webcam
    _, image = cap.read()
    # Convert the image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Find faces in the webcam image
    rects = detector(gray, 0)
    # For each detected face, find the landmarks.
    for (i, rect) in enumerate(rects):
        # Make the prediction and transform it to a numpy array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        # Draw all the found coordinate points (x, y) on our image
        for (x, y) in shape:
            cv2.circle(image, (x, y), 2, (0, 255, 0), -1)
    # Show the image
    cv2.imshow("Output", image)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
All works great, but I am trying to modify it to read an image file instead of grabbing the webcam stream from cap.
I have tried reading in a URL instead, but it doesn't like it. Does anyone have any suggestions?
It seems you're asking for the standard way of reading images in OpenCV.
Assuming you're running your script.py from the same folder where image.jpg is stored, simply type:
img = cv2.imread("image.jpg")
Of course, since you're reading the image only once, there's no need to have a while loop anymore.
Here below is the full working code:
from imutils import face_utils
import dlib
import cv2

p = "shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(p)

image = cv2.imread("image.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)

for (i, rect) in enumerate(rects):
    shape = predictor(gray, rect)
    shape = face_utils.shape_to_np(shape)
    for (x, y) in shape:
        cv2.circle(image, (x, y), 2, (0, 255, 0), -1)

cv2.imshow("Output", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
A video is basically a stream of pictures that change faster than our eyes can detect. So for your query the code remains pretty much the same, except for the while loop part.
from imutils import face_utils
import dlib
import cv2

# Initialize a face detector (HOG), then detect the
# landmarks on each detected face.
# p = our pre-trained model, here in the same directory as the script.
p = "shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(p)

#cap = cv2.VideoCapture(0)   # no webcam needed anymore
#while True:                 # no loop needed for a single image

# Load your image here instead of grabbing a webcam frame
image = cv2.imread("image.jpg")
# Convert the image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Find faces in the image
rects = detector(gray, 0)
# For each detected face, find the landmarks.
for (i, rect) in enumerate(rects):
    # Make the prediction and transform it to a numpy array
    shape = predictor(gray, rect)
    shape = face_utils.shape_to_np(shape)
    # Draw all the found coordinate points (x, y) on our image
    for (x, y) in shape:
        cv2.circle(image, (x, y), 2, (0, 255, 0), -1)
# Show the image and wait for a key press
cv2.imshow("Output", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
#cap.release()
