I have a Python script that uses OpenCV (cv2) to detect faces. I capture video from my webcam and use a Haar cascade to detect faces. I want to output the number of faces detected in a single frame. I understand this can be done by counting the rectangles drawn when a face is found, but how do I do it? How do I count the rectangles in one frame?
import cv2

faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
video_capture = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # Display the resulting frame
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
A simple call to len(faces) returns the number of detected faces, since detectMultiScale returns one rectangle per detection (and an empty sequence when nothing is found).
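For example, inside the loop after detectMultiScale, you can print the count and overlay it on the frame. A minimal sketch based on the code above (the label text and position are arbitrary choices):

# Inside the while loop, after detectMultiScale:
face_count = len(faces)  # one rectangle per detected face
print("Faces in this frame: {}".format(face_count))

# Optionally draw the count onto the frame before cv2.imshow
cv2.putText(frame, "Faces: {}".format(face_count), (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)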
I am trying to display a live video feed from the camera. When I run the program, ret returns True, but cv2.imshow() displays only a placeholder image. Any help would be greatly appreciated.
import numpy as np
import cv2

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Initialize video from the webcam
video = cv2.VideoCapture(0)
print(video.isOpened())  # -> returns True

while True:
    # ret tells whether the camera works properly; frame is an actual
    # frame from the video feed
    ret, frame = video.read()
    if ret:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect the faces
        faces = face_cascade.detectMultiScale(gray, 1.1, 4)
        # Draw a rectangle around each face
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # Display
        cv2.imshow('img', frame)
    if cv2.waitKey(30) & 0xff == 27:
        break

video.release()
cv2.destroyAllWindows()
cv2.imshow('img', frame) opens a window that shows only a placeholder image. I checked whether the camera permission is granted, and it already is. I am using macOS Big Sur (version 11.6).
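One way to narrow this down is to check whether valid pixel data is actually being delivered, and to open the camera explicitly through the AVFoundation backend that OpenCV uses on macOS. A minimal diagnostic sketch (the backend choice is an assumption, not a confirmed fix):

import cv2

# Open the camera explicitly through the macOS AVFoundation backend
video = cv2.VideoCapture(0, cv2.CAP_AVFOUNDATION)

ret, frame = video.read()
if ret:
    # If the mean pixel value is near 0, the camera is delivering blank
    # frames even though ret is True (often a permission/backend issue)
    print("frame shape:", frame.shape, "mean pixel value:", frame.mean())
else:
    print("no frame returned")
video.release()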
Hi guys, I want to implement digital-camera-style zoom in and zoom out on the detected faces during real-time capture with OpenCV. Is there any way to do it other than just cropping the frame and then displaying it?

Here is my code:
import cv2

# Load the cascade
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# To capture video from the webcam
cap = cv2.VideoCapture(0)
# To use a video file as input instead:
# cap = cv2.VideoCapture('filename.mp4')

while True:
    # Read the frame
    _, img = cap.read()
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect the faces
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    # Draw a rectangle around each face
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    # Display
    cv2.imshow('img', img)
    # Stop if the Escape key is pressed
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

# Release the VideoCapture object
cap.release()
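Digital zoom is normally implemented as a centered crop followed by cv2.resize back to the original frame size, so the display dimensions never change. A minimal sketch of that idea applied to the loop above (the zoom factor and centering on the first detected face are assumptions):

import cv2

def digital_zoom(frame, center, zoom=2.0):
    # Crop a region around `center` and scale it back to the frame size
    h, w = frame.shape[:2]
    cx, cy = center
    new_w, new_h = int(w / zoom), int(h / zoom)
    # Clamp the crop window so it stays inside the frame
    x1 = max(0, min(w - new_w, cx - new_w // 2))
    y1 = max(0, min(h - new_h, cy - new_h // 2))
    crop = frame[y1:y1 + new_h, x1:x1 + new_w]
    return cv2.resize(crop, (w, h), interpolation=cv2.INTER_LINEAR)

# Inside the loop, after detectMultiScale, zoom on the first face found:
if len(faces) > 0:
    x, y, w, h = faces[0]
    img = digital_zoom(img, (x + w // 2, y + h // 2), zoom=2.0)

For a smooth, camera-like zoom you can interpolate the zoom factor over successive frames instead of jumping straight to the target value.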
I am trying to use face detection, but I do not want the video feed window to open when I use VideoCapture. This is the code I'm working on:
import cv2

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)

while True:
    _, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
I just want to somehow disable the video feed window that opens automatically, and instead get output on the command line every time a face is detected.
I am using the code from here: https://github.com/adarsh1021/facedetection.
Thank you.
Remove cv2.imshow('img', img) and replace it with print(faces) (or print(len(faces))).
cv2.imshow() is what opens the image window.
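A minimal sketch of the loop with the window removed. Since cv2.waitKey only registers key presses while a HighGUI window exists, this version stops on Ctrl+C instead of Esc:

import cv2

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)

try:
    while True:
        ret, img = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.1, 4)
        if len(faces) > 0:
            # Print one line per frame that contains at least one face
            print("Detected {} face(s): {}".format(len(faces), list(faces)))
except KeyboardInterrupt:
    pass

cap.release()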
I am trying to detect faces in a camera-recorded video. When I did it with webcam video, it worked fine. But with the camera-recorded video, the video gets rotated by -90 degrees. Please suggest how I can get the correctly oriented video output for face detection.
import cv2

faceCascade = cv2.CascadeClassifier('C:/Users/HP/Anaconda2/pkgs/opencv-3.2.0-np112py27_204/Library/etc/haarcascades/haarcascade_frontalface_default.xml')

#video_capture = cv2.VideoCapture(0)
video_capture = cv2.VideoCapture('C:/Users/HP/sample1.mp4')
w = int(video_capture.get(3))  # frame width
h = int(video_capture.get(4))  # frame height
#output = cv2.VideoWriter('output_1.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 60, frameSize=(w, h))

while True:
    ret, frame = video_capture.read()
    frame = rotateImage(frame, 90)  # rotateImage is not defined; see the answer below
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.3, 5)
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    #output.write(frame)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
#output.release()
cv2.destroyAllWindows()
In cv2 you can use the cv2.rotate function to rotate an image as required:

rotated = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)

Note that cv2.flip() does not rotate; it mirrors the image, and its flip code (0, 1, -1) selects the axis. See this link for details:
https://www.geeksforgeeks.org/python-opencv-cv2-flip-method/
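Applied per frame in the capture loop above, that looks like the following sketch (whether you need ROTATE_90_CLOCKWISE or ROTATE_90_COUNTERCLOCKWISE depends on which way your video is turned):

while True:
    ret, frame = video_capture.read()
    if not ret:
        break
    # Undo the -90 degree rotation before running detection
    frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break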
I am using Python 2.7.11 and OpenCV 2.4.9. I have two programs, one for face detection in video and one for people detection. Face detection runs smoothly, but people detection is slow.
Face Detecting:
faceCascade = cv2.CascadeClassifier(r'C:\opencv\sources\data\haarcascades\haarcascade_frontalface_default.xml')
video_capture = cv2.VideoCapture(0)

while True:
    ret, frame = video_capture.read()
    faces = faceCascade.detectMultiScale(
        frame,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
People Detecting:
import numpy as np
import cv2
# non_max_suppression comes from the imutils package
from imutils.object_detection import non_max_suppression

hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
video_capture = cv2.VideoCapture(0)

while True:
    ret, frame = video_capture.read()
    (rects, weights) = hog.detectMultiScale(
        frame,
        winStride=(4, 4),
        padding=(8, 8),
        scale=1.05
    )
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
    for (xA, yA, xB, yB) in pick:
        cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
    cv2.imshow("Before NMS", frame)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

video_capture.release()
cv2.destroyAllWindows()
Human detection is genuinely a time-consuming algorithm; you can read about it in detail here. You can change the arguments passed to the HOG function: winStride, padding, and scale all affect the speed of the algorithm. Fine-tune them carefully, though, or the detection quality will suffer.
Alternatively, you can add another step before the people detection, such as motion detection, and only run the people detector when some motion occurs. You can find Python code for motion detection here. This removes unnecessary checks for people; a sketch of the idea follows below.
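A minimal sketch of that idea, gating the expensive HOG pass on simple frame differencing (the blur size, threshold, and motion cutoff are assumed values you would tune):

import cv2

hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
video_capture = cv2.VideoCapture(0)

prev_gray = None
MOTION_CUTOFF = 500  # assumed: minimum number of changed pixels

while True:
    ret, frame = video_capture.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    motion = True
    if prev_gray is not None:
        # Count pixels that changed noticeably since the last frame
        diff = cv2.absdiff(prev_gray, gray)
        mask = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)[1]
        motion = cv2.countNonZero(mask) > MOTION_CUTOFF
    prev_gray = gray

    if motion:
        # Only pay for the HOG people detector when something moved;
        # a larger winStride also trades accuracy for speed
        rects, weights = hog.detectMultiScale(frame, winStride=(8, 8),
                                              padding=(8, 8), scale=1.05)
        for (x, y, w, h) in rects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow('People', frame)
    if cv2.waitKey(5) & 0xFF == 27:
        break

video_capture.release()
cv2.destroyAllWindows()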