I use OpenCV and Haar cascade features to detect faces in an image. After I load the cascade.xml file, it finds the face and draws a green rectangle around it. My question is: how do I get a confidence value from this detection, like a percentage or accuracy score?
import cv2 as cv

img = cv.imread('Bryan/2.PNG')
cv.imshow('Bryan', img)

gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)

haar_cascade = cv.CascadeClassifier('cascade.xml')
faces_rect = haar_cascade.detectMultiScale(
    gray, scaleFactor=1.1, minNeighbors=6)

print(f'Number of faces found = {len(faces_rect)}')

for (x, y, w, h) in faces_rect:
    cv.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), thickness=2)

cv.imshow('Detected faces', img)
cv.waitKey(0)
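One way to get a per-detection score is detectMultiScale3 with outputRejectLevels=True, which also returns the level weights of each detection. This is a sketch assuming your OpenCV build exposes that method; the weights are uncalibrated scores (larger roughly means more confident), not percentages:

import cv2 as cv

img = cv.imread('Bryan/2.PNG')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
haar_cascade = cv.CascadeClassifier('cascade.xml')

# detectMultiScale3 returns the boxes, the reject levels and the level weights.
# The level weight is the closest thing a Haar cascade has to a "confidence";
# it is an uncalibrated score, not a percentage.
rects, reject_levels, level_weights = haar_cascade.detectMultiScale3(
    gray, scaleFactor=1.1, minNeighbors=6, outputRejectLevels=True)

for (x, y, w, h), weight in zip(rects, level_weights):
    cv.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
    cv.putText(img, f'{float(weight):.2f}', (x, y - 5),
               cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

cv.imshow('Detected faces', img)
cv.waitKey(0)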
I want to detect the whole head, from the chin to the top of the hair, so I can calculate its size.
I have a simple face detection script with Python/OpenCV:
import cv2
import os

dir_path = os.path.dirname(os.path.realpath(__file__))
faceCascade = cv2.CascadeClassifier(os.path.join(dir_path, 'haarcascade_frontalface_default.xml'))

image_name = input()
image = cv2.imread(os.path.join(dir_path, image_name))
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

faces = faceCascade.detectMultiScale(
    gray,
    scaleFactor=1.1,
    minNeighbors=5,
    flags=cv2.CASCADE_SCALE_IMAGE
)

for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x+w, y+h), (255, 255, 255), 3)

print('{} faces detected.'.format(len(faces)))
cv2.imshow('image', image)
cv2.waitKey()
cv2.imwrite(os.path.join(dir_path, image_name[:image_name.rfind('.')] + '_result.jpg'), image)
This detects the face from the chin to the top of the forehead, like this:
The problem is that I want to measure the height of the face up to the top of the hair, like this:
How can I detect the whole head, from the chin to the top of the hair?
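There is no stock Haar cascade for a full head including hair, so any approach here involves some approximation. A minimal sketch of one such heuristic, assuming the faces and image variables from the code above and simply padding the face box upward and sideways by fixed fractions (the 0.5 and 0.15 factors are guesses to tune, not values from any library):

for (x, y, w, h) in faces:
    # Pad the detected face box to roughly cover the hair as well.
    # These fractions are rough guesses and need tuning on your own images.
    top_pad = int(0.5 * h)     # extend upward to include the hair
    side_pad = int(0.15 * w)   # widen slightly

    x1 = max(x - side_pad, 0)
    y1 = max(y - top_pad, 0)
    x2 = min(x + w + side_pad, image.shape[1])
    y2 = min(y + h, image.shape[0])

    cv2.rectangle(image, (x1, y1), (x2, y2), (255, 255, 255), 3)
    head_height_px = y2 - y1
    print('Approximate head height: {} px'.format(head_height_px))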
I have a program that detects a face while the webcam is recording. I've created a region of interest (ROI) and I want to detect faces only within that ROI. I'm trying to instruct my program to operate only in that region, but I have no clue how to do it.
cap = cv2.VideoCapture(0)
Cascade_face = cv2.CascadeClassifier(r'C:\Users\moham\PycharmProjects\Face\cascade\cascade.xml')
roi = cap[40:520, 340:550]

while True:
    success, img = cap.read()
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = Cascade_face.detectMultiScale(imgGray, 1.3, 5)
    for (x, y, w, h) in faces:
        img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)
    cv2.imshow('face_detect', img)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyWindow('face_detect')
Try this for the ROI. I do not have your cascade.xml, so I cannot actually test it.
# Inside the while loop, after detectMultiScale:
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    # crop the detected face region from the gray and colour frames
    roi_gray = imgGray[y:y+h, x:x+w]
    roi_color = img[y:y+h, x:x+w]

cv2.imshow('face_detect', img)
k = cv2.waitKey(30) & 0xff
if k == 27:  # press 'ESC' to quit
    break

# After the loop:
cap.release()
cv2.destroyAllWindows()
You need to create a mask using the coordinates of the ROI.
Sample image taken from this link:
Code:
import cv2
import numpy as np

img = cv2.imread('crowd.jpg')

# create a black background to draw the mask on
black = np.zeros((img.shape[0], img.shape[1]), np.uint8)

# ROI for this image: img[40:180, 130:300]  (rows 40-180, columns 130-300)
# create the mask using the ROI coordinates; cv2.rectangle takes (x, y) points
black = cv2.rectangle(black, (130, 40), (300, 180), 255, -1)

# mask the image so only the ROI remains
roi = cv2.bitwise_and(img, img, mask=black)
Now you can perform face detection on roi. You need to incorporate the above snippet in your code accordingly.
Note: cv2.rectangle() takes its corner points in (x, y) order, i.e. (x1, y1) and (x2, y2). But NumPy slicing to crop an image uses row-then-column order: crop_img = img[y1:y2, x1:x2].
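For completeness, a minimal sketch of how the mask could be folded into the webcam loop from the question (the Cascade_face name, the path, and the ROI coordinates are taken from the question and are assumptions about your setup):

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
Cascade_face = cv2.CascadeClassifier(r'C:\Users\moham\PycharmProjects\Face\cascade\cascade.xml')

while True:
    success, img = cap.read()
    if not success:
        break

    # build the mask for the ROI (hoist this out of the loop for speed)
    mask = np.zeros(img.shape[:2], np.uint8)
    cv2.rectangle(mask, (340, 40), (550, 520), 255, -1)  # (x1, y1), (x2, y2) of the ROI

    # keep only the ROI, then run detection on it
    masked = cv2.bitwise_and(img, img, mask=mask)
    gray = cv2.cvtColor(masked, cv2.COLOR_BGR2GRAY)

    faces = Cascade_face.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)

    cv2.imshow('face_detect', img)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()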
I have a simple face detection implementation as follows:
import cv2

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

filename = "path/to/image"
img = cv2.imread(filename)
cv2.imshow("Original image", img)

face_region = face_cascade.detectMultiScale(img, 1.1, 4)

for (x, y, w, h) in face_region:
    cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)

cv2.imshow("Output", img)
cv2.waitKey(0)
After running the code, I got the following result:
As you can see, the implementation detects two faces, one of which is a false positive. How can I get rid of this kind of false detection?
First remove the textual data from the image, as shown in this link: Delete OCR word from Image (OpenCV, Python).
After that, run your face detection code again; it should improve your accuracy.
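Independently of removing the text, false positives from detectMultiScale can often be reduced by tuning its parameters. A small sketch using the variables from the question (the exact values are starting points to tune, not recommendations from the answer above):

# Converting to grayscale and tightening the detector parameters often
# removes spurious boxes: a higher minNeighbors requires more overlapping
# detections before a box is accepted, and minSize discards implausibly
# small hits.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
face_region = face_cascade.detectMultiScale(
    gray,
    scaleFactor=1.1,
    minNeighbors=8,
    minSize=(60, 60)
)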
I want to change the rectangle size of this face detector; I need it to be about 25% bigger:
import cv2

# Load the cascade
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Read the input image
img = cv2.imread('test.jpg')

# Convert into grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Detect faces
faces = face_cascade.detectMultiScale(gray, 1.1, 4)

# Draw rectangle around the faces
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)

# Display the output
cv2.imshow('img', img)
cv2.waitKey()
xml: https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades/haarcascade_frontalface_default.xml
but I don't know how to do it
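A minimal sketch of one way to do it: grow each detected box by 25% around its centre and clamp it to the image borders. The scale factor and the clamping are the only additions; everything else uses the img and faces variables from the code above:

scale = 1.25  # 25% bigger

for (x, y, w, h) in faces:
    # grow the box around its centre
    cx, cy = x + w / 2, y + h / 2
    new_w, new_h = int(w * scale), int(h * scale)

    # clamp to the image borders so the rectangle stays inside the frame
    x1 = max(int(cx - new_w / 2), 0)
    y1 = max(int(cy - new_h / 2), 0)
    x2 = min(int(cx + new_w / 2), img.shape[1] - 1)
    y2 = min(int(cy + new_h / 2), img.shape[0] - 1)

    cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)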
I have this code to detect laser points using the OpenCV library. It worked when I fed it a .jpg or .png file as an argument, but now I want to get the image from a camera ("video0"). I am using Ubuntu 16.04. Here is my code; I marked the problem area with ******.
Any help would be greatly appreciated:
# import the necessary packages
from imutils import contours
from skimage import measure
import numpy as np
import argparse
import imutils
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=False,
                help="path to the image file")
args = vars(ap.parse_args())

camera = cv2.VideoCapture(0)
# problem is here ********************************************
ret, image = camera.read()

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (11, 11), 0)

# threshold the image to reveal light regions in the
# blurred image
thresh = cv2.threshold(blurred, 200, 255, cv2.THRESH_BINARY)[1]

# perform a series of erosions and dilations to remove
# any small blobs of noise from the thresholded image
thresh = cv2.erode(thresh, None, iterations=2)
thresh = cv2.dilate(thresh, None, iterations=4)

# perform a connected component analysis on the thresholded
# image, then initialize a mask to store only the "large"
# components
labels = measure.label(thresh, neighbors=8, background=0)
mask = np.zeros(thresh.shape, dtype="uint8")

# loop over the unique components
for label in np.unique(labels):
    # if this is the background label, ignore it
    if label == 0:
        continue
    # otherwise, construct the label mask and count the
    # number of pixels
    labelMask = np.zeros(thresh.shape, dtype="uint8")
    labelMask[labels == label] = 255
    numPixels = cv2.countNonZero(labelMask)
    # if the number of pixels in the component is sufficiently
    # large, then add it to our mask of "large blobs"
    if numPixels > 300:
        mask = cv2.add(mask, labelMask)

# find the contours in the mask, then sort them from left to
# right
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = contours.sort_contours(cnts)[0]

# loop over the contours
for (i, c) in enumerate(cnts):
    # draw the bright spot on the image
    (x, y, w, h) = cv2.boundingRect(c)
    ((cX, cY), radius) = cv2.minEnclosingCircle(c)
    # x and y center are cX and cY
    cv2.circle(image, (int(cX), int(cY)), int(radius),
               (0, 0, 255), 3)
    cv2.putText(image, "#{}".format(i + 1), (x, y - 15),
                cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)

# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0)
Wrapping your camera capture in a while loop with a break condition might help:
import cv2

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    cv2.imshow('frame', frame)

    # ADD LOGIC HERE
    print(frame.shape)
    # END

    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
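To make the connection to the original script explicit, here is a sketch of the same loop with the question's preprocessing steps moved inside it. Only the grayscale/blur/threshold stage is shown; the connected-component and contour code from the question would slot in at the marked spot:

import cv2

camera = cv2.VideoCapture(0)

while True:
    ret, image = camera.read()
    if not ret:          # no frame available yet, try again
        continue

    # per-frame preprocessing, taken from the question
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (11, 11), 0)
    thresh = cv2.threshold(blurred, 200, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.erode(thresh, None, iterations=2)
    thresh = cv2.dilate(thresh, None, iterations=4)

    # ... connected-component and contour code from the question goes here ...

    cv2.imshow("Image", image)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

camera.release()
cv2.destroyAllWindows()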
Your code is working fine and detects a face from the video feed. But you can also do it another way...
'''
:: Face Detection using Haar Cascades ::
'''
import numpy as np
import cv2, argparse

# set classifiers
face_cascade = cv2.CascadeClassifier(
    '/opt/opencv/main/data/haarcascades/haarcascade_frontalface_default.xml'
)

cam = cv2.VideoCapture(0)
_, img = cam.read()

# convert the captured frame to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces; if faces are found, it returns the positions
# of detected faces as Rect(x, y, w, h).
faces = face_cascade.detectMultiScale(gray, 1.2, 5)
print("[ INFO:1] Found", len(faces), "face(s) in this image.")

for (x, y, w, h) in faces:
    cv2.rectangle(
        img,
        (x, y),
        (x+w, y+h),
        (255, 100, 25),
        2
    )

cv2.imshow('Image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()