Python dlib - Read image instead of webcam - python

I am working with this example Python script from a tutorial article:
from imutils import face_utils
import dlib
import cv2

# Let's initialize a face detector (HOG) and then detect the
# landmarks on each detected face.
# p = our pre-trained model; in my case it is in the same directory as the script.
p = "shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(p)

cap = cv2.VideoCapture(0)

while True:
    # Grab a frame from the webcam
    _, image = cap.read()
    # Convert the image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Find the faces in the frame
    rects = detector(gray, 0)
    # For each detected face, find the landmarks.
    for (i, rect) in enumerate(rects):
        # Make the prediction and transform it to a numpy array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        # Draw every found coordinate point (x, y) on the image
        for (x, y) in shape:
            cv2.circle(image, (x, y), 2, (0, 255, 0), -1)
    # Show the image
    cv2.imshow("Output", image)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
Everything works great, but I am trying to modify it to read an image file instead of grabbing the cap webcam stream.
I have tried reading from a URL instead, but it doesn't work. Does anyone have any suggestions?

It seems you're asking for the standard way of reading images in OpenCV.
Assuming you're running your script.py from the same folder where image.jpg is stored, simply type:
img = cv2.imread("image.jpg")
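One gotcha: if the path is wrong, cv2.imread does not raise an error but silently returns None, so a quick check saves a confusing crash later:
if img is None:
    # cv2.imread returns None when the file cannot be read
    raise FileNotFoundError("image.jpg could not be read")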
Of course, since you're reading the image only once, there's no need to have a while loop anymore.
Below is the full working code:
from imutils import face_utils
import dlib
import cv2

p = "shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(p)

image = cv2.imread("image.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)

for (i, rect) in enumerate(rects):
    shape = predictor(gray, rect)
    shape = face_utils.shape_to_np(shape)
    for (x, y) in shape:
        cv2.circle(image, (x, y), 2, (0, 255, 0), -1)

cv2.imshow("Output", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
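Since the question also mentions reading from a URL: cv2.imread only accepts local file paths, but you can download the bytes yourself and decode them with cv2.imdecode. A minimal sketch, assuming the URL (a placeholder here) points directly at a JPEG or PNG:

import urllib.request
import numpy as np
import cv2

url = "https://example.com/face.jpg"  # hypothetical URL, replace with yours
data = urllib.request.urlopen(url).read()
# decode the raw bytes into a BGR array, exactly as if it came from cv2.imread
image = cv2.imdecode(np.frombuffer(data, dtype=np.uint8), cv2.IMREAD_COLOR)

From there the face detection code above works unchanged.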

A video is basically a stream of pictures shown faster than our eyes can tell them apart. So for your query the code stays pretty much the same, except for the while loop part.
from imutils import face_utils
import dlib
import cv2

# Let's initialize a face detector (HOG) and then detect the
# landmarks on each detected face.
# p = our pre-trained model; in my case it is in the same directory as the script.
p = "shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(p)

# No webcam capture and no while loop any more:
# cap = cv2.VideoCapture(0)
# while True:

# Load your image here instead of reading from the webcam
image = cv2.imread("image.jpg")
# Convert the image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Find the faces in the image
rects = detector(gray, 0)
# For each detected face, find the landmarks.
for (i, rect) in enumerate(rects):
    # Make the prediction and transform it to a numpy array
    shape = predictor(gray, rect)
    shape = face_utils.shape_to_np(shape)
    # Draw every found coordinate point (x, y) on the image
    for (x, y) in shape:
        cv2.circle(image, (x, y), 2, (0, 255, 0), -1)

# Show the image and wait for a key press
cv2.imshow("Output", image)
cv2.waitKey(0)
cv2.destroyAllWindows()

Related

Auto zooming while face detected

Hi guys, I want to implement zoom in and zoom out, like a digital camera, on the detected faces while capturing in real time with OpenCV. Is there any way I can do it without just cropping the frame and then displaying it?
Here is my code:
import cv2

# Load the cascade
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# To capture video from webcam.
cap = cv2.VideoCapture(0)
# To use a video file as input
# cap = cv2.VideoCapture('filename.mp4')

while True:
    # Read the frame
    _, img = cap.read()
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect the faces
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    # Draw the rectangle around each face
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
    # Display
    cv2.imshow('img', img)
    # Stop if escape key is pressed
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

# Release the VideoCapture object
cap.release()
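One way to get a camera-like zoom effect is still crop-plus-resize under the hood, but with the zoom factor eased over time so it does not jump. A rough sketch reusing the cascade above (the 2.0 target zoom and the 0.05 easing step are arbitrary choices):

import cv2

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
zoom = 1.0  # current digital zoom factor

while True:
    _, img = cap.read()
    h, w = img.shape[:2]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)

    # ease the zoom factor toward 2x when a face is present, back to 1x otherwise
    target = 2.0 if len(faces) > 0 else 1.0
    zoom += 0.05 * (target - zoom)

    # crop a zoom-dependent window around the face centre (or the frame centre)
    cw, ch = int(w / zoom), int(h / zoom)
    if len(faces) > 0:
        x, y, fw, fh = faces[0]
        cx, cy = x + fw // 2, y + fh // 2
    else:
        cx, cy = w // 2, h // 2
    x0 = min(max(cx - cw // 2, 0), w - cw)
    y0 = min(max(cy - ch // 2, 0), h - ch)
    crop = img[y0:y0 + ch, x0:x0 + cw]

    # scale the crop back to the full frame size so the output resolution is constant
    out = cv2.resize(crop, (w, h), interpolation=cv2.INTER_LINEAR)
    cv2.imshow('img', out)
    if cv2.waitKey(30) & 0xff == 27:
        break

cap.release()
cv2.destroyAllWindows()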

Face Detection - Open CV can't find the face

I am learning OpenCV. Here is my code:
import cv2

face_patterns = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
sample_image = cv2.imread('1.jpg')
gray = cv2.cvtColor(sample_image, cv2.COLOR_RGB2GRAY)
faces = face_patterns.detectMultiScale(gray, 1.3, 5)
print(len(faces))
for (x, y, w, h) in faces:
    cv2.rectangle(sample_image, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imwrite('result.jpg', sample_image)
If I use picture A, I get a lot of faces; if I use picture B, I get none.
I have changed the arguments in detectMultiScale(gray, 1.3, 5) many times, and it still doesn't work.
(Images attached: picture A, the detection result for picture A, and picture B, where no face is found.)
I see this more as a problem of the cv2 module itself. There are better models than Haar cascades for detecting faces. The face_recognition library is also very useful for detecting and recognizing faces. It uses HOG as the default model; you can also use CNN for better accuracy, but detection will be slower.
Find more here.
import cv2
import face_recognition as fr

sample_image = fr.load_image_file("1.jpg")
unknown_face_loc = fr.face_locations(sample_image, model="hog")
print(len(unknown_face_loc))  # detected face count
for faceloc in unknown_face_loc:
    y1, x2, y2, x1 = faceloc
    cv2.rectangle(sample_image, (x1, y1), (x2, y2), (0, 0, 255), 2)
sample_image = sample_image[:, :, ::-1]  # face_recognition gives RGB; convert to BGR for cv2.imwrite
cv2.imwrite("result.jpg", sample_image)
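If speed is less of a concern, switching to the CNN detector mentioned above is just a parameter change (it is much slower without a GPU):

unknown_face_loc = fr.face_locations(sample_image, model="cnn")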
Instead of:
faces = face_patterns.detectMultiScale(gray, 1.3, 5)
try using:
faces = face_patterns.detectMultiScale(blackandwhite, 1.3, 5)
(where blackandwhite is your grayscale image). If the problem persists even after this, check out my code for face detection below.
import cv2

cascade_classifier = cv2.CascadeClassifier('haarcascades/haarcascade_eye.xml')
cap = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detections = cascade_classifier.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    if len(detections) > 0:
        (x, y, w, h) = detections[0]
        frame = cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
    # for (x, y, w, h) in detections:
    #     frame = cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
    # Display the resulting frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()

How to rotate camera recorded video?

I am trying to detect faces in a camera-recorded video. When I did it with webcam video it worked fine, but with the camera-recorded video the output gets rotated by -90 degrees. Please suggest how I can get the video in its actual orientation for face detection.
import cv2
import sys

cascPath = sys.argv[1]
faceCascade = cv2.CascadeClassifier('C:/Users/HP/Anaconda2/pkgs/opencv-3.2.0-np112py27_204/Library/etc/haarcascades/haarcascade_frontalface_default.xml')

#video_capture = cv2.videoCapture(0)
video_capture = cv2.VideoCapture('C:/Users/HP/sample1.mp4')
w = int(video_capture.get(3))
h = int(video_capture.get(4))
#output = cv2.VideoWriter('output_1.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 60, frameSize=(w, h))

while True:
    ret, frame = video_capture.read()
    frame = rotateImage(frame, 90)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.3, 5)
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        #cv2.imshow('face',i)
    #output.write(frame)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
output.release()
cv2.destroyAllWindows()
In cv2 you can use the cv2.rotate function to rotate an image as required:
rotated = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
For mirroring frames you can also use cv2.flip(), which takes the image and a flip code (0, 1, or -1); check this link for more details:
https://www.geeksforgeeks.org/python-opencv-cv2-flip-method/
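Applied to the loop in the question, a minimal sketch (assuming the recording really is off by -90 degrees, so rotating 90 degrees clockwise restores it) would drop the undefined rotateImage call and use cv2.rotate on each frame:

import cv2

video_capture = cv2.VideoCapture('C:/Users/HP/sample1.mp4')
while True:
    ret, frame = video_capture.read()
    if not ret:
        break
    # undo the -90 degree rotation before running face detection on the frame
    frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video_capture.release()
cv2.destroyAllWindows()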

how to get an image from a camera using opencv without making a new .jpg file

I have this code to detect laser points using the OpenCV library, and I had it working when I fed it a .jpg or .png file as an argument, but now I want to get an image from a camera ("video 0"). I am using Ubuntu 16.04. Here is my code; I marked the problem with ******.
Any help would be greatly appreciated:
# import the necessary packages
from imutils import contours
from skimage import measure
import numpy as np
import argparse
import imutils
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=False,
                help="path to the image file")
args = vars(ap.parse_args())

camera = cv2.VideoCapture(0)
# problem is here ********************************************
ret, image = camera.read()

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (11, 11), 0)

# threshold the image to reveal light regions in the
# blurred image
thresh = cv2.threshold(blurred, 200, 255, cv2.THRESH_BINARY)[1]

# perform a series of erosions and dilations to remove
# any small blobs of noise from the thresholded image
thresh = cv2.erode(thresh, None, iterations=2)
thresh = cv2.dilate(thresh, None, iterations=4)

# perform a connected component analysis on the thresholded
# image, then initialize a mask to store only the "large"
# components
labels = measure.label(thresh, neighbors=8, background=0)
mask = np.zeros(thresh.shape, dtype="uint8")

# loop over the unique components
for label in np.unique(labels):
    # if this is the background label, ignore it
    if label == 0:
        continue
    # otherwise, construct the label mask and count the
    # number of pixels
    labelMask = np.zeros(thresh.shape, dtype="uint8")
    labelMask[labels == label] = 255
    numPixels = cv2.countNonZero(labelMask)
    # if the number of pixels in the component is sufficiently
    # large, then add it to our mask of "large blobs"
    if numPixels > 300:
        mask = cv2.add(mask, labelMask)

# find the contours in the mask, then sort them from left to
# right
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = contours.sort_contours(cnts)[0]

# loop over the contours
for (i, c) in enumerate(cnts):
    # draw the bright spot on the image
    (x, y, w, h) = cv2.boundingRect(c)
    ((cX, cY), radius) = cv2.minEnclosingCircle(c)
    # x and y center are cX and cY
    cv2.circle(image, (int(cX), int(cY)), int(radius),
               (0, 0, 255), 3)
    cv2.putText(image, "#{}".format(i + 1), (x, y - 15),
                cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)

# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0)
Wrapping your camera capture in a while loop with a break condition might help:
import cv2

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    cv2.imshow('frame', frame)

    # ADD LOGIC HERE
    print(frame.shape)
    # END

    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Your code is working fine and detects a face from the video feed. But you can also do it another way:
'''
:: Face Detection using Haar Cascades ::
'''
import numpy as np
import cv2, argparse

# set classifiers
face_cascade = cv2.CascadeClassifier(
    '/opt/opencv/main/data/haarcascades/haarcascade_frontalface_default.xml'
)

cam = cv2.VideoCapture(0)
_, img = cam.read()

# load image & convert
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces; if faces are found, it returns the positions
# of detected faces as Rect(x, y, w, h).
faces = face_cascade.detectMultiScale(gray, 1.2, 5)
print("[ INFO:1] Found", len(faces), "face(s) in this image.")

for (x, y, w, h) in faces:
    cv2.rectangle(
        img,
        (x, y),
        (x+w, y+h),
        (255, 100, 25),
        2
    )

cv2.imshow('Image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

Find if drawn rectangle is visible on screen in opencv python

I have some code that draws a rectangle using the cv2.rectangle function based on other parameters. This is part of a face detection program. The rectangle is shown on screen within an image using imshow. However, because the rectangle's dimensions vary, there are situations when the rectangle is not visible. After the line of code that draws the rectangle in the image, how do I detect whether the rectangle is visible or not? I want this so that I can tell when a face is not detected. This is my code:
import numpy as np
import cv2

# multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades

# https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

cap = cv2.VideoCapture(0)

while 1:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x + w // 4, y + h // 4), (x + 3 * w // 4, y + 3 * h // 4), (255, 0, 0), 2)
    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
Thanks!
Add print(len(faces)) in the while loop. If it's zero, then no face was detected, and no rectangle will be drawn.
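Concretely, a minimal sketch of that check inside the loop:

faces = face_cascade.detectMultiScale(gray, 1.3, 5)
if len(faces) == 0:
    # no face detected in this frame, so no rectangle will be drawn
    print("no face detected")
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x + w // 4, y + h // 4), (x + 3 * w // 4, y + 3 * h // 4), (255, 0, 0), 2)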
