The following is the code used to detect a single face in an image, but the issue is that if there are multiple faces in an image it takes only the first one. Please suggest a fix.
from __future__ import print_function
import os
import caffe
import dlib
import matplotlib.pyplot as plt
from skimage import io
detector = dlib.get_frontal_face_detector()
im_name = 'group.jpg'
img = io.imread(os.path.join('./', im_name))
faces = detector(img)
total = len(faces)
print('total faces here:', total)
input_image_cropped = caffe.io.load_image(os.path.join('./', im_name))
cropped_face = input_image_cropped[faces[0].top():faces[0].bottom(),
                                   faces[0].left():faces[0].right(), :]
h = faces[0].bottom() - faces[0].top()
w = faces[0].right() - faces[0].left()
age_prediction_cropped = age_net.predict([cropped_face])
print('\n\t predicted age (Dlib-cropped image):',
      age_prediction_cropped[0].argmax())
plt.show()
FYI: I stripped out the other parts of the code that are not required.
I went through the link below, but I could not figure out how to run the loop for all the detected faces. Please suggest how I can loop over every detected face.
http://dlib.net/face_detector.py.html
With the following change the issue is resolved:
input_image_cropped = caffe.io.load_image(os.path.join('./', im_name))
for i, d in enumerate(faces):
    cropped_face = input_image_cropped[d.top():d.bottom(), d.left():d.right(), :]
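To score every face rather than only the first one, the age prediction can move inside the same loop. A minimal sketch, assuming age_net is the Caffe classifier defined in the stripped-out part of the code:
for i, d in enumerate(faces):
    cropped_face = input_image_cropped[d.top():d.bottom(), d.left():d.right(), :]
    # age_net is assumed to be the classifier from the stripped-out code
    age_prediction_cropped = age_net.predict([cropped_face])
    print('face', i, 'predicted age:', age_prediction_cropped[0].argmax())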
I am new to Python and I was playing around with background subtraction to visualize changes between pre- and post-change images.
I wrote a short and simple script using the cv2 library:
#!/usr/bin/env python
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
#GRAYSCALE ONLY FOR TESTING
#Test with person appearing in image
img1 = cv.imread("images/1.jpg", 0)
img2 = cv.imread("images/2.jpg", 0)
img3 = cv.subtract(img1, img2)
ret,thresh1 = cv.threshold(img3,90,255,cv.THRESH_BINARY)
#Test with satellite image of Japan landslide changes after earthquake
jl_before = cv.imread("images/japan_earthquake_before.jpg",0)
jl_after = cv.imread("images/japan_earthquake_after.jpg",0)
jl_subtraction = cv.subtract(jl_before, jl_after)
ret,thresh2 = cv.threshold(img3,20,255,cv.THRESH_BINARY)
images = [img1, img2, thresh1, jl_before, jl_after, thresh2]
titles = ["Image1", "Image2", "Changes", "Japan_Before", "Japan_After", "Japan_Changes" ]
for i in range(6):
    plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')
    plt.title(titles[i])
    plt.xticks([]),plt.yticks([])
plt.show()
The result looks like this:
Why is the mask with changes from the first set of images present in the mask of the second set of images?
I used different variables, thresh1 and thresh2.
Any help would be greatly appreciated as I can't seem to find the problem.
Because you missed a change when copy-pasting:
ret,thresh2 = cv.threshold(img3,20,255,cv.THRESH_BINARY)
                           ^^^^
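The second threshold was meant to read from jl_subtraction, the difference of the two Japan images, not from img3:
ret,thresh2 = cv.threshold(jl_subtraction,20,255,cv.THRESH_BINARY)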
I used the following code to capture a face using a Haar cascade classifier, but I still do not get the full head in the image.
from imutils.video import WebcamVideoStream
import os
import time
from datetime import datetime
from imutils.video import FPS
import cv2
cascadePath = "/home/pi/opencv-3.3.0/data/haarcascades/haarcascade_frontalface_default.xml"
eye_cascade = cv2.CascadeClassifier('/home/pi/opencv-3.3.0/data/haarcascades/haarcascade_eye.xml')
faceCascade = cv2.CascadeClassifier(cascadePath)
fn = input('Enter your Folder name: ')
os.system("mkdir "+fn)
vs = WebcamVideoStream(src=0).start()
while 1:
    time.sleep(0.05)
    frame = vs.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.1, 6)
    for (x, y, w, h) in faces:
        cv2.imwrite(fn + "/" + datetime.now().strftime("%H:%M:%S.%f") + ".jpg", gray[y:y+h+30, x:x+w+20])
    cv2.imshow('frame', frame)
    key = cv2.waitKey(1) & 0xFF
In order to capture the head as well, you need to move up the point where you start cropping the face.
In your code you have used gray[y:y+h+30, x:x+w+20].
y is the top edge of the detected face box; this is the coordinate you need to decrease so the crop starts above the face and includes the head.
x is the left edge of the face box, which you do not need to change.
Conclusion: change the slice inside cv2.imwrite() to gray[y-20:y+h, x:x+w].
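A minimal sketch of the adjusted loop, with the top of the crop clamped so the index never goes negative when a face sits near the upper frame border (the 20-pixel margin is an assumption you can tune):
for (x, y, w, h) in faces:
    top = max(0, y - 20)  # start the crop above the face box to include the head
    cv2.imwrite(fn + "/" + datetime.now().strftime("%H:%M:%S.%f") + ".jpg", gray[top:y+h, x:x+w])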
So what I'm trying to do, using OpenCV, dlib, and Python, is to identify facial landmarks on a set of images with dlib, then crop the mouths from those same images and save them as separate images with ".jpg" extensions.
Here is the code:
import numpy as np
import cv2
import dlib
import sys
import skimage
from PIL import Image
import os
import glob
#Everything is imported here
folderpath = sys.argv[1]
cascPath = sys.argv[2]
PREDICTOR_PATH = "/home/victor/facial-landmarks/shape_predictor_68_face_landmarks.dat"
#user supplies the folderpath and cascpath in a terminal/command prompt
#predictor_path is already set
imageformat = ".tif"
path = folderpath
imfilelist = [os.path.join(path,f) for f in os.listdir(path) if f.endswith(imageformat)]
#only images with ".tif" extensions in the folder interest us, we create a
#list with paths to those images
data = np.array([])
for IMG in imfilelist:
    image = cv2.imread(IMG)   # this for-loop iterates through the images we need,
    np.append(data, image)    # reads them, and appends them to the data numpy array
gray = np.array([])
for j in range(0, len(data)):
    cvtimg = cv2.cvtColor(np.array(data[j]), cv2.COLOR_BGR2GRAY)
    np.append(gray, cvtimg)   # the empty numpy array called gray was declared above;
                              # this for-loop goes through all RGB pictures stored in
                              # data, converts them to grayscale, and stores them in gray
MOUTH_OUTLINE_POINTS = list(range(48, 61))
MOUTH_INNER_POINTS = list(range(61, 68))
#defines the landmarks for the Mouth Outline and the inner mouth points
faceCascade = cv2.CascadeClassifier(cascPath)
#faceCascade is defined here, cascPath which is user supplied is the param
predictor = dlib.shape_predictor(PREDICTOR_PATH)
faces = np.array([])
for i in gray:
    face = faceCascade.detectMultiScale(gray[i], scaleFactor=1.05, minNeighbors=5, minSize=(100,100))
    np.append(faces, face)    # this for-loop tries to detect faces and append
                              # them to the empty numpy array called faces
print("Found {0} faces!".format(len(faces)))
# nothing is displayed beyond this print statement
for (x, y, w, h) in faces:
    dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
    landmarks = np.matrix([[p.x, p.y]
                           for p in predictor(IMAGES, dlib_rect).parts()])
    landmarks_display = landmarks[MOUTH_OUTLINE_POINTS + MOUTH_INNER_POINTS]
    highX = 0
    lowX = 1000
    highY = 0
    lowY = 1000
    for idx, point in enumerate(landmarks_display):
        pos = (point[0, 0], point[0, 1])
        cv2.circle(image, pos, 2, color=(0, 0, 255), thickness=-1)
        if (pos[0] > highX):
            highX = pos[0]
        if (pos[0] < lowX):
            lowX = pos[0]
        if (pos[1] > highY):
            highY = pos[1]
        if (pos[1] < lowY):
            lowY = pos[1]
    print(lowX, lowY, highX, highY)
    CONSTANT_FACTOR = 0.325
    delta_x = highX - lowX
    delta_y = highY - lowY
    low_x_adj = lowX - int(delta_x * CONSTANT_FACTOR)
    high_x_adj = highX + int(delta_x * CONSTANT_FACTOR)
    low_y_adj = lowY - int(delta_y * 0.2)
    high_y_adj = highY + int(delta_y * CONSTANT_FACTOR)
    crop_img = image[low_y_adj:high_y_adj, low_x_adj:high_x_adj]
    cv2.imwrite("Cropped_Mouth.jpg", crop_img)
    cv2.imshow("Cropped_Mouth.jpg", crop_img)
    cv2.waitKey(0)
Now, I've checked the paths and they are correct. I don't get any syntax errors, runtime errors, nothing. The script runs, but no output is produced other than the print statement print("Found {0} faces!".format(len(faces))).
I assume it runs what comes after it, but there is no output on the screen and nothing is saved in my home folder (which is where the output pictures of cropped mouths are normally stored). The original script, which was meant to work with one image only, works perfectly, but this one doesn't seem to do the trick.
Any ideas and suggestions would be highly appreciated. Thank you.
P.S. If the problem is with the code after the printed line, note that I haven't started working on that part for this script yet; I believe it is the code above the print statement that is faulty in some way.
Why not use the dlib face detector for detecting faces? Below is the code to detect faces using the dlib face detector and to save the mouth from each face with a .jpg extension. I just modified the dlib face-landmarks example (face_landmark_detection.py) given in the python_examples folder of dlib.
import sys
import os
import dlib
import glob
import cv2
predictor_path = "shape_predictor_68_face_landmarks.dat"
faces_folder_path = "path/to/faces/folder"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
win = dlib.image_window()
i = 0
for f in glob.glob(os.path.join(faces_folder_path, "*.tiff")):
    print("Processing file: {}".format(f))
    img = cv2.imread(f)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # clear the previous overlay; useful when there are multiple faces in the same photo
    win.clear_overlay()
    # show the image
    win.set_image(img)
    # Ask the detector to find the bounding boxes of each face. The 1 in the
    # second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))
        # Get the landmarks/parts for the face in box d.
        shape = predictor(img, d)
        i += 1
        # The next lines of code just get the coordinates of the mouth
        # (landmark points 48-67, hence range(48, 68)) and crop it from the
        # image. This part can probably be optimised by taking only the
        # outermost points.
        xmouthpoints = [shape.part(x).x for x in range(48, 68)]
        ymouthpoints = [shape.part(x).y for x in range(48, 68)]
        maxx = max(xmouthpoints)
        minx = min(xmouthpoints)
        maxy = max(ymouthpoints)
        miny = min(ymouthpoints)
        # pad both sides so the mouth shows properly
        pad = 10
        # basename gets the name of the file with its extension;
        # splitext splits the extension from the filename.
        # This does not handle multiple faces per image: if there are several,
        # each crop overwrites the previous one and only the last is kept.
        filename = os.path.splitext(os.path.basename(f))[0]
        crop_image = img[miny-pad:maxy+pad, minx-pad:maxx+pad]
        cv2.imshow('mouth', crop_image)
        # The mouth crops are saved as '<original name>.jpg' in the current
        # directory; change the folder if you want to.
        cv2.imwrite(filename + '.jpg', crop_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        win.add_overlay(shape)
    win.add_overlay(dets)
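As the comments in the loop note, crops from multiple faces in one image overwrite each other. A minimal fix, reusing the index k that enumerate already provides, is to fold it into the output name:
# one file per detected face, e.g. photo_0.jpg, photo_1.jpg, ...
cv2.imwrite('{}_{}.jpg'.format(filename, k), crop_image)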
I am trying to recreate a working Python script using RStudio's reticulate package for Python. The working script is an adaptation of dlib's face detection example. I have it almost working, I think, but I do not know how to enumerate along an object. The original script reads:
from imutils import face_utils
import numpy as np
import imutils
import dlib
import cv2
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("path/to/shape_predictor_68_face_landmarks.dat")
image = cv2.imread("path/to/defiant2.jpg")
print(image.shape)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 1)
for (i, rect) in enumerate(rects):
    shape = predictor(gray, rect)
    shape = face_utils.shape_to_np(shape)
    bb = face_utils.rect_to_bb(rect)
    rect = shape[1]
    for rect in shape:
        print("{} {}".format(*rect))
    print("{} {} {} {}".format(*bb))
When I try to recreate this in R to the best of my ability, I hit a roadblock where it is supposed to enumerate over the dlib detector output. Specifically, the following code gives this error:
library(reticulate)
cv2 <- import('cv2', convert = FALSE)
dlib <- import('dlib', convert = FALSE)
face_utils <- import('imutils.face_utils')
imutils <- import('imutils')
np <- import('numpy')
main <- import_main()
py <- import_builtins()
shape_predictor = 'shape_predictor_68_face_landmarks.dat'
img = 'defiant2.jpg'
detector = dlib$get_frontal_face_detector()
predictor = dlib$shape_predictor(shape_predictor)
image = cv2$imread(img)
gray = cv2$cvtColor(image, cv2$COLOR_BGR2GRAY)
rects = detector$run(gray)
py$enumerate(rects)
Error: attempt to apply non-function
Any help in transferring the code to R would be greatly appreciated. If it helps, I have uploaded the full script files, as well as a test image and the shape-predictor landmarks file, to GitHub.
I am using OpenCV 2.4.8 and Python 2.7.5 on Ubuntu 14.04.
When I call
knn=cv2.KNearest()
I get an error saying
knn=cv2.KNearest()
AttributeError: 'module' object has no attribute 'KNearest'
How can I resolve it?
The full code is here:
import cv2
import numpy as np
####### training part ###############
samples = np.loadtxt('generalsamples.data',np.float32)
responses = np.loadtxt('generalresponses.data',np.float32)
responses = responses.reshape((responses.size,1))
model = cv2.KNearest()
model.train(samples,responses)
####################### testing part
im = cv2.imread('training_images/number1.jpg')
out = np.zeros(im.shape,np.uint8)
gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray,255,1,1,11,2)
image,contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    if cv2.contourArea(cnt) > 50:
        [x,y,w,h] = cv2.boundingRect(cnt)
        if h > 28:
            cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
            roi = thresh[y:y+h,x:x+w]
            roismall = cv2.resize(roi,(10,10))
            roismall = roismall.reshape((1,100))
            roismall = np.float32(roismall)
            retval, results, neigh_resp, dists = model.find_nearest(roismall, k = 1)
            string = str(int((results[0][0])))
            cv2.putText(out,string,(x,y+h),0,1,(0,255,0))
cv2.imshow('im',im)
cv2.imshow('out',out)
cv2.waitKey(0) & 0xFF
That is the correct way to call KNearest(), so you are doing something else wrong.
Here is a complete working example of using k-Nearest Neighbours, taken from [here]:
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Feature set containing (x,y) values of 25 known/training data
trainData = np.random.randint(0,100,(25,2)).astype(np.float32)
# Labels each one either Red or Blue with numbers 0 and 1
responses = np.random.randint(0,2,(25,1)).astype(np.float32)
# Take Red families and plot them
red = trainData[responses.ravel()==0]
plt.scatter(red[:,0],red[:,1],80,'r','^')
# Take Blue families and plot them
blue = trainData[responses.ravel()==1]
plt.scatter(blue[:,0],blue[:,1],80,'b','s')
plt.show()
newcomer = np.random.randint(0,100,(1,2)).astype(np.float32)
plt.scatter(newcomer[:,0],newcomer[:,1],80,'g','o')
knn = cv2.KNearest()
knn.train(trainData,responses)
ret, results, neighbours, dist = knn.find_nearest(newcomer, 3)
print "result: ", results,"\n"
print "neighbours: ", neighbours,"\n"
print "distance: ", dist
plt.show()
If that doesn't work, then you have larger problems.
I have solved this problem. The problem was the OpenCV version; that is why it was not recognizing the KNN function. I installed OpenCV 2.4.10 and it works fine. Thank you all.
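For anyone on OpenCV 3.x or newer, cv2.KNearest() raises the same AttributeError because the class moved into the cv2.ml module. A rough sketch of the equivalent training and query calls, reusing the samples, responses, and roismall arrays from the question's code:
import cv2
# OpenCV 3.x+ k-NN API; KNearest moved into cv2.ml
knn = cv2.ml.KNearest_create()
knn.train(samples, cv2.ml.ROW_SAMPLE, responses)
retval, results, neigh_resp, dists = knn.findNearest(roismall, 1)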