I am trying to extract faces from images by creating bounding boxes, but I am getting an error shown below.
First code block:
def get_predicition(image):
    """Expects an image as input; the image is cropped to the face,
    the cropped face is sent to the evaluation function, and finally
    the annotated result with a bounding box around the face is returned.
    """
    height, width = image.shape[:2]
    try:  # In case no face is detected in a frame
        face = face_detector(image, 1)[0]  # Face detection
        x, y, size = get_boundingbox(face=face, width=width, height=height)  # Get bounding box around the face
    except IndexError:
        pass
    cropped_face = image[y:y+size, x:x+size]  # Crop the face
    output, label = evaluate(cropped_face)  # Send the cropped face to the classifier
    font_face = cv2.FONT_HERSHEY_SIMPLEX  # Font settings
    thickness = 2
    font_scale = 1
    if label == 'Real':
        color = (0, 255, 0)
    else:
        color = (0, 0, 255)
    x = face.left()  # Set the bounding box on the uncropped image
    y = face.top()
    w = face.right() - x
    h = face.bottom() - y
    cv2.putText(image, label + '_' + str('%.2f' % output) + '%', (x, y+h+30),
                font_face, font_scale,
                color, thickness, 2)  # Put the label and confidence value
    return cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)  # Draw box over the face
Second code block:
cropped_face = []
for image in images:
    faces = face_detector(image[0], 1)
    height, width = image[0].shape[:2]
    try:  # In case no face is detected in a frame
        x, y, size = get_boundingbox(face=faces[0], width=width, height=height)
    except IndexError:
        continue
    cropped_face.append([image[0][y:y+size, x:x+size], image[1]])
The first code block runs without any errors, but when I run the second code block I get the error shown below:
NameError Traceback (most recent call last)
<ipython-input-48-52e274eb31f6> in <module>
5 try: # If in case face is not detected at any frame
6
----> 7 x, y, size = get_boundingbox(face=faces[0], width=width, height=height)
8 except IndexError:
9 continue
NameError: name 'get_boundingbox' is not defined
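The NameError itself just means that get_boundingbox is not defined (or imported) in the session running the second block; the first block only defines get_predicition, and the call inside it never executes, which is why it "runs" without errors. As a reference point, here is a minimal sketch of a square bounding-box helper consistent with how get_boundingbox is called above; this is an assumption, not the original implementation:

# Hypothetical helper (assumed behaviour): a square box around a dlib face
# rectangle, scaled a little and clipped to the image borders.
def get_boundingbox(face, width, height, scale=1.3):
    x1, y1, x2, y2 = face.left(), face.top(), face.right(), face.bottom()
    size = int(max(x2 - x1, y2 - y1) * scale)        # side of the square crop
    center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2
    x = max(int(center_x - size // 2), 0)            # keep the box inside the image
    y = max(int(center_y - size // 2), 0)
    size = min(width - x, height - y, size)
    return x, y, size

Defining (or importing) whatever get_boundingbox you actually use before running the second block is what resolves the NameError.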
Related
I received an error in my code for a social distancing detection system using a webcam. I searched for the error, but found nothing that differs from my code. I wrote my code in Notepad++ and run it from the command prompt. Below is my error:
C:\Users\User\Downloads\Social_Distancing_Detection_Real_Time>python Run.py
[INFO] loading YOLO from disk...
[INFO] setting preferable backend and target to CUDA...
[INFO] accessing video stream...
[ WARN:0] global D:\a\opencv-python\opencv-python\opencv\modules\dnn\src\dnn.cpp (1447) cv::dnn::dnn4_v20211004::Net::Impl::setUpNet DNN module was not built with CUDA backend; switching to CPU
Traceback (most recent call last):
File "C:\Users\User\Downloads\Social_Distancing_Detection_Real_Time\Run.py", line 77, in <module>
results = detect_people(frame, net, ln,
File "C:\Users\User\Downloads\Social_Distancing_Detection_Real_Time\mylib\detection.py", line 58, in detect_people
idxs = cv2.dnn.NMSBoxes(boxes, confidence, MIN_CORP, NMS_THRESH)
TypeError: Can't parse 'scores'. Input argument doesn't provide sequence protocol
[ WARN:1] global D:\a\opencv-python\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (438) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
Below is my full code for the file detection.py:
#import the necessary packages
from .config import NMS_THRESH, MIN_CORP, People_Counter
import numpy as np
import cv2

def detect_people(frame, net, In, personIdx=0):
    #grab the dimensions of the frame and initialize the list of results
    (H, W) = frame.shape[:2]
    results = []

    #construct a blob from the input frame and then perform a forward
    #pass of the YOLO object detector, giving us our bounding boxes
    #and associated probabilities
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
        swapRB=True, crop=False)
    net.setInput(blob)
    layerOutputs = net.forward(In)

    #initialize our lists of detected bounding boxes, centroids and
    #confidences, respectively
    boxes = []
    centroids = []
    confidences = []

    #loop over each of the layer outputs
    for output in layerOutputs:
        #loop over each of the detections
        for detection in output:
            #extract the class ID and confidence (i.e., probability)
            #of the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]

            #filter detections by (1) ensuring that the object
            #detected was a person and (2) that the minimum
            #confidence is met
            if classID == personIdx and confidence > MIN_CORP:
                #scale the bounding box coordinates back relative to
                #the size of the image, keeping in mind that YOLO
                #actually returns the center (x, y)-coordinates of
                #the bounding box followed by the boxes' width and height
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")

                #use the center (x, y)-coordinates to derive the top
                #and left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))

                #update our list of bounding box coordinates,
                #centroids and confidences
                boxes.append([x, y, int(width), int(height)])
                centroids.append((centerX, centerY))
                confidences.append(float(confidence))

    #apply non-maxima suppression to suppress weak, overlapping bounding boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidence, MIN_CORP, NMS_THRESH)
    #print('Total people count:', len(idxs))
    #compute the total people counter
    #if People_Counter:
        #human_count = "Human count: {}".format(len(idxs))
        #cv2.putText(frame, human_count, (470, frame.shape[0] - 75), cv2.FONT_HERSHEY_SIMPLEX, 0.70, (0, 0, 0), 2)

    #ensure at least one detection exists
    if len(idxs) > 0:
        #loop over the indexes we are keeping
        for i in idxs.flatten():
            #extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])

            #update our results list to consist of the person
            #prediction probability, bounding box coordinates,
            #and the centroids
            r = (confidences[i], (x, y, x + w, y + h), centroids[i])
            results.append(r)

    #return the list of the results
    return results
The answer to your problem (as usual) lies in the response from the interpreter:
TypeError: Can't parse 'scores'. Input argument doesn't provide sequence protocol
scores is the second argument to cv2.dnn.NMSBoxes, which in your case is confidence. confidence is a single number; you can't iterate over it. You've made a typo: you probably wanted to pass confidences, which is a list.
Change your code to:
idxs = cv2.dnn.NMSBoxes(boxes, confidences, MIN_CORP, NMS_THRESH)
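For reference, a tiny standalone sketch of the kind of arguments cv2.dnn.NMSBoxes expects: boxes as [x, y, w, h] lists and one score per box (all numbers here are made up):

import cv2

boxes = [[10, 10, 50, 80], [12, 12, 48, 78], [200, 40, 60, 90]]  # one [x, y, w, h] per detection
confidences = [0.91, 0.88, 0.75]                                  # one score per box
score_thresh, nms_thresh = 0.5, 0.3                               # placeholder thresholds

idxs = cv2.dnn.NMSBoxes(boxes, confidences, score_thresh, nms_thresh)
print(idxs)  # indices of the boxes kept after non-maxima suppression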
def face_detect(img):
    hog_rects = hog_detector(img, 0)
    hog_faces = np.zeros((0, 4), dtype=int)
    for (i, rect) in enumerate(hog_rects):
        (x, y, w, h) = rect_to_bb(rect)
        face = np.asarray((x, y, w, h), dtype=int)
        hog_faces = np.append(hog_faces, [face], axis=0)
    return hog_faces

def detect(img, cascade, minimumFeatureSize=(20, 20)):
    if cascade.empty():
        raise (Exception("There was a problem loading your Haar Cascade xml file."))
    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=1, minSize=minimumFeatureSize)
    if len(rects) == 0:
        return []
    rects[:, 2:] += rects[:, :2]  # convert last coords from (width, height) to (maxX, maxY)
    return rects

def eye_detect(faces, gray, minEyeSize):
    # eyes = np.zeros((0, 4), dtype=int)
    for (x, y, w, h) in faces:
        roi_gray = gray[y:h, x:w]
        detected_eyes = detect(roi_gray, haarEyeCascade, minEyeSize)
        eyeFix = detected_eyes + [x, y, x, y]
        # eyes = np.append(eyes, eyeFix, axis=0)
    return eyeFix
I use the above functions with the dlib HOG face detector and loop through the detected faces to find eyes with the eye_detect function, using the OpenCV Haar eye cascade. The input is VideoCapture frames from OpenCV. The output of each function is a numpy array containing min x, min y, max x, max y of the detected feature.
If there is only one face, the code works fine. But as soon as a second face appears, it throws
UnboundLocalError: local variable 'eyeFix' referenced before assignment
I want it to only detect eyes on the existing face and not on new faces. What can I do to improve this code?
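One way to avoid the UnboundLocalError (shown only as a minimal sketch, reusing the commented-out eyes accumulator and the detect/haarEyeCascade names from above) is to collect the eyes for each face inside the loop and skip faces where no eyes are found, so eyeFix is never referenced in an iteration where it was never assigned:

import numpy as np

def eye_detect(faces, gray, minEyeSize):
    eyes = np.zeros((0, 4), dtype=int)            # accumulate eyes from every face
    for (x, y, w, h) in faces:                    # boxes as (minX, minY, maxX, maxY)
        roi_gray = gray[y:h, x:w]                 # search for eyes only inside the face region
        detected_eyes = detect(roi_gray, haarEyeCascade, minEyeSize)
        if len(detected_eyes) == 0:               # no eyes in this face: skip instead of failing
            continue
        eyeFix = detected_eyes + [x, y, x, y]     # shift back to full-image coordinates
        eyes = np.append(eyes, eyeFix, axis=0)
    return eyes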
I am trying to make an automatic annotation tool for YOLO object detection, which uses a previously trained model to find the detections. I managed to put together some code, but I am a little stuck. As far as I know, this is the annotation format YOLO needs:
18 0.154167 0.431250 0.091667 0.612500
And with my code I get:
0.5576068858305613, 0.5410404056310654, -0.7516528169314066, 0.33822181820869446
I am not sure why I get the minus sign at the third number, or whether I need to shorten my float numbers.
I will post the code below in case someone can help me. After completing this project I will post the whole code for anyone who wants to use it.
def convert(size, box):
    dw = 1./size[0]
    dh = 1./size[1]
    x = (box[0] + box[1])/2.0
    y = (box[2] + box[3])/2.0
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x*dw
    w = w*dw
    y = y*dh
    h = h*dh
    return (x, y, w, h)
The above function converts the coordinates to YOLO format. For size you need to pass (w, h), and for box you need to pass (x, x+w, y, y+h).
net = cv2.dnn.readNetFromDarknet(config_path, weights_path)

# path_name = "images/city_scene.jpg"
path_name = image
image = cv2.imread(path_name)
file_name = os.path.basename(path_name)
filename, ext = file_name.split(".")
h, w = image.shape[:2]

# create 4D blob
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)

# sets the blob as the input of the network
net.setInput(blob)

# get all the layer names
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

# feed forward (inference) and get the network output
# measure how much it took in seconds
start = time.perf_counter()
layer_outputs = net.forward(ln)
time_took = time.perf_counter() - start
print(f"Time took: {time_took:.2f}s")

boxes, confidences, class_ids = [], [], []
b = []
a = []

# loop over each of the layer outputs
for output in layer_outputs:
    # loop over each of the object detections
    for detection in output:
        # extract the class id (label) and confidence (as a probability) of
        # the current object detection
        scores = detection[5:]
        class_id = np.argmax(scores)
        confidence = scores[class_id]
        # discard weak predictions by ensuring the detected
        # probability is greater than the minimum probability
        if confidence > CONFIDENCE:
            # scale the bounding box coordinates back relative to the
            # size of the image, keeping in mind that YOLO actually
            # returns the center (x, y)-coordinates of the bounding
            # box followed by the boxes' width and height
            box = detection[0:4] * np.array([w, h, w, h])
            (centerX, centerY, width, height) = box.astype("float")
            # use the center (x, y)-coordinates to derive the top
            # and left corner of the bounding box
            x = int(centerX - (width / 2))
            y = int(centerY - (height / 2))
            a = w, h
            convert(a, box)
            boxes.append([x, y, int(width), int(height)])
            confidences.append(float(confidence))
            class_ids.append(class_id)

idxs = cv2.dnn.NMSBoxes(boxes, confidences, SCORE_THRESHOLD,
                        IOU_THRESHOLD)

font_scale = 1
thickness = 1

# ensure at least one detection exists
if len(idxs) > 0:
    # loop over the indexes we are keeping
    for i in idxs.flatten():
        # extract the bounding box coordinates
        x, y = boxes[i][0], boxes[i][1]
        w, h = boxes[i][2], boxes[i][3]
        # draw a bounding box rectangle and label on the image
        color = [int(c) for c in colors[class_ids[i]]]
        ba = w, h
        print(w, h)
        cv2.rectangle(image, (x, y), (x + w, y + h), color=color, thickness=thickness)
        text = "{}".format(labels[class_ids[i]])
        conf = "{:.3f}".format(confidences[i], x, y)
        int1, int2 = (x, y)
        print(text)
        # print(convert(ba, box))
        # b = w, h
        # print(convert(b, boxes))
        # print(convert(a, box))  # coordinates
        ivan = str(int1)
        b.append([text, ivan])
        # a.append(float(conf))
        # print(a)
        # calculate text width & height to draw the transparent boxes as background of the text
        (text_width, text_height) = \
            cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=thickness)[0]
        text_offset_x = x
        text_offset_y = y - 5
        box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y - text_height))
        overlay = image.copy()
        cv2.rectangle(overlay, box_coords[0], box_coords[1], color=color, thickness=cv2.FILLED)
        # add opacity (transparency to the box)
        image = cv2.addWeighted(overlay, 0.6, image, 0.4, 0)
        # now put the text (label: confidence %)
        cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=font_scale, color=(0, 0, 0), thickness=thickness)
        text = "{}".format(labels[class_ids[i]], x, y)
        conf = "{:.3f}".format(confidences[i])
The problem is the indexes in your function:
box[0] => center x
box[1] => center y
box[2] => width of your bbox
box[3] => height of your bbox
According to the documentation, YOLO labels look like this:
<object-class> <x> <y> <width> <height>
where x and y are the center of the bounding box, so your code should be like this:
def convert(size, box):
    dw = 1./size[0]
    dh = 1./size[1]
    x = box[0]*dw
    y = box[1]*dh
    w = box[2]*dw
    h = box[3]*dh
    return (x, y, w, h)
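A quick sanity check with made-up numbers (a 416x416 image and a detection centered at (100, 150) with size 80x60) shows the output stays in [0, 1] with no negative values:

# Illustrative values only.
size = (416, 416)                   # (image width, image height)
box = (100.0, 150.0, 80.0, 60.0)    # (centerX, centerY, width, height) as YOLO outputs them
print(convert(size, box))
# -> roughly (0.2404, 0.3606, 0.1923, 0.1442)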
Maybe this can help you
def bounding_box_2_yolo(obj_detections, frame, index):
    yolo_info = []
    for object_det in obj_detections:
        left_x, top_y, right_x, bottom_y = object_det.boxes
        xmin = left_x
        xmax = right_x
        ymin = top_y
        ymax = bottom_y
        xcen = float((xmin + xmax)) / 2 / frame.shape[1]
        ycen = float((ymin + ymax)) / 2 / frame.shape[0]
        w = float((xmax - xmin)) / frame.shape[1]
        h = float((ymax - ymin)) / frame.shape[0]
        yolo_info.append((index, xcen, ycen, w, h))
    return yolo_info
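A minimal usage sketch of that helper; the Detection namedtuple here is a hypothetical stand-in for whatever your detector returns, since the function only assumes each object has a .boxes attribute with corner coordinates:

import numpy as np
from collections import namedtuple

# Hypothetical stand-in for a detection object with corner coordinates.
Detection = namedtuple("Detection", ["boxes"])

frame = np.zeros((480, 640, 3), dtype=np.uint8)   # dummy 640x480 frame
dets = [Detection(boxes=(100, 120, 260, 360))]    # (left_x, top_y, right_x, bottom_y)

lines = bounding_box_2_yolo(dets, frame, index=18)
for cls, xcen, ycen, w, h in lines:
    print(f"{cls} {xcen:.6f} {ycen:.6f} {w:.6f} {h:.6f}")  # one YOLO annotation line per object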
labelImg also has a lot of things you can use:
https://github.com/tzutalin/labelImg/blob/master/libs/yolo_io.py
Hi, I am working on facial recognition. To increase performance I want to use face alignment. When I use the HOG face detector, described e.g. by Adrian, I get an aligned image out.
import cv2
import dlib
from imutils import face_utils
from imutils.face_utils import rect_to_bb

detector = dlib.get_frontal_face_detector()
shape_predictor = dlib.shape_predictor('/home/base/Documents/facial_landmarks/shape_predictor_5_face_landmarks.dat')
fa = face_utils.facealigner.FaceAligner(shape_predictor, desiredFaceWidth=112, desiredLeftEye=(0.3, 0.3))

img = cv2.imread(pathtoimage)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 2)

for rect in rects:
    (x, y, w, h) = rect_to_bb(rect)
    faceAligned = fa.align(img, gray, rect)
However, I have to work on embedded hardware and the HOG face detection is not fast enough. What works best for me is the cv2 LBP cascade.
With cv2 I also get the box of the found face, but using it does not work.
faces_detected = face_cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=4)
In other examples using the HOG, the coordinates are extracted from the HOG-rect with:
(x, y, w, h) = rect_to_bb(rect)
and then used with
aligned_face = fa.align(img, gray, dlib.rectangle(left = x, top=y, right=w, bottom=h))
The idea was to swap the cv2 values in for x, y, w, h. Unfortunately, that does not work, as the two lines above result in a completely wrong alignment. In the first code example, the rect_to_bb function is included but not used.
I checked the values and they are somehow off:
224x224 the image
156 70 219 219 the cv2 values (slightly different of course)
165 101 193 193 the rect values with rect_to_bb
[(165, 101) (358, 294)] the rect values
I checked the rect_to_bb function, but this seems straightforward:
def rect_to_bb(rect):
    # take a bounding box predicted by dlib and convert it
    # to the format (x, y, w, h) as we would normally do
    # with OpenCV
    x = rect.left()
    y = rect.top()
    w = rect.right() - x
    h = rect.bottom() - y

    # return a tuple of (x, y, w, h)
    return (x, y, w, h)
While typing I got the answer... classic
The alignment function needs the bounding box specified slightly differently. This can be seen in the rect_to_bb() function:
def rect_to_bb(rect):
    # take a bounding box predicted by dlib and convert it
    # to the format (x, y, w, h) as we would normally do
    # with OpenCV
    x = rect.left()
    y = rect.top()
    w = rect.right() - x
    h = rect.bottom() - y

    # return a tuple of (x, y, w, h)
    return (x, y, w, h)
There, rect.right() (w in cv2) and rect.bottom() (h in cv2) have x and y subtracted from them. So in the alignment function you have to add the values back, otherwise the image fed to the alignment function is much too small and out of shape. This also works with the values from the cv2 detection.
aligned_face = fa.align(img, gray, dlib.rectangle(left = x, top=y, right=w+x, bottom=h+y))
Keep healthy
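Putting it together, a minimal sketch of aligning faces straight from the OpenCV cascade detections; the cascade file below is just a stand-in for whichever LBP/Haar cascade you load, and fa and pathtoimage are assumed to be set up as in the question:

# Sketch: alignment from OpenCV cascade boxes; the cascade path is a placeholder.
import cv2
import dlib

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

img = cv2.imread(pathtoimage)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces_detected = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4)

aligned = []
for (x, y, w, h) in faces_detected:
    # dlib.rectangle expects absolute right/bottom coordinates, hence x + w and y + h
    rect = dlib.rectangle(left=int(x), top=int(y), right=int(x + w), bottom=int(y + h))
    aligned.append(fa.align(img, gray, rect))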
What I want to do is crop out the white lines above a given Instagram screenshot. I tried doing that by finding the center of the image and going up, line by line, until I find the first entirely white line. Any idea why my code is not working?
from PIL import Image

image_file = "test.png"
im = Image.open(image_file)
width, height = im.size
centerLine = height // 2
entireWhiteLine = set()
entireWhiteLine.add(im.getpixel((0, 0)))
terminateUpperCrop = 1

while terminateUpperCrop != 2:
    for i in range(centerLine, 1, -1):
        entireLine = set()
        upperBorder = i - 1
        for j in range(0, width, 1):
            entireLine.add((im.getpixel((i, j))))
        if entireLine == im.getpixel((0, 0)):
            box = (0, upperBorder, width, height)
            crop = im.crop((box))
            crop.save('test2.png')
            terminateUpperCrop = 2
Your getpixel() call is actually passing the coordinates the wrong way around (getpixel expects (x, y)), so in effect you were scanning for the left edge. You could use the following approach instead: it builds a list with one entry per white pixel in the row, so if its length equals the image width, the whole row is white.
from PIL import Image

image_file = "test.png"
im = Image.open(image_file)
width, height = im.size
centerLine = height // 2
white = (255, 255, 255)

for y in range(centerLine, 0, -1):
    # count the white pixels in row y; a fully white row has `width` of them
    if len([1 for x in range(width) if im.getpixel((x, y)) == white]) == width:
        box = (0, y, width, height)
        crop = im.crop(box)
        crop.save('test2.png')
        break
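As a side note, if the per-pixel getpixel() scan turns out to be slow on large screenshots, the same row check can be done with numpy; a minimal sketch, assuming the same test.png and an RGB image:

# Sketch of the same scan using numpy; assumes an RGB image.
import numpy as np
from PIL import Image

im = Image.open("test.png").convert("RGB")
arr = np.array(im)                     # shape (height, width, 3)
height, width = arr.shape[:2]

for y in range(height // 2, 0, -1):
    if (arr[y] == 255).all():          # every channel of every pixel in row y is 255
        im.crop((0, y, width, height)).save('test2.png')
        break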