OpenCV haar cascade classifier not detecting objects - python

I trained a Haar cascade classifier with the Cascade Trainer GUI, feeding it 5000 positive and 5000 negative images; training produced a cascade.xml file.
In a local Jupyter Notebook I then added the classifier to a script that repeatedly takes a screenshot, so it can keep its view of the game current, and draws rectangles around any Halo Reach Grunts it detects.
Everything seems to work except the classifier itself: it keeps printing that no Grunts are detected in the screenshots, so the screenshot in question is never displayed, and even when it was displayed there were no rectangles drawn on the Grunts.
Here is the code:
import os
import cv2
import numpy as np
import time
from mss import mss
import pyautogui as pygui
import pydirectinput as pydi
import matplotlib.pyplot as plt
import shutil

time.sleep(5)

# Load the classifier once, outside the loop, instead of re-reading the XML every frame
grunt_classifier = cv2.CascadeClassifier(
    r"C:\Users\carlo\PycharmProjects\gamingvision\photos\cascade\classifier\cascade.xml")
print("Grunt Classifier added")

i = 0
while True:
    with mss() as sct:
        sct.shot()
    #print("Screenshot"+str(i)+"taken")
    #posImg = r'C:\Users\carlo\PycharmProjects\gamingvision\monitor-1.png'
    #shutil.copy(r'C:\Users\carlo\PycharmProjects\gamingvision\monitor-1.png', r'C:\Users\carlo\PycharmProjects\gamingvision\photos\cascade\n\negative'+str(i)+'.jpg')
    #i += 1
    #exit(0)
    image_path = r'C:\Users\carlo\PycharmProjects\gamingvision\monitor-1.png'
    original_image = cv2.imread(image_path)
    print("Image read")
    # Convert the image to grayscale for easier computation
    # (cv2.imread returns BGR, so use COLOR_BGR2GRAY, not COLOR_RGB2GRAY)
    image_grey = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
    print("Image Grayed")
    detected_grunts = grunt_classifier.detectMultiScale(image_grey, minSize=(50, 50))
    print("Grunts classifier activated")
    # Draw rectangles on grunts
    if len(detected_grunts) != 0:
        for (x, y, width, height) in detected_grunts:
            # The bottom-right corner is (x + width, y + height); the original
            # code swapped width and height. Draw on the colour image so the
            # green rectangle is actually visible.
            cv2.rectangle(original_image, (x, y),
                          (x + width, y + height),
                          (0, 255, 0), 2)
        print("Grunt rectangles done")
        cv2.imshow("Detected Grunts", original_image)
        cv2.waitKey()
        cv2.destroyAllWindows()
        #pydi.moveTo(x, y)
        #print("Mouse Moved")
    else:
        print("Grunts not detected")
    time.sleep(1)
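A sanity check worth running before retraining anything (a sketch; the loosened parameter values are illustrative guesses, not taken from the question): cv2.CascadeClassifier silently detects nothing when the XML path is wrong, and the detectMultiScale defaults can be too strict for in-game sprites.

import cv2

cascade_path = r"C:\Users\carlo\PycharmProjects\gamingvision\photos\cascade\classifier\cascade.xml"
grunt_classifier = cv2.CascadeClassifier(cascade_path)

# CascadeClassifier does not raise on a bad path - it just detects nothing -
# so check empty() explicitly.
if grunt_classifier.empty():
    raise IOError("cascade.xml failed to load - check the path")

image_grey = cv2.imread(
    r"C:\Users\carlo\PycharmProjects\gamingvision\monitor-1.png",
    cv2.IMREAD_GRAYSCALE)

# Loosen the parameters first, then tighten once detections appear:
# a smaller scaleFactor and minNeighbors make the detector more permissive,
# and minSize=(50, 50) may be larger than distant on-screen Grunts.
detections = grunt_classifier.detectMultiScale(
    image_grey, scaleFactor=1.05, minNeighbors=3, minSize=(24, 24))
print(len(detections), "candidate detections")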


Face Detection with less cpu load cv2 [closed]

For a university project I am programming face mask recognition. For detecting faces, I use cv2.CascadeClassifier('face_detector.xml'). As I noticed, this program takes up far too much CPU, resulting in a heavily stuttering video stream.
I am running the code on a MacBook Air with a 1.6 GHz dual-core Intel Core i5.
Can someone explain what I can change to make it smoother? Or maybe recommend another face detection method?
Here is my code:
import numpy as np
import os
import tensorflow as tf
import cv2
from matplotlib.pyplot import gray

# Disable tensorflow compilation warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Load the cascade
face_cascade = cv2.CascadeClassifier('face_detector.xml')

# To capture video from webcam.
cap = cv2.VideoCapture(0)
# To use a video file as input
# cap = cv2.VideoCapture('filename.mp4')

model = tf.keras.models.load_model('checkpoint19.ckpt')

i = 0
while True:
    # Read the frame
    _, img = cap.read()
    # Detect the faces
    faces = face_cascade.detectMultiScale(img, 1.3, 4)
    # save each frame as image with PNG format
    image = cv2.imwrite('database/{index}.png'.format(index=i), img)
    i += 1
    # cut out the fragment in the box of the image,
    # classify it, and draw the rectangle around each face
    for (x, y, w, h) in faces:
        crop_img = img[y:y + h, x:x + w]
        resizedImg = cv2.resize(crop_img, (224, 224))
        gray = cv2.cvtColor(resizedImg, cv2.COLOR_BGR2GRAY)
        imgArrNew = gray.reshape(1, 224, 224, 1)
        prediction = model.predict(imgArrNew)
        print(prediction)
        label = np.argmax(prediction)
        print(label)
        # font
        font = cv2.FONT_HERSHEY_SIMPLEX
        # org
        org = (x, y + h + 30)
        # fontScale
        fontScale = 1
        # Blue color in BGR
        color = (255, 0, 0)
        # Line thickness of 2 px
        thickness = 2
        # output the predicted label/sign on the live-stream frame
        if label == 0:
            color = (0, 0, 225)
            label_out = "Mask off"
        if label == 1:
            color = (50, 205, 50)
            label_out = "Mask on"
        if label == 2:
            color = (0, 255, 225)
            label_out = "incorrect Mask"
        cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
        image1 = cv2.putText(img, label_out, org, font,
                             fontScale, color, thickness, cv2.LINE_AA)
    # Display
    cv2.imshow('Face_Regonition', img)
    # Stop if escape key is pressed
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

# Release the VideoCapture object
cap.release()
Thanks for your help :)
Haar cascade classifiers are slow, and running detection on every single frame is hard for low-end computing devices.
The easiest way out is to use a lower-resolution image or a lower FPS, but the result will look cheap.
The better way is a detection-and-tracking framework: detection runs at roughly a 1 Hz interval on a separate thread, while tracking runs at 30 Hz, a difference the human eye can't tell.
For face detection you can choose any method (Haar, HOG, CNN) and put it in a new thread. In the main tracking thread, which can run in real time, update the model, predict the bounding box, and display it.
You may look at the trackers covered here; I suggest a KCF-based method because it is fast and reliable:
https://www.pyimagesearch.com/2018/07/30/opencv-object-tracking/
Just pass the detection box rect as the initial rect box for the tracker. Then it should work directly.
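A minimal sketch of that detect-then-track loop, assuming opencv-contrib-python (where the KCF tracker lives under cv2.legacy in recent 4.x builds) and the face_detector.xml from the question; for brevity it re-detects every 30 frames in the same thread rather than on a true 1 Hz detection thread:

import cv2

face_cascade = cv2.CascadeClassifier('face_detector.xml')
cap = cv2.VideoCapture(0)

tracker = None
frame_count = 0
DETECT_EVERY = 30  # re-run the (slow) Haar detector once every 30 frames

while True:
    ok, img = cap.read()
    if not ok:
        break
    frame_count += 1

    if tracker is None or frame_count % DETECT_EVERY == 0:
        # Slow path: Haar detection, run only occasionally
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 4)
        if len(faces) > 0:
            x, y, w, h = faces[0]
            tracker = cv2.legacy.TrackerKCF_create()
            tracker.init(img, (int(x), int(y), int(w), int(h)))
    elif tracker is not None:
        # Fast path: KCF tracking on every frame
        ok, box = tracker.update(img)
        if ok:
            x, y, w, h = [int(v) for v in box]
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        else:
            tracker = None  # lost the target; fall back to detection

    cv2.imshow('tracking', img)
    if cv2.waitKey(1) & 0xff == 27:
        break

cap.release()
cv2.destroyAllWindows()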

Saving an image from pixel data in python

I am trying to create a program which will save images from pixel data obtained through OpenCV Canny edge detection. Right now, the program saves a small image file in the correct path, but the image file does not contain any of the data from the webcam.
An example of what should be saved in the image file: a picture of an edge-detected room.
Versus what is actually saved: just a black rectangle.
CODE BELOW:
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
from numpy import asarray
import PIL
from PIL import Image
import cv2

def LiveCamEdgeDetection_canny(image_color):
    threshold_1 = 100 #LINES
    threshold_2 = 50 #NOISE
    image_gray = cv2.cvtColor(image_color, cv2.COLOR_BGR2GRAY)
    canny = cv2.Canny(image_gray, threshold_1, threshold_2)
    return canny

# Main calling function to initialize webcam and apply edge detection
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    cv2.imshow('Live Edge Detection', LiveCamEdgeDetection_canny(frame))
    #cv2.imshow('Webcam Video', frame)
    #print(LiveCamEdgeDetection_canny(frame))

    # Store pixel data
    pixels = [LiveCamEdgeDetection_canny(frame)]
    image_todraw = np.array(pixels)
    image_todraw = np.reshape(image_todraw, (720, 1280))
    image_todraw *= 255
    image_tosave = Image.fromarray(image_todraw.astype(np.uint8))
    image_tosave.save('/Users/user/Desktop/destinationFolder/RETRY.jpeg', 'JPEG')
    #print(image_tosave)

    if cv2.waitKey(1) == 'p': #13 Enter Key
        break

cap.release() # camera release
cv2.destroyAllWindows()
I appreciate all the help you can give me!
Remove the image_todraw *= 255 line.
cv2.Canny already returns a uint8 image whose edge pixels are 255, so multiplying by 255 overflows: under uint8 arithmetic 255 * 255 wraps around to 1, leaving an almost entirely black image.
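For reference, a minimal corrected loop (a sketch under the question's assumptions, reusing its output path; note also that cv2.waitKey() returns an int key code, so the original comparison against the string 'p' can never be true):

import cv2
from PIL import Image

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    edges = cv2.Canny(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), 100, 50)
    cv2.imshow('Live Edge Detection', edges)

    # edges is already uint8 with values 0/255 - save it as-is
    Image.fromarray(edges).save('/Users/user/Desktop/destinationFolder/RETRY.jpeg', 'JPEG')

    # waitKey returns an int, so compare against ord('p')
    if cv2.waitKey(1) & 0xFF == ord('p'):
        break

cap.release()
cv2.destroyAllWindows()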

Edge box detection using opencv python

Can anyone give an example of the Edge Boxes algorithm being used to generate proposals for object detection with OpenCV?
The details are documented at https://docs.opencv.org/3.4.0/d4/d0d/group__ximgproc__edgeboxes.html
Yes. First you will need to download the model file used for Edge Boxes here. Once you do that, the following code (taken from their GitHub) can be used as an example of running the Edge Boxes algorithm. In short, put the code below into a separate file called edgeboxes_demo.py, then in the terminal type:
python edgeboxes_demo.py model.yml.gz image_file
model.yml.gz is the model you saved from the link above, which I assume is in the same directory as the code. image_file is the path to the image you want to test the algorithm on. The code runs Edge Boxes and then draws the detected boxes on the image in green:
import cv2 as cv
import numpy as np
import sys

if __name__ == '__main__':
    model = sys.argv[1]
    im = cv.imread(sys.argv[2])

    edge_detection = cv.ximgproc.createStructuredEdgeDetection(model)
    rgb_im = cv.cvtColor(im, cv.COLOR_BGR2RGB)
    edges = edge_detection.detectEdges(np.float32(rgb_im) / 255.0)

    orimap = edge_detection.computeOrientation(edges)
    edges = edge_detection.edgesNms(edges, orimap)

    edge_boxes = cv.ximgproc.createEdgeBoxes()
    edge_boxes.setMaxBoxes(30)
    boxes = edge_boxes.getBoundingBoxes(edges, orimap)

    for b in boxes:
        x, y, w, h = b
        cv.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 1, cv.LINE_AA)

    cv.imshow("edges", edges)
    cv.imshow("edgeboxes", im)
    cv.waitKey(0)
    cv.destroyAllWindows()

How to get full head image from haar cascade

I used the following code to capture a face using a Haar cascade classifier, but I still do not get the full head in the image:
from imutils.video import WebcamVideoStream
import os
import time
from datetime import datetime
from imutils.video import FPS
import cv2

cascadePath = "/home/pi/opencv-3.3.0/data/haarcascades/haarcascade_frontalface_default.xml"
eye_cascade = cv2.CascadeClassifier('/home/pi/opencv-3.3.0/data/haarcascades/haarcascade_eye.xml')
faceCascade = cv2.CascadeClassifier(cascadePath)

fn = input('Enter your Folder name: ')
os.system("mkdir "+fn)

vs = WebcamVideoStream(src=0).start()
while 1:
    time.sleep(0.05)
    frame = vs.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.1, 6)
    for (x, y, w, h) in faces:
        cv2.imwrite(fn+"/" + datetime.now().strftime("%H:%M:%S.%f") + ".jpg", gray[y:y+h+30, x:x+w+20])
    cv2.imshow('frame', frame)
    key = cv2.waitKey(1) & 0xFF
In order to capture the head as well, you need to decrease the point where the crop starts.
In your code you used gray[y:y+h+30, x:x+w+20].
y is the top edge of the detected face, so that is the coordinate to decrease; h already covers the face height below it.
x and w give the horizontal extent of the face, which you do not need to change.
Conclusion: change the slice inside cv2.imwrite() to gray[y-20:y+h, x:x+w].
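One caveat with that fix (a sketch, with a hypothetical crop_head helper): if the detected face sits near the top of the frame, y-20 goes negative, and a negative NumPy slice index wraps around to the bottom of the image, producing an empty or nonsensical crop. It is safer to clamp the crop to the frame bounds:

import cv2

def crop_head(gray, x, y, w, h, margin=20):
    """Crop the face box extended upward by `margin` pixels, clamped to the image."""
    top = max(0, y - margin)            # don't let the index go negative
    bottom = min(gray.shape[0], y + h)  # stay inside the frame
    left = max(0, x)
    right = min(gray.shape[1], x + w)
    return gray[top:bottom, left:right]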

Script for identifying landmarks and cropping mouth from images using OpenCV doesn't see faces

So what I'm trying to do with OpenCV, dlib, and Python is to identify facial landmarks on a set of images using dlib, then crop the mouths from those same images and save them as separate images with ".jpg" extensions.
Here is the code:
import numpy as np
import cv2
import dlib
import sys
import skimage
from PIL import Image
import os
import glob
#Everything is imported here

folderpath = sys.argv[1]
cascPath = sys.argv[2]
PREDICTOR_PATH = "/home/victor/facial-landmarks/shape_predictor_68_face_landmarks.dat"
#user supplies the folderpath and cascpath in a terminal/command prompt
#predictor_path is already set

imageformat = ".tif"
path = folderpath
imfilelist = [os.path.join(path, f) for f in os.listdir(path) if f.endswith(imageformat)]
#only images with ".tif" extensions in the folder interest us, we create a
#list with paths to those images

data = np.array([])
for IMG in imfilelist:
    image = cv2.imread(IMG)  #this for-loop iterates through images we need
    np.append(data, image)   # reads them, and appends them to the data
                              # numpy array

gray = np.array([])
for j in range(0, len(data)):
    cvtimg = cv2.cvtColor(np.array(data[j]), cv2.COLOR_BGR2GRAY)
    np.append(gray, cvtimg)  #empty numpy array called gray is declared
                              # for-loop goes through all RGB pictures
                              # stored in data, converts them to grayscale
                              # and stores them in gray

MOUTH_OUTLINE_POINTS = list(range(48, 61))
MOUTH_INNER_POINTS = list(range(61, 68))
#defines the landmarks for the Mouth Outline and the inner mouth points

faceCascade = cv2.CascadeClassifier(cascPath)
#faceCascade is defined here, cascPath which is user supplied is the param

predictor = dlib.shape_predictor(PREDICTOR_PATH)

faces = np.array([])
for i in gray:
    face = faceCascade.detectMultiScale(gray[i], scaleFactor=1.05, minNeighbors=5, minSize=(100, 100))
    np.append(faces, face)  #this for-loop tries to detect faces and append
                             #them to the empty numpy array called faces

print("Found {0} faces!".format(len(faces)))
# nothing is displayed beyond this print statement

for (x, y, w, h) in faces:
    dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
    landmarks = np.matrix([[p.x, p.y]
                           for p in predictor(IMAGES, dlib_rect).parts()])
    landmarks_display = landmarks[MOUTH_OUTLINE_POINTS + MOUTH_INNER_POINTS]

    highX = 0
    lowX = 1000
    highY = 0
    lowY = 1000

    for idx, point in enumerate(landmarks_display):
        pos = (point[0, 0], point[0, 1])
        cv2.circle(image, pos, 2, color=(0, 0, 255), thickness=-1)
        if (pos[0] > highX):
            highX = pos[0]
        if (pos[0] < lowX):
            lowX = pos[0]
        if (pos[1] > highY):
            highY = pos[1]
        if (pos[1] < lowY):
            lowY = pos[1]

    print(lowX, lowY, highX, highY)

    CONSTANT_FACTOR = 0.325
    delta_x = highX - lowX
    delta_y = highY - lowY
    low_x_adj = lowX - int(delta_x * CONSTANT_FACTOR)
    high_x_adj = highX + int(delta_x * CONSTANT_FACTOR)
    low_y_adj = lowY - int(delta_y * 0.2)
    high_y_adj = highY + int(delta_y * CONSTANT_FACTOR)

    crop_img = image[low_y_adj:high_y_adj, low_x_adj:high_x_adj]
    cv2.imwrite("Cropped_Mouth.jpg", crop_img)
    cv2.imshow("Cropped_Mouth.jpg", crop_img)
    cv2.waitKey(0)
Now, I've checked the paths and they are correct. I don't get any syntax errors, runtime errors, nothing. The script runs, but no output is produced other than the print statement: print("Found {0} faces!".format(len(faces))).
I assume it runs what comes after it, but there is no output on the screen and nothing is saved in my home folder (which is where the output pictures of cropped mouths are normally stored). The original script, which was meant to work with one image only, works perfectly, but this one doesn't seem to do the trick.
Any ideas and suggestions would be highly appreciated. Thank you.
P.S. If the problem is with the code after the line that gets printed: I still haven't started working on that part for this script, because I believe it is the code above the print statement that is faulty in some way.
Why not use the dlib face detector for detecting faces? Below is code that detects faces using the dlib face detector and saves the mouth from each face with a .jpg extension. I just modified the face_landmarks.py example given in dlib's python examples folder.
import sys
import os
import dlib
import glob
import cv2

predictor_path = "shape_predictor_68_face_landmarks.dat"
faces_folder_path = "path/to/faces/folder"

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
win = dlib.image_window()

i = 0
for f in glob.glob(os.path.join(faces_folder_path, "*.tiff")):
    print("Processing file: {}".format(f))
    img = cv2.imread(f)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # to clear the previous overlay. Useful when multiple faces in the same photo
    win.clear_overlay()
    # to show the image
    win.set_image(img)

    # Ask the detector to find the bounding boxes of each face. The 1 in the
    # second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))
        # Get the landmarks/parts for the face in box d.
        shape = predictor(img, d)
        i += 1

        # The next lines of code just get the coordinates for the mouth
        # and crop the mouth from the image. This part can probably be optimised
        # by taking only the outermost points.
        # The mouth landmarks in the 68-point model are indices 48-67 inclusive,
        # so the range must end at 68 (the original range(48, 67) missed point 67).
        xmouthpoints = [shape.part(x).x for x in range(48, 68)]
        ymouthpoints = [shape.part(x).y for x in range(48, 68)]
        maxx = max(xmouthpoints)
        minx = min(xmouthpoints)
        maxy = max(ymouthpoints)
        miny = min(ymouthpoints)

        # to show the mouth properly pad both sides
        pad = 10

        # basename gets the name of the file with its extension
        # splitext splits the extension and the filename
        # This does not consider the condition when there are multiple faces in
        # each image; if there are, it just overwrites each crop and keeps the last.
        filename = os.path.splitext(os.path.basename(f))[0]

        crop_image = img[miny-pad:maxy+pad, minx-pad:maxx+pad]
        cv2.imshow('mouth', crop_image)
        # The mouth crops are saved in the current directory, one per input
        # file, as <input filename>.jpg. Change the folder if you want to.
        cv2.imwrite(filename + '.jpg', crop_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        win.add_overlay(shape)
    win.add_overlay(dets)
