cv2.face.LBPHFaceRecognizer_create().predict() is recognizing all users as my name - python

cv2.face.LBPHFaceRecognizer_create().predict() is labelling every user with my name!
I was building a face recognition system; it almost works, but I noticed that my name is shown for Elon Musk, where it should say Unknown!
Face Recognizer.py:
import datetime  # used for the Unknowns snapshot filename
import json
import winsound  # used for the alert beep

import cv2
import numpy as np

print("Please press ESC to close!")

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('Trainer/trainer.yml')

cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
font = cv2.FONT_HERSHEY_SIMPLEX

id = 2

with open('index.json', 'r') as f:
    db = json.load(f)

names = {}
for face in db['faces']:
    names[face] = db['faces'][face]

cam = cv2.VideoCapture(0, cv2.CAP_DSHOW)
cam.set(3, 640)
cam.set(4, 480)
minW = 0.1 * cam.get(3)
minH = 0.1 * cam.get(4)

while True:
    ret, img = cam.read()
    converted_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        converted_image,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(int(minW), int(minH)),
    )
    for (x, y, w, h) in faces:
        id, accuracy = recognizer.predict(converted_image[y:y + h, x:x + w])
        if accuracy < 100:
            id = names[str(id)]
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        else:
            id = "Unknown"
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            if cv2.imwrite(f"Unknowns/{str(datetime.datetime.now().strftime('%Y-%m-%d-%X')).replace(':', '_')}.jpg", img):
                print("Image Saved!")
                winsound.Beep(2000, 500)
        cv2.putText(img, str(id), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
    cv2.imshow('Face Detection', img)
    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break

cam.release()
cv2.destroyAllWindows()
index.json:
{
    "faces": {
        "1": "Tahsin"
    }
}
This is how I'm taking the samples:
import json
import os

import cv2
import numpy as np
from PIL import Image

cam = cv2.VideoCapture(0, cv2.CAP_DSHOW)
cam.set(3, 640)
cam.set(4, 480)

detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

face_id = input("Enter a Numeric user ID: ")
face_name = input("Enter a Name: ")
print("Taking samples, look at camera... ")

count = 0
while True:
    ret, img = cam.read()
    converted_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = detector.detectMultiScale(converted_image, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        count += 1
        # save the captured face crop into the samples folder
        cv2.imwrite("samples/face." + str(face_id) + '.' + str(count) + ".jpg", converted_image[y:y + h, x:x + w])
    k = cv2.waitKey(100) & 0xff
    if k == 27:
        break
    elif count >= 50:
        break
    cv2.imshow('image', img)

with open('index.json', 'r') as f:
    db = json.load(f)

if 'faces' in db:
    db['faces'][face_id] = face_name
else:
    db['faces'] = {}
    db['faces'][face_id] = face_name

with open('index.json', 'w') as f:
    json.dump(db, f, indent=4)

print("Samples taken!")
cam.release()
cv2.destroyAllWindows()
path = 'samples'
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

def Images_And_Labels(path):
    # keep only the sample files that belong to the current face_id
    imagePaths = [os.path.join(path, f) if f.split('.')[1] == face_id else None for f in os.listdir(path)]
    faceSamples = []
    ids = []
    for imagePath in imagePaths:
        if imagePath is not None:
            gray_img = Image.open(imagePath).convert('L')
            img_arr = np.array(gray_img, 'uint8')
            id = int(os.path.split(imagePath)[-1].split(".")[1])
            faces = detector.detectMultiScale(img_arr)
            for (x, y, w, h) in faces:
                faceSamples.append(img_arr[y:y + h, x:x + w])
                ids.append(id)
    return faceSamples, ids

print("Training faces. It will take a few seconds. Wait ...")
faces, ids = Images_And_Labels(path)
recognizer.train(faces, np.array(ids))
recognizer.write('trainer/trainer.yml')
print("Model trained, Now we can recognize your face.")
Sample: Elon Musk is shown as Tahsin:
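For reference: predict() returns a (label, confidence) pair where the confidence is a distance, so lower values mean a closer match (0 would be a perfect match). Two things in the code above work against the "Unknown" case: the filter in Images_And_Labels() keeps only the current face_id, so the model only ever learns one person, and a cut-off of 100 is loose enough that most strangers fall under it. A minimal sketch of a stricter check, meant as a drop-in for the lines inside the face loop (the threshold of 70 is an assumption and needs tuning against your own samples):

    label, confidence = recognizer.predict(converted_image[y:y + h, x:x + w])
    # lower confidence = closer match; 70 is a hypothetical cut-off, tune per dataset
    if confidence < 70:
        name = names.get(str(label), "Unknown")
    else:
        name = "Unknown"

Training on samples from several people, so the model has more than one label to choose from, also tends to widen the confidence gap between known and unknown faces.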

Related

Sharing a global NumPy array between Python processes

Hey, I want to share a global NumPy array (cameraImg) between my main program and a process that runs in parallel, but it doesn't work. It fails with:
TypeError: only size-1 arrays can be converted to Python scalars
The main program's display_camera() runs in a while loop and grabs the webcam image. The image is saved in the global cameraImg and used in recognize_Faces() to identify the face depicted in the image; the matched name is then drawn above the person's head. Hope you can help, thanks!
import time, os, cv2 as cv2, face_recognition
from multiprocessing import Process, Array, Value

global name
name = ""
window_name = 'facerecognition'

def display_camera():
    cap = cv2.VideoCapture(0)
    cap.set(3, 640)  # set Width
    cap.set(4, 480)  # set Height
    cascade = cv2.CascadeClassifier('cascade.xml')
    global cameraImg, name
    while True:
        ret, img = cap.read()
        cameraImg = Array('d', img)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = cascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(20, 20)
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (36, 255, 12), 1)
            cv2.putText(img, name, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = img[y:y + h, x:x + w]
        cv2.imshow('video', img)
        k = cv2.waitKey(30) & 0xff
        if k == 27:  # press 'ESC' to quit
            break
    cap.release()
    cv2.destroyAllWindows()

def recognize_Faces():
    global cameraImg, name
    folder_dir = "../images/"
    image_List = []
    for images in os.listdir(folder_dir):
        # check if the image ends with png or jpg or jpeg
        if (images.endswith(".png") or images.endswith(".jpg")
                or images.endswith(".jpeg")):
            image_List.append(images)
    while True:
        try:
            newFaces = False
            for path in image_List:
                rgb_img = cv2.cvtColor(cameraImg, cv2.COLOR_BGR2RGB)
                img_encoding = face_recognition.face_encodings(rgb_img)
                img2 = cv2.imread("../images/" + path)
                rgb_img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
                img_encoding2 = face_recognition.face_encodings(rgb_img2)[0]
                result = face_recognition.compare_faces(img_encoding, img_encoding2)
                print("Result: ", result)
                if result[0]:
                    newFaces = True
                    name = os.path.basename("../images/" + path).split('.')[0]
        except Exception as e:
            print(e)
        if newFaces == False:
            name = ""
        k = cv2.waitKey(30) & 0xff
        if k == 27:  # press 'ESC' to quit
            break
        time.sleep(3)

if __name__ == '__main__':
    secondaryThread = Process(target=recognize_Faces)
    secondaryThread.start()
    display_camera()
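Two things stand out here, stated as observations rather than a full fix: multiprocessing.Array expects a flat buffer of scalars, not a 3-D uint8 image (which is what raises "only size-1 arrays can be converted to Python scalars"), and a plain module-level global like name is not shared between processes at all; each Process gets its own copy. A minimal sketch of one way to share a frame buffer, assuming a fixed 640x480 BGR frame (the sizes and function names are assumptions, not the original code):

    import numpy as np
    from multiprocessing import Array, Process

    H, W, C = 480, 640, 3  # assumed frame geometry

    def writer(buf):
        # wrap the shared bytes in a NumPy view and copy a frame into it
        view = np.frombuffer(buf.get_obj(), dtype=np.uint8).reshape(H, W, C)
        frame = np.zeros((H, W, C), dtype=np.uint8)  # stand-in for cap.read()
        with buf.get_lock():
            view[:] = frame

    def reader(buf):
        view = np.frombuffer(buf.get_obj(), dtype=np.uint8).reshape(H, W, C)
        with buf.get_lock():
            frame = view.copy()  # take a snapshot to hand to face_recognition
        print(frame.shape)

    if __name__ == '__main__':
        shared_buf = Array('B', H * W * C)  # one flat shared byte buffer
        p = Process(target=reader, args=(shared_buf,))
        p.start()
        writer(shared_buf)
        p.join()

The recognized name would have to travel the same way, for example through a multiprocessing.Queue, rather than through a global variable.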

How to make the speed calculation work in both directions (from top to bottom and from bottom to top)?

I am currently working on a project on human walking speed estimation using a Haar cascade. The program works as follows: the detected object passes two imaginary lines, and when it passes the second line the program shows the speed. The current program only handles objects walking from top to bottom, but I want it to catch the calculation from bottom to top too, so it can calculate in both directions.
here's the program:
import cv2
import time

cascade_src = 'haarcascade_fullbody.xml'
video_src = 'video-1.mp4'

# line a
ax1 = 15
ay = 225
ax2 = 600
# line b
bx1 = 15
by = 275
bx2 = 600
# car num
i = 1
start_time = time.time()

# video ....
cap = cv2.VideoCapture(video_src)
human_cascade = cv2.CascadeClassifier(cascade_src)

videoWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
videoHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('wisuda-14.mp4', fourcc, 25.0, (videoWidth, videoHeight))

def Speed_Cal(time):
    try:
        Speed = (9.144 * 3600) / (time * 1000)
        return Speed
    except ZeroDivisionError:
        print(5)

while True:
    ret, img = cap.read()
    if type(img) == type(None):
        break
    # blurring for more exact detection
    blurred = cv2.blur(img, ksize=(3, 3))
    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    human = human_cascade.detectMultiScale(gray, scaleFactor=1.04865, minNeighbors=6)
    # line a
    cv2.line(img, (ax1, ay), (ax2, ay), (255, 0, 0), 2)
    # line b
    cv2.line(img, (bx1, by), (bx2, by), (255, 0, 0), 2)
    for (x, y, w, h) in human:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        roi_blurred = blurred[x: x + h, y:y + w]
        roi_gray = gray[x: x + h, y:y + w]
        roi_img = img[x: x + h, y:y + w]
        cv2.circle(img, (int((x + x + w) / 2), int((y + y + h) / 2)), 2, (0, 255, 0), -1)
        # cv2.putText(img, "ID : " + str(i), (x, y - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
        while int(ay) == int((y + y + h) / 2):
            start_time = time.time()
            break
        while int(ay) <= int((y + y + h) / 2):
            if int(by) <= int((y + y + h) / 2) & int(by + 10) >= int((y + y + h) / 2):
                cv2.line(img, (bx1, by), (bx2, by), (0, 255, 0), 2)
                Speed = Speed_Cal(time.time() - start_time)
                print("ID Number " + str(i) + " Speed: " + str(int(Speed)))
                i = i + 1
                cv2.putText(img, "Speed: " + str(int(Speed)) + "km/jam", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
                break
            else:
                break
    out.write(img)
    cv2.imshow('video', img)
    cv2.imshow('Gray', gray)
    cv2.imshow('Blurr', blurred)
    if cv2.waitKey(33) == 27:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
Hope you guys can help!
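One direction-agnostic approach, as a sketch: instead of assuming line a is always crossed first, remember which line the centroid crossed first and compute the elapsed time when it crosses the other one. The helper below is self-contained and only covers the crossing logic for a single tracked person (tracking several people at once would additionally need per-person IDs, which the Haar detector alone does not give you):

    import time

    ay, by = 225, 275  # the two imaginary lines from the original code

    def crossed(line_y, prev_y, cur_y):
        # True if the centroid moved past line_y between two frames, in either direction
        return prev_y is not None and (prev_y - line_y) * (cur_y - line_y) <= 0

    prev_cy = None
    start_time = None
    first_line = None  # which of the two lines was crossed first

    def update(cy, now):
        # call once per frame with the centroid's y coordinate; returns the
        # elapsed crossing time when the second line is reached, else None
        global prev_cy, start_time, first_line
        elapsed = None
        for line_name, line_y in (('a', ay), ('b', by)):
            if crossed(line_y, prev_cy, cy):
                if first_line is None:
                    first_line, start_time = line_name, now
                elif first_line != line_name:
                    elapsed = now - start_time
                    first_line, start_time = None, None
        prev_cy = cy
        return elapsed

Inside the for (x, y, w, h) loop this would be called as elapsed = update(int((y + y + h) / 2), time.time()), and a non-None result fed to Speed_Cal() exactly as before; it behaves identically for top-to-bottom and bottom-to-top movement.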

OpenCV assertion failed with negative values

I am making a face liveness detection program with OpenCV and Keras, and I am stuck on the assertion error shown below. I have already tried all the solutions suggested on Stack Overflow and in GitHub issues, but none of them worked in my case.
import os
from collections import defaultdict

import cv2
import face_recognition
from tqdm import tqdm

# load_model(), predict(), isBlinking() and the cascade/eye detectors are
# defined elsewhere in the project; only part of init() was posted.

def init():
    video_capture = cv2.VideoCapture(0)
    model = load_model()
    print("[LOG] COLLECTING images.....")
    images = []
    for direc, _, files in tqdm(os.walk(dataset)):
        for file in files:
            if file.endswith("jpg"):
                images.append(os.path.join(direc, file))
    return model, face_detector, open_eyes_detector, left_eye__detector, right_eye_detector, video_capture, images

def process_and_encode(images):
    known_encodings = []
    known_names = []
    print("[LOG] Encoding faces....")
    for image_path in tqdm(images):
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        boxes = face_recognition.face_locations(image, model='hog')
        encoding = face_recognition.face_encodings(image, boxes)
        name = image_path.split(os.path.sep)[-2]
        if len(encoding) > 0:
            known_encodings.append(encoding[0])
            known_names.append(name)
    return {"encodings": known_encodings, "names": known_names}

def detect_and_display(model, video_capture, face_detector, open_eyes_detector, left_eye_detector, right_eye_detector,
                       data, eyes_detected):
    frame = video_capture.read()
    try:
        frame = cv2.resize(frame, (0, 0), fx=0.6, fy=0.6)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        faces = face_detector.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(50, 50),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        for (x, y, w, h) in faces:
            encoding = face_recognition.face_encodings(rgb, [(y, x + w, y + h, x)])
            matches = face_recognition.compare_faces(data["encodings"], encoding)
            name = "Unknown"
            if True in matches:
                matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                counts = {}
                for i in matchedIdxs:
                    name = data["names"][i]
                    counts[name] = counts.get(name, 0) + 1
                name = max(counts, key=counts.get)
            face = frame[y:y + h, x:x + w]
            gray_face = gray[y:y + h, x:x + w]
            eyes = []
            open_eyes_glasses = open_eyes_detector.detectMultiScale(
                gray_face,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.CASCADE_SCALE_IMAGE
            )
            if len(open_eyes_glasses) == 2:
                eyes_detected[name] += '1'
                for (ex, ey, ew, eh) in open_eyes_glasses:
                    cv2.rectangle(face, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            # otherwise try detecting eyes using the left and right eye detectors,
            # which can detect open and closed eyes
            else:
                # separate the face into left and right sides
                left_face = frame[y:y + h, x + int(w / 2):x + w]
                left_face_gray = gray[y:y + h, x + int(w / 2):x + w]
                right_face = frame[y:y + h, x:x + int(w / 2)]
                right_face_gray = gray[y:y + h, x:x + int(w / 2)]
                # Detect the left eye
                left_eye = left_eye_detector.detectMultiScale(
                    left_face_gray,
                    scaleFactor=1.1,
                    minNeighbors=5,
                    minSize=(30, 30),
                    flags=cv2.CASCADE_SCALE_IMAGE
                )
                # Detect the right eye
                right_eye = right_eye_detector.detectMultiScale(
                    right_face_gray,
                    scaleFactor=1.1,
                    minNeighbors=5,
                    minSize=(30, 30),
                    flags=cv2.CASCADE_SCALE_IMAGE
                )
                eye_status = '1'  # we suppose the eyes are open
                # For each eye, check whether the eye is closed.
                # If one is closed we conclude the eyes are closed.
                for (ex, ey, ew, eh) in right_eye:
                    color = (0, 255, 0)
                    pred = predict(right_face[ey:ey + eh, ex:ex + ew], model)
                    if pred == 'closed':
                        eye_status = '0'
                        color = (0, 0, 255)
                    cv2.rectangle(right_face, (ex, ey), (ex + ew, ey + eh), color, 2)
                for (ex, ey, ew, eh) in left_eye:
                    color = (0, 255, 0)
                    pred = predict(left_face[ey:ey + eh, ex:ex + ew], model)
                    if pred == 'closed':
                        eye_status = '0'
                        color = (0, 0, 255)
                    cv2.rectangle(left_face, (ex, ey), (ex + ew, ey + eh), color, 2)
                eyes_detected[name] += eye_status
            # Each time, we check if the person has blinked.
            # If yes, we display their name.
            if isBlinking(eyes_detected[name], 3):
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                # Display name
                y = y - 15 if y - 15 > 15 else y + 15
                cv2.putText(frame, name, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
        return frame
    except Exception as e:
        print(str(e))

if __name__ == "__main__":
    (model, face_detector, open_eyes_detector, left_eye_detector, right_eye_detector, video_capture, images) = init()
    data = process_and_encode(images)
    eyes_detected = defaultdict(str)
    while True:
        frame = detect_and_display(model, video_capture, face_detector, open_eyes_detector, left_eye_detector,
                                    right_eye_detector, data, eyes_detected)
        cv2.imshow("Face Liveness Detector", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
    video_capture.stop()
Error message:
Expected cv::UMat for argument 'src'
Traceback (most recent call last):
File "C:/Users/Saksham Dubey/PycharmProjects/FacePay/FaceLive.py", line 190, in
cv2.imshow("Face Liveness Detector", frame)
cv2.error: OpenCV(4.1.0) C:\projects\opencv-python\opencv\modules\highgui\src\window.cpp:352: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'
[ WARN:0] terminating async callback
That could probably be because you try to use imshow() when there has been no imwrite() before it. Not exactly a solution, but here is a working example. Take a look:
import cv2  # pip install opencv-python
import datetime
from cv2.data import haarcascades as hc
import requests

cap = cv2.VideoCapture(0)
faceCascade = cv2.CascadeClassifier("%s/haarcascade_frontalface_default.xml" % hc)
eye_cascade = cv2.CascadeClassifier('%s/haarcascade_eye.xml' % hc)
profile_cascade = cv2.CascadeClassifier('%s/haarcascade_profileface.xml' % hc)
fullbody_cascade = cv2.CascadeClassifier('%s/haarcascade_fullbody.xml' % hc)
smile_cascade = cv2.CascadeClassifier('%s/haarcascade_smile.xml' % hc)
eyesglasses_cascade = cv2.CascadeClassifier('%s/haarcascade_eye_tree_eyeglasses.xml' % hc)
mouth_cascade = cv2.CascadeClassifier('%s/haarcascade_mcs_mouth.xml' % hc)

filename = 'output/' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')

def recognizer():
    while True:
        ret, frame = cap.read()
        profile_count = recognize_profile(frame)
        face_count, eye_count = recognize_face(frame, True)
        cv2.imwrite('%s.png' % filename, frame)
        image = cv2.imread('%s.png' % filename)
        cv2.imshow('image', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()

def recognize_profile(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    profiles = profile_cascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
        # flags=cv2.CV_HAAR_SCALE_IMAGE
    )
    for (x, y, w, h) in profiles:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    return len(profiles)

def recognize_face(frame, recognize_eyes=None):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
        # flags=cv2.CV_HAAR_SCALE_IMAGE
    )
    eyes = []
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        if recognize_eyes:
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            # draw bounding boxes around detected features
            for (ex, ey, ew, eh) in eyes:
                eye_center = (ex + ew // 2, ey + eh // 2)
                radius = int(round((ew + eh) * 0.25))
                cv2.circle(roi_color, eye_center, radius, (0, 0, 255), 4)
    return len(faces), len(eyes)

def snapshot():
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        recognize_profile(frame)
        recognize_face(frame, True)
        cv2.imwrite('%s.png' % filename, frame)
        # if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    cap.release()
    cv2.destroyAllWindows()

def live_video_recognizer():
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    out = cv2.VideoWriter('%s.avi' % filename, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (frame_width, frame_height))
    while True:
        ret, frame = cap.read()
        recognize_profile(frame)
        recognize_face(frame, True)
        if ret is True:
            out.write(frame)
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    out.release()
    cv2.destroyAllWindows()

recognizer()
# snapshot()
# live_video_recognizer()
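On top of that example, a note on this specific assertion: (-215:Assertion failed) size.width>0 && size.height>0 in cv::imshow usually means imshow() was handed an empty or None image. In the question's code there are two plausible contributors: cv2.VideoCapture.read() returns a (status, image) tuple, so frame = video_capture.read() leaves frame as a tuple (which is what produces the "Expected cv::UMat for argument 'src'" print from the except branch), and once that except branch runs, detect_and_display() returns None, which then reaches imshow(). A minimal guard sketch:

    import cv2

    video_capture = cv2.VideoCapture(0)
    while True:
        ret, frame = video_capture.read()  # read() returns (status, image)
        if not ret or frame is None:
            continue  # skip empty frames instead of passing them to imshow()
        cv2.imshow("Face Liveness Detector", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
    cv2.destroyAllWindows()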

How to write a filename in UTF-8 in Python?

I am trying to save the frames into the "veriseti" folder, but the frame names do not come out right.
cam = cv2.VideoCapture(0)
harcascadePath = "haarcascade_frontalface_default.xml"
detector = cv2.CascadeClassifier(harcascadePath)
sampleNum = 0
while (True):
    ret, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # incrementing sample number
        sampleNum = sampleNum + 1
        # saving the captured face in the dataset folder TrainingImage
        cv2.imwrite("veriseti\ " + username1 + "." + id1 + '.' + str(sampleNum) + ".jpg", gray[y:y + h, x:x + w])*
Did you include all of your code? Your last line has an asterisk at the end, so it made me wonder. I don't know what username1 or id1 are, but I assume they are strings; you could wrap them in str(), which couldn't hurt.
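If the garbled names come from non-ASCII characters (Turkish letters, for example), note that cv2.imwrite() is known to mangle non-ASCII paths on Windows. A common workaround, sketched here under the assumption that username1 may contain such characters, is to encode the image in memory and let Python's own Unicode-aware file I/O write the bytes; os.path.join also avoids the stray backslash-space in "veriseti\ ":

    import os
    import cv2

    def imwrite_unicode(path, img):
        # encode to JPEG in memory, then write the bytes with plain Python I/O,
        # so the filename can contain any Unicode characters
        ok, buf = cv2.imencode(".jpg", img)
        if ok:
            with open(path, "wb") as f:
                f.write(buf.tobytes())
        return ok

    # usage mirroring the question's naming scheme (username1, id1, sampleNum
    # and the face crop come from the question's loop)
    path = os.path.join("veriseti", "%s.%s.%d.jpg" % (username1, id1, sampleNum))
    imwrite_unicode(path, gray[y:y + h, x:x + w])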

'int' object has no attribute '__getitem__'

I am working on some code that recognises a face and speaks the person's name. I am running it on a Raspberry Pi 3 with OpenCV 3.1 and Python 2.7. The code works fine on Windows, but on the Raspberry Pi it gives an error:
TypeError: 'int' object has no attribute '__getitem__'
for the line:
if prediction[1] < 100
Full code:
import io
import cv2, sys, numpy, os, pyttsx, time, picamera

haar_file = 'haarcascade_frontalface_default.xml'
datasets = 'datasets'

engine = pyttsx.init()
rate = engine.getProperty('rate')
engine.setProperty('rate', rate - 40)

print('Training...')
# Create a list of images and a list of corresponding names
(images, labels, names, id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(datasets):
    for subdir in dirs:
        names[id] = subdir
        subjectpath = os.path.join(datasets, subdir)
        for filename in os.listdir(subjectpath):
            path = subjectpath + '/' + filename
            label = id
            images.append(cv2.imread(path, 0))
            labels.append(int(label))
        id += 1

(width, height) = (130, 100)

# Create a Numpy array from the two lists above
(images, labels) = [numpy.array(lis) for lis in [images, labels]]

model = cv2.createLBPHFaceRecognizer()
model.train(images, labels)

# use the LBPH face recognizer on camera frames
face_cascade = cv2.CascadeClassifier(haar_file)
camera = picamera.PiCamera()
camera.resolution = (320, 240)

def getFrame():
    jpegBuffer = io.BytesIO()
    camera.capture(jpegBuffer, format='jpeg')
    buff = numpy.fromstring(jpegBuffer.getvalue(), dtype=numpy.uint8)
    return cv2.imdecode(buff, 1)

while True:
    im = getFrame()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (width, height))
        # Try to recognize the face
        prediction = model.predict(face_resize)
        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)
        if prediction[1] < 100:
            cv2.putText(im, '%s - %.0f' % (names[prediction[0]], prediction[1]), (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
            engine.say('Hello')
            engine.say(names[prediction[0]], prediction[1])
            time.sleep(3)
        else:
            cv2.putText(im, 'not recognized', (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
            engine.say('Hello, Newface')
            time.sleep(3)
        engine.runAndWait()
    cv2.imshow('OpenCV', im)
    key = cv2.waitKey(10)
    if key == 27:
        break
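For reference: on some OpenCV builds (notably several 2.x packages shipped for the Raspberry Pi) predict() returns just the integer label rather than a (label, confidence) tuple, which is exactly why prediction[1] raises 'int' object has no attribute '__getitem__' there while the Windows build works. A defensive sketch that handles both return shapes (the 0.0 fallback confidence is an assumption; no real confidence is available on those builds):

    prediction = model.predict(face_resize)
    if isinstance(prediction, tuple):
        label, confidence = prediction
    else:
        # older bindings return only the label
        label, confidence = prediction, 0.0
    if confidence < 100:
        cv2.putText(im, '%s - %.0f' % (names[label], confidence), (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))

This drops into the for (x, y, w, h) loop in place of the original prediction lines.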
