Tkinter window fails to update after using OpenCV2 (facial recognition) - Python

I'm working with both Tkinter and OpenCV 2 (facial recognition). I'm making a program where pressing a single button (from Tkinter) starts the recognition process, and when the user presses "Escape" the recognition process stops and a new label appears in the Tkinter window indicating who the subject is.
This last part isn't working. When I press "Escape" to finish the recognition process, the Tkinter window isn't updated with the new label.
The code is pretty large but here it is:
import cv2, sys, numpy, os
from Tkinter import *

global size
global fn_haar
global fn_dir
global Welcome

os.system("sudo modprobe bcm2835-v4l2")
size = 5
fn_haar = '/usr/share/opencv/haarcascade/haarcascade_frontalface_alt.xml'
fn_dir = '/home/pi/Control'

def recognition():
    (images, lables, names, id) = ([], [], {}, 0)
    subdir = "Subject 1"
    names[id] = subdir
    subjectpath = os.path.join(fn_dir, subdir)
    for filename in os.listdir(subjectpath):
        f_name, f_extension = os.path.splitext(filename)
        if(f_extension.lower() not in ['.png', '.jpg', '.jpeg', '.gif', '.pgm']):
            continue
        path = subjectpath + '/' + filename
        lable = id
        images.append(cv2.imread(path, 0))
        lables.append(int(lable))
    (im_width, im_height) = (112, 92)
    (images, lables) = [numpy.array(lis) for lis in [images, lables]]
    model = cv2.createLBPHFaceRecognizer()
    model.train(images, lables)
    haar_cascade = cv2.CascadeClassifier(fn_haar)
    webcam = cv2.VideoCapture(0)
    while True:
        rval = False
        while(not rval):
            (rval, frame) = webcam.read()
            if(not rval):
                print "Failed to open webcam. Trying again..."
        frame = cv2.flip(frame, 1, 0)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        mini = cv2.resize(gray, (int(gray.shape[1] / size), int(gray.shape[0] / size)))
        faces = haar_cascade.detectMultiScale(mini)
        for i in range(len(faces)):
            face_i = faces[i]
            (x, y, w, h) = [v * size for v in face_i]
            face = gray[y:y + h, x:x + w]
            face_resize = cv2.resize(face, (im_width, im_height))
            prediction = model.predict(face_resize)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
            if prediction[1] < 90:
                cv2.putText(frame, '%s - %.0f' % (names[prediction[0]], prediction[1]), (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
            else:
                cv2.putText(frame, 'Unknown', (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
        cv2.imshow('OpenCV', frame)
        cv2.moveWindow('OpenCV', 350, 120)
        key = cv2.waitKey(10)
        if key == 27:
            cv2.startWindowThread()
            cv2.destroyWindow('OpenCV')
            L1 = Label(Welcome, text="The subject is:" + names[prediction[0]]).pack()

Welcome = Tk()
Welcome.title("Welcome")
Welcome.geometry("300x100+1+1")
LabelWelcome = Label(Welcome, text="Welcome to the facial recognition program").pack()
B1 = Button(Welcome, text="START", command=recognition).pack()
Welcome.mainloop()
The program runs fine until I press "Escape"; then it doesn't update the window with the new label, and the window just freezes.
So, after doing some tests, I think the problem is that cv2.destroyWindow('OpenCV') destroys the mainloop or something like that.
My questions are:
Is that the problem? Does cv2.destroyWindow('OpenCV') destroy the mainloop too?
If that is the problem, what can I do about it?
If that's not the problem, do you know what it could be?
PS: I'm thinking that maybe, if the problem is with cv2.destroyWindow, I could just hide that window (OpenCV) instead of closing it (I don't know if this is possible).
PS2: I know that if this code works, I'm going to add a new label every time I run the recognition part, but I'm OK with that for the moment.
Thanks in advance.
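For reference, a minimal sketch of one likely fix, under the assumption that the freeze comes from recognition() never returning: the while True loop has no break, so the button callback never ends and mainloop() never regains control to redraw the window. Calling update() and breaking out on Escape would look like this (as in the original, prediction only exists if a face was detected at least once):

key = cv2.waitKey(10)
if key == 27:
    cv2.destroyWindow('OpenCV')
    webcam.release()
    Label(Welcome, text="The subject is: " + names[prediction[0]]).pack()
    Welcome.update()  # force Tk to redraw now; mainloop() is still blocked inside this callback
    break             # leave the loop so the callback returns control to mainloop()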

Related

How do I livestream the video I have captured using OpenCV on my webpage which is made using Django while also keeping the face recognition function

I am a new developer working at an internship. I am developing a web application which takes your attendance using face recognition and puts the entry in the database, using Django, Python, OpenCV and the face_recognition library for Python. The Login page looks like this: Login Page.
When I click the login button, my views code directs to an external Python function which opens a separate camera window that does the face recognition. The Camera Window.
What I want to do is display the camera feed directly in the browser window rather than in a separate window. I found some tutorials and tried to implement them, but with them I can only display the frames in the browser window; the face recognition functionality is lost.
The Views Code (views.py):
def camera_on(request):
    out = Popen('E:/Study/Internship/Capstone/Project/Web App/web_app/Basic.py', shell=True, stdout=PIPE)
    op = out.stdout.read().decode("utf-8")
    new_str = ''
    for i in range(len(op)-2):
        new_str += op[i]
    request.session['name'] = new_str
    return render(request, 'open_camera.html', {'data': new_str})
This code accesses the Basics.py file which opens the camera window, does the face recognition and gives the entry in the database.
The Basics.py code:
from datetime import datetime, date
import os
import django
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web_app.settings')
django.setup()
import cv2
import face_recognition
import numpy as np
from opencamera.models import Logs
import pymysql

conn = pymysql.connect(host='localhost', user='root', password='', database='projectdb')
cur = conn.cursor()
video_capture = cv2.VideoCapture(0)

cur.execute('SELECT Name, Photo FROM employee')
result = cur.fetchall()
known_face_names = []
known_face_encodings = []
for i in range(len(result)):
    known_face_names.append(result[i][0])
for i in range(len(result)):
    img = face_recognition.load_image_file(result[i][1])
    img_encode = face_recognition.face_encodings(img)[0]
    known_face_encodings.append(img_encode)

face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while process_this_frame < 15:
    nam = "N/A"
    ret, frame = video_capture.read()
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    rgb_small_frame = small_frame[:, :, ::-1]
    if process_this_frame:
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name1 = "N/A"
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name1 = known_face_names[best_match_index]
                nam = str(name1)
            face_names.append(name1)
    for (top, right, bottom, left), name1 in zip(face_locations, face_names):
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        cv2.rectangle(frame, (left - 10, top - 50), (right + 10, bottom + 50), (0, 255, 0), 2)
        cv2.rectangle(frame, (left - 10, bottom + 30), (right + 10, bottom + 50), (0, 255, 0), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name1, (left + 10, bottom + 45), font, 0.5, (0, 0, 0), 1)
    cv2.imshow('Attendance Cam', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    if nam != "N/A":
        process_this_frame += 1
    if process_this_frame == 13:
        print(nam)
        today = date.today()
        now = datetime.now()
        now = now.strftime('%H:%M:%S')
        sql = "SELECT Action FROM logs WHERE Name=%s"
        recs = (nam)
        cur.execute(sql, recs)
        status = cur.fetchall()
        stat = ''
        if len(status) != 0:
            length = len(status) - 1
            stat = status[length][0]
        if stat == 'Exit' or len(status) == 0:
            attd = 'INSERT INTO logs (Name, Action, Date, Time) VALUES (%s, "Entry", %s, %s)'
            rec = (nam, today, now)
            cur.execute(attd, rec)

video_capture.release()
cv2.destroyAllWindows()
This code opens the camera, puts a green box around your face if it's in the database, and then makes an entry in the logs if recognized.
I need help integrating this functionality in the live stream in the browser window.
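For reference, the usual pattern for showing the camera inside the page is an MJPEG stream served through Django's StreamingHttpResponse, with the recognition logic moved into the frame generator instead of a separate script. A minimal sketch, assuming a hypothetical gen_frames() generator and video_feed view (the recognition code from Basics.py would run where the comment indicates):

import cv2
from django.http import StreamingHttpResponse

def gen_frames(camera):
    # Yield JPEG-encoded frames in multipart format, one part per frame.
    while True:
        ret, frame = camera.read()
        if not ret:
            break
        # ... run the face_recognition logic on `frame` here ...
        ok, jpeg = cv2.imencode('.jpg', frame)
        if not ok:
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')

def video_feed(request):
    camera = cv2.VideoCapture(0)
    return StreamingHttpResponse(gen_frames(camera),
                                 content_type='multipart/x-mixed-replace; boundary=frame')

The template would then embed the stream with an ordinary image tag pointing at that view (e.g. <img src="{% url 'video_feed' %}">) instead of opening a separate OpenCV window.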

How to call a function only once and wait for 5 seconds before calling it again inside a while loop - Python

I'm trying to capture an image and play a song when a smile is detected on a live camera feed. I have created a separate thread to play the complete song and save the image using multi-threading, as frames were getting stuck while the song was playing.
The issue I am facing is that inside the infinite while loop, the multi-threaded function is being called multiple times per second, resulting in overlapping of the song and many images being saved.
Is there a better way to call that function once and wait a few seconds (5 seconds exactly) till the song is finished, without breaking/pausing the while loop?
Here is the code I have worked on:
import cv2
import datetime
import threading
from playsound import playsound

def play_save(save_img):
    print("Saving Image")
    time_stamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    file_name = f'selfie-{time_stamp}.png'
    cv2.imwrite(file_name, save_img)
    print("Playing Song")
    playsound("Happy_birthday.mp3")

def main():
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    smile_cascade = cv2.CascadeClassifier('haarcascade_smile.xml')
    cap = cv2.VideoCapture(0)
    while True:
        ret, img = cap.read()
        frame = img.copy()
        if not ret:
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = img[y:y + h, x:x + w]
            smile = smile_cascade.detectMultiScale(roi_gray, 1.3, 25)
            for x1, y1, w1, h1 in smile:
                cv2.rectangle(img, (x1, y1), (x1 + w1, y1 + h1), (0, 0, 255), 2)
                multithread = threading.Thread(target=play_save, args=(frame,))
                multithread.start()
        cv2.imshow('Smile Birthday', img)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
One idea is to use the UTC time to check it. You could make a check like this:
import time

time_function_done = 0  # start at 0 so the first check passes (None + 5 would raise a TypeError)
while True:
    # Do some shit
    if (time_function_done + 5) < time.time():
        time_function_done = time.time()
        # Do your function
    # Do some other shit
If you can't find a direct solution, make a workaround. Hope this helps :))
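Applied to the smile loop above, the same idea might look like this (last_played and COOLDOWN are illustrative names; the check goes where the thread is currently started):

import time
import threading

last_played = 0.0  # timestamp of the last trigger, defined before the while loop
COOLDOWN = 5.0     # seconds to wait before triggering again

# inside the `for x1, y1, w1, h1 in smile:` loop:
now = time.time()
if now - last_played >= COOLDOWN:
    last_played = now
    threading.Thread(target=play_save, args=(frame,)).start()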
import time
time.sleep(5) # Sleep for 5 seconds

LBPHFaceRecognizer predict function always returns 1

I am developing a facial recognition system, and for that I have selected the LBPH algorithm. I have collected sample images of the user and trained the model. The problem is that while recognizing the face, the predict() of LBPHFaceRecognizer always returns the same value for the label but a different value for confidence. Even if the face is unknown, it returns 1.
Technologies I have been using : Python 3.7.4, OpenCV 4.1.2
Code to collect sample image
import cv2
import numpy as np
import os
import requests
import time
from PIL import Image

class CollectFaceWebCam():
    def __init__(self, sid):
        self.studentId = sid
        # capture webcam
        self.LiveWebCamera = cv2.VideoCapture(0)
        # pre-trained dataset (haar-cascade classifier)
        self.faceDataSet = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')
        # sample image capture counter
        self.imgCounter = 0
        self.directoryName = 'sampleImgFolder'
        # check path
        if not os.path.exists(self.directoryName):
            os.makedirs(self.directoryName)
        if not os.path.exists(self.directoryName + '/' + self.studentId):
            os.makedirs(self.directoryName + '/' + self.studentId)

    def gen(self):
        while True:
            condition, frame = self.LiveWebCamera.read()  # capture frame
            img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # conversion to gray scale
            # face detection
            faces = self.faceDataSet.detectMultiScale(  # Detect face sizes
                img,
                scaleFactor=1.3,
                minNeighbors=5,
                minSize=(100, 100),
                flags=cv2.CASCADE_SCALE_IMAGE
            )
            for (x, y, w, h) in faces:
                end_crd_x = x + w  # face start coordinates
                end_crd_y = y + h  # face end coordinates
                # draw rectangle
                ## params: image, start plot, end plot, thickness, color
                cv2.rectangle(frame, (x, y), (end_crd_x, end_crd_y), (0, 255, 33), 1)
                # accepts multiple faces
                if len(faces) >= 0:
                    # face must be single in frame
                    if len(faces) == 1:
                        detectedImg = img[y:y + h, x:x + w]
                        # checking blurriness of image
                        blurValue = cv2.Laplacian(detectedImg, cv2.CV_64F).var()
                        # ignoring the blurry images
                        if not blurValue <= 60:
                            newImg = img[y:y + h, x:x + w]  # new img
                            # saving the detected faces
                            filename = '{}\{}\{}\{}_{}'.format(os.getcwd(), self.directoryName, self.studentId, self.studentId, self.imgCounter) + '.jpg'
                            cv2.imwrite(filename, newImg)
                            self.imgCounter += 1
                    else:
                        cv2.putText(frame, "Multiple Face not allowed", (50, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (237, 20, 5), thickness=2)
            cv2.putText(frame, "Collecting Sample", (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (250, 250, 250), thickness=3)
            cv2.putText(frame, "Image Count " + str(self.imgCounter), (50, 200), cv2.FONT_HERSHEY_SIMPLEX, 2, (237, 20, 5), thickness=2)
            cv2.imshow('Collecting Sample', frame)  # display frames
            k = cv2.waitKey(100) & 0xff  # capture when user presses 'esc'
            if k == 27:
                break
            elif self.imgCounter == 110:
                break
        self.LiveWebCamera.release()  # stop video capture
        cv2.destroyAllWindows()  # close all windows

class CleanSampleImages():
    def __init__(self):
        self.faceDataset = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')
        self.eyeDataset = cv2.CascadeClassifier('resources/haarcascade_eye.xml')
        self.targetFolder = 'sampleImgFolder'

    def checkFace(self):
        os.chdir(self.targetFolder)
        for directory in os.listdir():
            os.chdir(directory)
            for files in os.listdir():
                imagePath = '{}/{}'.format(os.getcwd(), files)
                imagePil = Image.open(imagePath).convert('L')
                imageNumpy = np.array(imagePil)  # conversion of normal image to numpy array
                # detect face
                faces = self.faceDataset.detectMultiScale(imageNumpy)
                # deleting image file if face is not found
                if not len(faces) == 1:
                    os.remove(files)
                    break
                for (x, y, w, h) in faces:
                    # detect eyes from the selected face
                    eyes = self.eyeDataset.detectMultiScale(imageNumpy)
                    if not len(eyes) > 0 and len(eyes) <= 2:
                        # deleting image file if eye count of image is less than 0 or more than 2
                        os.remove(files)
            os.chdir('../')
        os.chdir('../')

# id must be in X-X-ID e.g. a-b-342
t = CollectFaceWebCam('sa-t-1')
t.gen()
clean = CleanSampleImages()
clean.checkFace()
The above code consists of two classes, CollectFaceWebCam and CleanSampleImages. CollectFaceWebCam collects the sample images, and CleanSampleImages cleans the collected data: if an image does not contain a face, the file is deleted.
Code to Train images
import os
import cv2
import numpy as np
from PIL import Image

class Trainer():
    def __init__(self):
        self.recognizer = cv2.face.LBPHFaceRecognizer_create()
        self.targetImagesDirectory = "sampleImgFolder"
        self.dataset = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')

    def getImgwithId(self):
        sampleImage, sampleImageId = [], []
        filename = '{}\\{}'.format(os.getcwd(), self.targetImagesDirectory)
        if os.path.exists(filename):
            os.chdir(filename)
            print('current path is ' + os.getcwd())
            for f in os.listdir():
                imgPath = os.path.join(filename, f)
                os.chdir(imgPath)
                for file in os.listdir():
                    # retrieving id from filename (filename format: ta-s-ID_Filename.jpg)
                    id = file.split('_')
                    id = id[0].split('-')
                    id = id[2]
                    imageFilePath = imgPath + '\\' + file
                    imagePil = Image.open(imageFilePath).convert('L')
                    # conversion to numpy array
                    imageNp = np.array(imagePil, 'uint8')
                    faces = self.dataset.detectMultiScale(imageNp)
                    for (x, y, w, h) in faces:
                        sampleImage.append(imageNp)
                        sampleImageId.append(id)
                os.chdir('../')
            os.chdir('../')
        return sampleImage, np.array(sampleImageId, dtype=int)

    def train(self, data, label):
        try:
            self.recognizer.train(data, label)
            self.msg = 'Training Successful'
            print('writing')
            self.recognizer.write('date.yml')
            print('writing finished')
        except:
            self.msg = 'Core: Training Error'
            print('except')

tr = Trainer()
sampleFaces, sampleFaceId = tr.getImgwithId()
tr.train(sampleFaces, sampleFaceId)
Code to recognize face
import os
import cv2
import numpy as np
from PIL import Image

class Recognizer():
    def __init__(self):
        self.recognizer = cv2.face.LBPHFaceRecognizer_create()
        self.recognizer.read('date.yml')
        self.targetImagesDirectory = "sampleImgFolder"
        self.dataset = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')
        self.captureVideo = cv2.VideoCapture(0)
        self.font = cv2.FONT_HERSHEY_SIMPLEX = 2  # Font
        self.predictedUser = []

    def gen(self):
        while True:
            condition, frame = self.captureVideo.read()  # capture frame
            img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # conversion to gray scale
            # face detection
            faces = self.dataset.detectMultiScale(  # Detect face sizes
                img,
                scaleFactor=1.3,
                minNeighbors=5,
                minSize=(100, 100),
                flags=cv2.CASCADE_SCALE_IMAGE
            )
            for (x, y, w, h) in faces:
                end_crd_x = x + w  # face start coordinates
                end_crd_y = y + h  # face end coordinate
                # draw rectangle
                ## params: image, start plot, end plot, thickness, color
                cv2.rectangle(frame, (x, y), (end_crd_x, end_crd_y), (0, 255, 33), 1)
                predictUser, confidence = self.recognizer.predict(img[y:y+h, x:x+w])
                self.predictedUser.append(predictUser)
            cv2.imshow('test', frame)
            k = cv2.waitKey(100) & 0xff  # capture when user presses 'esc'
            if k == 27:
                break
        self.captureVideo.release()
        cv2.destroyAllWindows()

r = Recognizer()
r.gen()
print(r.predictedUser)
"predictUser, confidence = self.recognizer.predict(img[y:y+h,x:x+w])" line of code in Recognizer class always return same value for label. The output of code to recognize face is attached below:
I would love to know why and where the problem is, as My skills and research could not lead me to identification of problem.
It might be because of the data-collection process. I see you are using a cascade classifier multiple times; you could limit that. While checking for a face on the webcam, you could run the classifier once and store only the extracted/cropped faces. Also, during prediction, use the confidence as a threshold to limit false predictions.
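On the confidence point: LBPH's predict() returns a distance-like confidence where lower means a closer match, so unknown faces can be filtered with a cutoff. A sketch against the Recognizer loop above (the threshold of 80 is illustrative and needs tuning on your data; marking unknowns with -1 is an assumption):

predictUser, confidence = self.recognizer.predict(img[y:y+h, x:x+w])
if confidence < 80:
    # close enough to a trained face to accept the label
    self.predictedUser.append(predictUser)
else:
    # too far from every trained face; treat as unknown
    self.predictedUser.append(-1)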

Emulate keyboard pressing from one infinite loop to another in a GUI

I'm trying to build a GUI where the user controls the buttons by eye blinking. Basically, a short blink should emulate pressing the Tab key (to move from one button to another) and a long blink should emulate pressing the Space key (to enter the selected button).
The idea is that both processes, the window and the eyeblink detection system, run at the same time. And here I get all the problems: as they are both while loops, I cannot run them at the same time.
In the code I attach, I simplify this by opening the main window first and afterwards clicking the Start button to run the eyeblink system. With pyautogui.press() I intend to emulate the keyboard presses in the main window. However, when the eyeblink detection system is working, the main window is no longer accessible (you cannot press anything).
I have tried to invoke the blink function every frame instead of in an endless loop, but it's too slow and not able to properly detect the blinks. I've also tried multiprocessing, and 'Python quits unexpectedly' with no error shown, so I'm not sure what's going on (the code I used to try this is at the end, commented out). I also tried threading in a simple way: no error, but nothing appears either (again, the code is at the end, commented out).
Here I attach the link to the files (.mp3, .xml, .py):
https://drive.google.com/drive/folders/1U2uwHXzl2MtSTlAKw1L68L3xcRmelP2d?usp=sharing
I’ve just started using Python so my knowledge is not high, I’m running out of time and I’m stuck at this point… So any help would be welcome!! Thanks in advance ;)
macOS
Python 2.7
OpenCV 3.4
Tkinter (I just chose it because it is easy to handle, yet I'm open to change if it's necessary)
# Ventana+Blink (window + blink)
from Tkinter import *
import numpy as np
import cv2
# To emulate keyboard presses
import pyautogui
import time
# To play the sounds
import subprocess
# from Blink import funcion_blink
# from multiprocessing import Process
# import threading

def Onbutton_clicked():
    # while True:
    # Repeating the sound 2 times
    for x in range(0, 2):
        subprocess.call(['afplay', 'alarm2.mp3'])

def Onbutton2_clicked():
    # Repeating the sound 1 time
    for x in range(0, 1):
        subprocess.call(['afplay', 'sound.mp3'])

def execute_func1():
    print('enter\n')
    pyautogui.press('space')  # press the Space key
    for x in range(1, 2):
        subprocess.call(['afplay', 'unconvinced.mp3'])

def execute_func2():
    print('tab\n')
    pyautogui.press('tab')  # press the Tab key
    for x in range(1, 2):
        subprocess.call(['afplay', 'case-closed.mp3'])

def execute_func3():
    print('space\n')
    pyautogui.press('space')  # press the Space key
    for x in range(1, 2):
        subprocess.call(['afplay', 'cheerful.mp3'])

# ----- Eyeblink detection system -----
def funcion_blink():
    # XML classifiers should be in the folder with this file
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
    video_capture = cv2.VideoCapture(0)
    det = 0
    n = 0
    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(100, 100),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(
                roi_gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.CASCADE_SCALE_IMAGE
            )
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            face_img = gray[x:x + w, y:y + h]
            face_res = cv2.resize(face_img, (100, 100), interpolation=cv2.INTER_CUBIC)
            eye_reg = face_res[15:85, 20:50]
            cv2.rectangle(frame, (x+15*w/100, y + 2*h / 10), (x + w*85/100, y + (5 * h / 10)), (0, 0, 255), 2)
            if (det < 10):
                tmpl_eyes = eye_reg
                det = det + 1
                print('template acquired\n')
            elif (det == 10):
                # template matching
                wt, ht = tmpl_eyes.shape[::-1]
                # res_templ = cv2.matchTemplate(eye_reg, tmpl_eyes, cv2.TM_CCORR_NORMED)
                res_templ = cv2.matchTemplate(eye_reg, tmpl_eyes, cv2.TM_CCOEFF_NORMED)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res_templ)
                # print(max_val, n)
                # value 0.92 should be adapted to the conditions and camera position
                if (max_val > 0.90):
                    n = n + 1
                else:
                    if (n >= 12):
                        execute_func1()
                        # here should go the code that triggers some action when the person blinks??
                    elif (n >= 6):
                        execute_func2()
                    elif (n >= 3):
                        execute_func3()
                    n = 0
                print(max_val, n)
        # Display the resulting frame
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    cv2.destroyAllWindows()
    video_capture.release()

# ---- Main window ----
def main_window():
    root = Tk()
    root.geometry('700x700')
    # Create the buttons of the main window
    button = Button(root, text='alarm', command=Onbutton_clicked)
    button.bind('<Return>', Onbutton_clicked)
    button.pack()
    button2 = Button(root, text='extra', command=Onbutton2_clicked)
    button2.bind('<Return>', Onbutton2_clicked)
    button2.pack()
    # By pressing this button we start running the eyeblink detection system
    button3 = Button(root, text='Start', command=funcion_blink)
    button3.bind('<Button-1>', funcion_blink)
    button3.pack()
    # To maintain the window until you close it
    root.mainloop()

# Execute the main window
main_window()

# ---- Trials ----
# while True:
#     main_window()
#     funcion_blink()
# It just plays one function and when it finishes it plays the next one

# Multiprocessing
# if __name__ == '__main__':
#     Process(target=main_window).start()
#     Process(target=funcion_blink).start()
# PYTHON QUITS UNEXPECTEDLY

# Threading
# p1 = threading.Thread(target=main_window, args=())
# p2 = threading.Thread(target=funcion_blink, args=())
# p1.start()
# p2.start()
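For reference, a sketch of the threading variant that keeps Tk on the main thread and runs only the detection loop in a daemon thread started from the button. One caveat: cv2.imshow from a worker thread can itself be unreliable on macOS, so the OpenCV preview window may need to be dropped or kept on the main thread.

import threading

def start_blink_thread():
    # Run the OpenCV loop off the Tk thread so mainloop() keeps processing events
    t = threading.Thread(target=funcion_blink)
    t.daemon = True
    t.start()

button3 = Button(root, text='Start', command=start_blink_thread)
button3.pack()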

OpenCV Python: How to save the name of the recognized face from a face recognition program after the face is recognised?

Hi everyone, I'm working with OpenCV (Python) on a face recognition program. I have two files: one captures a new user's face and stores it under the name supplied by the user; the second recognizes the user using the webcam. Now, my concern is that the user is recognised correctly, but the name is only shown, not saved. How could I save the name of the recognised person so that it can be transferred or operated on?
#__author__ = 'ADMIN'
import cv2, sys, numpy, os
size = 4
fn_haar = 'haarcascade_frontalface_default.xml'
fn_dir = 'att_faces'
fn_name = "aditya"
path = os.path.join(fn_dir, fn_name)
if not os.path.isdir(path):
os.mkdir(path)
(im_width, im_height) = (112, 92)
haar_cascade = cv2.CascadeClassifier(fn_haar)
webcam = cv2.VideoCapture(0)
# The program loops until it has 20 images of the face.
count = 0
while count < 20:
(rval, im) = webcam.read()
im = cv2.flip(im, 1, 0)
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
mini = cv2.resize(gray, (gray.shape[1] / size, gray.shape[0] / size))
faces = haar_cascade.detectMultiScale(mini)
faces = sorted(faces, key=lambda x: x[3])
if faces:
face_i = faces[0]
(x, y, w, h) = [v * size for v in face_i]
face = gray[y:y + h, x:x + w]
face_resize = cv2.resize(face, (im_width, im_height))
pin=sorted([int(n[:n.find('.')]) for n in os.listdir(path)
if n[0]!='.' ]+[0])[-1] + 1
cv2.imwrite('%s/%s.png' % (path, pin), face_resize)
cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)
cv2.putText(im, fn_name, (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN,
1,(0, 255, 0))
count += 1
cv2.imshow('OpenCV', im)
key = cv2.waitKey(10)
if key == 27:
break
Code for face recognition from the dataset
__author__ = 'ADMIN'
import cv2, sys, numpy, os
size = 4
fn_haar = 'haarcascade_frontalface_default.xml'
fn_dir = 'att_faces'

# Part 1: Create fisherRecognizer
print('Training...')
# Create a list of images and a list of corresponding names
(images, lables, names, id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(fn_dir):
    for subdir in dirs:
        names[id] = subdir
        subjectpath = os.path.join(fn_dir, subdir)
        for filename in os.listdir(subjectpath):
            path = subjectpath + '/' + filename
            lable = id
            images.append(cv2.imread(path, 0))
            lables.append(int(lable))
        id += 1
(im_width, im_height) = (112, 92)

# Create a Numpy array from the two lists above
(images, lables) = [numpy.array(lis) for lis in [images, lables]]

# OpenCV trains a model from the images
# NOTE FOR OpenCV2: remove '.face'
model = cv2.createFisherFaceRecognizer()
model.train(images, lables)

# Part 2: Use fisherRecognizer on camera stream
haar_cascade = cv2.CascadeClassifier(fn_haar)
webcam = cv2.VideoCapture(0)
while True:
    (rval, frame) = webcam.read()
    frame = cv2.flip(frame, 1, 0)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    mini = cv2.resize(gray, (gray.shape[1] / size, gray.shape[0] / size))
    faces = haar_cascade.detectMultiScale(mini)
    for i in range(len(faces)):
        face_i = faces[i]
        (x, y, w, h) = [v * size for v in face_i]
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (im_width, im_height))
        # Try to recognize the face
        prediction = model.predict(face_resize)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
        # Write the name of the recognized face
        cv2.putText(frame,
                    '%s - %.0f' % (names[prediction[0]], prediction[1]),
                    (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
    cv2.imshow('OpenCV', frame)
    key = cv2.waitKey(10)
    if key == 27:
        break
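One minimal way to keep the name around, sketched against the recognition loop above (recognized_log and the file name recognized.txt are illustrative):

# before the while loop:
recognized_log = []

# inside the `for i in range(len(faces)):` loop, after model.predict():
recognized_name = names[prediction[0]]
recognized_log.append(recognized_name)  # keep it in memory for later operations
with open('recognized.txt', 'a') as f:  # or persist it to a file
    f.write(recognized_name + '\n')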
This is my code, where I am not using any SQL server.
I am encoding images from a folder, and it shows the recognized face with the name of the saved image: if the image is saved as abc.jpg, it will detect the face during live streaming and show abc.jpg.
Here is my code:
from PIL import Image
import face_recognition
import cv2
import os

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)

known_face_encodings = []
known_face_names = []
user_appeared = []

root = "/home/erp-next/open cv/dataset/"
for filename in os.listdir(root):
    if filename.endswith('.jpg' or '.png'):
        try:
            print(filename)
            path = os.path.join(root, filename)
            filter_image = face_recognition.load_image_file(path)
            filter_face_encoding = face_recognition.face_encodings(filter_image)
            known_face_encodings.append(filter_face_encoding[0])
            known_face_names.append(filename)
        except:
            print("An exception occurred : " + filename)
#print(known_face_encodings)
print(known_face_names)

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
# process_this_frame = True

def face():
    while True:
        process_this_frame = True
        # Grab a single frame of video
        ret, frame = video_capture.read()
        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]
        k = cv2.waitKey(1)
        if k % 256 == 27:
            # ESC pressed
            print("Escape hit, closing...")
            break
        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
                name = "Unknown"
                # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = known_face_names[first_match_index]
                    print(name)
                face_names.append(name)
        process_this_frame = not process_this_frame
        # Display the results
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        # Display the resulting image
        cv2.imshow('Video', frame)
        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()

face()
I am also using the face_recognition library to encode and detect faces.
Thanks.
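A small refinement on the loading loop above (a sketch): filename.endswith('.jpg' or '.png') only ever tests '.jpg', because the expression '.jpg' or '.png' evaluates to '.jpg'; endswith accepts a tuple of suffixes instead. Stripping the extension also makes the label read abc rather than abc.jpg:

import os

valid_ext = ('.jpg', '.png')
for filename in os.listdir(root):
    if filename.lower().endswith(valid_ext):  # endswith accepts a tuple of suffixes
        name, _ = os.path.splitext(filename)  # "abc.jpg" -> "abc"
        known_face_names.append(name)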
