I'm trying to put face recognition code that uses OpenCV on the left-hand side of a tkinter window. By doing this I wish to leave the right-hand side of the window free so I can output text, e.g. when a face is detected the program will display "Name: Present". I am new to both Tkinter and OpenCV and I can't seem to find a straight answer online. Any help is appreciated, thanks!
Here is my code below:
import face_recognition
import cv2
import numpy as np
import tkinter
from tkinter import *
import PySimpleGUI as sg
import xlsxwriter
import os
from PIL import ImageTk,Image
import datetime  # the redundant `from datetime import datetime;` was removed; the code below uses `datetime.datetime`
# Defines time
now = datetime.datetime.now().time()
# Setup for period segment of spreadsheet name
if now.hour < 9:
    name = "HomeRoom "
elif now.hour == 9 and now.minute <= 50:  # `time` objects have `.minute`, not `.min`
    name = "Period1 "
elif now.hour == 10 and now.minute <= 40:
    name = "Period2 "
elif now.hour == 11 and now.minute <= 50:
    name = "Period3 "
elif now.hour == 12 and now.minute <= 40:
    name = "Period4 "
elif now.hour == 14 and now.minute <= 10:
    name = "Period5 "
elif now.hour <= 15:
    name = "Period6 "
else:
    name = "Testing "
# Webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# flag so the attendance message is only printed/written once
Printed = False
# Defines today's date  # day/month/year-HourAM/PM
todays_date = str(datetime.datetime.now().strftime("%d-%m-%Y %I%p"))
#Sets up spreadsheet
workbook = xlsxwriter.Workbook(name + todays_date +'.xlsx')
worksheet = workbook.add_worksheet()
worksheet.write('A1', 'Name')
worksheet.write('B1', 'Attendance')
worksheet.write('A6', 'Jordan Terzian')
worksheet.write('B6', 'Absent')
worksheet.write('A5', 'Daniel Pearce')
worksheet.write('B5', 'Absent')
worksheet.write('A4', 'Ewan Krall')
worksheet.write('B4', 'Absent')
worksheet.write('A3', 'Norman Brosow')
worksheet.write('B3', 'Absent')
worksheet.write('A2', 'Mitchell Benson')
worksheet.write('B2', 'Absent')
# classmates
jordan_image = face_recognition.load_image_file("jordan.jpg")
jordan_face_encoding = face_recognition.face_encodings(jordan_image)[0]
daniel_image = face_recognition.load_image_file("daniel.jpg")
daniel_face_encoding = face_recognition.face_encodings(daniel_image)[0]
ewan_image = face_recognition.load_image_file("ewan.jpg")
ewan_face_encoding = face_recognition.face_encodings(ewan_image)[0]
norman_image = face_recognition.load_image_file("norman.jpg")
norman_face_encoding = face_recognition.face_encodings(norman_image)[0]
mitch_image = face_recognition.load_image_file("mitch.jpg")
mitch_face_encoding = face_recognition.face_encodings(mitch_image)[0]
# Create arrays of known face encodings and their names
known_face_encodings = [
    jordan_face_encoding,
    daniel_face_encoding,
    ewan_face_encoding,
    norman_face_encoding,
    mitch_face_encoding,
]
known_face_names = [
    "Jordan Terzian",
    "Daniel Pearce",
    "Ewan Krall",
    "Norman Brosow",
    "Mitchell Benson",
]
# Initialize variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]
    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            # use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]
            face_names.append(name)
    process_this_frame = not process_this_frame
    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        # Writes to spreadsheet and GUI
        if name == "Jordan Terzian" and not Printed:
            print("Jordan Terzian is Present")
            Printed = True
            worksheet.write('B6', 'Present')
        elif name == "Daniel Pearce" and not Printed:
            print("Daniel Pearce is Present")
            Printed = True
            worksheet.write('B5', 'Present')
        elif name == "Ewan Krall" and not Printed:
            print("Ewan Krall is Present")
            Printed = True
            worksheet.write('B4', 'Present')
        elif name == "Norman Brosow" and not Printed:
            print("Norman Brosow is Present")
            Printed = True
            worksheet.write('B3', 'Present')
        elif name == "Mitchell Benson" and not Printed:
            print("Mitchell Benson is Present")
            Printed = True
            worksheet.write('B2', 'Present')  # was misspelled `worskheet`
    # Display the resulting image
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release handle to the webcam, closes webcam
video_capture.release()
cv2.destroyAllWindows()
workbook.close()
It is a simple example which grabs a frame from cv2 and pastes it into a PhotoImage displayed on a Canvas. It uses after() to run the function update_frame() periodically, so it doesn't block root.mainloop(), which has to run all the time.
You will have to move the code from your while True loop into update_frame(), without the while True itself.
import tkinter as tk
from PIL import Image, ImageTk
import cv2

# --- functions ---

def update_frame():
    ret, frame = cap.read()
    image = Image.fromarray(frame)
    photo.paste(image)
    #description['text'] = 'new text'
    root.after(10, update_frame)  # update it again after 10ms

# --- main ---

cap = cv2.VideoCapture(0)

# get first frame
ret, frame = cap.read()

# - GUI -

root = tk.Tk()

image = Image.fromarray(frame)
photo = ImageTk.PhotoImage(image)  # it has to be after `tk.Tk()`

canvas = tk.Canvas(root, width=photo.width(), height=photo.height())
canvas.pack(side='left', fill='both', expand=True)
canvas.create_image((0, 0), image=photo, anchor='nw')

description = tk.Label(root, text="Place for description")
description.pack(side='right')

# - start -

update_frame()  # update it first time

root.mainloop()  # start program - this loop runs all time

# - after close -

cap.release()
BTW: I have an example with buttons Play, Stop, Save Image: python-examples/cv2/tkinter-CV
EDIT: I can't test it, but it could be something like this.
import face_recognition
import cv2
import numpy as np
import tkinter as tk   # aliased as `tk` because the GUI code below uses the `tk.` prefix
#from tkinter import *  # PEP8: `import *` is not preferred
#import PySimpleGUI as sg
import xlsxwriter
import os
from PIL import ImageTk, Image
#from datetime import datetime
import datetime
# --- functions ---

def process_frame():
    global process_this_frame
    global face_locations, face_names  # kept between frames when processing is skipped
    global Printed                     # assigned below, so it must be declared global

    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]

    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            # use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]
            face_names.append(name)

    process_this_frame = not process_this_frame

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        # Writes to spreadsheet and GUI
        if name == "Jordan Terzian" and not Printed:
            print("Jordan Terzian is Present")
            Printed = True
            worksheet.write('B6', 'Present')
        elif name == "Daniel Pearce" and not Printed:
            print("Daniel Pearce is Present")
            Printed = True
            worksheet.write('B5', 'Present')
        elif name == "Ewan Krall" and not Printed:
            print("Ewan Krall is Present")
            Printed = True
            worksheet.write('B4', 'Present')
        elif name == "Norman Brosow" and not Printed:
            print("Norman Brosow is Present")
            Printed = True
            worksheet.write('B3', 'Present')
        elif name == "Mitchell Benson" and not Printed:
            print("Mitchell Benson is Present")
            Printed = True
            worksheet.write('B2', 'Present')  # was misspelled `worskheet`
        # show the recognized name on the right-hand side label
        description['text'] = name

    image = Image.fromarray(frame)
    photo.paste(image)

    root.after(40, process_frame)  # update it again after 40ms - it gives 25 FPS (1000ms/40ms = 25)
# --- init ---

# Defines time
now = datetime.datetime.now().time()
# Setup for period segment of spreadsheet name
if now.hour < 9:
    name = "HomeRoom "
elif now.hour == 9 and now.minute <= 50:  # `time` objects have `.minute`, not `.min`
    name = "Period1 "
elif now.hour == 10 and now.minute <= 40:
    name = "Period2 "
elif now.hour == 11 and now.minute <= 50:
    name = "Period3 "
elif now.hour == 12 and now.minute <= 40:
    name = "Period4 "
elif now.hour == 14 and now.minute <= 10:
    name = "Period5 "
elif now.hour <= 15:
    name = "Period6 "
else:
    name = "Testing "
# flag so the attendance message is only printed/written once
Printed = False
# Defines today's date  # day/month/year-HourAM/PM
todays_date = str(datetime.datetime.now().strftime("%d-%m-%Y %I%p"))
#Sets up spreadsheet
workbook = xlsxwriter.Workbook(name + todays_date +'.xlsx')
worksheet = workbook.add_worksheet()
worksheet.write('A1', 'Name')
worksheet.write('B1', 'Attendance')
worksheet.write('A6', 'Jordan Terzian')
worksheet.write('B6', 'Absent')
worksheet.write('A5', 'Daniel Pearce')
worksheet.write('B5', 'Absent')
worksheet.write('A4', 'Ewan Krall')
worksheet.write('B4', 'Absent')
worksheet.write('A3', 'Norman Brosow')
worksheet.write('B3', 'Absent')
worksheet.write('A2', 'Mitchell Benson')
worksheet.write('B2', 'Absent')
# classmates
#jordan_image = face_recognition.load_image_file("jordan.jpg")
#jordan_face_encoding = face_recognition.face_encodings(jordan_image)[0]
#
#daniel_image = face_recognition.load_image_file("daniel.jpg")
#daniel_face_encoding = face_recognition.face_encodings(daniel_image)[0]
#
#ewan_image = face_recognition.load_image_file("ewan.jpg")
#ewan_face_encoding = face_recognition.face_encodings(ewan_image)[0]
#
#norman_image = face_recognition.load_image_file("norman.jpg")
#norman_face_encoding = face_recognition.face_encodings(norman_image)[0]
#
#mitch_image = face_recognition.load_image_file("mitch.jpg")
#mitch_face_encoding = face_recognition.face_encodings(mitch_image)[0]
# Create arrays of known face encodings and their names
#known_face_encodings = [
# jordan_face_encoding,
# daniel_face_encoding,
# ewan_face_encoding,
# norman_face_encoding,
# mitch_face_encoding,
#]
filenames = [
    "jordan.jpg",
    "daniel.jpg",
    "ewan.jpg",
    "norman.jpg",
    "mitch.jpg",
]

known_face_encodings = []

for filename in filenames:  # renamed from `name` so the spreadsheet name above isn't clobbered
    image = face_recognition.load_image_file(filename)
    face_encoding = face_recognition.face_encodings(image)[0]
    known_face_encodings.append(face_encoding)
known_face_names = [
    "Jordan Terzian",
    "Daniel Pearce",
    "Ewan Krall",
    "Norman Brosow",
    "Mitchell Benson",
]
# Initialize variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
# --- main ---
# Webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# get first frame to get size
ret, frame = video_capture.read()  # the capture object here is `video_capture`, not `cap`
# - GUI -
root = tk.Tk()
image = Image.fromarray(frame)
photo = ImageTk.PhotoImage(image) # it has to be after `tk.Tk()`
canvas = tk.Canvas(root, width=photo.width(), height=photo.height())
canvas.pack(side='left', fill='both', expand=True)
canvas.create_image((0,0), image=photo, anchor='nw')
description = tk.Label(root, text="Place for description")
description.pack(side='right')
# - start -
process_frame() # update it first time
root.mainloop() # start program - this loop runs all time
# --- end ---
# Release handle to the webcam, Closes webcam
video_capture.release()
#cv2.destroyAllWindows()
workbook.close()
Related
I am a new developer working at an internship. I am developing a web application which takes your attendance using face recognition and puts the entry in the database, using Django, Python, OpenCV and the face_recognition library of Python. The Login page looks like this: Login Page.
When I click the login button, my views program directs to an external python function which opens a separate camera window which does the face recognition. The Camera Window.
What I want to do is display the camera directly in the browser window rather than as a separate window. I found some tutorials and also tried to implement them but using them I can only display the frames in the browser window while the functionality of face recognition is lost.
The Views Code (views.py):
from subprocess import Popen, PIPE   # needed for Popen/PIPE used below
from django.shortcuts import render

def camera_on(request):
    out = Popen('E:/Study/Internship/Capstone/Project/Web App/web_app/Basic.py', shell=True, stdout=PIPE)
    op = out.stdout.read().decode("utf-8")
    new_str = ''
    for i in range(len(op) - 2):
        new_str += op[i]
    request.session['name'] = new_str
    return render(request, 'open_camera.html', {'data': new_str})
This code accesses the Basics.py file, which opens the camera window, does the face recognition and makes the entry in the database.
The Basics.py code:
from datetime import datetime, date
import os

import django
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web_app.settings')
django.setup()

import cv2
import face_recognition
import numpy as np
from opencamera.models import Logs
import pymysql

conn = pymysql.connect(host='localhost', user='root', password='', database='projectdb')
cur = conn.cursor()

video_capture = cv2.VideoCapture(0)

cur.execute('SELECT Name, Photo FROM employee')
result = cur.fetchall()

known_face_names = []
known_face_encodings = []

for i in range(len(result)):
    known_face_names.append(result[i][0])

for i in range(len(result)):
    img = face_recognition.load_image_file(result[i][1])
    img_encode = face_recognition.face_encodings(img)[0]
    known_face_encodings.append(img_encode)

face_locations = []
face_encodings = []
face_names = []
process_this_frame = True  # also used as a counter of successful recognitions (True == 1)

while process_this_frame < 15:
    nam = "N/A"
    ret, frame = video_capture.read()
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    rgb_small_frame = small_frame[:, :, ::-1]
    if process_this_frame:
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name1 = "N/A"
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name1 = known_face_names[best_match_index]
                nam = str(name1)
            face_names.append(name1)
    for (top, right, bottom, left), name1 in zip(face_locations, face_names):
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        cv2.rectangle(frame, (left - 10, top - 50), (right + 10, bottom + 50), (0, 255, 0), 2)
        cv2.rectangle(frame, (left - 10, bottom + 30), (right + 10, bottom + 50), (0, 255, 0), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name1, (left + 10, bottom + 45), font, 0.5, (0, 0, 0), 1)
    cv2.imshow('Attendance Cam', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    if nam != "N/A":
        process_this_frame += 1
    if process_this_frame == 13:
        print(nam)
        today = date.today()
        now = datetime.now()
        now = now.strftime('%H:%M:%S')
        sql = "SELECT Action FROM logs WHERE Name=%s"
        recs = (nam,)  # query parameters should be a tuple
        cur.execute(sql, recs)
        status = cur.fetchall()
        stat = ''
        if len(status) != 0:
            length = len(status) - 1
            stat = status[length][0]
        if stat == 'Exit' or len(status) == 0:
            attd = 'INSERT INTO logs (Name, Action, Date, Time) VALUES (%s, "Entry", %s, %s)'
            rec = (nam, today, now)
            cur.execute(attd, rec)
            conn.commit()  # pymysql does not autocommit by default

video_capture.release()
cv2.destroyAllWindows()
This code opens the camera, puts a green box around your face if it's in the database, and then makes an entry in the logs if recognized.
I need help integrating this functionality in the live stream in the browser window.
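One common way to keep the recognition logic while showing the camera in the browser is to serve the annotated frames as an MJPEG stream from a Django view: run the loop from Basics.py inside a generator, JPEG-encode each frame, and yield it in a multipart/x-mixed-replace response that an img tag can display. A minimal sketch; the view name, the URL wiring and the placement of the recognition code are all assumptions:

# views.py - minimal sketch, not tested against this project
import cv2
from django.http import StreamingHttpResponse

def gen_frames(camera):
    # the face_recognition loop from Basics.py would run here, drawing
    # the rectangles/labels on `frame` before it is encoded
    while True:
        ret, frame = camera.read()
        if not ret:
            break
        ok, jpeg = cv2.imencode('.jpg', frame)  # compress the annotated frame to JPEG bytes
        if not ok:
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')

def video_feed(request):  # hypothetical view name; map it in urls.py
    camera = cv2.VideoCapture(0)
    return StreamingHttpResponse(gen_frames(camera),
                                 content_type='multipart/x-mixed-replace; boundary=frame')

The template then embeds the stream with something like an img tag pointing at /video_feed/, so the recognition and the database writes stay in Python while the browser only renders JPEGs.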
How can I integrate a pickled face_data.dat into the face_recognition default example that shows the real-time webcam view?
import face_recognition
import cv2
import numpy as np
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# Load a sample picture and learn how to recognize it.
obama_image = face_recognition.load_image_file("obama.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
# Load a second sample picture and learn how to recognize it.
biden_image = face_recognition.load_image_file("biden.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
# Create arrays of known face encodings and their names
known_face_encodings = [
    obama_face_encoding,
    biden_face_encoding
]
known_face_names = [
    "Barack Obama",
    "Joe Biden"
]

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_frame = frame[:, :, ::-1]
    # Find all the faces and face encodings in the frame of video
    face_locations = face_recognition.face_locations(rgb_frame)
    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
    # Loop through each face in this frame of video
    for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
        # See if the face is a match for the known face(s)
        matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
        name = "Unknown"
        # If a match was found in known_face_encodings, just use the first one.
        # if True in matches:
        #     first_match_index = matches.index(True)
        #     name = known_face_names[first_match_index]
        # Or instead, use the known face with the smallest distance to the new face
        face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
            name = known_face_names[best_match_index]
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    # Display the resulting image
    cv2.imshow('Video', frame)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
I've pickled the images and tried modifying the program as follows, but it's showing the wrong names.
import face_recognition
import cv2
import numpy as np
import pickle
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# Load face encodings
with open('dataset_faces.dat', 'rb') as f:
    all_face_encodings = pickle.load(f)

# Grab the list of names and the list of encodings
face_names = list(all_face_encodings.keys())
face_encodings = np.array(list(all_face_encodings.values()))

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_frame = frame[:, :, ::-1]
    # Find all the faces and face encodings in the frame of video
    face_locations = face_recognition.face_locations(rgb_frame)
    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
    # Loop through each face in this frame of video
    for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
        # See if the face is a match for the known face(s)
        matches = face_recognition.compare_faces(face_encodings, face_encoding)
        name = "Unknown"
        # If a match was found in known_face_encodings, just use the first one.
        if True in matches:
            first_match_index = matches.index(True)
            name = face_names[first_match_index]
        # Or instead, use the known face with the smallest distance to the new face
        # face_distances = face_recognition.face_distance(face_encodings, face_encoding)
        # best_match_index = np.argmin(face_distances)
        # if matches[best_match_index]:
        #     name = face_names[best_match_index]
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    # Display the resulting image
    cv2.imshow('Video', frame)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
I've checked the pickled data using this script; it shows the correct output for the relevant image.
import face_recognition
import pickle
import numpy as np
# Load face encodings
with open('dataset_faces.dat', 'rb') as f:
    all_face_encodings = pickle.load(f)

# Grab the list of names and the list of encodings
face_names = list(all_face_encodings.keys())
face_encodings = np.array(list(all_face_encodings.values()))
# Try comparing an unknown image
unknown_image = face_recognition.load_image_file("obama.jpg")
unknown_face = face_recognition.face_encodings(unknown_image)
result = face_recognition.compare_faces(face_encodings, unknown_face)
# Print the result as a list of names with True/False
names_with_result = list(zip(face_names, result))
print(names_with_result)
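For reference, a likely cause of the wrong names in the modified webcam program above: inside the loop, face_encodings = face_recognition.face_encodings(rgb_frame, face_locations) overwrites the face_encodings array loaded from the pickle, so compare_faces ends up comparing each face against the current frame instead of the known people. A minimal sketch of the load step with non-clashing names (identifiers are illustrative):

import pickle
import numpy as np

with open('dataset_faces.dat', 'rb') as f:
    all_face_encodings = pickle.load(f)

# keep the pickled data under names the per-frame variables never reuse
known_face_names = list(all_face_encodings.keys())
known_face_encodings = np.array(list(all_face_encodings.values()))

# then, inside the while loop, compare against the known encodings:
#   matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
#   face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)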
I'm using the face_recognition package for face recognition. The input image file is base64 encoded, and I'm trying to decode the data and call
face_recognition.face_encodings(decodedBase64Data)
I also have a list of encoded face data to compare against. The problem is that I need to convert the base64 data into an image that face_encodings can accept.
I tried:
decodedData = base64.b64decode(data)
encodeFace = np.frombuffer(decodedData, np.uint8)
and passed encodeFace to face_recognition.face_encodings(...), but I get the error "Unsupported image type, must be 8bit gray or RGB image."
How can I convert the base64 data into an image compatible with face_encodings?
Edit: code attached for reference.

import base64
import numpy as np
import json
import face_recognition as fr

with open('Face_Encoding_Data.json') as f:
    EncodeJsonData = json.load(f)

personName = list(EncodeJsonData.keys())
encodedImgList = list(EncodeJsonData.values())

# EncodeJsonData = {"name1": [encoded data 1], "name2": [encoded data 2]}
# each encoding is a 128-value vector

base64Data = """ base64 encoded image with face """
encodeFace = np.frombuffer(base64.b64decode(base64Data), np.uint8)
matches = fr.compare_faces(encodedImgList, encodeFace, tolerance=0.5)
faceDist = fr.face_distance(encodedImgList, encodeFace)
matchIndex = np.argmin(faceDist)
name = "unknown"
if matches[matchIndex]:
    name = personName[matchIndex]
print(name)
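For what it's worth, the error above usually means the bytes were never decoded into an image: np.frombuffer only yields a flat uint8 array, while face_encodings expects an HxWx3 RGB array. A minimal sketch of the missing decode step using cv2.imdecode, assuming base64Data holds a base64-encoded JPEG or PNG:

import base64
import cv2
import numpy as np
import face_recognition

raw = base64.b64decode(base64Data)            # base64 text -> raw image file bytes
buf = np.frombuffer(raw, np.uint8)            # bytes -> flat uint8 buffer
bgr = cv2.imdecode(buf, cv2.IMREAD_COLOR)     # decode JPEG/PNG -> HxWx3 BGR image
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)    # face_recognition expects RGB
face_encoding = face_recognition.face_encodings(rgb)[0]  # assumes one face in the image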
Please share the code for a better understanding of the problem, or you can use the code below as a reference:
import cv2
import os
import numpy as np
import face_recognition  # this import was missing from the original listing
from PIL import Image
import time

cap = cv2.VideoCapture(1)
count = 1
path = 'dataset2'
img = []
imagepath = [os.path.join(path, f) for f in os.listdir(path)]
c = len(imagepath)

# access each of the images from my folder of images
while count <= c:
    image = face_recognition.load_image_file("dataset2/vrushang." + str(count) + ".jpg")
    # build the list of encodings to compare against the face detected at runtime
    img.append(face_recognition.face_encodings(image)[0])
    time.sleep(1)
    count = count + 1

face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

img2 = img[0]
print("this is img2")  # prints converted to Python 3 syntax
print(img)

while True:
    ret, frame = cap.read()
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    if process_this_frame:
        face_locations = face_recognition.face_locations(small_frame)
        face_encodings = face_recognition.face_encodings(small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            match = face_recognition.compare_faces(img, face_encoding)
            print(match)
            if match[0] == True:
                name = "vrushang"
            elif match[1] == True:
                name = "hitu"
            elif match[3] == True:
                name = "sardar patel"
            elif match[2] == True:
                name = "yaksh"
            else:
                name = "unknown"
            face_names.append(name)
    process_this_frame = not process_this_frame
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) == 27:  # Esc to quit
        break
Try this code to load a base64-encoded image for the face_recognition package:

import urllib.request as ur
import face_recognition as fr

image = 'data:image/jpeg;base64,...'  # the base64-encoded input image, as a data URI
decoded = ur.urlopen(image)
image_loaded = fr.load_image_file(decoded)

# for a test:
face_locations = fr.face_locations(image_loaded)
print("I found {} face(s) in this photograph.".format(len(face_locations)))
I have some code here and it is a little sloppy. Is there any way that I could put the images, encodings, and names into separate files and import them into the main code for use? I have tried putting them into a separate file and then importing them, but it still shows a not-defined error. Can anyone help me find out why, or how to fix it?
main code
import face_recognition  # this import was missing; without it the calls below are undefined
import cv2
import numpy as np

# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
me_image = face_recognition.load_image_file("me.jpg")
me_face_encoding = face_recognition.face_encodings(me_image)[0]
mom_image = face_recognition.load_image_file("mom.jpg")
mom_face_encoding = face_recognition.face_encodings(mom_image)[0]
mattm_image = face_recognition.load_image_file("mattm.jpg")
mattm_face_encoding = face_recognition.face_encodings(mattm_image)[0]
soph_image = face_recognition.load_image_file("soph.jpg")
soph_face_encoding = face_recognition.face_encodings(soph_image)[0]
known_face_encodings = [
    me_face_encoding,
    mom_face_encoding,
    mattm_face_encoding,
    soph_face_encoding
]
known_face_names = [
    "Jacob North",
    "Shelly North",
    "Matt Mersino",
    "Sophia North"
]
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]
    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            # If a match was found in known_face_encodings, just use the first one.
            # if True in matches:
            #     first_match_index = matches.index(True)
            #     name = known_face_names[first_match_index]
            # Or instead, use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]
            face_names.append(name)
    process_this_frame = not process_this_frame
    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    # Display the resulting image
    cv2.imshow('Video', frame)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
Code I Wish To Separate
me_image = face_recognition.load_image_file("me.jpg")
me_face_encoding = face_recognition.face_encodings(me_image)[0]
mom_image = face_recognition.load_image_file("mom.jpg")
mom_face_encoding = face_recognition.face_encodings(mom_image)[0]
mattm_image = face_recognition.load_image_file("mattm.jpg")
mattm_face_encoding = face_recognition.face_encodings(mattm_image)[0]
soph_image = face_recognition.load_image_file("soph.jpg")
soph_face_encoding = face_recognition.face_encodings(soph_image)[0]
known_face_encodings = [
    me_face_encoding,
    mom_face_encoding,
    mattm_face_encoding,
    soph_face_encoding
]
known_face_names = [
    "Jacob North",
    "Shelly North",
    "Matt Mersino",
    "Sophia North"
]
I just want to make it neater and easier to access.
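One way to do the split, sketched under the assumption that the block above is saved as known_faces.py next to the main script and the .jpg files (the module name is hypothetical). The usual cause of the not-defined error is that the helper file lacks its own import face_recognition:

# known_faces.py -- hypothetical helper module
import face_recognition  # the helper needs its own imports

me_image = face_recognition.load_image_file("me.jpg")
me_face_encoding = face_recognition.face_encodings(me_image)[0]
# ... repeat for mom.jpg, mattm.jpg, soph.jpg ...

known_face_encodings = [me_face_encoding]   # plus the other three encodings
known_face_names = ["Jacob North"]          # plus the other three names

The main code then replaces the whole block with from known_faces import known_face_encodings, known_face_names.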
Hi everyone, I'm working with OpenCV (Python) on a face recognition program. I have two files: one captures a new user's face and stores it under the name supplied by the user; the second recognizes the user using the webcam. Now, my concern is that the user is recognized correctly but the name is only shown, not saved. How could I save the name of the recognized person so that it can be transferred or operated on?
#__author__ = 'ADMIN'
import cv2, sys, numpy, os

size = 4
fn_haar = 'haarcascade_frontalface_default.xml'
fn_dir = 'att_faces'
fn_name = "aditya"
path = os.path.join(fn_dir, fn_name)
if not os.path.isdir(path):
    os.mkdir(path)
(im_width, im_height) = (112, 92)
haar_cascade = cv2.CascadeClassifier(fn_haar)
webcam = cv2.VideoCapture(0)

# The program loops until it has 20 images of the face.
count = 0
while count < 20:
    (rval, im) = webcam.read()
    im = cv2.flip(im, 1, 0)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    mini = cv2.resize(gray, (gray.shape[1] // size, gray.shape[0] // size))  # integer division for Python 3
    faces = haar_cascade.detectMultiScale(mini)
    faces = sorted(faces, key=lambda x: x[3])
    if faces:
        face_i = faces[0]
        (x, y, w, h) = [v * size for v in face_i]
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (im_width, im_height))
        pin = sorted([int(n[:n.find('.')]) for n in os.listdir(path)
                      if n[0] != '.'] + [0])[-1] + 1
        cv2.imwrite('%s/%s.png' % (path, pin), face_resize)
        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)
        cv2.putText(im, fn_name, (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN,
                    1, (0, 255, 0))
        count += 1
    cv2.imshow('OpenCV', im)
    key = cv2.waitKey(10)
    if key == 27:
        break
Code for face recognition from the dataset
__author__ = 'ADMIN'
import cv2, sys, numpy, os

size = 4
fn_haar = 'haarcascade_frontalface_default.xml'
fn_dir = 'att_faces'

# Part 1: Create fisherRecognizer
print('Training...')
# Create a list of images and a list of corresponding names
(images, lables, names, id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(fn_dir):
    for subdir in dirs:
        names[id] = subdir
        subjectpath = os.path.join(fn_dir, subdir)
        for filename in os.listdir(subjectpath):
            path = subjectpath + '/' + filename
            lable = id
            images.append(cv2.imread(path, 0))
            lables.append(int(lable))
        id += 1
(im_width, im_height) = (112, 92)

# Create a Numpy array from the two lists above
(images, lables) = [numpy.array(lis) for lis in [images, lables]]

# OpenCV trains a model from the images
# NOTE: in OpenCV 3+ this lives in the opencv-contrib `face` module:
#   model = cv2.face.FisherFaceRecognizer_create()
model = cv2.createFisherFaceRecognizer()
model.train(images, lables)

# Part 2: Use fisherRecognizer on camera stream
haar_cascade = cv2.CascadeClassifier(fn_haar)
webcam = cv2.VideoCapture(0)
while True:
    (rval, frame) = webcam.read()
    frame = cv2.flip(frame, 1, 0)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    mini = cv2.resize(gray, (gray.shape[1] // size, gray.shape[0] // size))  # integer division for Python 3
    faces = haar_cascade.detectMultiScale(mini)
    for i in range(len(faces)):
        face_i = faces[i]
        (x, y, w, h) = [v * size for v in face_i]
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (im_width, im_height))
        # Try to recognize the face
        prediction = model.predict(face_resize)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
        # Write the name of the recognized face
        cv2.putText(frame,
                    '%s - %.0f' % (names[prediction[0]], prediction[1]),
                    (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
    cv2.imshow('OpenCV', frame)
    key = cv2.waitKey(10)
    if key == 27:
        break
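On the actual question of saving the recognized name: a minimal sketch that collects each newly recognized person and appends the name to a text file, so other code can read it later. The threshold and file name are illustrative; for FisherFace, prediction[1] is a distance, so smaller means a better match:

recognized = []  # names seen so far in this session

# inside the `for i in range(len(faces)):` loop, after model.predict(...):
person = names[prediction[0]]
if prediction[1] < 500 and person not in recognized:
    recognized.append(person)
    with open('recognized_names.txt', 'a') as f:  # persisted for later operations
        f.write(person + '\n')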
This is my code, where I am not using any SQL server.
I am encoding images from the folder, and it will show the recognized face with the name of the image it was saved as: if the image is saved as abc.jpg, it will detect the face during live streaming and show abc.jpg.
Here is my code:

from PIL import Image
import face_recognition
import cv2
import os

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)

known_face_encodings = []
known_face_names = []
user_appeared = []

root = "/home/erp-next/open cv/dataset/"

for filename in os.listdir(root):
    if filename.endswith(('.jpg', '.png')):  # endswith('.jpg' or '.png') only ever checked '.jpg'
        try:
            print(filename)
            path = os.path.join(root, filename)
            filter_image = face_recognition.load_image_file(path)
            filter_face_encoding = face_recognition.face_encodings(filter_image)
            known_face_encodings.append(filter_face_encoding[0])
            known_face_names.append(filename)
        except:
            print("An exception occurred : " + filename)

#print(known_face_encodings)
print(known_face_names)

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
# process_this_frame = True

def face():
    while True:
        process_this_frame = True
        # Grab a single frame of video
        ret, frame = video_capture.read()
        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]
        k = cv2.waitKey(1)
        if k % 256 == 27:
            # ESC pressed
            print("Escape hit, closing...")
            break
        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
                name = "Unknown"
                # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = known_face_names[first_match_index]
                    print(name)
                face_names.append(name)
        process_this_frame = not process_this_frame
        # Display the results
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        # Display the resulting image
        cv2.imshow('Video', frame)
        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()

face()
I am also using the face_recognition library to encode and detect faces.
Thanks.