Killing a child thread running a multiprocessing operation - python

I'm new to multiprocessing. I'm designing an app where the user selects a video and the app runs facial recognition on it and displays the result. I used concurrent.futures.ProcessPoolExecutor to process the frames in parallel and cut the total time required. My problem: to integrate the multiprocessing face recognition part into the app, I need to run the whole thing on a separate thread to keep the UI responsive.
When the standalone multiprocessing part runs on its own, it can be stopped by raising an exception. But a separate thread has its own stack, so an exception raised in the main thread won't reach it. I have seen people suggest checking a message/flag to decide whether the code should keep running, but my caller function is not built around a "while loop" where it could check one.
All I'm asking is whether there is any way to raise an exception that stops both threads, or any other way to stop that child thread from the main thread. My code is attached below: sub() is the caller function and process() is the function used as the multiprocessing target.
Thanks in advance.
import cv2
import face_recognition
import numpy as np
import time
import pickle
import concurrent.futures
import queue
import file_video_stream

def process(sframe, facesencoded, knownfacenames, process_this_frame):
    if sframe is not None:
        if process_this_frame:
            small_frame = cv2.resize(sframe, (0, 0), fx=0.5, fy=0.5)
            rgb_small_frame = small_frame[:, :, ::-1]
            face_locations = face_recognition.face_locations(rgb_small_frame)
            unknown_face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            face_names = []
            for face_encoding in unknown_face_encodings:
                matches = face_recognition.compare_faces(facesencoded, face_encoding)
                name = "Unknown"
                face_distances = face_recognition.face_distance(facesencoded, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = knownfacenames[best_match_index]
                face_names.append(name)
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                # Scale back up face locations since the frame we detected in was scaled to 1/2 size
                top *= 2
                right *= 2
                bottom *= 2
                left *= 2
                # Draw a box around the face
                cv2.rectangle(sframe, (left - 20, top - 20), (right + 20, bottom + 20), (255, 0, 0), 2)
                # Draw a label with a name below the face
                cv2.rectangle(sframe, (left - 20, bottom - 15), (right + 20, bottom + 20), (255, 0, 0), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(sframe, name, (left - 20, bottom + 15), font, 1.0, (255, 255, 255), 2)
            show_frame = cv2.resize(sframe, (1080, 720))
            return show_frame
        else:
            show_frame = cv2.resize(sframe, (1080, 720))
            return show_frame

def sub():
    list_original = []
    frame_number = 0
    alt_frame_number = 0
    proc_frame = 0
    q1 = queue.Queue()
    with open("known.pickle", 'rb') as ki:
        faces = pickle.load(ki)
    faces_encoded = list(faces.values())
    known_face_names = list(faces.keys())
    fvs = file_video_stream.FileVideoStream('Attendance.mp4')
    fvs.start()
    time.sleep(1.0)
    process_this_frame = True
    with concurrent.futures.ProcessPoolExecutor() as executor:
        while fvs.more():
            #for _ in range(5):
            frame = fvs.read()
            time.sleep(0.1)
            list_element = executor.submit(process, frame, faces_encoded, known_face_names, process_this_frame)
            time.sleep(0.1)
            if list_element is not None:
                frame_number += 1
                #print(frame_number)
                list_original.append(list_element)
            else:
                fvs.stop()
                break
            process_this_frame = not process_this_frame
        print("Total number of frames read:", frame_number)
        #print("Total number of frames processed:", alt_frame_number)
        fvs.stop()
        for res in list_original:
            q1.put(res.result())
    while not q1.empty():
        dump = q1.get()
        cv2.imshow('Video', dump)
        time.sleep(0.01)
        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()

if __name__ == '__main__':
    sub()
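
For reference, a minimal sketch of the flag-based approach mentioned above (my names, not from the original code; assumes Python 3.9+ for cancel_futures): the worker thread polls a threading.Event between submissions, and the main thread sets the event to request a stop.

import threading
import concurrent.futures

stop_event = threading.Event()  # shared between the UI thread and the worker thread

def sub_interruptible(fvs, process, args):
    futures = []
    with concurrent.futures.ProcessPoolExecutor() as executor:
        while fvs.more():
            if stop_event.is_set():
                # Cancel frames that were submitted but have not started yet.
                executor.shutdown(wait=True, cancel_futures=True)
                break
            frame = fvs.read()
            futures.append(executor.submit(process, frame, *args))
    return [f.result() for f in futures if not f.cancelled()]

# From the main (UI) thread:
# stop_event.set()  # the worker notices it on the next loop iteration

Workers already running can't be interrupted mid-frame, but nothing new is started, so the thread winds down within roughly one frame's processing time.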

Related

Run multiple functions on single videostream multiprocessing

Hey, I am trying to run different face detection models simultaneously. I am using the OpenCV library to open a video stream and created separate Process objects for the different face detection models. When I run the program, the first method runs successfully, but the second method exits with an error that it can't receive a frame.
The major challenge is the while loop for reading the capture source (cap), which makes this different from questions posted on Stack Overflow before.
The code is as follows:
import cv2
import dlib
from multiprocessing import Process

def haar_cascade():
    while True:
        ret, frame = cap.read()
        cv2.imshow('input', frame)
        if not ret:
            print("Can't receive frame (stream end?). Exiting ...")
            break
        classifier = cv2.CascadeClassifier('haarcascade_frontalface2.xml')
        faces = classifier.detectMultiScale(frame)
        for result in faces:
            x, y, w, h = result
            x1, y1 = x + w, y + h
            cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
        if cv2.waitKey(1) == ord('q'):
            break
        cv2.imshow('harr-cascade', frame)

def dlib_hog():
    while True:
        ret, frame = cap.read()
        if not ret:
            print("Can't receive frame (stream end?). Exiting ...")
            break
        detector = dlib.get_frontal_face_detector()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = detector(gray, 1)  # result
        # to draw faces on image
        for result in faces:
            x = result.left()
            y = result.top()
            x1 = result.right()
            y1 = result.bottom()
            cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
        if cv2.waitKey(1) == ord('q'):
            break
        cv2.imshow('dlib-hog', frame)

if __name__ == "__main__":
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Cannot open camera")
        exit()
    harrProcess = Process(target=haar_cascade)
    harrProcess.start()
    dlibProcess = Process(target=dlib_hog)
    dlibProcess.start()
    # When everything done, release the capture
    harrProcess.join()
    dlibProcess.join()
    cap.release()
    cv2.destroyAllWindows()
How can I create a multiprocessing model that reads frames from a single source and performs independent operations on them?
I have made various attempts:
1. I tried using multiprocessing with a producer process and two consumer processes. The frame created by the producer must be converted to a shared-memory array and then converted back to a numpy array when retrieved by a consumer. There is enough overhead in these operations that I found frames were being lost.
2. I tried using multithreading with a producer thread and two consumer threads. This has less overhead in passing frames from the producer to the consumers. The problem, of course, with multithreading is that due to contention for the Global Interpreter Lock, any CPU-intensive processing required by one consumer cannot run in parallel with CPU-intensive processing required by the other, and could even cause the producer to miss frames. Unfortunately, when using a camera for input, I don't know whether there is a way for the producer to detect frames it has missed. To remediate these problems I pass a multiprocessing pool to the consumer threads, to which they can submit tasks that perform the CPU-intensive processing on the frames. Here, too, there is enough overhead in passing frames from one process to another that frames are lost.
3. As in point 2 above, I use multithreading, but instead of submitting the CPU-intensive work to the multiprocessing pool, I perform it within the consumer thread. This seems to cause fewer missed frames for the consumer, but I can't tell whether it causes the producer to miss frames it would not otherwise miss. So using a multiprocessing pool for the CPU-intensive work seems the wiser approach. Of course, if your CPU is fast enough, neither the consumer nor the producer should miss frames. But option 1 (see the second code example), i.e. using just multiprocessing, is probably best.
In the following demos, since I don't have access to your XML file, I have dummied out the processing for one of your consumers. You terminate the program by just hitting the enter key:
Using Multithreading
Set USE_POOL_FOR_COMPUTATION = False to perform CPU-intensive processing by direct call instead of submitting the work to a multiprocessing pool:
#!/usr/bin/env python3
import threading
import multiprocessing
import cv2
import dlib

USE_POOL_FOR_COMPUTATION = True

class Producer:
    def __init__(self):
        self._frame = None
        self._condition = threading.Condition()
        self._running = True
        # The latest frame number retrieved
        self._latest_frame_number = 0

    def run(self, cap):
        while self._running:
            ret, self._frame = cap.read()
            if not ret:
                self._running = False
            else:
                self._latest_frame_number += 1
            with self._condition:
                self._condition.notify_all()

    def stop(self):
        self._running = False

    def get_frame(self, sequence_number):
        with self._condition:
            # We block until we find a frame sequence number >= sequence_number.
            self._condition.wait_for(lambda: not self._running or self._latest_frame_number >= sequence_number)
            # Even after the stop method has been called and we are no longer running,
            # there could still be an unprocessed frame. But when we are called again, the current
            # frame number will be < the expected frame number:
            return (self._latest_frame_number, None if self._latest_frame_number < sequence_number else self._frame)

def process_haar_cascade(frame):
    classifier = cv2.CascadeClassifier('haarcascade_frontalface2.xml')
    faces = classifier.detectMultiScale(frame)
    for result in faces:
        x, y, w, h = result
        x1, y1 = x + w, y + h
        cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
    return frame

def haar_cascade(producer, pool):
    last_sequence_number = 0
    while True:
        expected = last_sequence_number + 1
        sequence_number, frame = producer.get_frame(expected)
        if frame is None:
            break
        cv2.waitKey(1)  # allow window to update
        if sequence_number != expected:
            print(f'haar_cascade missed frames {expected} to {sequence_number-1}', flush=True)
        last_sequence_number = sequence_number
        cv2.imshow('input', frame)  # Unmodified frame
        # Since I don't have the required xml file, just skip processing:
        """
        if USE_POOL_FOR_COMPUTATION:
            frame = pool.apply(process_haar_cascade, args=(frame,))
        else:
            frame = process_haar_cascade(frame)
        """
        cv2.imshow('harr-cascade', frame)

def process_dlib_hog(frame):
    detector = dlib.get_frontal_face_detector()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray, 1)  # result
    # to draw faces on image
    for result in faces:
        x = result.left()
        y = result.top()
        x1 = result.right()
        y1 = result.bottom()
        cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
    return frame

def dlib_hog(producer, pool):
    last_sequence_number = 0
    while True:
        expected = last_sequence_number + 1
        sequence_number, frame = producer.get_frame(expected)
        if frame is None:
            break
        cv2.waitKey(1)  # allow window to update
        if sequence_number != expected:
            print(f'dlib_hog missed frames {expected} to {sequence_number-1}', flush=True)
        last_sequence_number = sequence_number
        if USE_POOL_FOR_COMPUTATION:
            frame = pool.apply(process_dlib_hog, args=(frame,))
        else:
            frame = process_dlib_hog(frame)
        cv2.imshow('dlib-hog', frame)

def main():
    producer = Producer()
    pool = multiprocessing.Pool(2) if USE_POOL_FOR_COMPUTATION else None
    # Pass pool for CPU-intensive work:
    consumer1_thread = threading.Thread(target=haar_cascade, args=(producer, pool))
    consumer1_thread.start()
    consumer2_thread = threading.Thread(target=dlib_hog, args=(producer, pool))
    consumer2_thread.start()
    cap = cv2.VideoCapture(0)
    producer_thread = threading.Thread(target=producer.run, args=(cap,))
    producer_thread.start()
    input('Hit enter to terminate:\n')
    producer.stop()
    producer_thread.join()
    consumer1_thread.join()
    consumer2_thread.join()
    cap.release()
    cv2.destroyAllWindows()
    if USE_POOL_FOR_COMPUTATION:
        pool.close()
        pool.join()

if __name__ == '__main__':
    main()
Using Multiprocessing
The multiprocessing.RawArray that is used to hold the sharable frame must be allocated before the consumer process is run so that all processes have access to this array. This requires knowing in advance how large an array to create:
#!/usr/bin/env python3
import multiprocessing
import ctypes
import cv2
import numpy as np
import dlib

class Producer:
    def __init__(self):
        # Discover how large a frame is by getting the first frame
        cap = cv2.VideoCapture(0)
        ret, frame = cap.read()
        if ret:
            self._shape = frame.shape
            frame_size = self._shape[0] * self._shape[1] * self._shape[2]
            self._shared_array = multiprocessing.RawArray(ctypes.c_ubyte, frame_size)
        else:
            self._arr = None
        cap.release()
        self._condition = multiprocessing.Condition()
        self._running = multiprocessing.RawValue('i', 1)
        # The latest frame number retrieved
        self._latest_frame_number = multiprocessing.RawValue('i', 0)
        self._lock = multiprocessing.Lock()

    def run(self):
        cap = cv2.VideoCapture(0)
        while self._running.value:
            ret, frame = cap.read()
            if not ret:
                self._running.value = 0
                with self._condition:
                    self._condition.notify_all()
                cap.release()
                break
            with self._lock:
                self._latest_frame_number.value += 1
                # np array to shared_array
                temp = np.frombuffer(self._shared_array, dtype=frame.dtype)
                temp[:] = frame.flatten(order='C')
            with self._condition:
                self._condition.notify_all()

    def stop(self):
        self._running.value = 0

    def get_frame(self, sequence_number):
        with self._condition:
            # We block until we find a frame sequence number >= sequence_number.
            self._condition.wait_for(lambda: not self._running.value or self._latest_frame_number.value >= sequence_number)
            # Even after the stop method has been called and we are no longer running,
            # there could still be an unprocessed frame. But when we are called again, the current
            # frame number will be < the expected frame number:
            if self._latest_frame_number.value < sequence_number:
                return (self._latest_frame_number.value, None)
            with self._lock:
                # Convert to np array:
                return self._latest_frame_number.value, np.ctypeslib.as_array(self._shared_array).reshape(self._shape)

def process_haar_cascade(frame):
    classifier = cv2.CascadeClassifier('haarcascade_frontalface2.xml')
    faces = classifier.detectMultiScale(frame)
    for result in faces:
        x, y, w, h = result
        x1, y1 = x + w, y + h
        cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
    return frame

def haar_cascade(producer):
    last_sequence_number = 0
    while True:
        expected = last_sequence_number + 1
        sequence_number, frame = producer.get_frame(expected)
        if frame is None:
            break
        cv2.waitKey(1)  # allow window to update
        if sequence_number != expected:
            print(f'haar_cascade missed frames {expected} to {sequence_number-1}', flush=True)
        last_sequence_number = sequence_number
        cv2.imshow('input', frame)  # Unmodified frame
        # Since I don't have the required xml file, just skip processing:
        #frame = process_haar_cascade(frame)
        cv2.imshow('harr-cascade', frame)

def process_dlib_hog(frame):
    detector = dlib.get_frontal_face_detector()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray, 1)  # result
    # to draw faces on image
    for result in faces:
        x = result.left()
        y = result.top()
        x1 = result.right()
        y1 = result.bottom()
        cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
    return frame

def dlib_hog(producer):
    last_sequence_number = 0
    while True:
        expected = last_sequence_number + 1
        sequence_number, frame = producer.get_frame(expected)
        if frame is None:
            break
        cv2.waitKey(1)  # allow window to update
        if sequence_number != expected:
            print(f'dlib_hog missed frames {expected} to {sequence_number-1}', flush=True)
        last_sequence_number = sequence_number
        frame = process_dlib_hog(frame)
        cv2.imshow('dlib-hog', frame)

def main():
    producer = Producer()
    consumer1_process = multiprocessing.Process(target=haar_cascade, args=(producer,))
    consumer1_process.start()
    consumer2_process = multiprocessing.Process(target=dlib_hog, args=(producer,))
    consumer2_process.start()
    producer_process = multiprocessing.Process(target=producer.run)
    producer_process.start()
    input('Hit enter to terminate:\n')
    producer.stop()
    producer_process.join()
    consumer1_process.join()
    consumer2_process.join()

if __name__ == '__main__':
    main()

How do I livestream the video I have captured using OpenCV on my webpage which is made using Django while also keeping the face recognition function

I am a new developer working at an internship. I am developing a web application which takes your attendance using face recognition and puts the entry in the database, using Django, Python, OpenCV and the face_recognition library. The login page looks like this: Login Page.
When I click the login button, my views code calls an external Python function which opens a separate camera window that does the face recognition. The Camera Window.
What I want to do is display the camera feed directly in the browser window rather than in a separate window. I found some tutorials and tried to implement them, but with them I can only display the frames in the browser window; the face recognition functionality is lost.
The Views Code (views.py):
from subprocess import Popen, PIPE  # needed for Popen/PIPE below
from django.shortcuts import render

def camera_on(request):
    out = Popen('E:/Study/Internship/Capstone/Project/Web App/web_app/Basic.py', shell=True, stdout=PIPE)
    op = out.stdout.read().decode("utf-8")
    new_str = ''
    for i in range(len(op)-2):
        new_str += op[i]
    request.session['name'] = new_str
    return render(request, 'open_camera.html', {'data': new_str})
This code runs the Basics.py file, which opens the camera window, does the face recognition, and makes the entry in the database.
The Basics.py code:
from datetime import datetime, date
import os
import django
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web_app.settings')
django.setup()
import cv2
import face_recognition
import numpy as np
from opencamera.models import Logs
import pymysql

conn = pymysql.connect(host='localhost', user='root', password='', database='projectdb')
cur = conn.cursor()
video_capture = cv2.VideoCapture(0)
cur.execute('SELECT Name, Photo FROM employee')
result = cur.fetchall()
known_face_names = []
known_face_encodings = []
for i in range(len(result)):
    known_face_names.append(result[i][0])
for i in range(len(result)):
    img = face_recognition.load_image_file(result[i][1])
    img_encode = face_recognition.face_encodings(img)[0]
    known_face_encodings.append(img_encode)
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while process_this_frame < 15:
    nam = "N/A"
    ret, frame = video_capture.read()
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    rgb_small_frame = small_frame[:, :, ::-1]
    if process_this_frame:
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name1 = "N/A"
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name1 = known_face_names[best_match_index]
                nam = str(name1)
            face_names.append(name1)
    for (top, right, bottom, left), name1 in zip(face_locations, face_names):
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        cv2.rectangle(frame, (left - 10, top - 50), (right + 10, bottom + 50), (0, 255, 0), 2)
        cv2.rectangle(frame, (left - 10, bottom + 30), (right + 10, bottom + 50), (0, 255, 0), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name1, (left + 10, bottom + 45), font, 0.5, (0, 0, 0), 1)
    cv2.imshow('Attendance Cam', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    if nam != "N/A":
        process_this_frame += 1
        if process_this_frame == 13:
            print(nam)
            today = date.today()
            now = datetime.now()
            now = now.strftime('%H:%M:%S')
            sql = "SELECT Action FROM logs WHERE Name=%s"
            recs = (nam,)  # one-element tuple for the query parameter
            cur.execute(sql, recs)
            status = cur.fetchall()
            stat = ''
            if len(status) != 0:
                length = len(status) - 1
                stat = status[length][0]
            if stat == 'Exit' or len(status) == 0:
                attd = 'INSERT INTO logs (Name, Action, Date, Time) VALUES (%s, "Entry", %s, %s)'
                rec = (nam, today, now)
                cur.execute(attd, rec)
video_capture.release()
cv2.destroyAllWindows()
This code opens the camera, puts a green box around your face if it's in the database, and then makes an entry in the logs if you are recognized.
I need help integrating this functionality in the live stream in the browser window.
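
One common approach (a minimal sketch, not the original code): move the recognition loop into a generator that yields JPEG-encoded frames and serve it with Django's StreamingHttpResponse as an MJPEG stream. The names gen_frames and video_feed are hypothetical.

import cv2
from django.http import StreamingHttpResponse

def gen_frames():
    video_capture = cv2.VideoCapture(0)
    try:
        while True:
            ret, frame = video_capture.read()
            if not ret:
                break
            # ... run the face_recognition drawing/logging logic from Basics.py
            # on `frame` here, exactly as in the while loop above ...
            ok, jpeg = cv2.imencode('.jpg', frame)
            if not ok:
                continue
            # Each multipart chunk replaces the previous image in the browser.
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')
    finally:
        video_capture.release()

def video_feed(request):
    return StreamingHttpResponse(gen_frames(),
                                 content_type='multipart/x-mixed-replace; boundary=frame')

In the template, point an image tag at the view, e.g. <img src="/video_feed/">, so the camera output with the green boxes appears inside the page instead of a separate OpenCV window.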

How can I integrate Opencv into a Tkinter window

I'm trying to put face recognition code that uses OpenCV on the left-hand side of a Tkinter window. By doing this I wish to leave the right-hand side of the window free so I can output text, e.g. when a face is detected the program will display "Name: Present". I am new to both Tkinter and OpenCV and I can't seem to find a straight answer online. Any help is appreciated, thanks!
Here is my code below:
import face_recognition
import cv2
import numpy as np
import tkinter
from tkinter import *
import PySimpleGUI as sg
import xlsxwriter
import os
from PIL import ImageTk, Image
from datetime import datetime;
import datetime

#Defines time
now = datetime.datetime.now().time()
#Setup for period segment of spreadsheet name
if now.hour < 9:
    name = "HomeRoom "
elif now.hour == 9 and now.minute <= 50:  # time objects use .minute, not .min
    name = "Period1 "
elif now.hour == 10 and now.minute <= 40:
    name = "Period2 "
elif now.hour == 11 and now.minute <= 50:
    name = "Period3 "
elif now.hour == 12 and now.minute <= 40:
    name = "Period4 "
elif now.hour == 14 and now.minute <= 10:
    name = "Period5 "
elif now.hour <= 15:
    name = "Period6 "
else:
    name = "Testing "
# Webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# to break loop
Printed = False
#Defines todays date #day/month/year-HourAM/PM
todays_date = str(datetime.datetime.now().strftime("%d-%m-%Y %I%p"))
#Sets up spreadsheet
workbook = xlsxwriter.Workbook(name + todays_date + '.xlsx')
worksheet = workbook.add_worksheet()
worksheet.write('A1', 'Name')
worksheet.write('B1', 'Attendance')
worksheet.write('A6', 'Jordan Terzian')
worksheet.write('B6', 'Absent')
worksheet.write('A5', 'Daniel Pearce')
worksheet.write('B5', 'Absent')
worksheet.write('A4', 'Ewan Krall')
worksheet.write('B4', 'Absent')
worksheet.write('A3', 'Norman Brosow')
worksheet.write('B3', 'Absent')
worksheet.write('A2', 'Mitchell Benson')
worksheet.write('B2', 'Absent')
# classmates
jordan_image = face_recognition.load_image_file("jordan.jpg")
jordan_face_encoding = face_recognition.face_encodings(jordan_image)[0]
daniel_image = face_recognition.load_image_file("daniel.jpg")
daniel_face_encoding = face_recognition.face_encodings(daniel_image)[0]
ewan_image = face_recognition.load_image_file("ewan.jpg")
ewan_face_encoding = face_recognition.face_encodings(ewan_image)[0]
norman_image = face_recognition.load_image_file("norman.jpg")
norman_face_encoding = face_recognition.face_encodings(norman_image)[0]
mitch_image = face_recognition.load_image_file("mitch.jpg")
mitch_face_encoding = face_recognition.face_encodings(mitch_image)[0]
# Create arrays of known face encodings and their names
known_face_encodings = [
    jordan_face_encoding,
    daniel_face_encoding,
    ewan_face_encoding,
    norman_face_encoding,
    mitch_face_encoding,
]
known_face_names = [
    "Jordan Terzian",
    "Daniel Pearce",
    "Ewan Krall",
    "Norman Brosow",
    "Mitchell Benson",
]
# Initialize variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]
    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            # use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]
            face_names.append(name)
    process_this_frame = not process_this_frame
    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face,
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        #Writes to spreadsheet and GUI
        if name == "Jordan Terzian" and not Printed:
            print("Jordan Terzian is Present")
            Printed = True
            worksheet.write('B6', 'Present')
        elif name == "Daniel Pearce" and not Printed:
            print("Daniel Pearce is Present")
            Printed = True
            worksheet.write('B5', 'Present')
        elif name == "Ewan Krall" and not Printed:
            print("Ewan Krall is Present")
            Printed = True
            worksheet.write('B4', 'Present')
        elif name == "Norman Brosow" and not Printed:
            print("Norman Brosow is Present")
            Printed = True
            worksheet.write('B3', 'Present')
        elif name == "Mitchell Benson" and not Printed:
            print("Mitchell Benson is Present")
            Printed = True
            worksheet.write('B2', 'Present')  # was misspelled "worskheet"
    # Display the resulting image
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release handle to the webcam, closes webcam
video_capture.release()
cv2.destroyAllWindows()
workbook.close()
It is a simple example which gets a frame from cv2 and pastes it into the PhotoImage displayed on a Canvas. It uses after() to run the function update_frame() periodically, so it doesn't block root.mainloop(), which has to run all the time.
You will have to run the code from your while True loop inside the function update_frame(), without using while True.
import tkinter as tk
from PIL import Image, ImageTk
import cv2

# --- functions ---

def update_frame():
    ret, frame = cap.read()
    image = Image.fromarray(frame)
    photo.paste(image)
    #description['text'] = 'new text'
    root.after(10, update_frame)  # update it again after 10ms

# --- main ---

cap = cv2.VideoCapture(0)
# get first frame
ret, frame = cap.read()

# - GUI -
root = tk.Tk()
image = Image.fromarray(frame)
photo = ImageTk.PhotoImage(image)  # it has to be after `tk.Tk()`
canvas = tk.Canvas(root, width=photo.width(), height=photo.height())
canvas.pack(side='left', fill='both', expand=True)
canvas.create_image((0,0), image=photo, anchor='nw')
description = tk.Label(root, text="Place for description")
description.pack(side='right')

# - start -
update_frame()  # update it first time
root.mainloop()  # start program - this loop runs all time

# - after close -
cap.release()
BTW: I have an example with buttons Play, Stop, Save Image: python-examples/cv2/tkinter-CV
EDIT: I can't test it, but it could be something like this.
import face_recognition
import cv2
import numpy as np
import tkinter as tk  # the code below uses the `tk.` prefix
#from tkinter import * # PEP8: `import *` is not preferred
#import PySimpleGUI as sg #
import xlsxwriter
import os
from PIL import ImageTk, Image
#from datetime import datetime;
import datetime

# --- functions ---

def process_frame():
    global process_this_frame
    global Printed  # assigned below, so it must be declared global
    global face_locations, face_names  # assigned only on processed frames

    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]
    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            # use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]
            face_names.append(name)
    process_this_frame = not process_this_frame
    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face,
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        #Writes to spreadsheet and GUI
        if name == "Jordan Terzian" and not Printed:
            print("Jordan Terzian is Present")
            Printed = True
            worksheet.write('B6', 'Present')
        elif name == "Daniel Pearce" and not Printed:
            print("Daniel Pearce is Present")
            Printed = True
            worksheet.write('B5', 'Present')
        elif name == "Ewan Krall" and not Printed:
            print("Ewan Krall is Present")
            Printed = True
            worksheet.write('B4', 'Present')
        elif name == "Norman Brosow" and not Printed:
            print("Norman Brosow is Present")
            Printed = True
            worksheet.write('B3', 'Present')
        elif name == "Mitchell Benson" and not Printed:
            print("Mitchell Benson is Present")
            Printed = True
            worksheet.write('B2', 'Present')  # was misspelled "worskheet"
    if face_names:
        description['text'] = face_names[-1]  # guard: there may be no recognized face yet
    image = Image.fromarray(frame)
    photo.paste(image)
    root.after(40, process_frame)  # update it again after 40ms - it gives 25 FPS (1000ms/40ms=25)

# --- init ---

#Defines time
now = datetime.datetime.now().time()
#Setup for period segment of spreadsheet name
if now.hour < 9:
    name = "HomeRoom "
elif now.hour == 9 and now.minute <= 50:  # time objects use .minute, not .min
    name = "Period1 "
elif now.hour == 10 and now.minute <= 40:
    name = "Period2 "
elif now.hour == 11 and now.minute <= 50:
    name = "Period3 "
elif now.hour == 12 and now.minute <= 40:
    name = "Period4 "
elif now.hour == 14 and now.minute <= 10:
    name = "Period5 "
elif now.hour <= 15:
    name = "Period6 "
else:
    name = "Testing "

# to break loop
Printed = False
#Defines todays date #day/month/year-HourAM/PM
todays_date = str(datetime.datetime.now().strftime("%d-%m-%Y %I%p"))
#Sets up spreadsheet
workbook = xlsxwriter.Workbook(name + todays_date + '.xlsx')
worksheet = workbook.add_worksheet()
worksheet.write('A1', 'Name')
worksheet.write('B1', 'Attendance')
worksheet.write('A6', 'Jordan Terzian')
worksheet.write('B6', 'Absent')
worksheet.write('A5', 'Daniel Pearce')
worksheet.write('B5', 'Absent')
worksheet.write('A4', 'Ewan Krall')
worksheet.write('B4', 'Absent')
worksheet.write('A3', 'Norman Brosow')
worksheet.write('B3', 'Absent')
worksheet.write('A2', 'Mitchell Benson')
worksheet.write('B2', 'Absent')

# classmates
#jordan_image = face_recognition.load_image_file("jordan.jpg")
#jordan_face_encoding = face_recognition.face_encodings(jordan_image)[0]
#
#daniel_image = face_recognition.load_image_file("daniel.jpg")
#daniel_face_encoding = face_recognition.face_encodings(daniel_image)[0]
#
#ewan_image = face_recognition.load_image_file("ewan.jpg")
#ewan_face_encoding = face_recognition.face_encodings(ewan_image)[0]
#
#norman_image = face_recognition.load_image_file("norman.jpg")
#norman_face_encoding = face_recognition.face_encodings(norman_image)[0]
#
#mitch_image = face_recognition.load_image_file("mitch.jpg")
#mitch_face_encoding = face_recognition.face_encodings(mitch_image)[0]

# Create arrays of known face encodings and their names
#known_face_encodings = [
#    jordan_face_encoding,
#    daniel_face_encoding,
#    ewan_face_encoding,
#    norman_face_encoding,
#    mitch_face_encoding,
#]

filenames = [
    "jordan.jpg",
    "daniel.jpg",
    "ewan.jpg",
    "norman.jpg",
    "mitch.jpg"
]

known_face_encodings = []

for name in filenames:
    image = face_recognition.load_image_file(name)
    face_encoding = face_recognition.face_encodings(image)[0]
    known_face_encodings.append(face_encoding)

known_face_names = [
    "Jordan Terzian",
    "Daniel Pearce",
    "Ewan Krall",
    "Norman Brosow",
    "Mitchell Benson",
]

# Initialize variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

# --- main ---

# Webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# get first frame to get size
ret, frame = video_capture.read()  # was `cap.read()`, but the capture is named `video_capture`

# - GUI -
root = tk.Tk()
image = Image.fromarray(frame)
photo = ImageTk.PhotoImage(image)  # it has to be after `tk.Tk()`
canvas = tk.Canvas(root, width=photo.width(), height=photo.height())
canvas.pack(side='left', fill='both', expand=True)
canvas.create_image((0,0), image=photo, anchor='nw')
description = tk.Label(root, text="Place for description")
description.pack(side='right')

# - start -
process_frame()  # update it first time
root.mainloop()  # start program - this loop runs all time

# --- end ---

# Release handle to the webcam, closes webcam
video_capture.release()
#cv2.destroyAllWindows()
workbook.close()

Face Recognition System on Mac

I was following the tutorial on face and eye recognition here: https://towardsdatascience.com/a-beginners-guide-to-building-your-own-face-recognition-system-to-creep-out-your-friends-df3f4c471d55.
However, when I run python3 detect_blinks.py, some errors occur, and I don't know how to fix them.
The first time I tried, error 1 occurred. After trying the same command (python3 detect_blinks.py) several times, the error became 2.
1.
qt.qpa.plugin: Could not find the Qt platform plugin "cocoa" in ""
This application failed to start because no Qt platform plugin could
be initialized. Reinstalling the application may fix this problem.
2.
Traceback (most recent call last):
  File "detect_blinks.py", line 70, in <module>
    best_match_index = np.argmin(face_distances)
  File "<__array_function__ internals>", line 5, in argmin
  File "/Users/maurice/Dev/newcvtest/lib/python3.8/site-packages/numpy/core/fromnumeric.py", line 1267, in argmin
    return _wrapfunc(a, 'argmin', axis=axis, out=out)
  File "/Users/maurice/Dev/newcvtest/lib/python3.8/site-packages/numpy/core/fromnumeric.py", line 61, in _wrapfunc
    return bound(*args, **kwds)
ValueError: attempt to get argmin of an empty sequence
This is my Python code:
#code forked and tweaked from https://github.com/ageitgey/face_recognition/blob/master/examples/facerec_from_webcam_faster.py
#to extend, just add more people into the known_people folder
import face_recognition
import cv2
import numpy as np
import os
import glob

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)

#make array of sample pictures with encodings
known_face_encodings = []
known_face_names = []
dirname = os.path.dirname(__file__)
path = os.path.join(dirname, 'known_people/')

#make an array of all the saved jpg files' paths
list_of_files = [f for f in glob.glob(path + '*.jpg')]
#find number of known faces
number_files = len(list_of_files)
names = list_of_files.copy()

for i in range(number_files):
    globals()['image_{}'.format(i)] = face_recognition.load_image_file(list_of_files[i])
    globals()['image_encoding_{}'.format(i)] = face_recognition.face_encodings(globals()['image_{}'.format(i)])[0]
    known_face_encodings.append(globals()['image_encoding_{}'.format(i)])
    # Create array of known names
    names[i] = names[i].replace("known_people/", "")
    known_face_names.append(names[i])

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]
    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            # # If a match was found in known_face_encodings, just use the first one.
            # if True in matches:
            #     first_match_index = matches.index(True)
            #     name = known_face_names[first_match_index]
            # Or instead, use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]
            face_names.append(name)
    process_this_frame = not process_this_frame
    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    # Display the resulting image
    cv2.imshow('Video', frame)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
This error occurs when your known_face_encodings list ends up empty. That is usually because the working directory is not set properly, so the script cannot pick up the images from the folder, encode them, and match them against the real-time processed frame.
So check your working directory and set the path variable to the actual folder location instead of relying on path = os.path.join(dirname, 'known_people/').
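
A quick sanity check along those lines (my sketch, not part of the original answer): build an absolute path and fail early when the folder yields no images.

import os
import glob

# Resolve the folder relative to this script, not the current working directory.
dirname = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(dirname, 'known_people')
list_of_files = glob.glob(os.path.join(path, '*.jpg'))
if not list_of_files:
    raise SystemExit(f"No .jpg files found in {path!r} - check the working directory")

Failing here, instead of later at np.argmin, makes the real cause of the "argmin of an empty sequence" error obvious.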

Importing Another File

I have some code here and it is a little sloppy. Is there any way I could put the images, encodings, and names into a separate file and import them into the main code for use? I have tried putting them into a separate file and then importing them, but it still shows a "not defined" error. Can anyone help me find out why, or how to fix it?
main code
import face_recognition  # used below but missing from the original imports
import cv2
import numpy as np

# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)

me_image = face_recognition.load_image_file("me.jpg")
me_face_encoding = face_recognition.face_encodings(me_image)[0]
mom_image = face_recognition.load_image_file("mom.jpg")
mom_face_encoding = face_recognition.face_encodings(mom_image)[0]
mattm_image = face_recognition.load_image_file("mattm.jpg")
mattm_face_encoding = face_recognition.face_encodings(mattm_image)[0]
soph_image = face_recognition.load_image_file("soph.jpg")
soph_face_encoding = face_recognition.face_encodings(soph_image)[0]

known_face_encodings = [
    me_face_encoding,
    mom_face_encoding,
    mattm_face_encoding,
    soph_face_encoding
]
known_face_names = [
    "Jacob North",
    "Shelly North",
    "Matt Mersino",
    "Sophia North"
]

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]
    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            # # If a match was found in known_face_encodings, just use the first one.
            # if True in matches:
            #     first_match_index = matches.index(True)
            #     name = known_face_names[first_match_index]
            # Or instead, use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]
            face_names.append(name)
    process_this_frame = not process_this_frame
    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    # Display the resulting image
    cv2.imshow('Video', frame)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
Code I Wish To Separate
me_image = face_recognition.load_image_file("me.jpg")
me_face_encoding = face_recognition.face_encodings(me_image)[0]
mom_image = face_recognition.load_image_file("mom.jpg")
mom_face_encoding = face_recognition.face_encodings(mom_image)[0]
mattm_image = face_recognition.load_image_file("mattm.jpg")
mattm_face_encoding = face_recognition.face_encodings(mattm_image)[0]
soph_image = face_recognition.load_image_file("soph.jpg")
soph_face_encoding = face_recognition.face_encodings(soph_image)[0]

known_face_encodings = [
    me_face_encoding,
    mom_face_encoding,
    mattm_face_encoding,
    soph_face_encoding
]
known_face_names = [
    "Jacob North",
    "Shelly North",
    "Matt Mersino",
    "Sophia North"
]
I just want to make it neater and easier to access.
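One way to do that (a sketch under the assumption that the images sit next to the scripts; known_faces.py is my name for the module): the separate file must contain its own import face_recognition, which is the usual cause of the "not defined" error when this code is moved out of the main script.

# known_faces.py
import face_recognition  # the separate file needs its own import

def _encode(filename):
    # Load an image and return the first face encoding found in it.
    image = face_recognition.load_image_file(filename)
    return face_recognition.face_encodings(image)[0]

known_face_encodings = [
    _encode("me.jpg"),
    _encode("mom.jpg"),
    _encode("mattm.jpg"),
    _encode("soph.jpg"),
]

known_face_names = [
    "Jacob North",
    "Shelly North",
    "Matt Mersino",
    "Sophia North",
]

Then in the main script, replace the separated block with:

from known_faces import known_face_encodings, known_face_names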
