CV2: closing webcam and reopening in different function - python

In this code I want to display 3 types of classification: "Known", "Unknown" and "Hostile" using open cv and face_recognition.
The first function (firtsCase()) contains "Known", "Unknown" and that it's everything ok.
What I want to do is: when the program knows that is in the function secondCase() that is "hostile" and I press "q", the webcam must close and launch the function that sends me back to the cases "Known" and "Unknown" (call at the end of secondCase() with firstCase()) to reopen the webcam and reclassify.
You can find it in the latest part of function secondCase().
I have this error:
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
cv2.error: OpenCV(4.5.5) /Users/runner/work/opencv-python/opencv-python/opencv/modules/imgproc/src/resize.cpp:4052: error: (-215:Assertion failed) !ssize.empty() in function 'resize'
I know this error shows up because it is trying to resize something that has already been manipulated, but I don't know how can I fix it.
This is the code:
video_capture = cv2.VideoCapture(0)
face2 = face_recognition.load_image_file(os.path.abspath("path2"))
face2_face_encoding = face_recognition.face_encodings(face2)[0]
known_face_encodings = [
face2_face_encoding
]
known_face_names = [
"Giulia"
]
face_location = []
face_encodings = []
face_names = []
def firstCase():
while True:
ret, frame = video_capture.read()
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
rgb_small_frame = small_frame[:, :, ::-1]
process_this_frame = True
face = False
if process_this_frame:
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.6)
name = "Sconosciuto"
face = True
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
face = False
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
face_names.append(name)
process_this_frame = not process_this_frame
if face:
for (top, right, bottom, left), name in zip(face_locations, face_names):
top *= 4
right *= 4
bottom *= 4
left *= 4
cv2.rectangle(frame, (left, top), (right, bottom), (40,48,48), 2)
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (40,48,48), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.6, (255, 255, 255), 1)
cv2.imshow('Video', frame)
else:
for (top, right, bottom, left), name in zip(face_locations, face_names):
top *= 4
right *= 4
bottom *= 4
left *= 4
cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 255, 0), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.6, (255, 255, 255), 1)
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
def secondCase():
while True:
ret, frame = video_capture.read()
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
rgb_small_frame = small_frame[:, :, ::-1]
process_this_frame = True
face = False
if process_this_frame:
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.6)
name = "Ostile"
face = True
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
face = False
face_names.append(name)
process_this_frame = not process_this_frame
if face:
for (top, right, bottom, left), name in zip(face_locations, face_names):
top *= 4
right *= 4
bottom *= 4
left *= 4
cv2.rectangle(frame, (left, top), (right, bottom), (0,0,0), 2)
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0,0,0), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.6, (255, 255, 255), 1)
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
firstCase()
print("Inizio scansione :" + time.ctime() + "\nCitofono attivo. Citofona premendo a... ")
sel = selectors.DefaultSelector()
sel.register(sys.stdin, selectors.EVENT_READ)
sys.stdout.flush()
pairs = sel.select(timeout=5)
if pairs:
if input("Citofona premendo 'a' \n"):
firstCase()
else:
secondCase()
Thank you all for helping.

Related

AttributeError: 'NoneType' object has no attribute 'get_busy'

I'm using pygame to play some sounds, I have the code
global PU1Sound
PU1Sound = pygame.mixer.Sound("PU1.mp3")
and later using
PU1S = PU1Sound.play()
while PU1S.get_busy() == True:
time.sleep(0.01)
PU1Sound.stop
and this works fine, playing the sound and waiting for it to play.
but later in the code I use
SCS = SCSound.play()
while SCS.get_busy() == True:
time.sleep(0.01)
SCSound.stop
but for some reason this dose not work and instead results in the following error
while SCS.get_busy() == True:
AttributeError: 'NoneType' object has no attribute 'get_busy'
But if I put the code that is causing an error below the the code that isn't, it works like it should, but nothing about the placement should be causing it to cause a error.
so this works
def VideoD():
PU1S = PU1Sound.play()
while PU1S.get_busy() == True:
time.sleep(0.01)
PU1Sound.stop
SVS = ScanVSound.play()
while SVS.get_busy() == True:
time.sleep(0.01)
ScanVSound.stop
> SCS = SCSound.play()
> while SCS.get_busy() == True:
> time.sleep(0.01)
> SCSound.stop
> PU2S = PU2Sound.play()
> while PU2S.get_busy() == True:
> time.sleep(0.1)
> PU2Sound.stop
> IPCS = IPCSound.play()
> while IPCS.get_busy() == True:
> time.sleep(0.1)
> IPCSound.stop
global username
global TimeWaiting
global c
clear()
video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
face1_image = face_recognition.load_image_file("facetest1.jpg")
face1_face_encoding = face_recognition.face_encodings(face1_image)[0]
known_face_encodings = [
face1_face_encoding,
]
known_face_names = [
username
]
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
scanThread.start()
while True:
ret, Frame = video_capture.read()
small_frame = cv2.resize(Frame, (0, 0), fx=0.25, fy=0.25)
rgb_small_frame = small_frame[:, :, ::-1]
if process_this_frame:
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
if TimeWaiting == False:
GrantedD()
face_names.append(name)
process_this_frame = not process_this_frame
for (top, right, bottom, left), name in zip(face_locations, face_names):
top *= 4
right *= 4
bottom *= 4
left *= 4
cv2.rectangle(Frame, (left, top), (right, bottom), (0, 0, 255), 2)
cv2.rectangle(Frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(Frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
Frame = cv2.resize(Frame, (300,250))
cv2.imshow('Video', Frame)
cv2.moveWindow('Video', 40,30)
cv2.resizeWindow('Video', 300,250)
if cv2.waitKey(1) & 0xFF == ord('q') or (c <= 0):
process_this_frame = False
video_capture.release()
title()
but this dose not work
def VideoD():
PU1S = PU1Sound.play()
while PU1S.get_busy() == True:
time.sleep(0.01)
PU1Sound.stop
SVS = ScanVSound.play()
while SVS.get_busy() == True:
time.sleep(0.01)
ScanVSound.stop
global username
global TimeWaiting
global c
clear()
video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
face1_image = face_recognition.load_image_file("facetest1.jpg")
face1_face_encoding = face_recognition.face_encodings(face1_image)[0]
known_face_encodings = [
face1_face_encoding,
]
known_face_names = [
username
]
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
scanThread.start()
while True:
ret, Frame = video_capture.read()
small_frame = cv2.resize(Frame, (0, 0), fx=0.25, fy=0.25)
rgb_small_frame = small_frame[:, :, ::-1]
if process_this_frame:
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
if TimeWaiting == False:
GrantedD()
face_names.append(name)
process_this_frame = not process_this_frame
for (top, right, bottom, left), name in zip(face_locations, face_names):
top *= 4
right *= 4
bottom *= 4
left *= 4
cv2.rectangle(Frame, (left, top), (right, bottom), (0, 0, 255), 2)
cv2.rectangle(Frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(Frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
Frame = cv2.resize(Frame, (300,250))
cv2.imshow('Video', Frame)
cv2.moveWindow('Video', 40,30)
cv2.resizeWindow('Video', 300,250)
if cv2.waitKey(1) & 0xFF == ord('q') or (c <= 0):
process_this_frame = False
video_capture.release()
> SCS = SCSound.play()
> while SCS.get_busy() == True:
> time.sleep(0.01)
> SCSound.stop
> PU2S = PU2Sound.play()
> while PU2S.get_busy() == True:
> time.sleep(0.1)
> PU2Sound.stop
> IPCS = IPCSound.play()
> while IPCS.get_busy() == True:
> time.sleep(0.1)
> IPCSound.stop
title()
The only instance of NoneType is None, so your SCS being None is triggering the exception. You expected SCSound.play() to return a channel but it returned None instead because it couldn't find a channel.

Camera Not Opening when Deployed On server Django/Python

Unable to open Camera On server, where its the same copy with Same settings
cam = cv2.VideoCapture(0)
Used this to initialise camera (webcam)
and below code for processing the data stream and below image shows the error on server click here to view the error
def identify_faces(video_capture):
buf_length = 10
known_conf = 6
buf = [[]] * buf_length
i = 0
process_this_frame = True
while True:
ret, frame = video_capture.read()
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
rgb_frame = small_frame[:, :, ::-1]
if process_this_frame:
predictions = predict(rgb_frame, model_path="folder/folder/models/trainedmodel.clf")
process_this_frame = not process_this_frame
face_names = []
for name, (top, right, bottom, left) in predictions:
top *= 4
right *= 4
bottom *= 4
left *= 4
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
identify1(frame, name, buf, buf_length, known_conf)
face_names.append(name)
buf[i] = face_names
i = (i + 1) % buf_length
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()

How to connect RSTP with OpenCV?

Hi I try to connect OpenCV, Face recognition Library with my RSTP CCTV. But when i run my code i getting an error like below. Hope someone can help me regarding on this issue. I attach here my code
Error:
[h264 # 000002a9659dba00] error while decoding MB 10 94, bytestream -5
[h264 # 000002a953c1e2c0] Invalid NAL unit 8, skipping. Traceback
(most recent call last): File "face-present.py", line 125, in
cv2.imshow('Video', frame) cv2.error: OpenCV(4.0.0) C:\projects\opencv-python\opencv\modules\highgui\src\window.cpp:350:
error: (-215:Assertion failed) size.width>0 && size.height>0 in
function 'cv::imshow'
mycode.py
import face_recognition
import cv2
video_capture = cv2.VideoCapture("rtsp://admin:adam12345#192.168.0.158:554/Streaming/channels/101")
roy_image = face_recognition.load_image_file("images/roy1.jpg")
roy_face_encoding = face_recognition.face_encodings(roy_image,num_jitters=100)[0]
# Load a second sample picture and learn how to recognize it.
henrik_image = face_recognition.load_image_file("images/Mr_henrik.jpg")
henrik_face_encoding = face_recognition.face_encodings(henrik_image,num_jitters=100)[0]
stefan_image = face_recognition.load_image_file("images/stefan.jpg")
stefan_face_encoding = face_recognition.face_encodings(stefan_image,num_jitters=100)[0]
hairi_image = face_recognition.load_image_file("images/Hairi.jpeg")
hairi_face_encoding = face_recognition.face_encodings(hairi_image,num_jitters=100)[0]
syam_image = face_recognition.load_image_file("images/syam1.jpeg")
syam_face_encoding = face_recognition.face_encodings(syam_image,num_jitters=100)[0]
#print(syam_face_encoding)
# Create arrays of known face encodings and their names
known_face_encodings = [
roy_face_encoding,
stefan_face_encoding,
henrik_face_encoding,
hairi_face_encoding,
syam_face_encoding
]
known_face_names = [
"roy",
"stefan",
"henrik",
"hairi",
"syam"
]
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
# # Process video frame frequency
# process_frame_freq = 4
# process_this_frame = process_frame_freq
while True:
if video_capture.isOpened():
# Grab a single frame of video
ret, frame = video_capture.read()
if ret:
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, None, fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Only process every other frame of video to save time
if process_this_frame:
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
if face_locations: # prevent manipulation of null variable
top, right, bottom, left = face_locations[0]
# faces_recognized += 1
# print("[%i] Face recognized..." % faces_recognized)
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
cropped_face = frame[top:bottom, left:right]
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
#print(face_encodings)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding,tolerance=0.5)
name = "Unknown"
# If a match was found in known_face_encodings, just use the first one.
if True in matches:
first_match_index = matches.index(True)
name = known_face_names[first_match_index]
print(name)
face_names.append(name)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
#Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()

face recognition by opencv python

i have a code for face recognition by open cv python
import face_recognition as fr
import os
import cv2
import face_recognition
import numpy as np
from time import sleep
def get_encoded_faces():
"""
looks through the faces folder and encodes all
the faces
:return: dict of (name, image encoded)
"""
encoded = {}
for dirpath, dnames, fnames in os.walk("./faces"):
for f in fnames:
if f.endswith(".jpg") or f.endswith(".png"):
face = fr.load_image_file("faces/" + f)
encoding = fr.face_encodings(face)[0]
encoded[f.split(".")[0]] = encoding
return encoded
def unknown_image_encoded(img):
"""
encode a face given the file name
"""
face = fr.load_image_file("faces/" + img)
encoding = fr.face_encodings(face)[0]
return encoding
def classify_face(im):
"""
will find all of the faces in a given image and label
them if it knows what they are
:param im: str of file path
:return: list of face names
"""
faces = get_encoded_faces()
faces_encoded = list(faces.values())
known_face_names = list(faces.keys())
img = cv2.imread(im, 1)
#img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)
#img = img[:,:,::-1]
face_locations = face_recognition.face_locations(img)
unknown_face_encodings = face_recognition.face_encodings(img, face_locations)
face_names = []
for face_encoding in unknown_face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(faces_encoded, face_encoding)
name = "Unknown"
# use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(faces_encoded, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
face_names.append(name)
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Draw a box around the face
cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)
# Draw a label with a name below the face
cv2.rectangle(img, (left-20, bottom -15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(img, name, (left -20, bottom + 15), font, 1.0, (255, 255, 255), 2)
# Display the resulting image
while True:
cv2.imshow('Video', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
return face_names
print(classify_face("test-image");
it is taking image that we save to test but i want it to take image from camera then recognize it So any one here can please tell how i can change test image so it will take test image from camera not what we save in data base to test ...........
If you want to check single frame from camera then simply use
cap = cv2.VideoCapture(0)
status, img = cap.read()
instead of
img = cv2.imread(im, 1)
If you want to check faces in stream then you need to put all in loop
def classify_face(im):
faces = get_encoded_faces()
faces_encoded = list(faces.values())
known_face_names = list(faces.keys())
cap = cv2.VideoCapture(0)
while True:
status, img = cap.read()
face_locations = face_recognition.face_locations(img)
unknown_face_encodings = face_recognition.face_encodings(img, face_locations)
face_names = []
for face_encoding in unknown_face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(faces_encoded, face_encoding)
name = "Unknown"
# use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(faces_encoded, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
face_names.append(name)
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Draw a box around the face
cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)
# Draw a label with a name below the face
cv2.rectangle(img, (left-20, bottom -15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(img, name, (left -20, bottom + 15), font, 1.0, (255, 255, 255), 2)
cv2.imshow('Video', img)
print(face_names)
if cv2.waitKey(1) & 0xFF == ord('q'):
return
EDIT: Full code (with small changes) for work with video stream. It works for me. But if previous function did works for you then this version may not help you - function classify_face is almost the same as in previous example.
import os
import cv2
import face_recognition as fr
import numpy as np
def get_encoded_faces(folder="./faces"):
"""
looks through the faces folder and encodes all
the faces
:return: dict of (name, image encoded)
"""
encoded = {}
for dirpath, dnames, fnames in os.walk(folder):
for f in fnames:
if f.lower().endswith(".jpg") or f.lower().endswith(".png"):
fullpath = os.path.join(dirpath, f)
face = fr.load_image_file(fullpath)
# normally face_encodings check if face is on image - and it can get empty result
height, width = face.shape[:2]
encoding = fr.face_encodings(face, known_face_locations=[(0, width, height, 0)])
if len(encoding) > 0:
encoding = encoding[0]
encoded[f.split(".")[0]] = encoding
return encoded
def classify_face(im):
"""
will find all of the faces in a given image and label
them if it knows what they are
:param im: str of file path
:return: list of face names
"""
faces = get_encoded_faces()
faces_encoded = list(faces.values())
known_face_names = list(faces.keys())
cap = cv2.VideoCapture(0)
while True:
status, img = cap.read()
#print('status:', status)
face_locations = fr.face_locations(img)
unknown_face_encodings = fr.face_encodings(img, face_locations)
face_names = []
for location, face_encoding in zip(face_locations, unknown_face_encodings): # I moved `zip()` in this place
# See if the face is a match for the known face(s)
matches = fr.compare_faces(faces_encoded, face_encoding)
name = "Unknown"
# use the known face with the smallest distance to the new face
face_distances = fr.face_distance(faces_encoded, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
face_names.append(name)
top, right, bottom, left = location
# Draw a box around the face
cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)
# Draw a label with a name below the face
cv2.rectangle(img, (left-20, bottom -15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(img, name, (left -20, bottom + 15), font, 1.0, (255, 255, 255), 2)
print('face_names:', face_names)
cv2.imshow('Video', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
return face_names
# --- main ---
print(classify_face("test-image"))
cv2.destroyAllWindows()

OpenCV returns an error when I use "face_recognition" and my webcam to identify different faces?

I was trying to repeat the code of an example of face_recognition(from: https://github.com/ageitgey/face_recognition/blob/master/examples/facerec_from_webcam_faster.py).
Unfortunately I gotta an error of opencv:
---------------------------------------------------------------------------
error Traceback (most recent call last)
<ipython-input-11-8bd8ed75eefa> in <module>()
3 # small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
4 # rgb_small_frame = small_frame[:, :, ::-1]
----> 5 rgb_small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)[:, :, ::-1]
6 if process_this_frame:
7 face_locations = face_recognition.face_locations(rgb_small_frame)
error: OpenCV(3.4.2) C:\projects\opencv-python\opencv\modules\imgproc\src\resize.cpp:4044: error: (-215:Assertion failed) !ssize.empty() in function 'cv::resize'
On the issues I notice someone also has the same question but there is no actual solution from the article. Is there anyone can help me? Thank you in advance.
ENVIRONMENT:
python:3.6
Opencv:3.4.2
dlib:19.15.0
face_recognition:1.2.3
Below is my code :
import face_recognition
import cv2
import os
os.chdir(r'E:\1. Learning\face_recognition')
video_capture = cv2.VideoCapture(0)
linhan_image = face_recognition.load_image_file("linhan.jpg")
linhan_face_encoding = face_recognition.face_encodings(linhan_image, num_jitters=100)[0]
ketian_image = face_recognition.load_image_file("ketian.jpg")
ketian_face_encoding = face_recognition.face_encodings(ketian_image, num_jitters=100)[0]
linwanqi_image = face_recognition.load_image_file("linwanqi.jpg")
linwanqi_face_encoding = face_recognition.face_encodings(linwanqi_image, num_jitters=100)[0]
known_face_encodings = [
linhan_face_encoding,
ketian_face_encoding,
linwanqi_face_encoding
]
known_face_names = [
"linhan",
"ketian",
"linwanqi"
]
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
ret, frame = video_capture.read()
# small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# rgb_small_frame = small_frame[:, :, ::-1]
rgb_small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)[:, :, ::-1]
if process_this_frame:
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.4)
name = "Unknown"
if True in matches:
first_match_index = matches.index(True)
name = known_face_names[first_match_index]
face_names.append(name)
process_this_frame = not process_this_frame
for (top, right, bottom, left), name in zip(face_locations, face_names):
top *= 4
right *= 4
bottom *= 4
left *= 4
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()

Categories