The list in a QThread class can't get data - Python

I intend to use face_recognition's face_locations function to find the position of a face in the image and draw a box around it. The image is then displayed in a GUI designed in PyQt5.
However, only the camera image is displayed; there is no box.
After debugging, I found that the list that is supposed to receive the face locations never gets any data, even though the same code works in programs that do not use PyQt5 threads.
Here is my code.
QThread class:
class camera_Thread(QThread):
    def __init__(self, sign_face_ui_obj):
        super(camera_Thread, self).__init__()
        self.sign_face_ui_obj = sign_face_ui_obj
        self.camera = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        self.face_names = ""

    def run(self):
        process_this_frame = True
        while True:
            try:
                ret, frame = self.camera.read()
                # Resize frame of video to 1/4 size for faster face recognition processing
                small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            except:
                continue
            # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
            small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            rgb_small_frame = small_frame
            if process_this_frame:
                # Find all the faces and face encodings in the current frame of video
                face_locations = face_recognition.face_locations(rgb_small_frame)
            process_this_frame = not process_this_frame
            for (top, right, bottom, left), name in zip(face_locations, self.face_names):
                # Scale back up face locations since the frame we detected in was scaled to 1/4 size
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4
                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                # Draw a label with a name below the face
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            show = QImage(frame.data, frame.shape[1], frame.shape[0], QImage.Format_RGB888)
            self.sign_face_ui_obj.ui.cameraLb.setPixmap(QPixmap.fromImage(show))
            self.sign_face_ui_obj.ui.cameraLb.setScaledContents(True)
MainWindow:

class Sign_Face_Logic(QMainWindow):  # base class assumed; the posted snippet omitted the class line
    def __init__(self, parent=None):
        super(Sign_Face_Logic, self).__init__(parent)
        self.ui = Ui_SignFace()
        self.ui.setupUi(self)

if __name__ == '__main__':
    App = QApplication(sys.argv)
    user_interface = Sign_Face_Logic()
    th = funtion_ui_v1.camera_Thread(user_interface)
    th.start()
    user_interface.show()
    sys.exit(App.exec_())
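Two things stand out in the code above. First, self.face_names is initialized to an empty string, so zip(face_locations, self.face_names) yields nothing and the drawing loop never runs, which would explain the missing box even when face_locations is populated. Second, Qt widgets should not be touched from a worker thread; the usual PyQt5 pattern is to emit the finished image through a signal and let the main thread update the label. A minimal sketch of that pattern (class, signal, and widget names are illustrative):

import sys

import cv2
import face_recognition
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtGui import QImage, QPixmap

class CameraThread(QThread):
    # Emit the rendered frame instead of touching widgets directly
    frame_ready = pyqtSignal(QImage)

    def run(self):
        camera = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        while True:
            ret, frame = camera.read()
            if not ret:
                continue
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            # Iterate over the locations directly; no zip against names is needed just to draw boxes
            for top, right, bottom, left in face_recognition.face_locations(small_frame):
                cv2.rectangle(frame, (left * 4, top * 4), (right * 4, bottom * 4), (0, 0, 255), 2)
            h, w, ch = frame.shape
            # .copy() detaches the QImage from the numpy buffer before it is reused
            self.frame_ready.emit(QImage(frame.data, w, h, ch * w, QImage.Format_RGB888).copy())

# In the main window, connect the signal to a slot that runs on the GUI thread:
# self.thread = CameraThread()
# self.thread.frame_ready.connect(lambda img: self.ui.cameraLb.setPixmap(QPixmap.fromImage(img)))
# self.thread.start()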

Related

My Raspberry Pi cam is not being detected

I am trying to use a Raspberry Pi camera, but it is not being detected. I tried a different camera to check whether the first one was broken, and it isn't. I had it working before, but for some reason it no longer works.
I'm using a Pi 4 B.
I get this error:

Traceback (most recent call last):
  File "/home/pi/Desktop/FaceDec_Robot/Robot-python/main.py", line 39, in <module>
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
cv2.error: OpenCV(4.5.1) ../modules/imgproc/src/resize.cpp:4051: error: (-215:Assertion failed) !ssize.empty() in function 'resize'

This means the frame array is empty, but I don't know why.
Code:

import cv2
import os
import face_recognition
import numpy as np

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture("/dev/video0", cv2.CAP_V4L)

# Loop over all the images in the folder and put them in a list
known_face_encodings = []
known_face_names = []
for file in os.listdir("Tested/known/"):
    if file.endswith(".jpg"):
        # Load the image
        known_image = face_recognition.load_image_file("Tested/known/" + file)
        # Get the face encoding
        known_face_encoding = face_recognition.face_encodings(known_image)[0]
        # Add the encoding to the list
        known_face_encodings.append(known_face_encoding)
        # Add the name to the list
        known_face_names.append(file[:-4])

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

# Loop over the frames
while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]
    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]
            face_names.append(name)
    process_this_frame = not process_this_frame
    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    # Display the resulting image
    # cv2.imshow('Video', frame)
    # Save the image
    cv2.imwrite("Tested/test.jpg", frame)
I have also tried setting the video capture to 0, but that still does not work.
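Since the traceback shows resize receiving an empty image, video_capture.read() is returning ret == False. A small diagnostic sketch, assuming the same device path, that verifies the capture is open and skips failed reads instead of passing an empty array to cv2.resize (on older Raspberry Pi OS images the camera module may only appear as /dev/video0 after the bcm2835-v4l2 driver is loaded or the legacy camera is enabled):

import cv2

video_capture = cv2.VideoCapture("/dev/video0", cv2.CAP_V4L)
if not video_capture.isOpened():
    # The device could not be opened at all (wrong path, camera not enabled, cabling, etc.)
    raise RuntimeError("Could not open /dev/video0 - check raspi-config / driver / cabling")

while True:
    ret, frame = video_capture.read()
    if not ret or frame is None:
        # Skip this iteration instead of resizing an empty frame
        print("Empty frame from camera, retrying...")
        continue
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # ... rest of the processing ...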

OpenCV: can we find the first and last frames in which a bounding box is moving?

Working in Python, I have a video where I:
First draw a bounding box (bbox).
Next, I need to know the frame on which the bbox starts moving.
Finally, I need to know the last frame on which the bbox stops moving.
Initial code:
import cv2
import time
import numpy as np

# Open video
cap = cv2.VideoCapture('MyVideo.avi')

# Total number of frames
totalFrames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
print(totalFrames)

# MOSSE tracker
tracker = cv2.legacy_TrackerMOSSE.create()

# Take first frame
ret, frame = cap.read()
image = cv2.resize(frame, (1600, 900))

# Create bounding box
bbox = cv2.selectROI("Tracking", image, False)
tracker.init(image, bbox)

# Draw bounding box
def drawBox(image, bbox):
    x, y, w, h = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
    cv2.rectangle(image, (x, y), ((x + w), (y + h)), (255, 0, 255), 3, 1)
    cv2.putText(image, "Tracking", (75, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

# Work within video
while True:
    success, frame = cap.read()
    # Resize screen size
    image = cv2.resize(frame, (1600, 900))
    # Tracker
    success, bbox1 = tracker.update(image)
This is the part where I believe I could figure out when my bbox starts moving and print the frame of interest:
    if success:
        drawBox(image, bbox1)
        if bbox1 == bbox:
            print('static')
        else:
            print(cap.get(cv2.CAP_PROP_POS_FRAMES))
    else:
        cv2.putText(image, "Lost", (75, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
Remaining code:

    # Show video
    cv2.imshow("feed", image)
    if cv2.waitKey(40) == 27:
        break

cv2.destroyAllWindows()
cap.release()
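One way to approach this, sketched below under the assumption that the tracker reports slightly different coordinates once the object moves: compare each updated box against the previous one with a small pixel tolerance, record the frame index the first time the delta exceeds it, and keep updating a "last moving" index while the box keeps changing (the tolerance value and variable names are illustrative):

import cv2

MOVE_TOLERANCE = 2.0  # pixels; tune for tracker jitter

def boxes_differ(a, b, tol=MOVE_TOLERANCE):
    # True when any coordinate moved more than tol pixels
    return any(abs(ai - bi) > tol for ai, bi in zip(a, b))

cap = cv2.VideoCapture('MyVideo.avi')
tracker = cv2.legacy_TrackerMOSSE.create()

ret, frame = cap.read()
image = cv2.resize(frame, (1600, 900))
bbox = cv2.selectROI("Tracking", image, False)
tracker.init(image, bbox)

prev_bbox = bbox
first_moving_frame = None
last_moving_frame = None

while True:
    success, frame = cap.read()
    if not success:
        break
    image = cv2.resize(frame, (1600, 900))
    ok, bbox1 = tracker.update(image)
    if ok:
        frame_idx = cap.get(cv2.CAP_PROP_POS_FRAMES)
        if boxes_differ(bbox1, prev_bbox):
            if first_moving_frame is None:
                first_moving_frame = frame_idx  # first frame with movement
            last_moving_frame = frame_idx       # keeps updating while it moves
        prev_bbox = bbox1

cap.release()
print("starts moving at frame:", first_moving_frame)
print("stops moving at frame:", last_moving_frame)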

Python face-recognition package with pickled data

How can I integrate a pickled face_data.dat into the Python face-recognition package's default example that shows a real-time webcam view?
import face_recognition
import cv2
import numpy as np

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)

# Load a sample picture and learn how to recognize it.
obama_image = face_recognition.load_image_file("obama.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]

# Load a second sample picture and learn how to recognize it.
biden_image = face_recognition.load_image_file("biden.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]

# Create arrays of known face encodings and their names
known_face_encodings = [
    obama_face_encoding,
    biden_face_encoding
]
known_face_names = [
    "Barack Obama",
    "Joe Biden"
]

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_frame = frame[:, :, ::-1]
    # Find all the faces and face encodings in the frame of video
    face_locations = face_recognition.face_locations(rgb_frame)
    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
    # Loop through each face in this frame of video
    for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
        # See if the face is a match for the known face(s)
        matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
        name = "Unknown"
        # If a match was found in known_face_encodings, just use the first one.
        # if True in matches:
        #     first_match_index = matches.index(True)
        #     name = known_face_names[first_match_index]
        # Or instead, use the known face with the smallest distance to the new face
        face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
            name = known_face_names[best_match_index]
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    # Display the resulting image
    cv2.imshow('Video', frame)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
I've pickled the images and tried modifying the program as follows, but it's showing the wrong names.
import face_recognition
import cv2
import numpy as np
import pickle

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)

# Load face encodings
with open('dataset_faces.dat', 'rb') as f:
    all_face_encodings = pickle.load(f)

# Grab the list of names and the list of encodings
face_names = list(all_face_encodings.keys())
face_encodings = np.array(list(all_face_encodings.values()))

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_frame = frame[:, :, ::-1]
    # Find all the faces and face encodings in the frame of video
    face_locations = face_recognition.face_locations(rgb_frame)
    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
    # Loop through each face in this frame of video
    for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
        # See if the face is a match for the known face(s)
        matches = face_recognition.compare_faces(face_encodings, face_encoding)
        name = "Unknown"
        # If a match was found in known_face_encodings, just use the first one.
        if True in matches:
            first_match_index = matches.index(True)
            name = face_names[first_match_index]
        # Or instead, use the known face with the smallest distance to the new face
        # face_distances = face_recognition.face_distance(face_encodings, face_encoding)
        # best_match_index = np.argmin(face_distances)
        # if matches[best_match_index]:
        #     name = face_names[best_match_index]
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    # Display the resulting image
    cv2.imshow('Video', frame)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
I've checked the pickled data using this script, and it shows the correct output for the relevant image.
import face_recognition
import pickle
import numpy as np

# Load face encodings
with open('dataset_faces.dat', 'rb') as f:
    all_face_encodings = pickle.load(f)

# Grab the list of names and the list of encodings
face_names = list(all_face_encodings.keys())
face_encodings = np.array(list(all_face_encodings.values()))

# Try comparing an unknown image
unknown_image = face_recognition.load_image_file("obama.jpg")
unknown_face = face_recognition.face_encodings(unknown_image)
result = face_recognition.compare_faces(face_encodings, unknown_face)

# Print the result as a list of names with True/False
names_with_result = list(zip(face_names, result))
print(names_with_result)
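A likely cause of the wrong names in the modified program: the known encodings are loaded into face_encodings, but that same variable is then overwritten inside the loop with the encodings found in each frame, so compare_faces ends up comparing the frame against itself. Keeping the pickled data under distinct names avoids the clash; a minimal sketch, assuming dataset_faces.dat maps names to encodings as in the scripts above:

import pickle

import face_recognition
import numpy as np

with open('dataset_faces.dat', 'rb') as f:
    all_face_encodings = pickle.load(f)

# Distinct names for the known data so the per-frame variables can't shadow them
known_face_names = list(all_face_encodings.keys())
known_face_encodings = np.array(list(all_face_encodings.values()))

# Inside the frame loop:
# face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
# for face_encoding in face_encodings:
#     matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
#     face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
#     best_match_index = np.argmin(face_distances)
#     name = known_face_names[best_match_index] if matches[best_match_index] else "Unknown"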

How to connect RTSP with OpenCV?

Hi, I am trying to connect OpenCV and the face_recognition library to my RTSP CCTV stream, but when I run my code I get the error below. I hope someone can help me with this issue. I attach my code here.
Error:
[h264 @ 000002a9659dba00] error while decoding MB 10 94, bytestream -5
[h264 @ 000002a953c1e2c0] Invalid NAL unit 8, skipping.
Traceback (most recent call last):
  File "face-present.py", line 125, in <module>
    cv2.imshow('Video', frame)
cv2.error: OpenCV(4.0.0) C:\projects\opencv-python\opencv\modules\highgui\src\window.cpp:350: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'
mycode.py
import face_recognition
import cv2

video_capture = cv2.VideoCapture("rtsp://admin:adam12345#192.168.0.158:554/Streaming/channels/101")

roy_image = face_recognition.load_image_file("images/roy1.jpg")
roy_face_encoding = face_recognition.face_encodings(roy_image, num_jitters=100)[0]

# Load a second sample picture and learn how to recognize it.
henrik_image = face_recognition.load_image_file("images/Mr_henrik.jpg")
henrik_face_encoding = face_recognition.face_encodings(henrik_image, num_jitters=100)[0]

stefan_image = face_recognition.load_image_file("images/stefan.jpg")
stefan_face_encoding = face_recognition.face_encodings(stefan_image, num_jitters=100)[0]

hairi_image = face_recognition.load_image_file("images/Hairi.jpeg")
hairi_face_encoding = face_recognition.face_encodings(hairi_image, num_jitters=100)[0]

syam_image = face_recognition.load_image_file("images/syam1.jpeg")
syam_face_encoding = face_recognition.face_encodings(syam_image, num_jitters=100)[0]
# print(syam_face_encoding)

# Create arrays of known face encodings and their names
known_face_encodings = [
    roy_face_encoding,
    stefan_face_encoding,
    henrik_face_encoding,
    hairi_face_encoding,
    syam_face_encoding
]
known_face_names = [
    "roy",
    "stefan",
    "henrik",
    "hairi",
    "syam"
]

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

# # Process video frame frequency
# process_frame_freq = 4
# process_this_frame = process_frame_freq

while True:
    if video_capture.isOpened():
        # Grab a single frame of video
        ret, frame = video_capture.read()
        if ret:
            # Resize frame of video to 1/4 size for faster face recognition processing
            small_frame = cv2.resize(frame, None, fx=0.25, fy=0.25)
            # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
            rgb_small_frame = small_frame[:, :, ::-1]
            # Only process every other frame of video to save time
            if process_this_frame:
                # Find all the faces and face encodings in the current frame of video
                face_locations = face_recognition.face_locations(rgb_small_frame)
                if face_locations:  # prevent manipulation of null variable
                    top, right, bottom, left = face_locations[0]
                    # faces_recognized += 1
                    # print("[%i] Face recognized..." % faces_recognized)
                    cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                    cropped_face = frame[top:bottom, left:right]
                    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
                    # print(face_encodings)
                    face_names = []
                    for face_encoding in face_encodings:
                        # See if the face is a match for the known face(s)
                        matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.5)
                        name = "Unknown"
                        # If a match was found in known_face_encodings, just use the first one.
                        if True in matches:
                            first_match_index = matches.index(True)
                            name = known_face_names[first_match_index]
                            print(name)
                        face_names.append(name)
            process_this_frame = not process_this_frame
            # Display the results
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                # Scale back up face locations since the frame we detected in was scaled to 1/4 size
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4
                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                # Draw a label with a name below the face
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        # Display the resulting image
        cv2.imshow('Video', frame)
        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
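The decoder messages suggest the stream is delivering corrupt frames, after which read() returns ret == False and an empty frame; calling cv2.imshow on that empty frame produces exactly this assertion. A defensive sketch, assuming the same RTSP URL, that skips bad frames and reopens the stream after repeated failures (the failure threshold is arbitrary; also note that if the password really contains '#', it may need percent-encoding as %23 in the URL):

import cv2

RTSP_URL = "rtsp://admin:adam12345#192.168.0.158:554/Streaming/channels/101"

video_capture = cv2.VideoCapture(RTSP_URL)
failures = 0

while True:
    ret, frame = video_capture.read()
    if not ret or frame is None:
        failures += 1
        if failures > 30:
            # Too many bad frames in a row: reopen the stream
            video_capture.release()
            video_capture = cv2.VideoCapture(RTSP_URL)
            failures = 0
        continue  # never pass an empty frame to imshow
    failures = 0
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()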

Importing Another File

I have some code here and it is a little sloppy. Is there any way I could put the images, encodings, and names into separate files and import them into the main code? I have tried putting them into a separate file and importing them, but I still get a "not defined" error. Can anyone help me figure out why, or how to fix it?
main code
import face_recognition
import cv2
import numpy as np

# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)

me_image = face_recognition.load_image_file("me.jpg")
me_face_encoding = face_recognition.face_encodings(me_image)[0]

mom_image = face_recognition.load_image_file("mom.jpg")
mom_face_encoding = face_recognition.face_encodings(mom_image)[0]

mattm_image = face_recognition.load_image_file("mattm.jpg")
mattm_face_encoding = face_recognition.face_encodings(mattm_image)[0]

soph_image = face_recognition.load_image_file("soph.jpg")
soph_face_encoding = face_recognition.face_encodings(soph_image)[0]

known_face_encodings = [
    me_face_encoding,
    mom_face_encoding,
    mattm_face_encoding,
    soph_face_encoding
]
known_face_names = [
    "Jacob North",
    "Shelly North",
    "Matt Mersino",
    "Sophia North"
]

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]
    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            # # If a match was found in known_face_encodings, just use the first one.
            # if True in matches:
            #     first_match_index = matches.index(True)
            #     name = known_face_names[first_match_index]
            # Or instead, use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]
            face_names.append(name)
    process_this_frame = not process_this_frame
    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    # Display the resulting image
    cv2.imshow('Video', frame)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
Code I Wish To Separate
me_image = face_recognition.load_image_file("me.jpg")
me_face_encoding = face_recognition.face_encodings(me_image)[0]

mom_image = face_recognition.load_image_file("mom.jpg")
mom_face_encoding = face_recognition.face_encodings(mom_image)[0]

mattm_image = face_recognition.load_image_file("mattm.jpg")
mattm_face_encoding = face_recognition.face_encodings(mattm_image)[0]

soph_image = face_recognition.load_image_file("soph.jpg")
soph_face_encoding = face_recognition.face_encodings(soph_image)[0]

known_face_encodings = [
    me_face_encoding,
    mom_face_encoding,
    mattm_face_encoding,
    soph_face_encoding
]
known_face_names = [
    "Jacob North",
    "Shelly North",
    "Matt Mersino",
    "Sophia North"
]
I just want to make it neater and easier to access.
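A sketch of the split, assuming a helper module named known_faces.py (hypothetical name) in the same folder as the main script; note that the separate file must do its own import face_recognition, which is the usual cause of a "not defined" error when code is moved out of the main script:

# known_faces.py (hypothetical module name)
import face_recognition

def _encode(path):
    # Load an image file and return the first face encoding found in it
    return face_recognition.face_encodings(face_recognition.load_image_file(path))[0]

known_face_encodings = [
    _encode("me.jpg"),
    _encode("mom.jpg"),
    _encode("mattm.jpg"),
    _encode("soph.jpg"),
]

known_face_names = [
    "Jacob North",
    "Shelly North",
    "Matt Mersino",
    "Sophia North",
]

Then, in the main script, the separated block is replaced with a single import:

from known_faces import known_face_encodings, known_face_names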
