Camera not opening when deployed on a server (Django/Python)

I am unable to open the camera on the server, even though it is the same copy of the code with the same settings.
cam = cv2.VideoCapture(0)
I used this to initialise the camera (webcam), and the code below to process the video stream. The error appears on the server (screenshot in the original post).
def identify_faces(video_capture):
    buf_length = 10
    known_conf = 6
    buf = [[]] * buf_length
    i = 0
    process_this_frame = True
    while True:
        ret, frame = video_capture.read()
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        rgb_frame = small_frame[:, :, ::-1]
        if process_this_frame:
            predictions = predict(rgb_frame, model_path="folder/folder/models/trainedmodel.clf")
        process_this_frame = not process_this_frame
        face_names = []
        for name, (top, right, bottom, left) in predictions:
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
            identify1(frame, name, buf, buf_length, known_conf)
            face_names.append(name)
        buf[i] = face_names
        i = (i + 1) % buf_length
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
    cv2.destroyAllWindows()
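The screenshot is not reproduced here, but a likely cause (an assumption, not stated in the post) is that cv2.VideoCapture(0) opens a camera attached to the machine running the Django process; a typical server has no webcam, so the capture fails silently, read() returns None and the later cv2.resize call crashes. A minimal sketch that surfaces the failure early, using only standard OpenCV calls:

import cv2

def open_camera(index=0):
    # On a headless server there is usually no video device at all, so this check fails there.
    cam = cv2.VideoCapture(index)
    if not cam.isOpened():
        raise RuntimeError("No capture device available on this host (index %d)" % index)
    return cam

cam = open_camera(0)
ret, frame = cam.read()
if not ret or frame is None:
    raise RuntimeError("Camera opened but did not return a frame")

If the goal is to use the site visitor's webcam, the frames have to be captured in the browser (for example with getUserMedia) and sent to the Django backend; a server-side VideoCapture(0) can only ever see a camera plugged into the server itself.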

Related

CV2: closing webcam and reopening in different function

In this code I want to display three classifications, "Known", "Unknown" and "Hostile", using OpenCV and face_recognition.
The first function (firstCase()) handles "Known" and "Unknown" ("Sconosciuto" in the code), and that part works fine.
What I want is this: when the program is in secondCase(), the "Hostile" ("Ostile") case, and I press "q", the webcam must close and the function that takes me back to the "Known"/"Unknown" cases must be launched (firstCase() is called at the end of secondCase()), reopening the webcam and reclassifying.
You can find this call in the last part of secondCase().
I have this error:
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
cv2.error: OpenCV(4.5.5) /Users/runner/work/opencv-python/opencv-python/opencv/modules/imgproc/src/resize.cpp:4052: error: (-215:Assertion failed) !ssize.empty() in function 'resize'
I know this error shows up because it is trying to resize something that has already been manipulated, but I don't know how to fix it.
This is the code:
video_capture = cv2.VideoCapture(0)
face2 = face_recognition.load_image_file(os.path.abspath("path2"))
face2_face_encoding = face_recognition.face_encodings(face2)[0]
known_face_encodings = [
    face2_face_encoding
]
known_face_names = [
    "Giulia"
]
face_location = []
face_encodings = []
face_names = []

def firstCase():
    while True:
        ret, frame = video_capture.read()
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        rgb_small_frame = small_frame[:, :, ::-1]
        process_this_frame = True
        face = False
        if process_this_frame:
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            face_names = []
            for face_encoding in face_encodings:
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.6)
                name = "Sconosciuto"
                face = True
                face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]
                    face = False
                face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                face_names.append(name)
        process_this_frame = not process_this_frame
        if face:
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4
                cv2.rectangle(frame, (left, top), (right, bottom), (40, 48, 48), 2)
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (40, 48, 48), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.6, (255, 255, 255), 1)
            cv2.imshow('Video', frame)
        else:
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 255, 0), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.6, (255, 255, 255), 1)
            cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
    cv2.destroyAllWindows()

def secondCase():
    while True:
        ret, frame = video_capture.read()
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        rgb_small_frame = small_frame[:, :, ::-1]
        process_this_frame = True
        face = False
        if process_this_frame:
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            face_names = []
            for face_encoding in face_encodings:
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.6)
                name = "Ostile"
                face = True
                face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]
                    face = False
                face_names.append(name)
        process_this_frame = not process_this_frame
        if face:
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 0), 2)
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 0), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.6, (255, 255, 255), 1)
            cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
    cv2.destroyAllWindows()
    firstCase()

print("Inizio scansione :" + time.ctime() + "\nCitofono attivo. Citofona premendo a... ")
sel = selectors.DefaultSelector()
sel.register(sys.stdin, selectors.EVENT_READ)
sys.stdout.flush()
pairs = sel.select(timeout=5)
if pairs:
    if input("Citofona premendo 'a' \n"):
        firstCase()
else:
    secondCase()
Thank you all for helping.
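The !ssize.empty() assertion is raised by cv2.resize when it receives an empty image; once video_capture.release() has run, every further read() returns (False, None), so the next resize call fails. One possible restructuring (a sketch only; run_case and classify_frame are made-up names, not from the post) is to open a fresh capture inside each case and to guard read() before resizing:

import cv2

def run_case(classify_frame):
    video_capture = cv2.VideoCapture(0)        # fresh capture per case, so reopening after release works
    try:
        while True:
            ret, frame = video_capture.read()
            if not ret or frame is None:        # released or not delivering frames: stop instead of resizing None
                break
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            classify_frame(frame, small_frame)  # hypothetical per-case recognition and drawing
            cv2.imshow('Video', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        video_capture.release()
        cv2.destroyAllWindows()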

AttributeError: 'NoneType' object has no attribute 'get_busy'

I'm using pygame to play some sounds. I have the code
global PU1Sound
PU1Sound = pygame.mixer.Sound("PU1.mp3")
and later using
PU1S = PU1Sound.play()
while PU1S.get_busy() == True:
    time.sleep(0.01)
PU1Sound.stop
and this works fine, playing the sound and waiting for it to finish.
but later in the code I use
SCS = SCSound.play()
while SCS.get_busy() == True:
    time.sleep(0.01)
SCSound.stop
but for some reason this does not work and instead results in the following error
while SCS.get_busy() == True:
AttributeError: 'NoneType' object has no attribute 'get_busy'
But if I put the code that causes the error below the code that doesn't, it works as it should, even though nothing about the placement should be causing the error.
so this works
def VideoD():
    PU1S = PU1Sound.play()
    while PU1S.get_busy() == True:
        time.sleep(0.01)
    PU1Sound.stop
    SVS = ScanVSound.play()
    while SVS.get_busy() == True:
        time.sleep(0.01)
    ScanVSound.stop
    # --- the block highlighted in the original post (placed before the camera loop here) ---
    SCS = SCSound.play()
    while SCS.get_busy() == True:
        time.sleep(0.01)
    SCSound.stop
    PU2S = PU2Sound.play()
    while PU2S.get_busy() == True:
        time.sleep(0.1)
    PU2Sound.stop
    IPCS = IPCSound.play()
    while IPCS.get_busy() == True:
        time.sleep(0.1)
    IPCSound.stop
    # --- end of highlighted block ---
    global username
    global TimeWaiting
    global c
    clear()
    video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    face1_image = face_recognition.load_image_file("facetest1.jpg")
    face1_face_encoding = face_recognition.face_encodings(face1_image)[0]
    known_face_encodings = [
        face1_face_encoding,
    ]
    known_face_names = [
        username
    ]
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    scanThread.start()
    while True:
        ret, Frame = video_capture.read()
        small_frame = cv2.resize(Frame, (0, 0), fx=0.25, fy=0.25)
        rgb_small_frame = small_frame[:, :, ::-1]
        if process_this_frame:
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            face_names = []
            for face_encoding in face_encodings:
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
                name = "Unknown"
                face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]
                    if TimeWaiting == False:
                        GrantedD()
                face_names.append(name)
        process_this_frame = not process_this_frame
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            cv2.rectangle(Frame, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2.rectangle(Frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(Frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        Frame = cv2.resize(Frame, (300, 250))
        cv2.imshow('Video', Frame)
        cv2.moveWindow('Video', 40, 30)
        cv2.resizeWindow('Video', 300, 250)
        if cv2.waitKey(1) & 0xFF == ord('q') or (c <= 0):
            process_this_frame = False
            video_capture.release()
            title()
but this does not work
def VideoD():
    PU1S = PU1Sound.play()
    while PU1S.get_busy() == True:
        time.sleep(0.01)
    PU1Sound.stop
    SVS = ScanVSound.play()
    while SVS.get_busy() == True:
        time.sleep(0.01)
    ScanVSound.stop
    global username
    global TimeWaiting
    global c
    clear()
    video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    face1_image = face_recognition.load_image_file("facetest1.jpg")
    face1_face_encoding = face_recognition.face_encodings(face1_image)[0]
    known_face_encodings = [
        face1_face_encoding,
    ]
    known_face_names = [
        username
    ]
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    scanThread.start()
    while True:
        ret, Frame = video_capture.read()
        small_frame = cv2.resize(Frame, (0, 0), fx=0.25, fy=0.25)
        rgb_small_frame = small_frame[:, :, ::-1]
        if process_this_frame:
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            face_names = []
            for face_encoding in face_encodings:
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
                name = "Unknown"
                face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]
                    if TimeWaiting == False:
                        GrantedD()
                face_names.append(name)
        process_this_frame = not process_this_frame
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            cv2.rectangle(Frame, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2.rectangle(Frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(Frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        Frame = cv2.resize(Frame, (300, 250))
        cv2.imshow('Video', Frame)
        cv2.moveWindow('Video', 40, 30)
        cv2.resizeWindow('Video', 300, 250)
        if cv2.waitKey(1) & 0xFF == ord('q') or (c <= 0):
            process_this_frame = False
            video_capture.release()
            # --- the block highlighted in the original post (placed after the camera loop here) ---
            SCS = SCSound.play()
            while SCS.get_busy() == True:
                time.sleep(0.01)
            SCSound.stop
            PU2S = PU2Sound.play()
            while PU2S.get_busy() == True:
                time.sleep(0.1)
            PU2Sound.stop
            IPCS = IPCSound.play()
            while IPCS.get_busy() == True:
                time.sleep(0.1)
            IPCSound.stop
            # --- end of highlighted block ---
            title()
The only instance of NoneType is None, so your SCS being None is triggering the exception. You expected SCSound.play() to return a channel but it returned None instead because it couldn't find a channel.
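To illustrate that point (a sketch with a made-up helper name, not code from the post): pygame.mixer.Sound.play() returns a Channel, or None when no free mixer channel is available, so either guard the return value or raise the channel count with pygame.mixer.set_num_channels.

import time
import pygame

pygame.mixer.init()
pygame.mixer.set_num_channels(16)    # the default is 8; more channels makes play() less likely to return None

def wait_for_sound(sound):
    # Play a Sound and block until it finishes, tolerating a missing channel.
    channel = sound.play()
    if channel is None:               # no free channel was found, so there is nothing to poll
        return
    while channel.get_busy():
        time.sleep(0.01)
    sound.stop()                      # note: the original code wrote `sound.stop` without parentheses, which does nothing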

How to connect RTSP with OpenCV?

Hi, I am trying to connect OpenCV and the face_recognition library to my RTSP CCTV camera, but when I run my code I get the error below. I hope someone can help me with this issue. I attach my code here.
Error:
[h264 @ 000002a9659dba00] error while decoding MB 10 94, bytestream -5
[h264 @ 000002a953c1e2c0] Invalid NAL unit 8, skipping.
Traceback (most recent call last):
  File "face-present.py", line 125, in <module>
    cv2.imshow('Video', frame)
cv2.error: OpenCV(4.0.0) C:\projects\opencv-python\opencv\modules\highgui\src\window.cpp:350: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'
mycode.py
import face_recognition
import cv2

video_capture = cv2.VideoCapture("rtsp://admin:adam12345@192.168.0.158:554/Streaming/channels/101")

roy_image = face_recognition.load_image_file("images/roy1.jpg")
roy_face_encoding = face_recognition.face_encodings(roy_image, num_jitters=100)[0]
# Load a second sample picture and learn how to recognize it.
henrik_image = face_recognition.load_image_file("images/Mr_henrik.jpg")
henrik_face_encoding = face_recognition.face_encodings(henrik_image, num_jitters=100)[0]
stefan_image = face_recognition.load_image_file("images/stefan.jpg")
stefan_face_encoding = face_recognition.face_encodings(stefan_image, num_jitters=100)[0]
hairi_image = face_recognition.load_image_file("images/Hairi.jpeg")
hairi_face_encoding = face_recognition.face_encodings(hairi_image, num_jitters=100)[0]
syam_image = face_recognition.load_image_file("images/syam1.jpeg")
syam_face_encoding = face_recognition.face_encodings(syam_image, num_jitters=100)[0]
# print(syam_face_encoding)

# Create arrays of known face encodings and their names
known_face_encodings = [
    roy_face_encoding,
    stefan_face_encoding,
    henrik_face_encoding,
    hairi_face_encoding,
    syam_face_encoding
]
known_face_names = [
    "roy",
    "stefan",
    "henrik",
    "hairi",
    "syam"
]

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
# # Process video frame frequency
# process_frame_freq = 4
# process_this_frame = process_frame_freq

while True:
    if video_capture.isOpened():
        # Grab a single frame of video
        ret, frame = video_capture.read()
        if ret:
            # Resize frame of video to 1/4 size for faster face recognition processing
            small_frame = cv2.resize(frame, None, fx=0.25, fy=0.25)
            # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
            rgb_small_frame = small_frame[:, :, ::-1]
            # Only process every other frame of video to save time
            if process_this_frame:
                # Find all the faces and face encodings in the current frame of video
                face_locations = face_recognition.face_locations(rgb_small_frame)
                if face_locations:  # prevent manipulation of null variable
                    top, right, bottom, left = face_locations[0]
                    # faces_recognized += 1
                    # print("[%i] Face recognized..." % faces_recognized)
                    cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                    cropped_face = frame[top:bottom, left:right]
                    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
                    # print(face_encodings)
                    face_names = []
                    for face_encoding in face_encodings:
                        # See if the face is a match for the known face(s)
                        matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.5)
                        name = "Unknown"
                        # If a match was found in known_face_encodings, just use the first one.
                        if True in matches:
                            first_match_index = matches.index(True)
                            name = known_face_names[first_match_index]
                            print(name)
                        face_names.append(name)
            process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

        # Display the resulting image
        cv2.imshow('Video', frame)

    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
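The [h264] messages show the RTSP stream delivering corrupted packets; when a read fails, frame ends up empty and cv2.imshow raises the size.width>0 assertion. A sketch of a more defensive read loop (the URL is a placeholder and the reconnect threshold is arbitrary; the recognition code from above would slot in where noted):

import cv2

RTSP_URL = "rtsp://user:password@camera-ip:554/Streaming/channels/101"   # placeholder credentials and host

video_capture = cv2.VideoCapture(RTSP_URL)
bad_reads = 0
while True:
    ret, frame = video_capture.read()
    if not ret or frame is None:
        bad_reads += 1
        if bad_reads > 30:                    # stream looks dead: re-open it
            video_capture.release()
            video_capture = cv2.VideoCapture(RTSP_URL)
            bad_reads = 0
        continue                              # never hand an empty frame to imshow
    bad_reads = 0
    cv2.imshow('Video', frame)                # face recognition and drawing would go here
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()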

Function sent in process can't change value of class field

The variable self.process_this_frame is not changed by the process.
For now I want to process a frame only when the previous frame that was due for processing has finished, and skip the other frames in between.
import cv2
import face_recognition
import multiprocessing

class FaceLocationSender:
    def __init__(self, camera_url):
        self.video_capture = cv2.VideoCapture(camera_url)
        self.face_locations = []
        self.process_this_frame = True

    def get_faces_from_frame(self, frame):
        self.face_locations = face_recognition.face_locations(frame)
        self.process_this_frame = True

    def start(self):
        while True:
            ret, frame = self.video_capture.read()
            small_frame = cv2.resize(frame, (0, 0), fx=1/2, fy=1/2)
            rgb_small_frame = small_frame[:, :, ::-1]
            if self.process_this_frame:
                self.process_this_frame = False
                process = multiprocessing.Process(target=self.get_faces_from_frame, args=(rgb_small_frame,))
                process.start()
            for (top, right, bottom, left) in self.face_locations:
                top *= 2
                right *= 2
                bottom *= 2
                left *= 2
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 0), 2)
            cv2.imshow('Video', cv2.resize(frame, (1200, 600)))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        self.video_capture.release()
        cv2.destroyAllWindows()
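There is no answer in this thread, but the behaviour follows from how multiprocessing works: Process runs get_faces_from_frame in a separate interpreter on a copy of the object, so its assignments to self.process_this_frame and self.face_locations never reach the parent. A thread shares the parent's memory, so the same pattern with threading does propagate the flag; a minimal sketch under that assumption (if separate processes are really required, results have to come back through an explicit channel such as a multiprocessing.Queue):

import threading
import cv2
import face_recognition

class FaceLocationSender:
    def __init__(self, camera_url):
        self.video_capture = cv2.VideoCapture(camera_url)
        self.face_locations = []
        self.process_this_frame = True

    def get_faces_from_frame(self, frame):
        # Runs in a thread of the same process, so these writes are visible in start().
        self.face_locations = face_recognition.face_locations(frame)
        self.process_this_frame = True

    def start(self):
        while True:
            ret, frame = self.video_capture.read()
            if not ret:
                break
            small_frame = cv2.resize(frame, (0, 0), fx=1/2, fy=1/2)
            rgb_small_frame = small_frame[:, :, ::-1]
            if self.process_this_frame:
                self.process_this_frame = False
                threading.Thread(target=self.get_faces_from_frame,
                                 args=(rgb_small_frame.copy(),),   # copy: the view is overwritten by the next frame
                                 daemon=True).start()
            for (top, right, bottom, left) in self.face_locations:
                cv2.rectangle(frame, (left * 2, top * 2), (right * 2, bottom * 2), (0, 0, 0), 2)
            cv2.imshow('Video', cv2.resize(frame, (1200, 600)))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        self.video_capture.release()
        cv2.destroyAllWindows()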

OpenCV returns an error when I use "face_recognition" and my webcam to identify different faces?

I was trying to reproduce the code from a face_recognition example (from: https://github.com/ageitgey/face_recognition/blob/master/examples/facerec_from_webcam_faster.py).
Unfortunately I got an OpenCV error:
---------------------------------------------------------------------------
error Traceback (most recent call last)
<ipython-input-11-8bd8ed75eefa> in <module>()
3 # small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
4 # rgb_small_frame = small_frame[:, :, ::-1]
----> 5 rgb_small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)[:, :, ::-1]
6 if process_this_frame:
7 face_locations = face_recognition.face_locations(rgb_small_frame)
error: OpenCV(3.4.2) C:\projects\opencv-python\opencv\modules\imgproc\src\resize.cpp:4044: error: (-215:Assertion failed) !ssize.empty() in function 'cv::resize'
In the project's issues I noticed someone had the same question, but there is no actual solution there. Can anyone help me? Thank you in advance.
ENVIRONMENT:
python: 3.6
OpenCV: 3.4.2
dlib: 19.15.0
face_recognition: 1.2.3
Below is my code:
import face_recognition
import cv2
import os

os.chdir(r'E:\1. Learning\face_recognition')
video_capture = cv2.VideoCapture(0)

linhan_image = face_recognition.load_image_file("linhan.jpg")
linhan_face_encoding = face_recognition.face_encodings(linhan_image, num_jitters=100)[0]
ketian_image = face_recognition.load_image_file("ketian.jpg")
ketian_face_encoding = face_recognition.face_encodings(ketian_image, num_jitters=100)[0]
linwanqi_image = face_recognition.load_image_file("linwanqi.jpg")
linwanqi_face_encoding = face_recognition.face_encodings(linwanqi_image, num_jitters=100)[0]

known_face_encodings = [
    linhan_face_encoding,
    ketian_face_encoding,
    linwanqi_face_encoding
]
known_face_names = [
    "linhan",
    "ketian",
    "linwanqi"
]

face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while True:
    ret, frame = video_capture.read()
    # small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # rgb_small_frame = small_frame[:, :, ::-1]
    rgb_small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)[:, :, ::-1]
    if process_this_frame:
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.4)
            name = "Unknown"
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]
            face_names.append(name)
    process_this_frame = not process_this_frame
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
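As in the questions above, the !ssize.empty() assertion usually means frame is None, i.e. video_capture.read() failed (camera busy, wrong device index, or not yet delivering frames), rather than anything wrong with resize itself. A small sketch of an explicit check before the loop, under that assumption:

import cv2

video_capture = cv2.VideoCapture(0)
if not video_capture.isOpened():
    raise RuntimeError("Webcam could not be opened: wrong index, or another program is using it")

ret, frame = video_capture.read()
if not ret or frame is None:
    raise RuntimeError("Webcam opened but returned no frame")

# Only resize once a real frame has arrived.
rgb_small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)[:, :, ::-1]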
