I am reading VideoCapture frames with cv2, but it always throws an error.
The error is:
Traceback (most recent call last):
File "d:\pythonprojects\gym\demo.py", line 33, in <module>
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.error: OpenCV(4.7.0) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
The code is:
import cv2
import numpy as np
import os
from PIL import Image
from Attendance import attendance
from datetime import datetime
from database import *
def getProfile(Id):
    # NOTE: building SQL by string concatenation is open to injection;
    # a parameterized query would be safer, e.g. ("... WHERE id=%s", (Id,))
    query = "SELECT * FROM users WHERE id=" + str(Id)
    cursor = mycursor.execute(query)
    profile = mycursor.fetchone()
    # profile = None
    # for row in cursor:
    #     profile = row
    # con.close()
    return profile
# os.chdir(os.getcwd())
detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("face-trainner.yml")
cap = cv2.VideoCapture(0)  # Get video feed from the camera
cap.set(3, 640)
cap.set(4, 480)
font = cv2.FONT_HERSHEY_COMPLEX
while True:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        nbr_predicted, conf = recognizer.predict(gray[y:y+h, x:x+w])
        print(nbr_predicted, conf)
        if conf < 80:
            profile = getProfile(nbr_predicted)
            if profile is not None:
                time_now = datetime.now()
                newdate = time_now.strftime('%Y-%m-%d')
                newtime = time_now.strftime('%H:%M:%S')
                attendance(nbr_predicted, newtime, newdate)
                cv2.putText(img, "Name: " + str(profile[4]), (x, y+h+30), font, 0.4, (0, 0, 255), 1)
                cv2.putText(img, "Gender: " + str(profile[7]), (x, y+h+50), font, 0.4, (0, 0, 255), 1)
        else:
            cv2.putText(img, "Name: Unknown", (x, y+h+30), font, 0.4, (0, 0, 255), 1)
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
    cv2.imshow('Preview', img)  # Display the video
    cv2.waitKey(1)
# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
This error means the img variable is None or empty: cvtColor asserts !_src.empty(). It is hard to say why the frame is None in your case; common causes are a camera that is not connected properly, a missing camera driver, or permission issues.
You can guard against it right after the cap.read() call:
...
while True:
    ret, img = cap.read()
    if img is None:
        print("Could not read frame from the source.")
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
...
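It also helps to check that the capture opened at all, before entering the loop; if it did not, every read will fail. A minimal sketch (not from the original code):
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    # nothing can be read if the device never opened
    raise IOError("Cannot open camera; check the connection, driver, and permissions.")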
This is the code without threading; it runs well (I want to recognize emotion, which this code does):
from cv2 import LINE_4, imread
from fer import FER
import matplotlib
import matplotlib.pyplot as plt
import cv2
from deepface import DeepFace
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(1)
if not cap.isOpened():
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        raise IOError("Cannot open Webcam")

def emotion_recog():
    while True:
        ret, frame = cap.read()
        result = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray, 1.1, 4)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, result['dominant_emotion'], (50, 50), font, 3, (0, 0, 255), 2, cv2.LINE_4)
        # print(result['dominant_emotion'])
        # print(emotion_recog.result['dominant_emotion'])
        cv2.imshow('Original Video', frame)
        if cv2.waitKey(2) & 0xFF == ord('q'):
            break

cap.release()
cv2.destroyAllWindows()
When I introduce threading, it doesn't work; it gives this error:
Exception has occurred: TypeError
object of type 'NoneType' has no len()
I want to capture the emotion, send it to another function, and display a result based on the emotion detected.
import numpy as np
import random
import time
from threading import *
from cv2 import LINE_4, imread
from fer import FER
import matplotlib.pyplot as plt
import cv2
from deepface import DeepFace
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(1)
if not cap.isOpened():
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        raise IOError("Cannot open Webcam")

current_mean = None

def emotion_recog():
    while True:
        result, frame = cap.read()
        result = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray, 1.1, 4)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, result['dominant_emotion'], (50, 50), font, 3, (0, 0, 255), 2, cv2.LINE_4)
        # print(result['dominant_emotion'])
        # print(emotion_recog.result['dominant_emotion'])
        cv2.imshow('Original Video', frame)
        if cv2.waitKey(2) & 0xFF == ord('q'):
            break

t1 = Thread(target=emotion_recog)
t1.start()
cap.release()
cv2.destroyAllWindows()
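One likely cause, for what it's worth: after t1.start() the main thread immediately runs cap.release(), so the next cap.read() inside the thread returns (False, None) and DeepFace.analyze receives None, which fits the "object of type 'NoneType' has no len()" error. (Note also that result, frame = cap.read() puts the read flag into result and then overwrites it on the next line.) A minimal sketch of the fix, keeping the rest of the code above unchanged, is to wait for the thread before releasing:
t1 = Thread(target=emotion_recog)
t1.start()
t1.join()   # block here until the loop exits (e.g. on 'q')
cap.release()
cv2.destroyAllWindows()
Be aware that calling cv2.imshow from a non-main thread is not reliable on every platform; if the window misbehaves, move the display back to the main thread.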
Hey, I started learning Python not long ago. Right now I'm creating (or rather trying to create) a face and motion detection script based on the OpenCV library. Unfortunately, I've been stuck for a few days because I can't solve a problem with what I guess is called multithreading.
Here is my code:
import time
import cv2
import datetime
from discord_webhook import DiscordWebhook
import threading
faceCascade = cv2.CascadeClassifier("face_recognition.xml")

# define a video capture object
video_capture = cv2.VideoCapture(0)

# writing video
frame_width = int(video_capture.get(3))
frame_height = int(video_capture.get(4))

# Define the codec and create a VideoWriter object; the output is stored in a timestamped .avi file.
out = cv2.VideoWriter(datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p") + '.avi',
                      cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (frame_width, frame_height))

# screenshot when detectors get triggered
def screenshot():
    cv2.imwrite('screenshot.png', video_capture.read()[1])

# webhook notify
def alert():
    webhook = DiscordWebhook(url="", rate_limit_retry=True,
                             content='!ALERT!')
    webhook.execute()
while True:
    # Capture the video frame by frame
    ret, frame = video_capture.read()
    text = "not detected"
    text1 = "not detected"
    timestamp = datetime.datetime.now()

    # face recognition
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    if len(faces) > 0:
        # print("Found {0} faces!".format(len(faces)))
        text = "detected"
    else:
        text = "not detected"
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    # motion detection
    ret, frame1 = video_capture.read()
    difference = cv2.absdiff(frame, frame1)  # find the difference between the frames
    gray = cv2.cvtColor(difference, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)  # create threshold
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        if cv2.contourArea(c) < 5000:
            continue
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        text1 = "detected"

    # Display the resulting frame
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, "Face status: {}".format(text), (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv2.putText(frame, "Motion status: {}".format(text1), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 0, 255), 1)

    # check if motion is detected; if not, change the status text
    if text1 == "not detected":
        text1 = "detected"
    else:
        text1 = "not detected"

    out.write(frame)
    cv2.imshow('Press Q to quit', frame)
    # the 'q' button is set as the quit key
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# After the loop, release the cap object
video_capture.release()
out.release()
cv2.destroyAllWindows()
I'm trying to send a webhook and take a screenshot every 3 minutes when motion is detected, but loops completely lag the app. I tried time.sleep and thread timers, but they only freeze or lag the app. If someone can explain how to solve this problem, I will be very thankful. Have a great day or night.
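One approach worth sketching (not the asker's code; on_motion and COOLDOWN are made-up names): rate-limit the trigger with a timestamp and push only the slow webhook call onto a background thread, so the main loop never sleeps or blocks:
import threading
import time

COOLDOWN = 180          # 3 minutes, in seconds
last_trigger = 0.0      # timestamp of the last alert

def on_motion(frame):
    # call this from the main loop whenever motion is detected
    global last_trigger
    now = time.time()
    if now - last_trigger < COOLDOWN:
        return                                    # still inside the cooldown window
    last_trigger = now
    cv2.imwrite('screenshot.png', frame)          # reuse the frame already in hand
    threading.Thread(target=alert, daemon=True).start()   # webhook off the main thread
Inside the motion loop, where text1 is set to "detected", you would call on_motion(frame.copy()). The screenshot reuses the frame already captured instead of calling video_capture.read() again from another thread, which avoids two threads reading the same device.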
import cv2
import imageio
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
def detect(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        gray_face = gray[y:y+h, x:x+w]
        color_face = frame[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(gray_face, 1.1, 3)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(color_face, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
    return frame

reader = imageio.get_reader('1.mp4')
fps = reader.get_meta_data()['fps']
writer = imageio.get_writer('output.mp4', fps=fps)
for i, frame in enumerate(reader):
    frame = detect(frame)
    writer.append_data(frame)
    print(i)
writer.close()
The error I received is this:
Traceback (most recent call last):
File "C:/Users/Mustafa Koca/PycharmProjects/mustafakoca/face_det.py", line 23, in <module>
frame = detect(frame)
File "C:/Users/Mustafa Koca/PycharmProjects/mustafakoca/face_det.py", line 9, in detect
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
cv2.error: OpenCV(4.1.2) C:\projects\opencv-python\opencv\modules\objdetect\src\cascadedetect.cpp:1689: error: (-215:Assertion failed) !empty() in function 'cv::CascadeClassifier::detectMultiScale'
The XML files are not in the current folder but in a folder inside the cv2 module, so you have to use the full path to load them.
Luckily, there is a special variable with that folder name: cv2.data.haarcascades
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
To be safe, you can use os.path.join() instead of +:
import os

face_cascade = cv2.CascadeClassifier(os.path.join(cv2.data.haarcascades, 'haarcascade_frontalface_default.xml'))
eye_cascade = cv2.CascadeClassifier(os.path.join(cv2.data.haarcascades, 'haarcascade_eye.xml'))
If the XML files are loaded, then these should give False:
print(face_cascade.empty())
print(eye_cascade.empty())
I am working with OpenCV in Python for facial identification and I want to crop the live video from my webcam to just output the face it recognizes.
I have tried using ROI but I do not know how to correctly implement it.
import cv2
import sys
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
video_capture = cv2.VideoCapture(0)
while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        roi = frame[y:y+h, x:x+w]
        cropped = frame[roi]
    # Display the resulting frame
    cv2.imshow('Face', cropped)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
The traceback:
Traceback (most recent call last):
File "C:/Users/Ben/Desktop/facerecog/facerecog2.py", line 31, in <module>
cv2.imshow('Face', cropped)
cv2.error: OpenCV(4.1.1) C:\projects\opencv-python\opencv\modules\core\src\array.cpp:2492: error: (-206:Bad flag (parameter or structure field)) Unrecognized or unsupported array type in function 'cvGetMat'
You get the cropped image with
cropped = frame[y:y+h, x:x+w]
and then you can display it.
But sometimes there is no face in the frame, so cropped is never created and you get an error. It is better to create this variable before the for loop and check it afterwards:
cropped = None
for (x, y, w, h) in faces:
    cropped = frame[y:y+h, x:x+w]
if cropped is not None:
    cv2.imshow('Face', cropped)
# else:
#     cv2.imshow('Face', frame)
or
if len(faces) > 0:   # detectMultiScale returns an array; test its length, not its truth value
    (x, y, w, h) = faces[0]
    cropped = frame[y:y+h, x:x+w]
    cv2.imshow('Face', cropped)
I don't know what you want to do if there are many faces in the frame.
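If you do want every face, one option (a sketch; the window titles are made up) is one window per detection:
for i, (x, y, w, h) in enumerate(faces):
    cv2.imshow('Face {}'.format(i), frame[y:y+h, x:x+w])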
I am doing a project that is a motion-based detection program.
However, it detects changes in the background as "motion", so I'd like a way to recapture a new first frame every few minutes, replacing the current one, to fix this issue.
I am using a Raspberry Pi 2B and a Logitech webcam.
The code I am using is based on Pyimagesearch.
This is my version of the code.
Please help me.
(Edit) I have changed my code to BackgroundSubtractorMOG2; now my issue is how to add mean-shift tracking so that it recognizes the same object that entered the frame earlier (see the sketch after the code).
import sys
sys.path.append('/usr/local/lib/python3.4/site-packages')
import numpy as np
import cv2
import imutils
from imutils import contours
import datetime
import time
#cap = cv2.VideoCapture("/home/pi/Desktop/Proj/VideoTestSample.mp4")
cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
while cap.isOpened():
    (grabbed, frame) = cap.read()
    text = " "
    if not grabbed:
        break
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, 5)
    fgmask = fgbg.apply(gray)
    thresh = cv2.erode(fgmask, None, iterations=2)
    (_, cnts, hierarchy) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for (i, c) in enumerate(cnts):
        if cv2.contourArea(c) < 300:
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.putText(frame, "#{}".format(i + 1), (x, y - 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
        text = "REC"
    cv2.putText(frame, "{}".format(text), (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
    cv2.imshow('frame', frame)
    cv2.imshow('gray', gray)
    cv2.imshow('fgmask', fgmask)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
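On the mean-shift edit: a minimal sketch, assuming OpenCV's built-in cv2.meanShift on a hue back-projection (track_window, roi_hist, and the thresholds are made-up names and values), of locking onto the first large foreground object and then following that same object across frames:
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
track_window = None   # (x, y, w, h) of the object being followed
roi_hist = None       # hue histogram of that object

while cap.isOpened():
    grabbed, frame = cap.read()
    if not grabbed:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    if track_window is None:
        # no object yet: let the background subtractor find one
        fgmask = fgbg.apply(frame)
        cnts = cv2.findContours(fgmask, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]   # works on OpenCV 3 and 4
        big = [c for c in cnts if cv2.contourArea(c) > 300]
        if big:
            x, y, w, h = cv2.boundingRect(max(big, key=cv2.contourArea))
            track_window = (x, y, w, h)
            roi = hsv[y:y+h, x:x+w]
            roi_hist = cv2.calcHist([roi], [0], None, [180], [0, 180])
            cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    else:
        # follow the same object with mean-shift on the hue back-projection
        back_proj = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        _, track_window = cv2.meanShift(back_proj, track_window, term_crit)
        x, y, w, h = track_window
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Mean-shift here never decides the object has left, so in practice you would reset track_window (for example, when the back-projection inside the window becomes mostly zero) to start searching again.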