OpenCV - Motion capture with multiple IP cameras - Python

I have three IP cameras around my house and I want to capture an image when motion is detected. I want to run the motion capture algorithm at the same time for all three cameras.
I managed to get this working for one camera: open the stream, run the motion detection algorithm, and store an image when motion is detected:
import cv2
import time

cap3 = cv2.VideoCapture('http://X.X.X.X:XXXX/stream.mjpg')
ret3, frame31 = cap3.read()
ret3, frame32 = cap3.read()

while True:
    diff3 = cv2.absdiff(frame31, frame32)
    gray3 = cv2.cvtColor(diff3, cv2.COLOR_BGR2GRAY)
    blur3 = cv2.GaussianBlur(gray3, (5, 5), 0)
    _, tresh3 = cv2.threshold(blur3, 30, 255, cv2.THRESH_BINARY)
    dilated3 = cv2.dilate(tresh3, None, iterations=3)
    contours3, _ = cv2.findContours(dilated3, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours3:
        (x, y, w, h) = cv2.boundingRect(contour)
        if cv2.contourArea(contour) < 800:
            continue
        cv2.rectangle(frame31, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame31, "Status: {}".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        t = time.localtime()
        filename = "RASP" + str(t[0]) + str(t[1]) + str(t[2]) + "_" + str(t[3]) + str(t[4]) + str(t[5]) + ".jpg"
        cv2.imwrite(filename, frame31)
    frame31 = frame32
    ret3, frame32 = cap3.read()
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap3.release()
cv2.destroyAllWindows()
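A small side note on the filename: concatenating str(t[0]) through str(t[5]) gives month, day, hour, minute and second without zero padding, so names do not sort correctly and two detections in the same second overwrite each other. A minimal sketch of an alternative using time.strftime (the snapshot_name helper and the "RASP" prefix are just illustrative):

import time

# Hypothetical helper: zero-padded, sortable timestamp such as
# "RASP20240131_074205.jpg" instead of "RASP2024131_745.jpg".
def snapshot_name(prefix="RASP"):
    return prefix + time.strftime("%Y%m%d_%H%M%S") + ".jpg"

# usage: cv2.imwrite(snapshot_name(), frame31)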
The problem comes when I try to do the same job in parallel for the three cameras.
What I do is duplicate the same processing inside the while loop for the three cameras; when I do so, it runs for a few seconds and then I get this error:
Traceback (most recent call last):
File "C:/Users/Guillaume/PycharmProjects/IPCAM/IPCAM2.py", line 54, in <module>
gray2 = cv2.cvtColor(diff2, cv2.COLOR_BGR2GRAY)
cv2.error: OpenCV(4.2.0) C:\projects\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
The code I run is below:
import cv2
import numpy as np
from datetime import datetime
import time

cap2 = cv2.VideoCapture('rtsp://')  # IPCAM2
cap = cv2.VideoCapture('rtsp://')   # IPCAM1
cap3 = cv2.VideoCapture('http://')  # RASP

def rescale_frame(frame, percent=75):
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    dim = (width, height)
    return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)

while True:
    ret1, frame11 = cap.read()
    ret1, frame12 = cap.read()
    ret2, frame21 = cap2.read()
    ret2, frame22 = cap2.read()
    ret3, frame31 = cap3.read()
    ret3, frame32 = cap3.read()

    diff1 = cv2.absdiff(frame11, frame12)
    gray1 = cv2.cvtColor(diff1, cv2.COLOR_BGR2GRAY)
    blur1 = cv2.GaussianBlur(gray1, (5, 5), 0)
    _, tresh1 = cv2.threshold(blur1, 40, 255, cv2.THRESH_BINARY)
    dilated1 = cv2.dilate(tresh1, None, iterations=3)
    contours1, _ = cv2.findContours(dilated1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours1:
        (x, y, w, h) = cv2.boundingRect(contour)
        if cv2.contourArea(contour) < 1000:
            continue
        cv2.rectangle(frame11, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame11, "Status: {}".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        t = time.localtime()
        filename = str(t[0]) + str(t[1]) + str(t[2]) + "_" + str(t[3]) + str(t[4]) + str(t[5]) + ".jpg"
        cv2.imwrite(filename, frame11)
    # cv2.line(frame, (0, 300), (200, 200), (0, 255, 0), 5)
    resizedframe11 = rescale_frame(frame11, percent=75)
    cv2.imshow('frame', resizedframe11)
    frame11 = frame12
    ret1, frame12 = cap.read()

    diff2 = cv2.absdiff(frame21, frame22)
    gray2 = cv2.cvtColor(diff2, cv2.COLOR_BGR2GRAY)
    blur2 = cv2.GaussianBlur(gray2, (5, 5), 0)
    _, tresh2 = cv2.threshold(blur2, 40, 255, cv2.THRESH_BINARY)
    dilated2 = cv2.dilate(tresh2, None, iterations=3)
    contours2, _ = cv2.findContours(dilated2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours2:
        (x, y, w, h) = cv2.boundingRect(contour)
        if cv2.contourArea(contour) < 1000:
            continue
        cv2.rectangle(frame21, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame21, "Status: {}".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        t = time.localtime()
        filename = str(t[0]) + str(t[1]) + str(t[2]) + "_" + str(t[3]) + str(t[4]) + str(t[5]) + ".jpg"
        cv2.imwrite(filename, frame21)
    resizedframe21 = rescale_frame(frame21, percent=75)
    cv2.imshow('frame2', resizedframe21)
    frame21 = frame22
    ret2, frame22 = cap2.read()

    diff3 = cv2.absdiff(frame31, frame32)
    gray3 = cv2.cvtColor(diff3, cv2.COLOR_BGR2GRAY)
    blur3 = cv2.GaussianBlur(gray3, (5, 5), 0)
    _, tresh3 = cv2.threshold(blur3, 40, 255, cv2.THRESH_BINARY)
    dilated3 = cv2.dilate(tresh3, None, iterations=3)
    contours3, _ = cv2.findContours(dilated3, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours3:
        (x, y, w, h) = cv2.boundingRect(contour)
        if cv2.contourArea(contour) < 800:
            continue
        cv2.rectangle(frame31, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame31, "Status: {}".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        t = time.localtime()
        filename = "RASP" + str(t[0]) + str(t[1]) + str(t[2]) + "_" + str(t[3]) + str(t[4]) + str(t[5]) + ".jpg"
        cv2.imwrite(filename, frame31)
    resizedframe31 = rescale_frame(frame31, percent=75)
    cv2.imshow('frame3', resizedframe31)
    frame31 = frame32
    ret3, frame32 = cap3.read()

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
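The !_src.empty() assertion in cv2.cvtColor usually means that one of the cap*.read() calls returned ret = False together with an empty frame (IP streams drop frames regularly), and that empty frame was then pushed through absdiff/cvtColor. A minimal sketch of a guard, using the same captures as above (the extra ret*b names exist only for this sketch), that skips the iteration instead of crashing:

while True:
    ret1, frame11 = cap.read()
    ret1b, frame12 = cap.read()
    ret2, frame21 = cap2.read()
    ret2b, frame22 = cap2.read()
    ret3, frame31 = cap3.read()
    ret3b, frame32 = cap3.read()

    # If any grab failed, the frame is None/empty and cvtColor would raise
    # the "(-215:Assertion failed) !_src.empty()" error, so skip this pass.
    frames = [frame11, frame12, frame21, frame22, frame31, frame32]
    if not all([ret1, ret1b, ret2, ret2b, ret3, ret3b]) or any(f is None for f in frames):
        continue

    # ... per-camera motion detection as in the code above ...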

Thanks Kartik and thekangaroo for your answers.
I managed to run my three cameras at the same time using threads. I am just opening them and showing a resized stream.
There is another issue: one camera, and then a second one, stops after a random time of between 5 and 20 seconds. The stream stops and then the window closes without any message.
It seems to me that this is due to lag when getting the images from the cameras... any ideas on how to avoid that with OpenCV?
Thanks again for your helpful answers.
Below is the code I use:
import cv2
import threading
import time

class camThread(threading.Thread):
    def __init__(self, previewName, camID):
        threading.Thread.__init__(self)
        self.previewName = previewName
        self.camID = camID

    def run(self):
        print("Starting " + self.previewName)
        camPreview(self.previewName, self.camID)

def rescale_frame(frame, percent=75):
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    dim = (width, height)
    return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)

def camPreview(previewName, camID):
    cv2.namedWindow(previewName)
    cam = cv2.VideoCapture(camID)
    if cam.isOpened():  # try to get the first frame
        rval, frame = cam.read()
    else:
        time.sleep(10)
        rval, frame = cam.read()
    percent = 50
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    dim = (width, height)
    while rval:
        # cv2.imshow(previewName, frame)
        cv2.imshow(previewName, cv2.resize(frame, dim, interpolation=cv2.INTER_AREA))
        time.sleep(0.5)
        rval, frame = cam.read()
        key = cv2.waitKey(20)
        print(previewName + str(cam.isOpened()))

# Create three threads, one per camera
thread1 = camThread("CLIO", 'rtsp://xxxx')
thread2 = camThread("JARDIN", 'rtsp://xxxx')
thread3 = camThread("RASPCAM", 'http://xxxx')
thread1.start()
thread2.start()
thread3.start()
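A note on the dropouts: while rval ends the preview as soon as a single cam.read() fails, which happens easily on a laggy RTSP stream, and the window then closes silently. Below is a sketch of a more tolerant camPreview (same thread setup as above; the max_failures value is an arbitrary choice) that skips bad reads and reopens the stream after too many consecutive failures:

def camPreview(previewName, camID, max_failures=25):
    cv2.namedWindow(previewName)
    cam = cv2.VideoCapture(camID)
    failures = 0
    while True:
        rval, frame = cam.read()
        if not rval or frame is None:
            failures += 1
            if failures >= max_failures:
                # too many consecutive failed reads: reopen the stream
                cam.release()
                cam = cv2.VideoCapture(camID)
                failures = 0
            continue
        failures = 0
        cv2.imshow(previewName, cv2.resize(frame, None, fx=0.5, fy=0.5,
                                           interpolation=cv2.INTER_AREA))
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break
    cam.release()
    cv2.destroyWindow(previewName)

Note that OpenCV's HighGUI is not guaranteed to be thread-safe on every platform, so an alternative layout is to only read frames in the worker threads and do all cv2.imshow calls from the main thread.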


i = i[0] leads to IndexError: invalid index to scalar variable [duplicate]

I'm getting an error every time I run this script in my environment:
Traceback (most recent call last):
File "FaceMark.py", line 55, in <module>
i = i[0]
IndexError: invalid index to scalar variable.
The camera does turn on, but as soon as it detects my hand or face the camera window shuts down and it throws the error posted above.
Here's the entire script:
import cv2
import mediapipe as mp
import time
import numpy as np
thres = 0.45 # Threshold to detect object
nms_threshold = 0.2
cap = cv2.VideoCapture()
cap.set(3, 1280)
cap.set(4, 720)
cap.set(10, 150)
classNames = []
classFile = 'coco.names'
with open(classFile, 'rt') as f:
classNames = f.read().rstrip('\n').split('\n')
configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'
net = cv2.dnn_DetectionModel(weightsPath, configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)
cap = cv2.VideoCapture(0)
pTime = 0
cTime = 0
mpDraw = mp.solutions.drawing_utils
mpFaceMesh = mp.solutions.face_mesh
faceMesh = mpFaceMesh.FaceMesh(max_num_faces=2)
drawSpec = mpDraw.DrawingSpec(thickness=1, circle_radius=2)
mpHands = mp.solutions.hands
hands = mpHands.Hands()
mpDrawHand = mp.solutions.drawing_utils
while True:
success, img = cap.read()
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
classIds, confs, bbox = net.detect(img, confThreshold=thres)
bbox = list(bbox)
confs = list(np.array(confs).reshape(1, -1)[0])
confs = list(map(float, confs))
indices = cv2.dnn.NMSBoxes(bbox, confs, thres, nms_threshold)
results = faceMesh.process(imgRGB)
resultsHand = hands.process(imgRGB)
for i in indices:
i = i[0]
box = bbox[i]
# colors = np.random.uniform(0, 255, size=(len(box), 3))
x, y, w, h = box[0], box[1], box[2], box[3]
cv2.rectangle(img, (x, y), (x + w, h + y), color=(0, 255, 0), thickness=2)
cv2.putText(img, classNames[classIds[i][0] - 1].upper(), (box[0] + 10, box[1] + 30),
cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)
print("Objects Ids: ", classIds)
if resultsHand.multi_hand_landmarks:
for handLms in resultsHand.multi_hand_landmarks:
for id, lm in enumerate(handLms.landmark):
print(id, lm)
h, w, c = img.shape
cx, cy = int(lm.x * w), int(lm.y * h)
cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)
mpDrawHand.draw_landmarks(img, handLms, mpHands.HAND_CONNECTIONS)
if results.multi_face_landmarks:
for faceLms in results.multi_face_landmarks:
mpDraw.draw_landmarks(img, faceLms, mpFaceMesh.FACE_CONNECTIONS,
drawSpec, drawSpec)
for id, lm in enumerate(faceLms.landmark):
# print(lm)
ih, iw, ic = img.shape
x, y = int(lm.x * iw), int(lm.y * ih)
print("Face id: ", id, x, y)
cTime = time.time()
fps = 1 / (cTime - pTime)
pTime = cTime
cv2.putText(img, f'FPS: {int(fps)}', (20, 70), cv2.FONT_HERSHEY_PLAIN,
3, (255, 0, 0), 3)
cv2.imshow('image', img)
key = cv2.waitKey(1)
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
How can I solve the problem with the indices loop? I have tried solutions from similar questions here, but I haven't had any luck.
The variable i is already an integer, not a list or a tuple, so indexing it with i[0] fails. If you change the for loop as follows, it will likely work:
for i in indices:
    box = bbox[i]
    # colors = np.random.uniform(0, 255, size=(len(box), 3))
    x, y, w, h = box[0], box[1], box[2], box[3]
    cv2.rectangle(img, (x, y), (x + w, h + y), color=(0, 255, 0), thickness=2)
    cv2.putText(img, classNames[classIds[i] - 1].upper(), (box[0] + 10, box[1] + 30),
                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)
    print("Objects Ids: ", classIds)

Opencv giving a C++ exception error with LBPHFaceRecognizer

I am trying face recognition with OpenCV's LBPH recognizer, with a GUI made with Tkinter. The first time, my program runs fine, but when I exit the camera with the 'q' key and start it again with the push button in the GUI, it gives an error. I have tried many workarounds but still have no answer. Can someone help me out?
Here is my code:
recognizer.read('C:/data.yml')
id = 0
# set text style
fontface = cv2.FONT_HERSHEY_SIMPLEX
fontscale = 1
fontcolor = (203, 23, 252)
cam = cv2.VideoCapture(0)
name_to_track=value_.get()
print(name_to_track)
# get data from sqlite by ID
while (True):
# camera read
ret, img = cam.read()
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
result = face.process(rgb)
if result.detections:
for id_, detect in enumerate(result.detections):
bbox_ = detect.location_data.relative_bounding_box
ih, iw, ic = img.shape
bbox = int(bbox_.xmin * iw), int(bbox_.ymin * ih), \
int(bbox_.width * iw), int(bbox_.height * ih)
x = bbox[0]
y = bbox[1]
w = bbox[2]
h = bbox[3]
cv2.rectangle(img, bbox, (255, 0, 255), 2)
img_size=gray[y:y + h, x:x + w]
img_size=np.array(img_size)
print("x:"+str(x)+"y:"+str(y))
if(x<0 or y<0 or w <0 or h<0):
continue
id, conf = recognizer.predict(gray[y:y + h, x:x + w])
#print(conf)
if (conf < 50):
profile = getProfile(id)
# set text to window
if (profile != None):
# cv2.PutText(cv2.fromarray(img),str(id),(x+y+h),font,(0,0,255),2);
cv2.putText(img, "ID: " + str(profile[0]) + ' Acc:' + str(round(float(1 - conf / 100), 2)),
(x, y + h + 30), fontface, fontscale, fontcolor, 2)
if (profile[1] == name_to_track):
cv2.putText(img, "Tracking", (x, y + h + 60), fontface, fontscale, fontcolor, 2)
else:
cv2.putText(img, "Name " + str(profile[1]), (x, y + h + 60), fontface, fontscale, fontcolor, 2)
else:
cv2.putText(img, "Unknown", (x, y + h + 30), fontface, fontscale, [255, 0, 0], 2)
cv2.imshow("face", img)
if cv2.waitKey(1) == ord('q'):
break
cam.release()
cv2.destroyAllWindows()
THANKS!!

Object Detection Using Raspberry Pi and Android IP Camera with Python and OpenCV

Here is the code I have used for object detection using a Raspberry Pi and an Android IP camera. I'm not getting any output, and the code does not raise any errors. Can someone figure out what the error is?
import urllib.request
import cv2
import numpy as np
import datetime
import math
#global variables
width = 0
height = 0
EntranceCounter = 0
ExitCounter = 0
MinCountourArea = 3000  # Adjust this value according to your usage
BinarizationThreshold = 70  # Adjust this value according to your usage
OffsetRefLines = 150  # Adjust this value according to your usage
# Check if an object is entering the monitored zone
def CheckEntranceLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
AbsDistance = abs(y - CoorYEntranceLine)
if ((AbsDistance <= 2) and (y < CoorYExitLine)):
return 1
else:
return 0
# Check if an object is exiting the monitored zone
def CheckExitLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
AbsDistance = abs(y - CoorYExitLine)
if ((AbsDistance <= 2) and (y > CoorYEntranceLine)):
return 1
else:
return 0
This is the code I have used to obtain the video stream from my IP camera:
ReferenceFrame = None
while True:
camera=cv2.VideoCapture("http://192.168.1.6:8080/shot.jpg")
camera.set(3,640)
camera.set(4,480)
(ret,Frame)=camera.read()
height = np.size(Frame,0)
width = np.size(Frame,1)
#if cannot grab a frame, this program ends here.
if not ret:
break
This is the part of the code I have used to display the lines and the frame for object detection and object counting:
#gray-scale convertion and Gaussian blur filter applying
GrayFrame = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
GrayFrame = cv2.GaussianBlur(GrayFrame, (21, 21), 0)
if ReferenceFrame is None:
ReferenceFrame = GrayFrame
continue
#Background subtraction and image binarization
FrameDelta = cv2.absdiff(ReferenceFrame, GrayFrame)
FrameThresh = cv2.threshold(FrameDelta, BinarizationThreshold, 255, cv2.THRESH_BINARY)[1]
#Dilate image and find all the contours
FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
_, cnts, _ = cv2.findContours(FrameThresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
QttyOfContours = 0
#plot reference lines (entrance and exit lines)
CoorYEntranceLine = (height / 2)-OffsetRefLines
CoorYExitLine = (height / 2)+OffsetRefLines
cv2.line(Frame, (0,CoorYEntranceLine), (width,CoorYEntranceLine), (255, 0, 0), 2)
cv2.line(Frame, (0,CoorYExitLine), (width,CoorYExitLine), (0, 0, 255), 2)
#check all found countours
for c in cnts:
#if a contour has small area, it'll be ignored
if cv2.contourArea(c) < MinCountourArea:
continue
QttyOfContours = QttyOfContours+1
#draw an rectangle "around" the object
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(Frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
#find object's centroid
CoordXCentroid = (x+x+w)/2
CoordYCentroid = (y+y+h)/2
ObjectCentroid = (CoordXCentroid,CoordYCentroid)
cv2.circle(Frame, ObjectCentroid, 1, (0, 0, 0), 5)
if (CheckEntranceLineCrossing(CoordYCentroid,CoorYEntranceLine,CoorYExitLine)):
EntranceCounter += 1
if (CheckExitLineCrossing(CoordYCentroid,CoorYEntranceLine,CoorYExitLine)):
ExitCounter += 1
print ("Total countours found: "+str(QttyOfContours))
#Write entrance and exit counter values on frame and shows it
cv2.putText(Frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 0, 1), 2)
cv2.putText(Frame, "Exits: {}".format(str(ExitCounter)), (10, 70),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.imshow('Salida',Frame)
cv2.waitKey(1);
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
The correct code
import cv2
import numpy as np
import math
def nothing(x):
pass
width=0
height=0
EntranceCounter = 0
OffsetRefLines = 150
ExitCounter = 0
BinarizationThreshold = 70
MinCountourArea = 3000
cap = cv2.VideoCapture(0);
path="http://192.168.1.6:8080/video"
cap.open(path)
ReferenceFrame = None
# Check if an object is entering the monitored zone
def CheckEntranceLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
AbsDistance = abs(y - CoorYEntranceLine)
if ((AbsDistance <= 2) and (y < CoorYExitLine)):
return 1
else:
return 0
# Check if an object is exiting the monitored zone
def CheckExitLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
AbsDistance = abs(y - CoorYExitLine)
if ((AbsDistance <= 2) and (y > CoorYEntranceLine)):
return 1
else:
return 0
#cv2.namedWindow("Tracking")
cv2.createTrackbar("LH", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LS", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LV", "Tracking", 0, 255, nothing)
cv2.createTrackbar("UH", "Tracking", 255, 255, nothing)
cv2.createTrackbar("US", "Tracking", 255, 255, nothing)
cv2.createTrackbar("UV", "Tracking", 255, 255, nothing)
while True:
#frame = cv2.imread('smarties.png')
if cap.isOpened():
rval, frame = cap.read()
while rval:
rval,frame = cap.read()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
hsv = cv2.GaussianBlur(hsv, (21, 21), 0)
if ReferenceFrame is None:
ReferenceFrame = hsv
continue
#Background subtraction and image binarization
FrameDelta = cv2.absdiff(ReferenceFrame, hsv)
FrameThresh = cv2.threshold(FrameDelta, 25, 255, cv2.THRESH_BINARY)[1]
#Dilate image and find all the contours
FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
cnts, _ = cv2.findContours(FrameThresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
QttyOfContours = 0
#plot reference lines (entrance and exit lines)
cv2.line(frame, (0,170), (2000,170), (255, 0, 0), 5)
cv2.line(frame, (0,470), (2000,470), (0, 0, 255), 5)
#check all found countours
for c in cnts:
#if a contour has small area, it'll be ignored
if cv2.contourArea(c) < MinCountourArea:
continue
QttyOfContours = QttyOfContours+1
#draw an rectangle "around" the object
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
#find object's centroid
CoordXCentroid = int(x+x+w)/2
CoordYCentroid = int(y+y+h)/2
ObjectCentroid = (x,y)
cv2.circle(frame, ObjectCentroid, 2, (0, 255, 0), 5)
if (CheckEntranceLineCrossing(CoordYCentroid,170,470)):
EntranceCounter += 1
if (CheckExitLineCrossing(CoordYCentroid,170,470)):
ExitCounter += 1
print ("Total countours found: "+str(QttyOfContours))
#Write entrance and exit counter values on frame and shows it
cv2.putText(frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
cv2.FONT_HERSHEY_SIMPLEX, 2, (250, 0, 1), 2)
cv2.putText(frame, "Exits: {}".format(str(ExitCounter)), (10, 110),
cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)
imS = cv2.resize(frame, (400, 400)) # Resize image
#imSS = cv2.resize(mask, (200, 200))
#imSSS = cv2.resize(frame, (200, 200))
cv2.imshow("frame", imS)
#cv2.imshow("mask", imSS)
#cv2.imshow("res", imSSS)
key = cv2.waitKey(1)
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
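For completeness, the original http://192.168.1.6:8080/shot.jpg endpoint serves a single JPEG, so opening a new VideoCapture on it at every loop iteration is slow and often yields nothing. A sketch of reading that endpoint directly with urllib and cv2.imdecode (same IP Webcam URL assumed, error handling omitted):

import urllib.request
import numpy as np
import cv2

url = "http://192.168.1.6:8080/shot.jpg"  # IP Webcam still-image endpoint
while True:
    # fetch one JPEG snapshot and decode it into a BGR image
    data = urllib.request.urlopen(url, timeout=5).read()
    frame = cv2.imdecode(np.frombuffer(data, dtype=np.uint8), cv2.IMREAD_COLOR)
    if frame is None:
        continue
    cv2.imshow("IP camera", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()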

'numpy.ndarray' object has no attribute 'detectMultiScale'

So here is my code. I'm using OpenCV's cascade files to detect face and eye positions, and then using them in the following part.
# Importing relevant libraries
import cv2
import os
import tensorflow
from keras.models import load_model
import numpy as np
import pygame
import time
# Adding alarm to raise alert
pygame.init()
sound = pygame.mixer.Sound('alarm.wav')
# initialising -> cascade files into cascade classifier -> OpenCV
face = cv2.CascadeClassifier('/Users/ankush/Desktop/sleepy-driver-alert/haar-cascade-files/haarcascade_frontalface_alt.xml')
l_eye = cv2.CascadeClassifier('/Users/ankush/Desktop/sleepy-driver-alert/haar-cascade-files/haarcascade_lefteye_2splits.xml')
r_eye = cv2.CascadeClassifier('/Users/ankush/Desktop/sleepy-driver-alert/haar-cascade-files/haarcascade_righteye_2splits.xml')
# start-of-execution
label = ['Close', 'Open']
# load model and video capture
model = load_model('/Users/ankush/Desktop/sleepy-driver-alert/models/cnnCat2.h5')
path = os.getcwd()
cap = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
count = 0
score = 0
dep = 2
rpred = [99]
lpred = [99]
while (True):
ret, frame = cap.read()
height, width = frame.shape[:2]
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face.detectMultiScale(gray, minNeighbors=5, scaleFactor=1.1, minSize=(25, 25))
left_eye_gray = r_eye.detectMultiScale(gray)
right_eye_gray = l_eye.detectMultiScale(gray)
cv2.rectangle(frame, (0, height - 50), (200, height),
(0, 0, 0), thickness=cv2.FILLED)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x + w, y + h), (100, 100, 100), 1)
# Next few lines check if eyes are open or not ->rpred,lpred variables
# right
for (x, y, w, h) in right_eye_gray:
r_eye = frame[y:y + h, x:x + w]
count = count + 1
r_eye = cv2.cvtColor(r_eye, cv2.COLOR_BGR2GRAY)
r_eye = cv2.resize(r_eye, (24, 24))
r_eye = r_eye / 255
r_eye = r_eye.reshape(24, 24, -1)
r_eye = np.expand_dims(r_eye, axis=0)
rpred = model.predict_classes(r_eye)
if (rpred[0] == 1):
label = 'Open'
if (rpred[0] == 0):
label = 'Closed'
break
# left
for (x, y, w, h) in left_eye_gray:
l_eye = frame[y:y + h, x:x + w]
count = count + 1
l_eye = cv2.cvtColor(l_eye, cv2.COLOR_BGR2GRAY)
l_eye = cv2.resize(l_eye, (24, 24))
l_eye = l_eye / 255
l_eye = l_eye.reshape(24, 24, -1)
l_eye = np.expand_dims(l_eye, axis=0)
lpred = model.predict_classes(l_eye)
if (lpred[0] == 1):
label = 'Open'
if (lpred[0] == 0):
label = 'Closed'
break
# if closed eyes detected
if (rpred[0] == 0 and lpred[0] == 0):
score = score + 1
cv2.putText(frame, "Closed", (10, height - 20), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
# if for some reason only one is closed, driver is not sleeping
else:
score = score - 1
cv2.putText(frame, "Open", (10, height - 20), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
if (score < 0):
score = 0
cv2.putText(frame, 'Score:' + str(score), (100, height - 20), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
# if person is very sleepy, high score, eyes closed
if (score > 15):
cv2.imwrites(os.path.join(path, 'image.jpg'), frame)
try:
sound.play()
except:
pass
if (dep < 16):
dep = dep + 2
else:
dep = dep - 2
if (dep < 2):
dep = 2
cv2.rectangle(frame, (0, 0), (width, height), (0, 0, 255), dep)
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
And the error that I'm getting is:
Traceback (most recent call last):
File "/Users/ankush/Desktop/sleepy-driver-alert/Sleepy-driver-detector.py", line 44, in <module>
right_eye_gray = l_eye.detectMultiScale(gray)
AttributeError: 'numpy.ndarray' object has no attribute 'detectMultiScale'
What am I doing wrong? I'm a beginner.
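A likely cause, not confirmed in the thread: inside the eye loops the cascade classifiers are overwritten, for example r_eye = frame[y:y + h, x:x + w] replaces the r_eye CascadeClassifier with a NumPy array, so on the next frame r_eye.detectMultiScale(gray) (and likewise l_eye) fails with exactly this AttributeError. A minimal sketch of the right-eye loop with the crop renamed so the classifier survives:

# Keep the CascadeClassifier (r_eye) and the cropped eye image (r_eye_img)
# in separate variables so the classifier is still intact on the next frame.
for (x, y, w, h) in right_eye_gray:
    r_eye_img = frame[y:y + h, x:x + w]
    count = count + 1
    r_eye_img = cv2.cvtColor(r_eye_img, cv2.COLOR_BGR2GRAY)
    r_eye_img = cv2.resize(r_eye_img, (24, 24)) / 255
    r_eye_img = np.expand_dims(r_eye_img.reshape(24, 24, -1), axis=0)
    rpred = model.predict_classes(r_eye_img)
    label = 'Open' if rpred[0] == 1 else 'Closed'
    break

The same renaming would apply to the left-eye loop (l_eye versus its cropped image).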

OpenCV assertion failed with negative values

I am making a face liveness detection program with OpenCV and Keras.
I am stuck with this error:
OpenCV assertion failed with negative values
I have already tried all the solutions suggested in answers on Stack Overflow and in GitHub issues, but none of them worked in my case.
video_capture = cv2.VideoCapture(0)
model = load_model()
print("[LOG] COLLECTING images.....")
images = []
for direc, _, files in tqdm(os.walk(dataset)):
for file in files:
if file.endswith("jpg"):
images.append(os.path.join(direc, file))
return model, face_detector, open_eyes_detector, left_eye__detector, right_eye_detector, video_capture, images
def process_and_encode(images):
known_encodings = []
known_names = []
print("[LOG] Encoding faces....")
for image_path in tqdm(images):
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
boxes = face_recognition.face_locations(image, model='hog')
encoding = face_recognition.face_encodings(image, boxes)
name = image_path.split(os.path.sep)[-2]
if len(encoding) > 0:
known_encodings.append(encoding[0])
known_names.append(name)
return {"encodings": known_encodings, "names": known_names}
def detect_and_display(model, video_capture, face_detector, open_eyes_detector, left_eye_detector, right_eye_detector,
data, eyes_detected):
frame = video_capture.read()
try:
frame = cv2.resize(frame, (0, 0), fx=0.6, fy=0.6)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
faces = face_detector.detectMultiScale(
gray,
scaleFactor=1.2,
minNeighbors=5,
minSize=(50, 50),
flags=cv2.CASCADE_SCALE_IMAGE
)
for (x, y, w, h) in faces:
encoding = face_recognition.face_encodings(rgb, [(y, x + w, y + h, x)])
matches = face_recognition.compare_faces(data["encodings"], encoding)
name = "Unknown"
if True in matches:
matchedIdxs = [i for (i, b) in enumerate(matches) if b]
counts = {}
for i in matchedIdxs:
name = data["names"][i]
counts[name] = counts.get(name, 0) + 1
name = max(counts, key=counts.get)
face = frame[y:y + h, x:x + w]
gray_face = gray[y:y + h, x:x + w]
eyes = []
open_eyes_glasses = open_eyes_detector.detectMultiScale(
gray_face,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
if len(open_eyes_glasses) == 2:
eyes_detected[name] += '1'
for (ex, ey, ew, eh) in open_eyes_glasses:
cv2.rectangle(face, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
# otherwise try detecting eyes using left and right_eye_detector
# which can detect open and closed eyes
else:
# separate the face into left and right sides
left_face = frame[y:y + h, x + int(w / 2):x + w]
left_face_gray = gray[y:y + h, x + int(w / 2):x + w]
right_face = frame[y:y + h, x:x + int(w / 2)]
right_face_gray = gray[y:y + h, x:x + int(w / 2)]
# Detect the left eye
left_eye = left_eye_detector.detectMultiScale(
left_face_gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
# Detect the right eye
right_eye = right_eye_detector.detectMultiScale(
right_face_gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
eye_status = '1' # we suppose the eyes are open
# For each eye check wether the eye is closed.
# If one is closed we conclude the eyes are closed
for (ex, ey, ew, eh) in right_eye:
color = (0, 255, 0)
pred = predict(right_face[ey:ey + eh, ex:ex + ew], model)
if pred == 'closed':
eye_status = '0'
color = (0, 0, 255)
cv2.rectangle(right_face, (ex, ey), (ex + ew, ey + eh), color, 2)
for (ex, ey, ew, eh) in left_eye:
color = (0, 255, 0)
pred = predict(left_face[ey:ey + eh, ex:ex + ew], model)
if pred == 'closed':
eye_status = '0'
color = (0, 0, 255)
cv2.rectangle(left_face, (ex, ey), (ex + ew, ey + eh), color, 2)
eyes_detected[name] += eye_status
# Each time, we check if the person has blinked
# If yes, we display its name
if isBlinking(eyes_detected[name], 3):
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
# Display name
y = y - 15 if y - 15 > 15 else y + 15
cv2.putText(frame, name, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
return frame
except Exception as e:
print(str(e))
if __name__ == "__main__":
(model, face_detector, open_eyes_detector, left_eye_detector, right_eye_detector, video_capture, images) = init()
data = process_and_encode(images)
eyes_detected = defaultdict(str)
while True:
frame = detect_and_display(model, video_capture, face_detector, open_eyes_detector, left_eye_detector,
right_eye_detector, data, eyes_detected)
cv2.imshow("Face Liveness Detector", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
video_capture.stop()
Error message:
Expected cv::UMat for argument 'src'
Traceback (most recent call last):
File "C:/Users/Saksham Dubey/PycharmProjects/FacePay/FaceLive.py", line 190, in
cv2.imshow("Face Liveness Detector", frame)
cv2.error: OpenCV(4.1.0) C:\projects\opencv-python\opencv\modules\highgui\src\window.cpp:352: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'
[ WARN:0] terminating async callback
That could probably be because you try to use imshow() when there has been no imwrite() before it. Not exactly a solution, but here is a working example. Take a look:
import cv2 # pip install opencv-python
import datetime
from cv2.data import haarcascades as hc
import requests
cap = cv2.VideoCapture(0)
faceCascade = cv2.CascadeClassifier("%s/haarcascade_frontalface_default.xml" % hc)
eye_cascade = cv2.CascadeClassifier('%s/haarcascade_eye.xml' % hc)
profile_cascade = cv2.CascadeClassifier('%s/haarcascade_profileface.xml' % hc)
fullbody_cascade = cv2.CascadeClassifier('%s/haarcascade_fullbody.xml' % hc)
smile_cascade = cv2.CascadeClassifier('%s/haarcascade_smile.xml' % hc)
eyesglasses_cascade = cv2.CascadeClassifier('%s/haarcascade_eye_tree_eyeglasses.xml' % hc)
mouth_cascade = cv2.CascadeClassifier('%s/haarcascade_mcs_mouth.xml' % hc)
filename = 'output/'+datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
def recognizer():
while True:
ret, frame = cap.read()
profile_count = recognize_profile(frame)
face_count, eye_count = recognize_face(frame, True)
cv2.imwrite('%s.png' % filename, frame)
image = cv2.imread('%s.png' % filename)
cv2.imshow('image', image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
def recognize_profile(frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
profiles = profile_cascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30)
# flags = cv2.CV_HAAR_SCALE_IMAGE
)
for (x, y, w, h) in profiles:
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
return len(profiles)
def recognize_face(frame, recognize_eyes=None):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30)
# flags = cv2.CV_HAAR_SCALE_IMAGE
)
eyes = []
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
if recognize_eyes:
roi_gray = gray[y:y + h, x:x + w]
roi_color = frame[y:y + h, x:x + w]
eyes = eye_cascade.detectMultiScale(roi_gray)
# draw bounding boxes around detected features
for (ex, ey, ew, eh) in eyes:
eye_center = (ex + ew // 2, ey + eh // 2)
radius = int(round((ew + eh) * 0.25))
cv2.circle(roi_color, eye_center, radius, (0, 0, 255), 4)
return len(faces), len(eyes)
def snapshot():
while True:
# Capture frame-by-frame
ret, frame = cap.read()
recognize_profile(frame)
recognize_face(frame, True)
cv2.imwrite('%s.png' % filename, frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
def live_video_recognizer():
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
out = cv2.VideoWriter('%s.avi' % filename, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (frame_width, frame_height))
while True:
ret, frame = cap.read()
recognize_profile(frame)
recognize_face(frame, True)
if ret is True:
out.write(frame)
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
recognizer()
# snapshot()
# live_video_recognizer()
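One more detail in the question's code that may matter here, offered as an observation rather than a confirmed diagnosis: cv2.VideoCapture.read() returns a (ret, frame) tuple, so frame = video_capture.read() hands a tuple to cv2.resize, the except block prints "Expected cv::UMat for argument 'src'", detect_and_display returns None, and cv2.imshow(None) then fails with the size.width>0 assertion. A minimal sketch of the unpacking at the top of detect_and_display:

ret, frame = video_capture.read()   # read() returns (success_flag, image)
if not ret or frame is None:
    return None                     # caller can skip this iteration
frame = cv2.resize(frame, (0, 0), fx=0.6, fy=0.6)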
