I am working on a motion-based detection program. However, it detects changes in the background as "motion", so I'd like a way to recapture a new first frame every few minutes to replace the current one and fix this issue.
I am using a Raspberry Pi 2B and a Logitech webcam.
My code is based on the PyImageSearch motion detection tutorial; this is my version of it.
Please help me.
(Edit) I have since switched to BackgroundSubtractorMOG2. Now my issue is: how do I add mean-shift tracking so the program recognizes that an object is the same one that entered the frame earlier?
import sys
sys.path.append('/usr/local/lib/python3.4/site-packages')
import numpy as np
import cv2
import imutils
from imutils import contours
import datetime
import time

#cap = cv2.VideoCapture("/home/pi/Desktop/Proj/VideoTestSample.mp4")
cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while cap.isOpened():
    (grabbed, frame) = cap.read()
    text = " "
    if not grabbed:
        break
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, 5)
    fgmask = fgbg.apply(gray)
    thresh = cv2.erode(fgmask, None, iterations=2)
    (_, cnts, hierarchy) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for (i, c) in enumerate(cnts):
        if cv2.contourArea(c) < 300:
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, "#{}".format(i + 1), (x, y - 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
        text = "REC"
    cv2.putText(frame, "{}".format(text), (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
    cv2.imshow('frame', frame)
    cv2.imshow('gray', gray)
    cv2.imshow('fgmask', fgmask)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
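Regarding the edit: a nice side effect of BackgroundSubtractorMOG2 is that it keeps updating its background model, so slow scene changes stop registering as motion and the periodic first-frame recapture becomes unnecessary. For the tracking part, below is a minimal sketch of mean-shift tracking on top of the MOG2 foreground mask (untested on a Pi 2B; track_window and term_crit are illustrative names, and it only follows a single object). It seeds the window from the first large contour, then lets cv2.meanShift follow the densest foreground region in later frames, so the same window keeps identifying the same object:

import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
track_window = None
# stop after 10 iterations or when the window moves less than 1 pixel
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while cap.isOpened():
    grabbed, frame = cap.read()
    if not grabbed:
        break
    fgmask = fgbg.apply(frame)
    if track_window is None:
        # seed the tracker from the largest foreground contour, if any
        cnts = cv2.findContours(fgmask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        big = [c for c in cnts if cv2.contourArea(c) > 300]
        if big:
            track_window = cv2.boundingRect(max(big, key=cv2.contourArea))
    else:
        # shift the window toward the densest foreground region; reusing
        # the same window is what preserves the object's identity
        _, track_window = cv2.meanShift(fgmask, track_window, term_crit)
        x, y, w, h = track_window
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()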
I'm reading VideoCapture frames using cv2, but it keeps showing an error.
The error is:
Traceback (most recent call last):
File "d:\pythonprojects\gym\demo.py", line 33, in <module>
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.error: OpenCV(4.7.0) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
The code is:
import cv2
import numpy as np
import os
from PIL import Image
from Attendance import attendance
from datetime import datetime
from database import *

def getProfile(Id):
    query = "SELECT * FROM users WHERE id=" + str(Id)
    cursor = mycursor.execute(query)
    profile = mycursor.fetchone()
    # profile=None
    # for row in cursor:
    #     profile=row
    # con.close()
    return profile

# os.chdir(os.getcwd())
detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("face-trainner.yml")
cap = cv2.VideoCapture(0)  # Get video feed from the camera
cap.set(3, 640)
cap.set(4, 480)
font = cv2.FONT_HERSHEY_COMPLEX

while True:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        nbr_predicted, conf = recognizer.predict(gray[y:y+h, x:x+w])
        print(nbr_predicted, conf)
        if conf < 80:
            profile = getProfile(nbr_predicted)
            if profile is not None:
                time_now = datetime.now()
                newdate = time_now.strftime('%Y-%m-%d')
                newtime = time_now.strftime('%H:%M:%S')
                attendance(nbr_predicted, newtime, newdate)
                cv2.putText(img, "Name: " + str(profile[4]), (x, y + h + 30), font, 0.4, (0, 0, 255), 1)
                cv2.putText(img, "Gender: " + str(profile[7]), (x, y + h + 50), font, 0.4, (0, 0, 255), 1)
        else:
            cv2.putText(img, "Name: Unknown", (x, y + h + 30), font, 0.4, (0, 0, 255), 1)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('Preview', img)  # Display the video
    cv2.waitKey(1)

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
This error is caused by the img variable being None or empty. It is hard to say why it is None in your case; it could be for various reasons: the camera is not connected properly, the camera driver is not installed, or there are permission issues.
You can add a check after the cap.read() call:
...
while True:
    ret, img = cap.read()
    if img is None:
        print("Could not read frame from the source.")
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
...
I am trying to make a motion detector (using the internal camera) in Python 3. I am using Linux (Debian), and I keep getting this error:
[ WARN:0#0.724] global /io/opencv/modules/videoio/src/cap_v4l.cpp (889) open VIDEOIO(V4L2:/dev/video0): can't open camera by index
Here's the code I'm using:
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())

if args.get("video", None) is None:
    vs = VideoStream(src=0).start()
    time.sleep(2.0)
else:
    vs = cv2.VideoCapture(args["video"])

firstFrame = None
while True:
    frame = vs.read()
    frame = frame if args.get("video", None) is None else frame[1]
    text = "Muon is stuck in helium"
    if frame is None:
        break
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if firstFrame is None:
        firstFrame = gray
        continue
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    for c in cnts:
        if cv2.contourArea(c) < args["min_area"]:
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Muon is fusing hydrogen"
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
    cv2.imshow("Security Feed", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

vs.stop() if args.get("video", None) is None else vs.release()
cv2.destroyAllWindows()
Personally, I think the problem is that Linux is having trouble using the internal camera, but I have been wrong before. If that is the problem, can somebody please help me fix it? And if it isn't, can somebody tell me what I need to fix?
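Before changing the code, it is worth checking whether Linux can see the camera at all. Below is a quick probe sketch (illustrative, not part of the original script): it tries a few V4L2 indices and reports which ones actually deliver frames. On Debian, also confirm that /dev/video0 exists (ls /dev/video*) and that your user is in the video group.

import cv2

for index in range(4):
    cap = cv2.VideoCapture(index)
    # isOpened() alone can be misleading; reading a frame is the real test
    ok = cap.isOpened() and cap.read()[0]
    print("camera index {}: {}".format(index, "working" if ok else "not available"))
    cap.release()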
Hey, I started learning Python not long ago. Right now I'm creating (or more likely trying to create) a face and motion detection script based on the OpenCV library. Unfortunately, I've been stuck for a few days because I can't solve a problem with what I guess is called multi-threading.
Here is my code:
import time
import cv2
import datetime
from discord_webhook import DiscordWebhook
import threading

faceCascade = cv2.CascadeClassifier("face_recognition.xml")

# define a video capture object
video_capture = cv2.VideoCapture(0)

# writing video
frame_width = int(video_capture.get(3))
frame_height = int(video_capture.get(4))

# Define the codec and create a VideoWriter object; the output is stored in an .avi file
out = cv2.VideoWriter(datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p") + '.avi',
                      cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (frame_width, frame_height))

# screenshot when detectors get triggered
def screenshot():
    cv2.imwrite('screenshot.png', video_capture.read()[1])

# webhook notify
def alert():
    webhook = DiscordWebhook(url="", rate_limit_retry=True,
                             content='!ALERT!')
    webhook.execute()

while True:
    # Capture the video frame by frame
    ret, frame = video_capture.read()
    text = "not detected"
    text1 = "not detected"
    timestamp = datetime.datetime.now()

    # face recognition
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    if len(faces) > 0:
        # print("Found {0} faces!".format(len(faces)))
        text = "detected"
    else:
        text = "not detected"
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # motion detection
    ret, frame1 = video_capture.read()
    difference = cv2.absdiff(frame, frame1)  # find the difference between the frames
    gray = cv2.cvtColor(difference, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)  # create threshold
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        if cv2.contourArea(c) < 5000:
            continue
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        text1 = "detected"

    # Display the resulting frame
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, "Face status: {}".format(text), (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv2.putText(frame, "Motion status: {}".format(text1), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 0, 255), 1)

    # check if motion is detected; if not, change status text
    if text1 == "not detected":
        text1 = "detected"
    else:
        text1 = "not detected"

    out.write(frame)
    cv2.imshow('Press Q to quit', frame)
    # the 'q' button is set as the quit key
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# After the loop, release the cap object
video_capture.release()
out.release()
cv2.destroyAllWindows()
I'm trying to send a webhook and take a screenshot every 3 minutes if motion is detected, but the loops completely lag the app. I tried time.sleep and thread timers, but they only freeze or lag the app. If someone can explain how to solve this problem, I'll be very thankful. Have a great day or night.
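One way to do this without sleeping (a minimal sketch, assuming the alert() helper and the text1 motion flag from the code above): remember when the last alert went out and only fire a new one after 3 minutes have passed, and run the webhook in a daemon thread so a slow network call never stalls frame capture.

import threading
import time
import cv2

ALERT_INTERVAL = 180  # seconds between alerts
last_alert = 0.0

def notify(snapshot):
    # runs in the background: save the screenshot, then fire the webhook
    cv2.imwrite('screenshot.png', snapshot)
    alert()

# inside the while loop, after the motion-detection block:
if text1 == "detected" and time.time() - last_alert >= ALERT_INTERVAL:
    last_alert = time.time()
    threading.Thread(target=notify, args=(frame.copy(),), daemon=True).start()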
I tried to use two Haar cascades, haarcascade_profileface.xml and lbpcascade_profileface.xml, together, but the camera does not even open at all. How can I fix this so that both cascades work?
This runs on a Raspberry Pi and can also run on Linux and Windows. Please explain as best as possible! Here is the code:
import numpy as np
import cv2
import time
import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(18, GPIO.OUT)

face_cascade = cv2.CascadeClassifier('Haarcascade_profileface.xml')
side_face_cascade = cv2.CascadeClassifier('lbpcascade_frontalface_improved.xml')
prevTime = 0

## This will get our web camera
cap = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX

while True:
    retval, frame = cap.read()
    if not retval:
        break
    _, img = cap.read()  ## This gets each frame; cap.read returns a flag indicating the frame is correct, plus the frame itself
    ##img = cv2.imread('Z.png')  ## Then we get the image we want to use
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  ## The detector works on grayscale images, so we convert the BGR frame to gray
    faces = face_cascade.detectMultiScale(gray, 1.1, 5)  ## Next, we detect the faces
    if len(faces) > 0:
        print("[INFO] found {0} faces!".format(len(faces)))
        GPIO.output(18, GPIO.HIGH)
    else:
        print("No face")
        GPIO.output(18, GPIO.LOW)
    curTime = time.time()
    sec = curTime - prevTime
    prevTime = curTime
    fps = 1 / sec
    fps_str = "FPS : %0.1f" % fps
    for (x, y, w, h) in faces:  ## We draw a rectangle around each face so we can see it
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0))  ## The faces are a list of coordinates
        cv2.putText(img, 'Myface', (x, y), font, fontScale=1, color=(255, 70, 120), thickness=2)
    side_faces = side_face_cascade.detectMultiScale(gray, 1.1, 5)
    for (ex, ey, ew, eh) in side_faces:  ## Same for the profile faces
        cv2.rectangle(img, (ex, ey), (ex + ew, ey + eh), (255, 0, 0))
        cv2.putText(img, 'Myface', (ex, ey), font, fontScale=1, color=(255, 70, 120), thickness=2)
    cv2.putText(frame, 'Number of Faces Detected: ' + fps_str, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
    cv2.imshow('img', img)  ## Last we show the image
    x = cv2.waitKey(30) & 0xff
    if x == 27:  ## Press escape to exit the program
        break

cap.release()
OpenCV actually provides a "side-face" detector. It is called 'haarcascade_profileface.xml'. You can do:
side_face_cascade = cv2.CascadeClassifier('haarcascade_profileface.xml')
side_faces = side_face_cascade.detectMultiScale(gray, 1.1, 5)
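One thing to check first (a sanity-test sketch, not a guaranteed fix): cv2.CascadeClassifier does not raise an error on a bad path; it silently loads an empty classifier, and detectMultiScale then fails, which can look like the camera never opening. File names are also case-sensitive on Linux ('Haarcascade_profileface.xml' vs 'haarcascade_profileface.xml'), so verify both cascades before entering the capture loop:

import cv2

cascade_files = ['haarcascade_profileface.xml', 'lbpcascade_frontalface_improved.xml']
for name in cascade_files:
    cascade = cv2.CascadeClassifier(name)
    if cascade.empty():
        raise IOError("failed to load cascade: " + name)
    print("loaded", name)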
I wrote code based on the OpenCV documentation for motion detection. I want to save my input as one video and my output as another, but I'm having problems: I can only save the output video, and sometimes the "input" video turns out to be the same as the output video. My input is my computer's camera feed, which looks like a normal video; the output is that same video with rectangles drawn around the moving objects. I need your help.
import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)
(grabbed, frame) = cap.read()
fshape = frame.shape
fheight = fshape[0]
fwidth = fshape[1]
print(fwidth, fheight)

ret, frame1 = cap.read()
ret, frame2 = cap.read()

while cap.isOpened():
    diff = cv.absdiff(frame1, frame2)
    gray = cv.cvtColor(diff, cv.COLOR_BGR2GRAY)
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)
    dilated = cv.dilate(thresh, None, iterations=3)
    contours, _ = cv.findContours(dilated, cv.RETR_TREE,
                                  cv.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        (x, y, w, h) = cv.boundingRect(contour)
        if cv.contourArea(contour) < 100:
            continue
        cv.rectangle(frame1, (x, y), (x + w, y + h), (0, 0, 255), 3)
    Final_Movie = cv.putText(frame1, "Status: {}".format('Movement'), (10, 20),
                             cv.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 3)
    #cv.drawContours(frame1, contours, -1, (0, 0, 255), 3)
    cv.imshow("feed", frame1)
    cv.imshow("feed1", blur)
    cv.imshow("feed2", gray)
    cv.imshow("feed3", diff)
    cv.imshow("feed4", thresh)
    cv.imshow("feed5", dilated)
    #cv.imshow("feed6", contours)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv.waitKey(40) == 27:
        break

cv.destroyAllWindows()
cap.release()
I tried this, but it doesn't work:
import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)
(grabbed, frame) = cap.read()
fshape = frame.shape
fheight = fshape[0]
fwidth = fshape[1]
print(fwidth, fheight)

fourcc = cv.VideoWriter_fourcc(*'XVID')
out = cv.VideoWriter('output.avi', fourcc, 20.0, (fwidth, fheight))
out1 = cv.VideoWriter('input.avi', fourcc, 20.0, (fwidth, fheight))

ret, frame1 = cap.read()
ret, frame2 = cap.read()

while cap.isOpened():
    diff = cv.absdiff(frame1, frame2)
    gray = cv.cvtColor(diff, cv.COLOR_BGR2GRAY)
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)
    dilated = cv.dilate(thresh, None, iterations=3)
    contours, _ = cv.findContours(dilated, cv.RETR_TREE,
                                  cv.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        (x, y, w, h) = cv.boundingRect(contour)
        if cv.contourArea(contour) < 100:
            continue
        cv.rectangle(frame1, (x, y), (x + w, y + h), (0, 0, 255), 3)
    Final_Movie = cv.putText(frame1, "Status: {}".format('Movement'), (10, 20),
                             cv.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 3)
    #cv.drawContours(frame1, contours, -1, (0, 0, 255), 3)
    cv.imshow("feed", frame1)
    cv.imshow("feed1", blur)
    cv.imshow("feed2", gray)
    cv.imshow("feed3", diff)
    cv.imshow("feed4", thresh)
    cv.imshow("feed5", dilated)
    #cv.imshow("feed6", contours)
    if ret == True:
        out.write(Final_Movie)
        out.write(frame2)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv.waitKey(40) == 27:
        break

cv.destroyAllWindows()
cap.release()
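A likely fix (a sketch built on the second version above, not a verified answer): both write calls go to the same writer, out, so input.avi is never written and output.avi receives a mix of annotated and raw frames. Keep an untouched copy of each frame before drawing on it, send the copy to out1 and the annotated frame to out, and release both writers at the end:

import cv2 as cv

cap = cv.VideoCapture(0)
grabbed, frame = cap.read()
fheight, fwidth = frame.shape[:2]
fourcc = cv.VideoWriter_fourcc(*'XVID')
out = cv.VideoWriter('output.avi', fourcc, 20.0, (fwidth, fheight))
out1 = cv.VideoWriter('input.avi', fourcc, 20.0, (fwidth, fheight))

ret, frame1 = cap.read()
ret, frame2 = cap.read()
while cap.isOpened() and ret:
    raw = frame1.copy()  # untouched copy: this is what goes to input.avi
    diff = cv.absdiff(frame1, frame2)
    gray = cv.cvtColor(diff, cv.COLOR_BGR2GRAY)
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)
    dilated = cv.dilate(thresh, None, iterations=3)
    contours, _ = cv.findContours(dilated, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        if cv.contourArea(contour) < 100:
            continue
        (x, y, w, h) = cv.boundingRect(contour)
        cv.rectangle(frame1, (x, y), (x + w, y + h), (0, 0, 255), 3)
    cv.putText(frame1, "Status: Movement", (10, 20), cv.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 3)
    out1.write(raw)     # raw camera frame -> input.avi
    out.write(frame1)   # annotated frame  -> output.avi
    cv.imshow("feed", frame1)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv.waitKey(40) == 27:
        break

cap.release()
out.release()
out1.release()
cv.destroyAllWindows()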