I have three IP cameras around my house and I want to capture an image when motion is detected. I want to run the motion-capture algorithm at the same time for all three cameras.
I managed to do the job for one camera: open the stream, run the motion detection algorithm, and store an image when motion is detected:
import cv2
import time

cap3 = cv2.VideoCapture('http://X.X.X.X:XXXX/stream.mjpg')
ret3, frame31 = cap3.read()
ret3, frame32 = cap3.read()

while (True):
    diff3 = cv2.absdiff(frame31, frame32)
    gray3 = cv2.cvtColor(diff3, cv2.COLOR_BGR2GRAY)
    blur3 = cv2.GaussianBlur(gray3, (5, 5), 0)
    _, tresh3 = cv2.threshold(blur3, 30, 255, cv2.THRESH_BINARY)
    dilated3 = cv2.dilate(tresh3, None, iterations=3)
    contours3, _ = cv2.findContours(dilated3, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    for contour in contours3:
        (x, y, w, h) = cv2.boundingRect(contour)
        if cv2.contourArea(contour) < 800:
            continue
        cv2.rectangle(frame31, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame31, "Status: {}".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        t = time.localtime()
        filename = "RASP" + str(t[0]) + str(t[1]) + str(t[2]) + "_" + str(t[3]) + str(t[4]) + str(t[5]) + ".jpg"
        cv2.imwrite(filename, frame31)

    frame31 = frame32
    ret3, frame32 = cap3.read()

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap3.release()
cv2.destroyAllWindows()
The problem comes when I try to do the same job in parallel for the three cameras.
What I do is duplicate the same processing inside the while loop for each of the three cameras; when I do so it runs for a few seconds, and then I get this error:
Traceback (most recent call last):
File "C:/Users/Guillaume/PycharmProjects/IPCAM/IPCAM2.py", line 54, in <module>
gray2 = cv2.cvtColor(diff2, cv2.COLOR_BGR2GRAY)
cv2.error: OpenCV(4.2.0) C:\projects\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
The code I run is below:
import cv2
import numpy as np
from datetime import datetime
import time

cap2 = cv2.VideoCapture('rtsp://')  # IPCAM2
cap = cv2.VideoCapture('rtsp://')   # IPCAM1
cap3 = cv2.VideoCapture('http://')  # RASP

def rescale_frame(frame, percent=75):
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    dim = (width, height)
    return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)

while (True):
    ret1, frame11 = cap.read()
    ret1, frame12 = cap.read()
    ret2, frame21 = cap2.read()
    ret2, frame22 = cap2.read()
    ret3, frame31 = cap3.read()
    ret3, frame32 = cap3.read()

    diff1 = cv2.absdiff(frame11, frame12)
    gray1 = cv2.cvtColor(diff1, cv2.COLOR_BGR2GRAY)
    blur1 = cv2.GaussianBlur(gray1, (5, 5), 0)
    _, tresh1 = cv2.threshold(blur1, 40, 255, cv2.THRESH_BINARY)
    dilated1 = cv2.dilate(tresh1, None, iterations=3)
    contours1, _ = cv2.findContours(dilated1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours1:
        (x, y, w, h) = cv2.boundingRect(contour)
        if cv2.contourArea(contour) < 1000:
            continue
        cv2.rectangle(frame11, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame11, "Status: {}".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        t = time.localtime()
        filename = str(t[0]) + str(t[1]) + str(t[2]) + "_" + str(t[3]) + str(t[4]) + str(t[5]) + ".jpg"
        cv2.imwrite(filename, frame11)
    # cv2.line(frame, (0, 300), (200, 200), (0, 255, 0), 5)
    resizedframe11 = rescale_frame(frame11, percent=75)
    cv2.imshow('frame', resizedframe11)
    frame11 = frame12
    ret1, frame12 = cap.read()

    diff2 = cv2.absdiff(frame21, frame22)
    gray2 = cv2.cvtColor(diff2, cv2.COLOR_BGR2GRAY)
    blur2 = cv2.GaussianBlur(gray2, (5, 5), 0)
    _, tresh2 = cv2.threshold(blur2, 40, 255, cv2.THRESH_BINARY)
    dilated2 = cv2.dilate(tresh2, None, iterations=3)
    contours2, _ = cv2.findContours(dilated2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours2:
        (x, y, w, h) = cv2.boundingRect(contour)
        if cv2.contourArea(contour) < 1000:
            continue
        cv2.rectangle(frame21, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame21, "Status: {}".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        t = time.localtime()
        filename = str(t[0]) + str(t[1]) + str(t[2]) + "_" + str(t[3]) + str(t[4]) + str(t[5]) + ".jpg"
        cv2.imwrite(filename, frame21)
    resizedframe21 = rescale_frame(frame21, percent=75)
    cv2.imshow('frame2', resizedframe21)
    frame21 = frame22
    ret2, frame22 = cap2.read()

    diff3 = cv2.absdiff(frame31, frame32)
    gray3 = cv2.cvtColor(diff3, cv2.COLOR_BGR2GRAY)
    blur3 = cv2.GaussianBlur(gray3, (5, 5), 0)
    _, tresh3 = cv2.threshold(blur3, 40, 255, cv2.THRESH_BINARY)
    dilated3 = cv2.dilate(tresh3, None, iterations=3)
    contours3, _ = cv2.findContours(dilated3, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours3:
        (x, y, w, h) = cv2.boundingRect(contour)
        if cv2.contourArea(contour) < 800:
            continue
        cv2.rectangle(frame31, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame31, "Status: {}".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        t = time.localtime()
        filename = "RASP" + str(t[0]) + str(t[1]) + str(t[2]) + "_" + str(t[3]) + str(t[4]) + str(t[5]) + ".jpg"
        cv2.imwrite(filename, frame31)
    resizedframe31 = rescale_frame(frame31, percent=75)
    cv2.imshow('frame3', resizedframe31)
    frame31 = frame32
    ret3, frame32 = cap3.read()

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
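The assertion !_src.empty() from cv2.cvtColor means it was handed an empty image: one of the cap.read() calls returned no frame (ret is False), which happens easily with network streams. A minimal guard, sketched here with the same placeholder URLs as above rather than the full detection pipeline, is to check every ret before processing and skip the iteration otherwise:

import cv2

# One capture per camera, with the same placeholder URLs as in the question.
caps = [cv2.VideoCapture('rtsp://'),   # IPCAM1
        cv2.VideoCapture('rtsp://'),   # IPCAM2
        cv2.VideoCapture('http://')]   # RASP

while True:
    frames = []
    ok = True
    for cap in caps:
        ret, frame = cap.read()
        ok = ok and ret and frame is not None
        frames.append(frame)
    if not ok:
        # A stream stalled or dropped a frame: skip this iteration instead of
        # passing an empty image to cv2.cvtColor (which raises the assertion above).
        continue
    # ... run the absdiff / cvtColor / threshold / findContours pipeline on `frames` here ...
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

for cap in caps:
    cap.release()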
Thanks Kartik and thekangaroo for your answers.
I managed to run my three cameras at the same time using threads. I am just opening them and showing a resized stream.
There is another issue: one camera, and then a second, stops after a random time of between 5 and 20 seconds. The stream stops and then the window closes without any message.
It seems to me that this is due to lag in getting the images from the cameras... any ideas on how to avoid that with OpenCV?
Thanks again for your helpful answers.
Below is the code I use:
import cv2
import threading
import time

class camThread(threading.Thread):
    def __init__(self, previewName, camID):
        threading.Thread.__init__(self)
        self.previewName = previewName
        self.camID = camID

    def run(self):
        print("Starting " + self.previewName)
        camPreview(self.previewName, self.camID)

def rescale_frame(frame, percent=75):
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    dim = (width, height)
    return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)

def camPreview(previewName, camID):
    cv2.namedWindow(previewName)
    cam = cv2.VideoCapture(camID)
    if cam.isOpened():  # try to get the first frame
        rval, frame = cam.read()
    else:
        time.sleep(10)
        rval, frame = cam.read()

    percent = 50
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    dim = (width, height)

    while rval:
        # cv2.imshow(previewName, frame)
        cv2.imshow(previewName, cv2.resize(frame, dim, interpolation=cv2.INTER_AREA))
        time.sleep(0.5)
        rval, frame = cam.read()
        key = cv2.waitKey(20)
        print(previewName + str(cam.isOpened()))

# Create three threads, one per camera
thread1 = camThread("CLIO", 'rtsp://xxxx')
thread2 = camThread("JARDIN", 'rtsp://xxxx')
thread3 = camThread("RASPCAM", 'http://xxxx')
thread1.start()
thread2.start()
thread3.start()
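One likely reason a stream freezes and its window closes is that frames arrive faster than the loop consumes them (the time.sleep(0.5) lets the driver buffer back up), until a blocking cam.read() eventually fails. A common workaround is a small reader thread per camera that reads continuously and only keeps the newest frame, while the display loop shows whatever is latest. This is only a sketch under that assumption; LatestFrameReader and its names are hypothetical, and the URL is a placeholder like those above:

import cv2
import threading

class LatestFrameReader(threading.Thread):
    """Continuously read a capture so its buffer never backs up; keep only the newest frame."""
    def __init__(self, camID):
        threading.Thread.__init__(self, daemon=True)
        self.cam = cv2.VideoCapture(camID)
        self.lock = threading.Lock()
        self.frame = None
        self.running = True

    def run(self):
        while self.running and self.cam.isOpened():
            rval, frame = self.cam.read()
            if rval:
                with self.lock:
                    self.frame = frame

    def latest(self):
        with self.lock:
            return None if self.frame is None else self.frame.copy()

reader = LatestFrameReader('rtsp://xxxx')   # placeholder URL, as above
reader.start()
while True:
    frame = reader.latest()
    if frame is not None:
        cv2.imshow("CLIO", frame)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break
reader.running = False
cv2.destroyAllWindows()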
I want to ask how to change the color of the bounding box and font of a detected object once it has passed a line. I am currently working on a project on human walking speed estimation using a Haar cascade. The program works as follows: the detected object passes two imaginary lines, and when it passes the second line the program shows the speed. If the speed of the detected human is below 3 km/h, the bounding box and the font should be shown in RED, but if it is more than 3 km/h they should be shown in GREEN. I also want the speed text to be shown for 5 seconds.
Hope you can help me solve this. Here's the program that I've worked on.
import cv2
import time

cascade_src = 'haarcascade_fullbody.xml'
video_src = 'video-1.mp4'

#line a
ax1 = 15
ay = 225
ax2 = 600
#line b
bx1 = 15
by = 275
bx2 = 600
#person num
i = 1
start_time = time.time()

#video ....
cap = cv2.VideoCapture(video_src)
human_cascade = cv2.CascadeClassifier(cascade_src)
videoWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
videoHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('wisuda-14.mp4', fourcc, 25.0, (videoWidth, videoHeight))

def Speed_Cal(time):
    try:
        Speed = (9.144*3600)/(time*1000)
        return Speed
    except ZeroDivisionError:
        print(5)

while True:
    ret, img = cap.read()
    if (type(img) == type(None)):
        break

    #blurring for a more exact detection
    blurred = cv2.blur(img, ksize=(3,3))
    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    human = human_cascade.detectMultiScale(gray, scaleFactor=1.04865, minNeighbors=6)

    #line a
    cv2.line(img, (ax1,ay), (ax2,ay), (255,0,0), 2)
    #line b
    cv2.line(img, (bx1,by), (bx2,by), (255,0,0), 2)

    for (x,y,w,h) in human:
        cv2.rectangle(img, (x,y), (x + w, y + h), (0, 0, 255), 2)
        roi_blurred = blurred[x: x + h, y:y + w]
        roi_gray = gray[x: x + h, y:y + w]
        roi_img = img[x: x + h, y:y + w]
        cv2.circle(img, (int((x+x+w)/2), int((y+y+h)/2)), 2, (0,255,0), -1)
        #cv2.putText(img, "ID : " + str(i), (x, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)

        while int(ay) == int((y+y+h)/2):
            start_time = time.time()
            break

        while int(ay) <= int((y+y+h)/2):
            if int(by) <= int((y+y+h)/2) & int(by+10) >= int((y+y+h)/2):
                cv2.line(img, (bx1,by), (bx2,by), (0,255,0), 2)
                Speed = Speed_Cal(time.time() - start_time)
                print("ID Number "+str(i)+" Speed: " + str(int(Speed)))
                i = i + 1
                cv2.putText(img, "Speed: "+str(int(Speed))+"km/jam", (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
                break
            else:
                break

    out.write(img)
    cv2.imshow('video', img)
    cv2.imshow('Gray', gray)
    cv2.imshow('Blurr', blurred)

    if cv2.waitKey(33) == 27:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
I really hope you can help me with this.
In cv2.rectangle(img, (x,y), (x + w, y + h), (0, 0, 255), 2), the 3-element tuple (0, 0, 255) is the BGR (blue, green, red) color of the bounding rectangle; changing those values changes the color. For more information on the drawing functions, check out the OpenCV drawing functions doc: https://docs.opencv.org/2.4/modules/core/doc/drawing_functions.html
As for the text color, in cv2.putText(img, "Speed: "+str(int(Speed))+"km/jam", (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2), changing the tuple (255,0,0) will change the text color.
import time

start = time.time()
sec = 5
while True:
    if condition:                  # e.g. the speed has just been computed
        start = time.time()
    if time.time() - start < sec:
        pass                       # do whatever (e.g. keep drawing the speed text)
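Putting the two parts together for the walking-speed program: choose the box and text colour from the computed speed (BGR tuples, red below 3 km/h, green otherwise) and keep drawing the text while fewer than 5 seconds have passed since the speed was measured. This is only a sketch of the drawing side; last_speed, speed_time, pick_color and draw_detection are hypothetical names, not from the original program:

import time
import cv2

sec = 5                 # how long to keep showing the speed text
last_speed = None       # hypothetical: last computed speed, in km/h
speed_time = 0.0        # hypothetical: when that speed was computed

def pick_color(speed_kmh):
    # red below 3 km/h, green otherwise (BGR tuples)
    return (0, 0, 255) if speed_kmh < 3 else (0, 255, 0)

# inside the detection loop, right after Speed = Speed_Cal(...):
#     last_speed = Speed
#     speed_time = time.time()

def draw_detection(img, x, y, w, h):
    # pick the colour from the last measured speed; default to red before any measurement
    color = pick_color(last_speed) if last_speed is not None else (0, 0, 255)
    cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
    # only keep the speed text on screen for `sec` seconds after it was computed
    if last_speed is not None and time.time() - speed_time < sec:
        cv2.putText(img, "Speed: " + str(int(last_speed)) + " km/h", (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2)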
I am currently working on a project on human walking speed estimation using a Haar cascade. The program works as follows: the detected object passes two imaginary lines, and when it passes the second line the program shows the speed. The program I've written so far only calculates the speed of a detected object walking from top to bottom, but I want it to catch the calculation from bottom to top too, so that it can measure in both directions.
Here's the program:
import cv2
import time

cascade_src = 'haarcascade_fullbody.xml'
video_src = 'video-1.mp4'

#line a
ax1 = 15
ay = 225
ax2 = 600
#line b
bx1 = 15
by = 275
bx2 = 600
#person num
i = 1
start_time = time.time()

#video ....
cap = cv2.VideoCapture(video_src)
human_cascade = cv2.CascadeClassifier(cascade_src)
videoWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
videoHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('wisuda-14.mp4', fourcc, 25.0, (videoWidth, videoHeight))

def Speed_Cal(time):
    try:
        Speed = (9.144*3600)/(time*1000)
        return Speed
    except ZeroDivisionError:
        print(5)

while True:
    ret, img = cap.read()
    if (type(img) == type(None)):
        break

    #bluring to have exacter detection
    blurred = cv2.blur(img, ksize=(3,3))
    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    human = human_cascade.detectMultiScale(gray, scaleFactor=1.04865, minNeighbors=6)

    #line a
    cv2.line(img, (ax1,ay), (ax2,ay), (255,0,0), 2)
    #line b
    cv2.line(img, (bx1,by), (bx2,by), (255,0,0), 2)

    for (x,y,w,h) in human:
        cv2.rectangle(img, (x,y), (x + w, y + h), (0, 0, 255), 2)
        roi_blurred = blurred[x: x + h, y:y + w]
        roi_gray = gray[x: x + h, y:y + w]
        roi_img = img[x: x + h, y:y + w]
        cv2.circle(img, (int((x+x+w)/2), int((y+y+h)/2)), 2, (0,255,0), -1)
        #cv2.putText(img, "ID : " + str(i), (x, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)

        while int(ay) == int((y+y+h)/2):
            start_time = time.time()
            break

        while int(ay) <= int((y+y+h)/2):
            if int(by) <= int((y+y+h)/2) & int(by+10) >= int((y+y+h)/2):
                cv2.line(img, (bx1,by), (bx2,by), (0,255,0), 2)
                Speed = Speed_Cal(time.time() - start_time)
                print("ID Number "+str(i)+" Speed: " + str(int(Speed)))
                i = i + 1
                cv2.putText(img, "Speed: "+str(int(Speed))+"km/jam", (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
                break
            else:
                break

    out.write(img)
    cv2.imshow('video', img)
    cv2.imshow('Gray', gray)
    cv2.imshow('Blurr', blurred)

    if cv2.waitKey(33) == 27:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
I hope you can help, guys.
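One way to measure in both directions is to stop tying the timer to line a alone: remember which of the two lines the centroid touched first, start the timer there, and compute the speed when the other line is reached, whichever order that happens in. Below is a hedged sketch under that assumption; crossed_first, TOL and update are hypothetical names, and the 9.144 m distance between the lines is taken from the Speed_Cal formula in the program above:

import time

ay, by = 225, 275          # y positions of line a and line b, as in the program
TOL = 10                   # hypothetical tolerance band around each line, in pixels

crossed_first = None       # 'a' or 'b', whichever line the centroid hit first
start_time = 0.0

def speed_kmh(elapsed):
    # 9.144 m between the lines, converted to km/h (same formula as Speed_Cal)
    return (9.144 * 3600) / (elapsed * 1000) if elapsed > 0 else 0.0

def update(cy):
    """Call once per frame with the centroid's y coordinate; returns a speed when the second line is crossed."""
    global crossed_first, start_time
    on_a = abs(cy - ay) <= TOL
    on_b = abs(cy - by) <= TOL
    if crossed_first is None:
        if on_a:
            crossed_first, start_time = 'a', time.time()
        elif on_b:
            crossed_first, start_time = 'b', time.time()
        return None
    # walking downwards finishes on line b, walking upwards finishes on line a
    if (crossed_first == 'a' and on_b) or (crossed_first == 'b' and on_a):
        elapsed = time.time() - start_time
        crossed_first = None
        return speed_kmh(elapsed)
    return None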
I wish to alter this code in order to track a single large object in motion, i.e. a person; when I run the code as it is, the display tracks a person as multiple objects rather than as one single object.
Ignoring the Firebase part, I want to draw the rectangle around the entire object rather than around parts of the object.
Also, I wish to change the orientation of the reference lines on the display from horizontal to vertical, please.
import datetime
import math
import cv2
import numpy as np
from firebase import firebase

# global variables
width = 0
height = 0
EntranceCounter = 0
ExitCounter = 0
min_area = 3000  # Adjust this value according to your usage
_threshold = 70  # Adjust this value according to your usage
OffsetRefLines = 150  # Adjust this value according to your usage

# Check if an object is entering the monitored zone
def check_entrance_line_crossing(y, coor_y_entrance, coor_y_exit):
    abs_distance = abs(y - coor_y_entrance)
    if ((abs_distance <= 2) and (y < coor_y_exit)):
        return 1
    else:
        return 0

# Check if an object is exiting the monitored zone
def check_exit_line_crossing(y, coor_y_entrance, coor_y_exit):
    abs_distance = abs(y - coor_y_exit)
    if ((abs_distance <= 2) and (y > coor_y_entrance)):
        return 1
    else:
        return 0

camera = cv2.VideoCapture(0)
# force 640x480 webcam resolution
camera.set(3, 640)
camera.set(4, 480)
ReferenceFrame = None

# Frames may be discarded while the camera adjusts to the light
for i in range(0, 20):
    (grabbed, Frame) = camera.read()

while True:
    (grabbed, Frame) = camera.read()
    height = np.size(Frame, 0)
    width = np.size(Frame, 1)

    # if a frame cannot be grabbed, the program ends here
    if not grabbed:
        break

    # gray-scale conversion and Gaussian blur filter
    GrayFrame = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
    GrayFrame = cv2.GaussianBlur(GrayFrame, (21, 21), 0)

    if ReferenceFrame is None:
        ReferenceFrame = GrayFrame
        continue

    # Background subtraction and image manipulation
    FrameDelta = cv2.absdiff(ReferenceFrame, GrayFrame)
    FrameThresh = cv2.threshold(FrameDelta, _threshold, 255, cv2.THRESH_BINARY)[1]

    # Dilate image and find all the contours
    FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
    _, cnts, _ = cv2.findContours(FrameThresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    qtty_of_count = 0

    # plot reference lines (entrance and exit lines)
    coor_y_entrance = (height // 2) - OffsetRefLines
    coor_y_exit = (height // 2) + OffsetRefLines
    cv2.line(Frame, (0, coor_y_entrance), (width, coor_y_entrance), (255, 0, 0), 2)
    cv2.line(Frame, (0, coor_y_exit), (width, coor_y_exit), (0, 0, 255), 2)

    # check all found contours
    for c in cnts:
        # if a contour has a small area, it'll be ignored
        if cv2.contourArea(c) < min_area:
            continue

        qtty_of_count = qtty_of_count + 1
        app = firebase.FirebaseApplication('https://finalyearproj-caa49.firebaseio.com/', None)
        ## result = app.post('/people', {'count': qtty_of_count})##
        update = app.put('/people', "count", qtty_of_count)
        print("Updated value in FB" + str(update))

        # draw a rectangle "around" the object
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(Frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # find object's centroid
        coor_x_centroid = (x + x + w) // 2
        coor_y_centroid = (y + y + h) // 2
        ObjectCentroid = (coor_x_centroid, coor_y_centroid)
        cv2.circle(Frame, ObjectCentroid, 1, (0, 0, 0), 5)

        if (check_entrance_line_crossing(coor_y_centroid, coor_y_entrance, coor_y_exit)):
            EntranceCounter += 1

        if (check_exit_line_crossing(coor_y_centroid, coor_y_entrance, coor_y_exit)):
            ExitCounter += 1

    print("Total contours found: " + str(qtty_of_count))

    # Write entrance and exit counter values on the frame and show it
    cv2.putText(Frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 0, 1), 2)
    cv2.putText(Frame, "Exits: {}".format(str(ExitCounter)), (10, 70),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow("Original Frame", Frame)
    cv2.waitKey(1)

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
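Two hedged sketches for the changes asked about above, not a drop-in patch to the program. For one box around the whole person, instead of drawing a rectangle per contour you can stack the points of every sufficiently large contour and take a single bounding rectangle; for vertical reference lines, draw the lines at two x positions and compare the centroid's x coordinate instead of its y. The helper names (merged_bounding_box, draw_vertical_lines, all_points) are illustrative:

import cv2
import numpy as np

def merged_bounding_box(cnts, min_area=3000):
    """Return one (x, y, w, h) around all contours larger than min_area, or None."""
    big = [c for c in cnts if cv2.contourArea(c) >= min_area]
    if not big:
        return None
    all_points = np.vstack(big)          # stack every contour's points into one array
    return cv2.boundingRect(all_points)  # one rectangle around the whole moving object

def draw_vertical_lines(frame, width, height, offset=150):
    """Vertical entrance/exit lines: two x positions instead of two y positions."""
    coor_x_entrance = (width // 2) - offset
    coor_x_exit = (width // 2) + offset
    cv2.line(frame, (coor_x_entrance, 0), (coor_x_entrance, height), (255, 0, 0), 2)
    cv2.line(frame, (coor_x_exit, 0), (coor_x_exit, height), (0, 0, 255), 2)
    return coor_x_entrance, coor_x_exit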
The program says "Process finished with exit code 0" but I am not getting any output. I am using Python 2.7, and the program's job is to detect free parking slots in a car park. It also has pedestrian detection. Any help will be very much appreciated; I badly need this code to work. Thanks.
Here is the link to the source code, along with a video of how it works:
https://github.com/ankit1khare/ComputerVision
DESIRED OUTPUT: The program should open the input video and draw the parking overlay on top of the video.
Here is the code for the main program:
import yaml
import numpy as np
import cv2

# path references
fn = "Khare_testvideo_01.mp4" #3
#fn = "datasets\parkinglot_1_720p.mp4"
#fn = "datasets\street_high_360p.mp4"
fn_yaml = "Khare_yml_01.yml"
fn_out = "Khare_outputvideo_01.avi"
cascade_src = 'Khare_classifier_02.xml'
car_cascade = cv2.CascadeClassifier(cascade_src)
global_str = "Last change at: "
change_pos = 0.00
dict = {
    'text_overlay': True,
    'parking_overlay': True,
    'parking_id_overlay': True,
    'parking_detection': True,
    'motion_detection': True,
    'pedestrian_detection': False,  # takes a lot of processing power
    'min_area_motion_contour': 500,  # area given to detect motion
    'park_laplacian_th': 2.8,
    'park_sec_to_wait': 1,  # 4 wait time for changing the status of a region
    'start_frame': 0,  # begin frame from specific frame number
    'show_ids': True,  # shows id on each region
    'classifier_used': True,
    'save_video': True
}

# Set from video
cap = cv2.VideoCapture(fn)
print("video found")
video_info = {'fps': cap.get(cv2.CAP_PROP_FPS),
              'width': int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)*0.6),
              'height': int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)*0.6),
              'fourcc': cap.get(cv2.CAP_PROP_FOURCC),
              'num_of_frames': int(cap.get(cv2.CAP_PROP_FRAME_COUNT))}
cap.set(cv2.CAP_PROP_POS_FRAMES, dict['start_frame'])  # jump to frame number specified

def run_classifier(img, id):
    # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cars = car_cascade.detectMultiScale(img, 1.1, 1)
    if cars == ():
        return False
    else:
        # parking_status[id] = False
        return True

# Define the codec and create VideoWriter object
if dict['save_video']:
    fourcc = cv2.VideoWriter_fourcc('X','V','I','D')  # options: ('P','I','M','1'), ('D','I','V','X'), ('M','J','P','G'), ('X','V','I','D')
    out = cv2.VideoWriter(fn_out, -1, 25.0, (video_info['width'], video_info['height']))
    print("save video -- out w * H")

# initialize the HOG descriptor/person detector. Takes a lot of processing power.
if dict['pedestrian_detection']:
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

# Use background subtraction
if dict['motion_detection']:
    fgbg = cv2.createBackgroundSubtractorMOG2(history=300, varThreshold=16, detectShadows=True)

# Read YAML data (parking space polygons)
with open(fn_yaml, 'r') as stream:
    parking_data = yaml.load(stream)
parking_contours = []
parking_bounding_rects = []
parking_mask = []
parking_data_motion = []
if parking_data != None:
    for park in parking_data:
        points = np.array(park['points'])
        rect = cv2.boundingRect(points)
        points_shifted = points.copy()
        points_shifted[:, 0] = points[:, 0] - rect[0]  # shift contour to region of interest
        points_shifted[:, 1] = points[:, 1] - rect[1]
        parking_contours.append(points)
        parking_bounding_rects.append(rect)
        mask = cv2.drawContours(np.zeros((rect[3], rect[2]), dtype=np.uint8), [points_shifted], contourIdx=-1,
                                color=255, thickness=-1, lineType=cv2.LINE_8)
        mask = mask == 255
        parking_mask.append(mask)

kernel_erode = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))  # morphological kernel
kernel_dilate = cv2.getStructuringElement(cv2.MORPH_RECT, (5,19))
if parking_data != None:
    parking_status = [False]*len(parking_data)
    parking_buffer = [None]*len(parking_data)
# bw = ()

def print_parkIDs(park, coor_points, frame_rev):
    moments = cv2.moments(coor_points)
    centroid = (int(moments['m10']/moments['m00'])-3, int(moments['m01']/moments['m00'])+3)
    # putting numbers on marked regions
    cv2.putText(frame_rev, str(park['id']), (centroid[0]+1, centroid[1]+1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
    cv2.putText(frame_rev, str(park['id']), (centroid[0]-1, centroid[1]-1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
    cv2.putText(frame_rev, str(park['id']), (centroid[0]+1, centroid[1]-1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
    cv2.putText(frame_rev, str(park['id']), (centroid[0]-1, centroid[1]+1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
    cv2.putText(frame_rev, str(park['id']), centroid, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 1, cv2.LINE_AA)

while (cap.isOpened()):
    video_cur_pos = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0  # Current position of the video file in seconds
    video_cur_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)  # Index of the frame to be decoded/captured next
    ret, frame_initial = cap.read()
    if ret == True:
        frame = cv2.resize(frame_initial, None, fx=0.6, fy=0.6)
    if ret == False:
        print("Video ended")
        break

    # Background subtraction
    frame_blur = cv2.GaussianBlur(frame.copy(), (5,5), 3)
    # frame_blur = frame_blur[150:1000, 100:1800]
    frame_gray = cv2.cvtColor(frame_blur, cv2.COLOR_BGR2GRAY)
    frame_out = frame.copy()

    # Drawing the overlay. Text overlay at the left corner of the screen
    if dict['text_overlay']:
        str_on_frame = "%d/%d" % (video_cur_frame, video_info['num_of_frames'])
        cv2.putText(frame_out, str_on_frame, (5, 30), cv2.FONT_HERSHEY_SIMPLEX,
                    0.8, (0, 255, 255), 2, cv2.LINE_AA)
        cv2.putText(frame_out, global_str + str(round(change_pos, 2)) + 'sec', (5, 60), cv2.FONT_HERSHEY_SIMPLEX,
                    0.8, (255, 0, 0), 2, cv2.LINE_AA)

    # motion detection for all objects
    if dict['motion_detection']:
        # frame_blur = frame_blur[380:420, 240:470]
        # cv2.imshow('dss', frame_blur)
        fgmask = fgbg.apply(frame_blur)
        bw = np.uint8(fgmask==255)*255
        bw = cv2.erode(bw, kernel_erode, iterations=1)
        bw = cv2.dilate(bw, kernel_dilate, iterations=1)
        # cv2.imshow('dss',bw)
        # cv2.imwrite("frame%d.jpg" % co, bw)
        (_, cnts, _) = cv2.findContours(bw.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # loop over the contours
        for c in cnts:
            # print(cv2.contourArea(c))
            # if the contour is too small, we ignore it
            if cv2.contourArea(c) < dict['min_area_motion_contour']:
                continue
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame_out, (x, y), (x + w, y + h), (255, 0, 0), 1)

    # detecting cars and vacant spaces
    if dict['parking_detection']:
        for ind, park in enumerate(parking_data):
            points = np.array(park['points'])
            rect = parking_bounding_rects[ind]
            roi_gray = frame_gray[rect[1]:(rect[1]+rect[3]), rect[0]:(rect[0]+rect[2])]  # crop roi for faster calculation
            laplacian = cv2.Laplacian(roi_gray, cv2.CV_64F)
            # cv2.imshow('oir', laplacian)
            points[:, 0] = points[:, 0] - rect[0]  # shift contour to roi
            points[:, 1] = points[:, 1] - rect[1]
            delta = np.mean(np.abs(laplacian * parking_mask[ind]))
            # if(delta<2.5):
            #     print("ind, del", ind, delta)
            status = delta < dict['park_laplacian_th']
            # If a change in parking status is detected, save the current time
            if status != parking_status[ind] and parking_buffer[ind] == None:
                parking_buffer[ind] = video_cur_pos
                change_pos = video_cur_pos
                # print("state ", ind, delta)
                # applying classifier in case a change is detected in the status of the area
                # if dict['classifier_used']:
                #     classifier_result = run_classifier(roi_gray)
                #     if classifier_result:
                #         print(classifier_result)
            # If status is still different than the one saved and counter is open
            elif status != parking_status[ind] and parking_buffer[ind] != None:
                if video_cur_pos - parking_buffer[ind] > dict['park_sec_to_wait']:
                    parking_status[ind] = status
                    parking_buffer[ind] = None
            # If status is still the same and counter is open
            elif status == parking_status[ind] and parking_buffer[ind] != None:
                parking_buffer[ind] = None

    # changing the color on the basis of the status change that occurred in the above section and putting numbers on areas
    if dict['parking_overlay']:
        for ind, park in enumerate(parking_data):
            points = np.array(park['points'])
            if parking_status[ind]:
                color = (0, 255, 0)
                rect = parking_bounding_rects[ind]
                roi_gray_ov = frame_gray[rect[1]:(rect[1] + rect[3]),
                                         rect[0]:(rect[0] + rect[2])]  # crop roi for faster calculation
                res = run_classifier(roi_gray_ov, ind)
                if res:
                    parking_data_motion.append(parking_data[ind])
                    # del parking_data[ind]
                    color = (0, 0, 255)
            else:
                color = (0, 0, 255)
            cv2.drawContours(frame_out, [points], contourIdx=-1,
                             color=color, thickness=2, lineType=cv2.LINE_8)
            if dict['show_ids']:
                print_parkIDs(park, points, frame_out)

    if parking_data_motion != []:
        for index, park_coord in enumerate(parking_data_motion):
            points = np.array(park_coord['points'])
            color = (0, 0, 255)
            recta = parking_bounding_rects[ind]
            roi_gray1 = frame_gray[recta[1]:(recta[1] + recta[3]),
                                   recta[0]:(recta[0] + recta[2])]  # crop roi for faster calculation
            # laplacian = cv2.Laplacian(roi_gray, cv2.CV_64F)
            # delta2 = np.mean(np.abs(laplacian * parking_mask[ind]))
            # state = delta2 < 1
            # classifier_result = run_classifier(roi_gray1, index)
            # cv2.imshow('dsd', roi_gray1)
            fgbg1 = cv2.createBackgroundSubtractorMOG2(history=300, varThreshold=16, detectShadows=True)
            roi_gray1_blur = cv2.GaussianBlur(roi_gray1.copy(), (5, 5), 3)
            # cv2.imshow('sd', roi_gray1_blur)
            fgmask1 = fgbg1.apply(roi_gray1_blur)
            bw1 = np.uint8(fgmask1 == 255) * 255
            bw1 = cv2.erode(bw1, kernel_erode, iterations=1)
            bw1 = cv2.dilate(bw1, kernel_dilate, iterations=1)
            # cv2.imshow('sd', bw1)
            # cv2.imwrite("frame%d.jpg" % co, bw)
            (_, cnts1, _) = cv2.findContours(bw1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # loop over the contours
            for c in cnts1:
                print(cv2.contourArea(c))
                # if the contour is too small, we ignore it
                if cv2.contourArea(c) < 4:
                    continue
                (x, y, w, h) = cv2.boundingRect(c)
                classifier_result1 = run_classifier(roi_gray1, index)
                if classifier_result1:
                    # print(classifier_result)
                    color = (0, 0, 255)  # Red again if car found by classifier
                else:
                    color = (0, 255, 0)

            classifier_result1 = run_classifier(roi_gray1, index)
            if classifier_result1:
                # print(classifier_result)
                color = (0, 0, 255)  # Red again if car found by classifier
            else:
                color = (0, 255, 0)
            cv2.drawContours(frame_out, [points], contourIdx=-1,
                             color=color, thickness=2, lineType=cv2.LINE_8)

    if dict['pedestrian_detection']:
        # detect people in the image. Slows down the program, requires high GPU speed
        (rects, weights) = hog.detectMultiScale(frame, winStride=(4, 4), padding=(8, 8), scale=1.05)
        # draw the bounding boxes
        for (x, y, w, h) in rects:
            cv2.rectangle(frame_out, (x, y), (x + w, y + h), (255, 0, 0), 2)

    # write the output frames
    if dict['save_video']:
        # if video_cur_frame % 35 == 0:  # take every 30 frames
        out.write(frame_out)

    # Display video
    cv2.imshow('frame', frame_out)
    # cv2.imshow('background mask', bw)
    k = cv2.waitKey(1)
    if k == ord('q'):
        break
    elif k == ord('c'):
        cv2.imwrite('frame%d.jpg' % video_cur_frame, frame_out)
    elif k == ord('j'):
        cap.set(cv2.CAP_PROP_POS_FRAMES, video_cur_frame+1000)  # jump 1000 frames
    elif k == ord('u'):
        cap.set(cv2.CAP_PROP_POS_FRAMES, video_cur_frame + 500)  # jump 500 frames
    if cv2.waitKey(33) == 27:
        break

cv2.waitKey(0)
cap.release()
if dict['save_video']: out.release()
cv2.destroyAllWindows()
Change these lines:

    if dict['save_video']:
        fourcc = cv2.VideoWriter_fourcc('X','V','I','D') # options: ('P','I','M','1'), ('D','I','V','X'), ('M','J','P','G'), ('X','V','I','D')
        out = cv2.VideoWriter(fn_out, -1, 25.0, (video_info['width'], video_info['height']))

to

    if dict['save_video']:
        fourcc = cv2.VideoWriter_fourcc(*'XVID') # options: ('P','I','M','1'), ('D','I','V','X'), ('M','J','P','G'), ('X','V','I','D')
        out = cv2.VideoWriter(fn_out, fourcc, 25.0, (video_info['width'], video_info['height']))

and try again.
Also, put your function/method definitions at the top of the code.
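If the window still never appears after that change, it is worth checking that the capture and the writer actually opened, since a silent "exit code 0" usually means the main while loop never ran (for example because the video file was not found). A small standalone check, assuming the same file names as the program above and a hypothetical 640x360 output size:

import cv2

cap = cv2.VideoCapture("Khare_testvideo_01.mp4")
print("capture opened:", cap.isOpened())        # False means the file was not found or could not be decoded

fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter("Khare_outputvideo_01.avi", fourcc, 25.0, (640, 360))
print("writer opened:", out.isOpened())         # False means the fourcc / frame-size combination failed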