OpenCV Color Detection in video ROI - python

I am trying to figure out the color in a specific ROI of a traffic-light video. Although the code predicts the color correctly, it doesn't do so for the specific ROI I am looking at.
Initially, when the traffic video starts, the ROI region contains none of the (R/G/Y) colors, but the code still predicts and shows RED based on other areas of the frame. What am I doing wrong?
Have uploaded the test Video here for testing -- https://ufile.io/ha20buns
Python Code below.
import cv2
import numpy as np

cap = cv2.VideoCapture(r'D:\Videos\Sample.mp4')  # raw string so the backslashes aren't treated as escapes

while True:
    ret, frame = cap.read()
    if ret == False:
        break
    frame = cv2.resize(frame, (1920, 1080))

    # Extract required section from entire frame
    roiColor = cv2.rectangle(frame.copy(), (1022, 565), (1411, 709), (255, 255, 255), 2)  # For SampleTL.mp4
    blcolor = (255, 0, 0)
    cv2.rectangle(frame, (1022, 565), (1411, 709), blcolor)

    hsv = cv2.cvtColor(roiColor, cv2.COLOR_BGR2HSV)

    # red
    lower_hsv_red = np.array([157, 177, 122])
    upper_hsv_red = np.array([179, 255, 255])
    mask_red = cv2.inRange(hsv, lowerb=lower_hsv_red, upperb=upper_hsv_red)
    red_blur = cv2.medianBlur(mask_red, 7)

    # green
    lower_hsv_green = np.array([49, 79, 137])
    upper_hsv_green = np.array([90, 255, 255])
    mask_green = cv2.inRange(hsv, lowerb=lower_hsv_green, upperb=upper_hsv_green)
    green_blur = cv2.medianBlur(mask_green, 7)

    # yellow
    lower_hsv_yellow = np.array([15, 150, 150])
    upper_hsv_yellow = np.array([35, 255, 255])
    mask_yellow = cv2.inRange(hsv, lowerb=lower_hsv_yellow, upperb=upper_hsv_yellow)
    yellow_blur = cv2.medianBlur(mask_yellow, 7)

    # Because the masks are binary images, a maximum value of 255 means that color was detected somewhere
    red_color = np.max(red_blur)
    green_color = np.max(green_blur)
    yellow_color = np.max(yellow_blur)

    if red_color == 255:
        print('red')
        cv2.rectangle(frame, (1020, 50), (1060, 90), (0, 0, 255), 2)  # Draw a rectangular frame by coordinates
        cv2.putText(frame, "red", (1020, 40), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)  # red text information
    elif green_color == 255:
        print('green')
        cv2.rectangle(frame, (1020, 50), (1060, 90), (0, 255, 0), 2)
        cv2.putText(frame, "green", (1020, 40), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    elif yellow_color == 255:
        print('yellow')
        cv2.rectangle(frame, (1020, 50), (1060, 90), (0, 255, 0), 2)
        cv2.putText(frame, "yellow", (1020, 40), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 0), 2)

    cv2.imshow('frame', frame)
    red_blur = cv2.resize(red_blur, (300, 200))
    green_blur = cv2.resize(green_blur, (300, 200))
    yellow_blur = cv2.resize(yellow_blur, (300, 200))
    #cv2.imshow('red_window', red_blur)
    #cv2.imshow('green_window', green_blur)
    #cv2.imshow('yellow_window', yellow_blur)

    c = cv2.waitKey(10)
    if c == 27:
        break

cap.release()
cv2.destroyAllWindows()  # destroy all opened windows

cv2.rectangle doesn't crop the image but returns the original image with a drawn rectangle. Try this instead:
roiColor = frame[565:709, 1022:1411]
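For context, a minimal sketch of the loop with a true crop (reusing the ROI coordinates and the red HSV range from the question; not tested against the linked video). Only pixels inside the rectangle are thresholded, so colors elsewhere in the frame can no longer trigger a detection:

import cv2
import numpy as np

cap = cv2.VideoCapture(r'D:\Videos\Sample.mp4')
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame = cv2.resize(frame, (1920, 1080))

    # slice out only the traffic-light region; this is an array view, not a drawing call
    roi = frame[565:709, 1022:1411]
    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)

    # threshold the ROI only (red range taken from the question)
    mask_red = cv2.inRange(hsv, np.array([157, 177, 122]), np.array([179, 255, 255]))
    if cv2.countNonZero(mask_red) > 0:  # equivalent to checking np.max(mask_red) == 255
        print('red')

    cv2.rectangle(frame, (1022, 565), (1411, 709), (255, 0, 0), 2)  # visualize the ROI
    cv2.imshow('frame', frame)
    if cv2.waitKey(10) == 27:
        break

cap.release()
cv2.destroyAllWindows()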

Related

python Real time YOLOv4 detection from desktop screen

There is a simple piece of code that uses YOLO to display a video or camera feed from a PC:
import cv2
import time

Conf_threshold = 0.4
NMS_threshold = 0.4
COLORS = [(0, 255, 0), (0, 0, 255), (255, 0, 0),
          (255, 255, 0), (255, 0, 255), (0, 255, 255)]

class_name = []
with open('Resources\coco.names.txt', 'r') as f:
    class_name = [cname.strip() for cname in f.readlines()]
# print(class_name)

net = cv2.dnn.readNet('Resources\yolov4-tiny.weights', 'Resources\yolov4-tiny.cfg')
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA_FP16)

model = cv2.dnn_DetectionModel(net)
model.setInputParams(size=(416, 416), scale=1/255, swapRB=True)

cap = cv2.VideoCapture('test.mp4')
starting_time = time.time()
frame_counter = 0

while True:
    ret, frame = cap.read()
    frame_counter += 1
    if ret == False:
        break
    classes, scores, boxes = model.detect(frame, Conf_threshold, NMS_threshold)
    for (classid, score, box) in zip(classes, scores, boxes):
        color = COLORS[int(classid) % len(COLORS)]
        label = "%s : %f" % (class_name[classid], score)
        cv2.rectangle(frame, box, color, 1)
        cv2.putText(frame, label, (box[0], box[1]-10),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, color, 1)
    endingTime = time.time() - starting_time
    fps = frame_counter/endingTime
    # print(fps)
    cv2.putText(frame, f'FPS: {fps}', (5, 35),
                cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 255, 0), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
But I need a screen capture instead of the test.mp4 video. To create screenshots of the screen, this code was used:
import numpy as np
import cv2
from mss import mss

def screen_record_efficiency():
    bbox = {'top': 0, 'left': 0, 'width': 800, 'height': 600}
    sct = mss()
    font = cv2.FONT_HERSHEY_SIMPLEX
    while 1:
        # grab image
        sct_img = np.array(sct.grab(bbox))
        # display image
        cv2.imshow('screen', sct_img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
I can't understand how to feed the images from the second piece of code into the detector instead of a video file, or how to combine the two scripts in general. Due to my poor knowledge of Python I can't solve this problem, and I didn't find a video or article on capturing the desktop in real time.
If I pass the grabbed array directly, e.g. frame = sct_img, it displays an error that 4 were passed instead of 3 (or something similar).
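One way to combine the two scripts (a sketch, assuming the model files and class list from the first snippet are available; the window name and capture region are illustrative) is to replace cap.read() with an mss grab and convert the 4-channel BGRA screenshot to 3-channel BGR before calling model.detect. The "4 instead of 3" error is consistent with the extra alpha channel that mss returns:

import cv2
import numpy as np
from mss import mss

Conf_threshold = 0.4
NMS_threshold = 0.4
COLORS = [(0, 255, 0), (0, 0, 255), (255, 0, 0)]

with open('Resources/coco.names.txt', 'r') as f:
    class_name = [cname.strip() for cname in f.readlines()]

net = cv2.dnn.readNet('Resources/yolov4-tiny.weights', 'Resources/yolov4-tiny.cfg')
model = cv2.dnn_DetectionModel(net)
model.setInputParams(size=(416, 416), scale=1/255, swapRB=True)

sct = mss()
bbox = {'top': 0, 'left': 0, 'width': 800, 'height': 600}

while True:
    # grab the screen region instead of reading a video frame
    sct_img = np.array(sct.grab(bbox))
    # mss returns BGRA; drop the alpha channel so the detector gets a 3-channel BGR image
    frame = cv2.cvtColor(sct_img, cv2.COLOR_BGRA2BGR)

    classes, scores, boxes = model.detect(frame, Conf_threshold, NMS_threshold)
    for (classid, score, box) in zip(classes, scores, boxes):
        color = COLORS[int(classid) % len(COLORS)]
        label = "%s : %f" % (class_name[int(classid)], score)
        cv2.rectangle(frame, box, color, 1)
        cv2.putText(frame, label, (box[0], box[1] - 10),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, color, 1)

    cv2.imshow('screen', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break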

OpenCV: Can we discover the first and last frame of a bounding box moving?

Working in Python, I have a video where I:
First draw a bounding box (bbox).
Next, I need to know the frame on which the bbox starts moving.
Finally, I need to know the last frame on which the bbox stops moving.
Initial code:
import cv2
import time
import numpy as np

# open video
cap = cv2.VideoCapture('MyVideo.avi')

# total number of frames
totalFrames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
print(totalFrames)

# MOSSE tracker
tracker = cv2.legacy_TrackerMOSSE.create()

# take first frame
ret, frame = cap.read()
image = cv2.resize(frame, (1600, 900))

# create bounding box
bbox = cv2.selectROI("Tracking", image, False)  # create bounding box
tracker.init(image, bbox)

# define bounding box
def drawBox(image, bbox):
    x, y, w, h = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
    cv2.rectangle(image, (x, y), ((x + w), (y + h)), (255, 0, 255), 3, 1)
    cv2.putText(image, "Tracking", (75, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

# work within video
while True:
    success, frame = cap.read()
    # resize screen size
    image = cv2.resize(frame, (1600, 900))
    # tracker
    success, bbox1 = tracker.update(image)
This is the part where I believe I could figure out when my bbox starts moving and print the frame of interest:
    if success:
        drawBox(image, bbox1)
        if bbox1 == bbox:
            print('static')
        else:
            print(cap.get(cv2.CAP_PROP_POS_FRAMES))
    else:
        cv2.putText(image, "Lost", (75, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
Remaining code:
    # show video
    cv2.imshow("feed", image)
    if cv2.waitKey(40) == 27:
        break

cv2.destroyAllWindows()
cap.release()
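Building on the idea above of comparing the tracker output with the previous box, here is a minimal sketch (assuming the same MOSSE tracker as in the initial code; the pixel tolerance and helper function are illustrative, not part of the original post) that records the first and last frame indices on which the box actually moved:

import cv2

def boxes_differ(b1, b2, tol=2):
    # treat the box as "moved" only if some coordinate changed by more than tol pixels
    return any(abs(a - b) > tol for a, b in zip(b1, b2))

cap = cv2.VideoCapture('MyVideo.avi')
tracker = cv2.legacy_TrackerMOSSE.create()

ret, frame = cap.read()
image = cv2.resize(frame, (1600, 900))
bbox = cv2.selectROI("Tracking", image, False)
tracker.init(image, bbox)

first_moving_frame = None
last_moving_frame = None
prev_bbox = bbox

while True:
    ret, frame = cap.read()
    if not ret:
        break
    image = cv2.resize(frame, (1600, 900))
    ok, bbox1 = tracker.update(image)
    if ok:
        frame_idx = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
        if boxes_differ(bbox1, prev_bbox):
            if first_moving_frame is None:
                first_moving_frame = frame_idx  # first frame on which the box moved
            last_moving_frame = frame_idx       # keeps updating until the motion stops
        prev_bbox = bbox1

print("starts moving at frame:", first_moving_frame)
print("stops moving at frame:", last_moving_frame)
cap.release()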

Augmented Reality line that moves with an image in OpenCV

I want to do the following in OpenCV. The problem I have involves a bottle: a line needs to be drawn on the image, and the line needs to rotate as the bottle moves.
The first image needs to have red lines as the borders and initiate a green line.
The second image needs to have the green line in the middle when the bottle is rotated, i.e. the green line has to follow the rotation of the bottle.
Finally, as per the third image, the application needs to kill itself or save the picture when the green line gets aligned with the red line.
I tried doing this in OpenCV using template matching: I kept a template image and then tracked it with the template-matching algorithm, but it does not seem to work properly in this case.
import cv2
from time import sleep
import numpy as np

vid = cv2.VideoCapture(0)
sleep(2)

line_show = False
save_reference = False
template_compare_method = cv2.TM_SQDIFF_NORMED
i = 0

while True:
    check, frame = vid.read()
    print(check)
    frame1 = cv2.line(frame, (500, 0), (500, 720), (255, 0, 0), 7)
    frame1 = cv2.line(frame1, (800, 0), (800, 720), (255, 0, 0), 7)
    if line_show:
        h, w = frame1.shape[:2]
        if not save_reference:
            reference = frame1[200:500, 780:790]
            cv2.imwrite("../../images/white_image.jpg", reference)
            save_reference = True
        if save_reference:
            reference_image = cv2.imread('../../images/white_image.jpg')
            result = cv2.matchTemplate(reference_image, frame1, template_compare_method)
            mn, _, mnLoc, _ = cv2.minMaxLoc(result)
            MPx, MPy = mnLoc
            trows, tcols = reference_image.shape[:2]
            frame1 = cv2.rectangle(frame1, (MPx, MPy), (MPx+tcols, MPy+trows), (0, 0, 255), 2)
    cv2.imshow("image", frame1)
    key = cv2.waitKey(1)
    if key == ord('l'):
        line_show = True
    if key == ord('k'):
        cv2.imwrite("../../images/saved_image_"+str(i)+".jpg", frame1)
        i = i + 1
    if key == ord('s'):
        cv2.imwrite("../../images/saved_image.jpg", frame)
        vid.release()
        print("Image saved")
        break
    elif key == ord('q'):
        vid.release()
        cv2.destroyAllWindows()
        break
Can I use any other algorithm, or am I approaching this in the wrong way by treating it as an object-tracking task, where I save a small image and track it through template matching?
Can I use some other algorithms, like MeanShift or frame differencing, to achieve this?
If I were you, I would solve this problem with a line-detection algorithm. Of course, you can choose any other robust algorithm; my idea is to solve the problem as quickly as possible.
Assume I have the following image with left and right boundaries (blue), and I have the green line.
When the green line passes the left border, quit.
Tracking the green line
First you need to find the edge features of the frame so the green line can be tracked efficiently:
while True:
    ret, frm = cap.read()
    frm_gry = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
    frm_cny = cv2.Canny(frm_gry, 50, 200)
Sample output:
Second, find the approximate length of the green line. There is no direct way to find it, so use trial and error.
Once you are sure, initialize the line detector:
lns = cv2.ximgproc.createFastLineDetector(_length_threshold=400).detect(frm_cny)
Third, get the coordinates and check whether the green line has reached the border:
if lns is not None:
    for ln in lns:
        x1 = int(ln[0][0])
        y1 = int(ln[0][1])
        x2 = int(ln[0][2])
        y2 = int(ln[0][3])
        if x1 <= 232:
            break
Code:
import cv2

cap = cv2.VideoCapture("sample.mp4")

while True:
    ret, frm = cap.read()
    if ret:
        rgt_bdr = cv2.line(frm, (794, 250), (794, 1250), (255, 0, 0), 7)
        lft_bdr = cv2.line(frm, (232, 250), (232, 1250), (255, 0, 0), 7)
        frm_gry = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
        frm_cny = cv2.Canny(frm_gry, 50, 200)
        lns = cv2.ximgproc.createFastLineDetector(_length_threshold=400).detect(frm_cny)
        if lns is not None:
            for ln in lns:
                x1 = int(ln[0][0])
                y1 = int(ln[0][1])
                x2 = int(ln[0][2])
                y2 = int(ln[0][3])
                cv2.line(frm,
                         pt1=(x1, y1),
                         pt2=(x2, y2),
                         color=(0, 255, 0),
                         thickness=3)
                print("({}, {})-({}, {})".format(x1, y1, x2, y2))
                if x1 <= 232:
                    break
        cv2.imshow("frm", frm)
        cv2.waitKey(1)

Object Detection Using Raspberry Pi and Android IP Camera with Python and OpenCV

Here is my code for object detection using a Raspberry Pi and an Android IP camera. I'm not getting any output and the code does not produce any errors. Can someone figure out what the error is?
import urllib.request
import cv2
import numpy as np
import datetime
import math

# global variables
width = 0
height = 0
EntranceCounter = 0
ExitCounter = 0
MinCountourArea = 3000         # Adjust this value according to your usage
BinarizationThreshold = 70     # Adjust this value according to your usage
OffsetRefLines = 150           # Adjust this value according to your usage

# Check if an object is entering the monitored zone
def CheckEntranceLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYEntranceLine)
    if ((AbsDistance <= 2) and (y < CoorYExitLine)):
        return 1
    else:
        return 0

# Check if an object is exiting the monitored zone
def CheckExitLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYExitLine)
    if ((AbsDistance <= 2) and (y > CoorYEntranceLine)):
        return 1
    else:
        return 0
This is the code I have used to obtain the video stream from my IP camera:
ReferenceFrame = None
while True:
    camera = cv2.VideoCapture("http://192.168.1.6:8080/shot.jpg")
    camera.set(3, 640)
    camera.set(4, 480)
    (ret, Frame) = camera.read()
    height = np.size(Frame, 0)
    width = np.size(Frame, 1)
    # if we cannot grab a frame, this program ends here.
    if not ret:
        break
This is the part of the code I have used to display the lines and frame for object detection and counting:
    # grayscale conversion and Gaussian blur filter
    GrayFrame = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
    GrayFrame = cv2.GaussianBlur(GrayFrame, (21, 21), 0)
    if ReferenceFrame is None:
        ReferenceFrame = GrayFrame
        continue

    # Background subtraction and image binarization
    FrameDelta = cv2.absdiff(ReferenceFrame, GrayFrame)
    FrameThresh = cv2.threshold(FrameDelta, BinarizationThreshold, 255, cv2.THRESH_BINARY)[1]

    # Dilate image and find all the contours
    FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
    _, cnts, _ = cv2.findContours(FrameThresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    QttyOfContours = 0

    # plot reference lines (entrance and exit lines)
    CoorYEntranceLine = (height / 2) - OffsetRefLines
    CoorYExitLine = (height / 2) + OffsetRefLines
    cv2.line(Frame, (0, CoorYEntranceLine), (width, CoorYEntranceLine), (255, 0, 0), 2)
    cv2.line(Frame, (0, CoorYExitLine), (width, CoorYExitLine), (0, 0, 255), 2)

    # check all found contours
    for c in cnts:
        # if a contour has a small area, it'll be ignored
        if cv2.contourArea(c) < MinCountourArea:
            continue
        QttyOfContours = QttyOfContours + 1

        # draw a rectangle "around" the object
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(Frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # find object's centroid
        CoordXCentroid = (x + x + w) / 2
        CoordYCentroid = (y + y + h) / 2
        ObjectCentroid = (CoordXCentroid, CoordYCentroid)
        cv2.circle(Frame, ObjectCentroid, 1, (0, 0, 0), 5)

        if (CheckEntranceLineCrossing(CoordYCentroid, CoorYEntranceLine, CoorYExitLine)):
            EntranceCounter += 1
        if (CheckExitLineCrossing(CoordYCentroid, CoorYEntranceLine, CoorYExitLine)):
            ExitCounter += 1

    print("Total contours found: " + str(QttyOfContours))

    # Write entrance and exit counter values on frame and show it
    cv2.putText(Frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 0, 1), 2)
    cv2.putText(Frame, "Exits: {}".format(str(ExitCounter)), (10, 70),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow('Salida', Frame)
    cv2.waitKey(1)

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
The correct code
import cv2
import numpy as np
import math

def nothing(x):
    pass

width = 0
height = 0
EntranceCounter = 0
OffsetRefLines = 150
ExitCounter = 0
BinarizationThreshold = 70
MinCountourArea = 3000

cap = cv2.VideoCapture(0)
path = "http://192.168.1.6:8080/video"
cap.open(path)
ReferenceFrame = None

# Check if an object is entering the monitored zone
def CheckEntranceLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYEntranceLine)
    if ((AbsDistance <= 2) and (y < CoorYExitLine)):
        return 1
    else:
        return 0

# Check if an object is exiting the monitored zone
def CheckExitLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYExitLine)
    if ((AbsDistance <= 2) and (y > CoorYEntranceLine)):
        return 1
    else:
        return 0

#cv2.namedWindow("Tracking")
cv2.createTrackbar("LH", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LS", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LV", "Tracking", 0, 255, nothing)
cv2.createTrackbar("UH", "Tracking", 255, 255, nothing)
cv2.createTrackbar("US", "Tracking", 255, 255, nothing)
cv2.createTrackbar("UV", "Tracking", 255, 255, nothing)

while True:
    #frame = cv2.imread('smarties.png')
    if cap.isOpened():
        rval, frame = cap.read()

    while rval:
        rval, frame = cap.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        hsv = cv2.GaussianBlur(hsv, (21, 21), 0)
        if ReferenceFrame is None:
            ReferenceFrame = hsv
            continue

        # Background subtraction and image binarization
        FrameDelta = cv2.absdiff(ReferenceFrame, hsv)
        FrameThresh = cv2.threshold(FrameDelta, 25, 255, cv2.THRESH_BINARY)[1]

        # Dilate image and find all the contours
        FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
        cnts, _ = cv2.findContours(FrameThresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        QttyOfContours = 0

        # plot reference lines (entrance and exit lines)
        cv2.line(frame, (0, 170), (2000, 170), (255, 0, 0), 5)
        cv2.line(frame, (0, 470), (2000, 470), (0, 0, 255), 5)

        # check all found contours
        for c in cnts:
            # if a contour has a small area, it'll be ignored
            if cv2.contourArea(c) < MinCountourArea:
                continue
            QttyOfContours = QttyOfContours + 1

            # draw a rectangle "around" the object
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            # find object's centroid
            CoordXCentroid = int(x + x + w) / 2
            CoordYCentroid = int(y + y + h) / 2
            ObjectCentroid = (x, y)
            cv2.circle(frame, ObjectCentroid, 2, (0, 255, 0), 5)

            if (CheckEntranceLineCrossing(CoordYCentroid, 170, 470)):
                EntranceCounter += 1
            if (CheckExitLineCrossing(CoordYCentroid, 170, 470)):
                ExitCounter += 1

        print("Total contours found: " + str(QttyOfContours))

        # Write entrance and exit counter values on frame and show it
        cv2.putText(frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (250, 0, 1), 2)
        cv2.putText(frame, "Exits: {}".format(str(ExitCounter)), (10, 110),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)

        imS = cv2.resize(frame, (400, 400))  # Resize image
        #imSS = cv2.resize(mask, (200, 200))
        #imSSS = cv2.resize(frame, (200, 200))
        cv2.imshow("frame", imS)
        #cv2.imshow("mask", imSS)
        #cv2.imshow("res", imSSS)
        key = cv2.waitKey(1)
        if key == 27:
            break

cap.release()
cv2.destroyAllWindows()
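A side note on grabbing the frames: the key change in the corrected code is that the capture is opened once on the /video MJPEG endpoint, instead of constructing a new VideoCapture on /shot.jpg on every iteration. If only the snapshot endpoint is available, a sketch of the alternative (assuming an IP Webcam-style server where /shot.jpg returns a single JPEG, as the URL in the question suggests) is to fetch it with the urllib.request module that the original code imports but never uses:

import urllib.request
import cv2
import numpy as np

SNAPSHOT_URL = "http://192.168.1.6:8080/shot.jpg"  # same address as in the question

while True:
    # fetch one JPEG snapshot and decode it into a BGR frame
    resp = urllib.request.urlopen(SNAPSHOT_URL)
    jpg = np.frombuffer(resp.read(), dtype=np.uint8)
    frame = cv2.imdecode(jpg, cv2.IMREAD_COLOR)
    if frame is None:
        break

    cv2.imshow('snapshot', frame)
    if cv2.waitKey(1) == 27:
        break

cv2.destroyAllWindows()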

OpenCV code in PyCharm: Process finished with exit code 0

The program says "Process finished with exit code 0" but I am not getting any output. I am using Python 2.7, and the program's job is to detect free parking slots in a car park; it also has pedestrian detection. Any help will be very much appreciated, as I badly need this code to work. Thanks.
Here is the link to the source code, along with a video link of how it works
https://github.com/ankit1khare/ComputerVision
DESIRED OUTPUT: The program should open the input video and draw the parking overlay on top of the video.
Here is the code for the main program:
import yaml
import numpy as np
import cv2

# path references
fn = "Khare_testvideo_01.mp4" #3
#fn = "datasets\parkinglot_1_720p.mp4"
#fn = "datasets\street_high_360p.mp4"
fn_yaml = "Khare_yml_01.yml"
fn_out = "Khare_outputvideo_01.avi"
cascade_src = 'Khare_classifier_02.xml'
car_cascade = cv2.CascadeClassifier(cascade_src)
global_str = "Last change at: "
change_pos = 0.00
dict = {
    'text_overlay': True,
    'parking_overlay': True,
    'parking_id_overlay': True,
    'parking_detection': True,
    'motion_detection': True,
    'pedestrian_detection': False,  # takes a lot of processing power
    'min_area_motion_contour': 500, # area given to detect motion
    'park_laplacian_th': 2.8,
    'park_sec_to_wait': 1,          # 4 wait time for changing the status of a region
    'start_frame': 0,               # begin frame from specific frame number
    'show_ids': True,               # shows id on each region
    'classifier_used': True,
    'save_video': True
}

# Set from video
cap = cv2.VideoCapture(fn)
print("video found")
video_info = {'fps': cap.get(cv2.CAP_PROP_FPS),
              'width': int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)*0.6),
              'height': int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)*0.6),
              'fourcc': cap.get(cv2.CAP_PROP_FOURCC),
              'num_of_frames': int(cap.get(cv2.CAP_PROP_FRAME_COUNT))}
cap.set(cv2.CAP_PROP_POS_FRAMES, dict['start_frame'])  # jump to frame number specified

def run_classifier(img, id):
    # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cars = car_cascade.detectMultiScale(img, 1.1, 1)
    if cars == ():
        return False
    else:
        # parking_status[id] = False
        return True
# Define the codec and create VideoWriter object
if dict['save_video']:
    fourcc = cv2.VideoWriter_fourcc('X','V','I','D')  # options: ('P','I','M','1'), ('D','I','V','X'), ('M','J','P','G'), ('X','V','I','D')
    out = cv2.VideoWriter(fn_out, -1, 25.0, (video_info['width'], video_info['height']))
    print("save video -- out w * H")

# initialize the HOG descriptor/person detector. Takes a lot of processing power.
if dict['pedestrian_detection']:
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

# Use Background subtraction
if dict['motion_detection']:
    fgbg = cv2.createBackgroundSubtractorMOG2(history=300, varThreshold=16, detectShadows=True)

# Read YAML data (parking space polygons)
with open(fn_yaml, 'r') as stream:
    parking_data = yaml.load(stream)

parking_contours = []
parking_bounding_rects = []
parking_mask = []
parking_data_motion = []
if parking_data != None:
    for park in parking_data:
        points = np.array(park['points'])
        rect = cv2.boundingRect(points)
        points_shifted = points.copy()
        points_shifted[:, 0] = points[:, 0] - rect[0]  # shift contour to region of interest
        points_shifted[:, 1] = points[:, 1] - rect[1]
        parking_contours.append(points)
        parking_bounding_rects.append(rect)
        mask = cv2.drawContours(np.zeros((rect[3], rect[2]), dtype=np.uint8), [points_shifted], contourIdx=-1,
                                color=255, thickness=-1, lineType=cv2.LINE_8)
        mask = mask == 255
        parking_mask.append(mask)

kernel_erode = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))  # morphological kernel
kernel_dilate = cv2.getStructuringElement(cv2.MORPH_RECT, (5,19))
if parking_data != None:
    parking_status = [False]*len(parking_data)
    parking_buffer = [None]*len(parking_data)
# bw = ()

def print_parkIDs(park, coor_points, frame_rev):
    moments = cv2.moments(coor_points)
    centroid = (int(moments['m10']/moments['m00'])-3, int(moments['m01']/moments['m00'])+3)
    # putting numbers on marked regions
    cv2.putText(frame_rev, str(park['id']), (centroid[0]+1, centroid[1]+1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
    cv2.putText(frame_rev, str(park['id']), (centroid[0]-1, centroid[1]-1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
    cv2.putText(frame_rev, str(park['id']), (centroid[0]+1, centroid[1]-1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
    cv2.putText(frame_rev, str(park['id']), (centroid[0]-1, centroid[1]+1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
    cv2.putText(frame_rev, str(park['id']), centroid, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 1, cv2.LINE_AA)
while(cap.isOpened()):
    video_cur_pos = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0  # Current position of the video file in seconds
    video_cur_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)       # Index of the frame to be decoded/captured next
    ret, frame_initial = cap.read()
    if ret == True:
        frame = cv2.resize(frame_initial, None, fx=0.6, fy=0.6)
    if ret == False:
        print("Video ended")
        break

    # Background Subtraction
    frame_blur = cv2.GaussianBlur(frame.copy(), (5,5), 3)
    # frame_blur = frame_blur[150:1000, 100:1800]
    frame_gray = cv2.cvtColor(frame_blur, cv2.COLOR_BGR2GRAY)
    frame_out = frame.copy()

    # Drawing the Overlay. Text overlay at the left corner of screen
    if dict['text_overlay']:
        str_on_frame = "%d/%d" % (video_cur_frame, video_info['num_of_frames'])
        cv2.putText(frame_out, str_on_frame, (5, 30), cv2.FONT_HERSHEY_SIMPLEX,
                    0.8, (0, 255, 255), 2, cv2.LINE_AA)
        cv2.putText(frame_out, global_str + str(round(change_pos, 2)) + 'sec', (5, 60), cv2.FONT_HERSHEY_SIMPLEX,
                    0.8, (255, 0, 0), 2, cv2.LINE_AA)

    # motion detection for all objects
    if dict['motion_detection']:
        # frame_blur = frame_blur[380:420, 240:470]
        # cv2.imshow('dss', frame_blur)
        fgmask = fgbg.apply(frame_blur)
        bw = np.uint8(fgmask == 255) * 255
        bw = cv2.erode(bw, kernel_erode, iterations=1)
        bw = cv2.dilate(bw, kernel_dilate, iterations=1)
        # cv2.imshow('dss', bw)
        # cv2.imwrite("frame%d.jpg" % co, bw)
        (_, cnts, _) = cv2.findContours(bw.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # loop over the contours
        for c in cnts:
            # print(cv2.contourArea(c))
            # if the contour is too small, we ignore it
            if cv2.contourArea(c) < dict['min_area_motion_contour']:
                continue
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame_out, (x, y), (x + w, y + h), (255, 0, 0), 1)
    # detecting cars and vacant spaces
    if dict['parking_detection']:
        for ind, park in enumerate(parking_data):
            points = np.array(park['points'])
            rect = parking_bounding_rects[ind]
            roi_gray = frame_gray[rect[1]:(rect[1]+rect[3]), rect[0]:(rect[0]+rect[2])]  # crop roi for faster calculation
            laplacian = cv2.Laplacian(roi_gray, cv2.CV_64F)
            # cv2.imshow('oir', laplacian)
            points[:, 0] = points[:, 0] - rect[0]  # shift contour to roi
            points[:, 1] = points[:, 1] - rect[1]
            delta = np.mean(np.abs(laplacian * parking_mask[ind]))
            # if(delta<2.5):
            # print("ind, del", ind, delta)
            status = delta < dict['park_laplacian_th']
            # If detected a change in parking status, save the current time
            if status != parking_status[ind] and parking_buffer[ind] == None:
                parking_buffer[ind] = video_cur_pos
                change_pos = video_cur_pos
                # print("state ", ind, delta)
                # applying classifier in case a change is detected in the status of the area
                # if dict['classifier_used']:
                #     classifier_result = run_classifier(roi_gray)
                #     if classifier_result:
                #         print(classifier_result)
            # If status is still different than the one saved and counter is open
            elif status != parking_status[ind] and parking_buffer[ind] != None:
                if video_cur_pos - parking_buffer[ind] > dict['park_sec_to_wait']:
                    parking_status[ind] = status
                    parking_buffer[ind] = None
            # If status is still the same and counter is open
            elif status == parking_status[ind] and parking_buffer[ind] != None:
                parking_buffer[ind] = None
    # changing the color on the basis of the status change that occurred in the section above, and putting numbers on areas
    if dict['parking_overlay']:
        for ind, park in enumerate(parking_data):
            points = np.array(park['points'])
            if parking_status[ind]:
                color = (0, 255, 0)
                rect = parking_bounding_rects[ind]
                roi_gray_ov = frame_gray[rect[1]:(rect[1] + rect[3]),
                                         rect[0]:(rect[0] + rect[2])]  # crop roi for faster calculation
                res = run_classifier(roi_gray_ov, ind)
                if res:
                    parking_data_motion.append(parking_data[ind])
                    # del parking_data[ind]
                    color = (0, 0, 255)
            else:
                color = (0, 0, 255)
            cv2.drawContours(frame_out, [points], contourIdx=-1,
                             color=color, thickness=2, lineType=cv2.LINE_8)
            if dict['show_ids']:
                print_parkIDs(park, points, frame_out)
    if parking_data_motion != []:
        for index, park_coord in enumerate(parking_data_motion):
            points = np.array(park_coord['points'])
            color = (0, 0, 255)
            recta = parking_bounding_rects[ind]
            roi_gray1 = frame_gray[recta[1]:(recta[1] + recta[3]),
                                   recta[0]:(recta[0] + recta[2])]  # crop roi for faster calculation
            # laplacian = cv2.Laplacian(roi_gray, cv2.CV_64F)
            # delta2 = np.mean(np.abs(laplacian * parking_mask[ind]))
            # state = delta2 < 1
            # classifier_result = run_classifier(roi_gray1, index)
            # cv2.imshow('dsd', roi_gray1)
            fgbg1 = cv2.createBackgroundSubtractorMOG2(history=300, varThreshold=16, detectShadows=True)
            roi_gray1_blur = cv2.GaussianBlur(roi_gray1.copy(), (5, 5), 3)
            # cv2.imshow('sd', roi_gray1_blur)
            fgmask1 = fgbg1.apply(roi_gray1_blur)
            bw1 = np.uint8(fgmask1 == 255) * 255
            bw1 = cv2.erode(bw1, kernel_erode, iterations=1)
            bw1 = cv2.dilate(bw1, kernel_dilate, iterations=1)
            # cv2.imshow('sd', bw1)
            # cv2.imwrite("frame%d.jpg" % co, bw)
            (_, cnts1, _) = cv2.findContours(bw1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # loop over the contours
            for c in cnts1:
                print(cv2.contourArea(c))
                # if the contour is too small, we ignore it
                if cv2.contourArea(c) < 4:
                    continue
                (x, y, w, h) = cv2.boundingRect(c)
                classifier_result1 = run_classifier(roi_gray1, index)
                if classifier_result1:
                    # print(classifier_result)
                    color = (0, 0, 255)  # Red again if car found by classifier
                else:
                    color = (0, 255, 0)
            classifier_result1 = run_classifier(roi_gray1, index)
            if classifier_result1:
                # print(classifier_result)
                color = (0, 0, 255)  # Red again if car found by classifier
            else:
                color = (0, 255, 0)
            cv2.drawContours(frame_out, [points], contourIdx=-1,
                             color=color, thickness=2, lineType=cv2.LINE_8)
    if dict['pedestrian_detection']:
        # detect people in the image. Slows down the program, requires high GPU speed
        (rects, weights) = hog.detectMultiScale(frame, winStride=(4, 4), padding=(8, 8), scale=1.05)
        # draw the bounding boxes
        for (x, y, w, h) in rects:
            cv2.rectangle(frame_out, (x, y), (x + w, y + h), (255, 0, 0), 2)

    # write the output frames
    if dict['save_video']:
        #if video_cur_frame % 35 == 0:  # take every 30 frames
        out.write(frame_out)

    # Display video
    cv2.imshow('frame', frame_out)
    # cv2.imshow('background mask', bw)
    k = cv2.waitKey(1)
    if k == ord('q'):
        break
    elif k == ord('c'):
        cv2.imwrite('frame%d.jpg' % video_cur_frame, frame_out)
    elif k == ord('j'):
        cap.set(cv2.CAP_PROP_POS_FRAMES, video_cur_frame + 1000)  # jump 1000 frames
    elif k == ord('u'):
        cap.set(cv2.CAP_PROP_POS_FRAMES, video_cur_frame + 500)   # jump 500 frames
    if cv2.waitKey(33) == 27:
        break

cv2.waitKey(0)
cap.release()
if dict['save_video']: out.release()
cv2.destroyAllWindows()
Change these lines:

if dict['save_video']:
    fourcc = cv2.VideoWriter_fourcc('X','V','I','D')  # options: ('P','I','M','1'), ('D','I','V','X'), ('M','J','P','G'), ('X','V','I','D')
    out = cv2.VideoWriter(fn_out, -1, 25.0, (video_info['width'], video_info['height']))

to

if dict['save_video']:
    fourcc = cv2.VideoWriter_fourcc(*'XVID')  # options: ('P','I','M','1'), ('D','I','V','X'), ('M','J','P','G'), ('X','V','I','D')
    out = cv2.VideoWriter(fn_out, fourcc, 25.0, (video_info['width'], video_info['height']))

and try again.
Also, move your function/method definitions to the top of the code.
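As an optional sanity check (a small sketch, not part of the original answer), you can verify that the writer actually opened before entering the main loop; if the XVID codec is not available on the system, out.isOpened() returns False and nothing gets written:

fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(fn_out, fourcc, 25.0, (video_info['width'], video_info['height']))
if not out.isOpened():
    raise RuntimeError("VideoWriter failed to open -- check the codec and output path")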
