OpenCV traffic light detection - Python

import cv2
import numpy as np
import warnings

warnings.filterwarnings("ignore")

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_yellow = np.array([20, 0, 0])
    upper_yellow = np.array([40, 255, 255])
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    img = cv2.medianBlur(res, 5)
    ccimg = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
    cimg = cv2.cvtColor(ccimg, cv2.COLOR_BGR2GRAY)
    circles = cv2.HoughCircles(cimg, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=50, param2=30, minRadius=20, maxRadius=30)
    if circles is not None:
        print("circle is found")
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
            cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
        cv2.imshow('detected circles', cimg)
    cv2.imshow('res', res)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cv2.destroyAllWindows()
I am trying to detect a traffic light using OpenCV. Initially I want to detect the yellow color in HSV space, then apply median filtering and find the circle, but the code raises errors: "async ReadSample() call is failed with error status: -1072873821" and "OnReadSample() is called with error status: -1072873821". I suspect the errors are caused by the if statement that checks whether any circle was found. The full error output is a long list, but these two messages are the ones that repeat.

The capture device is failing to read a frame. The OnReadSample() error surfaces on cap.read(), so you should add logic to handle a frame not being read. I've demonstrated this below:
import cv2
import numpy as np
import warnings

warnings.filterwarnings("ignore")

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if ret:
        # isolate yellow regions in HSV space
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        lower_yellow = np.array([20, 0, 0])
        upper_yellow = np.array([40, 255, 255])
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        res = cv2.bitwise_and(frame, frame, mask=mask)
        img = cv2.medianBlur(res, 5)
        ccimg = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        cimg = cv2.cvtColor(ccimg, cv2.COLOR_BGR2GRAY)
        circles = cv2.HoughCircles(cimg, cv2.HOUGH_GRADIENT, 1, 20,
                                   param1=50, param2=30, minRadius=20, maxRadius=30)
        if circles is not None:
            print("circle is found")
            circles = np.uint16(np.around(circles))
            for i in circles[0, :]:
                cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
                cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
            cv2.imshow('detected circles', cimg)
        cv2.imshow('res', res)
    else:
        print("Read Failed")
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
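One extra guard worth adding (my suggestion, not part of the original answer): verify that the camera opened at all before entering the loop, since a read failure on every frame usually means a wrong device index or a camera already in use by another application:

import cv2

cap = cv2.VideoCapture(0)
if not cap.isOpened():
    # bail out early instead of looping over failed reads
    raise RuntimeError("Could not open camera 0; check the device index "
                       "or close other applications using the camera")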

Related

Save both input video and output video in OpenCV Python

I wrote some code based on the OpenCV documentation on motion detection. I want to save both my input and my output as videos, but I'm having problems: I can only save the output video, and sometimes the saved input video is identical to the output video. My input comes from my computer's camera (a normal video); the output is the same video with rectangles drawn around moving objects. I need your help.
import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)
(grabbed, frame) = cap.read()
fshape = frame.shape
fheight = fshape[0]
fwidth = fshape[1]
print(fwidth, fheight)
ret, frame1 = cap.read()
ret, frame2 = cap.read()
while cap.isOpened():
    diff = cv.absdiff(frame1, frame2)
    gray = cv.cvtColor(diff, cv.COLOR_BGR2GRAY)
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)
    dilated = cv.dilate(thresh, None, iterations=3)
    contours, _ = cv.findContours(dilated, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        (x, y, w, h) = cv.boundingRect(contour)
        if cv.contourArea(contour) < 100:
            continue
        cv.rectangle(frame1, (x, y), (x+w, y+h), (0, 0, 255), 3)
    Final_Movie = cv.putText(frame1, "Status: {}".format('Movement'), (10, 20),
                             cv.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 3)
    #cv.drawContours(frame1, contours, -1, (0, 0, 255), 3)
    cv.imshow("feed", frame1)
    cv.imshow("feed1", blur)
    cv.imshow("feed2", gray)
    cv.imshow("feed3", diff)
    cv.imshow("feed4", thresh)
    cv.imshow("feed5", dilated)
    #cv.imshow("feed6", contours)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv.waitKey(40) == 27:
        break
cv.destroyAllWindows()
cap.release()
I tried the following, but it doesn't work:
import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)
(grabbed, frame) = cap.read()
fshape = frame.shape
fheight = fshape[0]
fwidth = fshape[1]
print(fwidth, fheight)
fourcc = cv.VideoWriter_fourcc(*'XVID')
out = cv.VideoWriter('output.avi', fourcc, 20.0, (fwidth, fheight))
out1 = cv.VideoWriter('input.avi', fourcc, 20.0, (fwidth, fheight))
ret, frame1 = cap.read()
ret, frame2 = cap.read()
while cap.isOpened():
    diff = cv.absdiff(frame1, frame2)
    gray = cv.cvtColor(diff, cv.COLOR_BGR2GRAY)
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)
    dilated = cv.dilate(thresh, None, iterations=3)
    contours, _ = cv.findContours(dilated, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        (x, y, w, h) = cv.boundingRect(contour)
        if cv.contourArea(contour) < 100:
            continue
        cv.rectangle(frame1, (x, y), (x+w, y+h), (0, 0, 255), 3)
    Final_Movie = cv.putText(frame1, "Status: {}".format('Movement'), (10, 20),
                             cv.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 3)
    #cv.drawContours(frame1, contours, -1, (0, 0, 255), 3)
    cv.imshow("feed", frame1)
    cv.imshow("feed1", blur)
    cv.imshow("feed2", gray)
    cv.imshow("feed3", diff)
    cv.imshow("feed4", thresh)
    cv.imshow("feed5", dilated)
    #cv.imshow("feed6", contours)
    if ret == True:
        out.write(Final_Movie)
        out.write(frame2)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv.waitKey(40) == 27:
        break
cv.destroyAllWindows()
cap.release()
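A likely cause of the problem above (my reading; the original thread carries no accepted answer here): both write() calls go to the same writer, out, so input.avi is never written and output.avi receives a mix of annotated and raw frames. A minimal sketch of the fix, assuming output.avi should hold the annotated frames and input.avi the raw camera frames:

    # inside the loop, instead of the two out.write(...) calls:
    if ret:
        out1.write(frame2)        # raw camera frame  -> input.avi
        out.write(Final_Movie)    # annotated frame   -> output.avi

# after the loop, release the writers as well as the capture;
# an unreleased VideoWriter can leave the .avi files unplayable:
out.release()
out1.release()
cap.release()
cv.destroyAllWindows()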

Separation of bounding boxes

In this problem we are trying to detect persons in a webcam video in real time. The code works fine for one person, but when more than one person enters the frame it fails miserably. Here is the code:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
kernel = np.ones((5, 5), np.uint8)
background = None
while True:
    ret, frame = cap.read()
    gray = frame.copy()
    gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (11, 11), 0)
    if background is None:
        background = gray
        continue
    delta = cv2.absdiff(background, gray)
    thresh = cv2.threshold(delta, 5, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    thresh = cv2.dilate(thresh, kernel, iterations=2)
    # OpenCV 3.x findContours returns three values
    _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) == 0:
        continue
    #areas = [cv2.contourArea(c) for c in contours]
    #max_index = np.argmax(areas)
    #cnt = contours[max_index]
    #(x, y, w, h) = cv2.boundingRect(cnt)
    #if 1.0*(w*h)/(640*480) < 0.75:
    #    cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 3)
    #print("Area: ", w*h)
    for i in range(len(contours)):
        (x, y, w, h) = cv2.boundingRect(contours[i])
        if w*h <= 90000:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 5)
    #cv2.imshow('thresh', thresh)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()
I think the problem is that the code cannot separate the contours of the different persons, but that may not be the only reason. Can someone help me?
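One direction worth trying (my suggestion, not from the original thread): frame differencing fuses nearby people into a single blob, so separating them afterwards is hard. A dedicated person detector gives one box per person instead. A minimal sketch using OpenCV's built-in HOG people detector:

import cv2

cap = cv2.VideoCapture(0)
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
while True:
    ret, frame = cap.read()
    if not ret:
        break
    # detectMultiScale returns one rectangle per detected person
    rects, weights = hog.detectMultiScale(frame, winStride=(8, 8),
                                          padding=(8, 8), scale=1.05)
    for (x, y, w, h) in rects:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()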

How to put bounding box around the detected human outline

Here is the Python code I have written:
import cv2
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the (optional) video file")
args = vars(ap.parse_args())
if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
How do I put a bounding box around the detected human outline, and how can I improve the efficiency of this background subtraction on the live webcam feed? Can someone help?
Drawing Contour Using Background Subtraction
import cv2
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the (optional) video file")
args = vars(ap.parse_args())
if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    # fgmask is already single-channel, so threshold it directly
    # (converting it with COLOR_BGR2GRAY would raise an error)
    ret, th1 = cv2.threshold(fgmask, 25, 255, cv2.THRESH_BINARY)
    # OpenCV 3.x findContours returns three values
    _, contours, hierarchy = cv2.findContours(th1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 1000 and area < 40000:
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(fgmask, (x, y), (x+w, y+h), (255, 0, 0), 2)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
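A small refinement (my addition, not part of the original answer): the MOG mask is usually speckled, so a morphological opening before findContours removes isolated noise pixels and cuts the number of spurious contours:

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)  # erode, then dilate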
Drawing Contour using HSV Masking and Convex Hull
Set a value for the HSV mask.
import cv2
import argparse
import numpy as np
import matplotlib.pyplot as plt

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the (optional) video file")
args = vars(ap.parse_args())
if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
kernel = np.ones((3, 3), np.uint8)
while True:
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower = np.array([50, 103, 40])
    upper = np.array([255, 255, 255])
    mask = cv2.inRange(hsv, lower, upper)
    # keep everything outside the HSV range
    fg = cv2.bitwise_and(frame, frame, mask=255-mask)
    fg = cv2.cvtColor(fg.copy(), cv2.COLOR_HSV2BGR)
    fg = cv2.cvtColor(fg, cv2.COLOR_BGR2GRAY)
    fg = cv2.threshold(fg, 120, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    #plt.imshow(fg)
    #plt.show()
    fgclosing = cv2.morphologyEx(fg.copy(), cv2.MORPH_CLOSE, kernel)
    se = np.ones((3, 3), np.uint8)
    #fgdilated = cv2.morphologyEx(fgclosing, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4)))
    fgdilated = cv2.dilate(fgclosing, kernel=se, iterations=8)
    img = frame.copy()
    ret, threshed_img = cv2.threshold(fgdilated, 127, 255, cv2.THRESH_BINARY)
    # OpenCV 3.x findContours returns three values
    image, contours, hier = cv2.findContours(threshed_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        #print(cv2.contourArea(cnt))
        if cv2.contourArea(cnt) > 44000:
            # get convex hull
            hull = cv2.convexHull(cnt)
            #cv2.drawContours(img, [hull], -1, (0, 0, 255), 1)
            #print(hull)
            (x, y, w, h) = cv2.boundingRect(cnt)
            #cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
            contours = hull
            #c1 = max(contours, key=cv2.contourArea)
            hull = cv2.convexHull(cnt)
            c = hull
            #print(c)
            cv2.drawContours(img, [hull], -1, (0, 0, 255), 1)
            # determine the most extreme points along the contour
            extLeft = tuple(c[c[:, :, 0].argmin()][0])
            extRight = tuple(c[c[:, :, 0].argmax()][0])
            extTop = tuple(c[c[:, :, 1].argmin()][0])
            extBot = tuple(c[c[:, :, 1].argmax()][0])
            cv2.drawContours(img, [c], -1, (0, 255, 255), 2)
            cv2.circle(img, extLeft, 8, (0, 0, 255), -1)
            cv2.circle(img, extRight, 8, (0, 255, 0), -1)
            cv2.circle(img, extTop, 8, (255, 0, 0), -1)
            cv2.circle(img, extBot, 8, (255, 255, 0), -1)
            lx = extLeft[1]
            ly = extLeft[0]
            rx = extRight[1]
            ry = extRight[0]
            tx = extTop[1]
            ty = extTop[0]
            bx = extBot[1]
            by = extBot[0]
            x, y = lx, by
            w, h = abs(rx-lx), abs(ty-by)
            #cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img, str(extLeft[0])+','+str(extLeft[1]), extLeft, font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(img, str(extRight[0])+','+str(extRight[1]), extRight, font, 2, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(img, str(extTop[0])+','+str(extTop[1]), extTop, font, 2, (255, 0, 0), 2, cv2.LINE_AA)
            cv2.putText(img, str(extBot[0])+','+str(extBot[1]), extBot, font, 2, (255, 255, 0), 2, cv2.LINE_AA)
            im = frame[tx:bx, ly:ry, :]
            cx = im.shape[1]//2
            cy = im.shape[0]//2
            cv2.circle(im, (cx, cy), 15, (0, 255, 0))
    plt.imshow(img)
    plt.show()
You can use findContours.
import cv2
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the (optional) video file")
args = vars(ap.parse_args())
if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    mask = 255 - fgmask
    # OpenCV 3.x findContours returns three values
    _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    fgmask = cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR)
    for contour in contours:
        area = cv2.contourArea(contour)
        # only show contours that match the area criteria
        if area > 500 and area < 20000:
            rect = cv2.boundingRect(contour)
            x, y, w, h = rect
            cv2.rectangle(fgmask, (x, y), (x+w, y+h), (0, 255, 0), 3)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
I tested this with the video https://github.com/opencv/opencv/blob/master/samples/data/vtest.avi

What are the rules cv2.approxPolyDP uses to pick points?

I have two input images (first and second; images not reproduced here).
Now I want to detect the square area.
And here is my code:
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import cv2
import argparse
import math
import sys

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to input image")
args = vars(ap.parse_args())
img = cv2.imread(args["image"])
# img = cv2.bitwise_not(img, img)
# gray = cv2.imread(args["image"], 0)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
cv2.imshow('gray', gray)
cv2.waitKey(0)
gray = cv2.normalize(gray, gray, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
cv2.imshow('normalized', gray)
cv2.waitKey(0)
# sys.exit()
ret, thresh = cv2.threshold(gray, 230, 255, cv2.THRESH_BINARY_INV)
cv2.imshow('thresh', thresh)
# cv2.imwrite('./wni230.png', thresh)
cv2.waitKey(0)
square_cnts = []
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))
cv2.imshow('dilated', thresh)
cv2.waitKey(0)
# sys.exit()
tmpimage, contours, h = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt0 = contours[0]
tmp_h = h[::-1]
for index, cnt in enumerate(contours[::-1]):
    print '!!!!!!!!!!! new round !!!!!!!!!!'
    print cv2.arcLength(cnt, True)
    # print cnt
    approx = cv2.approxPolyDP(cnt, 0.1*cv2.arcLength(cnt, True), True)
    print '**********approx*****'
    print approx
    print len(approx)
    print '**********approx end*****'
    if len(approx) == 4:
        print 'h[i][2]:%s' % tmp_h[0][index][2]
        print 'h[i][3]:%s' % tmp_h[0][index][3]
        if ret > 0.5:
            print "Parallelogram"
        elif 0.3 < ret < 0.5:
            print "Rectangle"
        elif 0 < ret < 0.3:
            print "Rhombus"
        else:
            print "square"
        print cv2.arcLength(cnt, True)
        print approx
        cv2.drawContours(img, [cnt], 0, (0, 0, 255), 2)
        cv2.imshow('tmpsquare', img)
        cv2.waitKey(0)
        if int(cv2.arcLength(cnt, True)) >= 96:
            x, y, w, h = cv2.boundingRect(approx)
            for j in approx:
                cv2.circle(img, (int(j[0][0]), int(j[0][1])), 1, (0, 255, 0), 2)
            cv2.imshow('final', img[y:y+h, x:x+w])
            cv2.waitKey(0)
            print 'target but long square detected...'
            cv2.waitKey(0)
cv2.imshow('img', img)
cv2.imwrite('tmp.png', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
So I got two output images (first and second; images not reproduced here).
The line in red is the contour; the points in green are the points detected by approxPolyDP.
The first output is what I want.
However, how did approxPolyDP locate the upper-most point in the second one?
Could anyone please explain the rules?
Env:
Python: 2.7.10
OpenCV: 3.2.0
Thanks.
Wesley
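
For context (my addition, not part of the original thread): cv2.approxPolyDP implements the Ramer-Douglas-Peucker algorithm. It keeps the two endpoints of a curve segment, finds the point farthest from the chord joining them, discards it if its perpendicular distance is below epsilon (0.1 * arcLength in the call above), and otherwise keeps it and recurses on the two halves. So the upper-most point in the second image survives simply because, at some recursion step, it was the farthest point from the current chord and that distance exceeded epsilon; it need not be a visually meaningful corner. A rough Python sketch of the recursion:

import numpy as np

def rdp(points, epsilon):
    """Ramer-Douglas-Peucker: keep only points that deviate from the
    chord between the endpoints by more than epsilon."""
    start, end = points[0].astype(float), points[-1].astype(float)
    dx, dy = end - start
    norm = np.hypot(dx, dy) or 1.0
    # perpendicular distance of every point from the start-end chord
    dists = np.abs(dx * (points[:, 1] - start[1])
                   - dy * (points[:, 0] - start[0])) / norm
    idx = int(np.argmax(dists))
    if dists[idx] > epsilon:
        # the farthest point survives; recurse on both halves
        left = rdp(points[:idx + 1], epsilon)
        right = rdp(points[idx:], epsilon)
        return np.vstack([left[:-1], right])
    return np.array([start, end])

An OpenCV contour of shape (N, 1, 2) can be fed in as rdp(cnt.reshape(-1, 2), 0.1 * cv2.arcLength(cnt, True)).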

Tracking white color

I am trying to understand how to work with an ROI, but the following code is not working as expected.
I have a white ball moving across a black background, and I want to detect and keep tracking the ball. Unfortunately, the tracking square does not move. I think the problem is the color array, but after trying many values I still haven't found the correct solution.
import numpy as np
import cv2

cap = cv2.VideoCapture('oieee.avi')
ret, frame = cap.read()
r, h, c, w = 485, 50, 890, 50
track_window = (c, r, w, h)
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 0., 240.)), np.array((255., 15., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
while True:
    ret, frame = cap.read()
    if ret == True:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        ret, track_window = cv2.meanShift(dst, track_window, term_crit)
        x, y, w, h = track_window
        cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
        img2 = cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
        cv2.imshow('img2', frame)
        k = cv2.waitKey(60) & 0xff
        if k == 27:
            break
        else:
            cv2.imwrite(chr(k)+".jpg", img2)
    else:
        break
cv2.destroyAllWindows()
cap.release()
A print screen of my video http://imgur.com/VG3TgFF
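Two likely culprits (my reading; there is no confirmed answer in the original): the histogram is computed from the full frame rather than the ROI (cv2.cvtColor(frame, ...) instead of cv2.cvtColor(roi, ...)), and the hue channel carries no information for white, which has near-zero saturation. A minimal sketch that builds the histogram from the ROI and back-projects the value channel instead:

import cv2
import numpy as np

cap = cv2.VideoCapture('oieee.avi')    # video path taken from the question
ret, frame = cap.read()
r, h, c, w = 485, 50, 890, 50          # ROI coordinates from the question
track_window = (c, r, w, h)
roi = frame[r:r+h, c:c+w]

hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)   # histogram from the ROI, not the full frame
# white = low saturation, high value, so mask on S/V and histogram the V channel (index 2)
mask = cv2.inRange(hsv_roi, np.array((0., 0., 240.)), np.array((180., 30., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [2], mask, [256], [0, 256])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsv], [2], roi_hist, [0, 256], 1)  # back-project V
    ret, track_window = cv2.meanShift(dst, track_window, term_crit)
    x, y, w, h = track_window
    cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
    cv2.imshow('tracking', frame)
    if cv2.waitKey(60) & 0xff == 27:
        break
cap.release()
cv2.destroyAllWindows()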
