Separation of bounding boxes - python

In this problem we are trying to detect persons in a webcam video in real time. The code works fine for one person, but when more than one person enters the frame it fails badly. Here is the code:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
kernel = np.ones((5, 5), np.uint8)
background = None

while True:
    ret, frame = cap.read()

    # Pre-process: grayscale + blur to suppress noise
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (11, 11), 0)

    # The first frame becomes the static background model
    if background is None:
        background = gray
        continue

    # Difference against the background, then threshold and dilate
    delta = cv2.absdiff(background, gray)
    thresh = cv2.threshold(delta, 5, 255,
                           cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    thresh = cv2.dilate(thresh, kernel, iterations=2)

    # OpenCV 3.x findContours returns (image, contours, hierarchy)
    _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) == 0:
        continue

    #areas = [cv2.contourArea(c) for c in contours]
    #max_index = np.argmax(areas)
    #cnt = contours[max_index]
    #(x, y, w, h) = cv2.boundingRect(cnt)
    #if 1.0 * (w * h) / (640 * 480) < 0.75:
    #    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 3)
    #    print("Area: ", w * h)

    # Draw a box around every contour that is not too large
    for i in range(len(contours)):
        (x, y, w, h) = cv2.boundingRect(contours[i])
        if w * h <= 90000:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 5)

    #cv2.imshow('thresh', thresh)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == 27:  # Esc to quit
        break

cap.release()
cv2.destroyAllWindows()
I think the problem is that the code is not able to separate the contours of the different persons it detects, but that may not be the only reason. Can someone help me?
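One direction worth trying (a sketch, not part of the original post): frame differencing tends to merge nearby people into a single blob, so a dedicated person detector such as OpenCV's pre-trained HOG + linear SVM people detector gives one box per person without relying on background subtraction. The winStride, padding and scale values below are assumptions that usually need tuning.

import cv2

# Minimal sketch: OpenCV's built-in HOG people detector.
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    # One rectangle per detected person; weights are detection confidences
    rects, weights = hog.detectMultiScale(frame, winStride=(8, 8),
                                          padding=(8, 8), scale=1.05)
    for (x, y, w, h) in rects:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()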

Related

Load multiple images with a time interval to overlay an object in a webcam feed

I'm trying to load several images from a folder so that they are all processed in exactly the same manner. The code below detects a blue object in the webcam feed and overlays it with the template image img, where the webcam frame is im0:
hsv = cv2.cvtColor(im0, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (0, 120, 120), (180, 255, 255))  # <- blue  # RED: (0, 120, 120), (10, 255, 255)
thresh = cv2.dilate(mask, None, iterations=2)
contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0]

for contour in contours:
    x, y, w, h = cv2.boundingRect(contour)
    height = 480
    width = 640
    if y + h < height and x + w < width:
        # Resize the template to the bounding box and paste it into the ROI
        logo = cv2.resize(img, (w, h))
        img2gray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
        _, logo_mask = cv2.threshold(img2gray, 1, 255, cv2.THRESH_BINARY)
        roi = im0[y:y+h, x:x+w]
        roi[np.where(logo_mask)] = 0
        roi += logo

cv2.imshow(str(p), im0)  # im0 2
cv2.waitKey(1)  # 1 millisecond
I am wondering how I should create a timer here so that exactly the same processing happens to img2, img3, and so on?
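One way to do this (a sketch under assumptions: the overlay filenames and the 2-second interval are made up) is to pick the active template by elapsed time inside the capture loop, rather than blocking with a timer:

import time
import cv2

# Hypothetical overlay templates; the filenames are placeholders.
overlays = [cv2.imread(name) for name in ('img.png', 'img2.png', 'img3.png')]
interval = 2.0                     # seconds each template stays active (assumption)
start = time.time()

# Inside the existing webcam loop, before the contour processing:
idx = int((time.time() - start) / interval) % len(overlays)
img = overlays[idx]                # this becomes the template resized onto the ROI above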

Drawing contours on a canvas with OpenCV

I want to detect contours in a freshly made screenshot, and while I can access the number of contours found, I cannot draw them on a black canvas. This is my code:
import cv2 as cv        # imports implied by the snippet
import numpy as np
import pyautogui
import keyboard

while True:
    keyboard.wait('right')
    img = pyautogui.screenshot()
    img = cv.cvtColor(np.array(img), cv.COLOR_RGB2BGR)
    img = CropImage(img)  # CropImage is the asker's own helper, defined elsewhere
    blank = np.zeros(img.shape[:2], dtype='uint8')
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    ret, thresh = cv.threshold(gray, 122, 255, cv.THRESH_BINARY)
    contours, hierarchies = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    print(f'{len(contours)} contours found')
    cv.drawContours(blank, contours, -1, (0, 255, 0))
    cv.imwrite('C:/screeny/screen0.png', blank)
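A likely explanation (not stated in the post, so treat it as an assumption): np.zeros(img.shape[:2]) creates a single-channel canvas, so the green colour (0, 255, 0) collapses to intensity 0 and the contours are drawn black on black. A sketch of the fix:

# Either draw on a 3-channel canvas ...
blank = np.zeros((img.shape[0], img.shape[1], 3), dtype='uint8')
cv.drawContours(blank, contours, -1, (0, 255, 0), 1)

# ... or keep the single-channel canvas and draw in white:
# blank = np.zeros(img.shape[:2], dtype='uint8')
# cv.drawContours(blank, contours, -1, 255, 1)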

Problem with opencv (cv2) detecting motion in my code

I have written code to detect motion through a webcam, but whenever I start it, it detects the entire screen instead of just the objects moving in it. I noticed that if I close the camera, the detection seems to go away. Here is the code:
import cv2

first_frame = None
video = cv2.VideoCapture(0)

while True:
    check, frame = video.read()

    # Grayscale + blur before differencing
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # The first frame is used as the static background
    if first_frame is None:
        first_frame = gray
        continue

    delta_frame = cv2.absdiff(first_frame, gray)
    thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)

    (cnts, _) = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in cnts:
        if cv2.contourArea(contour) < 4000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 225, 0), 3)

    cv2.imshow("Delta Frame", delta_frame)
    cv2.imshow("Capturing", gray)
    cv2.imshow("Threshold Frame", thresh_frame)
    cv2.imshow("Color Frame", frame)

    key = cv2.waitKey(5)
    if key == ord('q'):
        break

video.release()
cv2.destroyAllWindows()
The code should detect only the moving objects in the frame.
I had a similar issue a while back when I started with OpenCV; the problem with the program is the first frame: it captures a dark screen. The difference computed by cv2.absdiff between the first frame (first_frame) and the subsequent frames (gray) is then large enough that cv2.findContours flags the whole screen. This may be caused by camera start-up delay.
It can be solved by adding a slight delay between when the camera loads and when the program records the first frame (first_frame), using time.sleep(). Try this:
import cv2, time

first_frame = None
video = cv2.VideoCapture(0)

# the camera has some lag time hence the starting of video.read outside loop and sleep
video.read()
time.sleep(2)

while True:
    check, frame = video.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if first_frame is None:
        first_frame = gray
        continue
    delta_frame = cv2.absdiff(first_frame, gray)
    thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)
    (cnts, _) = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in cnts:
        if cv2.contourArea(contour) < 4000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 225, 0), 3)
    cv2.imshow("Delta Frame", delta_frame)
    cv2.imshow("Capturing", gray)
    cv2.imshow("Threshold Frame", thresh_frame)
    cv2.imshow("Color Frame", frame)
    key = cv2.waitKey(5)
    if key == ord('q'):
        break

video.release()
cv2.destroyAllWindows()
time.sleep() takes care of the camera's start-up lag.
Try the code; it worked here.
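An alternative sketch (not from the original answer): OpenCV's MOG2 background subtractor maintains an adaptive background model, so a bad first frame matters much less. The history and varThreshold values below are assumptions to tune.

import cv2

video = cv2.VideoCapture(0)
# Adaptive background model; parameters are assumptions
subtractor = cv2.createBackgroundSubtractorMOG2(history=200, varThreshold=25, detectShadows=False)

while True:
    check, frame = video.read()
    if not check:
        break
    fg_mask = subtractor.apply(frame)        # background model updates every frame
    fg_mask = cv2.dilate(fg_mask, None, iterations=2)
    cnts, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in cnts:
        if cv2.contourArea(contour) < 4000:
            continue
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
    cv2.imshow("Color Frame", frame)
    if cv2.waitKey(5) == ord('q'):
        break

video.release()
cv2.destroyAllWindows()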

What are the rules cv2.approxPolyDP uses to pick points?

I have the two input images below:
First:
Second:
Now I want to detect the square area.
And here is my code:
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import cv2
import argparse
import math
import sys

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to input image")
args = vars(ap.parse_args())

img = cv2.imread(args["image"])
# img = cv2.bitwise_not(img, img)
# gray = cv2.imread(args["image"], 0)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
cv2.imshow('gray', gray)
cv2.waitKey(0)

gray = cv2.normalize(gray, gray, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
cv2.imshow('normalized', gray)
cv2.waitKey(0)
# sys.exit()

ret, thresh = cv2.threshold(gray, 230, 255, cv2.THRESH_BINARY_INV)
cv2.imshow('thresh', thresh)
# cv2.imwrite('./wni230.png', thresh)
cv2.waitKey(0)

square_cnts = []
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))
cv2.imshow('dilated', thresh)
cv2.waitKey(0)
# sys.exit()

tmpimage, contours, h = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt0 = contours[0]
tmp_h = h[::-1]
for index, cnt in enumerate(contours[::-1]):
    print '!!!!!!!!!!! new round !!!!!!!!!!'
    print cv2.arcLength(cnt, True)
    # print cnt
    approx = cv2.approxPolyDP(cnt, 0.1 * cv2.arcLength(cnt, True), True)
    print '**********approx*****'
    print approx
    print len(approx)
    print '**********approx end*****'
    if len(approx) == 4:
        print 'h[i][2]:%s' % tmp_h[0][index][2]
        print 'h[i][3]:%s' % tmp_h[0][index][3]
        if ret > 0.5:
            print "Parallelogram"
        elif 0.3 < ret < 0.5:
            print "Rectangle"
        elif 0 < ret < 0.3:
            print "Rhombus"
        else:
            print "square"
        print cv2.arcLength(cnt, True)
        print approx
        cv2.drawContours(img, [cnt], 0, (0, 0, 255), 2)
        cv2.imshow('tmpsquare', img)
        cv2.waitKey(0)
        if int(cv2.arcLength(cnt, True)) >= 96:
            x, y, w, h = cv2.boundingRect(approx)
            for j in approx:
                cv2.circle(img, (int(j[0][0]), int(j[0][1])), 1, (0, 255, 0), 2)
            cv2.imshow('final', img[y:y+h, x:x+w])
            cv2.waitKey(0)
            print 'target but long squere detected...'

cv2.waitKey(0)
cv2.imshow('img', img)
cv2.imwrite('tmp.png', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
So I got these two output images:
First:
Second:
The line in red is the contour; the points in green are the points detected by approxPolyDP.
The first one is what I want.
However, how does approxPolyDP locate the upper-most point in the second one?
Could anyone please explain the rules?
Env:
Python: 2.7.10
Opencv: 3.2.0
Thanks.
Wesley
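For what it's worth, approxPolyDP implements the Ramer-Douglas-Peucker algorithm: it keeps a vertex only if it lies farther than epsilon from the chord joining the already-kept neighbours, so with epsilon = 0.1 * arcLength any bump shallower than 10% of the perimeter collapses, and a surviving "upper-most" point is simply the vertex farthest from the chord being simplified. A small sketch with a made-up contour shows how the kept point count changes with epsilon:

import numpy as np
import cv2

# A made-up closed contour: a 100x100 square with one shallow bump at (50, 10).
contour = np.array([[[0, 0]], [[50, 10]], [[100, 0]],
                    [[100, 100]], [[0, 100]]], dtype=np.int32)

perimeter = cv2.arcLength(contour, True)
for factor in (0.01, 0.05, 0.1):
    epsilon = factor * perimeter
    approx = cv2.approxPolyDP(contour, epsilon, True)
    # Vertices closer than epsilon to the simplified edges are dropped (RDP)
    print('epsilon=%.1f -> %d points kept' % (epsilon, len(approx)))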

Tracking white color

I am trying to understand how to work with an ROI, but the following code is not working as expected.
I have a white ball moving across a black background, and I want to detect it and keep tracking it. Unfortunately, the square does not move. I think the problem is the color array, but after trying many values, I still haven't found the correct solution.
import numpy as np
import cv2

cap = cv2.VideoCapture('oieee.avi')

# Take the first frame and define the initial tracking window
ret, frame = cap.read()
r, h, c, w = 485, 50, 890, 50
track_window = (c, r, w, h)

# Build the histogram used for back-projection
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 0., 240.)), np.array((255., 15., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while(1):
    ret, frame = cap.read()
    if ret == True:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

        # Apply meanShift to get the new window location
        ret, track_window = cv2.meanShift(dst, track_window, term_crit)
        x, y, w, h = track_window
        cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
        img2 = cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
        cv2.imshow('img2', frame)

        k = cv2.waitKey(60) & 0xff
        if k == 27:
            break
        else:
            cv2.imwrite(chr(k) + ".jpg", img2)
    else:
        break

cv2.destroyAllWindows()
cap.release()
A screenshot from my video: http://imgur.com/VG3TgFF
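A possible culprit (an assumption, not confirmed by the post): hsv_roi is computed from the whole frame rather than the cropped roi, and for a white ball the hue channel carries almost no information, so the back-projection is flat and meanShift never moves. A sketch of an alternative histogram set-up, using the value channel instead of hue:

# Minimal sketch of the histogram set-up only; the rest of the loop stays as above.
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)          # convert the crop, not the full frame
mask = cv2.inRange(hsv_roi, np.array((0., 0., 240.)), np.array((180., 30., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [2], mask, [256], [0, 256])   # channel 2 = value (brightness)
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# In the loop, back-project on the same channel and range:
# dst = cv2.calcBackProject([hsv], [2], roi_hist, [0, 256], 1)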
