Hi, I want to change the shape of the live video frame before the while loop. My webcam's default resolution is 640x480.
When I run it, it prints "(480, 640, 3)", so it is still the default resolution.
......
ret, frame1 = cap.read()
cropped1 = frame1[0:240, 0:320]
ret, cropped1 = cap.read()  # note: this read overwrites the cropped frame with a full 640x480 frame
ret, frame2 = cap.read()
print(cropped1.shape)
while cap.isOpened():
    diff = cv2.absdiff(cropped1, frame2)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5,5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
......
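As an aside: if the goal is to capture at a lower resolution rather than crop, many webcams honor the capture properties, though driver support varies, so it is worth checking what you actually get. A minimal sketch:

import cv2

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)   # request 320x240; the driver may ignore this
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
ret, frame = cap.read()
print(frame.shape)                       # verify the resolution actually granted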
Finally, I did it!
...
ret, frame1 = cap.read()
ret, frame2 = cap.read()
print(frame1.shape)
while cap.isOpened():
    diff = cv2.absdiff(frame1, frame2)  # 640x480 frame
    diff2 = diff[0:240, 0:320]      # top left
    diff3 = diff[0:240, 320:640]    # top right
    diff4 = diff[240:480, 0:320]    # bottom left (was 240:640; rows only go to 480)
    diff5 = diff[240:480, 320:640]  # bottom right
    gray2 = cv2.cvtColor(diff2, cv2.COLOR_BGR2GRAY)
    blur2 = cv2.GaussianBlur(gray2, (5,5), 0)
    _, thresh2 = cv2.threshold(blur2, 20, 255, cv2.THRESH_BINARY)
    dilated2 = cv2.dilate(thresh2, None, iterations=3)
    contours2, _ = cv2.findContours(dilated2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    gray3 = cv2.cvtColor(diff3, cv2.COLOR_BGR2GRAY)
    blur3 = cv2.GaussianBlur(gray3, (5,5), 0)
    _, thresh3 = cv2.threshold(blur3, 20, 255, cv2.THRESH_BINARY)
    dilated3 = cv2.dilate(thresh3, None, iterations=3)
    contours3, _ = cv2.findContours(dilated3, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # ... the same steps repeat for diff4 and diff5 (gray4/gray5, blur4/blur5, and so on)
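For reference, the per-quadrant pipeline above can also be written once and looped over the four slice ranges instead of being duplicated; a minimal sketch with the same steps (the quadrants list and region name are mine):

# slice ranges for the four quadrants of a 640x480 frame: (rows, cols)
quadrants = [((0, 240), (0, 320)),     # top left
             ((0, 240), (320, 640)),   # top right
             ((240, 480), (0, 320)),   # bottom left
             ((240, 480), (320, 640))] # bottom right
all_contours = []
for (y0, y1), (x0, x1) in quadrants:
    region = diff[y0:y1, x0:x1]
    gray = cv2.cvtColor(region, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    all_contours.append(contours)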
Related
I want to detect contours in a freshly taken screenshot, and while I can access the number of contours found, I cannot draw them on a black canvas. This is my code:
while True:
    keyboard.wait('right')
    img = pyautogui.screenshot()
    img = cv.cvtColor(np.array(img), cv.COLOR_RGB2BGR)
    img = CropImage(img)  # my own cropping helper, defined elsewhere
    blank = np.zeros(img.shape[:2], dtype='uint8')
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    ret, thresh = cv.threshold(gray, 122, 255, cv.THRESH_BINARY)
    contours, hierarchies = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    print(f'{len(contours)} contours found')
    cv.drawContours(blank, contours, -1, (0,255,0))
    cv.imwrite('C:/screeny/screen0.png', blank)
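A likely cause, for what it's worth: blank is single-channel, so only the first component of the BGR tuple (0,255,0) is used as the color, which is 0, and the contours come out black on black. Two possible fixes, assuming the same variables as above:

# Option 1: draw in white on the single-channel canvas
cv.drawContours(blank, contours, -1, 255, 1)

# Option 2: make the canvas 3-channel so BGR colors apply
blank = np.zeros((img.shape[0], img.shape[1], 3), dtype='uint8')
cv.drawContours(blank, contours, -1, (0, 255, 0), 1)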
I wrote code based on the OpenCV documentation on motion detection. I want to save both my input and my output as videos, but I'm having problems: I can only save the output video, and sometimes the input video turns out to be the same as the output video. My input is my computer's camera feed, which looks like a normal video; the output is that same video with squares drawn around moving objects. I need your help.
import cv2 as cv
import numpy as np
cap = cv.VideoCapture(0)
(grabbed, frame) = cap.read()
fshape = frame.shape
fheight = fshape[0]
fwidth = fshape[1]
print (fwidth , fheight)
ret, frame1 = cap.read()
ret, frame2 = cap.read()
while cap.isOpened():
    diff = cv.absdiff(frame1, frame2)
    gray = cv.cvtColor(diff, cv.COLOR_BGR2GRAY)
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)
    dilated = cv.dilate(thresh, None, iterations=3)
    contours, _ = cv.findContours(dilated, cv.RETR_TREE,
                                  cv.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        (x, y, w, h) = cv.boundingRect(contour)
        if cv.contourArea(contour) < 100:
            continue
        cv.rectangle(frame1, (x, y), (x+w, y+h), (0, 0, 255), 3)
    Final_Movie = cv.putText(frame1, "Status: {}".format('Movement'), (10, 20),
                             cv.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 3)
    #cv.drawContours(frame1, contours, -1, (0, 0, 255), 3)
    cv.imshow("feed", frame1)
    cv.imshow("feed1", blur)
    cv.imshow("feed2", gray)
    cv.imshow("feed3", diff)
    cv.imshow("feed4", thresh)
    cv.imshow("feed5", dilated)
    #cv.imshow("feed6", contours)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv.waitKey(40) == 27:
        break
cv.destroyAllWindows()
cap.release()
I tried this, but it doesn't work.
import cv2 as cv
import numpy as np
cap = cv.VideoCapture(0)
(grabbed, frame) = cap.read()
fshape = frame.shape
fheight = fshape[0]
fwidth = fshape[1]
print (fwidth , fheight)
fourcc = cv.VideoWriter_fourcc(*'XVID')
out = cv.VideoWriter('output.avi', fourcc, 20.0, (fwidth, fheight))
out1 = cv.VideoWriter('input.avi', fourcc, 20.0, (fwidth, fheight))
ret, frame1 = cap.read()
ret, frame2 = cap.read()
while cap.isOpened():
    diff = cv.absdiff(frame1, frame2)
    gray = cv.cvtColor(diff, cv.COLOR_BGR2GRAY)
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)
    dilated = cv.dilate(thresh, None, iterations=3)
    contours, _ = cv.findContours(dilated, cv.RETR_TREE,
                                  cv.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        (x, y, w, h) = cv.boundingRect(contour)
        if cv.contourArea(contour) < 100:
            continue
        cv.rectangle(frame1, (x, y), (x+w, y+h), (0, 0, 255), 3)
    Final_Movie = cv.putText(frame1, "Status: {}".format('Movement'), (10, 20),
                             cv.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 3)
    #cv.drawContours(frame1, contours, -1, (0, 0, 255), 3)
    cv.imshow("feed", frame1)
    cv.imshow("feed1", blur)
    cv.imshow("feed2", gray)
    cv.imshow("feed3", diff)
    cv.imshow("feed4", thresh)
    cv.imshow("feed5", dilated)
    #cv.imshow("feed6", contours)
    if ret == True:
        out.write(Final_Movie)  # note: both writes go to out; out1 is never used
        out.write(frame2)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv.waitKey(40) == 27:
        break
cv.destroyAllWindows()
cap.release()
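For reference, two things stand out in the attempt above: both write() calls go to out (so out1 never receives anything), and frame1 has already been annotated in place by the time it is written, so the "input" recording ends up looking like the output. A minimal sketch of the fix, assuming the same setup as above:

clean = frame1.copy()      # keep an unannotated copy before drawing on frame1
# ... motion pipeline draws rectangles and the status text on frame1 ...
if ret:
    out.write(frame1)      # annotated output video
    out1.write(clean)      # raw input video goes to the second writer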
In this problem we are trying to detect persons in a webcam video in real time. The code works fine for one person, but when more than one person enters the frame, it fails miserably. Here is the code:
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
kernel = np.ones((5,5), np.uint8)
background = None
while True:
    ret, frame = cap.read()
    gray = frame.copy()
    gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (11,11), 0)
    if background is None:
        background = gray
        continue
    delta = cv2.absdiff(background, gray)
    # with THRESH_OTSU the fixed threshold value (5) is ignored; Otsu picks its own
    thresh = cv2.threshold(delta, 5, 255,
                           cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    thresh = cv2.dilate(thresh, kernel, iterations=2)
    _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 3.x signature
    if len(contours) == 0:
        continue
    #areas = [cv2.contourArea(c) for c in contours]
    #max_index = np.argmax(areas)
    #cnt = contours[max_index]
    #(x,y,w,h) = cv2.boundingRect(cnt)
    #if (1.0*(w*h)/(640*480) < 0.75):
    #    cv2.rectangle(frame, (x,y), (x+w,y+h), (0,0,255), 3)
    #    print("Area: ", w*h)
    for i in range(len(contours)):
        (x, y, w, h) = cv2.boundingRect(contours[i])
        if w*h <= 90000:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 5)
    #cv2.imshow('thresh', thresh)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()
I think the problem is that the code cannot separate the contours of the different persons it detects, but that may not be the only reason. Can someone help me?
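One likely factor, for what it's worth: a single static background frame plus Otsu thresholding tends to merge nearby people into one blob, and the background never updates. An adaptive subtractor such as MOG2 with a little morphology often separates people better; a minimal sketch (the area threshold and kernel size are guesses to tune):

import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False)  # adaptive background model
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

while True:
    ret, frame = cap.read()
    if not ret:
        break
    fgmask = fgbg.apply(frame)
    # open to remove speckle noise, then close small gaps inside each blob
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
    contours, _ = cv2.findContours(fgmask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)[-2:]  # works on OpenCV 3.x and 4.x
    for c in contours:
        if cv2.contourArea(c) < 1500:  # minimum person size; tune for your scene
            continue
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == 27:
        break

cap.release()
cv2.destroyAllWindows()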
Here is the Python code I have written:
import cv2
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
args = vars(ap.parse_args())
if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
How do I put a bounding box around the detected human outline, and how can I improve the efficiency of this Python code for background subtraction on a live webcam feed? Can someone help?
Drawing Contour Using Background Subtraction
import cv2
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
args = vars(ap.parse_args())
if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    # fgmask is already single-channel, so no BGR2GRAY conversion is needed
    ret, th1 = cv2.threshold(fgmask, 25, 255, cv2.THRESH_BINARY)
    _, contours, hierarchy = cv2.findContours(th1, cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 3.x signature
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 1000 and area < 40000:
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(fgmask, (x, y), (x+w, y+h), (255, 0, 0), 2)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
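One packaging note: cv2.bgsegm comes from the opencv-contrib-python package. With a plain opencv-python install, the built-in MOG2 subtractor is a drop-in alternative:

fgbg = cv2.createBackgroundSubtractorMOG2()  # built-in; no contrib module needed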
Drawing Contour using HSV Masking and Convex Hull
Set the values for the HSV mask.
import cv2
import argparse
import numpy as np                 # needed for np.array / np.ones below
import matplotlib.pyplot as plt    # needed for plt.imshow / plt.show below
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
args = vars(ap.parse_args())
if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
se = np.ones((3, 3), np.uint8)     # structuring element; define before first use
kernel = se
while True:
    ret, frame = cap.read()
    # (the original cv2.imread(frame) call is dropped: frame is already an image array)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower = np.array([50, 103, 40])
    upper = np.array([255, 255, 255])
    mask = cv2.inRange(hsv, lower, upper)
    fg = cv2.bitwise_and(frame, frame, mask=255-mask)
    fg = cv2.cvtColor(fg.copy(), cv2.COLOR_HSV2BGR)
    fg = cv2.cvtColor(fg, cv2.COLOR_BGR2GRAY)
    fg = cv2.threshold(fg, 120, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    #plt.imshow(fg)
    #plt.show()
    fgclosing = cv2.morphologyEx(fg.copy(), cv2.MORPH_CLOSE, kernel)
    #fgdilated = cv2.morphologyEx(fgclosing, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4,4)))
    fgdilated = cv2.dilate(fgclosing, kernel=se, iterations=8)
    img = frame.copy()
    ret, threshed_img = cv2.threshold(fgdilated, 127, 255, cv2.THRESH_BINARY)
    image, contours, hier = cv2.findContours(threshed_img, cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_NONE)  # OpenCV 3.x signature
    for cnt in contours:
        #print(cv2.contourArea(cnt))
        if cv2.contourArea(cnt) > 44000:
            # get convex hull
            hull = cv2.convexHull(cnt)
            #cv2.drawContours(img, [hull], -1, (0, 0, 255), 1)
            #print(hull)
            (x, y, w, h) = cv2.boundingRect(cnt)
            #cv2.rectangle(img, (x,y), (x+w,y+h), (255, 0, 0), 2)
            #c1 = max(contours, key=cv2.contourArea)
            c = hull
            #print(c)
            cv2.drawContours(img, [hull], -1, (0, 0, 255), 1)
            # determine the most extreme points along the contour
            extLeft = tuple(c[c[:, :, 0].argmin()][0])
            extRight = tuple(c[c[:, :, 0].argmax()][0])
            extTop = tuple(c[c[:, :, 1].argmin()][0])
            extBot = tuple(c[c[:, :, 1].argmax()][0])
            cv2.drawContours(img, [c], -1, (0, 255, 255), 2)
            cv2.circle(img, extLeft, 8, (0, 0, 255), -1)
            cv2.circle(img, extRight, 8, (0, 255, 0), -1)
            cv2.circle(img, extTop, 8, (255, 0, 0), -1)
            cv2.circle(img, extBot, 8, (255, 255, 0), -1)
            lx, ly = extLeft[1], extLeft[0]
            rx, ry = extRight[1], extRight[0]
            tx, ty = extTop[1], extTop[0]
            bx, by = extBot[1], extBot[0]
            x, y = lx, by
            w, h = abs(rx-lx), abs(ty-by)
            #cv2.rectangle(img, (x,y), (x+w,y+h), (255, 0, 0), 2)
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img, str(extLeft[0])+','+str(extLeft[1]), extLeft, font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(img, str(extRight[0])+','+str(extRight[1]), extRight, font, 2, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(img, str(extTop[0])+','+str(extTop[1]), extTop, font, 2, (255, 0, 0), 2, cv2.LINE_AA)
            cv2.putText(img, str(extBot[0])+','+str(extBot[1]), extBot, font, 2, (255, 255, 0), 2, cv2.LINE_AA)
            im = frame[tx:bx, ly:ry, :]
            cx = im.shape[1]//2
            cy = im.shape[0]//2
            cv2.circle(im, (cx, cy), 15, (0, 255, 0))
    plt.imshow(img)
    plt.show()
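One small caveat worth noting: plt.imshow expects RGB, while OpenCV images are BGR, so the displayed colors will be swapped unless the image is converted first:

plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))  # convert BGR to RGB for correct colors
plt.show()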
You can use findContours.
import cv2
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
args = vars(ap.parse_args())
if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    mask = 255 - fgmask
    _, contours, _ = cv2.findContours(
        mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)  # OpenCV 3.x signature
    fgmask = cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR)
    for contour in contours:
        area = cv2.contourArea(contour)
        # only show contours that match the area criteria
        if area > 500 and area < 20000:
            rect = cv2.boundingRect(contour)
            x, y, w, h = rect
            cv2.rectangle(fgmask, (x, y), (x+w, y+h), (0, 255, 0), 3)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
I have tested with the video https://github.com/opencv/opencv/blob/master/samples/data/vtest.avi
So, I've downloaded this source code from http://www.pyimagesearch.com/2015/05/25/basic-motion-detection-and-tracking-with-python-and-opencv/:
# import the necessary packages
import argparse
import datetime
import imutils
import time
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500,
help="minimum area size")
args = vars(ap.parse_args())
# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
camera = cv2.VideoCapture(0)
time.sleep(0.25)
# otherwise, we are reading from a video file
else:
camera = cv2.VideoCapture(args["video"])
# initialize the first frame in the video stream
firstFrame = None
# loop over the frames of the video
while True:
# grab the current frame and initialize the occupied/unoccupied
# text
(grabbed, frame) = camera.read()
text = "Unoccupied"
# if the frame could not be grabbed, then we have reached the end
# of the video
if not grabbed:
break
# resize the frame, convert it to grayscale, and blur it
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
# if the first frame is None, initialize it
if firstFrame is None:
firstFrame = gray
continue
# compute the absolute difference between the current frame and
# first frame
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# loop over the contours
for c in cnts:
# if the contour is too small, ignore it
if cv2.contourArea(c) < args["min_area"]:
continue
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Occupied"
But when I run it, it gives me this error:
Traceback (most recent call last):
File "/Users/luistripa/Downloads/basic-motion-detection/motion_detector.py", line 57, in <module>
cv2.CHAIN_APPROX_SIMPLE)
ValueError: too many values to unpack (expected 2)
Can anyone help me fix this?
Try adding [-2:] at the end:
(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)[-2:]
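For context: cv2.findContours returns three values in OpenCV 3.x (image, contours, hierarchy) but only two in 2.4 and 4.x (contours, hierarchy), so the two-value unpack fails on 3.x; slicing with [-2:] keeps the last two items and works on any version. Since the script already imports imutils, its helper does the same thing:

cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)  # returns just the contour list on any OpenCV version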