Save both input video and output video in OpenCV Python

I wrote some code based on the OpenCV documentation on motion detection. I want to save both my input and my output as video files, but I am having problems: I can only save the output video, and sometimes the saved input video is identical to the output video. My input comes from my computer's camera and is a normal video; the output is the same video with rectangles drawn around the moving objects. I need your help.
import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)
(grabbed, frame) = cap.read()
fshape = frame.shape
fheight = fshape[0]
fwidth = fshape[1]
print(fwidth, fheight)
ret, frame1 = cap.read()
ret, frame2 = cap.read()
while cap.isOpened():
    diff = cv.absdiff(frame1, frame2)
    gray = cv.cvtColor(diff, cv.COLOR_BGR2GRAY)
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)
    dilated = cv.dilate(thresh, None, iterations=3)
    contours, _ = cv.findContours(dilated, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        (x, y, w, h) = cv.boundingRect(contour)
        if cv.contourArea(contour) < 100:
            continue
        cv.rectangle(frame1, (x, y), (x+w, y+h), (0, 0, 255), 3)
    Final_Movie = cv.putText(frame1, "Status: {}".format('Movement'),
                             (10, 20), cv.FONT_HERSHEY_DUPLEX,
                             1, (0, 0, 255), 3)
    #cv.drawContours(frame1, contours, -1, (0, 0, 255), 3)
    cv.imshow("feed", frame1)
    cv.imshow("feed1", blur)
    cv.imshow("feed2", gray)
    cv.imshow("feed3", diff)
    cv.imshow("feed4", thresh)
    cv.imshow("feed5", dilated)
    #cv.imshow("feed6", contours)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv.waitKey(40) == 27:
        break
cv.destroyAllWindows()
cap.release()
I tried this, but it doesn't work:
import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)
(grabbed, frame) = cap.read()
fshape = frame.shape
fheight = fshape[0]
fwidth = fshape[1]
print(fwidth, fheight)
fourcc = cv.VideoWriter_fourcc(*'XVID')
out = cv.VideoWriter('output.avi', fourcc, 20.0, (fwidth, fheight))
out1 = cv.VideoWriter('input.avi', fourcc, 20.0, (fwidth, fheight))
ret, frame1 = cap.read()
ret, frame2 = cap.read()
while cap.isOpened():
    diff = cv.absdiff(frame1, frame2)
    gray = cv.cvtColor(diff, cv.COLOR_BGR2GRAY)
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)
    dilated = cv.dilate(thresh, None, iterations=3)
    contours, _ = cv.findContours(dilated, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        (x, y, w, h) = cv.boundingRect(contour)
        if cv.contourArea(contour) < 100:
            continue
        cv.rectangle(frame1, (x, y), (x+w, y+h), (0, 0, 255), 3)
    Final_Movie = cv.putText(frame1, "Status: {}".format('Movement'),
                             (10, 20), cv.FONT_HERSHEY_DUPLEX,
                             1, (0, 0, 255), 3)
    #cv.drawContours(frame1, contours, -1, (0, 0, 255), 3)
    cv.imshow("feed", frame1)
    cv.imshow("feed1", blur)
    cv.imshow("feed2", gray)
    cv.imshow("feed3", diff)
    cv.imshow("feed4", thresh)
    cv.imshow("feed5", dilated)
    #cv.imshow("feed6", contours)
    if ret == True:
        out.write(Final_Movie)
        out.write(frame2)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv.waitKey(40) == 27:
        break
cv.destroyAllWindows()
cap.release()
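A likely cause, judging from the second script: both out.write(Final_Movie) and out.write(frame2) go to the same writer out, so out1 (the input file) never receives a frame, and because the rectangles are drawn directly onto frame1, no untouched copy of the input frame survives to be saved. Below is a minimal sketch of a fix, under the assumption that the rest of the pipeline stays as above: keep a copy of the raw frame before drawing on it, write the copy to out1 and the annotated frame to out, and release both writers at the end.
import cv2 as cv

cap = cv.VideoCapture(0)
grabbed, frame = cap.read()
fheight, fwidth = frame.shape[:2]

fourcc = cv.VideoWriter_fourcc(*'XVID')
out = cv.VideoWriter('output.avi', fourcc, 20.0, (fwidth, fheight))   # annotated video
out1 = cv.VideoWriter('input.avi', fourcc, 20.0, (fwidth, fheight))   # raw camera video

ret, frame1 = cap.read()
ret, frame2 = cap.read()
while cap.isOpened() and ret:
    raw = frame1.copy()  # untouched copy for input.avi, taken before any drawing
    diff = cv.absdiff(frame1, frame2)
    gray = cv.cvtColor(diff, cv.COLOR_BGR2GRAY)
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)
    dilated = cv.dilate(thresh, None, iterations=3)
    contours, _ = cv.findContours(dilated, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        if cv.contourArea(contour) < 100:
            continue
        x, y, w, h = cv.boundingRect(contour)
        cv.rectangle(frame1, (x, y), (x + w, y + h), (0, 0, 255), 3)
    out1.write(raw)     # input video: the raw frame
    out.write(frame1)   # output video: the annotated frame
    cv.imshow("feed", frame1)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv.waitKey(40) == 27:
        break

cap.release()
out.release()
out1.release()
cv.destroyAllWindows()
The release() calls on both writers matter: without them the AVI container may never be finalized, and some players will refuse to open the files.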

Related

Trying to print the center position of a square

I have tried to use cv2.putText, but it appears to show the position relative to the top right of the window and not at the actual center of the image. It is probably an obvious fix, since I just started using OpenCV.
import os
import numpy as np
import cv2

font = cv2.FONT_HERSHEY_SIMPLEX
org = (50, 50)
fontScale = 1
color = (255, 0, 0)
radius = 3
thickness = 2
cascPath = os.path.dirname(cv2.__file__) + "/data/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)
while True:
    ret, frames = video_capture.read()
    gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frames, (x, y), (x+w, y+h), (0, 255, 0), 2)
        text = (x+w//2), (y+h//2)
        cv2.circle(frames, (cx, cy), radius, (255, 0, 0), -1)  # cx, cy are never defined
        cv2.putText(frames, str(text), org, font, fontScale, color, thickness)
    cv2.imshow('Video', frames)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video_capture.release()
cv2.destroyAllWindows()
With the cv2.getTextSize() function you can measure the pixel size of the text you are about to draw and subtract half of it from the text's position; that way the text lands exactly on the center:
text = (x+w//2), (y+h//2)
text_size,t = cv2.getTextSize(text=str(text), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=1, thickness=1)
text_size_x,text_size_y = text_size
text_pos = (x+w//2)-(text_size_x//2), (y+h//2)+(text_size_y//2)
Here is the working code:
import os
import numpy as np
import cv2

font = cv2.FONT_HERSHEY_SIMPLEX
org = (50, 50)
fontScale = 1
color = (255, 0, 0)
radius = 3
thickness = 2
cascPath = os.path.dirname(cv2.__file__) + "/data/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)
while True:
    ret, frames = video_capture.read()
    gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frames, (x, y), (x+w, y+h), (0, 255, 0), 2)
        text = (x+w//2), (y+h//2)
        text_size, t = cv2.getTextSize(text=str(text), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=1, thickness=1)
        text_size_x, text_size_y = text_size
        text_pos = (x+w//2) - (text_size_x//2), (y+h//2) + (text_size_y//2)
        #cv2.circle(frames, (cx, cy), radius, (255, 0, 0), -1)
        cv2.putText(frames, str(text), text_pos, font, fontScale, color, thickness)
    cv2.imshow('Video', frames)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video_capture.release()
cv2.destroyAllWindows()

"can't open camera by index" using cv2 on Linux

I am trying to make a motion detector (using the internal camera) in Python 3 on Linux (Debian), and I keep getting this error:
[ WARN:0#0.724] global /io/opencv/modules/videoio/src/cap_v4l.cpp (889) open VIDEOIO(V4L2:/dev/video0): can't open camera by index
Here's the code I'm using:
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())
if args.get("video", None) is None:
    vs = VideoStream(src=0).start()
    time.sleep(2.0)
else:
    vs = cv2.VideoCapture(args["video"])
firstFrame = None
while True:
    frame = vs.read()
    frame = frame if args.get("video", None) is None else frame[1]
    text = "Muon is stuck in helium"
    if frame is None:
        break
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if firstFrame is None:
        firstFrame = gray
        continue
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    for c in cnts:
        if cv2.contourArea(c) < args["min_area"]:
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Muon is fusing hydrogen"
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
    cv2.imshow("Security Feed", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break
vs.stop() if args.get("video", None) is None else vs.release()
cv2.destroyAllWindows()
Personally, I think the problem is that Linux is having trouble accessing the internal camera, but of course I have been wrong before. If that is the problem, can somebody please help me fix it? And if it isn't, can somebody please tell me what I do need to fix?
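One way to narrow this down (a suggestion, not a confirmed fix for this machine): check that a V4L2 device node actually exists with ls /dev/video*, confirm your user is in the video group, and probe a few camera indices, since the internal camera is not always index 0. A minimal sketch:
import cv2

# Probe the first few V4L2 indices; the internal camera is not always 0.
for index in range(4):
    cap = cv2.VideoCapture(index)
    if cap.isOpened():
        ret, frame = cap.read()
        print("index {}: opened, frame grabbed: {}".format(index, ret))
        cap.release()
    else:
        print("index {}: cannot open".format(index))
If no index opens, the problem is below OpenCV: either the kernel driver (uvcvideo for most webcams) or device permissions, which sudo usermod -aG video $USER followed by logging back in would address if it is a permissions issue.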

Saving two Operated Videos from a webcam using OpenCV

I would like to save two videos at the same time using Python, like in this YoloV2, Yolo 9000, SSD Mobilenet, Faster RCNN NasNet comparison. I am not sure whether the author saved them at the same time or merged them afterwards with another program.
Can I do it using Python?
This is my code; it saves the two videos separately:
import cv2
import numpy as np
from imutils.video import FPS
import time

# Define the codec and create the VideoWriter objects
fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
fourcc1 = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
out = cv2.VideoWriter('colors.avi', fourcc, 20.0, (640, 480))
out1 = cv2.VideoWriter('colors1.avi', fourcc1, 20.0, (640, 480))

# capturing video through the webcam
cap = cv2.VideoCapture(0)
# video dimensions in python-opencv
width = cap.get(3)   # float
height = cap.get(4)  # float
print(width, height)
time.sleep(2.0)
while True:
    _, img = cap.read()
    if _ is True:
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    else:
        continue
    red_lower = np.array([136, 87, 111], np.uint8)
    red_upper = np.array([180, 255, 255], np.uint8)
    red = cv2.inRange(hsv, red_lower, red_upper)
    kernal = np.ones((5, 5), "uint8")
    red = cv2.dilate(red, kernal)
    res_red = cv2.bitwise_and(img, img, mask=red)
    (_, contours, hierarchy) = cv2.findContours(red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if area > 300:
            x, y, w, h = cv2.boundingRect(contour)
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
            cv2.putText(img, "Red Colour", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255))
    out.write(res_red)
    out1.write(img)
    cv2.imshow("Color Tracking", img)
    cv2.imshow("Color Tracking1", res_red)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break
Output: two videos.
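If the goal is a single file that shows both views at once (which is what the linked comparison video appears to be), one option, sketched below under the assumption that both frames have the same size, is to stack the two frames side by side with np.hstack and write the combined frame to one VideoWriter whose width is doubled. The file name combined.avi is a placeholder:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

fourcc = cv2.VideoWriter_fourcc(*'MJPG')
# One writer, twice the width, so both views end up in a single file.
combined = cv2.VideoWriter('combined.avi', fourcc, 20.0, (2 * width, height))

while True:
    ret, img = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    red = cv2.inRange(hsv, np.array([136, 87, 111], np.uint8),
                      np.array([180, 255, 255], np.uint8))
    res_red = cv2.bitwise_and(img, img, mask=red)
    side_by_side = np.hstack((img, res_red))  # both views in one frame
    combined.write(side_by_side)
    cv2.imshow("Combined", side_by_side)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

cap.release()
combined.release()
cv2.destroyAllWindows()
Both inputs to np.hstack must have the same height and number of channels; if one of them is a single-channel mask, convert it with cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) first.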

How to put bounding box around the detected human outline

Here is the Python code I have written:
import cv2
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
args = vars(ap.parse_args())
if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
How can I put a bounding box around the detected human outline, and how can I improve the efficiency of this Python code, which performs background subtraction on a live webcam feed? Can someone help?
Drawing Contour Using Background Subtraction
import cv2
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
args = vars(ap.parse_args())
if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    gray = cv2.cvtColor(fgmask, cv2.COLOR_BGR2GRAY)
    ret, th1 = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY)
    _, contours, hierarchy = cv2.findContours(th1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 1000 and area < 40000:
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(fgmask, (x, y), (x+w, y+h), (255, 0, 0), 2)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
Drawing Contour using HSV Masking and Convex Hull
Set the values for the HSV mask:
import cv2
import argparse
import numpy as np
import matplotlib.pyplot as plt

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
args = vars(ap.parse_args())
if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower = np.array([50, 103, 40])
    upper = np.array([255, 255, 255])
    mask = cv2.inRange(hsv, lower, upper)
    fg = cv2.bitwise_and(frame, frame, mask=255-mask)
    fg = cv2.cvtColor(fg.copy(), cv2.COLOR_HSV2BGR)
    fg = cv2.cvtColor(fg, cv2.COLOR_BGR2GRAY)
    fg = cv2.threshold(fg, 120, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
    #plt.imshow(fg)
    #plt.show()
    se = np.ones((3, 3), np.uint8)
    fgclosing = cv2.morphologyEx(fg.copy(), cv2.MORPH_CLOSE, se)
    #fgdilated = cv2.morphologyEx(fgclosing, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4)))
    fgdilated = cv2.dilate(fgclosing, kernel=se, iterations=8)
    img = frame.copy()
    ret, threshed_img = cv2.threshold(fgdilated,
        127, 255, cv2.THRESH_BINARY)
    image, contours, hier = cv2.findContours(threshed_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        #print(cv2.contourArea(cnt))
        if cv2.contourArea(cnt) > 44000:
            # get convex hull
            hull = cv2.convexHull(cnt)
            #print(hull)
            (x, y, w, h) = cv2.boundingRect(cnt)
            #cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
            #c1 = max(contours, key=cv2.contourArea)
            c = hull
            #print(c)
            cv2.drawContours(img, [hull], -1, (0, 0, 255), 1)
            # determine the most extreme points along the contour
            extLeft = tuple(c[c[:, :, 0].argmin()][0])
            extRight = tuple(c[c[:, :, 0].argmax()][0])
            extTop = tuple(c[c[:, :, 1].argmin()][0])
            extBot = tuple(c[c[:, :, 1].argmax()][0])
            cv2.drawContours(img, [c], -1, (0, 255, 255), 2)
            cv2.circle(img, extLeft, 8, (0, 0, 255), -1)
            cv2.circle(img, extRight, 8, (0, 255, 0), -1)
            cv2.circle(img, extTop, 8, (255, 0, 0), -1)
            cv2.circle(img, extBot, 8, (255, 255, 0), -1)
            lx = extLeft[1]
            ly = extLeft[0]
            rx = extRight[1]
            ry = extRight[0]
            tx = extTop[1]
            ty = extTop[0]
            bx = extBot[1]
            by = extBot[0]
            x, y = lx, by
            w, h = abs(rx-lx), abs(ty-by)
            #cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img, str(extLeft[0])+','+str(extLeft[1]), extLeft, font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(img, str(extRight[0])+','+str(extRight[1]), extRight, font, 2, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(img, str(extTop[0])+','+str(extTop[1]), extTop, font, 2, (255, 0, 0), 2, cv2.LINE_AA)
            cv2.putText(img, str(extBot[0])+','+str(extBot[1]), extBot, font, 2, (255, 255, 0), 2, cv2.LINE_AA)
            im = frame[tx:bx, ly:ry, :]
            cx = im.shape[1]//2
            cy = im.shape[0]//2
            cv2.circle(im, (cx, cy), 15, (0, 255, 0))
    plt.imshow(img)
    plt.show()
You can use findContours.
import cv2
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
args = vars(ap.parse_args())
if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    mask = 255 - fgmask
    _, contours, _ = cv2.findContours(
        mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    fgmask = cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR)
    for contour in contours:
        area = cv2.contourArea(contour)
        # only show contours that match the area criteria
        if area > 500 and area < 20000:
            rect = cv2.boundingRect(contour)
            x, y, w, h = rect
            cv2.rectangle(fgmask, (x, y), (x+w, y+h), (0, 255, 0), 3)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
I have tested it with the video https://github.com/opencv/opencv/blob/master/samples/data/vtest.avi

(Edited) BackgroundSubtractionMOG2 + Mean-Shift Tracking?

I am doing a project that is a motion-based detection program.
However, it detects changes in the background as "motion", so I'd like a way to recapture a new first frame every few minutes to replace the current one and fix this issue.
I am using a Raspberry Pi 2B and a Logitech webcam.
The code I am using is based on Pyimagesearch.
This is my version of the code.
Please help me.
(Edit) I have changed my code to use BackgroundSubtractorMOG2; now my issue is how do I add Mean-Shift tracking so that it recognizes it is the same object that entered the frame earlier?
import sys
sys.path.append('/usr/local/lib/python3.4/site-packages')
import numpy as np
import cv2
import imutils
from imutils import contours
import datetime
import time

#cap = cv2.VideoCapture("/home/pi/Desktop/Proj/VideoTestSample.mp4")
cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
while cap.isOpened():
    (grabbed, frame) = cap.read()
    text = " "
    if not grabbed:
        break
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, 5)
    fgmask = fgbg.apply(gray)
    thresh = cv2.erode(fgmask, None, iterations=2)
    (_, cnts, hierarchy) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for (i, c) in enumerate(cnts):
        if cv2.contourArea(c) < 300:
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.putText(frame, "#{}".format(i + 1), (x, y - 15),
            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
        text = "REC"
    cv2.putText(frame, "{}".format(text), (10, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
    cv2.imshow('frame', frame)
    cv2.imshow('gray', gray)
    cv2.imshow('fgmask', fgmask)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
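A minimal sketch of one way to bolt Mean-Shift onto a MOG2 pipeline like this (an illustration with made-up thresholds, not a drop-in replacement for the script above): seed a track window from the first large moving region the background subtractor finds, build an HSV hue histogram of that region, and let cv2.meanShift follow it in later frames.
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
track_window = None
roi_hist = None
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while cap.isOpened():
    grabbed, frame = cap.read()
    if not grabbed:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    if track_window is None:
        # Seed a track window from the largest moving region.
        fgmask = fgbg.apply(frame)
        fgmask = cv2.erode(fgmask, None, iterations=2)
        # [-2] picks the contour list in both OpenCV 3.x and 4.x return formats.
        cnts = cv2.findContours(fgmask, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        big = [c for c in cnts if cv2.contourArea(c) > 300]
        if big:
            x, y, w, h = cv2.boundingRect(max(big, key=cv2.contourArea))
            track_window = (x, y, w, h)
            roi = hsv[y:y + h, x:x + w]
            roi_hist = cv2.calcHist([roi], [0], None, [180], [0, 180])
            cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    else:
        # Follow the seeded object with Mean-Shift on the hue back-projection.
        back_proj = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        _, track_window = cv2.meanShift(back_proj, track_window, term_crit)
        x, y, w, h = track_window
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
A real version would also need a rule for dropping the track window when the object leaves the frame (for example, when the back-projection inside the window falls below some level) so a new object can be seeded.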
