import numpy as np
import cv2 as cv

cap = cv.VideoCapture("walking")
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE,(3,3))
fgbg = cv.bgsegm.createBackgroundSubtractorGMG()

while(1):
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    fgmask = cv.morphologyEx(fgmask, cv.MORPH_OPEN, kernel)
    im2, contours, hierarchy = cv.findContours(fgmask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    cv.drawContours(fgmask, contours, -1, (0,255,0), 3)

if len(contours) > 0:
    for count in contours:
        x,y,w,h = cv.boundingRect(count)
        cv.rectangle(fgmask,(x,y),(x+w,y+h),(0,255,0),2)

cv.imshow('frame',fgmask)
cv.imshow("stan",frame)

k = cv.waitKey(30) & 0xff
if k == 27:
    break

cap.release()
cv.destroyAllWindows()
This is my code. I am trying to draw a box around each contour in the video, which in this case shows multiple pedestrians walking. No box is showing up at all; am I making a simple mistake somewhere?
Indentation bug. The drawing of the bounding boxes for the detected contours, as well as the display of the results, is not inside the while loop. Therefore you are just reading in frames and finding their contours, but you are never drawing or displaying anything in the display window.
You need those calls inside the loop for the window to update as you read the video.
Keep the while loop and the first 5 lines that follow as they are, and indent everything after them so it runs inside the loop:
import numpy as np
import cv2 as cv

cap = cv.VideoCapture("walking")
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE,(3,3))
fgbg = cv.bgsegm.createBackgroundSubtractorGMG()

while(1):
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    fgmask = cv.morphologyEx(fgmask, cv.MORPH_OPEN, kernel)
    im2, contours, hierarchy = cv.findContours(fgmask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    cv.drawContours(fgmask, contours, -1, (0,255,0), 3)

    if len(contours) > 0:
        for count in contours:
            x,y,w,h = cv.boundingRect(count)
            cv.rectangle(fgmask,(x,y),(x+w,y+h),(0,255,0),2)

    cv.imshow('frame',fgmask)
    cv.imshow("stan",frame)

    k = cv.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv.destroyAllWindows()
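One caveat, not part of the original answer: fgmask is a single-channel mask, so a colour like (0,255,0) drawn on it only uses the first channel value (0) and will not appear green. If coloured boxes are wanted, a small illustrative tweak is to draw on the BGR frame inside the loop instead:

    # Illustrative: draw the boxes on the colour frame, where (0,255,0) renders as green
    for count in contours:
        x, y, w, h = cv.boundingRect(count)
        cv.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv.imshow("stan", frame)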
My code does the following:
1. Grab the video frame.
2. Convert the video frame to the HSV colour space.
3. Split the frame into individual components (separate images for H, S, and V).
4. Apply a threshold to each component.
5. Locate the centroid and apply the bounding circle.
I can find the two largest contours. From these two largest contours, how do I calculate the moments to find the centroids?
Here is an example image to help. Blue are the centroids, red is the minimum enclosing circle, and white are the laser pointers (after the threshold has been applied).
# Dependency imports
import cv2
import numpy as np

# Video capture
cap = cv2.VideoCapture(0)
if (cap.isOpened() == False):
    print("Error opening video stream or file")

# Read until the video is completed
while(cap.isOpened()):
    # Capture frame-by-frame
    ret, frame = cap.read() # reading video
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # making HSV frame

    red_lower = np.array([16, 132, 0])
    red_upper = np.array([20, 236, 255])
    mask = cv2.inRange(hsv, red_lower, red_upper)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    thresh = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)

    if ret == True:
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        cnt = sorted(contours, key=cv2.contourArea)
        print('Number of contours found = ', len(contours))

        if len(contours) >= 2:
            def get_contour_areas(contours):
                all_areas = []
                for cnt in contours:
                    area = cv2.contourArea(cnt)
                    all_areas.append(area)
                return all_areas

            sorted_contours = sorted(contours, key=cv2.contourArea, reverse=True)
            largest_item = sorted_contours[0]
            second_item = sorted_contours[1]
            cv2.drawContours(frame, largest_item, -1, (255,0,0), 1)
            cv2.drawContours(frame, second_item, -1, (255,0,0), 1)

    if ret == True:
        cv2.imshow('Frame', frame)
        cv2.imshow('mask', mask)
        cv2.imshow('gray', thresh)
        # Press Q on the keyboard to exit
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    # Break the loop
    else:
        break

# When everything is done, release the video capture object
cap.release()
# Close all the frames
cv2.destroyAllWindows()
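For the centroid question itself, a minimal sketch using cv2.moments (assuming largest_item and second_item from the code above are valid, non-degenerate contours): the centroid of a contour is (m10/m00, m01/m00).

    # Sketch: centroids of the two largest contours via image moments
    for c in (largest_item, second_item):
        M = cv2.moments(c)
        if M["m00"] != 0:  # guard against degenerate (zero-area) contours
            cx = int(M["m10"] / M["m00"])
            cy = int(M["m01"] / M["m00"])
            cv2.circle(frame, (cx, cy), 3, (255, 0, 0), -1)  # blue centroid marker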
I'm trying to segment the moving propeller in this video. My approach is to detect all black and moving pixels in order to separate the propeller from the rest.
Here is what I have tried so far:
import numpy as np
import cv2

x, y, h, w = 350, 100, 420, 500 # cropping values

cap = cv2.VideoCapture('Video Path')

while(1):
    _, frame = cap.read()
    frame = frame[y:y+h, x:x+w] # crop video
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    lower_black = np.array([0, 0, 0])
    upper_black = np.array([360, 255, 90])
    mask = cv2.inRange(hsv, lower_black, upper_black)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    nz = np.argwhere(mask)

    cv2.imshow('Original', frame)
    cv2.imshow('Propeller Segmentation', mask)

    k = cv2.waitKey(30) & 0xff # press Esc to exit
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
Screenshot from the video
Result of the segmentation
Result with cv2.createBackgroundSubtractorMOG2()
I think you should have a look at background subtraction. It should be the right approach for your problem.
OpenCV provides a good tutorial on this: Link
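For example, a minimal sketch with cv2.createBackgroundSubtractorMOG2() (the 'Video Path' placeholder and crop values are taken from the question's code; the MOG2 parameters are illustrative, not tuned):

    import cv2

    cap = cv2.VideoCapture('Video Path')  # same placeholder path as in the question
    fgbg = cv2.createBackgroundSubtractorMOG2(history=100, detectShadows=False)

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame = frame[100:100+420, 350:350+500]  # same crop as in the question
        fgmask = fgbg.apply(frame)  # moving pixels become foreground (white)
        cv2.imshow('Propeller (foreground mask)', fgmask)
        if cv2.waitKey(30) & 0xff == 27:  # Esc to exit
            break

    cap.release()
    cv2.destroyAllWindows()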
I'm a newbie in programming and I need to write code that detects a balloon on a fixed background in live video, using numpy and OpenCV, and returns the centre of the object [balloon].
Sorry about the ignorance in the question.
Since I'm new, I had trouble thinking through the logic. I don't have the resources to "teach the machine" and create a cascade XML to detect balloons, so I thought of one possible solution:
use cv2.createBackgroundSubtractorMOG2() to detect motion against the same background, and once there is some object [balloon], count all the white pixels in the live video and return its centre, given the right threshold on the number of white pixels.
The problem is that I don't know how to read a pixel's value (0-255) to tell whether it's white or black while showing the video at the same time. I suspect there is a much easier way that I couldn't find a guide for.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while(1):
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)
    img_arr = np.array(fgmask)
    cv2.imshow('frame', fgmask)
    for i in fgmask:
        for j in i:
            print(fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
I'm getting a gray video on the output, and lots of printed values that I don't know how to interpret.
I would use
changes = (fgmask > 200).sum()
to compare all pixels against an almost-white value (> 200) and count those pixels.
Then I can compare the result against some threshold to treat it as movement.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    if frame is None:
        break

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)

    #changes = sum(sum(fgmask > 200))
    changes = (fgmask > 200).sum()
    is_moving = (changes > 10000)
    print(changes, is_moving)

    cv2.imshow('frame', fgmask)

    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
print() needs some time to display text, so printing all pixels (many times in a loop) can slow the program down. So I skip that; I don't need to know the values of all pixels.
EDIT: Using the answer in how to detect region of large # of white pixels using opencv?, I added code that finds white regions and draws rectangles. The program opens two windows - one with the grayscale fgmask and one with the RGB frame - and they can end up hidden behind one another, so you may have to move one window to see the other.
EDIT: I added code which uses cv2.contourArea(cnt) and (x,y,w,h) = cv2.boundingRect(cnt) to build a list of (area, x, y, w, h) items for all contours, and then takes max(items) to get the contour with the biggest area. It then uses (x + w//2, y + h//2) as the center for the red circle.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    if frame is None:
        break

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)

    #changes = sum(sum(fgmask > 200))
    changes = (fgmask > 200).sum()
    is_moving = (changes > 10000)
    print(changes, is_moving)

    items = []

    contours, hier = cv2.findContours(fgmask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if 200 < area:
            (x, y, w, h) = cv2.boundingRect(cnt)
            cv2.rectangle(fgmask, (x, y), (x+w, y+h), 255, 2)
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            items.append((area, x, y, w, h))

    if items:
        main_item = max(items)
        area, x, y, w, h = main_item
        if w > h:
            r = w//2
        else:
            r = h//2
        cv2.circle(frame, (x + w//2, y + h//2), r, (0, 0, 255), 2)

    cv2.imshow('fgmask', fgmask)
    cv2.imshow('frame', frame)

    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
I'm new to OpenCV, and for a school project I need to detect a red and a green circle with a camera, so I've used blob detection, but it detects both colours. I think my mask is bad. Each colour is linked to a specific action.
At the moment my code detects red and green circles on the same page, but I want it to detect only the red circle on a white page.
Thanks for your help.
# Standard imports
import cv2
import numpy as np

# Read image
im = cv2.VideoCapture(0)

# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()

# Change thresholds
params.minThreshold = 100
params.maxThreshold = 200

# Filter by Area.
params.filterByArea = True
params.minArea = 200
params.maxArea = 20000

# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.1

# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.1

# Filter by Inertia
params.filterByInertia = True
params.minInertiaRatio = 0.1

blueLower = (0, 85, 170)    #100,130,50
blueUpper = (140, 110, 255) #200,200,130

while(1):
    ret, frame = im.read()
    mask = cv2.inRange(frame, blueLower, blueUpper)
    mask = cv2.erode(mask, None, iterations=0)
    mask = cv2.dilate(mask, None, iterations=0)
    frame = cv2.bitwise_and(frame, frame, mask=mask)

    # Set up the detector with default parameters.
    detector = cv2.SimpleBlobDetector_create(params)

    # Detect blobs.
    keypoints = detector.detect(mask)

    # Draw detected blobs as red circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of the blob
    im_with_keypoints = cv2.drawKeypoints(mask, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    # Display the resulting frame
    frame = cv2.bitwise_and(frame, im_with_keypoints, mask=mask)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
im.release()
cv2.destroyAllWindows()
EDIT 1: Code update
Now I have an issue where my full circle isn't detected.
No blob detection
Second version:
# Standard imports
import cv2
import numpy as np

# Read image
im = cv2.VideoCapture(0)

while(1):
    ret, frame = im.read()

    lower = (130, 150, 80)  #130,150,80
    upper = (250, 250, 120) #250,250,120
    mask = cv2.inRange(frame, lower, upper)
    lower, contours, upper = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    blob = max(contours, key=lambda el: cv2.contourArea(el))
    M = cv2.moments(blob)
    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
    canvas = im.copy()
    cv2.circle(canvas, center, 2, (0,0,255), -1)

    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

im.release()
cv2.destroyAllWindows()
You need to work out what the BGR numbers for your green are (let's say for argument's sake [0, 255, 0]), then create a mask that ignores any colours outside a tolerance around your green:
mask = cv2.inRange(image, lower, upper)
Take a look at this tutorial for a step by step.
Play around with lower and upper to get the right behaviour. Then you can find the contours in the mask:
_, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
Then go through the contours list to find the biggest one (filter out any possible noise):
blob = max(contours, key=lambda el: cv2.contourArea(el))
And that's your final 'blob'. You can find the center by doing:
M = cv2.moments(blob)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
You can draw this center onto a copy of your image, for checking:
canvas = im.copy()
cv2.circle(canvas, center, 2, (0,0,255), -1)
Obviously, this makes the assumption that there's only one green ball and nothing else green in the image. But it's a start.
EDIT - RESPONSE TO SECOND POST
I think the following should work. I haven't tested it, but you should be able to at least do a bit more debugging with the canvas and mask displayed:
# Standard imports
import cv2
import numpy as np

# Read image
cam = cv2.VideoCapture(0)

while(1):
    ret, frame = cam.read()
    if not ret:
        break

    canvas = frame.copy()

    lower = (130, 150, 80)  #130,150,80
    upper = (250, 250, 120) #250,250,120
    mask = cv2.inRange(frame, lower, upper)
    try:
        # NB: using _ as the variable name for two of the outputs, as they're not used
        _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        blob = max(contours, key=lambda el: cv2.contourArea(el))
        M = cv2.moments(blob)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        cv2.circle(canvas, center, 2, (0,0,255), -1)
    except (ValueError, ZeroDivisionError):
        pass

    cv2.imshow('frame', frame)
    cv2.imshow('canvas', canvas)
    cv2.imshow('mask', mask)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cam.release()  # note: the capture object is named cam above, not im
cv2.destroyAllWindows()
You should use the HSV colour space for better results if you want to filter by colour.
ret, frame = im.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Add this to your code
mask = cv2.inRange(frame, blueLower, blueUpper)
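Building on that, a minimal sketch for keeping only red: in OpenCV's 8-bit HSV representation hue runs 0-179 and red wraps around both ends of the range, so two masks are usually OR-ed together (the bounds below are illustrative starting points, and 'circles.png' is a hypothetical test image):

    import cv2
    import numpy as np

    frame = cv2.imread('circles.png')  # hypothetical test image
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Red straddles hue 0/179 in OpenCV, so combine two ranges
    mask_low = cv2.inRange(hsv, np.array([0, 100, 100]), np.array([10, 255, 255]))
    mask_high = cv2.inRange(hsv, np.array([170, 100, 100]), np.array([179, 255, 255]))
    red_mask = cv2.bitwise_or(mask_low, mask_high)  # green pixels fall in neither range

    cv2.imshow('red only', cv2.bitwise_and(frame, frame, mask=red_mask))
    cv2.waitKey(0)
    cv2.destroyAllWindows()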
I'm using OpenCV to locate moving cars from a traffic cam. The video I'm working with starts with the objects of interest already in the initial frame. My question is how I would go about isolating the background, without any of the vehicles, to serve as the frame of origin for subsequent motion detection.
camera = cv2.VideoCapture(video)
firstFrame = None

while True:
    (grabbed, frame) = camera.read()
    if not grabbed:
        break

    frame = cv2.resize(frame, None, fx=2, fy=2)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (11, 11), 0)

    if firstFrame is None:
        firstFrame = gray
        continue

    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    _, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    for c in cnts:
        if cv2.contourArea(c) < 500:
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    cv2.imshow('frame', frame)
    cv2.imshow('thresh', thresh)
    cv2.imshow('frame delta', frameDelta)
    if cv2.waitKey(0) & 0xFF == ord('q'):
        break

camera.release()
cv2.destroyAllWindows()
Ultimately, the first frame would be free of any vehicles, followed by the original frames of the video.
Thanks
To solve the problem you described, you should look more into object detection and classification. Locating moving cars is not strictly a background subtraction problem. An example using Haar cascades can be found here: https://github.com/andrewssobral/vehicle_detection_haarcascades
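Separately, if a vehicle-free background image is still wanted as a starting frame, one common technique (offered here as a sketch, not part of the answer above) is a per-pixel temporal median over frames sampled across the video; as long as each pixel shows the road more often than a car, the median converges to the empty background:

    import cv2
    import numpy as np

    camera = cv2.VideoCapture(video)  # same 'video' variable as in the question's code

    # Sample a subset of frames spread across the clip
    frames = []
    while len(frames) < 50:
        grabbed, frame = camera.read()
        if not grabbed:
            break
        frames.append(frame)
        # skip ahead a few frames so the samples are spread out in time
        for _ in range(10):
            camera.grab()
    camera.release()

    if frames:
        # Per-pixel temporal median: moving vehicles are outvoted by the static road
        background = np.median(np.stack(frames), axis=0).astype(np.uint8)
        cv2.imwrite('background.png', background)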