Understanding how to deploy python code to pop up balloons

I'm a newbie in programming and I need to write code that detects a balloon against a fixed background, using numpy and OpenCV on live video, and returns the centre of the object [balloon].
Sorry about the ignorance of the question.
Since I'm new, I had trouble working out the logic for this. I don't have the resources to "teach the machine" and create a cascade XML to detect balloons, so I thought of one possible solution:
use cv2.createBackgroundSubtractorMOG2() to detect motion against the same background, and once there is some object [balloon], count all the white pixels in the live video and return its centre, given the right threshold amount of white pixels.
The problem is, I don't know how to get the value of a pixel (0-255) to know whether it's white or black while showing the video at the same time. I suspect there is a much easier way that I couldn't find guides for.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while(1):
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)
    img_arr = np.array(fgmask)
    cv2.imshow('frame', fgmask)
    for i in fgmask:
        for j in i:
            print(fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
I'm getting gray video on the output and lots of values on the console that I don't know how to interpret.
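For the pixel-value part of the question: fgmask is already a NumPy array, so a single value can be read by plain indexing instead of looping over every pixel. A minimal sketch (the coordinates are made up for illustration):

    value = fgmask[100, 200]        # intensity 0-255 at row y=100, column x=200
    white = (fgmask > 200).sum()    # count of near-white pixels in the whole mask
    print(value, white)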

I would use
changes = (fgmask > 200).sum()
to compare all pixels against a near-white value (>200) and count those pixels.
Then I can compare the result with some threshold to treat it as movement.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    if frame is None:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)
    #changes = sum(sum(fgmask>200))
    changes = (fgmask > 200).sum()
    is_moving = (changes > 10000)
    print(changes, is_moving)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
print() needs some time to display the text, so printing all pixels (many times in a loop) can slow the program down, which is why I skip that. I don't need to know the values of all pixels.
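If you do still want to inspect values occasionally without stalling the loop, printing only every Nth frame is one compromise; a minimal sketch (the frame counter is my addition):

import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
frame_no = 0

while True:
    ret, frame = cap.read()
    if frame is None:
        break
    fgmask = fgbg.apply(frame)
    changes = (fgmask > 200).sum()
    frame_no += 1
    if frame_no % 30 == 0:            # print roughly once a second at ~30 fps
        print(changes)
    cv2.imshow('frame', fgmask)
    if cv2.waitKey(10) & 0xff == 27:  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()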
EDIT: Using the answer in how to detect region of large # of white pixels using opencv?, I added code which finds white regions and draws rectangles around them. The program opens two windows - one with the grayscale fgmask and one with the RGB frame - and they can end up hidden one behind the other; you may have to move one window to see the other.
EDIT: I added code which uses cv2.contourArea(cnt) and (x,y,w,h) = cv2.boundingRect(cnt) to build a list of items (area, x, y, w, h) for all contours, and then takes max(items) to get the contour with the biggest area. It then uses (x + w//2, y + h//2) as the center for a red circle.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    if frame is None:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)
    #changes = sum(sum(fgmask>200))
    changes = (fgmask > 200).sum()
    is_moving = (changes > 10000)
    print(changes, is_moving)

    items = []
    contours, hier = cv2.findContours(fgmask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if 200 < area:
            (x, y, w, h) = cv2.boundingRect(cnt)
            cv2.rectangle(fgmask, (x, y), (x+w, y+h), 255, 2)
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            items.append((area, x, y, w, h))

    if items:
        main_item = max(items)
        area, x, y, w, h = main_item
        if w > h:
            r = w//2
        else:
            r = h//2
        cv2.circle(frame, (x + w//2, y + h//2), r, (0, 0, 255), 2)

    cv2.imshow('fgmask', fgmask)
    cv2.imshow('frame', frame)

    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
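A possible refinement (my addition, not part of the original answer): instead of the bounding-box centre, the contour centroid from image moments could be used, which tracks irregular shapes a little better. A minimal sketch for inside the same for cnt in contours: loop:

    M = cv2.moments(cnt)
    if M['m00'] != 0:                       # guard against zero-area contours
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)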

Related

Segment black AND moving Pixels

I'm trying to segment the moving propeller in this video. My approach is to detect all pixels that are both black and moving, to separate the propeller from the rest.
Here is what I tried so far:
import numpy as np
import cv2

x, y, h, w = 350, 100, 420, 500  # cropping values

cap = cv2.VideoCapture('Video Path')

while(1):
    _, frame = cap.read()
    frame = frame[y:y+h, x:x+w]  # crop video
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_black = np.array([0, 0, 0])
    upper_black = np.array([360, 255, 90])  # note: OpenCV hue only spans 0-179, so 360 behaves like 179
    mask = cv2.inRange(hsv, lower_black, upper_black)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    nz = np.argwhere(mask)
    cv2.imshow('Original', frame)
    cv2.imshow('Propeller Segmentation', mask)
    k = cv2.waitKey(30) & 0xff  # press Esc to exit
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
Screenshot from the Video
Result of the Segmentation
With the function cv.createBackgroundSubtractorMOG2():
I think you should have a look at background subtraction. It should be the right approach for your problem.
OpenCV provides a good tutorial on this: Link
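To sketch how that could combine with the black mask from the question - keep only pixels that are both black (the inRange mask) and moving (the subtractor's foreground mask). This is untested on the actual video, and the MOG2 parameters are left at their defaults:

import numpy as np
import cv2

cap = cv2.VideoCapture('Video Path')
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame = frame[100:100+420, 350:350+500]       # same crop as the question
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    black = cv2.inRange(hsv, np.array([0, 0, 0]), np.array([180, 255, 90]))
    moving = fgbg.apply(frame)                    # foreground (moving) pixels
    propeller = cv2.bitwise_and(black, moving)    # black AND moving
    cv2.imshow('Propeller', propeller)
    if cv2.waitKey(30) & 0xff == 27:
        break

cap.release()
cv2.destroyAllWindows()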

OpenCV won't stream/update my video. How do I update the imshow windows? [duplicate]

This question already has answers here:
What does OpenCV's cvWaitKey() function do? (9 answers)
Closed 2 years ago.
I am having difficulty getting my video feed to work. I am trying to do simple object detection, roughly following this tutorial, but I have come across an issue. For some reason the imshow windows aren't updating; they just keep showing the first frame. Any idea why? I am using cv2.VideoCapture and updating the frames every loop.
From what I can tell, the frames are successfully updating: if I hold my hand up close to the camera, I can see the output values for the frames drop to around [0, 0, 0], and when I take it away they shoot back up as color comes back in.
Here is my code:
# Imports
from imutils.video import VideoStream
import numpy as np
import cv2
import imutils
import time

# CONSTANTS
MIN_AREA = 500

vs = cv2.VideoCapture(0)
#vs = VideoStream(src=0).start()
time.sleep(2)

firstFrame = None
secondFrame = None

while True:
    frame = vs.read()
    if frame[0] is False:  # If read returned False, there was no frame to grab.
        print("Error getting frame")
        exit()
    else:  # Gets the image
        frame = frame[1]

    # Resize to make the image less intensive to process
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Convert to gray to make the image easier to run through Gaussian blur.
    gray = cv2.GaussianBlur(gray, (21, 21), 0)  # Smooths out the pixels to get rid of any high variation between pixel intensities in a given region (x, x)

    # Makes sure I am always comparing the last 2 frames
    if firstFrame is None:
        print("Entered 1st")
        firstFrame = gray
        continue
    elif secondFrame is None:
        print("Entered 2nd")
        secondFrame = gray
    else:
        print("Entered else")
        firstFrame = secondFrame
        secondFrame = gray

    # Compute abs difference between current frame and first frame.
    frameDelta = cv2.absdiff(firstFrame, secondFrame)  # Simple subtraction of pixel intensities
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]  # Thresholding the frameDelta. Only showing changes greater than x pixels, given by 2nd parameter argument.
    thresh = cv2.dilate(thresh, None, iterations=2)
    contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)

    # Loop over the contours.
    # If the current contour is too small, ignore it
    for c in contours:
        if cv2.contourArea(c) < MIN_AREA:
            continue
        # Else a bounding box is drawn around it
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # Showing frames
    cv2.imshow("Normal", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)

vs.release()
cv2.destroyAllWindows()
Found the answer!
Apparently, I needed to add this code at the end of the loop:

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

Although I don't know why.
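The why (standard OpenCV behaviour, added for completeness): cv2.imshow only queues the image; it is cv2.waitKey that runs the HighGUI event loop, which actually repaints the windows and processes keyboard input. Without a waitKey call inside the loop, the windows are created but never refreshed. It belongs at the end of the loop body:

    # at the end of the while loop, after the imshow calls
    cv2.imshow("Frame Delta", frameDelta)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # pumps GUI events; windows repaint here
        break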

How can this code be modified to improve its stability at detecting eye gaze/pupil? In low light this code performs really badly

I am using the Hough circle transform to detect the eye pupil. If there is any shadow or slightly low light, it starts detecting unusual circles around the eyes. I have tried several filters but they did not help much.
I have tried Gaussian blur, median blur and plain blur to reduce the noise, but am not getting good results.
import numpy as np
import cv2

cap = cv2.VideoCapture(1)
count = 0

while True:
    # Capture frame-by-frame
    _, frame = cap.read()
    roi = frame[0:480, 0:840]
    cv2.imshow("roi", roi)
    cimg = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    lower_black = np.array([0, 0, 0])
    upper_black = np.array([180, 255, 50])
    mask = cv2.inRange(cimg, lower_black, upper_black)
    mask = cv2.blur(mask, (9, 9))  # cv2.blur takes (src, ksize); it has no sigma argument
    # mask = cv2.medianBlur(mask,)
    #circles = cv2.HoughCircles(cimg,cv2.HOUGH_GRADIENT,1,300,param1=300,param2=20,minRadius=10,maxRadius=40)
    # circles = cv2.HoughCircles(grey,cv2.HOUGH_GRADIENT,minDist=30,minRadius=0,maxRadius=0)
    #circles = cv2.HoughCircles(cimg,cv2.HOUGH_GRADIENT,100,200)
    circles = cv2.HoughCircles(mask, cv2.HOUGH_GRADIENT, 1, 300,
                               param1=300, param2=20, minRadius=10, maxRadius=40)
    if circles is None:
        cv2.imshow("roi", mask)
        print("Not Found")
        continue

    a = circles.tolist()
    lst1 = [item[0] for item in a[0]]
    lst2 = [item[1] for item in a[0]]

    i = 0
    left_x = 0
    right_x = 0
    while i < len(lst1):
        if i == 0:
            left_x = lst1[i]
        if i == 1:
            right_x = lst1[i]
        i = i + 1

    i = 0
    left_y = 0
    right_y = 0
    while i < len(lst2):
        if i == 0:
            left_y = lst2[i]
        if i == 1:
            right_y = lst2[i]
        i = i + 1

    print("whole List")
    print(a)
    print("Left_X", left_x)
    print("Right_X", right_x)
    print("Left_y", left_y)
    print("Right_y", right_y)

    circles = np.uint16(np.around(circles))  # round to ints so cv2.circle accepts the coordinates
    for i in circles[0, :]:
        cv2.circle(mask, (i[0], i[1]), i[2], (0, 255, 0), 2)
        # draw the center of the circle
        cv2.circle(mask, (i[0], i[1]), 2, (0, 0, 255), 3)

    # Display the resulting frame
    cv2.imshow('hsv', mask)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
I want to detect the pupils. The circles need to be stable.
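One trick to make the circles more stable across frames (my suggestion, not something from the post) is temporal smoothing: blend each new detection into a running average so a single bad frame can't make the circle jump. A minimal sketch, with a hypothetical helper smooth_circle:

import numpy as np

def smooth_circle(circles, prev, alpha=0.3):
    # blend the first detected (x, y, r) into a running average;
    # lower alpha = steadier circle, slower to follow real movement
    if circles is None:
        return prev
    current = np.array(circles[0][0][:3], dtype=np.float32)
    if prev is None:
        return current
    return alpha * current + (1 - alpha) * prev

Inside the loop you would keep smooth = smooth_circle(circles, smooth) after HoughCircles and draw from smooth.astype(int) instead of the raw detection. In low light it could also help to raise param2, so the accumulator needs more votes before reporting a circle.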

Bounding box not showing up in background subtracted video using opencv

import numpy as np
import cv2 as cv

cap = cv.VideoCapture("walking")
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
fgbg = cv.bgsegm.createBackgroundSubtractorGMG()

while(1):
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    fgmask = cv.morphologyEx(fgmask, cv.MORPH_OPEN, kernel)
    im2, contours, hierarchy = cv.findContours(fgmask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    cv.drawContours(fgmask, contours, -1, (0, 255, 0), 3)

# note: the block below is not inside the while loop
if len(contours) > 0:
    for count in contours:
        x, y, w, h = cv.boundingRect(count)
        cv.rectangle(fgmask, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv.imshow('frame', fgmask)
cv.imshow("stan", frame)
k = cv.waitKey(30) & 0xff
if k == 27:
    break

cap.release()
cv.destroyAllWindows()
This is my code. I am trying to get a box around the contours in the video, which in this case is of multiple pedestrians walking. No box is showing up at all; am I making a simple mistake somewhere?
Indentation bug. The drawing of the bounding boxes for the detected contours, as well as the showing of the results, is not in the while loop. Therefore you are just reading in frames and finding their contours, but you're not displaying them in the display window.
You need to do that for the frame to update as you're reading in the video.
Keep the while loop and the first five lines that follow it as they are, and indent everything from if len(contours) > 0: down to the break so that it runs inside the loop:
import numpy as np
import cv2 as cv

cap = cv.VideoCapture("walking")
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
fgbg = cv.bgsegm.createBackgroundSubtractorGMG()

while(1):
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    fgmask = cv.morphologyEx(fgmask, cv.MORPH_OPEN, kernel)
    im2, contours, hierarchy = cv.findContours(fgmask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    cv.drawContours(fgmask, contours, -1, (0, 255, 0), 3)
    if len(contours) > 0:
        for count in contours:
            x, y, w, h = cv.boundingRect(count)
            cv.rectangle(fgmask, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv.imshow('frame', fgmask)
    cv.imshow("stan", frame)
    k = cv.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv.destroyAllWindows()
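One more detail worth knowing (my observation, not part of the answer): fgmask is single-channel, so a colour tuple like (0,255,0) is truncated to its first value, 0, and the rectangles come out black on the mostly black mask. Drawing on frame, or converting the mask to BGR first, makes them visible:

    # a minimal sketch: give the mask 3 channels so coloured boxes show up
    fgmask_bgr = cv.cvtColor(fgmask, cv.COLOR_GRAY2BGR)
    cv.rectangle(fgmask_bgr, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv.imshow('frame', fgmask_bgr)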

Red and yellow triangles detection using openCV in Python

I am trying to detect red triangles and yellow triangles, and to differentiate between them, using OpenCV in Python. I am a beginner.
As a first step, I would like to detect, count (yellow and red) and mark with a rectangle all the triangles the camera can see. I would also like to find their centers of mass.
For the moment, I only detect a single triangle at a time, without finding its color.
My center-of-mass calculation does not work, giving me the error:
centroid_x = int(M['m10']/M['m00'])
ZeroDivisionError: float division by zero
I wrote the following code, inspired by examples from the web:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
print(cap.get(3))
print(cap.get(4))

# changing display size
ret = cap.set(3, 320)
ret = cap.set(4, 240)

def getthresholdedimg(hsv):
    yellow = cv2.inRange(hsv, np.array((10,100,100)), np.array((30,255,255)))
    red = cv2.inRange(hsv, np.array((0,0,0)), np.array((190,255,255)))
    both = cv2.add(yellow, red)
    return both

def nothing(x):
    pass

# Create a black image, a window
img = np.zeros((300,512,3), np.uint8)
cv2.namedWindow('image')

while(True):
    thr1 = 50
    thr2 = 110

    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gaussian_blur = cv2.GaussianBlur(gray, (5,5), 0)

    # Our operations on the frame come here
    canny = cv2.Canny(gray, thr1, thr2)
    canny_blur = cv2.Canny(gaussian_blur, thr1, thr2)

    # Our operations on the frame come here
    contours, hier = cv2.findContours(canny, 1, 2)
    for cnt in contours:
        approx = cv2.approxPolyDP(cnt, 0.02*cv2.arcLength(cnt, True), True)
        if len(approx) == 3:
            cv2.drawContours(frame, [cnt], 0, (0,255,0), 2)
            tri = approx
            M = cv2.moments(cnt)
            centroid_x = int(M['m10']/M['m00'])
            centroid_y = int(M['m01']/M['m00'])
            cv2.circle(img, (centroid_x, centroid_y), 3, 255, -1)
            for vertex in tri:
                cv2.circle(frame, (vertex[0][0], vertex[0][1]), 3, (64,0,128), -1)
                cv2.line(img, (vertex[0][0], vertex[0][1]), (centroid_x, centroid_y), (0,0,255), 1)

    # Display the resulting frame
    cv2.imshow('normal flux', frame)
    cv2.imshow('gray conversion', gray)
    cv2.imshow('canny edges conversion', canny)
    cv2.imshow('canny edges gaussian blur', canny_blur)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
Can you help me please?
Maybe you want to do
M = cv2.moments(tri)
instead of M = cv2.moments(cnt)?
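Either way, the ZeroDivisionError itself comes from contours whose area (M['m00']) is zero, so guarding the division is worth doing regardless; a minimal sketch:

    M = cv2.moments(cnt)
    if M['m00'] != 0:                         # skip degenerate (zero-area) contours
        centroid_x = int(M['m10'] / M['m00'])
        centroid_y = int(M['m01'] / M['m00'])
        cv2.circle(img, (centroid_x, centroid_y), 3, 255, -1)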
Your camera might not be reading the frame.
Try this after your cap = cv2.VideoCapture(0):

while(1):
    # Gets retval and frames from each video
    ret, frame = cap.read()
    # Check to see if retval is not None or empty
    if not ret:
        break
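Back to the original goal of telling red triangles from yellow ones: one option (a sketch of mine, not from the answers above; the red HSV band is rough and needs tuning) is to run the contour search on each colour mask separately instead of adding them with cv2.add:

import numpy as np
import cv2

def find_triangles(mask):
    # return centroids of triangular contours found in a binary mask
    centers = []
    contours, hier = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
        M = cv2.moments(cnt)
        if len(approx) == 3 and M['m00'] != 0:
            centers.append((int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])))
    return centers

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
yellow = cv2.inRange(hsv, np.array((10, 100, 100)), np.array((30, 255, 255)))
red = cv2.inRange(hsv, np.array((0, 100, 100)), np.array((10, 255, 255)))  # rough red band
print('yellow triangles:', find_triangles(yellow))
print('red triangles:', find_triangles(red))
cap.release()

Note that the question's red range of (0,0,0)-(190,255,255) matches nearly every pixel in the frame, which is why the colours could not be separated.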
