Segment black AND moving pixels - Python

I’m trying to segment the moving propeller in this video. My approach is to detect all pixels that are both black and moving, to separate the propeller from the rest.
Here is what I tried so far:
import numpy as np
import cv2

x, y, h, w = 350, 100, 420, 500  # cropping values
cap = cv2.VideoCapture('Video Path')
while True:
    ret, frame = cap.read()
    if not ret:  # stop when the video ends
        break
    frame = frame[y:y+h, x:x+w]  # crop the frame
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # OpenCV hue runs 0-179, so 179 (not 360) is the upper hue bound;
    # the low value bound (<= 90) is what selects "black" pixels
    lower_black = np.array([0, 0, 0])
    upper_black = np.array([179, 255, 90])
    mask = cv2.inRange(hsv, lower_black, upper_black)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    nz = np.argwhere(mask)  # coordinates of the masked pixels
    cv2.imshow('Original', frame)
    cv2.imshow('Propeller Segmentation', mask)
    k = cv2.waitKey(30) & 0xff  # press Esc to exit
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
Screenshot from the video
Result of the segmentation
With function cv.createBackgroundSubtractorMOG2()

I think you should have a look at background subtraction. It should be the right approach for your problem.
OpenCV provides a good tutorial on this: Link
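A minimal sketch of how that could look here, assuming the same placeholder path and crop values as in the question: combine the MOG2 foreground mask with the black-pixel mask, so that only pixels which are both dark and moving survive.

import numpy as np
import cv2

x, y, h, w = 350, 100, 420, 500
cap = cv2.VideoCapture('Video Path')
fgbg = cv2.createBackgroundSubtractorMOG2()
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame = frame[y:y+h, x:x+w]
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    black_mask = cv2.inRange(hsv, np.array([0, 0, 0]), np.array([179, 255, 90]))
    motion_mask = fgbg.apply(frame)  # moving pixels; shadows show up as 127
    _, motion_mask = cv2.threshold(motion_mask, 200, 255, cv2.THRESH_BINARY)
    # keep only pixels that are both black and moving
    propeller = cv2.bitwise_and(black_mask, motion_mask)
    cv2.imshow('Propeller', propeller)
    if cv2.waitKey(30) & 0xff == 27:  # Esc to exit
        break
cap.release()
cv2.destroyAllWindows()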

Related

How To Track A Mask In OpenCV

So far I've written a program that uses my webcam and isolates all the colours except green. There is only going to be one green circle, as there is nothing else green in my room (I'll attach an image below), but I was wondering how I could track the mask and get its x and y position and either store it in a variable or print it to the terminal. Thanks.
Image: https://i.stack.imgur.com/pil64.png
import numpy as np
import cv2 as cv

cap = cv.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
    low_green = np.array([50, 50, 100])
    high_green = np.array([100, 150, 255])
    mask = cv.inRange(hsv, low_green, high_green)
    res = cv.bitwise_and(frame, frame, mask=mask)
    cv.imshow("frame", frame)
    cv.imshow("mask", mask)
    cv.imshow("res", res)
    k = cv.waitKey(5) & 0xFF
    if k == 27:  # Esc to exit
        break
cap.release()
cv.destroyAllWindows()
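One way to get the x and y position of the green circle is to take the image moments of the mask and compute its centroid. A minimal sketch, assuming the mask contains a single green blob as described; this goes inside the while loop after mask is computed:

M = cv.moments(mask)
if M["m00"] > 0:  # guard against an empty mask (no green in view)
    cx = int(M["m10"] / M["m00"])  # centroid x
    cy = int(M["m01"] / M["m00"])  # centroid y
    print(cx, cy)  # or store the coordinates in variables
    cv.circle(frame, (cx, cy), 5, (0, 0, 255), -1)  # mark the centre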

How to detect one color in an image?

How can I detect (True/False) whether the frame contains at least one pixel of a given color, for example rgb(213, 39, 27)?
I want to process the frame only when the detection is true.
import cv2

VIDEO_URL = ''
cap = cv2.VideoCapture(VIDEO_URL)
fps = cap.get(cv2.CAP_PROP_FPS)
wait_ms = int(1000 / fps)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    # crop the region of interest (cv2.IMREAD_COLOR is an imread flag,
    # not a colour-conversion code, so plain slicing is enough here)
    img = frame[815:970, 360:1920]
    # image processing here
    if cv2.waitKey(wait_ms) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
You can use HSV images to detect any color.
E.g. suppose you want to identify the color blue in your image.
Include this code inside your while loop:
import numpy as np  # add this import at the top of the file

hsvimg = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lb = np.array([94, 80, 2])      # lower HSV bound for blue
ub = np.array([126, 255, 255])  # upper HSV bound for blue
mask = cv2.inRange(hsvimg, lb, ub)
if 255 in mask:  # any pixel inside the range?
    print("Blue color present")
This code detects blue. You can detect any other color by changing the HSV range.
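For a specific target like rgb(213, 39, 27), one way to build the range is to convert that single colour to HSV first and widen the bounds around it. A sketch, assuming a ±10 hue margin (the margins are rough and may need tuning):

import numpy as np
import cv2

pixel = np.uint8([[[27, 39, 213]]])  # rgb(213, 39, 27) in OpenCV's BGR order
h, s, v = cv2.cvtColor(pixel, cv2.COLOR_BGR2HSV)[0][0]
lb = np.array([max(int(h) - 10, 0), max(int(s) - 40, 0), max(int(v) - 40, 0)])
ub = np.array([min(int(h) + 10, 179), 255, 255])
# then, inside the loop: mask = cv2.inRange(hsvimg, lb, ub)
# note: red hues wrap around 0/179, so a second range near 179 may be needed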

Understanding how to deploy python code to pop up balloons

I'm a newbie in programming and I need to write code that detects a balloon on a fixed background using NumPy and OpenCV in live video, and returns the centre of the object [balloon].
Sorry about the ignorance of the questions.
Since I'm new, I had trouble thinking through the logic of doing it. I don't have the resources to "teach the machine" by creating a cascade XML to detect balloons, so I thought of one possible solution:
use cv2.createBackgroundSubtractorMOG2() to detect motion against the fixed background, and once some object [balloon] appears, count all the white pixels in the live video and return its centre, given the right threshold amount of white pixels.
The problem is, I don't know how to read a pixel's value (0-255) to tell whether it's white or black while showing the video at the same time. I suspect there is a much easier way that I couldn't find guides for.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)
    img_arr = np.array(fgmask)
    cv2.imshow('frame', fgmask)
    for i in fgmask:       # rows of the mask
        for j in i:        # pixels in the row
            print(fgmask)  # note: this prints the whole mask each time, not the pixel
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
I'm getting a gray video on the output and lots of values on the console that I don't know how to interpret.
I would use
changes = (fgmask > 200).sum()
to select all pixels with an almost-white value (> 200) and count them.
Then I can compare the result with some threshold to decide whether to treat it as movement.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
while True:
    ret, frame = cap.read()
    if frame is None:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)
    #changes = sum(sum(fgmask > 200))
    changes = (fgmask > 200).sum()  # count the near-white pixels
    is_moving = (changes > 10000)
    print(changes, is_moving)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break
cv2.destroyAllWindows()
cap.release()
print() needs some time to display text, so printing all pixels (many times in a loop) can slow the program down. So I skip this; I don't need to know the values of all pixels.
EDIT: Using the answer in how to detect region of large # of white pixels using opencv?, I added code that finds the white regions and draws rectangles around them. The program opens two windows - one with the grayscale fgmask and one with the RGB frame - and they can end up hidden one behind the other, so you may have to move one window to see the other.
EDIT: I added code that uses cv2.contourArea(cnt) and (x,y,w,h) = cv2.boundingRect(cnt) to build a list of items (area, x, y, w, h) for all contours, then takes max(items) to get the contour with the biggest area. It then uses (x + w//2, y + h//2) as the centre for a red circle.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
while True:
    ret, frame = cap.read()
    if frame is None:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)
    #changes = sum(sum(fgmask > 200))
    changes = (fgmask > 200).sum()
    is_moving = (changes > 10000)
    print(changes, is_moving)
    items = []
    contours, hier = cv2.findContours(fgmask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if 200 < area:  # skip tiny noise contours
            (x, y, w, h) = cv2.boundingRect(cnt)
            cv2.rectangle(fgmask, (x, y), (x+w, y+h), 255, 2)
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            items.append((area, x, y, w, h))
    if items:
        main_item = max(items)  # the contour with the biggest area
        area, x, y, w, h = main_item
        if w > h:
            r = w // 2
        else:
            r = h // 2
        cv2.circle(frame, (x + w//2, y + h//2), r, (0, 0, 255), 2)
    cv2.imshow('fgmask', fgmask)
    cv2.imshow('frame', frame)
    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break
cv2.destroyAllWindows()
cap.release()
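If you need the balloon's actual centre of mass rather than the bounding-box centre, a sketch of an alternative (not part of the original answer) using image moments of the biggest contour, placed inside the loop after findContours:

if contours:
    biggest = max(contours, key=cv2.contourArea)
    M = cv2.moments(biggest)
    if M['m00'] > 0:  # guard against degenerate contours
        cx = int(M['m10'] / M['m00'])  # centroid x
        cy = int(M['m01'] / M['m00'])  # centroid y
        cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)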

Remove background with opencv python

Here is my solution for getting a binary image:
import cv2
import numpy as np

img = cv2.imread('crop.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
ok, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
cv2.imshow('image', img)
cv2.imshow('threshold', thresh)
k = cv2.waitKey(0) & 0xff
if k == ord('q'):
    cv2.destroyAllWindows()
Below is the result I get. How can I remove the background from the hand?
original image
threshold image
You can use color detection to get a mask for the hand region. If you want to do background subtraction on video, that can be achieved by storing the background and subtracting the upcoming frames from it.
import cv2

cap = cv2.VideoCapture(1)
j = 0
while True:
    ret, frame = cap.read()
    if j == 0:
        bg = frame.copy().astype("float")
    if j < 30:
        # build the background model from the first 30 frames
        cv2.accumulateWeighted(frame, bg, 0.5)
        j = j + 1
    diff = cv2.absdiff(frame, bg.astype("uint8"))
    diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    thre, diff = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)
    cv2.imshow("j", diff)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
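For the colour-detection option mentioned above, here is a rough sketch using an HSV skin-tone range; the bounds are assumptions and usually need tuning for lighting and skin tone:

import numpy as np
import cv2

img = cv2.imread('crop.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_skin = np.array([0, 30, 60])     # assumed lower bound, tune per image
upper_skin = np.array([20, 150, 255])  # assumed upper bound, tune per image
mask = cv2.inRange(hsv, lower_skin, upper_skin)
hand = cv2.bitwise_and(img, img, mask=mask)  # keep only the skin-coloured pixels
cv2.imshow('hand mask', mask)
cv2.imshow('hand only', hand)
cv2.waitKey(0)
cv2.destroyAllWindows()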

How to detect only the left eye in a video with opencv?

I want to detect only the left eye in a video stream, but I can't. When I run this code, it detects both eyes (right and left), and sometimes it also detects the mouth or the nose as if they were an eye.
Here is the code:
import cv2
import numpy as np

faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_lefteye_2splits.xml')
video_capture = cv2.VideoCapture(0)
while True:
    ret, img = video_capture.read()
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray_img,
        scaleFactor=1.98,
        minNeighbors=8,
        minSize=(80, 80),
        flags=cv2.cv.CV_HAAR_SCALE_IMAGE  # OpenCV 2.4 flag
    )
    #print 'faces: ', faces
    for (p, q, r, s) in faces:
        #cv2.rectangle(img, (p, q), (p+r, q+s), (255, 0, 0), 3)
        face_gray = gray_img[q:q+s, p:p+r]
        face_color = img[q:q+s, p:p+r]
        eyes = eye_cascade.detectMultiScale(face_gray)
        for (ep, eq, er, es) in eyes:
            cv2.rectangle(face_color, (ep, eq), (ep+er, eq+es), (0, 255, 0), 3)
    rimg = cv2.flip(img, 1)  # mirror the image
    cv2.imshow("Video", rimg)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video_capture.release()
cv2.destroyAllWindows()
So what should I do to detect only the left eye?
PS: I'm using Python 2.7.13, NumPy 1.10.0 and OpenCV 2.4.13.3 in this program.
You can use a simple for loop to iterate through the eyes returned by the eye cascade and find the index of the eye with the minimum x-coordinate. That gives you the index of the left-most eye.
def getleftmosteye(eyes):
    leftmost = 9999999
    leftmostindex = -1
    for i in range(len(eyes)):  # check every detection, not just the first two
        if eyes[i][0] < leftmost:
            leftmost = eyes[i][0]
            leftmostindex = i
    return eyes[leftmostindex]
Now you can get the coordinates of the left eye once you have its index.
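For example, inside the existing loop you could draw a rectangle around only that eye (a sketch; the variable names match the question's code):

if len(eyes) > 0:
    (ep, eq, er, es) = getleftmosteye(eyes)
    cv2.rectangle(face_color, (ep, eq), (ep+er, eq+es), (0, 255, 0), 3)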
