How To Track A Mask In OpenCV - python

So far I've written a program that uses my webcam and isolates every colour except green. There will only be one green circle in the frame, since nothing else in my room is green (image attached below). I was wondering how I could track the mask and get its x and y position, and either store it in a variable or print it to the terminal. Thanks.
[1]: https://i.stack.imgur.com/pil64.png
import numpy as np
import cv2 as cv

cap = cv.VideoCapture(0)
while True:
    _, frame = cap.read()
    hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
    # HSV range that keeps only the green tones
    low_green = np.array([50, 50, 100])
    high_green = np.array([100, 150, 255])
    mask = cv.inRange(hsv, low_green, high_green)
    res = cv.bitwise_and(frame, frame, mask=mask)
    cv.imshow("frame", frame)
    cv.imshow("mask", mask)
    cv.imshow("res", res)
    k = cv.waitKey(5) & 0xFF
    if k == 27:  # Esc to quit
        break

cap.release()
cv.destroyAllWindows()
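A common way to get the x and y of the green blob (a hedged sketch, not code from this thread) is to take the image moments of the mask: m10/m00 and m01/m00 give the centroid of the white pixels. Reusing the same HSV range as above:

import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
    mask = cv.inRange(hsv, np.array([50, 50, 100]), np.array([100, 150, 255]))

    # image moments of the mask: the centroid of all white pixels
    M = cv.moments(mask)
    if M["m00"] > 0:  # skip frames where the mask is empty
        cx = int(M["m10"] / M["m00"])
        cy = int(M["m01"] / M["m00"])
        print("green circle at:", cx, cy)
        cv.circle(frame, (cx, cy), 5, (0, 0, 255), -1)

    cv.imshow("frame", frame)
    cv.imshow("mask", mask)
    if cv.waitKey(5) & 0xFF == 27:
        break

cap.release()
cv.destroyAllWindows()

If the mask can contain noise besides the circle, taking the largest contour from cv.findContours and computing its centroid instead would be more robust.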

Related

Python, drawing a polyline to a color detector code using webcam

I'm using Python 3 and OpenCV (4.6.0) to write a script that:
detects a specific colour tone (green) and draws, with a polyline, the path followed by the green object while the webcam runs;
calculates the distance travelled by the object.
So far I'm trying:
import numpy as np
import cv2
from math import dist

cap = cv2.VideoCapture(0)
distance = 0
centroide = {}
centroide[0] = (0, 0)
i = 1
while True:
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_green = np.array([50, 50, 50])
    upper_green = np.array([70, 255, 255])
    mask = cv2.inRange(hsv, lower_green, upper_green)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cont_sorted = sorted(contours, key=cv2.contourArea, reverse=True)[:5]
    for each in contours:
        area = cv2.contourArea(each)
        if len(cont_sorted) > 0 and area > 300:
            x, y, w, h = cv2.boundingRect(cont_sorted[0])
            centroide[i] = [x + (w / 2), y + (h / 2)]
            distance += dist(centroide[i], centroide[i - 1])
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 5)
            cv2.line(frame, (int(centroide[i - 1][0]), int(centroide[i - 1][1])),
                     (int(centroide[i][0]), int(centroide[i][1])), (255, 0, 0), 5)
            # cv2.polylines(frame, np.int32(np.array(centroide)), (255, 0, 0), 5)
            i = i + 1
    cv2.imshow('orig', frame)
    distancecm = distance * 0.0264583333
    print('distance in cm:', distancecm)
    if cv2.waitKey(30) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
With the previous code, a line is drawn between one centroid and the next, but the segments from previous centroids do not stay on screen, so the object's path can't be observed. Any suggestions?
Thanks
LJ
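A likely cause (my reading, not a confirmed answer from the thread): cap.read() returns a fresh frame on every iteration, so a segment drawn on the previous frame disappears along with it. One fix is to keep every centroid in a list and redraw the whole path with cv2.polylines on each new frame. A minimal sketch, reusing the question's HSV range and area threshold:

import numpy as np
import cv2
from math import dist  # Python 3.8+

cap = cv2.VideoCapture(0)
path = []        # every centroid seen so far
distance = 0.0

while True:
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, np.array([50, 50, 50]), np.array([70, 255, 255]))
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    if contours:
        biggest = max(contours, key=cv2.contourArea)
        if cv2.contourArea(biggest) > 300:
            x, y, w, h = cv2.boundingRect(biggest)
            center = (x + w // 2, y + h // 2)
            if path:
                distance += dist(center, path[-1])
            path.append(center)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 5)

    # redraw the entire path on every new frame so it stays visible
    if len(path) > 1:
        pts = np.array(path, dtype=np.int32).reshape(-1, 1, 2)
        cv2.polylines(frame, [pts], False, (255, 0, 0), 5)

    cv2.imshow('orig', frame)
    if cv2.waitKey(30) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()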

Segment black AND moving Pixels

I'm trying to segment the moving propeller in this video. My approach is to detect all pixels that are both black and moving, to separate the propeller from the rest.
Here is what I have tried so far:
import numpy as np
import cv2

x, y, h, w = 350, 100, 420, 500  # cropping values

cap = cv2.VideoCapture('Video Path')
while True:
    _, frame = cap.read()
    frame = frame[y:y+h, x:x+w]  # crop video
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_black = np.array([0, 0, 0])
    # note: 8-bit hue in OpenCV only goes up to 179, so 360 simply means "any hue"
    upper_black = np.array([360, 255, 90])
    mask = cv2.inRange(hsv, lower_black, upper_black)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    nz = np.argwhere(mask)  # coordinates of all masked pixels
    cv2.imshow('Original', frame)
    cv2.imshow('Propeller Segmentation', mask)
    k = cv2.waitKey(30) & 0xff  # press Esc to exit
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
Screenshot from the video
Result of the segmentation
Using cv2.createBackgroundSubtractorMOG2()
I think you should have a look at background subtraction; it should be the right approach for your problem.
OpenCV provides a good tutorial on this: Link
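A rough sketch of combining the two ideas (my own combination, not from the tutorial): AND the MOG2 foreground mask with the black-colour mask, so only pixels that are both dark and moving survive. The crop values and the 'Video Path' placeholder are carried over from the question:

import numpy as np
import cv2

x, y, h, w = 350, 100, 420, 500
cap = cv2.VideoCapture('Video Path')  # placeholder path from the question
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame = frame[y:y+h, x:x+w]  # same crop as the question

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    black = cv2.inRange(hsv, np.array([0, 0, 0]), np.array([179, 255, 90]))

    moving = fgbg.apply(frame)  # foreground (moving) pixels, 0 or 255
    # keep only pixels that are both black AND moving
    propeller = cv2.bitwise_and(black, moving)

    cv2.imshow('Original', frame)
    cv2.imshow('Propeller Segmentation', propeller)
    if cv2.waitKey(30) & 0xFF == 27:
        break

cap.release()
cv2.destroyAllWindows()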

how to combine the running frames with the new masking frames in opencv?

I am trying to add a blur filter to only a specified range of hue, saturation, and value, and then combine my (frame, smoothed) images together. But when I do, I get a live feed that is pure white instead of the colours in my specified range. Is there any way to keep the mask from turning pure white?
Here is my code sample:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_red = np.array([128, 0, 0])
    upper_red = np.array([255, 255, 255])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    kernel = np.ones((15, 15), np.float32) / 225  # 15x15 averaging kernel
    smoothed = cv2.filter2D(res, -1, kernel)
    fused_img = cv2.add(frame, smoothed)
    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', fused_img)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
I just now realized what happens: cv2.add sums the two images and the values saturate to pure white. What you need is cv2.addWeighted; change the alpha around until you are happy!
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_red = np.array([0, 0, 0])
    upper_red = np.array([255, 255, 255])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    kernel = np.ones((15, 15), np.float32) / 225
    smoothed = cv2.filter2D(res, -1, kernel)
    # blend instead of saturating: 0.1 * smoothed + 1.0 * frame
    fused_img = cv2.addWeighted(smoothed, 0.1, frame, 1, 0)
    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', fused_img)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()

Understanding how to deploy python code to pop up balloons

I'm a newbie to programming and I need to write code that detects a balloon against a fixed background in live video, using NumPy and OpenCV, and returns the centre of the object (the balloon).
Sorry about the ignorance in the question.
Since I'm new, I had trouble thinking through the logic. I don't have the resources to train a model or create a cascade XML to detect balloons, so I thought of one possible solution:
use cv2.createBackgroundSubtractorMOG2() to detect motion against the fixed background, and once some object (the balloon) appears, count all the white pixels in the live video and, given the right threshold on the number of white pixels, return its centre.
The problem is, I don't know how to read a pixel's value (0-255) to tell whether it's white or black while showing the video at the same time. I suspect there is a much easier way that I couldn't find a guide for.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)
    img_arr = np.array(fgmask)
    cv2.imshow('frame', fgmask)
    for i in fgmask:
        for j in i:
            print(fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
I'm getting a gray video on the output, and lots of values on the terminal that I don't know how to interpret.
I would use
changes = (fgmask > 200).sum()
to find all pixels with an almost-white value (> 200) and count them.
Then I can compare the result against some threshold to decide whether to treat it as movement.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
while True:
    ret, frame = cap.read()
    if frame is None:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)
    #changes = sum(sum(fgmask > 200))
    changes = (fgmask > 200).sum()
    is_moving = (changes > 10000)
    print(changes, is_moving)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
print() needs some time to display text, so printing all pixels (many times in a loop) can slow the program down. That's why I skip it; I don't need to know the value of every pixel.
EDIT: Using the answer in how to detect region of large # of white pixels using opencv?, I added code that finds white regions and draws rectangles around them. The program opens two windows, one with the grayscale fgmask and one with the RGB frame, and they can hide behind one another; you may have to move one window to see the other.
EDIT: I added code that uses cv2.contourArea(cnt) and (x, y, w, h) = cv2.boundingRect(cnt) to build a list of items (area, x, y, w, h) for all contours, then takes max(items) to get the contour with the biggest area, and uses (x + w//2, y + h//2) as the centre of a red circle.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
while True:
    ret, frame = cap.read()
    if frame is None:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)
    #changes = sum(sum(fgmask > 200))
    changes = (fgmask > 200).sum()
    is_moving = (changes > 10000)
    print(changes, is_moving)
    items = []
    contours, hier = cv2.findContours(fgmask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if 200 < area:
            (x, y, w, h) = cv2.boundingRect(cnt)
            cv2.rectangle(fgmask, (x, y), (x+w, y+h), 255, 2)
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            items.append((area, x, y, w, h))
    if items:
        main_item = max(items)  # contour with the biggest area
        area, x, y, w, h = main_item
        if w > h:
            r = w // 2
        else:
            r = h // 2
        cv2.circle(frame, (x + w//2, y + h//2), r, (0, 0, 255), 2)
    cv2.imshow('fgmask', fgmask)
    cv2.imshow('frame', frame)
    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()

How to separately detect hand from body after skin detection?

I have used skin detection to detect skin colour, but in the process other body parts get detected as well. Is it possible to detect the hand alone, or is there any algorithm to distinguish the hand from the rest of the body?
Here is the image:
My code is provided below:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    HSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    l = np.array([0, 50, 80], dtype="uint8")
    u = np.array([23, 255, 255], dtype="uint8")
    skinDetect = cv2.inRange(HSV, l, u)
    cv2.imshow('Masked Image', skinDetect)
    # three return values is the OpenCV 3.x signature; OpenCV 4.x returns only (contours, hierarchy)
    _, contours, hierarchy = cv2.findContours(skinDetect, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        # discard areas that are too small
        if h < 30 or w < 30:
            continue
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
    cv2.imshow('Rectangle', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
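One common heuristic (a hedged suggestion, not an answer from this thread): skin colour alone cannot tell a hand from a face, so one option is to detect the face with a Haar cascade first, erase that region from the skin mask, and treat the remaining large skin blobs as hand candidates. A minimal sketch, assuming OpenCV 4.x and the bundled haarcascade_frontalface_default.xml:

import numpy as np
import cv2

# Haar face model that ships with opencv-python, located via cv2.data
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    skin = cv2.inRange(hsv, np.array([0, 50, 80], dtype="uint8"),
                       np.array([23, 255, 255], dtype="uint8"))

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for (fx, fy, fw, fh) in face_cascade.detectMultiScale(gray, 1.3, 5):
        # erase the face (plus a margin) from the skin mask
        cv2.rectangle(skin, (fx - 10, fy - 10), (fx + fw + 10, fy + fh + 10), 0, -1)

    contours, _ = cv2.findContours(skin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        if w >= 30 and h >= 30:  # remaining large skin blobs are likely hands
            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)

    cv2.imshow('Hands', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

This only works while the face is frontal enough for the cascade to find it; for anything more robust, a trained hand detector would be needed.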
