I have used skin detection to detect skin color, but in the process other body parts get detected as well. Is it possible to detect the hand alone, or is there an algorithm to distinguish between the hand and the rest of the body?
Here is the image:
My code is provided below:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    HSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    l = np.array([0, 50, 80], dtype="uint8")
    u = np.array([23, 255, 255], dtype="uint8")
    skinDetect = cv2.inRange(HSV, l, u)
    cv2.imshow('Masked Image', skinDetect)
    _, contours, hierarchy = cv2.findContours(skinDetect, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        # discard areas that are too small
        if h < 30 or w < 30:
            continue
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow('Rectangle', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
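One possible heuristic for telling the hand apart from other skin-coloured regions (this is only a hedged sketch, not part of the question's code, and the thresholds are assumptions you would need to tune) is that a hand contour usually has several deep convexity defects, namely the gaps between the fingers:

import cv2

def looks_like_hand(cnt, min_area=2000, min_defect_depth=10000, min_defects=3):
    # Reject small blobs first
    if cv2.contourArea(cnt) < min_area:
        return False
    hull = cv2.convexHull(cnt, returnPoints=False)
    if hull is None or len(hull) < 4:
        return False
    try:
        defects = cv2.convexityDefects(cnt, hull)
    except cv2.error:
        # Self-intersecting contours can make convexityDefects fail
        return False
    if defects is None:
        return False
    # Each row of defects[:, 0] is (start, end, farthest point, depth * 256);
    # count only the deep defects, which roughly correspond to finger gaps.
    deep = sum(1 for d in defects[:, 0] if d[3] > min_defect_depth)
    return deep >= min_defects

Inside the contour loop above you could then keep only the contours for which looks_like_hand(cnt) returns True.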
I'm using Python 3 and OpenCV (4.6.0) to write a script that:
detects a specific color tone (green) and draws (using a polyline) the path followed by the green object while the webcam is in use;
calculates the distance traveled by the object.
So far, this is what I have tried:
import numpy as np
import cv2
from math import dist

cap = cv2.VideoCapture(0)
distance = 0
distcm = 0
centroide = {}
mask = {}
centroide[0] = (0, 0)
i = 1
while True:
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_green = np.array([50, 50, 50])
    upper_green = np.array([70, 255, 255])
    mask = cv2.inRange(hsv, lower_green, upper_green)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    contourns, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cont_sorted = sorted(contourns, key=cv2.contourArea, reverse=True)[:5]
    for each in contourns:
        area = cv2.contourArea(each)
        if len(cont_sorted) > 0 and area > 300:
            x, y, w, h = cv2.boundingRect(cont_sorted[0])
            centroide[i] = [x + (w / 2), y + (h / 2)]
            distance += dist(centroide[i], centroide[i - 1])
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 5)
            mask = cv2.line(frame, (int(centroide[i - 1][0]), int(centroide[i - 1][1])), (int(centroide[i][0]), int(centroide[i][1])), (255, 0, 0), 5)
            # mask = cv2.polylines(frame, np.int32(np.array(centroide)), (255, 0, 0), 5)
            i = i + 1
    cv2.imshow('orig', frame)
    distancecm = distance * 0.0264583333
    print('distancia en cm:', distancecm)
    if cv2.waitKey(30) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
With the previous code, a line is only drawn between one centroid and the next; the lines between earlier centroids do not stay on screen, so the path of the object cannot be observed. Any suggestions?
Thanks
LJ
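One way to keep the whole path visible is to store every centroid in a list and redraw the full polyline on each new frame. Below is a minimal sketch of that idea, assuming the same webcam capture and green HSV range as in the question; the variable names (path, distance_px) and the area threshold are my own choices:

import numpy as np
import cv2
from math import dist

cap = cv2.VideoCapture(0)
path = []            # every centroid seen so far
distance_px = 0.0    # accumulated path length in pixels

while True:
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, np.array([50, 50, 50]), np.array([70, 255, 255]))
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        c = max(contours, key=cv2.contourArea)
        if cv2.contourArea(c) > 300:
            x, y, w, h = cv2.boundingRect(c)
            centre = (x + w // 2, y + h // 2)
            if path:
                distance_px += dist(centre, path[-1])
            path.append(centre)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
    if len(path) > 1:
        # Redraw the whole stored path every frame so earlier segments stay visible
        cv2.polylines(frame, [np.array(path, dtype=np.int32)], False, (255, 0, 0), 2)
    cv2.imshow('path', frame)
    if cv2.waitKey(30) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

Because the path is rebuilt from the stored points on each frame, the earlier segments remain on screen, and distance_px can still be converted to centimetres at the end exactly as in the question.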
My code does the following:
Grab the video frame.
Convert the video frame to the HSV colour space.
Split the frame into individual components (separate images for H, S, and V).
Apply a threshold to each component.
Locate the centroid and apply the bounding circle.
I can find the two largest contours. How do I calculate the moments of these two largest contours in order to find their centroids?
Here is an example image to help: blue marks the centroids, red the minimum enclosing circles, and white the laser pointers (after the threshold has been applied).
# Dependencies
import cv2
import numpy as np
import imutils

# Video capture
cap = cv2.VideoCapture(0)
if cap.isOpened() == False:
    print("Error opening video stream or file")

# Read until the video is completed
while cap.isOpened():
    # Capture frame-by-frame
    ret, frame = cap.read()  # reading video
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # making HSV frame
    red_lower = np.array([16, 132, 0])
    red_upper = np.array([20, 236, 255])
    mask = cv2.inRange(hsv, red_lower, red_upper)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    thresh = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    if ret == True:
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        cnt = sorted(contours, key=cv2.contourArea)
        print('Number of contours found = ', len(contours))
        if len(contours) >= 2:
            def get_contour_areas(contours):
                all_areas = []
                for cnt in contours:
                    area = cv2.contourArea(cnt)
                    all_areas.append(area)
                return all_areas
            sorted_contours = sorted(contours, key=cv2.contourArea, reverse=True)
            largest_item = sorted_contours[0]
            second_item = sorted_contours[1]
            cv2.drawContours(frame, largest_item, -1, (255, 0, 0), 1)
            cv2.drawContours(frame, second_item, -1, (255, 0, 0), 1)
    if ret == True:
        cv2.imshow('Frame', frame)
        cv2.imshow('mask', mask)
        cv2.imshow('gray', thresh)
        # Press Q on keyboard to exit
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    # Break the loop
    else:
        break

# When everything is done, release the video capture object
cap.release()
# Close all the frames
cv2.destroyAllWindows()
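To get the centroids, you can compute the image moments of each of the two sorted contours with cv2.moments and divide m10 and m01 by m00. Below is a hedged sketch of just that step; the helper name is my own, and it assumes the frame, largest_item and second_item variables from the code above:

import cv2

def draw_centroid_and_circle(img, contour):
    # m00 is the contour area; m10/m00 and m01/m00 give the centroid coordinates
    M = cv2.moments(contour)
    if M["m00"] == 0:   # degenerate contour, avoid division by zero
        return None
    cx = int(M["m10"] / M["m00"])
    cy = int(M["m01"] / M["m00"])
    cv2.circle(img, (cx, cy), 5, (255, 0, 0), -1)                    # centroid in blue
    (x, y), radius = cv2.minEnclosingCircle(contour)
    cv2.circle(img, (int(x), int(y)), int(radius), (0, 0, 255), 2)   # enclosing circle in red
    return cx, cy

# e.g. inside the `if len(contours) >= 2:` branch:
#     for c in (largest_item, second_item):
#         draw_centroid_and_circle(frame, c)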
I'm trying to segment the moving propeller of this Video. My approach is to detect all black and moving pixels to separate the propeller from the rest.
Here is what I tried so far:
import numpy as np
import cv2

x, y, h, w = 350, 100, 420, 500  # cropping values
cap = cv2.VideoCapture('Video Path')
while True:
    _, frame = cap.read()
    frame = frame[y:y+h, x:x+w]  # crop video
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_black = np.array([0, 0, 0])
    upper_black = np.array([360, 255, 90])
    mask = cv2.inRange(hsv, lower_black, upper_black)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    nz = np.argwhere(mask)
    cv2.imshow('Original', frame)
    cv2.imshow('Propeller Segmentation', mask)
    k = cv2.waitKey(30) & 0xff  # press Esc to exit
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
Screenshot from the Video
Result of the Segmentation
I think you should have a look at background subtraction, for example with the function cv.createBackgroundSubtractorMOG2(). It should be the right approach for your problem.
OpenCV provides a good tutorial on this: Link
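A minimal sketch of how that might look for the propeller video, assuming the same crop values and black HSV range as in the question; the history and varThreshold parameters are placeholder assumptions to tune, and 'Video Path' stays a placeholder:

import cv2

cap = cv2.VideoCapture('Video Path')
# MOG2 models the static background and marks moving pixels as foreground
backsub = cv2.createBackgroundSubtractorMOG2(history=200, varThreshold=16, detectShadows=False)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame = frame[100:100 + 420, 350:350 + 500]   # same crop as in the question
    fg_mask = backsub.apply(frame)                # moving pixels become foreground
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    black = cv2.inRange(hsv, (0, 0, 0), (180, 255, 90))   # dark pixels (H is 0-179 in OpenCV)
    propeller = cv2.bitwise_and(fg_mask, black)   # dark AND moving = propeller candidate
    cv2.imshow('Propeller Segmentation', propeller)
    if cv2.waitKey(30) & 0xff == 27:              # Esc to exit
        break

cap.release()
cv2.destroyAllWindows()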
I am trying to apply a blur filter to only a specified range of hue, saturation, and value, and then combine my (frame, smoothed) images. But when I do, I get a live feed that is pure white instead of showing the colors of my specified range of values. Is there any way to keep the mask from turning pure white?
Here is my code sample:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_red = np.array([128, 0, 0])
    upper_red = np.array([255, 255, 255])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    kernal = np.ones((15, 15), np.float32) / 225
    smoothed = cv2.filter2D(res, -1, kernal)
    fused_img = cv2.add(frame, smoothed)
    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', fused_img)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
cv2.destroyAllWindows()
cap.release()
I just now realized what happens: cv2.add sums the pixel values and saturates them at 255, which is what washes the result out to white. You need to use addWeighted instead and change the alpha around until you are happy!
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_red = np.array([0, 0, 0])
    upper_red = np.array([255, 255, 255])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    kernal = np.ones((15, 15), np.float32) / 225
    smoothed = cv2.filter2D(res, -1, kernal)
    fused_img = cv2.addWeighted(smoothed, 0.1, frame, 1, 0)
    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', fused_img)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
cv2.destroyAllWindows()
cap.release()
I have a problem with my graduation project. I am a new user on this site, so I am sorry if I make a mistake.
Now to my problem: I am capturing frames with my webcam in Python/OpenCV and filtering them with red and blue thresholds. I have managed that part, but now I want to create a matrix from the thresholded image. Is that possible?
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while True:
    # Take each frame
    _, frame = cap.read()
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of blue color in HSV
    lower_blue = np.array([110, 50, 50])
    upper_blue = np.array([130, 255, 255])
    lower_red = np.array([0, 100, 100])
    upper_red = np.array([10, 255, 255])
    red_mask = cv2.inRange(hsv, lower_red, upper_red)  # I have the red threshold image.
    # Threshold the HSV image to get only blue colors
    blue_mask = cv2.inRange(hsv, lower_blue, upper_blue)
    mask = blue_mask + red_mask
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', res)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
This is the code for the threshold filter. What do I do next to get the matrix? This is my threshold filter example (please ignore everything outside of my platform :)).
I want to write red as '1' in the matrix, everything else as '0', and finally blue as '2' or 'X'.
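Here is a hedged sketch of one way to build such a matrix, assuming the red_mask and blue_mask arrays from the code above; the helper name and the uint8 dtype are my own choices, and the label values (red = 1, blue = 2, everything else = 0) follow the question:

import numpy as np

def build_label_matrix(red_mask, blue_mask):
    # Start with zeros everywhere, same height and width as the masks/frame
    labels = np.zeros(red_mask.shape, dtype=np.uint8)
    labels[red_mask > 0] = 1    # red pixels become 1
    labels[blue_mask > 0] = 2   # blue pixels become 2 (a character like 'X' would need a non-numeric dtype)
    return labels

You could call build_label_matrix(red_mask, blue_mask) once per frame inside the loop, right after the two inRange calls.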