This is an example of what I will be seeing: I am trying to count the number of blue items in the video. In this example it would be 2, my shirt and the phone. How would I go about doing this?
Here is my code
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    width = int(cap.get(3))
    height = int(cap.get(4))

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_blue = np.array([90, 50, 50])
    upper_blue = np.array([130, 255, 255])

    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    result = cv2.bitwise_and(frame, frame, mask=mask)

    cv2.imshow('frame', result)
    cv2.imshow('mask', mask)

    if cv2.waitKey(1) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
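If the goal is to actually count the blue objects rather than just mask them, one possible sketch (my own addition, assuming OpenCV 4's two-value findContours return and objects that do not touch each other) is to clean the mask with a morphological open and count the external contours above a minimum area. The 500-pixel area threshold is an arbitrary assumption you would tune for your camera and distance:

import numpy as np
import cv2

cap = cv2.VideoCapture(0)

lower_blue = np.array([90, 50, 50])
upper_blue = np.array([130, 255, 255])
kernel = np.ones((5, 5), np.uint8)

while True:
    ret, frame = cap.read()
    if not ret:
        break

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower_blue, upper_blue)

    # Remove small speckles so they are not counted as separate objects
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)

    # Treat each sufficiently large external contour as one blue item;
    # the 500-pixel minimum area is an assumed, tunable threshold
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    blue_items = [c for c in contours if cv2.contourArea(c) > 500]

    cv2.putText(frame, f"Blue items: {len(blue_items)}", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
    cv2.drawContours(frame, blue_items, -1, (255, 0, 0), 2)
    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)

    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

Note that two blue objects that touch or overlap in the frame will merge into a single contour and be counted once.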
I'm trying to edit a video, and if I use the cv2.imshow function I can see my edited frames, but when I try to open the saved video I get the error "0xc10100be".
It seems there is a problem with saving the edited frames, and I think it has something to do with the size of the images. I tried changing the sizes, but to no avail.
Help would be appreciated, cheers.
import cv2
import numpy as np
video = cv2.VideoCapture(".......mp4")
result = cv2.VideoWriter('filename.mp4',
                         cv2.VideoWriter_fourcc(*'mp4v'),
                         15.0, (960, 450))

while True:
    ret, frame = video.read()
    if ret == True:
        width, height = 450, 960
        pts1 = np.float32([[68, 755], [1908, 733], [63, 803], [1909, 787]])
        pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
        matrix = cv2.getPerspectiveTransform(pts1, pts2)
        Img = cv2.warpPerspective(frame, matrix, (width, height))
        imgOutput = cv2.rotate(Img, cv2.ROTATE_90_COUNTERCLOCKWISE)
        imgHSV = cv2.cvtColor(imgOutput, cv2.COLOR_BGR2HSV)

        h_min = 19
        h_max = 60
        s_min = 73
        s_max = 237
        v_min = 20
        v_max = 188

        lower = np.array([h_min, s_min, v_min])
        upper = np.array([h_max, s_max, v_max])
        mask = cv2.inRange(imgHSV, lower, upper)

        result.write(mask)
        cv2.imshow('Output', mask)

        if cv2.waitKey(1) & 0xFF == ord('s'):
            break

    # Break the loop
    else:
        break

video.release()
result.release()
cv2.destroyAllWindows()
print("The video was successfully saved")
So what I'm doing here is taking a frame of my video, cropping it, putting an HSV filter on it, and getting a black-and-white mask so that only the relevant parts are visible in white. I'm trying to save those black-and-white frames into a video.
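One likely cause, offered as an assumption rather than a confirmed diagnosis: the VideoWriter is opened with its default colour mode, but write() is given a single-channel mask, so the frames are silently dropped and the resulting file cannot be played. A minimal sketch of two ways around this, keeping the original 960x450 output size and 15 fps from the post:

# Option 1: open the writer for single-channel frames
# (last positional argument is isColor=False; not every backend/codec supports this)
result = cv2.VideoWriter('filename.mp4',
                         cv2.VideoWriter_fourcc(*'mp4v'),
                         15.0, (960, 450), False)
result.write(mask)

# Option 2 (more portable): keep the default colour writer
# and convert the mask to 3 channels before writing
mask_bgr = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
result.write(mask_bgr)

Either way, the frames passed to write() must have exactly the width and height given to the writer.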
import cv2
import numpy as np
import warnings
warnings.filterwarnings("ignore")
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_yellow = np.array([20, 0, 0])
    upper_yellow = np.array([40, 255, 255])
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
    res = cv2.bitwise_and(frame, frame, mask=mask)

    img = cv2.medianBlur(res, 5)
    ccimg = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
    cimg = cv2.cvtColor(ccimg, cv2.COLOR_BGR2GRAY)
    circles = cv2.HoughCircles(cimg, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=50, param2=30, minRadius=20, maxRadius=30)

    if circles is not None:
        print("circle is found")
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
            cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
        cv2.imshow('detected circles', cimg)

    cv2.imshow('res', res)

    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cv2.destroyAllWindows()
I am trying to detect traffic lights using OpenCV. Initially I want to detect the yellow color using HSV space, then apply median filtering and find the circle, but it raises the errors "async ReadSample() call is failed with error status: -1072873821" and "OnReadSample() is called with error status: -1072873821". The errors are probably caused by the if statement that checks whether a circle is found. The error output is a long list, but these two messages are repeated.
The capture device is failing to read a frame. The OnReadSample() error is raised from cap.read(), and you should add logic to handle the case where a frame is not read. I've demonstrated this below:
import cv2
import numpy as np
import warnings
warnings.filterwarnings("ignore")
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if ret == True:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        lower_yellow = np.array([20, 0, 0])
        upper_yellow = np.array([40, 255, 255])
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        res = cv2.bitwise_and(frame, frame, mask=mask)

        img = cv2.medianBlur(res, 5)
        ccimg = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        cimg = cv2.cvtColor(ccimg, cv2.COLOR_BGR2GRAY)
        circles = cv2.HoughCircles(cimg, cv2.HOUGH_GRADIENT, 1, 20,
                                   param1=50, param2=30, minRadius=20, maxRadius=30)

        if circles is not None:
            print("circle is found")
            circles = np.uint16(np.around(circles))
            for i in circles[0, :]:
                cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
                cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
            cv2.imshow('detected circles', cimg)

        cv2.imshow('res', res)
    else:
        print("Read Failed")

    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cv2.destroyAllWindows()
I am learning OpenCV with Python with reference to this article. I tried the same code they have given, but even the first phase of background removal is not working.
import cv2
import numpy as np

cap = cv2.VideoCapture(1)

while True:
    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)  # Horizontal Flip
    cv2.imshow('original', frame)

    # Background Removal
    bgSubtractor = cv2.createBackgroundSubtractorMOG2(
        history=10, varThreshold=30, detectShadows=False)
    fgmask = bgSubtractor.apply(frame)

    kernel = np.ones((5, 5), np.uint8)
    # The effect is to remove the noise in the background
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel, iterations=2)
    # To close the holes in the objects
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel, iterations=2)

    img = cv2.bitwise_and(frame, frame, mask=fgmask)
    cv2.imshow('image after bitwise_fgmask', img)
    cv2.imshow('fgmask', fgmask)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Output for fgmask: (screenshot). The output for img is the same as the original frame.
What's wrong here, and what should I do?
You have to move bgSubtractor out of the while loop. Otherwise you will be recreating it every frame:
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
# Background Removal
bgSubtractor = cv2.createBackgroundSubtractorMOG2(
    history=10, varThreshold=30, detectShadows=False)

while True:
    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)  # Horizontal Flip
    cv2.imshow('original', frame)

    fgmask = bgSubtractor.apply(frame)

    kernel = np.ones((5, 5), np.uint8)
    # The effect is to remove the noise in the background
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel, iterations=2)
    # To close the holes in the objects
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel, iterations=2)

    img = cv2.bitwise_and(frame, frame, mask=fgmask)
    cv2.imshow('image after bitwise_fgmask', img)
    cv2.imshow('fgmask', fgmask)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
I have worked for some time with OpenCV to isolate colors on the screen. In the end, I succeeded in masking the colors in different windows, as in the photo below:
https://prnt.sc/qo3cjy
I was wondering what the most efficient, working way is to have Python detect the colors and then run functions that I wrote. For example, if green is detected, run the function hellogreen(), which prints "hello green", and so on.
Source code, just in case it is needed:
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Red color
    low_red = np.array([161, 155, 84])
    high_red = np.array([179, 255, 255])
    red_mask = cv2.inRange(hsv_frame, low_red, high_red)
    red = cv2.bitwise_and(frame, frame, mask=red_mask)

    # Blue color
    low_blue = np.array([94, 80, 2])
    high_blue = np.array([126, 255, 255])
    blue_mask = cv2.inRange(hsv_frame, low_blue, high_blue)
    blue = cv2.bitwise_and(frame, frame, mask=blue_mask)

    # Green color
    low_green = np.array([25, 52, 72])
    high_green = np.array([83, 255, 255])
    green_mask = cv2.inRange(hsv_frame, low_green, high_green)
    green = cv2.bitwise_and(frame, frame, mask=green_mask)

    # Every color except white
    low = np.array([0, 42, 0])
    high = np.array([179, 255, 255])
    mask = cv2.inRange(hsv_frame, low, high)
    result = cv2.bitwise_and(frame, frame, mask=mask)

    cv2.imshow("Frame", frame)
    cv2.imshow("Red", red)
    cv2.imshow("Blue", blue)
    cv2.imshow("Green", green)

    key = cv2.waitKey(1)
    if key == 27:
        break
Put these lines at the end of your loop body (helloblue, hellogreen, and hellored are your hypothetical functions):
b = cv2.countNonZero(blue_mask)
r = cv2.countNonZero(red_mask)
g = cv2.countNonZero(green_mask)

if b >= r and b >= g:
    helloblue()
elif r >= b and r >= g:
    hellored()
elif g >= b and g >= r:
    hellogreen()

key = cv2.waitKey(1)
if key == 27:
    break
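As a possible refinement (my own assumption, not part of the original answer), you may want to require a minimum number of matching pixels so that a few stray pixels of noise don't trigger a function. The 500-pixel threshold below is arbitrary and would need tuning:

min_pixels = 500  # assumed noise threshold; tune for your camera and scene

if b >= max(r, g) and b > min_pixels:
    helloblue()
elif r >= max(b, g) and r > min_pixels:
    hellored()
elif g >= max(b, r) and g > min_pixels:
    hellogreen()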
I am trying to understand how to work with an ROI, but the following code is not working as expected.
I have a white ball moving across a black background, and I want to detect it and keep tracking it. Unfortunately, the rectangle does not move. I think the problem is the color array, but after trying many values, I still haven't found the correct solution.
import numpy as np
import cv2
cap = cv2.VideoCapture('oieee.avi')
ret, frame = cap.read()

r, h, c, w = 485, 50, 890, 50
track_window = (c, r, w, h)

roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 0., 240.)), np.array((255., 15., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while True:
    ret, frame = cap.read()
    if ret == True:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

        ret, track_window = cv2.meanShift(dst, track_window, term_crit)

        x, y, w, h = track_window
        cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
        img2 = cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
        cv2.imshow('img2', frame)

        k = cv2.waitKey(60) & 0xff
        if k == 27:
            break
        else:
            cv2.imwrite(chr(k)+".jpg", img2)
    else:
        break
cv2.destroyAllWindows()
cap.release()
A screenshot from my video: http://imgur.com/VG3TgFF
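One thing that stands out, offered as my reading of the code rather than a confirmed fix: the histogram is built from the whole frame converted to HSV rather than from the roi slice, and back-projecting the hue channel is a weak signal for a white ball, since white has no distinctive hue. A minimal sketch of how one might instead build the histogram on the value (brightness) channel of the actual ROI; the channel choice and the bin/range values are assumptions to tune:

# Build the histogram from the ROI itself, not the whole frame
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)

# A white ball is better separated by brightness (V) than by hue (H)
mask = cv2.inRange(hsv_roi, np.array((0., 0., 240.)), np.array((180., 30., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [2], mask, [256], [0, 256])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# ...and back-project the same channel inside the loop
dst = cv2.calcBackProject([hsv], [2], roi_hist, [0, 256], 1)

It is also worth verifying that the ROI coordinates (r, h, c, w) actually cover the ball in the first frame; if the initial window contains only background, meanShift has nothing meaningful to converge on.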