How do I get all the pixels in an image using OpenCV? - Python

I'm using OpenCV right now to display different colours by masking everything but that colour. What I want to achieve is to get all the pixel coordinates that are green, black, etc.
Some screenshots:
The first image is of a black line and the second image is of a green square. I would like to be able to record the pixel coordinates that have black or green on them. Here's the main code:
import sys
sys.path.append("\Python\Opencv_codes")
import line_following_testing as lf
from line_following_testing import lower_green as lg
from line_following_testing import upper_green as ug
from line_following_testing import lower_black as lb
from line_following_testing import upper_black as ub
import numpy as np
from time import sleep as wait
import cv2
from PIL import Image

green_boundaries = [
    ([75, 52, 60], [106, 255, 255])
]
cap = cv2.VideoCapture(0)

while True:
    _, img = cap.read()
    lf.percentage_calculator(green_boundaries, "green", img)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # cropping and getting the hsv value and converting it to see if the value is there
    mask = cv2.inRange(hsv, lg, ug)
    green_result = cv2.bitwise_and(hsv, hsv, mask=mask)
    # cropping and getting the hsv value and converting it to see if the value is there
    mask1 = cv2.inRange(hsv, lb, ub)
    black_result = cv2.bitwise_and(hsv, hsv, mask=mask1)
    black_canny = cv2.Canny(black_result, 700, 900)
    cv2.imshow("green", green_result)
    cv2.imshow('black', black_result)
    cv2.imshow("hsv", hsv)
    cv2.imshow('img', img)
    cv2.imshow('black_canny', black_canny)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
and here is the imported script:
import numpy as np
from time import sleep as wait
import cv2
from PIL import Image

lower_green = np.array([75, 52, 60])
upper_green = np.array([106, 255, 255])
lower_black = np.array([0, 0, 0])
upper_black = np.array([180, 255, 45])

def percentage_calculator(boundaries, colour, image):
    for (lower, upper) in boundaries:
        lower = np.array(lower)
        upper = np.array(upper)

        # finds colours within the boundaries and applies a mask
        mask = cv2.inRange(image, lower, upper)
        output = cv2.bitwise_and(image, image, mask=mask)

        tot_pixel = image.size
        pixel = np.count_nonzero(output)
        percentage = round(pixel * 100 / tot_pixel, 2)

        print(colour + " pixels: " + str(pixel))
        print("Total pixels: " + str(tot_pixel))
        print("Percentage of " + colour + " pixels: " + str(percentage) + "%")
New code:
import sys
sys.path.append("\Python\Opencv_codes")
import line_following_testing as lf
from line_following_testing import lower_green as lg
from line_following_testing import upper_green as ug
from line_following_testing import lower_black as lb
from line_following_testing import upper_black as ub
import numpy as np
from time import sleep as wait
import cv2

green_boundaries = [
    ([75, 52, 60], [106, 255, 255])
]
cap = cv2.VideoCapture(0)

while True:
    _, img = cap.read()
    lf.percentage_calculator(green_boundaries, "green", img)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # cropping and getting the hsv value and converting it to see if the value is there
    mask = cv2.inRange(hsv, lg, ug)
    green_result = cv2.bitwise_and(hsv, hsv, mask=mask)
    # cropping and getting the hsv value and converting it to see if the value is there
    mask1 = cv2.inRange(hsv, lb, ub)
    black_result = cv2.bitwise_and(hsv, hsv, mask=mask1)
    x, y = mask1[-5:].nonzero()
    x_min = min(x)
    x_max = max(x)
    y_min = min(y)
    y_max = max(y)
    center_coords_min = (x_min, y_min)
    center_coords_max = (x_max, y_max)
    cv2.circle(img, center_coords_min, 10, (0, 0, 255), 2)
    cv2.circle(img, center_coords_max, 10, (255, 0, 0), 2)
    print(x_min, y_min)
    print(x_max, y_max)
    black_canny = cv2.Canny(black_result, 700, 900)
    cv2.imshow("green", green_result)
    cv2.imshow('black', black_result)
    cv2.imshow("hsv", hsv)
    cv2.imshow('img', img)
    cv2.imshow('black_canny', black_canny)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
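One thing to watch in the snippet above: numpy's nonzero() returns indices in (row, column) order, so the values unpacked as x and y are actually rows and columns, while cv2.circle expects centres as (x, y), i.e. (column, row). A minimal sketch of unpacking them the other way round, still using the bottom five rows of mask1 (offsets are illustrative):

    rows, cols = mask1[-5:].nonzero()      # nonzero() yields (row, col) indices
    if cols.size:                          # only draw if anything matched
        x_min, x_max = int(cols.min()), int(cols.max())
        # rows are relative to the 5-row slice; shift back into full-image coordinates
        y_min = int(rows.min()) + (mask1.shape[0] - 5)
        y_max = int(rows.max()) + (mask1.shape[0] - 5)
        cv2.circle(img, (x_min, y_min), 10, (0, 0, 255), 2)
        cv2.circle(img, (x_max, y_max), 10, (255, 0, 0), 2)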

You can use this code to find the HSV values for any colour:
import cv2
import numpy as np

img_path = r"img_path"

def nothing(x):
    pass

def crop_image_contours(image_copy):
    cropped_image = None
    # convert the image to grayscale format
    img_gray = cv2.cvtColor(image_copy, cv2.COLOR_BGR2GRAY)
    # apply binary thresholding
    ret, thresh = cv2.threshold(img_gray, 100, 255, cv2.THRESH_BINARY)
    contours1, hierarchy1 = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    buckle = []
    # computes the bounding box for the contour, and draws it on the frame,
    for contour in contours1:
        # Find bounding rectangles
        if cv2.contourArea(contour) > 100:
            box = cv2.minAreaRect(contour)
            box = cv2.boxPoints(box)
            box = np.array(box, dtype="int")
            x, y, w, h = cv2.boundingRect(contour)
            cropped_image = image_copy[y:y+h, x:x+w]
            buckle.append(box)
    return cropped_image

def crop_img(image, coord):
    image_cpy = image.copy()
    cropped_image = image_cpy[coord[1]:coord[3], coord[0]:coord[2]]
    return cropped_image

def check_sum(image, coord, hsv_value):
    c_image = crop_img(image, coord)
    [[l_h, l_s, l_v], [u_h, u_s, u_v]] = hsv_value
    lower_range = np.array([l_h, l_s, l_v])
    upper_range = np.array([u_h, u_s, u_v])
    img_crop = cv2.cvtColor(c_image, cv2.COLOR_BGR2HSV)
    val = cv2.inRange(img_crop, lower_range, upper_range)
    hsv_score = val.sum()
    # print(hsv_score)

useCamera = False
cv2.namedWindow('image')

# create trackbars for color change
cv2.createTrackbar('HMin', 'image', 0, 179, nothing)  # Hue is from 0-179 for Opencv
cv2.createTrackbar('SMin', 'image', 0, 255, nothing)
cv2.createTrackbar('VMin', 'image', 0, 255, nothing)
cv2.createTrackbar('HMax', 'image', 0, 179, nothing)
cv2.createTrackbar('SMax', 'image', 0, 255, nothing)
cv2.createTrackbar('VMax', 'image', 0, 255, nothing)

# Set default value for MAX HSV trackbars.
cv2.setTrackbarPos('HMax', 'image', 179)
cv2.setTrackbarPos('SMax', 'image', 255)
cv2.setTrackbarPos('VMax', 'image', 255)

# Initialize to check if HSV min/max value changes
hMin = sMin = vMin = hMax = sMax = vMax = 0
phMin = psMin = pvMin = phMax = psMax = pvMax = 0

# Output Image to display
if useCamera:
    cap = cv2.VideoCapture(0)
    # Wait longer to prevent freeze for videos.
    waitTime = 330
else:
    img = cv2.imread(img_path)
    output = img
    waitTime = 33

while True:
    if useCamera:
        # Capture frame-by-frame
        ret, img = cap.read()
        output = img

    # get current positions of all trackbars
    hMin = cv2.getTrackbarPos('HMin', 'image')
    sMin = cv2.getTrackbarPos('SMin', 'image')
    vMin = cv2.getTrackbarPos('VMin', 'image')
    hMax = cv2.getTrackbarPos('HMax', 'image')
    sMax = cv2.getTrackbarPos('SMax', 'image')
    vMax = cv2.getTrackbarPos('VMax', 'image')

    # Set minimum and max HSV values to display
    lower = np.array([hMin, sMin, vMin])
    upper = np.array([hMax, sMax, vMax])

    # Create HSV Image and threshold into a range.
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower, upper)
    output = cv2.bitwise_and(img, img, mask=mask)

    # Print if there is a change in HSV value
    if (phMin != hMin) | (psMin != sMin) | (pvMin != vMin) | (phMax != hMax) | (psMax != sMax) | (pvMax != vMax):
        print("[[ %d , %d, %d],[%d , %d, %d]]" % (hMin, sMin, vMin, hMax, sMax, vMax))
        phMin = hMin
        psMin = sMin
        pvMin = vMin
        phMax = hMax
        psMax = sMax
        pvMax = vMax
        clr_val = [[phMin, psMin, pvMin], [phMax, psMax, pvMax]]

    cv2.imshow('image', output)

    # Wait longer to prevent freeze for videos.
    if cv2.waitKey(waitTime) & 0xFF == ord('q'):
        break

# Release resources
if useCamera:
    cap.release()
cv2.destroyAllWindows()
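Once suitable bounds have been found with the sliders, getting the actual pixel coordinates for a colour is just a matter of asking where the mask is non-zero. A minimal sketch, assuming img is a BGR frame and lower/upper are the HSV bounds you picked:

hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower, upper)

# np.where on the mask gives (row, col) index arrays; stack them as (x, y) pairs
ys, xs = np.where(mask > 0)
coords = np.column_stack((xs, ys))   # one (x, y) pair per matching pixel
print(coords.shape[0], "matching pixels")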

Related

NameError: name 'video' is not defined - Python 3.8

I have a problem with the code below; it is a program for fire detection from video files.
When I run this code I get an error:
in video.release()
NameError: name 'video' is not defined
The video variable is already defined, and it's not clear to me why the error pops up.
import cv2
import numpy as np
import sys
import math

title_window = 'Linear Blend'
cv2.namedWindow(title_window)

def on_trackbar(val):
    pass

cv2.createTrackbar("Hmax", title_window, 0, 255, on_trackbar)
cv2.createTrackbar("Hmin", title_window, 0, 255, on_trackbar)
cv2.createTrackbar("Smax", title_window, 0, 255, on_trackbar)
cv2.createTrackbar("Smin", title_window, 0, 255, on_trackbar)
cv2.createTrackbar("Vmax", title_window, 0, 255, on_trackbar)
cv2.createTrackbar("Vmin", title_window, 0, 255, on_trackbar)

def detectFire(src):  # src is the image
    _, contours, _ = cv2.findContours(src, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)  # contours is the list of white regions
    contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]  # get the area of each contour
    if len(contour_sizes) > 0:
        biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]  # the contour with the largest area, i.e. the white-pixel region
        mask = np.zeros(src.shape, np.uint8)
        cv2.drawContours(mask, [biggest_contour], -1, 255, -1)  # draw biggest_contour onto the mask image
        rect = cv2.boundingRect(biggest_contour)
        return mask, rect
    else:
        return src, [0, 0, 0, 0]

if len(sys.argv) == 2:
    # load video file from first command line argument
    video = cv2.VideoCapture("Fire_smoke.mp4")  # read the video from the path
    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))  # get the video width
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))  # and height
    fps = video.get(cv2.CAP_PROP_FPS)  # get the fps value (frames per second)
    frame_time = round(1000 / fps)
    last_white = 0
    ret, frame = video.read()
    pause = False
    sum_diff = 0
    loop = 0
    while True:
        if pause is False:
            ret, frame = video.read()
            if not ret:
                print("... end of video file reached")
                break
        cv2.imshow("origin", frame)
        Hmax = cv2.getTrackbarPos('Hmax', title_window)
        Hmin = cv2.getTrackbarPos('Hmin', title_window)
        Smax = cv2.getTrackbarPos('Smax', title_window)
        Smin = cv2.getTrackbarPos('Smin', title_window)
        Vmax = cv2.getTrackbarPos('Vmax', title_window)
        Vmin = cv2.getTrackbarPos('Vmin', title_window)
        # lower = [Hmin, Smin, Vmin]
        # upper = [Hmax, Smax, Vmax]
        lower = [6, 152, 138]  # lower bound of the flame colour in HSV
        upper = [48, 248, 255]
        blur = cv2.GaussianBlur(frame, (21, 21), 0)  # noise filtering
        hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)  # colour space conversion
        lower = np.array(lower, dtype="uint8")
        upper = np.array(upper, dtype="uint8")
        mask = cv2.inRange(hsv, lower, upper)
        cv2.imshow("resdfd", mask)  # mask is the black-and-white image of the flame-coloured regions
        mask, rect = detectFire(mask)
        detected_edges = cv2.Canny(mask, 100, 200, 3)  # get the image edges
        n_white = np.sum(detected_edges == 255)
        diff_white = n_white - last_white
        last_white = n_white
        if diff_white > 0 and diff_white < 100:
            sum_diff += diff_white
        loop += 1
        if loop > 50:
            loop = 0
            if sum_diff > 150:
                print("fire")
            else:
                print("clear")
            print("fluctuation:", sum_diff)
            sum_diff = 0
        cv2.imshow("cen", detected_edges)
        # image display and key handling
        output = cv2.bitwise_and(frame, frame, mask=mask)
        x, y, w, h = rect
        cv2.rectangle(output, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.line(output, (width - 10, height - 10), (width - 10, height - 10 - int(n_white * height / 5000)), (0, 0, 255), 5)
        cv2.imshow(title_window, output)
        if cv2.waitKey(10) == ord('x'):
            break
        elif cv2.waitKey(10) == ord('p'):
            pause = True
        elif cv2.waitKey(10) == ord('r'):
            pause = False

cv2.destroyAllWindows()
video.release()
I tried changing the variables, but I don't understand the cause of the error; I'm very weak in programming.
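The structure of the script explains the error: video is only assigned inside the if len(sys.argv) == 2: block, so when the script is started without exactly one command-line argument the whole block is skipped and the final video.release() refers to a name that was never bound. A minimal sketch of guarding against that (the argument handling here is illustrative only):

import sys
import cv2

video = None
if len(sys.argv) == 2:
    video = cv2.VideoCapture(sys.argv[1])
    # ... processing loop ...

cv2.destroyAllWindows()
if video is not None:
    video.release()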

OpenCV traffic light detection

import cv2
import numpy as np
import warnings
warnings.filterwarnings("ignore")

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_yellow = np.array([20, 0, 0])
    upper_yellow = np.array([40, 255, 255])
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    img = cv2.medianBlur(res, 5)
    ccimg = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
    cimg = cv2.cvtColor(ccimg, cv2.COLOR_BGR2GRAY)
    circles = cv2.HoughCircles(cimg, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=20, maxRadius=30)
    if circles is not None:
        print("circle is found")
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
            cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
        cv2.imshow('detected circles', cimg)
    cv2.imshow('res', res)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cv2.destroyAllWindows()
I am trying to detect traffic lights using OpenCV. Initially I want to detect the yellow colour using HSV space, then apply median filtering and find the circle, but it raises errors: "async ReadSample() call is failed with error status: -1072873821" and "OnReadSample() is called with error status: -1072873821". The errors are probably caused by the if statement that checks whether any circle was found. The error output is a long list, but these two messages are the ones that repeat.
The capture device is failing to read a frame. The OnReadSample() call is failing on cap.read() and you should implement logic to handle a frame not being read. I've demonstrated this below:
import cv2
import numpy as np
import warnings
warnings.filterwarnings("ignore")

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if ret == True:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        lower_yellow = np.array([20, 0, 0])
        upper_yellow = np.array([40, 255, 255])
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        res = cv2.bitwise_and(frame, frame, mask=mask)
        img = cv2.medianBlur(res, 5)
        ccimg = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        cimg = cv2.cvtColor(ccimg, cv2.COLOR_BGR2GRAY)
        circles = cv2.HoughCircles(cimg, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=20, maxRadius=30)
        if circles is not None:
            print("circle is found")
            circles = np.uint16(np.around(circles))
            for i in circles[0, :]:
                cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
                cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
            cv2.imshow('detected circles', cimg)
        cv2.imshow('res', res)
    else:
        print("Read Failed")
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cv2.destroyAllWindows()
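A small follow-up, in line with the other snippets on this page: it is also worth releasing the capture device once the loop exits, before closing the windows.

cap.release()
cv2.destroyAllWindows()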

How to use OpenCV to detect a section of black wire

My goal is to check whether the black wire has some tape on it (tape in a specific colour: red, yellow, or blue).
Right now I can detect the tape on the wire.
But I have no idea how to detect the wire itself and check whether the two have an intersecting area.
This is my code:
import numpy as np
import cv2

def load_image(path_img):
    return cv2.imread(path_img)

def bgr2hsv(img):
    return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

def setRangeColor(hsv, lower_color, upper_color):
    return cv2.inRange(hsv, lower_color, upper_color)

def contours_img(mask):
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return contours

def filter_contours_img(contours, img_draw, color_bbox):
    count = 0
    for c in contours:
        rect = cv2.boundingRect(c)
        x, y, w, h = rect
        area = w * h
        if area > 1000:
            count = count + 1
            cv2.rectangle(img_draw, (x, y), (x + w, y + h), color_bbox, 5)
    return img_draw, count

def draw_text_on_image(img_draw, count_yellow, count_red, count_blue):
    cv2.rectangle(img_draw, (0, 0), (500, 170), (0, 0, 0), -1)
    cv2.putText(img_draw, 'Red Count : ' + str(count_red),
                (10, 50),                   # bottomLeftCornerOfText
                cv2.FONT_HERSHEY_SIMPLEX,   # font
                1.5,                        # fontScale
                (0, 255, 255),              # fontColor
                2)                          # lineType
    cv2.putText(img_draw, 'Yellow Count : ' + str(count_yellow),
                (10, 100),                  # bottomLeftCornerOfText
                cv2.FONT_HERSHEY_SIMPLEX,   # font
                1.5,                        # fontScale
                (0, 255, 255),              # fontColor
                2)                          # lineType
    cv2.putText(img_draw, 'blue Count : ' + str(count_blue),
                (10, 150),                  # bottomLeftCornerOfText
                cv2.FONT_HERSHEY_SIMPLEX,   # font
                1.5,                        # fontScale
                (0, 255, 255),              # fontColor
                2)                          # lineType
    return img_draw

def main():
    path_img = 'images/ph14.jpg'
    img = load_image(path_img)
    #cv2.imshow('img_title',img)
    #img = cv2.resize(img, None,fx=0.5,fy=0.5)
    hsv = bgr2hsv(img)
    img_draw = img
    # define range of Yellow color in HSV
    lower_Yellow = np.array([20, 100, 100])
    upper_Yellow = np.array([30, 255, 255])
    mask = setRangeColor(hsv, lower_Yellow, upper_Yellow)
    contours = contours_img(mask)
    color_bbox = (0, 255, 0)
    img_draw, count_yellow = filter_contours_img(contours, img_draw, color_bbox)
    print('Yellow Count:', count_yellow)
    # define range of Red color in HSV
    lower_Red = np.array([0, 125, 125])
    upper_Red = np.array([10, 255, 255])
    mask0 = setRangeColor(hsv, lower_Red, upper_Red)
    contours = contours_img(mask0)
    color_bbox = (0, 125, 255)
    img_draw, count_red0 = filter_contours_img(contours, img_draw, color_bbox)
    #print('Red Count:', count_red)
    # upper mask (170-180)
    lower_red = np.array([170, 100, 100])  # 170
    upper_red = np.array([180, 255, 255])  # 180
    mask1 = cv2.inRange(hsv, lower_red, upper_red)
    contours = contours_img(mask1)
    color_bbox = (0, 125, 255)
    img_draw, count_red1 = filter_contours_img(contours, img_draw, color_bbox)
    count_red = count_red0 + count_red1
    print('Red Count:', count_red)
    # define range of Blue color in HSV
    lower_Blue = np.array([100, 125, 125])
    upper_Blue = np.array([125, 255, 255])
    mask = setRangeColor(hsv, lower_Blue, upper_Blue)
    contours = contours_img(mask)
    color_bbox = (255, 0, 255)
    img_draw, count_blue = filter_contours_img(contours, img_draw, color_bbox)
    img_draw = draw_text_on_image(img_draw, count_yellow, count_red, count_blue)
    print('Blue Count:', count_blue)
    cv2.imwrite('output/output_IMG_ph14.png', img_draw)

if __name__ == '__main__':
    main()
I think my final output should look like the result below (the green line is not necessary).
This picture is over-detected (red is detected more than once).
So in this case, I think that if I can detect the black wire, I will be able to check whether the detection is correct.
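One possible approach, sketched under the assumption that the wire is much darker than the background: build a low-value HSV mask for the black wire, then test whether each detected tape bounding box overlaps that mask. The function, thresholds, and padding below are illustrative, not a tested solution.

import cv2
import numpy as np

def tape_touches_wire(hsv, tape_rect, v_max=60, min_overlap=50):
    # hsv: image already converted with cv2.COLOR_BGR2HSV
    # tape_rect: (x, y, w, h) from cv2.boundingRect on a tape contour
    # v_max and min_overlap are assumed values; tune them for your images
    wire_mask = cv2.inRange(hsv, np.array([0, 0, 0]), np.array([180, 255, v_max]))
    x, y, w, h = tape_rect
    pad = 10  # slightly enlarge the box so the wire just beside the tape still counts
    roi = wire_mask[max(y - pad, 0):y + h + pad, max(x - pad, 0):x + w + pad]
    return cv2.countNonZero(roi) > min_overlap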

Real Time Color Detection and Functioning with it

I have worked with OpenCV for some time to pick out colours on the screen. In the end, I succeeded in masking the colours in different windows, as in the photo below:
https://prnt.sc/qo3cjy
I was wondering what the most efficient, working way is to make Python detect the colours and then run functions that I have written. For example, if green is detected, run the function hellogreen(), which will print "hello green" when green is detected, and so on.
Source code, just in case it's needed:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

while True:
    _, frame = cap.read()
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Red color
    low_red = np.array([161, 155, 84])
    high_red = np.array([179, 255, 255])
    red_mask = cv2.inRange(hsv_frame, low_red, high_red)
    red = cv2.bitwise_and(frame, frame, mask=red_mask)

    # Blue color
    low_blue = np.array([94, 80, 2])
    high_blue = np.array([126, 255, 255])
    blue_mask = cv2.inRange(hsv_frame, low_blue, high_blue)
    blue = cv2.bitwise_and(frame, frame, mask=blue_mask)

    # Green color
    low_green = np.array([25, 52, 72])
    high_green = np.array([83, 255, 255])
    green_mask = cv2.inRange(hsv_frame, low_green, high_green)
    green = cv2.bitwise_and(frame, frame, mask=green_mask)

    # Every color except white
    low = np.array([0, 42, 0])
    high = np.array([179, 255, 255])
    mask = cv2.inRange(hsv_frame, low, high)
    result = cv2.bitwise_and(frame, frame, mask=mask)

    cv2.imshow("Frame", frame)
    cv2.imshow("Red", red)
    cv2.imshow("Blue", blue)
    cv2.imshow("Green", green)

    key = cv2.waitKey(1)
    if key == 27:
        break
Put these lines at the end of your code, inside the loop (helloblue, hellogreen and hellored are your hypothetical functions):
b = cv2.countNonZero(blue_mask)
r = cv2.countNonZero(red_mask)
g = cv2.countNonZero(green_mask)

if b >= r and b >= g:
    helloblue()
elif r >= b and r >= g:
    hellored()
elif g >= b and g >= r:
    hellogreen()

key = cv2.waitKey(1)
if key == 27:
    break
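Note that with the comparison above, one of the three functions is always called, even when the frame contains none of the colours; adding a minimum pixel count avoids that. A small sketch (the threshold value is illustrative and should be tuned for your camera):

MIN_PIXELS = 500  # assumed threshold; tune for your setup

if max(b, r, g) > MIN_PIXELS:
    if b >= r and b >= g:
        helloblue()
    elif r >= b and r >= g:
        hellored()
    else:
        hellogreen()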

How to put bounding box around the detected human outline

Here is the Python code I have written:
import cv2
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
args = vars(ap.parse_args())

if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])

fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()

while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
How can I put a bounding box around the detected human outline and improve the efficiency of the Python code that performs background subtraction on the live video feed taken from a webcam? Can someone help?
Drawing Contour Using Background Subtraction
import cv2
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
args = vars(ap.parse_args())

if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])

fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()

while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    gray = cv2.cvtColor(fgmask, cv2.COLOR_BGR2GRAY)
    ret, th1 = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY)
    _, contours, hierarchy = cv2.findContours(th1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 1000 and area < 40000:
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(fgmask, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
Drawing Contour using HSV Masking and Convex Hull
Set the values for the HSV mask.
import cv2
import argparse
import numpy as np
import matplotlib.pyplot as plt

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
args = vars(ap.parse_args())

if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])

fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()

while True:
    ret, frame = cap.read()
    # frame = cv2.imread(frame)  # not needed: frame already comes from cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower = np.array([50, 103, 40])
    upper = np.array([255, 255, 255])
    mask = cv2.inRange(hsv, lower, upper)
    fg = cv2.bitwise_and(frame, frame, mask=255 - mask)
    fg = cv2.cvtColor(fg.copy(), cv2.COLOR_HSV2BGR)
    fg = cv2.cvtColor(fg, cv2.COLOR_BGR2GRAY)
    fg = cv2.threshold(fg, 120, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    #plt.imshow(fg)
    #plt.show()
    se = np.ones((3, 3), np.uint8)
    fgclosing = cv2.morphologyEx(fg.copy(), cv2.MORPH_CLOSE, se)
    #fgdilated = cv2.morphologyEx(fgclosing, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4,4)))
    fgdilated = cv2.dilate(fgclosing, kernel=se, iterations=8)
    img = frame.copy()
    ret, threshed_img = cv2.threshold(fgdilated,
                                      127, 255, cv2.THRESH_BINARY)
    image, contours, hier = cv2.findContours(threshed_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        #print(cv2.contourArea(cnt))
        if cv2.contourArea(cnt) > 44000:
            # get convex hull
            hull = cv2.convexHull(cnt)
            #cv2.drawContours(img, [hull], -1, (0, 0, 255), 1)
            #print(hull)
            (x, y, w, h) = cv2.boundingRect(cnt)
            #cv2.rectangle(img, (x,y), (x+w,y+h), (255, 0, 0), 2)
            contours = hull
            #c1 = max(contours, key=cv2.contourArea)
            hull = cv2.convexHull(cnt)
            c = hull
            #print(c)
            cv2.drawContours(img, [hull], -1, (0, 0, 255), 1)
            # determine the most extreme points along the contour
            extLeft = tuple(c[c[:, :, 0].argmin()][0])
            extRight = tuple(c[c[:, :, 0].argmax()][0])
            extTop = tuple(c[c[:, :, 1].argmin()][0])
            extBot = tuple(c[c[:, :, 1].argmax()][0])
            cv2.drawContours(img, [c], -1, (0, 255, 255), 2)
            cv2.circle(img, extLeft, 8, (0, 0, 255), -1)
            cv2.circle(img, extRight, 8, (0, 255, 0), -1)
            cv2.circle(img, extTop, 8, (255, 0, 0), -1)
            cv2.circle(img, extBot, 8, (255, 255, 0), -1)
            lx = extLeft[1]
            ly = extLeft[0]
            rx = extRight[1]
            ry = extRight[0]
            tx = extTop[1]
            ty = extTop[0]
            bx = extBot[1]
            by = extBot[0]
            x, y = lx, by
            w, h = abs(rx - lx), abs(ty - by)
            #cv2.rectangle(img, (x,y), (x+w,y+h), (255, 0, 0), 2)
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img, str(extLeft[0]) + ',' + str(extLeft[1]), (extLeft), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(img, str(extRight[0]) + ',' + str(extRight[1]), (extRight), font, 2, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(img, str(extTop[0]) + ',' + str(extTop[1]), (extTop), font, 2, (255, 0, 0), 2, cv2.LINE_AA)
            cv2.putText(img, str(extBot[0]) + ',' + str(extBot[1]), (extBot), font, 2, (255, 255, 0), 2, cv2.LINE_AA)
            im = frame[tx:bx, ly:ry, :]
            cx = im.shape[1] // 2
            cy = im.shape[0] // 2
            cv2.circle(im, (cx, cy), 15, (0, 255, 0))
    plt.imshow(img)
    plt.show()
You can use findContours.
import cv2
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
args = vars(ap.parse_args())

if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])

fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()

while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    mask = 255 - fgmask
    _, contours, _ = cv2.findContours(
        mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    fgmask = cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR)
    for contour in contours:
        area = cv2.contourArea(contour)
        # only show contours that match the area criteria
        if area > 500 and area < 20000:
            rect = cv2.boundingRect(contour)
            x, y, w, h = rect
            cv2.rectangle(fgmask, (x, y), (x + w, y + h), (0, 255, 0), 3)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
I have tested with the video https://github.com/opencv/opencv/blob/master/samples/data/vtest.avi
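The raw MOG mask tends to be speckled, so small noise blobs can slip past the area filter; a morphological opening before findContours usually cleans it up. A minimal sketch (the kernel size is illustrative):

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
fgmask = fgbg.apply(frame)
fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)  # remove small speckles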
