Related
I am able to overlay the picture with the black background, but I am not able to overlay it when I remove the black background. I know I am removing it because when I save the file, the background is removed, but if I try to add the new image, it's still going to have the black background. If I directly add the transformed image, I get the following error:
could not broadcast input array from shape (45,56,4) into shape (45,56,3). I'm not sure what the third dimension is.
final_image is just a regular .JPG file that has some polylines drawn on top of it
EDIT: I wish I could give a traceback error, but this is the only ValueError exception I get when running in Visual Studio Code. I've pasted my entire function for this problem now. My main issue is that I can't get the transparency to show up on the final output, it still outputs as black.
# Fragment of the asker's function: detect coloured circles via HSV masks +
# HoughCircles, draw them, then try to overlay a transparency-masked logo.
#Check if file exists
fileExists = exists(filename)
if not fileExists:
print("File does not exist")
return -1
#Read image
image = cv.imread(filename)
# Work in HSV so each marker colour can be isolated with a single range.
hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)
lower_blue = np.array([30,60,180])
upper_blue = np.array([130,255,255])
lower_red = np.array([160,70,140])
upper_red = np.array([170,130,255])
lower_green = np.array([29,60,170])
# NOTE(review): V upper bound 260 exceeds the 0-255 range; 255 is the effective limit.
upper_green = np.array([40,100,260])
lower_orange = np.array([10,85,200])
upper_orange = np.array([20,120,255])
red_mask = cv.inRange(hsv, lower_red, upper_red)
blue_mask = cv.inRange(hsv, lower_blue, upper_blue)
green_mask = cv.inRange(hsv, lower_green, upper_green)
orange_mask = cv.inRange(hsv, lower_orange, upper_orange)
# Merge the four binary masks pairwise; each addWeighted halves the intensity,
# so convertScaleAbs re-brightens the result.
combined_image = cv.addWeighted(red_mask, 0.5, blue_mask, 0.5, 0)
brighter_image = cv.convertScaleAbs(combined_image, alpha=2.0, beta=100)
combined_image = cv.addWeighted(combined_image, 0.5, green_mask, 0.5, 0)
brighter_image = cv.convertScaleAbs(combined_image, alpha=2.0, beta=100)
combined_image = cv.addWeighted(combined_image, 0.5, orange_mask, 0.5, 0)
brighter_image = cv.convertScaleAbs(combined_image, alpha=2.0, beta=100)
#Blur using n * n kernel
blurred_image = cv.blur(brighter_image, (26,26))
# Apply Hough transform on the blurred image
detected_circles = cv.HoughCircles(blurred_image, cv.HOUGH_GRADIENT, 1, 200, param1=25, param2=20, minRadius=50, maxRadius=90)
#Draw circles that are detected
if detected_circles is not None:
#Convert circle parameters a, b, and r to integers
detected_circles = np.uint16(np.around(detected_circles))
coord = []
for pt in detected_circles[0, :]:
tempCoord = []
a, b, r = pt[0], pt[1], pt[2]
tempCoord.append(a)
tempCoord.append(b)
coord.append(tempCoord)
#Draw the circumference of the circle
cv.circle(image, (a,b), r, (0,255,0), 2)
#Draw a small circle (of radius 1) to show the center
cv.circle(image, (a,b), 1, (0,0,255), 3)
coords = np.array([coord],np.int32)
final_image = cv.polylines(image, [coords], isClosed=True, color=(0,0,0), thickness=2)
#final_image = cv.cvtColor(final_image, cv.COLOR_BGR2BGRA)
logo = cv.imread('opencvlogo.PNG')
resized_logo = cv.resize(logo, (56,45), interpolation = cv.INTER_AREA)
gray_resized_logo = cv.cvtColor(resized_logo, cv.COLOR_BGR2GRAY)
_, alpha = cv.threshold(gray_resized_logo, 0, 255, cv.THRESH_BINARY)
b, g, r = cv.split(resized_logo)
rgba = [b, g, r, alpha]
# NOTE(review): dst (the BGRA logo built above) is never used afterwards.
dst = cv.merge(rgba, 4)
new_logo = cv.imread('logo_clr.png')
# NOTE(review): COLOR_BGR2BGRA is missing the cv. prefix -> NameError unless
# it was star-imported; should read cv.COLOR_BGR2BGRA.
new_logo = cv.cvtColor(new_logo, COLOR_BGR2BGRA)
cv.imshow("transparent logo", new_logo)
x_offset=y_offset=0
# NOTE(review): this assigns a 4-channel BGRA image into a 3-channel BGR ROI,
# which is exactly the "could not broadcast (45,56,4) into (45,56,3)" error --
# the 4th channel is alpha. Plain slice assignment also ignores transparency
# (the black background stays); the logo must be alpha-blended into the ROI.
final_image[y_offset:y_offset+new_logo.shape[0], x_offset:x_offset+new_logo.shape[1]] = new_logo
cv.imshow("Detected Circles", final_image)
k = cv.waitKey(0)
if k == ord('s'):
filename = input("Enter the filename to save: ")
cv.imwrite(filename, final_image)
cv.destroyAllWindows()
I'm using opencv right now to display different colours by masking everything but that colour. What I want to achieve is to get all the pixel coordinates that are green, black etc.
Some screenshots:
the first image is of a black line and the second image is of a green square. I would like to be able to record the pixel coordinates that have black or green on them. Here's the main code:
# Webcam loop: report the green-pixel percentage, then display the HSV frame
# masked to the green and black ranges imported from line_following_testing.
import sys
# NOTE(review): "\P" and "\O" are not recognised escapes so this happens to
# work, but a raw string r"\Python\Opencv_codes" would be safer.
sys.path.append("\Python\Opencv_codes")
import line_following_testing as lf
from line_following_testing import lower_green as lg
from line_following_testing import upper_green as ug
from line_following_testing import lower_black as lb
from line_following_testing import upper_black as ub
import numpy as np
from time import sleep as wait
import cv2
from PIL import Image
# Single (lower, upper) HSV boundary pair for green.
green_boundaries = [
([75, 52, 60], [106, 255, 255])
]
cap = cv2.VideoCapture(0)
while True:
_, img = cap.read()
lf.percentage_calculator(green_boundaries, "green", img)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# mask the HSV frame to keep only the green range
mask = cv2.inRange(hsv, lg, ug)
green_result = cv2.bitwise_and(hsv, hsv, mask=mask)
# mask the HSV frame to keep only the black range
mask1 = cv2.inRange(hsv, lb, ub)
black_result = cv2.bitwise_and(hsv, hsv, mask=mask1)
black_canny = cv2.Canny(black_result, 700,900)
cv2.imshow("green", green_result)
cv2.imshow('black', black_result)
cv2.imshow("hsv", hsv)
cv2.imshow('img', img)
cv2.imshow('black_canny', black_canny)
k = cv2.waitKey(30) & 0xff
if k==27:
break
and here is the imported script:
import numpy as np
from time import sleep as wait
import cv2
from PIL import Image
# HSV colour ranges shared by the scripts that import this module.
lower_green = np.array([75, 52, 60])
upper_green = np.array([106, 255, 255])
# Black: any hue/saturation with low value (V <= 45).
lower_black = np.array([0,0,0])
upper_black = np.array([180,255,45])
def percentage_calculator(boundaries, colour, image):
    """Count the pixels of `image` that fall inside each (lower, upper)
    colour boundary and print the count and percentage.

    Parameters
    ----------
    boundaries : list of (lower, upper) pairs, each a 3-element channel triple
        in the same colour space as `image` (bounds are inclusive).
    colour : str label used only for the printed report.
    image : np.ndarray of shape (H, W, 3).

    Returns
    -------
    float
        Percentage for the last boundary processed (0.0 if `boundaries` is
        empty). Previously nothing was returned; callers that ignored the
        result are unaffected.
    """
    percentage = 0.0
    for (lower, upper) in boundaries:
        lower = np.array(lower)
        upper = np.array(upper)
        # Boolean per-pixel mask: True where every channel is inside the range.
        # BUGFIX: the old cv2.inRange + bitwise_and + np.count_nonzero(output)
        # counted non-zero *channel values* of the masked image, so in-range
        # pixels that were black were never counted, and tot_pixel used
        # image.size (H*W*3 elements) rather than H*W pixels.
        in_range = np.all((image >= lower) & (image <= upper), axis=-1)
        tot_pixel = image.shape[0] * image.shape[1]  # pixels, not elements
        pixel = int(np.count_nonzero(in_range))
        percentage = round(pixel * 100 / tot_pixel, 2)
        print(colour + " pixels: " + str(pixel))
        print("Total pixels: " + str(tot_pixel))
        print("Percentage of " + colour + " pixels: " + str(percentage) + "%")
    return percentage
New code:
# Second version of the webcam loop: additionally marks the extreme points of
# the black mask's bottom rows with circles.
import sys
sys.path.append("\Python\Opencv_codes")
import line_following_testing as lf
from line_following_testing import lower_green as lg
from line_following_testing import upper_green as ug
from line_following_testing import lower_black as lb
from line_following_testing import upper_black as ub
import numpy as np
from time import sleep as wait
import cv2
green_boundaries = [
([75, 52, 60], [106, 255, 255])
]
cap = cv2.VideoCapture(0)
while True:
_, img = cap.read()
lf.percentage_calculator(green_boundaries, "green", img)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# mask the HSV frame to keep only the green range
mask = cv2.inRange(hsv, lg, ug)
green_result = cv2.bitwise_and(hsv, hsv, mask=mask)
# mask the HSV frame to keep only the black range
mask1 = cv2.inRange(hsv, lb, ub)
black_result = cv2.bitwise_and(hsv, hsv, mask=mask1)
# NOTE(review): ndarray.nonzero() returns (row_indices, col_indices), so `x`
# here is actually y (rows) and vice versa, and both are relative to the
# 5-row slice, not the full frame -- the circles below end up at swapped and
# offset positions. min()/max() also raise ValueError if the slice is empty.
x, y = mask1[-5:].nonzero()
x_min = min(x)
x_max = max(x)
y_min = min(y)
y_max = max(y)
center_coords_min = (x_min, y_min)
center_coords_max = (x_max, y_max)
cv2.circle(img, center_coords_min, 10, (0, 0, 255), 2)
cv2.circle(img, center_coords_max, 10, (255, 0, 0), 2)
print(x_min, y_min)
print(x_max, y_max)
black_canny = cv2.Canny(black_result, 700,900)
cv2.imshow("green", green_result)
cv2.imshow('black', black_result)
cv2.imshow("hsv", hsv)
cv2.imshow('img', img)
cv2.imshow('black_canny', black_canny)
k = cv2.waitKey(30) & 0xff
if k==27:
break
Use the following code to find the HSV values for any color!
import cv2
import numpy as np
img_path = r"img_path"
def nothing(x):
    """No-op callback for cv2.createTrackbar (a callback argument is mandatory)."""
    return None
def crop_image_contours(image_copy):
    """Threshold `image_copy`, find its contours, and return the bounding-box
    crop of the last contour whose area exceeds 100 (None when no contour
    qualifies)."""
    result = None
    # grayscale + binary threshold so contours can be extracted
    grayscale = cv2.cvtColor(image_copy, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(grayscale, 100, 255, cv2.THRESH_BINARY)
    found, _ = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    rotated_boxes = []  # min-area boxes; collected but not returned
    for cnt in found:
        if cv2.contourArea(cnt) <= 100:
            continue
        corners = np.array(cv2.boxPoints(cv2.minAreaRect(cnt)), dtype="int")
        rotated_boxes.append(corners)
        # axis-aligned bounding rectangle; the crop of the LAST qualifying
        # contour wins, matching the original behaviour
        x, y, w, h = cv2.boundingRect(cnt)
        result = image_copy[y:y + h, x:x + w]
    return result
def crop_img(image, coord):
    """Return the sub-image of `image` spanned by coord = (x1, y1, x2, y2),
    i.e. rows y1:y2 and columns x1:x2. Slices a copy, so `image` itself is
    never mutated through the returned view."""
    x1, y1, x2, y2 = coord[0], coord[1], coord[2], coord[3]
    return image.copy()[y1:y2, x1:x2]
def check_sum(image, coord, hsv_value):
    """Crop `image` to `coord` (x1, y1, x2, y2), convert the crop to HSV, and
    score how much of it falls inside `hsv_value`.

    Parameters
    ----------
    image : BGR np.ndarray.
    coord : (x1, y1, x2, y2) crop rectangle, passed to crop_img().
    hsv_value : [[l_h, l_s, l_v], [u_h, u_s, u_v]] inclusive HSV range.

    Returns
    -------
    int-like: sum of the cv2.inRange mask (255 per in-range pixel).
    BUGFIX: the original computed hsv_score and then fell off the end,
    implicitly returning None; the score is now returned so callers can use it.
    """
    c_image = crop_img(image, coord)
    [[l_h, l_s, l_v], [u_h, u_s, u_v]] = hsv_value
    lower_range = np.array([l_h, l_s, l_v])
    upper_range = np.array([u_h, u_s, u_v])
    img_crop = cv2.cvtColor(c_image, cv2.COLOR_BGR2HSV)
    val = cv2.inRange(img_crop, lower_range, upper_range)
    hsv_score = val.sum()
    return hsv_score
# Interactive HSV range finder: six trackbars select the lower/upper H/S/V
# bounds, the masked image (or camera frame) is shown live, and the current
# range is printed whenever a slider changes.
useCamera = False
cv2.namedWindow('image')
# create trackbars for color change
cv2.createTrackbar('HMin', 'image', 0, 179, nothing) # Hue is from 0-179 for Opencv
cv2.createTrackbar('SMin', 'image', 0, 255, nothing)
cv2.createTrackbar('VMin', 'image', 0, 255, nothing)
cv2.createTrackbar('HMax', 'image', 0, 179, nothing)
cv2.createTrackbar('SMax', 'image', 0, 255, nothing)
cv2.createTrackbar('VMax', 'image', 0, 255, nothing)
# Set default value for MAX HSV trackbars.
cv2.setTrackbarPos('HMax', 'image', 179)
cv2.setTrackbarPos('SMax', 'image', 255)
cv2.setTrackbarPos('VMax', 'image', 255)
# Initialize to check if HSV min/max value changes
hMin = sMin = vMin = hMax = sMax = vMax = 0
phMin = psMin = pvMin = phMax = psMax = pvMax = 0
# Output Image to display
if useCamera:
cap = cv2.VideoCapture(0)
# Wait longer to prevent freeze for videos.
waitTime = 330
else:
img = cv2.imread(img_path)
output = img
waitTime = 33
while True:
if useCamera:
# Capture frame-by-frame
ret, img = cap.read()
output = img
# get current positions of all trackbars
hMin = cv2.getTrackbarPos('HMin', 'image')
sMin = cv2.getTrackbarPos('SMin', 'image')
vMin = cv2.getTrackbarPos('VMin', 'image')
hMax = cv2.getTrackbarPos('HMax', 'image')
sMax = cv2.getTrackbarPos('SMax', 'image')
vMax = cv2.getTrackbarPos('VMax', 'image')
# Set minimum and max HSV values to display
lower = np.array([hMin, sMin, vMin])
upper = np.array([hMax, sMax, vMax])
# Create HSV Image and threshold into a range.
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower, upper)
output = cv2.bitwise_and(img, img, mask=mask)
# Print if there is a change in HSV value
if((phMin != hMin) | (psMin != sMin) | (pvMin != vMin) | (phMax != hMax) | (psMax != sMax) | (pvMax != vMax)):
print("[[ %d , %d, %d],[%d , %d, %d]]" % (hMin, sMin, vMin, hMax, sMax,vMax))
phMin = hMin
psMin = sMin
pvMin = vMin
phMax = hMax
psMax = sMax
pvMax = vMax
# Remember the last reported range (usable by check_sum()).
clr_val = [[phMin, psMin, pvMin], [phMax, psMax, pvMax]]
cv2.imshow('image', output)
# Wait longer to prevent freeze for videos.
if cv2.waitKey(waitTime) & 0xFF == ord('q'):
break
# Release resources
if useCamera:
cap.release()
cv2.destroyAllWindows()
Here is my code for object detection using a Raspberry Pi and an Android IP camera. I'm not getting any output, and the code does not produce any errors. Can someone figure out what the error is?
import urllib.request
import cv2
import numpy as np
import datetime
import math
#global variables
width = 0
height = 0
EntranceCounter = 0
ExitCounter = 0
MinCountourArea = 3000 #Adjust this value according to your usage
BinarizationThreshold = 70 #Adjust this value according to your usage
OffsetRefLines = 150 #Adjust this value according to your usage
#Check if an object in entering in monitored zone
def CheckEntranceLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    """Return 1 when `y` sits on the entrance line (within 2 px) while still
    above the exit line, i.e. the object is entering the monitored zone;
    otherwise return 0."""
    near_entrance = abs(y - CoorYEntranceLine) <= 2
    return 1 if near_entrance and y < CoorYExitLine else 0
#Check if an object in exitting from monitored zone
def CheckExitLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    """Return 1 when `y` sits on the exit line (within 2 px) while already
    below the entrance line, i.e. the object is leaving the monitored zone;
    otherwise return 0."""
    if abs(y - CoorYExitLine) <= 2 and y > CoorYEntranceLine:
        return 1
    return 0
This is the code I have used to obtain the video stream from my IP camera.
ReferenceFrame = None
while True:
# NOTE(review): re-creating the VideoCapture on every iteration re-fetches the
# snapshot URL each time and is very slow; open the capture once, before the loop.
camera=cv2.VideoCapture("http://192.168.1.6:8080/shot.jpg")
camera.set(3,640)
camera.set(4,480)
(ret,Frame)=camera.read()
# NOTE(review): Frame is used here BEFORE the `if not ret` check below; when
# the grab fails, Frame is None and np.size raises before the break is reached.
height = np.size(Frame,0)
width = np.size(Frame,1)
#if cannot grab a frame, this program ends here.
if not ret:
break
This is the part of the code I have used to display the lines and the frame for object detection and counting.
#gray-scale conversion and Gaussian blur filter applying
GrayFrame = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
GrayFrame = cv2.GaussianBlur(GrayFrame, (21, 21), 0)
# first frame becomes the static background reference
if ReferenceFrame is None:
ReferenceFrame = GrayFrame
continue
#Background subtraction and image binarization
FrameDelta = cv2.absdiff(ReferenceFrame, GrayFrame)
FrameThresh = cv2.threshold(FrameDelta, BinarizationThreshold, 255, cv2.THRESH_BINARY)[1]
#Dilate image and find all the contours
FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
# NOTE(review): the 3-value unpack is the OpenCV 3 findContours API; OpenCV 4
# returns only (contours, hierarchy).
_, cnts, _ = cv2.findContours(FrameThresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
QttyOfContours = 0
#plot reference lines (entrance and exit lines)
# NOTE(review): height / 2 is a float in Python 3 and cv2.line requires int
# coordinates -- use integer division (height // 2) or int(...).
CoorYEntranceLine = (height / 2)-OffsetRefLines
CoorYExitLine = (height / 2)+OffsetRefLines
cv2.line(Frame, (0,CoorYEntranceLine), (width,CoorYEntranceLine), (255, 0, 0), 2)
cv2.line(Frame, (0,CoorYExitLine), (width,CoorYExitLine), (0, 0, 255), 2)
#check all found contours
for c in cnts:
#if a contour has small area, it'll be ignored
if cv2.contourArea(c) < MinCountourArea:
continue
QttyOfContours = QttyOfContours+1
#draw a rectangle "around" the object
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(Frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
#find object's centroid (also float in Python 3 -- see note above)
CoordXCentroid = (x+x+w)/2
CoordYCentroid = (y+y+h)/2
ObjectCentroid = (CoordXCentroid,CoordYCentroid)
cv2.circle(Frame, ObjectCentroid, 1, (0, 0, 0), 5)
if (CheckEntranceLineCrossing(CoordYCentroid,CoorYEntranceLine,CoorYExitLine)):
EntranceCounter += 1
if (CheckExitLineCrossing(CoordYCentroid,CoorYEntranceLine,CoorYExitLine)):
ExitCounter += 1
print ("Total countours found: "+str(QttyOfContours))
#Write entrance and exit counter values on frame and shows it
cv2.putText(Frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 0, 1), 2)
cv2.putText(Frame, "Exits: {}".format(str(ExitCounter)), (10, 70),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.imshow('Salida',Frame)
cv2.waitKey(1);
# When everything done, release the capture
# NOTE(review): the capture object is named `camera`, not `cap` -- this line
# raises NameError when reached.
cap.release()
cv2.destroyAllWindows()
The correct code
import numpy as np
import math
def nothing(x):
    """Ignore trackbar callbacks; cv2.createTrackbar requires some callable."""
    pass
# Globals and capture setup for the "correct code" version: the IP-camera
# video stream is opened ONCE here instead of per frame.
# NOTE(review): this snippet never imports cv2 (only numpy and math above).
width=0
height=0
EntranceCounter = 0
OffsetRefLines = 150
ExitCounter = 0
BinarizationThreshold = 70
MinCountourArea = 3000
cap = cv2.VideoCapture(0);
path="http://192.168.1.6:8080/video"
cap.open(path)
ReferenceFrame = None
def CheckEntranceLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    """1 if `y` is within 2 px of the entrance line and above the exit line
    (object entering the zone), else 0."""
    return int(abs(y - CoorYEntranceLine) <= 2 and y < CoorYExitLine)
#Check if an object in exitting from monitored zone
def CheckExitLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    """1 if `y` is within 2 px of the exit line and below the entrance line
    (object leaving the zone), else 0."""
    return int(abs(y - CoorYExitLine) <= 2 and y > CoorYEntranceLine)
# Trackbar setup. NOTE(review): the window creation is commented out, so these
# trackbars attach to a "Tracking" window that is never explicitly created.
#cv2.namedWindow("Tracking")
cv2.createTrackbar("LH", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LS", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LV", "Tracking", 0, 255, nothing)
cv2.createTrackbar("UH", "Tracking", 255, 255, nothing)
cv2.createTrackbar("US", "Tracking", 255, 255, nothing)
cv2.createTrackbar("UV", "Tracking", 255, 255, nothing)
while True:
#frame = cv2.imread('smarties.png')
if cap.isOpened():
rval, frame = cap.read()
while rval:
rval,frame = cap.read()
# NOTE(review): despite the name `hsv`, this converts to GRAYSCALE.
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
hsv = cv2.GaussianBlur(hsv, (21, 21), 0)
if ReferenceFrame is None:
ReferenceFrame = hsv
continue
#Background subtraction and image binarization
FrameDelta = cv2.absdiff(ReferenceFrame, hsv)
FrameThresh = cv2.threshold(FrameDelta, 25, 255, cv2.THRESH_BINARY)[1]
#Dilate image and find all the contours
FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
cnts, _ = cv2.findContours(FrameThresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
QttyOfContours = 0
#plot reference lines (entrance and exit lines)
cv2.line(frame, (0,170), (2000,170), (255, 0, 0), 5)
cv2.line(frame, (0,470), (2000,470), (0, 0, 255), 5)
#check all found contours
for c in cnts:
#if a contour has small area, it'll be ignored
if cv2.contourArea(c) < MinCountourArea:
continue
QttyOfContours = QttyOfContours+1
#draw a rectangle "around" the object
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
#find object's centroid
# NOTE(review): int(x+x+w)/2 still yields a float in Python 3; the intent was
# probably int((x+x+w)/2).
CoordXCentroid = int(x+x+w)/2
CoordYCentroid = int(y+y+h)/2
# NOTE(review): the drawn "centroid" is actually the box's top-left corner.
ObjectCentroid = (x,y)
cv2.circle(frame, ObjectCentroid, 2, (0, 255, 0), 5)
if (CheckEntranceLineCrossing(CoordYCentroid,170,470)):
EntranceCounter += 1
if (CheckExitLineCrossing(CoordYCentroid,170,470)):
ExitCounter += 1
print ("Total countours found: "+str(QttyOfContours))
#Write entrance and exit counter values on frame and shows it
cv2.putText(frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
cv2.FONT_HERSHEY_SIMPLEX, 2, (250, 0, 1), 2)
cv2.putText(frame, "Exits: {}".format(str(ExitCounter)), (10, 110),
cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)
imS = cv2.resize(frame, (400, 400)) # Resize image
#imSS = cv2.resize(mask, (200, 200))
#imSSS = cv2.resize(frame, (200, 200))
cv2.imshow("frame", imS)
#cv2.imshow("mask", imSS)
#cv2.imshow("res", imSSS)
key = cv2.waitKey(1)
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
Here is the Python code I have written:
import cv2
import argparse
# Background-subtraction demo: a MOG subtractor is applied to the webcam (or
# a video passed via -v/--video) and the raw foreground mask is shown until ESC.
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help = "path to the (optional) video file")
args = vars(ap.parse_args())
if not args.get("video", False):
cap = cv2.VideoCapture(0)
else:
cap = cv2.VideoCapture(args["video"])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
ret, frame = cap.read()
fgmask = fgbg.apply(frame)
cv2.imshow('frame',fgmask)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
How to put bounding box around the detected human outline and improve efficiency of the python code to perform background subtraction on the live video feed taken from webcam. Can someone help?
Drawing Contour Using Background Subtraction
import cv2
import argparse
# Same MOG pipeline, plus bounding boxes drawn around mid-sized foreground
# contours.
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help = "path to the (optional) video file")
args = vars(ap.parse_args())
if not args.get("video", False):
cap = cv2.VideoCapture(0)
else:
cap = cv2.VideoCapture(args["video"])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
ret, frame = cap.read()
fgmask = fgbg.apply(frame)
# NOTE(review): fgbg.apply() returns a single-channel mask, while BGR2GRAY
# expects 3 channels -- verify this conversion doesn't raise.
gray=cv2.cvtColor(fgmask,cv2.COLOR_BGR2GRAY)
ret,th1 = cv2.threshold(gray,25,255,cv2.THRESH_BINARY)
# NOTE(review): the 3-value unpack is the OpenCV 3 findContours API.
_,contours,hierarchy = cv2.findContours(th1,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
area = cv2.contourArea(cnt)
# only box contours within this area band
if area > 1000 and area < 40000:
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(fgmask,(x,y),(x+w,y+h),(255,0,0),2)
cv2.imshow('frame',fgmask)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
Drawing Contour using HSV Masking and Convex Hull
Set the values for the HSV mask.
import cv2
import argparse
# HSV-mask + convex-hull contour demo.
# NOTE(review): as pasted, this snippet never imports numpy (np) or
# matplotlib (plt), and `kernel` is used before any definition -- it cannot
# run standalone.
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help = "path to the (optional) video file")
args = vars(ap.parse_args())
if not args.get("video", False):
cap = cv2.VideoCapture(0)
else:
cap = cv2.VideoCapture(args["video"])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
ret, frame = cap.read()
# NOTE(review): cap.read() already returned an image; passing that array to
# cv2.imread (which expects a filename) is a bug -- delete this line.
frame = cv2.imread(frame)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lower = np.array([50,103,40])
upper = np.array([255,255, 255])
mask = cv2.inRange(hsv, lower, upper)
# keep everything OUTSIDE the HSV range (mask inverted)
fg = cv2.bitwise_and(frame, frame, mask=255-mask)
fg = cv2.cvtColor(fg.copy(),cv2.COLOR_HSV2BGR)
fg = cv2.cvtColor(fg,cv2.COLOR_BGR2GRAY)
fg = cv2.threshold(fg, 120,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
#plt.imshow(fg)
#plt.show()
# NOTE(review): `kernel` is not defined until `se` below -- NameError here.
fgclosing = cv2.morphologyEx(fg.copy(), cv2.MORPH_CLOSE, kernel)
se = np.ones((3,3),np.uint8)
#fgdilated = cv2.morphologyEx(fgclosing, cv2.MORPH_CLOSE,cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4,4)))
fgdilated = cv2.dilate(fgclosing, kernel = se , iterations = 8)
img = frame.copy()
ret, threshed_img = cv2.threshold(fgdilated,
127, 255, cv2.THRESH_BINARY)
# NOTE(review): the 3-value unpack is the OpenCV 3 findContours API.
image, contours, hier = cv2.findContours(threshed_img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
for cnt in contours:
#print(cv2.contourArea(cnt))
if cv2.contourArea(cnt) > 44000:
# get convex hull
hull = cv2.convexHull(cnt)
#cv2.drawContours(img, [hull], -1, (0, 0, 255), 1)
#print(hull)
(x,y,w,h) = cv2.boundingRect(cnt)
#cv2.rectangle(img, (x,y), (x+w,y+h), (255, 0, 0), 2)
contours = hull
#c1 = max(contours, key=cv2.contourArea)
hull = cv2.convexHull(cnt)
c = hull
#print(c)
cv2.drawContours(img, [hull], -1, (0, 0, 255), 1)
# determine the most extreme points along the contour
extLeft = tuple(c[c[:, :, 0].argmin()][0])
extRight = tuple(c[c[:, :, 0].argmax()][0])
extTop = tuple(c[c[:, :, 1].argmin()][0])
extBot = tuple(c[c[:, :, 1].argmax()][0])
cv2.drawContours(img, [c], -1, (0, 255, 255), 2)
cv2.circle(img, extLeft, 8, (0, 0, 255), -1)
cv2.circle(img, extRight, 8, (0, 255, 0), -1)
cv2.circle(img, extTop, 8, (255, 0, 0), -1)
cv2.circle(img, extBot, 8, (255, 255, 0), -1)
# NOTE(review): the extreme points are (x, y) pairs; these assignments read
# index 1 as the "x" -- confirm the intended axis order before relying on
# lx/rx/tx/bx downstream.
lx = extLeft[1]
ly = extLeft[0]
rx = extRight[1]
ry = extRight[0]
tx = extTop[1]
ty = extTop[0]
bx = extBot[1]
by = extBot[0]
x,y = lx,by
w,h = abs(rx-lx),abs(ty-by)
#cv2.rectangle(img, (x,y), (x+w,y+h), (255, 0, 0), 2)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,str(extLeft[0])+','+str(extLeft[1]),(extLeft), font, 2,(0, 0, 255),2,cv2.LINE_AA)
cv2.putText(img,str(extRight[0])+','+str(extRight[1]),(extRight), font, 2,(0, 255, 0),2,cv2.LINE_AA)
cv2.putText(img,str(extTop[0])+','+str(extTop[1]),(extTop), font, 2,(255, 0, 0),2,cv2.LINE_AA)
cv2.putText(img,str(extBot[0])+','+str(extBot[1]),(extBot), font, 2,(255, 255, 0),2,cv2.LINE_AA)
im = frame[tx:bx,ly:ry,:]
cx = im.shape[1]//2
cy = im.shape[0]//2
cv2.circle(im, (cx,cy), 15, (0, 255, 0))
plt.imshow(img)
plt.show()
You can use findContours.
import cv2
import argparse
# findContours on the inverted MOG mask; boxes drawn around mid-sized regions.
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help = "path to the (optional) video file")
args = vars(ap.parse_args())
if not args.get("video", False):
cap = cv2.VideoCapture(0)
else:
cap = cv2.VideoCapture(args["video"])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
ret, frame = cap.read()
fgmask = fgbg.apply(frame)
# invert so contours are traced around the non-foreground regions
mask = 255 - fgmask
# NOTE(review): the 3-value unpack is the OpenCV 3 findContours API.
_, contours, _ = cv2.findContours(
mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# convert the mask to BGR so coloured rectangles can be drawn on it
fgmask = cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR)
for contour in contours:
area = cv2.contourArea(contour)
#only show contours that match area criteria
if area > 500 and area < 20000:
rect = cv2.boundingRect(contour)
x, y, w, h = rect
cv2.rectangle(fgmask, (x, y), (x+w, y+h), (0, 255, 0), 3)
cv2.imshow('frame',fgmask)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
I have tested with the video https://github.com/opencv/opencv/blob/master/samples/data/vtest.avi
I'm a beginner with OpenCV in Python. In this code, I am trying to detect the shape, as well as the centroid, of the colored object (the detected object within the color range). Please help — thanks in advance.
CODE:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2, math
import numpy as np
# NOTE(review): Python 2 code (print statements, cv2.cv-era constants); it
# will not run under Python 3 without changes.
class ColourTracker:
def __init__(self):
# CV_WINDOW_AUTOSIZE is the legacy constant (cv2.WINDOW_AUTOSIZE in new API)
cv2.namedWindow("ColourTrackerWindow", cv2.CV_WINDOW_AUTOSIZE)
self.capture = cv2.VideoCapture(0)
# frames are shrunk by this factor before masking to save work
self.scale_down = 4
def run(self):
while True:
f, orig_img = self.capture.read()
#orig_img = cv2.flip(orig_img, 1)
#img = cv2.GaussianBlur(orig_img, (5,5), 0)
#laplacian = cv2.Laplacian(orig_img,cv2.CV_64F)
#sobelx = cv2.Sobel(orig_img,cv2.CV_64F,1,0,ksize=5)
#sobely = cv2.Sobel(orig_img,cv2.CV_64F,0,1,ksize=5)
img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2HSV)
img = cv2.resize(img, (len(orig_img[0]) / self.scale_down, len(orig_img) / self.scale_down))
boundaries = [([0, 150, 0], [5, 255, 255])]#,([50, 140, 10], [255, 255, 255]),([10, 150, 180], [255, 255, 255])]
for (lower, upper) in boundaries:
lower = np.array(lower,np.uint8)
upper = np.array(upper,np.uint8)
binary = cv2.inRange(img, lower, upper)
dilation = np.ones((15, 15), "uint8")
binary = cv2.dilate(binary, dilation)
#edge = cv2.Canny(red_binary,200,300,apertureSize = 3)
contours, hierarchy = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
max_area = 0
largest_contour = None
for idx, contour in enumerate(contours):
area = cv2.contourArea(contour)
if area > max_area:
max_area = area
largest_contour = contour
# NOTE(review): this iterates the POINTS of the largest contour, not a list
# of contours, so approxPolyDP is applied to single points -- the shape test
# below cannot work as intended; it should operate on largest_contour itself.
for cnt in largest_contour:
approx = cv2.approxPolyDP(cnt,0.01*cv2.arcLength(cnt,True),True)
print len(approx)
if len(approx)==14:
print "circle"
#cv2.drawContours(orig_img,[cnt], 0, (0, 0, 255), 2)
# NOTE(review): comparing a numpy array to None with == is ambiguous;
# use `largest_contour is not None`.
if not largest_contour == None:
moment = cv2.moments(largest_contour)
if moment["m00"] > 1000 / self.scale_down:
rect = cv2.minAreaRect(largest_contour)
rect = ((rect[0][0] * self.scale_down, rect[0][1] * self.scale_down), (rect[1][0] * self.scale_down, rect[1][1] * self.scale_down), rect[2])
#box = cv2.cv.BoxPoints(rect)
#box = np.int0(box)
#cv2.drawContours(img,[cnt],0,255,-1)
cv2.drawContours(orig_img,[cnt], 0, (0, 0, 255), 2)
cv2.imshow("ColourTrackerWindow", orig_img)
if cv2.waitKey(20) == 27:
cv2.destroyWindow("ColourTrackerWindow")
self.capture.release()
break
if __name__ == "__main__":
colour_tracker = ColourTracker()
colour_tracker.run()
:
code:
# NOTE(review): this paste lost its markdown formatting: `def init` should be
# `def __init__`, `if name == "main"` should be `if __name__ == "__main__"`,
# and the coding line below lost its leading '#'. It is also Python 2 era
# code (cv2.cv.BoxPoints). The stray backticks/quote are paste artifacts.
` #!/usr/bin/env python
-- coding: utf-8 --
import cv2, math
import numpy as np
class ColourTracker:
def init(self):
cv2.namedWindow("ColourTrackerWindow", cv2.CV_WINDOW_AUTOSIZE)
self.capture = cv2.VideoCapture(1)
self.scale_down = 4
def run(self):
while True:
f, orig_img = self.capture.read()
#orig_img = cv2.flip(orig_img, 1)
img = cv2.GaussianBlur(orig_img, (5,5), 0)
img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2HSV)
img = cv2.resize(img, (len(orig_img[0]) / self.scale_down, len(orig_img) / self.scale_down))
# three HSV boundary pairs: reds, greens/blues, and high-hue values
boundaries = [([0, 150, 150], [5, 255, 255]),
([40, 80, 10], [255, 255, 255]),
([190, 150, 100], [255, 255, 255])]
for (lower, upper) in boundaries:
lower = np.array(lower,np.uint8)
upper = np.array(upper,np.uint8)
binary = cv2.inRange(img, lower, upper)
dilation = np.ones((15, 15), "uint8")
binary = cv2.dilate(binary, dilation)
canny = cv2.Canny(binary,100,200)
contours, hierarchy = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
max_area = 0
largest_contour = None
for idx, contour in enumerate(contours):
area = cv2.contourArea(contour)
if area > max_area:
max_area = area
largest_contour = contour
# NOTE(review): `== None` on a numpy array is ambiguous; use `is not None`.
if not largest_contour == None:
moment = cv2.moments(largest_contour)
if moment["m00"] > 1000 / self.scale_down:
rect = cv2.minAreaRect(largest_contour)
rect = ((rect[0][0] * self.scale_down, rect[0][1] * self.scale_down), (rect[1][0] * self.scale_down, rect[1][1] * self.scale_down), rect[2])
box = cv2.cv.BoxPoints(rect)
box = np.int0(box)
cv2.drawContours(orig_img,[box], 0, (0, 0, 255), 2)
cv2.imshow("ColourTrackerWindow", orig_img)
cv2.imshow("SHAPE", canny)
if cv2.waitKey(20) == 27:
cv2.destroyWindow("ColourTrackerWindow")
self.capture.release()
break
if name == "main":
colour_tracker = ColourTracker()
colour_tracker.run()`'