OpenCV Background Subtraction to Improve Detection (Python)

I'm able to detect eggs on a conveyor belt, but the problem is that if there are no eggs on the belt, the algorithm still finds objects.
(Image 1: detected eggs on the conveyor. Image 2: false detections when there are no eggs on the belt.)
To reduce false detections I'm checking the radius and the area:
if (radius < 100 and radius > 20):
    ....
if area > 2200 and area < 8800:
    ....
But these checks are not enough to prevent wrong detections.
I have tried the createBackgroundSubtractorMOG method, but I failed to detect eggs after the subtraction.
The conveyor belt type can vary.
What is the best method for removing the static background (the belt) from the image?
Code:
import cv2

# 'frame' is assumed to come from a video-capture loop
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
th, bw = cv2.threshold(hsv[:, :, 2], 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
morph = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
dist = cv2.distanceTransform(morph, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
borderSize = 75
distborder = cv2.copyMakeBorder(dist, borderSize, borderSize, borderSize, borderSize,
                                cv2.BORDER_CONSTANT | cv2.BORDER_ISOLATED, 0)
gap = 10
kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2*(borderSize-gap)+1, 2*(borderSize-gap)+1))
kernel2 = cv2.copyMakeBorder(kernel2, gap, gap, gap, gap,
                             cv2.BORDER_CONSTANT | cv2.BORDER_ISOLATED, 0)
distTempl = cv2.distanceTransform(kernel2, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
nxcor = cv2.matchTemplate(distborder, distTempl, cv2.TM_CCOEFF_NORMED)
mn, mx, _, _ = cv2.minMaxLoc(nxcor)
th, peaks = cv2.threshold(nxcor, mx*0.5, 255, cv2.THRESH_BINARY)
peaks8u = cv2.convertScaleAbs(peaks)
#fgmask = self.fgbg.apply(peaks8u)
_, contours, hierarchy = cv2.findContours(peaks8u, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
peaks8u = cv2.convertScaleAbs(peaks)  # re-create; findContours modifies its input, and this is used as a mask
for cnt in contours:
    try:
        if len(cnt) >= 5:
            (x, y), radius = cv2.minEnclosingCircle(cnt)
            radius = int(radius)
            #print("radius: ", radius)
            if (radius < 100 and radius > 20):
                ellipse = cv2.fitEllipse(cnt)
                (center, axis, angle) = ellipse
                cx, cy = int(center[0]), int(center[1])
                ax1, ax2 = int(axis[0]) - 2, int(axis[1]) - 2
                orientation = int(angle)
                area = cv2.contourArea(cnt)
                if area > 2200 and area < 8800:
                    cv2.ellipse(frame, (cx, cy), (ax1, ax2), orientation, 0, 360, (255, 0, 0), 2)
                    #cv2.circle(frame, (cx,cy), 1, (0, 255, 0), 15)
                    cv2.putText(frame, str(int(area)), (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 0, 1, cv2.LINE_AA)
    except cv2.error:
        pass  # the original 'try' had no handler; skip degenerate contours

There are two basic options to get the background image:
Obtain the background image for the specific conveyor belt in advance during some setup/calibration process.
Use the data itself to derive the background, for this you have to remove the objects (eggs).
The second option can be implemented by taking the mean or median of every pixel over time. Given that most of the time each pixel shows the background, you will get a reasonably good estimate of the scene without the objects.
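For instance, a minimal sketch of the median approach (the video source, buffer size, and threshold are all assumptions, not part of the original post):
import cv2
import numpy as np

# Estimate the static background as the per-pixel median over a buffer of
# frames, then diff new frames against it. All parameters are illustrative.
cap = cv2.VideoCapture("conveyor.mp4")   # hypothetical video source

frames = []
for _ in range(50):                      # buffer size is arbitrary
    ok, frame = cap.read()
    if not ok:
        break
    frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))

background = np.median(np.stack(frames), axis=0).astype(np.uint8)

ok, frame = cap.read()
if ok:
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    diff = cv2.absdiff(gray, background)
    # pixels that differ strongly from the background are foreground (eggs)
    _, fg_mask = cv2.threshold(diff, 30, 255, cv2.THRESH_BINARY)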

Related

How to mark the labels on an image using watershed (OpenCV)

This is the output from the watershed, and I want to mark labels like 1, 2, 3, etc. on the identified regions. I have tried using cv2.putText together with cv2.boundingRect, but the labels do not end up in the center of the identified regions.
for i in range(2, ret3+1):
    a = 0
    b = 0
    mask = np.where(markers == i, np.uint8(255), np.uint8(0))
    x, y, w, h = cv2.boundingRect(mask)
    area = cv2.countNonZero(mask[y:y+h, x:x+w])
    print("Label %d at (%d, %d) size (%d x %d) area %d pixels" % (i, x, y, w, h, area))
    # Visualize
    color = np.random.randint(0, 256, 3).tolist()  # note: the original random_integers(0, 0, 3) always produced black
    output[mask != 0] = color
    cv2.putText(img2, '%d' % i, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 3.9, color, 15, cv2.LINE_AA)
plt.imshow(img2, cmap='jet')
plt.show()
The labels generated by the above code are shown in the attached image.
What I want is to mark the labels 3, 4, 5, etc. in the center of the objects identified by the watershed.
You can find the center of each region like this:
markers = cv2.watershed(img, markers)
labels = np.unique(markers)

for label in labels:
    y, x = np.nonzero(markers == label)
    cx = int(np.mean(x))
    cy = int(np.mean(y))
(Image: the result, with each label drawn at its region's center.)
Complete example:
import cv2
import numpy as np
img = cv2.imread("water_coins.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# noise removal
kernel = np.ones((3, 3), np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
# sure background area
sure_bg = cv2.dilate(opening, kernel, iterations=3)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers + 1
# Now, mark the region of unknown with zero
markers[unknown == 255] = 0
markers = cv2.watershed(img, markers)
labels = np.unique(markers)
for label in labels:
    y, x = np.nonzero(markers == label)
    cx = int(np.mean(x))
    cy = int(np.mean(y))
    color = (255, 255, 255)
    img[markers == label] = np.random.randint(0, 255, size=3)
    cv2.circle(img, (cx, cy), 2, color=color, thickness=-1)
    cv2.putText(img, f"{label}", (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, 0.35, color, 1, cv2.LINE_AA)
cv2.imwrite("out.jpg", img)
Erode every region independently, with a structuring element sized to the label text; any pixel that remains is then a safe anchor for the label.
In some cases (tiny regions), no pixel at all will remain. You then have two options:
use a pixel from the "ultimate eroded" set (the last non-empty erosion);
use some location near the region plus a leader line (but avoiding collisions is not easy).
You can also work with the inner distances of the regions (the distance transform) and pick the pixel with the maximum distance, as sketched below.
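For instance, a rough sketch of the distance-based idea, reusing img and markers from the watershed example above (not from the original answer):
# Place each label at its region's deepest interior point: the pixel with
# the maximum distance to the region boundary.
for label in np.unique(markers):
    if label <= 1:                  # skip watershed boundaries (-1) and background (1)
        continue
    region = np.uint8(markers == label) * 255
    dist = cv2.distanceTransform(region, cv2.DIST_L2, 5)
    _, _, _, max_loc = cv2.minMaxLoc(dist)    # (x, y) of the deepest point
    cv2.putText(img, str(label), max_loc, cv2.FONT_HERSHEY_SIMPLEX,
                0.35, (255, 255, 255), 1, cv2.LINE_AA)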

How to get only the water level contour with OpenCV (Python) on a Raspberry Pi

I am using a Raspberry Pi 4 (8 GB) with the Pi camera to detect the water level. I have defined a line from (0, 375) to (800, 375). If the topmost point of the water level contour goes above this line, I want to call a function. Here is my code, with an image of the setup attached. How do I get only the water level contour? Does it require Canny edge detection over the contours to get a clear water level? First I get the largest contour, then define its topmost point.
import numpy as np
import cv2
import time
from datetime import datetime

#color=(255,0,0)
color = (0, 255, 0)
thickness = 2
kernel = np.ones((2, 2), np.uint8)  # added 01/07/2021
picflag = 0  # set value to 1 once picture is taken

# function to take a still picture when the water level goes beyond the threshold
def takepicture(frame):
    currentTime = datetime.now()
    picTime = currentTime.strftime("%d.%m.%Y-%H%M%S")  # Create file name for our picture
    text = currentTime.strftime("%d.%m.%Y-%H:%M:%S")
    font = cv2.FONT_HERSHEY_SIMPLEX  # font
    org = (5, 20)  # org (the original '05' is a syntax error in Python 3)
    fontScale = 0.5  # fontScale
    color = (0, 0, 255)  # Red color in BGR
    thickness = 1  # Line thickness of 1 px
    picName = picTime + '.png'
    image = cv2.putText(frame, text, org, font, fontScale, color, thickness, cv2.LINE_AA, False)
    cv2.imwrite(picName, image)
    picflag = 1  # note: this only sets a local; it would need 'global picflag' to take effect
    return

cap = cv2.VideoCapture(0)
while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()  # ret = 1 if the video is captured; frame is the image
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #blur = cv2.GaussianBlur(gray,(21,21),0)
    gray = cv2.medianBlur(gray, 3)  # to remove salt-and-pepper noise
    #ret,thresh = cv2.threshold(gray,10,20,cv2.THRESH_BINARY_INV)
    ret, thresh = cv2.threshold(gray, 127, 127, cv2.THRESH_BINARY_INV)
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_GRADIENT, kernel)  # get outer boundaries only added 01/07/2021
    thresh = cv2.dilate(thresh, kernel, iterations=5)  # strengthen weak pixels added 01/07/2021
    img1, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    #img1,contours,hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) #added 01/07/2021
    cv2.line(frame, pt1=(0, 375), pt2=(800, 375), color=(0, 0, 255), thickness=2)  # added 01/07/2021
    if len(contours) != 0:
        c = max(contours, key=cv2.contourArea)  # find the largest contour
        #x,y,w,h = cv2.boundingRect(c) # get bounding box of largest contour
        img2 = cv2.drawContours(frame, c, -1, color, thickness)  # draw largest contour
        #img2=cv2.drawContours(frame, contours, -1, color, thickness) # draw all contours
        #img3 = cv2.rectangle(img2,(x,y),(x+w,y+h),(0,0,255),2) # draw red bounding box in img
        #center = (x, y)
        #print(center)
        left = tuple(c[c[:, :, 0].argmin()][0])
        right = tuple(c[c[:, :, 0].argmax()][0])
        top = tuple(c[c[:, :, 1].argmin()][0])
        bottom = tuple(c[c[:, :, 1].argmax()][0])
        # Draw dots onto frame
        cv2.drawContours(frame, [c], -1, (36, 255, 12), 2)
        cv2.circle(frame, left, 8, (0, 50, 255), -1)
        cv2.circle(frame, right, 8, (0, 255, 255), -1)
        cv2.circle(frame, top, 8, (255, 50, 0), -1)
        cv2.circle(frame, bottom, 8, (255, 255, 0), -1)
        #print('left: {}'.format(left))
        #print('right: {}'.format(right))
        #print(format(top))
        top_countour_point = top[1]
        print(top_countour_point)
        #print('bottom: {}'.format(bottom))
        #if ((top_countour_point <= 375) and (picflag == 0)): # checking if contour top point is above line
        #    takepicture(frame)
        #    continue
        #if ((top_countour_point > 375) and (picflag == 0)):
        #    picflag = 0
        #    continue
    # Display the resulting image
    # cv2.line(frame, pt1=(0,375), pt2=(800,375), color=(0,0,255), thickness=2) # added 01/07/2021
    #cv2.imshow('Contour',img3)
    #cv2.imshow('thresh' ,thresh)
    cv2.imshow('Contour', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
Caveats
As was pointed out in the comments, there is very little to work with based on your post. In general, I agree with s0mbre that you'd be better off with a water level sensor, and with kavko that, if you do want to use a camera, you need to better control your environment: lighting, camera angle, background, and so on.
However, that is not to say that it is not possible with your current setup, assuming that the setup is static except for the water level. As such, there are some assumptions that we can make.
Here are the steps that I took to get an approximate approach:
Gather image data
You have only provided one image, with the line already drawn on it, so that's not a lot to go on. What I did was remove the line that you added:
Fortunately, there wasn't much of the line left afterwards.
Image processing:
I have loaded the image (this would come from the code you have posted).
Using the assumptions above, I have decided to focus on a narrow slice of the image. I selected only the middle 60 pixels (1)
slc = frame[:, 300:360]
Next, I have converted it to greyscale (2)
gray_slc = cv2.cvtColor(slc, cv2.COLOR_BGR2GRAY)
I have used Canny edge detection (see docs here) to find the edges in the image (3)
edges = cv2.Canny(gray_slc, 50, 90)
After that, I applied a Hough transform to find all the line segments (related Stack Overflow answer) (4)
rho = 1
theta = np.pi / 180
threshold = 15
min_line_length = 50
max_line_gap = 20
lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)
Given that I can assume that all of the top lines are the edge of the container, I averaged the y coordinates of each line and picked the lowermost one as the water level:
y_avgs = [(line[0, 1] + line[0, 3]) / 2 for line in lines]
water_level = max(y_avgs)
Having this, I just check whether it is over the threshold you selected:
trigger_threshold = 375
if water_level > trigger_threshold:
    print("Water level is under the selected line")
Now, keep in mind that I only had the one image to go on. Considering lighting conditions, your results may vary.
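Put together, a rough end-to-end sketch of the steps above (the file name is hypothetical; the slice and all thresholds are the assumed values from the steps):
import cv2
import numpy as np

frame = cv2.imread("tank.png")            # hypothetical input image
slc = frame[:, 300:360]                   # narrow vertical slice
gray_slc = cv2.cvtColor(slc, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray_slc, 50, 90)
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 15, np.array([]), 50, 20)

if lines is not None:
    y_avgs = [(line[0, 1] + line[0, 3]) / 2 for line in lines]
    water_level = max(y_avgs)             # lowermost detected line
    if water_level > 375:                 # trigger threshold from the question
        print("Water level is under the selected line")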

How to detect edges when an object has the same colour as the background

I am tasked with building a license plate detection system, and my code does not work if the plate has the same colour as the car's paint (the background).
Take a look at this picture below.
I have tried a variety of edge detection techniques, and my finding is that they hardly work.
Here is my image processing pipeline:
Extract the gray channel from the image.
Reduce noise with iterative bilateral filtering
Detect edges with Adaptive Thresholding
Dilate the edges slightly
Locate contours based on some heuristics.
The edge detection part performed miserably around the plate region.
The pipeline works well, and I am able to detect license plates if the car's paint colour differs from the plate's.
Code
def rectangleness(hull):
rect = cv2.boundingRect(hull)
rectPoints = np.array([[rect[0], rect[1]],
[rect[0] + rect[2], rect[1]],
[rect[0] + rect[2], rect[1] + rect[3]],
[rect[0], rect[1] + rect[3]]])
intersection_area = cv2.intersectConvexConvex(np.array(rectPoints), hull)[0]
rect_area = cv2.contourArea(rectPoints)
rectangleness = intersection_area/rect_area
return rectangleness
def preprocess(image):
image = imutils.resize(image, 1000)
# Attenuate shadows by using H channel instead of converting to gray directly
imgHSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
_, _, gray = cv2.split(imgHSV)
# Reduce noise while preserve edge with Iterative Bilaterial Filtering
blur = cv2.bilateralFilter(gray, 11, 6, 6)
# Detect edges by thresholding
edge = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 5)
# Dilate edges, kernel size cannot be too big as some fonts are very closed to the edge of the plates
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
dilated = cv2.dilate(edge, kernel)
# Detect contours
edge, contours, _ = cv2.findContours(dilated, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# Loop through contours and select the most probable ones
contours = sorted(contours, key = cv2.contourArea, reverse=True)[:10]
for contour in contours:
perimeter = cv2.arcLength(contour, closed=True)
approximate = cv2.approxPolyDP(contour, 0.02*perimeter, closed=True)
if len(approximate) == 4:
(x, y, w, h) = cv2.boundingRect(approximate)
whRatio = w / h
# Heuristics:
# 1. Width of plate should at least be 2x greater than height
# 2. Width of contour should be more than 5 (eliminate false positive)
# 3. Height must not be too small
# 4. Polygon must resemble a rectangle
if (2.0 < whRatio < 6.0) and (w > 5.0) and (h > 20):
hull = cv2.convexHull(approximate, returnPoints=True)
if rectangleness(hull) > 0.75:
print("X Y {} {}".format(x, y))
print("Height: {}".format(h))
print("Width : {}".format(w))
print("Ratio : {}\n".format(w/h))
cv2.drawContours(image, [approximate], -1, (0, 255, 0), 2)
cv2.imshow("Edge", edge)
cv2.imshow("Frame", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
You can use cv2.morphologyEx to make the plate region more visible. The next step is to find contours and set reasonable conditions to extract the contour that contains the plate. If you want, you can have a look at this GitHub repository, where my friend and I show detailed steps for license plate detection and recognition.
import cv2
import numpy as np

img = cv2.imread("a.png")
imgBlurred = cv2.GaussianBlur(img, (7, 7), 0)
gray = cv2.cvtColor(imgBlurred, cv2.COLOR_BGR2GRAY)  # convert to gray
sobelx = cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize=3)   # Sobel in x to get the vertical edges
ret, threshold_img = cv2.threshold(sobelx, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
morph_img_threshold = threshold_img.copy()
element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(22, 3))
cv2.morphologyEx(src=threshold_img, op=cv2.MORPH_CLOSE, kernel=element, dst=morph_img_threshold)
cv2.imshow("img", img)
cv2.imshow("sobelx", sobelx)
cv2.imshow("morph_img_threshold", morph_img_threshold)
cv2.waitKey()
cv2.destroyAllWindows()
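A possible continuation (not from the original answer; the width and ratio bounds are assumptions) that finds contours on the closed image and keeps wide, plate-like boxes:
# Hypothetical next step: find contours on the morphed image and keep
# boxes whose shape looks like a plate. The bounds are illustrative only.
found = cv2.findContours(morph_img_threshold.copy(), cv2.RETR_EXTERNAL,
                         cv2.CHAIN_APPROX_SIMPLE)
contours = found[-2]   # works with both the OpenCV 3 and OpenCV 4 return styles
for contour in contours:
    x, y, w, h = cv2.boundingRect(contour)
    if h > 0 and 2.0 < w / h < 6.0 and w > 50:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)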

Labeled unique contours (or draw independent contours)

I'm working in Python with the OpenCV library.
I can threshold a camera capture, find contours (more than one) and draw them.
But I have a problem: I am trying to identify those contours with a unique ID or tag (for example Id: 1, Id: 2) in order to track them.
I need these contours to keep a persistent ID.
The goal is to draw a line and count more than one contour, and sometimes several nearby contours merge into one big contour.
Note: I'm working with a depth camera, so my image is an array of depths.
I've added a piece of code.
Thanks in advance.
countours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                             cv2.CHAIN_APPROX_SIMPLE)[1]
# only proceed if at least one contour was found
if len(countours) > 0:
    # find the largest contour in the mask, then use
    # it to compute the minimum enclosing circle and
    # centroid
    for (i, c) in enumerate(countours):
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        if M["m00"] > 0:
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            centerString = str(center)
            x = int(M["m10"] / M["m00"])
            y = int(M["m01"] / M["m00"])
        else:
            center = int(x), int(y)
        if radius > 10:
            # draw the circle and centroid on the frame,
            cv2.circle(frame, (int(x), int(y)), int(radius),
                       (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
            # then update the pointer trail
            if self.previous_position:
                cv2.line(self.trail, self.previous_position, center,
                         (255, 255, 255), 2)
            cv2.add(self.trail, frame, frame)
        print(center)
        self.previous_position = center
if len(countours) < 1:
    center = 0
    self.trail = numpy.zeros((self.cam_height, self.cam_width, 3),
                             numpy.uint8)
    self.previous_position = None
Two options. First, the contours are already in a Python list, so the indices of that list can be used to enumerate them; in fact you're already doing that in some sense with (i, c) in enumerate(countours). You can use the index i to 'colour' each contour with the value i on a blank image, and then you'll know which contour is which just by examining the image (a minimal sketch follows). The other, probably better, option in my opinion is to use cv2.connectedComponents() to label the binary image instead of the contours of the binary image. Before labelling, you might also try morphological operations to close up the blobs.
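A minimal sketch of that first idea, reusing the mask and countours names from the snippet above:
# Paint each contour, filled, with its index + 1 on a blank 'label image';
# pixel values then tell you which contour is which.
label_img = numpy.zeros(mask.shape[:2], numpy.int32)
for (i, c) in enumerate(countours):
    cv2.drawContours(label_img, [c], -1, i + 1, thickness=-1)
# label_img is 0 on the background and i + 1 inside the i-th contour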
EDIT: I followed your recommendation and I have new problems :)
The labels are not persistent. When I move both contours, the label numbers change.
I've added some pictures and my code.
(Image: the hand is labelled 0 and the circle 1.)
(Image: the hand is labelled 1 and the circle 0.)
# Note: cnt, last, smoothing and fps_smooth are assumed to be initialised
# before this loop (not shown in the original snippet).
while True:
    cnt += 1
    if (cnt % 10) == 0:
        now = time.time()
        dt = now - last
        fps = 10 / dt
        fps_smooth = (fps_smooth * smoothing) + (fps * (1.0 - smoothing))
        last = now

    c = dev.color
    cad = dev.cad
    dev.wait_for_frames()
    c = cv2.cvtColor(c, cv2.COLOR_RGB2GRAY)
    depth = dev.depth * dev.depth_scale * 1000
    print(depth)
    #d = cv2.applyColorMap(depth.astype(np.uint8), cv2.COLORMAP_SUMMER)
    res = np.float32(dev.depth)
    depth = 255 * np.logical_and(depth >= 100, depth <= 500)
    res2 = depth.astype(np.uint8)
    kernel = np.ones((3, 3), np.uint8)
    #res2 = cv2.blur(res2,(15,15))
    res2 = cv2.erode(res2, kernel, iterations=4)
    res2 = cv2.dilate(res2, kernel, iterations=8)

    # fill holes: flood-fill from the corner, invert, OR with the original
    im_floodfill = res2.copy()
    h, w = res2.shape[:2]
    mask2 = np.zeros((h + 2, w + 2), np.uint8)
    cv2.floodFill(im_floodfill, mask2, (0, 0), 255)
    im_floodfill_inv = cv2.bitwise_not(im_floodfill)
    im_out = res2 | im_floodfill_inv
    im_out = cv2.blur(im_out, (5, 5))

    label = cv2.connectedComponentsWithStats(im_out)
    n = label[0] - 1
    cog = np.delete(label[3], 0, 0)   # centroids, background row removed
    for i in range(n):
        # im = cv2.circle(im,(int(cog[i][0]),int(cog[i][1])), 10, (0,0,255), -1)
        cv2.putText(im_out, str(i), (int(cog[i][0]), int(cog[i][1])),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)

    cv2.imshow("Depth", res)
    cv2.imshow("OUT", im_out)
    cv2.imshow("C", res2)

Circular Object Detection in an Image

How can I detect optical circles (hollow as well as filled)? Is there an approach that can solve the segmentation issue in a generalized way?
I was not able to detect the optical circles when I applied the following approach:
import numpy as np
import cv2

image = cv2.imread("cropped.jpg")
lower_bound = np.array([0, 0, 0])
upper_bound = np.array([255, 255, 195])
blur_factor = (3, 3)
image = cv2.blur(image, blur_factor)
mask = cv2.inRange(image, lower_bound, upper_bound)
kernel = np.ones((3, 3), np.uint8)
closing = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[0]
contours = sorted(contours, key=lambda x: cv2.boundingRect(x)[0])  # sorted() also works when findContours returns a tuple
array = []
ii = 1
for c in contours:
    (x, y), r = cv2.minEnclosingCircle(c)
    center = (int(x), int(y))
    r = int(r)
    if r >= 12 and r <= 15:
        cv2.circle(image, center, r, (0, 255, 0), 2)
        array.append(center)
for i in array:
    text_color = (0, 0, 255)
    cv2.putText(image, str(ii), i, cv2.FONT_HERSHEY_SIMPLEX, 0.5, text_color, 2)
    ii = ii + 1
cv2.imshow("masked", mask)
cv2.imshow("circled", image)
cv2.waitKey(0)
Your question is not entirely clear, but I'm going to assume you want to detect the black circles in these images.
I won't delve into smoothing parameters; I don't think that's the issue here (the image is not very blurry and is easy to segment). Your code is fine for detecting components enclosed in a circle of a certain radius, but you're getting a bunch of false positives because an object enclosed in a circle is not necessarily a circle.
Consider the two following pink objects: with your code, both of them are detected with an enclosing circle (in white) of the same radius.
Since we are lucky enough to be detecting full circles, an easily recognizable shape, I would suggest checking, for each circle you detect, whether the object inside occupies a large part of that circle. This will let you eliminate false positives such as the pink line in the example above.
So, with minimal tweaking of your code, I would suggest something like:
import numpy as np
import cv2

image = cv2.imread(your_image)
lower_bound = np.array([0, 0, 0])
upper_bound = np.array([255, 255, 195])
blur_factor = (3, 3)
image = cv2.blur(image, blur_factor)
mask = cv2.inRange(image, lower_bound, upper_bound)
maskg = np.copy(mask)
kernel = np.ones((3, 3), np.uint8)
closing = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
contours = contours[1]
array = []
ii = 1
for c in contours:
    # for mask creation
    imgg = np.zeros(image.shape[0:2])
    (x, y), r = cv2.minEnclosingCircle(c)
    center = (int(x), int(y))
    r = int(r)
    if r >= 12 and r <= 18:
        # potentially interesting circle. Let's check if it's a full circle: create a mask with only this circle
        cv2.circle(imgg, center, r, 255, -1)
        # mask your thresholded image by this mask
        masked = cv2.bitwise_and(maskg.astype(np.uint8), maskg.astype(np.uint8), mask=imgg.astype(np.uint8))
        # and count how many white pixels are in this mask (divided by the mask's area)
        circle_fullness = np.sum(masked) / (np.pi * r**2 * 255)
        # if more than X% of the area is indeed an object, then you've got yourself a full circle
        if circle_fullness >= 0.8:
            # and only then do you consider it a positive
            array.append(center)
            cv2.circle(image, center, r, (0, 255, 0), 2)
for i in array:
    text_color = (0, 0, 255)
    cv2.putText(image, str(ii), i, cv2.FONT_HERSHEY_SIMPLEX, 0.5, text_color, 2)
    ii = ii + 1
cv2.imshow("masked", mask)
cv2.imshow("circled", image)
cv2.waitKey(0)
