I'm trying to extract the pad section from the following image with OpenCV.
Starting with an image like this:
I am trying to extract into an image like this:
to end up with an image something like this:
I currently have the following
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = cv2.imread('strip.png')
grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Otsu threshold to separate the strip from the black background
_, thresholded = cv2.threshold(grayscale, 0, 255, cv2.THRESH_OTSU)

# Bounding box of the non-zero pixels, then crop
bbox = cv2.boundingRect(thresholded)
x, y, w, h = bbox
foreground = img[y:y+h, x:x+w]
cv2.imwrite("output.png", foreground)
Which outputs this:
If you look closely at the upper and lower parts of the image, they appear cluttered, while the center part (your desired output) looks soft and smooth.
Since the center part is homogeneous, a smoothing filter (like an erosion) won't affect it much, whereas the upper part would change noticeably more.
As a first step, I remove the black background with a simple threshold. Then I apply a smoothing filter to the image, compute the difference between the result and the original, and threshold that difference to remove the unwanted pixels.
Then I apply some morphology to remove the noisy residue of the process. Finally, with the help of the boundingRect function, I extract the desired segment (the white contour):
background removed:
the difference image after blurring with erosion:
the difference image after an opening process and a threshold:
And finally the bounding box of the white objects:
The code I wrote (C++, OpenCV):
Mat im = imread("E:/t.jpg", 0);
resize(im, im, Size(), 0.3, 0.3); // resizing just for better visualization
Mat im1,im2, im3;
// Removing the black background:
threshold(im, im1, 50, 255, THRESH_BINARY);
vector<vector<Point>> contours_1;
findContours(im1, contours_1, RETR_CCOMP, CHAIN_APPROX_NONE);
Rect r = boundingRect(contours_1[0]);
im(r).copyTo(im);
im.copyTo(im3);
imshow("background removed", im);
// detecting the cluttered parts and cut them:
erode(im, im2, Mat::ones(3, 3, CV_8U), Point(-1, -1), 3);
im2.convertTo(im2, CV_32F);
im3.convertTo(im3, CV_32F);
subtract(im2, im3, im1);
double min, max;
minMaxIdx(im1, &min, &max);
im1 = 255*(im1 - min) / (max - min);
im1.convertTo(im1, CV_8U);
imshow("the difference image", im1);
threshold(im1, im1, 250, 255, THRESH_BINARY);
erode(im1, im1, Mat::ones(3, 3, CV_8U), Point(-1, -1), 3);
dilate(im1, im1, Mat::ones(3, 3, CV_8U), Point(-1, -1), 7);
imshow("the difference image thresholded", im1);
vector<Point> idx;
findNonZero(im1, idx);
Rect rr = boundingRect(idx);
rectangle(im, rr, Scalar(255, 255, 255), 2);
imshow("Final segmentation", im);
waitKey(0);
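For reference, here is a rough Python port of the same pipeline, since the question was asked in Python. It is an untested sketch: the file name, kernel sizes, and thresholds mirror the C++ values above and will likely need tuning, and the OpenCV 4.x findContours signature is assumed.
import cv2
import numpy as np

im = cv2.imread('strip.png', cv2.IMREAD_GRAYSCALE)

# Remove the black background: threshold, then crop to the first contour's bounding box
# (OpenCV 4.x: findContours returns two values)
_, im1 = cv2.threshold(im, 50, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(im1, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
x, y, w, h = cv2.boundingRect(contours[0])
im = im[y:y+h, x:x+w]

# Erode and subtract from the original: the homogeneous center barely
# changes, the cluttered ends change a lot
eroded = cv2.erode(im, np.ones((3, 3), np.uint8), iterations=3)
diff = eroded.astype(np.float32) - im.astype(np.float32)
diff = cv2.normalize(diff, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)

# Threshold the difference and clean it up with morphology
_, mask = cv2.threshold(diff, 250, 255, cv2.THRESH_BINARY)
mask = cv2.erode(mask, np.ones((3, 3), np.uint8), iterations=3)
mask = cv2.dilate(mask, np.ones((3, 3), np.uint8), iterations=7)

# The bounding box of the remaining white pixels is the pad region
x, y, w, h = cv2.boundingRect(cv2.findNonZero(mask))
cv2.rectangle(im, (x, y), (x + w, y + h), 255, 2)
cv2.imshow("Final segmentation", im)
cv2.waitKey(0)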
I finished a tutorial on OpenCV for finding lanes, and I am trying to apply it to finding a piece of tape on the floor. I got the code running and set the region of interest, but it only finds a few edges of the tape. I think it has to do with the thickness, but I am not 100% sure. Any help would be appreciated.
import cv2
import numpy as np
import matplotlib.pyplot as plt
def canny(image):
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    canny = cv2.Canny(blur, 50, 150)
    return canny

def display_lines(image, lines):
    line_image = np.zeros_like(image)
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line.reshape(4)
            cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)
    return line_image

def region_of_interest(image):
    height = image.shape[0]
    polygons = np.array([
        [(200, height), (400, height), (355, 0)]
    ])
    mask = np.zeros_like(image)
    cv2.fillPoly(mask, polygons, 255)
    masked_image = cv2.bitwise_and(image, mask)
    return masked_image
image = cv2.imread('tape3.jpg')
lane_image = np.copy(image)
canny_image = canny(image)
cropped_image = region_of_interest(canny_image)
lines = cv2.HoughLinesP(cropped_image, 2, np.pi/180, 100, np.array([]), minLineLength=40, maxLineGap=5)
line_image = display_lines(lane_image, lines)
combo_image = cv2.addWeighted(lane_image, 0.8, line_image, 1, 1)
# cv2 print image
print(region_of_interest(image))
cv2.imshow("result", combo_image)
cv2.waitKey(0)
This may not answer your original question, but this could be an alternate way to achieve what you're looking for.
I started by thresholding the grayscale of the image to try to isolate the tape.
Then I used OpenCV's findContours to get the segmentation points of each white blob.
The thresholding method I used is sensitive to light and shadow, so you may have to find some other thresholding method if this isn't a workable constraint. If different colored tape is a concern, you can threshold off of other values (convert to HSV or LAB and threshold off the H or B channels respectively to look for red), as in the sketch below.
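A hypothetical sketch of that HSV approach; the ranges here are guesses and would need tuning for your tape color and lighting:
import cv2
import numpy as np

img = cv2.imread('tape3.jpg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

# Red wraps around hue 0, so combine two hue ranges
lower = cv2.inRange(hsv, (0, 80, 80), (10, 255, 255))
upper = cv2.inRange(hsv, (170, 80, 80), (180, 255, 255))
mask = cv2.bitwise_or(lower, upper)

cv2.imshow("red mask", mask)
cv2.waitKey(0)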
Edit:
If you still want to use HoughLinesP, here's a working example with your picture.
First I applied canny:
Then I used the HoughLinesP function:
I've never used HoughLinesP before, so I'm not sure of the potential pitfalls, but it seems to work, though it actually creates a bunch of overlapping lines with these parameters; you'll have to play around with it a bit.
Relevant Code:
# canny
canned = cv2.Canny(gray, 591, 269)

# dilate
kernel = np.ones((3, 3), np.uint8)
canned = cv2.dilate(canned, kernel, iterations=1)

# hough
lines = cv2.HoughLinesP(canned, rho=1, theta=1*np.pi/180, threshold=30, minLineLength=10, maxLineGap=20)
Edit 2:
I looked at the documentation for the function, and the third parameter (theta) refers to the angle resolution. I think it might not have worked in your code because you didn't run dilation on the image after Canny. With a one-degree search resolution, it's not hard to imagine missing the very thin line that Canny returns. It might even be worth dilating the lines more than I did in the example, by using a larger kernel (or dilating multiple times), as sketched below.
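For instance, a sketch of a heavier dilation, assuming the same canned edge image and imports as above:
# Heavier dilation: bigger kernel, more passes
kernel = np.ones((5, 5), np.uint8)
canned = cv2.dilate(canned, kernel, iterations=2)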
I am new to OpenCV and Python, and I have been encountering a problem removing noise from my input image. I only wanted to extract the nucleus of a WBC, so I used addition to highlight the nucleus and thresholding to remove the RBCs in the image. I successfully removed the RBCs, but the platelets were not removed and some lines appeared at the borders. I also tried using dilation, erosion, opening, and closing to denoise the image, but the nucleus gets destroyed.
Here is my code:
img = cv2.imread('1.bmp')
img_2 = cv2.imread('1.bmp')

# Blend the image with itself (weights sum to > 1) to brighten it
input_img = cv2.addWeighted(img, 0.55, img_2, 0.6, 0)

# Fixed threshold on the color image to suppress the RBCs
retval, threshold = cv2.threshold(input_img, 158, 255, cv2.THRESH_BINARY)
threshold = cv2.cvtColor(threshold, cv2.COLOR_BGR2GRAY)

# Otsu threshold on the grayscale result
retval2, threshold2 = cv2.threshold(threshold, 0, 255,
                                    cv2.THRESH_BINARY + cv2.THRESH_OTSU)

# Median blur to remove small speckles
blur2 = cv2.medianBlur(threshold2, 5)
Here is the original image:
After Thresholding:
If the nucleus of a WBC, as you have highlighted, is always the largest contour before thresholding, I would suggest using findContours to store it alone and remove the smaller blobs, like this:
vector<vector<Point>> contours; // Vector for storing contours
vector<Vec4i> hierarchy;
double largest_area = 0;
int largest_contour_index = 0;
Rect bounding_rect;
// Find the contours in the image
findContours(input_img, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++) // iterate through each contour
{
    double a = contourArea(contours[i], false); // find the area of the contour
    if (a > largest_area) {
        largest_area = a;
        // Store the index of the largest contour
        largest_contour_index = i;
        // Find the bounding rectangle of the biggest contour
        bounding_rect = boundingRect(contours[i]);
    }
}
Scalar color(255, 255, 255);
// Draw the largest contour using the previously stored index.
Mat dst = Mat::zeros(input_img.size(), CV_8UC3);
drawContours(dst, contours, largest_contour_index, color, CV_FILLED, 8, hierarchy);
My code is in C++, but you can find Python examples: How to detect and draw contours using OpenCV in Python? A minimal Python equivalent is sketched below.
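For instance, a minimal Python sketch of the same idea, assuming threshold2 is the binary image from your code above and the OpenCV 4.x findContours signature:
import cv2
import numpy as np

# Find contours and keep only the one with the largest area
contours, _ = cv2.findContours(threshold2, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
largest = max(contours, key=cv2.contourArea)

# Draw it filled on a blank canvas
dst = np.zeros_like(threshold2)
cv2.drawContours(dst, [largest], -1, 255, thickness=-1)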
Currently I am working on an image processing project in which I need to split the image into several segments and then apply a watermark to each segment.
I have written code which divides the image into segments by masking. You may find the code here. Now I want to apply a watermark to each of these segments. The tutorial for watermarking can be found here.
How am I supposed to do that?
Please help as I am new to OpenCV and Python.
Feel free to ask for any further information needed to solve this.
Thank you!
EDIT
I am adding some code for your reference:
import cv2
import numpy as np
from skimage.segmentation import slic
from skimage.util import img_as_float

segment = 'segment storing location'
image = cv2.imread('image path')
segments = slic(img_as_float(image), compactness=100.0, n_segments=10, sigma=5)  # segmentation of the image
row, col, _ = image.shape
for (i, segVal) in enumerate(np.unique(segments)):
    # construct a mask for the segment
    print("[x] inspecting segment %d" % i)
    mask = np.zeros(image.shape[:2], dtype="uint8")
    mask[segments == segVal] = 255  # mask the image with a different mask per unique segment
    bb = cv2.bitwise_and(image, image, mask=mask)
    cv2.imwrite(segment + str(i) + ".png", bb)  # save the image segments created
Now after saving the segments, I need to watermark each one of them by calling them one after another. This is the code for watermarking:
import numpy as np
import cv2
import os
wk= 'D:\\watermark\\wm.png'
input_im= 'D:\\watermark\\input\\image_01.jpg'
op= 'D:\\watermark\\output'
alpha = 0.25
watermark = cv2.imread(wk, cv2.IMREAD_UNCHANGED)
(wH, wW) = watermark.shape[:2]
image = cv2.imread(input_im)
(h, w) = image.shape[:2]
image = np.dstack([image, np.ones((h, w), dtype="uint8") * 255])
overlay = np.zeros((h, w, 4), dtype="uint8")
overlay[h - wH - 500:h - 500, w - wW - 500:w - 500] = watermark #This is the line where we can set the watermark's coordinates
output = image.copy()
cv2.addWeighted(overlay,alpha, output, 1.0, 0, output)
filename = input_im[input_im.rfind(os.path.sep) + 1:]
p = os.path.sep.join((op, filename))
cv2.imwrite(p, output)
Now how can I extract the coordinates of this segment in order to watermark it?
Edit
This is what I get when the lines
cv2.circle(im, (cX, cY), 7, (255, 255, 255), -1)
cv2.putText(im, "center", (cX - 20, cY - 20),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
are kept outside the loop:
And this is what I get when they are executed within the loop:
You need to find the contour of the image (I've downloaded your segment image to try this), then compute the center of the contour.
To find the contour, you need to convert the image to grayscale and threshold it, separating the totally black pixels (the black background) from the non-black ones (your segment).
Finding the center of the segment
The only assumption I've made is that the pixel values of your segments differ from 0 (total black). This assumption may be invalid, but since you're working with photos of natural landscapes (like the one you posted), this should not be a problem.
Feel free to ask for further details.
import numpy as np
import cv2

im = cv2.imread('try.png')
imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 1, 255, 0)  # threshold to highlight non-black pixels
image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

for c in contours:
    # compute the center of the contour
    M = cv2.moments(c)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])

    # draw the contour and center of the shape on the image
    cv2.drawContours(im, [c], -1, (0, 255, 0), 2)
    cv2.circle(im, (cX, cY), 7, (255, 255, 255), -1)
    cv2.putText(im, "center", (cX - 20, cY - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

# show the image
cv2.imshow("Image", im)
cv2.waitKey(0)
This is what I get:
Placing the watermark
Let's say you have the coordinates of the center of the segment region. Knowing the size of the watermark, you can convert those coordinates to locate the point of the image where to put the upper-left corner of the watermark. In this example I assume they are (x=10, y=10).
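As a sketch of that conversion, assuming cX and cY are the segment center from the previous snippet and wH and wW are the watermark's height and width:
# Upper-left corner that centers the watermark on (cX, cY)
cx = cX - wW // 2
cy = cY - wH // 2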
I've reused the last image you posted (I'm not drawing the contours, just the watermark).
import numpy as np
import cv2 as cv
# Coordinates where to put the watermark (left upper corner)
cy = 10
cx = 10
# Reading the image
image = cv.imread("try.png")
(h,w) = image.shape[:2]
image = np.dstack([image, np.ones((h, w), dtype="uint8") * 255])
# Reading the watermark
watermark = cv.imread("watermark.png", cv.IMREAD_UNCHANGED)
(wH, wW) = watermark.shape[:2]
(B, G, R, A) = cv.split(watermark)
B = cv.bitwise_and(B, B, mask=A)
G = cv.bitwise_and(G, G, mask=A)
R = cv.bitwise_and(R, R, mask=A)
watermark = cv.merge([B, G, R, A])
# Creating the image's overlay with the watermark
overlay = np.zeros((h, w, 4), dtype="uint8")
overlay[cy:wH + cy, cx:wW + cx] = watermark
# Applying the overlay
output = image.copy()
cv.addWeighted(overlay, 0.4, output, 1.0, 0, output)
cv.imshow("out", output)
cv.waitKey()
I have an image to process. I need to detect all the circles in the image. Here it is.
And here is my code.
import cv2
import cv2.cv as cv
img = cv2.imread(imgpath)
cv2.imshow("imgorg",img)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
cv2.imshow("gray",gray)
ret,thresh = cv2.threshold(gray, 199, 255, cv.CV_THRESH_BINARY_INV)
cv2.imshow("thresh",thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()
Then I got an image like this.
And I tried to use erode and dilate to separate them into single circles, but it doesn't work. My question is how to divide these touching circles into single ones, so I can detect them.
Following #Micka's idea, I tried to process the image in the following way, and here is my code.
import cv2
import cv2.cv as cv
import numpy as np
def findcircles(img, contours):
    minArea = 300
    minCircleRatio = 0.5
    for contour in contours:
        area = cv2.contourArea(contour)
        if area < minArea:
            continue
        (x, y), radius = cv2.minEnclosingCircle(contour)
        center = (int(x), int(y))
        radius = int(radius)
        circleArea = radius * radius * cv.CV_PI
        if area / circleArea < minCircleRatio:
            continue
        cv2.circle(img, center, radius, (0, 255, 0), 2)
    cv2.imshow("imggg", img)
img = cv2.imread("a.png")
cv2.imshow("org",img)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, threshold = cv2.threshold(gray, 199, 255, cv.CV_THRESH_BINARY_INV)
cv2.imshow("threshold",threshold)
blur = cv2.medianBlur(gray,5)
cv2.imshow("blur",blur)
laplacian=cv2.Laplacian(blur,-1,ksize = 5,delta = -50)
cv2.imshow("laplacian",laplacian)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7))
dilation = cv2.dilate(laplacian,kernel,iterations = 1)
cv2.imshow("dilation", dilation)
result= cv2.subtract(threshold,dilation)
cv2.imshow("result",result)
contours, hierarchy = cv2.findContours(result,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
findcircles(gray,contours)
But I don't get the same effect as #Micka's. I don't know which step is wrong.
Adapting the idea of #jochen I came to this:
extract the full circle mask as you've done (I called it fullForeground)
from your colored image, compute the grayscale, blur it (median blur, size 7) and extract edges, for example with cv::Laplacian
This Laplacian thresholded at > 50 gives:
cv::Laplacian(blurred, lap, 0, 5); // no delta
lapMask = lap > 50; // thresholding to values > 50
This one dilated once gives:
cv::dilate(lapMask, dilatedThresholdedLaplacian, cv::Mat()); // dilate the edge mask once
Now the subtraction fullForeground - dilatedThresholdedLaplacian (the same as an and_not operation for masks of this type) gives:
From this you can compute contours. For each contour you can compute the area and compare it to the area of its enclosing circle, giving this code and result:
std::vector<std::vector<cv::Point> > contours;
cv::findContours(separated.clone(), contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
double minArea = 500;
double minCircleRatio = 0.5;
for(unsigned int i=0; i<contours.size(); ++i)
{
double cArea = cv::contourArea(contours[i]);
if(cArea < minArea) continue;
//filteredContours.push_back(contours[i]);
//cv::drawContours(input, contours, i, cv::Scalar(0,255,0), 1);
cv::Point2f center;
float radius;
cv::minEnclosingCircle(contours[i], center, radius);
double circleArea = radius*radius*CV_PI;
if(cArea/circleArea < minCircleRatio) continue;
cv::circle(input, center, radius, cv::Scalar(0,0,255),2);
}
Here is another image showing the coverage:
Hope this helps.
I think the first mistake is the value of thresh.
In your example the command cv2.threshold converts all white areas to black and everything else to white. I would suggest using a smaller value for thresh, so that all black pixels get converted to white and all white or "colored" pixels (inside the circles) get converted to black, or vice versa. The value of thresh should be a little bigger than the brightest of the black pixels.
See the OpenCV docs for threshold for more information.
Afterwards I would let OpenCV find all contours in the thresholded image and filter them for "valid" circles, e.g. by size and shape.
If that is not sufficient, you could segment the inner circle from the rest of the image: first compute thresholdImageA with all white areas colored black, then compute thresholdImageB with all the black areas being black. Afterwards combine both thresholdImageA and thresholdImageB (e.g. with numpy.logical_and) to get a binary image where only the inner circle is white and the rest black. Of course the threshold values have to be chosen wisely to get that specific result.
That way also circles where the inner part directly touches the background will be segmented.
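A minimal sketch of that two-threshold idea; the file name and threshold values are illustrative assumptions, so pick them from your image's histogram:
import cv2
import numpy as np

gray = cv2.imread('a.png', cv2.IMREAD_GRAYSCALE)

# thresholdImageA: white areas become black (keep everything darker than 180)
_, threshA = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY_INV)

# thresholdImageB: black areas become black (keep everything brighter than 50)
_, threshB = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)

# Only the inner circle parts survive both masks
inner = np.logical_and(threshA > 0, threshB > 0).astype(np.uint8) * 255
cv2.imshow("inner circles", inner)
cv2.waitKey(0)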
I'm working on an OpenCV problem to figure out which circles are filled. However, sometimes the edges of circles cause false positives. It makes me wonder if I can remove these circles by turning all pixels white that have a high R value in RGB. My approach is to create a mask of the pixels that are pinkish and then subtract the mask from the original image to remove the circles. As of now I am getting a black mask. I'm doing something wrong; please guide me.
rgb = cv2.imread(img, cv2.CV_LOAD_IMAGE_COLOR)  # note: cv2.imread returns channels in BGR order, not RGB
rgb_filtered = cv2.inRange(rgb, (200, 0, 90), (255, 110, 255))
cv2.imwrite('mask.png', rgb_filtered)
Here is my solution. Unfortunately it's in C++ too and this is how it works:
threshold the image to find out which parts are background (white paper)
find the circles by extracting contours.
now each contour is assumed to be a circle, so compute the minimum circle enclosing that contour. No parameter tuning is necessary if the input is OK (that means every circle is a single contour, so circles may not be connected by a drawing, for example)
check for each circle, whether there are more foreground (drawing) or background (white paper) pixel inside (by some ratio threshold).
int main()
{
cv::Mat colorImage = cv::imread("countFilledCircles.png");
cv::Mat image = cv::imread("countFilledCircles.png", CV_LOAD_IMAGE_GRAYSCALE);
// threshold the image!
cv::Mat thresholded;
cv::threshold(image,thresholded,0,255,CV_THRESH_BINARY_INV | CV_THRESH_OTSU);
// save threshold image for demonstration:
cv::imwrite("countFilledCircles_threshold.png", thresholded);
// find outer-contours in the image these should be the circles!
cv::Mat conts = thresholded.clone();
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours(conts, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cv::Point(0,0));
// colors in which marked/unmarked circle outlines will be drawn:
cv::Scalar colorMarked(0,255,0);
cv::Scalar colorUnMarked(0,0,255);
// each outer contour is assumed to be a circle
// TODO: you could first find the mean radius of all assumed circles and try to find outlier (dirt etc in the image)
for(unsigned int i=0; i<contours.size(); ++i)
{
cv::Point2f center;
float radius;
// find minimum circle enclosing the contour
cv::minEnclosingCircle(contours[i],center,radius);
bool marked = false;
cv::Rect circleROI(center.x - radius, center.y - radius, 2 * radius, 2 * radius); // x, y, width, height
//circleROI = circleROI & cv::Rect(0,0,image.cols, image.rows);
// count pixel inside the circle
float sumCirclePixel = 0;
float sumCirclePixelMarked = 0;
for(int j=circleROI.y; j<circleROI.y+circleROI.height; ++j)
for(int i=circleROI.x; i<circleROI.x+circleROI.width; ++i)
{
cv::Point2f current(i,j);
// test if pixel really inside the circle:
if(cv::norm(current-center) < radius)
{
// count total number of pixel in the circle
sumCirclePixel = sumCirclePixel+1.0f;
// and count all pixel in the circle which hold the segmentation threshold
if(thresholded.at<unsigned char>(j,i))
sumCirclePixelMarked = sumCirclePixelMarked + 1.0f;
}
}
const float ratioThreshold = 0.5f;
if(sumCirclePixel)
if(sumCirclePixelMarked/sumCirclePixel > ratioThreshold) marked = true;
// draw the circle for demonstration
if(marked)
cv::circle(colorImage,center,radius,colorMarked,1);
else
cv::circle(colorImage,center,radius,colorUnMarked,1);
}
cv::imshow("thres", thresholded);
cv::imshow("colorImage", colorImage);
cv::imwrite("countFilledCircles_output.png", colorImage);
cv::waitKey(-1);
}
giving me these results:
after otsu thresholding:
final image:
I've tried to come up with a solution in Python. Basically the process is the following:
Gaussian blur to reduce noise.
Otsu's threshold.
Find contours that have no parents; those contours should be the circles.
Check the ratio of white-to-black pixels inside each contour.
You may need to tune the white-ratio threshold to fit your application. I've used 0.7 as it seems a reasonable value.
import cv2
import numpy
# Read image and apply gaussian blur
img = cv2.imread("circles.png", cv2.CV_LOAD_IMAGE_GRAYSCALE)
img = cv2.GaussianBlur(img, (5, 5), 0)
# Apply OTSU thresholding and reverse it so the circles are in the foreground (white)
_, otsu = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
otsu = cv2.bitwise_not(otsu).astype("uint8")
# Find contours that have no parent
contours, hierarchy = cv2.findContours(numpy.copy(otsu), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
parent_contours = [contours[idx] for idx, val in enumerate(hierarchy[0]) if val[3] == -1]
# Loop through all contours to check the ratio of white to black pixels inside each one
filled_circles_contours = list()
for contour in parent_contours:
contour_mask = numpy.zeros(img.shape).astype("uint8")
cv2.drawContours(contour_mask, [contour], -1, 1, thickness=-1)
white_len_mask = len(cv2.findNonZero(contour_mask))
white_len_thresholded = len(cv2.findNonZero(contour_mask * otsu))
white_ratio = float(white_len_thresholded) / white_len_mask
if white_ratio > 0.7:
filled_circles_contours.append(contour)
# Show image with detected circles
cv2.drawContours(img, filled_circles_contours, -1, (0, 0, 0), thickness=2)
cv2.namedWindow("Result")
cv2.imshow("Result", img)
cv2.waitKey(0)
This is the result I obtained from applying the code above to your image:
Here's how I did it:
Convert to grayscale, apply Gaussian blur to remove noise
Apply Otsu thresholding; it's quite good at separating foreground from background, you should read about it
Apply the Hough circle transform to find candidate circles; sadly this requires heavy tuning. Maybe watershed segmentation would be a better alternative
Extract the ROI from the candidate circles, and find the ratio of black to white pixels.
Here's my sample result:
When we draw our result on original image:
Here's the sample code (sorry, it's in C++):
void findFilledCircles( Mat& img ){
Mat gray;
cvtColor( img, gray, CV_BGR2GRAY );
/* Apply some blurring to remove some noises */
GaussianBlur( gray, gray, Size(5, 5), 1, 1);
/* Otsu thresholding maximizes inter class variance, pretty good in separating background from foreground */
threshold( gray, gray, 0.0, 255.0, CV_THRESH_OTSU );
erode( gray, gray, Mat(), Point(-1, -1), 1 );
/* Sadly, this is tuning heavy, adjust the params for Hough Circles */
double dp = 1.0;
double min_dist = 15.0;
double param1 = 40.0;
double param2 = 10.0;
int min_radius = 15;
int max_radius = 22;
/* Use hough circles to find the circles, maybe we could use watershed for segmentation instead(?) */
vector<Vec3f> found_circles;
HoughCircles( gray, found_circles, CV_HOUGH_GRADIENT, dp, min_dist, param1, param2, min_radius, max_radius );
/* This is just to draw coloured circles on the 'originally' gray image */
vector<Mat> out = { gray, gray, gray };
Mat output;
merge( out, output );
float diameter = max_radius * 2;
float area = diameter * diameter;
Mat roi( max_radius, max_radius, CV_8UC3, Scalar(255, 255, 255) );
for( Vec3f circ: found_circles ) {
/* Basically we extract the region of the circles, and count the ratio of black pixels (0) and white pixels (255) */
Mat( gray, Rect( circ[0] - max_radius, circ[1] - max_radius, diameter, diameter ) ).copyTo( roi );
float filled_percentage = 1.0 - 1.0 * countNonZero( roi ) / area;
/* If more than half is filled, then maybe it's filled */
if( filled_percentage > 0.5 )
circle( output, Point2f( circ[0], circ[1] ), max_radius, Scalar( 0, 0, 255), 3 );
else
circle( output, Point2f( circ[0], circ[1] ), max_radius, Scalar( 255, 255, 0), 3 );
}
namedWindow("");
moveWindow("", 0, 0);
imshow("", output );
waitKey();
}
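For convenience, here is a rough Python sketch of the same approach. It is untested: the Hough parameters are the same guesses as above and will need tuning, and the modern cv2.HOUGH_GRADIENT flag is assumed.
import cv2
import numpy as np

img = cv2.imread('circles.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Blur, Otsu threshold, and erode, as in the C++ version
gray = cv2.GaussianBlur(gray, (5, 5), 1)
_, gray = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU)
gray = cv2.erode(gray, None, iterations=1)

max_radius = 22
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=1.0, minDist=15,
                           param1=40, param2=10, minRadius=15, maxRadius=max_radius)

if circles is not None:
    for x, y, r in circles[0]:
        # Count the ratio of filled (zero) pixels in the circle's bounding square
        roi = gray[max(0, int(y) - max_radius):int(y) + max_radius,
                   max(0, int(x) - max_radius):int(x) + max_radius]
        if roi.size == 0:
            continue
        filled_percentage = 1.0 - cv2.countNonZero(roi) / float(roi.size)
        # If more than half is filled, mark it red, otherwise cyan
        color = (0, 0, 255) if filled_percentage > 0.5 else (255, 255, 0)
        cv2.circle(img, (int(x), int(y)), max_radius, color, 3)

cv2.imshow("filled circles", img)
cv2.waitKey(0)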