I followed along at http://www.pyimagesearch.com/2016/10/03/bubble-sheet-multiple-choice-scanner-and-test-grader-using-omr-python-and-opencv/
Now I am trying to make this work in real time. My end goal is to detect a solid colored circle, so this looked like a good start.
My crash:

~/py: python test_grader.py
Traceback (most recent call last):
  File "test_grader.py", line 82, in <module>
    questionCnts = contours.sort_contours(questionCnts,
AttributeError: 'list' object has no attribute 'sort_contours'

The crashing call (line 82 in my file):

questionCnts = contours.sort_contours(questionCnts,
    method="top-to-bottom")[0]
questionCnts is defined as []. I don't understand how appending contours to it is supposed to add this method.
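For reference, here is a minimal sketch of the error pattern (my reading of the traceback, not confirmed): an assignment that reuses the name contours shadows the imutils module of the same name, so the later attribute lookup hits a plain list:

from imutils import contours  # 'contours' is the imutils module here

# ... later, this tuple assignment rebinds the name to a list:
contours, h = cv2.findContours(thresh, 1, 1)

# any subsequent call resolves against the list, not the module:
contours.sort_contours(questionCnts, method="top-to-bottom")
# AttributeError: 'list' object has no attribute 'sort_contours'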
Full source
from imutils.perspective import four_point_transform
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2
import copy

cap = cv2.VideoCapture(0)
ANSWER_KEY = {0: 1, 1: 4, 2: 0, 3: 3, 4: 1}

while(True):
    ret, image = cap.read()
    clone = copy.copy(image)
    gray = cv2.cvtColor(clone, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(blurred, 75, 200)
    ret, thresh = cv2.threshold(gray, 127, 255, 1)  # only black squares?
    contours, h = cv2.findContours(thresh, 1, 1)  # was 2

    # find contours in the edge map, then initialize
    # the contour that corresponds to the document
    cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
    #     cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    docCnt = None

    # ensure that at least one contour was found
    if len(cnts) > 0:
        # sort the contours according to their size in
        # descending order
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        # loop over the sorted contours
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)
            # if our approximated contour has four points,
            # then we can assume we have found the paper
            if len(approx) == 4:
                docCnt = approx
                break

    # apply a four point perspective transform to both the
    # original image and grayscale image to obtain a top-down
    # birds eye view of the paper
    paper = four_point_transform(image, docCnt.reshape(4, 2))
    warped = four_point_transform(gray, docCnt.reshape(4, 2))

    # apply Otsu's thresholding method to binarize the warped
    # piece of paper
    thresh = cv2.threshold(warped, 0, 255,
        cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

    # find contours in the thresholded image, then initialize
    # the list of contours that correspond to questions
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    questionCnts = []

    # loop over the contours
    for c in cnts:
        # compute the bounding box of the contour, then use the
        # bounding box to derive the aspect ratio
        (x, y, w, h) = cv2.boundingRect(c)
        ar = w / float(h)
        # in order to label the contour as a question, region
        # should be sufficiently wide, sufficiently tall, and
        # have an aspect ratio approximately equal to 1
        if w >= 20 and h >= 20 and ar >= 0.9 and ar <= 1.1:
            questionCnts.append(c)

    # sort the question contours top-to-bottom, then initialize
    # the total number of correct answers
    questionCnts = contours.sort_contours(questionCnts,
        method="top-to-bottom")[0]
    correct = 0

    # each question has 5 possible answers, so loop over the
    # questions in batches of 5
    for (q, i) in enumerate(np.arange(0, len(questionCnts), 5)):
        # sort the contours for the current question from
        # left to right, then initialize the index of the
        # bubbled answer
        cnts = contours.sort_contours(questionCnts[i:i + 5])[0]
        bubbled = None
        # loop over the sorted contours
        for (j, c) in enumerate(cnts):
            # construct a mask that reveals only the current
            # "bubble" for the question
            mask = np.zeros(thresh.shape, dtype="uint8")
            cv2.drawContours(mask, [c], -1, 255, -1)
            # apply the mask to the thresholded image, then
            # count the number of non-zero pixels in the
            # bubble area
            mask = cv2.bitwise_and(thresh, thresh, mask=mask)
            total = cv2.countNonZero(mask)
            # if the current total has a larger number of total
            # non-zero pixels, then we are examining the currently
            # bubbled-in answer
            if bubbled is None or total > bubbled[0]:
                bubbled = (total, j)
        # initialize the contour color and the index of the
        # *correct* answer
        color = (0, 0, 255)
        k = ANSWER_KEY[q]
        # check to see if the bubbled answer is correct
        if k == bubbled[1]:
            color = (0, 255, 0)
            correct += 1
        # draw the outline of the correct answer on the test
        cv2.drawContours(paper, [cnts[k]], -1, color, 3)

    # grab the test taker's score
    score = (correct / 5.0) * 100
    print("[INFO] score: {:.2f}%".format(score))
    cv2.putText(paper, "{:.2f}%".format(score), (10, 30),
        cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
    cv2.imshow("Original", image)
    cv2.imshow("Exam", paper)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.waitKey(0)
Related
I have images similar to the one below. First of all, I am trying to detect the curves in these images; the curves I want to capture are marked on the image. Next, I want to fit a circle to each of these curves and use the radii of the circles as my result.
But I am having trouble detecting the curves in the images. Any help is much appreciated. Thanks in advance.
Input Image
Expected
Here's the code I'm using to detect and draw the curves:
import cv2
import numpy as np
from skimage.feature import peak_local_max
from skimage.morphology import watershed
from scipy import ndimage

image = cv2.imread("croppedImage.png")
img = cv2.medianBlur(image, 13)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
    cv2.THRESH_BINARY, 45, 0)

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 3))
kernel1 = np.ones((3, 3), np.uint8)
kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
dilate = cv2.dilate(thresh, kernel1, iterations=1)
erode = cv2.erode(dilate, kernel, iterations=1)

# Remove small noise by filtering using contour area
cnts = cv2.findContours(erode, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    if cv2.contourArea(c) < 800:
        if len(c) > 0:
            cv2.drawContours(thresh, [c], 0, (0, 0, 0), -1)

# Compute Euclidean distance from every binary pixel
# to the nearest zero pixel then find peaks
distance_map = ndimage.distance_transform_edt(erode)
local_max = peak_local_max(distance_map, indices=False, min_distance=1, labels=thresh)

# Perform connected component analysis then apply Watershed
markers = ndimage.label(local_max, structure=np.ones((3, 3)))[0]
labels = watershed(-distance_map, markers, mask=erode)

# Iterate through unique labels
for label in np.unique(labels):
    if label == 0:
        continue
    # Create a mask
    mask = np.zeros(thresh.shape, dtype="uint8")
    mask[labels == label] = 255
    # Find contours and determine contour area
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    c = max(cnts, key=cv2.contourArea)
    cv2.drawContours(image, [c], -1, (36, 255, 12), -1)

cv2.imwrite('Results/drawedImage.png', image)

thresh = 155
im_bw = cv2.threshold(image, thresh, 255, cv2.THRESH_BINARY)[1]
cv2.imwrite("Results/binary.png", im_bw)
Result Image
Binary Result
From images like the one below, I can fit circles. But I don't usually have clean images like this one.
gray_blurred = cv2.GaussianBlur(img, (11, 11), 0)
ret3, thresh = cv2.threshold(gray_blurred, 100, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

# Apply Hough transform on the blurred image.
detected_circles = cv2.HoughCircles(thresh,
    cv2.HOUGH_GRADIENT, 1, 80, param1=20,
    param2=9, minRadius=120, maxRadius=200)

# Draw circles that are detected.
if detected_circles is not None:
    # Convert the circle parameters a, b and r to integers.
    detected_circles = np.uint16(np.around(detected_circles))
    for pt in detected_circles[0, :]:
        a, b, r = pt[0], pt[1], pt[2]
        # Draw the circumference of the circle.
        cv2.circle(img, (a, b), r, (0, 255, 0), 2)
        # Draw a small circle (of radius 1) to show the center.
        cv2.circle(img, (a, b), 1, (0, 0, 255), 3)
else:
    print("Circle is not found")
I'm using the following code to detect a brightly illuminated lamp. The illumination might vary.
img = cv2.imread("input_img.jpg")
rgb = img.copy()
img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
while True:
th3 = cv2.adaptiveThreshold(img_grey, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
cv2.THRESH_BINARY, 11, 2)
cv2.imshow("th3",th3)
edged = cv2.Canny(th3, 50, 100)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)
cv2.imshow("edge", edged)
cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
areaArray = []
for i, c in enumerate(cnts):
area = cv2.contourArea(c)
areaArray.append(area)
sorteddata = sorted(zip(areaArray, cnts), key=lambda x: x[0], reverse=True)
thirdlargestcontour = sorteddata[2][1]
x, y, w, h = cv2.boundingRect(thirdlargestcontour)
cv2.drawContours(rgb, thirdlargestcontour, -1, (255, 0, 0), 2)
cv2.rectangle(rgb, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow("rgb", rgb)
if cv2.waitKey(1) == 27:
break
The above code works, but:
It only gives the rectangle that encompasses the lamp. How do I get the four corner points of the lamp precisely?
How can I improve detection? At the moment I'm picking the third-largest contour, which does not guarantee that it will always be the lamp, since the environment poses challenges.
approxPolyDP works when the contour is complete, but if the contour is incomplete it does not return the proper coordinates. For instance, in the following image approxPolyDP returns the wrong coordinates.
Here is one way to do that in Python/OpenCV.
Read the input image and convert to grayscale
Use adaptive thresholding to get a thick outline of the lamp region
Find the contours
Filter the contours on area to remove extraneous regions and keep only the larger of the two (inner and outer contours of the thresholded region)
Get the perimeter
Fit the perimeter to a polygon, which should be a quadrilateral with the right choice of arguments.
Draw the contour (red) and polygon (blue) over a copy of the input image as the result
Input:
import cv2
import numpy as np

# load image
img = cv2.imread("lamp.jpg")

# convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# threshold image
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 10)
thresh = 255 - thresh

# find contours
cntrs = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cntrs = cntrs[0] if len(cntrs) == 2 else cntrs[1]

# Contour filtering -- remove small objects and those that are too large
# Keep the larger of the two contours (inner and outer contours from thresh)
area_thresh = 0
for c in cntrs:
    area = cv2.contourArea(c)
    if area > 200 and area > area_thresh:
        big_contour = c
        area_thresh = area

# draw big_contour on image in red and polygon in blue and print corners
results = img.copy()
cv2.drawContours(results, [big_contour], 0, (0, 0, 255), 1)
peri = cv2.arcLength(big_contour, True)
corners = cv2.approxPolyDP(big_contour, 0.04 * peri, True)
cv2.drawContours(results, [corners], 0, (255, 0, 0), 1)
print(len(corners))
print(corners)

# write results to disk
cv2.imwrite("lamp_thresh.jpg", thresh)
cv2.imwrite("lamp_corners.jpg", results)

cv2.imshow("THRESH", thresh)
cv2.imshow("RESULTS", results)
cv2.waitKey(0)
cv2.destroyAllWindows()
Thresholded Image:
Result Image:
Corner Coordinates:
[[[233 145]]
[[219 346]]
[[542 348]]
[[508 153]]]
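If a consistent ordering of those four corners is needed downstream (top-left, top-right, bottom-right, bottom-left), a small helper like the following can sort them; this is an addition for illustration, not part of the answer above:

import numpy as np

def order_corners(pts):
    # pts: the (4, 1, 2) array returned by cv2.approxPolyDP
    pts = pts.reshape(4, 2).astype("float32")
    s = pts.sum(axis=1)        # x + y: min = top-left, max = bottom-right
    d = np.diff(pts, axis=1)   # y - x: min = top-right, max = bottom-left
    return np.array([pts[np.argmin(s)], pts[np.argmin(d)],
                     pts[np.argmax(s)], pts[np.argmax(d)]])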
I need to extract the 12 oval shapes from the image and store them in separate variables, say 1 to 12.
The original image was as follows:
Original Image:
Output image:
Can someone help me extract all those oval shapes into different variables?
My code is:
import cv2
import numpy as np

path = r'/home/parallels/Desktop/Opencv/data/test.JPG'
i = cv2.imread(path, -1)
img_rgb = cv2.resize(i, (1280, 720))
cv2.namedWindow("Original Image", cv2.WINDOW_NORMAL)

img = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2HSV)
img = cv2.bilateralFilter(img, 9, 105, 105)
r, g, b = cv2.split(img)
equalize1 = cv2.equalizeHist(r)
equalize2 = cv2.equalizeHist(g)
equalize3 = cv2.equalizeHist(b)
equalize = cv2.merge((r, g, b))
equalize = cv2.cvtColor(equalize, cv2.COLOR_RGB2GRAY)

ret, thresh_image = cv2.threshold(equalize, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
equalize = cv2.equalizeHist(thresh_image)

canny_image = cv2.Canny(equalize, 250, 255)
canny_image = cv2.convertScaleAbs(canny_image)
kernel = np.ones((3, 3), np.uint8)
dilated_image = cv2.dilate(canny_image, kernel, iterations=1)

contours, hierarchy = cv2.findContours(dilated_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
c = contours[0]
print(cv2.contourArea(c))

final = cv2.drawContours(img, [c], -1, (255, 0, 0), 3)
mask = np.zeros(img_rgb.shape, np.uint8)
new_image = cv2.drawContours(mask, [c], 0, 255, -1)
new_image = cv2.bitwise_and(img_rgb, img_rgb, mask=equalize)

cv2.namedWindow("new", cv2.WINDOW_NORMAL)
cv2.imshow("new", new_image)
cv2.waitKey(0)
You're on the right track. After obtaining your binary image, you can perform contour area + aspect ratio filtering. We sort the contours in order from left to right using imutils.contours.sort_contours(), then filter using cv2.contourArea and the aspect ratio computed from each contour's bounding box. Contours that pass this filter are drawn onto the image and appended to an ovals list to keep track of them. Here are the results:
Filtered mask
Results
Isolated ovals
Output from oval list
Oval contours: 12
Code
import cv2
import numpy as np
from imutils import contours

# Load image, resize, convert to HSV, bilateral filter
image = cv2.imread('1.jpg')
resize = cv2.resize(image, (1280, 720))
original = resize.copy()
mask = np.zeros(resize.shape[:2], dtype=np.uint8)
hsv = cv2.cvtColor(resize, cv2.COLOR_RGB2HSV)
hsv = cv2.bilateralFilter(hsv, 9, 105, 105)

# Split into channels and equalize
r, g, b = cv2.split(hsv)
equalize1 = cv2.equalizeHist(r)
equalize2 = cv2.equalizeHist(g)
equalize3 = cv2.equalizeHist(b)
equalize = cv2.merge((r, g, b))
equalize = cv2.cvtColor(equalize, cv2.COLOR_RGB2GRAY)

# Blur and threshold for binary image
blur = cv2.GaussianBlur(equalize, (3, 3), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

# Find contours, sort from left-to-right
# Filter using contour area and aspect ratio filtering
ovals = []
num = 0
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
(cnts, _) = contours.sort_contours(cnts, method="left-to-right")
for c in cnts:
    area = cv2.contourArea(c)
    x, y, w, h = cv2.boundingRect(c)
    ar = w / float(h)
    if area > 1000 and ar < .8:
        cv2.drawContours(resize, [c], -1, (36, 255, 12), 3)
        cv2.drawContours(mask, [c], -1, (255, 255, 255), -1)
        cv2.putText(resize, str(num), (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (36, 55, 12), 2)
        ovals.append(c)
        num += 1

result = cv2.bitwise_and(original, original, mask=mask)
result[mask == 0] = (255, 255, 255)

print('Oval contours: {}'.format(len(ovals)))
cv2.imshow('equalize', equalize)
cv2.imshow('thresh', thresh)
cv2.imshow('resize', resize)
cv2.imshow('result', result)
cv2.imshow('mask', mask)
cv2.waitKey()
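Since the original ask was to store the 12 ovals in separate variables, each contour in the ovals list can be cropped out of the image by its bounding box; a short sketch using the variables above:

# Crop each detected oval into its own image
oval_images = []
for c in ovals:
    x, y, w, h = cv2.boundingRect(c)
    oval_images.append(original[y:y + h, x:x + w].copy())
# oval_images[0] ... oval_images[11] now hold the 12 isolated ovals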
I am trying to create a bubble sheet scanner, but unfortunately it does not detect the sheet when the picture is captured by a mobile phone, maybe due to the thresholding and edge detection, or due to the resolution of the picture. Unfortunately I've had little to no progress solving this issue. Any help or tips on how to solve it would be appreciated.
Here is the code:
from imutils.perspective import four_point_transform
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
    help="path to the input image")
args = vars(ap.parse_args())

ANSWER_KEY = {0: 1, 1: 4, 2: 0, 3: 3, 4: 1}

# load the image, convert it to grayscale, blur it
# slightly, then find edges
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(blurred, 75, 200)

# find contours in the edge map, then initialize
# the contour that corresponds to the document
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
docCnt = None

# ensure that at least one contour was found
if len(cnts) > 0:
    # sort the contours according to their size in
    # descending order
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    # loop over the sorted contours
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        # if our approximated contour has four points,
        # then we can assume we have found the paper
        if len(approx) == 4:
            docCnt = approx
            break

# apply a four point perspective transform to both the
# original image and grayscale image to obtain a top-down
# birds eye view of the paper
paper = four_point_transform(image, docCnt.reshape(4, 2))
warped = four_point_transform(gray, docCnt.reshape(4, 2))

# apply Otsu's thresholding method to binarize the warped
# piece of paper
thresh = cv2.threshold(warped, 0, 255,
    cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

# find contours in the thresholded image, then initialize
# the list of contours that correspond to questions
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
questionCnts = []

# loop over the contours
for c in cnts:
    # compute the bounding box of the contour, then use the
    # bounding box to derive the aspect ratio
    (x, y, w, h) = cv2.boundingRect(c)
    ar = w / float(h)
    # in order to label the contour as a question, region
    # should be sufficiently wide, sufficiently tall, and
    # have an aspect ratio approximately equal to 1
    if w >= 20 and h >= 20 and ar >= 0.9 and ar <= 1.1:
        questionCnts.append(c)

# sort the question contours top-to-bottom, then initialize
# the total number of correct answers
questionCnts = contours.sort_contours(questionCnts,
    method="top-to-bottom")[0]
correct = 0

# each question has 5 possible answers, so loop over the
# questions in batches of 5
for (q, i) in enumerate(np.arange(0, len(questionCnts), 5)):
    # sort the contours for the current question from
    # left to right, then initialize the index of the
    # bubbled answer
    cnts = contours.sort_contours(questionCnts[i:i + 5])[0]
    bubbled = None
    # loop over the sorted contours
    for (j, c) in enumerate(cnts):
        # construct a mask that reveals only the current
        # "bubble" for the question
        mask = np.zeros(thresh.shape, dtype="uint8")
        cv2.drawContours(mask, [c], -1, 255, -1)
        # apply the mask to the thresholded image, then
        # count the number of non-zero pixels in the
        # bubble area
        mask = cv2.bitwise_and(thresh, thresh, mask=mask)
        total = cv2.countNonZero(mask)
        # if the current total has a larger number of total
        # non-zero pixels, then we are examining the currently
        # bubbled-in answer
        if bubbled is None or total > bubbled[0]:
            bubbled = (total, j)
    # initialize the contour color and the index of the
    # *correct* answer
    color = (0, 0, 255)
    k = ANSWER_KEY[q]
    # check to see if the bubbled answer is correct
    if k == bubbled[1]:
        color = (0, 255, 0)
        correct += 1
    # draw the outline of the correct answer on the test
    cv2.drawContours(paper, [cnts[k]], -1, color, 3)

# grab the test taker's score
score = (correct / 5.0) * 100
print("[INFO] score: {:.2f}%".format(score))
cv2.putText(paper, "{:.2f}%".format(score), (10, 30),
    cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
cv2.imshow("Original", image)
cv2.imshow("Exam", paper)
cv2.waitKey(0)
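One adjustment often suggested for phone captures (an assumption on my part, not a verified fix for this sheet): resize the photo to a known width and try an adaptive threshold for the document-contour search, since Canny on a full-resolution, unevenly lit photo can miss the page outline. A minimal sketch:

# Sketch: normalize size and use adaptive thresholding to find the page
image = imutils.resize(cv2.imread(args["image"]), width=700)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
doc_thresh = cv2.adaptiveThreshold(blurred, 255,
    cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 21, 10)
cnts = cv2.findContours(doc_thresh, cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)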
I have this original image:
Then I applied the following code to:
Convert the original image to an HSV image.
Make a list containing all the contours, using cv2.findContours().
Remove all the contours with an area of less than 30.
Then I got the following image:
What I want is to remove the outer boundary of the leaf from the resulting image, since it is of no use; I only need the inner patches of the leaf.
This is the code I used:
import cv2
import numpy as np

img = cv2.imread('Apple___Blackrot30.JPG')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_gr = np.array([25, 0, 0])
upper_gr = np.array([90, 255, 255])
mask = cv2.inRange(hsv, lower_gr, upper_gr)
mask = ~mask
res = cv2.bitwise_and(img, img, mask=mask)
blur = cv2.bilateralFilter(res, 9, 75, 75)

im2, cont, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(each_conts) for each_conts in cont]

cont_counter = 0
for each_conts in areas:
    if each_conts < 30:
        cv2.fillPoly(im2, pts=[cont[cont_counter]], color=(0, 0, 0))
    if each_conts > 1024:
        cv2.drawContours(mask, cont[cont_counter], 0, (255, 255, 255), -1)
    cont_counter += 1

cv2.imshow('cn', im2)
You can use the concept of the hierarchy of contours to solve this problem. But there is a caveat: all your images must be the same as the one in the question.
I just added some additional stuff to your code.
Code:
img2 = img.copy()
im2, cont, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

l = []
for e, h in enumerate(hierarchy[0]):
    # print(e, h[3])
    if h[3] == -1:
        l.append(e)

for i in l:
    if cv2.contourArea(cont[i]) < 1000:
        cv2.drawContours(img2, [cont[i]], -1, (0, 255, 255), 2)

cv2.imshow('img2', img2)
Result:
hierarchy returns an array expressing the parent-child relationships of the contours. As per the documentation, it is an array of four values for each contour: [Next, Previous, First_Child, Parent].
In the hierarchy array I scanned the Parent column (the 4th column) to see which contours have no parent contour (-1), and drew those.
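A minimal sketch of reading that column (assuming the OpenCV 3.x three-value return used in the code above):

# hierarchy has shape (1, N, 4); index 3 of each row is the Parent index
im2, cont, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for idx, (nxt, prev, first_child, parent) in enumerate(hierarchy[0]):
    if parent == -1:
        print("contour", idx, "is top-level (no parent)")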
I assume you only need the inner spots inside the leaf.
Segment using the Otsu algorithm.
Apply the flood-fill operation to ensure you capture all leaf pixels.
Extract only the inner contours.
All of this can be done with OpenCV; the code is below:
import cv2
import numpy as np

def flood_fill_binary(binary):
    hh = binary.shape[0]
    ww = binary.shape[1]
    xx = 10
    yy = 10
    black = [0, 0, 0]
    binary = cv2.copyMakeBorder(binary, 10, 10, 10, 10, cv2.BORDER_CONSTANT, value=black)
    im_floodfill = binary.copy()
    h, w = binary.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)
    cv2.floodFill(im_floodfill, mask, (0, 0), 255)
    im_floodfill_inv = cv2.bitwise_not(im_floodfill)
    im_out = binary | im_floodfill_inv
    # crop away the temporary border added above
    crop_og = im_out[yy:yy + hh, xx:xx + ww]
    return crop_og

def leaf_spots_detector(image):
    image = image.astype('uint8')
    hh = image.shape[0]
    ww = image.shape[1]
    xx = 10
    yy = 10
    #kernel = np.ones((3,3),np.uint8)
    grayed_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, segmented = cv2.threshold(grayed_image, 0, 255,
        cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    segmented = flood_fill_binary(segmented)
    segmented = cv2.copyMakeBorder(segmented, xx, xx, yy, yy, cv2.BORDER_CONSTANT,
        value=[0, 0, 0])
    # findContours returns an extra value in OpenCV 3.x
    major = cv2.__version__.split('.')[0]
    if major == '3':
        ret, contours, hierarchy = cv2.findContours(segmented, cv2.RETR_TREE,
            cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, hierarchy = cv2.findContours(segmented, cv2.RETR_TREE,
            cv2.CHAIN_APPROX_SIMPLE)
    print(hierarchy.shape)
    image_external = np.zeros(segmented.shape, segmented.dtype)
    # skip contour 0 (the outer leaf boundary) and fill in the rest
    for i in range(1, len(contours)):
        #if hierarchy[0][i][3] == -1:
        cv2.drawContours(image_external, contours, i, (255, 255, 255), -1)
    image_external = image_external[yy:yy + hh, xx:xx + ww]
    #image_external = cv2.dilate(image_external,kernel,iterations = 1)
    return image_external

image = cv2.imread('image/path.png')
leaf_spots = leaf_spots_detector(image)
cv2.imshow("detected spots", leaf_spots)
cv2.waitKey(0)
cv2.destroyAllWindows()