I need to segment the seeds in the image below and crop them.
https://i.stack.imgur.com/ndOkX.jpg
They can be pretty close to each other and sometimes overlap, so I chose to use the watershed algorithm for this task.
My result, after drawing the contours of the returned markers, is in the image below. As you can see, I'm having trouble defining good markers: the individual seeds are outlined, but there are many inner lines that I don't want.
https://i.stack.imgur.com/BtOfj.jpg
How would I go about removing them or defining better markers?
The code I'm running:
from skimage.feature import peak_local_max
from skimage.segmentation import watershed
import matplotlib.pyplot as plt
from scipy import ndimage
import cv2 as cv
import imutils
import numpy as np
img = cv.imread("image.jpg")
blur = cv.GaussianBlur(img,(7,7),0)
#color space change
mSource_Hsv = cv.cvtColor(blur,cv.COLOR_BGR2HSV)
mMask = cv.inRange(mSource_Hsv,np.array([0,0,0]),np.array([80,255,255]))
output = cv.bitwise_and(img, img, mask=mMask)
#grayscale
img_grey = cv.cvtColor(output, cv.COLOR_BGR2GRAY)
#thresholding
ret,th1 = cv.threshold(img_grey,0,255,cv.THRESH_BINARY + cv.THRESH_OTSU)
#dist transform
D = ndimage.distance_transform_edt(th1)
#markers
localMax = peak_local_max(D, indices=False, min_distance=20, labels=th1)  # note: indices=False requires an older scikit-image; newer versions removed the argument and always return coordinates
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
#apply watershed
labels = watershed(-D, markers, mask=th1)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))
# loop over the unique labels
for label in np.unique(labels):
    if label == 0:
        continue
    # draw label on the mask
    mask = np.zeros(img_grey.shape, dtype="uint8")
    mask[labels == label] = 255
    # detect contours in the mask and grab the largest one
    cnts = cv.findContours(mask.copy(), cv.RETR_EXTERNAL,
                           cv.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv.contourArea)
    cv.drawContours(img, cnts, -1, (0, 255, 0), 2)
cv.imshow("segmented",img)
cv.waitKey(0)
You can merge every pair of contours that satisfies the following condition:
The area of the convex hull of the merged contours is close to the sum of the areas of the two individual contours.
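In code, the test for a single candidate pair looks like this (a minimal sketch; c1 and c2 stand for two hypothetical contours, and the 10% tolerance matches the threshold used in the full sample below):
def should_merge(c1, c2, tol=1.1):
    # merge only if the joint convex hull barely exceeds the two separate areas
    merged = cv.convexHull(np.vstack((c1, c2)))
    return cv.contourArea(merged) < tol * (cv.contourArea(c1) + cv.contourArea(c2))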
The following solution uses a kind of "brute force" approach that tries merging every contour with every other contour (not very efficient).
Here is a working code sample (please read the comments):
from skimage.feature import peak_local_max
from skimage.segmentation import watershed
import matplotlib.pyplot as plt
from scipy import ndimage
import cv2 as cv
import imutils
import numpy as np
img = cv.imread("image.jpg")
blur = cv.GaussianBlur(img,(7,7),0)
#color space change
mSource_Hsv = cv.cvtColor(blur,cv.COLOR_BGR2HSV)
mMask = cv.inRange(mSource_Hsv,np.array([0,0,0]),np.array([80,255,255]))
output = cv.bitwise_and(img, img, mask=mMask)
#grayscale
img_grey = cv.cvtColor(output, cv.COLOR_BGR2GRAY)
#thresholding
ret,th1 = cv.threshold(img_grey,0,255,cv.THRESH_BINARY + cv.THRESH_OTSU)
#dist transform
D = ndimage.distance_transform_edt(th1)
#markers
localMax = peak_local_max(D, indices=False, min_distance=20, labels=th1)
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
#apply watershed
labels = watershed(-D, markers, mask=th1)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))
contours = []
# loop over the unique labels, and append contours to all_cnts
for label in np.unique(labels):
    if label == 0:
        continue
    # draw label on the mask
    mask = np.zeros(img_grey.shape, dtype="uint8")
    mask[labels == label] = 255
    # detect contours in the mask and grab the largest one
    cnts = cv.findContours(mask.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv.contourArea)
    ## Ignore small contours
    #if c.shape[0] < 20:
    #    continue
    # Get convex hull of contour - it's going to help when merging contours
    hull = cv.convexHull(c)
    #cv.drawContours(img, c, -1, (0, 255, 0), 2)
    cv.drawContours(img, [hull], -1, (0, 255, 0), 2, 1)
    # Append hull to contours list
    contours.append(hull)
# Merge contours that do not increase the convex hull area by much.
# Note: this is a kind of "brute force" solution, and can be improved.
################################################################################
for i in range(len(contours)):
    c = contours[i]
    area = cv.contourArea(c)
    # Iterate over all contours from i+1 to the end of the list
    for j in range(i+1, len(contours)):
        c2 = contours[j]
        area2 = cv.contourArea(c2)
        area_sum = area + area2
        # Merge contours together
        tmp = np.vstack((c, c2))
        merged_c = cv.convexHull(tmp)
        merged_area = cv.contourArea(merged_c)
        # Replace contours c and c2 by the convex hull of the merged pair, if the total area is increased by no more than 10%
        if merged_area < area_sum*1.1:
            # Replace contour with merged one.
            contours[i] = merged_c
            contours[j] = merged_c
            c = merged_c
            area = merged_area
################################################################################
# Draw new contours in red color
for c in contours:
    # Ignore small contours
    if cv.contourArea(c) > 100:
        cv.drawContours(img, [c], -1, (0, 0, 255), 2, 1)
cv.imshow("segmented",img)
cv.waitKey(0)
cv.destroyAllWindows()
Result:
I have an image, and I've done some pre-processing on it. Below is my preprocessing:
img= cv2.imread("...my_drive...\\image_69.tif",0)
median=cv2.medianBlur(img,13)
ret, th = cv2.threshold(median, 0 , 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
kernel=np.ones((3,15),np.uint8)
closing1 = cv2.morphologyEx(th, cv2.MORPH_CLOSE, kernel, iterations=2)
kernel=np.ones((1,31),np.uint8)
closing2 = cv2.morphologyEx(closing1, cv2.MORPH_CLOSE, kernel)
kernel=np.ones((1,13),np.uint8)
opening1= cv2.morphologyEx(closing2, cv2.MORPH_OPEN, kernel, iterations=2)
So, basically I used "threshold filtering", "closing", and "opening", and the result looks like this:
Please note that type(opening1) returns numpy.ndarray, so the image at this step is a 1021 x 1024 NumPy array.
Then I labeled my image:
label_image = measure.label(opening1, connectivity=opening1.ndim)
props = measure.regionprops_table(label_image, properties=['label', "area", "coords"])
and the result looks like this
Please note that type(label_image) also returns numpy.ndarray, so the image at this step is again a 1021 x 1024 NumPy array.
As you can see, currently the image has 6 labels. Some of these labels are short, small pieces, so I tried to keep the top 2 labels based on area:
slc = label_image
rps = regionprops(slc)  # from skimage.measure
areas = [r.area for r in rps]
id = np.argsort(props["area"])[::-1]
new_slc = np.zeros_like(slc)
for i in id[:2]:
    new_slc[tuple(rps[i].coords.T)] = i+1
Now the result looks like this:
It looks like I was successful in keeping the top 2 regions (please note that by changing id[:2] you can select the thickest white layer or the thin one). Now:
What I want to do: I want to find the average thickness of these two regions
Also, please note that I know each of my pixels corresponds to 314 nm.
Can anyone here advise how I can do this task?
Original photo: below is a low-quality version of my original image, so you have a better understanding of why I did all the pre-processing.
you can also access the original photo here : https://www.mediafire.com/file/20h66aq83edy1h7/img.7z/file
Here is one way to do that in Python/OpenCV.
Read the input
Convert to gray
Threshold to binary
Get the contours and filter on area so that we have only the two primary lines
Sort by area
Select the first (smaller and thinner) contour
Draw it white filled on a black background
Get its skeleton
Get the points of the skeleton
Fit a line to the points and get the rotation angle of the skeleton
Loop over each of the two contours: draw each one white-filled on a black background, rotate it to horizontal, then get the vertical thickness of the line as the average number of nonzero pixels per column (np.count_nonzero()) and print the value.
Save intermediate images
Input:
import cv2
import numpy as np
import skimage.morphology
import skimage.transform
import math
# read image
img = cv2.imread('lines.jpg')
# convert to grayscale
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# threshold
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
# get contours
new_contours = []
img2 = np.zeros_like(thresh, dtype=np.uint8)
contour_img = thresh.copy()
contour_img = cv2.merge([contour_img,contour_img,contour_img])
contours = cv2.findContours(thresh , cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
for cntr in contours:
    area = cv2.contourArea(cntr)
    if area > 1000:
        cv2.drawContours(contour_img, [cntr], 0, (0,0,255), 1)
        cv2.drawContours(img2, [cntr], 0, (255), -1)
        new_contours.append(cntr)
# sort contours by area
cnts_sort = sorted(new_contours, key=lambda x: cv2.contourArea(x), reverse=False)
# select first (smaller) sorted contour
first_contour = cnts_sort[0]
contour_first_img = np.zeros_like(thresh, dtype=np.uint8)
cv2.drawContours(contour_first_img, [first_contour], 0, (255), -1)
# thin smaller contour
thresh1 = (contour_first_img/255).astype(np.float64)
skeleton = skimage.morphology.skeletonize(thresh1)
skeleton = (255*skeleton).clip(0,255).astype(np.uint8)
# get skeleton points
pts = np.column_stack(np.where(skeleton.transpose()==255))
# fit line to pts
(vx,vy,x,y) = cv2.fitLine(pts, cv2.DIST_L2, 0, 0.01, 0.01)
#print(vx,vy,x,y)
x_axis = np.array([1, 0]) # unit vector in the same direction as the x axis
line_direction = np.array([vx, vy]) # unit vector in the same direction as your line
dot_product = np.dot(x_axis, line_direction)
[angle_line] = (180/math.pi)*np.arccos(dot_product)
print("angle:", angle_line)
# loop over each sorted contour
# draw contour filled on black background
# rotate
# get mean thickness from np.count_non-zeros
black = np.zeros_like(thresh, dtype=np.uint8)
i = 1
for cnt in cnts_sort:
    cnt_img = black.copy()
    cv2.drawContours(cnt_img, [cnt], 0, (255), -1)
    cnt_img_rot = skimage.transform.rotate(cnt_img, angle_line, resize=False)
    thickness = np.mean(np.count_nonzero(cnt_img_rot, axis=0))
    print("line ",i,"=",thickness)
    i = i + 1
# save resulting images
cv2.imwrite('lines_thresh.jpg',thresh)
cv2.imwrite('lines_filtered.jpg',img2)
cv2.imwrite('lines_small_contour_skeleton.jpg', skeleton)
# show thresh and result
cv2.imshow("thresh", thresh)
cv2.imshow("contours", contour_img)
cv2.imshow("lines_filtered", img2)
cv2.imshow("first_contour", contour_first_img)
cv2.imshow("skeleton", skeleton)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Contour image:
Filtered contour image:
Skeleton image:
Angle (in degrees) and Thicknesses (in pixels):
angle: 3.1869032185349733
line 1 = 8.79219512195122
line 2 = 49.51609756097561
To get the thickness in nm, multiply thickness in pixels by your 314 nm/pixel.
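For example, line 1 ≈ 8.79 px × 314 nm/px ≈ 2.8 µm, and line 2 ≈ 49.52 px × 314 nm/px ≈ 15.5 µm.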
ADDITION
If I start with your tiff image, the following shows my preprocessing, which is similar to yours.
import cv2
import numpy as np
import skimage.morphology
import skimage.transform
import math
# read image
img = cv2.imread('lines.tif')
# convert to grayscale
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# threshold
thresh = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)[1]
# apply morphology
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1,5))
morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (29,1))
morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)
# get contours
new_contours = []
img2 = np.zeros_like(gray, dtype=np.uint8)
contour_img = gray.copy()
contour_img = cv2.merge([contour_img,contour_img,contour_img])
contours = cv2.findContours(morph , cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
for cntr in contours:
    area = cv2.contourArea(cntr)
    if area > 1000:
        cv2.drawContours(contour_img, [cntr], 0, (0,0,255), 1)
        cv2.drawContours(img2, [cntr], 0, (255), -1)
        new_contours.append(cntr)
# sort contours by area
cnts_sort = sorted(new_contours, key=lambda x: cv2.contourArea(x), reverse=False)
# select first (smaller) sorted contour
first_contour = cnts_sort[0]
contour_first_img = np.zeros_like(morph, dtype=np.uint8)
cv2.drawContours(contour_first_img, [first_contour], 0, (255), -1)
# thin smaller contour
thresh1 = (contour_first_img/255).astype(np.float64)
skeleton = skimage.morphology.skeletonize(thresh1)
skeleton = (255*skeleton).clip(0,255).astype(np.uint8)
# get skeleton points
pts = np.column_stack(np.where(skeleton.transpose()==255))
# fit line to pts
(vx,vy,x,y) = cv2.fitLine(pts, cv2.DIST_L2, 0, 0.01, 0.01)
#print(vx,vy,x,y)
x_axis = np.array([1, 0]) # unit vector in the same direction as the x axis
line_direction = np.array([vx, vy]) # unit vector in the same direction as your line
dot_product = np.dot(x_axis, line_direction)
[angle_line] = (180/math.pi)*np.arccos(dot_product)
print("angle:", angle_line)
# loop over each sorted contour
# draw contour filled on black background
# rotate
# get mean thickness from np.count_non-zeros
black = np.zeros_like(thresh, dtype=np.uint8)
i = 1
for cnt in cnts_sort:
    cnt_img = black.copy()
    cv2.drawContours(cnt_img, [cnt], 0, (255), -1)
    cnt_img_rot = skimage.transform.rotate(cnt_img, angle_line, resize=False)
    thickness = np.mean(np.count_nonzero(cnt_img_rot, axis=0))
    print("line ",i,"=",thickness)
    i = i + 1
# save resulting images
cv2.imwrite('lines_thresh2.jpg',thresh)
cv2.imwrite('lines_morph2.jpg',morph)
cv2.imwrite('lines_filtered2.jpg',img2)
cv2.imwrite('lines_small_contour_skeleton2.jpg', skeleton)
# show thresh and result
cv2.imshow("thresh", thresh)
cv2.imshow("morph", morph)
cv2.imshow("contours", contour_img)
cv2.imshow("lines_filtered", img2)
cv2.imshow("first_contour", contour_first_img)
cv2.imshow("skeleton", skeleton)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Morphology image:
Filtered Lines image:
Skeleton image:
Angle (degrees) and Thickness (pixels):
angle: 3.206927978669998
line 1 = 9.26171875
line 2 = 49.693359375
Use Deskew to straighten up the image.
Then count, in each column, the pixels with the color of the label you want to measure, and divide the total by the number of columns to get the average thickness.
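A rough sketch of that approach (assuming a binary mask of the single label in a hypothetical label_mask.png, and using cv2.minAreaRect to estimate the skew; the angle convention of minAreaRect varies between OpenCV versions, so the sign may need adjusting):
import cv2
import numpy as np
mask = cv2.imread("label_mask.png", 0)  # hypothetical input: one label, white on black
# estimate the skew angle from the minimum-area rectangle of the labeled pixels
coords = np.column_stack(np.where(mask > 0)[::-1]).astype(np.float32)
angle = cv2.minAreaRect(coords)[-1]
if angle > 45:  # map the (0, 90] angle reported by newer OpenCV to a small deskew angle
    angle -= 90
# deskew: rotate the mask so the label lies horizontally
h, w = mask.shape
M = cv2.getRotationMatrix2D((w // 2, h // 2), angle, 1.0)
rotated = cv2.warpAffine(mask, M, (w, h), flags=cv2.INTER_NEAREST)
# average thickness: labeled pixels per column, averaged over columns containing the label
col_counts = np.count_nonzero(rotated, axis=0)
print(col_counts[col_counts > 0].mean() * 314, "nm")  # 314 nm per pixel, from the question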
This can be done with various tools in scipy. Assume you have the image here:
import PIL.Image
import numpy as np
I = PIL.Image.open("input.jpg")
img = np.array(I).mean(axis=2)
mask = img==255 # or some kind of thresholding
imshow(mask) #note this is a binary image, the green coloring is due to some kind of rendering artifact or aliasing
If one zooms in, they can see split-up regions.
To get around that, we can dilate the mask:
from scipy import ndimage as ni
mask1 = ni.binary_dilation(mask, iterations=2)
imshow(mask1)
Now, we can find connected regions, and find the top regions with the most pixels, which should be the two lines of interest:
lab, nlab = ni.label(mask1)
max_labs = np.argsort([ (lab==i).sum() for i in range(1, nlab+1)])[::-1]+1
imshow(lab==max_labs[0])
and imshow(lab==max_labs[1])
Working with the first line as an example:
from scipy.stats import linregress
y0,x0 = np.where(lab==max_labs[0])
l0 = linregress( x0, y0)
xi, yi = np.arange(img.shape[1]), np.arange(img.shape[1])*l0.slope + l0.intercept  # note: shape[1] is the image width
plot( xi, yi, 'r--')
Interpolate along this region at different y-intercepts and compute the average signal along each line
from scipy.interpolate import RectBivariateSpline
img0 = img.copy()
img0[~(lab==max_labs[0])] = 0 # set everything outside this line region to 0
rbv = RectBivariateSpline(np.arange(img.shape[0]), np.arange(img.shape[1]), img0)
prof0 = [rbv.ev(yi+i, xi).mean() for i in np.arange(-300,300)] # pick a wide window here (-300,300), can be more technical, but not necessary
plot(prof0)
Use your favorite method to compute the FWHM of this profile, then multiply by your pixel-to-nanometers factor.
I would just use a Gaussian fit to compute fwhm
xvals = np.arange(len(prof0))
yvals = np.array(prof0)
def func(p, xvals, yvals):
    mu, var, amp = p
    model = np.exp(-(xvals-mu)**2/2/var)*amp
    resid = (model-yvals)**2
    return resid.sum()
from scipy.optimize import minimize
x0 = 300,200,255 # initial estimate of mu, variance, amplitude
fit_gauss = minimize(func, x0=x0, args=(xvals, yvals), method='Nelder-Mead')
mu, var, amp = fit_gauss.x
fwhm = 2.355 * np.sqrt(var)  # 2*sqrt(2*ln 2) ≈ 2.355 converts sigma to FWHM
# display using matplotlib plot /hlines
plot( xvals, yvals)
plot( xvals, amp*np.exp(-(xvals-mu)**2/2/var) )
hlines(amp*0.5, mu-fwhm/2., mu+fwhm/2, color='r')
legend(("profile","fit gauss","fwhm=%.2f pix" % fwhm))
Finally, thickness = fwhm*314 nm, or about 13 microns here (an FWHM of roughly 41 px × 314 nm/px).
Following the exact same approach for the second line (lab==max_labs[1]) gives a thickness of about 2.2 microns:
Note: I was using interactive plotting to do this example, hence the bare calls to imshow, plot, etc. are meant mostly as a reference for the reader. One may need to take extra steps to recreate the exact images I've uploaded (zooming, etc.).
I have images similar to the one below. First of all, I am trying to detect the curves in these images. The curves I want to capture are marked on the image. Next, I want to fit circles to these curves and will use the radii of those circles as my result.
But I am having trouble detecting the curves in the images. Any help is much appreciated. Thanks in advance.
Input Image
Expected
Here's the code I'm using to detect and draw the curves:
import cv2
import numpy as np
from skimage.feature import peak_local_max
from skimage.morphology import watershed  # in newer scikit-image: from skimage.segmentation import watershed
from scipy import ndimage
image = cv2.imread("croppedImage.png")
img = cv2.medianBlur(image,13)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                               cv2.THRESH_BINARY, 45, 0)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,3))
kernel1 = np.ones((3, 3), np.uint8)
kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
dilate = cv2.dilate(thresh, kernel1, iterations=1)
erode = cv2.erode(dilate, kernel,iterations=1)
# Remove small noise by filtering using contour area
cnts = cv2.findContours(erode, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    if cv2.contourArea(c) < 800:
        if len(c) > 0:
            cv2.drawContours(thresh, [c], 0, (0,0,0), -1)
# Compute Euclidean distance from every binary pixel
# to the nearest zero pixel then find peaks
distance_map = ndimage.distance_transform_edt(erode)
local_max = peak_local_max(distance_map, indices=False, min_distance=1, labels=thresh)
# Perform connected component analysis then apply Watershed
markers = ndimage.label(local_max, structure=np.ones((3, 3)))[0]
labels = watershed(-distance_map, markers, mask=erode)
# Iterate through unique labels
for label in np.unique(labels):
    if label == 0:
        continue
    # Create a mask
    mask = np.zeros(thresh.shape, dtype="uint8")
    mask[labels == label] = 255
    # Find contours and determine contour area
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    c = max(cnts, key=cv2.contourArea)
    cv2.drawContours(image, [c], -1, (36,255,12), -1)
cv2.imwrite('Results/drawedImage.png',image)
thresh = 155
im_bw = cv2.threshold(image, thresh, 255, cv2.THRESH_BINARY)[1]
cv2.imwrite("Results/binary.png",im_bw)
Result Image
Binary Result
From images like the one below, I can fit circles. But I don't have clean images like this one.
gray_blurred = cv2.GaussianBlur(img,(11,11),0)
ret3,thresh= cv2.threshold(gray_blurred,100,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# Apply Hough transform on the blurred image.
detected_circles = cv2.HoughCircles(thresh,
                                    cv2.HOUGH_GRADIENT, 1, 80, param1 = 20,
                                    param2 = 9, minRadius = 120, maxRadius = 200)
# Draw circles that are detected.
if detected_circles is not None:
    # Convert the circle parameters a, b and r to integers.
    detected_circles = np.uint16(np.around(detected_circles))
    for pt in detected_circles[0, :]:
        a, b, r = pt[0], pt[1], pt[2]
        # Draw the circumference of the circle.
        cv2.circle(img, (a, b), r, (0, 255, 0), 2)
        # Draw a small circle (of radius 1) to show the center.
        cv2.circle(img, (a, b), 1, (0, 0, 255), 3)
else:
    print("Circle is not found")
I followed a tutorial on watershed segmentation and used it to segment each red blood cell in an image. I'm new to OpenCV, and I would like to know whether it is possible to draw circles around the cells using watershed segmentation. If so, could you please show how it is done?
Original image
Output of Watershed segmentation
Code is given below
import numpy as np
import cv2
from matplotlib import pyplot as plt
def fillHoles(otsuImg):
    # find contours
    contours, _ = cv2.findContours(otsuImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # filter out contours by size
    small_cntrs = []
    for con in contours:
        area = cv2.contourArea(con)
        # print(area)
        if area < 1000: # size threshold
            small_cntrs.append(con)
    cv2.drawContours(otsuImg, small_cntrs, -1, 0, -1)
# load the image
img = cv2.imread('resources/rbc2.png')
img_pyr = cv2.pyrMeanShiftFiltering(img, 21, 51)
img_median = cv2.medianBlur(img_pyr, 9)
img_gray = cv2.cvtColor(img_median, cv2.COLOR_BGR2GRAY)
ret, img_thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# fill holes of RBC
fillHoles(img_thresh)
# invert the image
img_thresh = cv2.bitwise_not(img_thresh)
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(img_thresh,cv2.MORPH_OPEN,kernel, iterations=2)
# sure background area
sure_bg = cv2.dilate(opening,kernel,iterations=3)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2, 5)
ret, sure_fg = cv2.threshold(dist_transform,0.1*dist_transform.max(),255,0)
# _, sure_fg = cv2.threshold(np.uint8(dist_transform), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
markers = cv2.watershed(img,markers)
img[markers == -1] = [255,0,0]
cv2.imshow('markers2', np.uint8(markers))
cv2.imshow('Final output', img)
cv2.waitKey(0)
I had the same problem and ended up using skimage.segmentation.watershed for the last step, to get the labels I could use to calculate the contours of the watershed cells. Once you have the contours, you can calculate and plot the enclosing circle as usual:
# your code above
ret, markers = cv2.connectedComponents(sure_fg)
from skimage.segmentation import watershed
labels = watershed(-dist_transform,
                   markers,
                   mask=img_thresh,
                   watershed_line=False)
watershed_contours = list(map(lambda l: cv2.findContours((labels == l).astype(np.uint8),
                                                         cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0][0],
                              np.unique(labels)[1:]))
output = img.copy()
for contour in watershed_contours:
    (x,y),radius = cv2.minEnclosingCircle(contour)
    center = (int(x),int(y))
    radius = int(radius)
    cv2.circle(output,center,radius,(0,255,0),2)
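To view the result (a small usage note; the window name is arbitrary):
cv2.imshow("enclosing circles", output)
cv2.waitKey(0)
cv2.destroyAllWindows()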
I'm having trouble segmenting the soil grains shown in the image using this algorithm: it includes all particles as one object and calculates its area, but that is not what I want. I would like to segment each particle individually. What can I modify in the following algorithm to improve its performance at detecting each grain individually?
The second image is the result of using this algorithm.
import cv2
import numpy as np
from skimage.feature import peak_local_max
from skimage.morphology import watershed  # in newer scikit-image: from skimage.segmentation import watershed
from scipy import ndimage
# Load in image, convert to gray scale, and Otsu's threshold
image = cv2.imread('sample64pxfor1mm.jpg')
image = cv2.bilateralFilter(image,15,75,75)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
image = cv2.morphologyEx(image,cv2.MORPH_CLOSE,kernel)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
cv2.imshow('thresh',thresh)
cv2.waitKey(0)
# Compute Euclidean distance from every binary pixel
# to the nearest zero pixel then find peaks
distance_map = ndimage.distance_transform_edt(thresh)
local_max = peak_local_max(distance_map, indices=False, min_distance=20, labels=thresh)
# Perform connected component analysis then apply Watershed
markers = ndimage.label(local_max, structure=np.ones((3, 3)))[0]
labels = watershed(-distance_map, markers, mask=thresh)
# Iterate through unique labels
n = 0
total_area = []
for label in np.unique(labels):
    if label == 0:
        continue
    # Create a mask
    mask = np.zeros(gray.shape, dtype="uint8")
    mask[labels == label] = 255
    # Find contours and determine contour area
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    c = max(cnts, key=cv2.contourArea)
    area = cv2.contourArea(c)
    total_area.append(area)
    n += 1
    print('Grain number', n, 'has area =', area)
    # total_area += area
    cv2.drawContours(image, [c], -1, (36,255,12), 3)
cv2.imshow('image',image)
cv2.waitKey(0)
I want to detect shapes with OpenCV and Python, so I chose Contour Features. But now I have a problem: how can I distinguish between a square and a lozenge using OpenCV and Python? If there is another method, can you tell me, please? My image looks like this:
I add my code:
#-*- coding: utf-8 -*-
import cv2
import numpy as np
from pyimagesearch.shapedetector import ShapeDetector
import argparse
import imutils
from scipy import ndimage
import math
import matplotlib.pyplot as plt
from skimage import io, morphology, img_as_bool, segmentation
global limit
# cv2.threshold(src, thresh, maxval, type[, dst])
import math
def angle(pt1, pt2):
    x1, y1 = pt1
    x2, y2 = pt2
    inner_product = x1*x2 + y1*y2
    len1 = math.hypot(x1, y1)
    len2 = math.hypot(x2, y2)
    return math.acos(inner_product/(len1*len2))
def calculate(pt, ls):
    i = 2
    for x in ls:
        pt2 = (x, i)
        i = i+1
        ang = angle(pt, pt2)*180/math.pi
        ang = ang * (-1)
        print(ang)
Image = cv2.imread("114.png")
# Extraction of Blue channel
b = Image[:,:,0]
# Callback function for the trackbar (does not do any work)
def nothing(*arg):
    pass
# Generate trackbar Window Name
TrackbarName = "Trackbar"
# Make Window and Trackbar
cv2.namedWindow("window", cv2.WINDOW_NORMAL)
cv2.createTrackbar(TrackbarName, "window", 0, 250, nothing)
img_threshed = np.zeros(b.shape, np.uint8)
ret,img_threshed = cv2.threshold(b,168,255,cv2.THRESH_BINARY)
cv2.imshow("window55", img_threshed)
# Expanding borders of the objects
kernel = np.ones((9, 9),np.uint8)
img_dilated = cv2.dilate(img_threshed, kernel)
cv2.namedWindow("Dilated Blue Channel", cv2.WINDOW_NORMAL)
cv2.imshow("Dilated Blue Channel", img_dilated)
# Retrieving contours by subtraction base objects from the expanded objects
img_contours = img_dilated - img_threshed
cv2.namedWindow("Contours", cv2.WINDOW_NORMAL)
cv2.imshow("Contours", img_contours)
median = cv2.medianBlur(img_contours,3)
cv2.imshow("median img_threshed", median)
#_, contours0, hierarchy = cv2.findContours( median, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#cnts = [cv2.approxPolyDP(cnt, 2, True) for cnt in contours0]
gray = cv2.imread('114.png')
#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
# apply Canny edge detection using a wide threshold, tight
# threshold, and automatically determined threshold
wide = cv2.Canny(blurred, 90, 150)
cnts = cv2.findContours(img_contours, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
#----Find contour in the image----
_, contours, hierarchy = cv2.findContours(img_contours, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
# loop over the contours
for c in cnts:
    #----Draw a rectangle having minimum area around it using Contour features as you mentioned----
    rect = cv2.minAreaRect(c) #---I used cnt[0] since there is only one contour in the image----
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    im = cv2.drawContours(Image, [box], 0, (0,0,255), 2)
    #----Draw one diagonal ----
    #cv2.line(Image,(box[2][0],box[2][1]),(box[0][0],box[0][1]), (255,0,0),2)
    #cv2.line(Image,(0,10),(Image.shape[1], 10), (255,255,0),2)
    #calculate(cv2.line(Image,(box[2][0],box[2][1]),(box[0][0],box[0][1]), (255,0,0),2),cv2.line(Image,(0,10),(Image.shape[1], 10), (255,255,0),2))
cv2.imwrite("Final_Image.jpg", Image)
# show the output image
cv2.imshow("Image", Image)
cv2.waitKey(0)
cv2.destroyAllWindows()
As mentioned in the comments' section, if you want to distinguish an apparent square from a lozenge, the only property that differs is the diagonals.
Using Python with OpenCV, I coded the following to obtain one diagonal for the square and the lozenge:
#----Find contour in the image----
_, contours, hierarchy = cv2.findContours(th, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#----Draw a rectangle having minimum area around it using Contour features as you mentioned----
rect = cv2.minAreaRect(cnt[0]) #---I used cnt[0] since there is only one contour in the image----
box = cv2.boxPoints(rect)
box = np.int0(box)
im = cv2.drawContours(im1, [box], 0, (0,0,255), 2)
#----Draw one diagonal ----
cv2.line(im1,(box[2][0],box[2][1]),(box[0][0],box[0][1]), (255,0,0),2)
cv2.imwrite("Final_Image.jpg", im1)
This is what I get:
SQUARE:
LOZENGE:
Now, since you have obtained the diagonal, you have to compare it with a reference line to find the angle, in order to determine whether it is a square or not.
For that, first draw a reference line (I considered a horizontal line):
cv2.line(im1,(0,10),(im1.shape[1], 10), (255,255,0),2)
You will get :
SQUARE:
LOZENGE:
Now you just have to calculate the angle between these two lines (the diagonal and the reference line):
If the angle is 90 degrees or 0 => Lozenge.
Otherwise => Square.
How do you calculate angles between two lines?
See THIS POST
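For reference, a minimal sketch of that angle test, using the box corners from cv2.boxPoints above (the 10-degree tolerance is my own assumption and may need tuning):
import math
# angle between the box[0] -> box[2] diagonal and the horizontal, in degrees, folded into [0, 180)
dx, dy = box[2][0] - box[0][0], box[2][1] - box[0][1]
ang = abs(math.degrees(math.atan2(dy, dx))) % 180
# a lozenge's diagonal is roughly horizontal or vertical; a square's is near 45 degrees
print("Lozenge" if min(ang, abs(ang - 90), 180 - ang) < 10 else "Square")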