Trouble using watershed segmentation in Python

I am having trouble segmenting the soil grains shown in the image using the algorithm below: it treats all particles as one object and computes a single area. That is not what I want; I would like to segment each particle individually. What can I modify in the following algorithm to improve its performance in detecting each grain individually?
The second image is the result of running this algorithm.
import cv2
import numpy as np
from skimage.feature import peak_local_max
from skimage.segmentation import watershed  # formerly skimage.morphology.watershed
from scipy import ndimage

# Load image, smooth, close small gaps, convert to grayscale, Otsu's threshold
image = cv2.imread('sample64pxfor1mm.jpg')
image = cv2.bilateralFilter(image, 15, 75, 75)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
cv2.imshow('thresh', thresh)
cv2.waitKey(0)

# Compute Euclidean distance from every binary pixel
# to the nearest zero pixel, then find peaks
distance_map = ndimage.distance_transform_edt(thresh)
# Note: indices=False is deprecated in recent scikit-image releases
local_max = peak_local_max(distance_map, indices=False, min_distance=20, labels=thresh)

# Perform connected component analysis, then apply watershed
markers = ndimage.label(local_max, structure=np.ones((3, 3)))[0]
labels = watershed(-distance_map, markers, mask=thresh)

# Iterate through unique labels
n = 0
total_area = []
for label in np.unique(labels):
    if label == 0:
        continue
    # Create a mask for the current label
    mask = np.zeros(gray.shape, dtype="uint8")
    mask[labels == label] = 255
    # Find contours and determine contour area
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    c = max(cnts, key=cv2.contourArea)
    area = cv2.contourArea(c)
    total_area.append(area)
    n += 1
    print('Grain number', n, 'has area =', area)
    # total_area += area
    cv2.drawContours(image, [c], -1, (36,255,12), 3)

cv2.imshow('image', image)
cv2.waitKey(0)
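One adjustment worth trying (my suggestion, not from the original post) is to derive the markers from a thresholded distance transform rather than from peak_local_max, so that touching grains get separate seeds. A minimal sketch, assuming thresh and distance_map from the code above; the 0.4 fraction is an assumption to tune:

# Sketch: mark "sure foreground" where the distance transform is a large
# fraction of its maximum; each blob then seeds one watershed region.
sure_fg = (distance_map > 0.4 * distance_map.max()).astype(np.uint8)
markers = ndimage.label(sure_fg, structure=np.ones((3, 3)))[0]
labels = watershed(-distance_map, markers, mask=thresh)
print('Segments found:', len(np.unique(labels)) - 1)

Raising the fraction (or min_distance in the original call) gives fewer, better-separated markers; lowering it over-segments. If the filename really means 64 px per mm, an area in mm² would be the pixel area divided by 64**2, but that scale is only a guess from the filename.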

Related

Multi-curve detection in noisy images in Python

I have images similar to the one below. First of all, I am trying to detect the curves in these images; the curves I want to capture are marked on the image. Next, I want to fit circles to these curves and use the radii of those circles as my result.
But I am having trouble detecting the curves in the images. Any help is much appreciated. Thanks in advance.
Input Image
Expected
Here's the code I'm using to detect and draw the curves:
import cv2
import numpy as np
from skimage.feature import peak_local_max
from skimage.morphology import watershed  # in newer scikit-image: skimage.segmentation
from scipy import ndimage

image = cv2.imread("croppedImage.png")
img = cv2.medianBlur(image, 13)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                               cv2.THRESH_BINARY, 45, 0)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,3))
kernel1 = np.ones((3, 3), np.uint8)
kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
dilate = cv2.dilate(thresh, kernel1, iterations=1)
erode = cv2.erode(dilate, kernel, iterations=1)

# Remove small noise by filtering using contour area
cnts = cv2.findContours(erode, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    if cv2.contourArea(c) < 800:
        if len(c) > 0:
            cv2.drawContours(thresh, [c], 0, (0,0,0), -1)

# Compute Euclidean distance from every binary pixel
# to the nearest zero pixel, then find peaks
distance_map = ndimage.distance_transform_edt(erode)
local_max = peak_local_max(distance_map, indices=False, min_distance=1, labels=thresh)

# Perform connected component analysis, then apply watershed
markers = ndimage.label(local_max, structure=np.ones((3, 3)))[0]
labels = watershed(-distance_map, markers, mask=erode)

# Iterate through unique labels
for label in np.unique(labels):
    if label == 0:
        continue
    # Create a mask
    mask = np.zeros(thresh.shape, dtype="uint8")
    mask[labels == label] = 255
    # Find contours and determine contour area
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    c = max(cnts, key=cv2.contourArea)
    cv2.drawContours(image, [c], -1, (36,255,12), -1)

cv2.imwrite('Results/drawedImage.png', image)
thresh = 155
im_bw = cv2.threshold(image, thresh, 255, cv2.THRESH_BINARY)[1]
cv2.imwrite("Results/binary.png", im_bw)
Result Image
Binary Result
From images like the one below, I can fit circles, but I don't have clean images like this one.
gray_blurred = cv2.GaussianBlur(img, (11,11), 0)
ret3, thresh = cv2.threshold(gray_blurred, 100, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

# Apply Hough transform on the blurred image
detected_circles = cv2.HoughCircles(thresh,
                                    cv2.HOUGH_GRADIENT, 1, 80, param1=20,
                                    param2=9, minRadius=120, maxRadius=200)

# Draw circles that are detected
if detected_circles is not None:
    # Convert the circle parameters a, b and r to integers
    detected_circles = np.uint16(np.around(detected_circles))
    for pt in detected_circles[0, :]:
        a, b, r = pt[0], pt[1], pt[2]
        # Draw the circumference of the circle
        cv2.circle(img, (a, b), r, (0, 255, 0), 2)
        # Draw a small circle (of radius 1) to show the center
        cv2.circle(img, (a, b), 1, (0, 0, 255), 3)
else:
    print("Circle is not found")

How to rotate an image to align the text for extraction?

I am using pytesseract to extract text from images, but it doesn't work on images which are inclined. Consider the image given below:
Here is the code to extract text, which works fine on images that are not inclined.
# Imports added for completeness; cv2_imshow assumes a Colab environment
import cv2
import numpy as np
import pytesseract
from google.colab.patches import cv2_imshow

img = cv2.imread(<path_to_image>)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
ret3, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

def findSignificantContours(img, edgeImg):
    contours, hierarchy = cv2.findContours(edgeImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Find level 1 contours
    level1 = []
    for i, tupl in enumerate(hierarchy[0]):
        # Each array is in format (Next, Prev, First child, Parent)
        # Filter the ones without a parent
        if tupl[3] == -1:
            tupl = np.insert(tupl, 0, [i])
            level1.append(tupl)
    significant = []
    tooSmall = edgeImg.size * 5 / 100  # if a contour covers less than 5% of the image area it is probably too small
    for tupl in level1:
        contour = contours[tupl[0]]
        area = cv2.contourArea(contour)
        if area > tooSmall:
            significant.append([contour, area])
            # Draw the contour on the original image
            cv2.drawContours(img, [contour], 0, (0,255,0), 2, cv2.LINE_AA, maxLevel=1)
    significant.sort(key=lambda x: x[1])
    #print([x[1] for x in significant])
    mx = (0,0,0,0)  # biggest bounding box so far
    mx_area = 0
    for cont in contours:
        x, y, w, h = cv2.boundingRect(cont)
        area = w*h
        if area > mx_area:
            mx = x, y, w, h
            mx_area = area
    x, y, w, h = mx
    # Output to files
    roi = img[y:y+h, x:x+w]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5,5), 0)
    ret3, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    cv2_imshow(thresh)
    text = pytesseract.image_to_string(roi)
    print(text)
    print("\n")
    print(pytesseract.image_to_string(thresh))
    print("\n")
    return [x[0] for x in significant]

edgeImg_8u = np.asarray(thresh, np.uint8)
# Find contours
significant = findSignificantContours(img, edgeImg_8u)
mask = thresh.copy()
mask[mask > 0] = 0
cv2.fillPoly(mask, significant, 255)
# Invert mask
mask = np.logical_not(mask)
# Finally remove the background
img[mask] = 0
Tesseract can't extract the text from this image. Is there a way I can rotate it to align the text perfectly and then feed it to pytesseract? Please let me know if my question requires any more clarity.
Here's a simple approach:
Obtain binary image. Load the image, convert to grayscale, Gaussian blur, then Otsu's threshold.
Find contours and sort for the largest contour. We find contours, then filter using contour area with cv2.contourArea() to isolate the rectangular contour.
Perform perspective transform. Next we perform contour approximation with cv2.approxPolyDP() to obtain the rectangular contour. Finally we utilize imutils.perspective.four_point_transform to obtain the bird's-eye view of the image.
Binary image
Result
To actually extract the text, take a look at
Use pytesseract OCR to recognize text from an image
Cleaning image for OCR
Detect text area in an image using python and opencv
Code
from imutils.perspective import four_point_transform
import cv2

# Load image, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread("1.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (7,7), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

# Find contours and sort for largest contour
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
displayCnt = None
for c in cnts:
    # Perform contour approximation
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        displayCnt = approx
        break

# Obtain bird's-eye view of image
warped = four_point_transform(image, displayCnt.reshape(4, 2))
cv2.imshow("thresh", thresh)
cv2.imshow("warped", warped)
cv2.waitKey()
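To actually run OCR on the deskewed result (a follow-up sketch of my own, not part of the original answer; it assumes pytesseract and the Tesseract binary are installed):

import pytesseract
# OCR the bird's-eye view; --psm 6 assumes a single uniform block of text
print(pytesseract.image_to_string(warped, config='--psm 6'))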
To solve this problem you can also use the minAreaRect API in OpenCV, which gives you a minimum-area rotated rectangle together with an angle of rotation. You can then get the rotation matrix and apply warpAffine to straighten the image. I have also attached a Colab notebook which you can play around with.
Colab notebook: https://colab.research.google.com/drive/1SKxrWJBOHhGjEgbR2ALKxl-dD1sXIf4h?usp=sharing
import cv2
from google.colab.patches import cv2_imshow
import numpy as np

def rotate_image(image, angle):
    image_center = tuple(np.array(image.shape[1::-1]) / 2)
    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
    return result

img = cv2.imread("/content/sxJzw.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
mask = np.zeros((img.shape[0], img.shape[1]))
blur = cv2.GaussianBlur(gray, (5,5), 0)
ret, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2_imshow(thresh)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
largest_contour = max(contours, key=cv2.contourArea)
binary_mask = cv2.drawContours(mask, [largest_contour], 0, 1, -1)
new_img = img * np.dstack((binary_mask, binary_mask, binary_mask))
minRect = cv2.minAreaRect(largest_contour)
rotate_angle = minRect[-1] if minRect[-1] < 0 else -minRect[-1]
new_img = rotate_image(new_img, rotate_angle)
cv2_imshow(new_img)
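A caveat on the angle handling above: the convention for the angle returned by cv2.minAreaRect changed around OpenCV 4.5 (roughly (-90, 0] before, [0, 90) after), so the sign flip in rotate_angle may over-rotate on some installations. A small normalization sketch, offered as an assumption to verify against your OpenCV version:

def deskew_angle(min_rect):
    # Map the minAreaRect angle to a small correction in (-45, 45],
    # whichever OpenCV angle convention is in effect
    angle = min_rect[-1]
    if angle > 45:
        angle -= 90
    elif angle < -45:
        angle += 90
    return angle

# e.g. new_img = rotate_image(new_img, deskew_angle(minRect))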

Extracting objects after watershed segmentation

I need to segment the seeds in the image below and crop them.
https://i.stack.imgur.com/ndOkX.jpg
They can be pretty close to each other and sometimes overlap, so I chose to use the watershed algorithm for this task.
My results, after drawing the contours of the returned markers, are in the image below; as you can see, I'm having trouble defining good markers. The individual seeds are outlined, but there are many inner lines that I do not want.
https://i.stack.imgur.com/BtOfj.jpg
How would I go about removing them or defining better markers?
The code I'm running:
from skimage.feature import peak_local_max
from skimage.segmentation import watershed
import matplotlib.pyplot as plt
from scipy import ndimage
import cv2 as cv
import imutils
import numpy as np

img = cv.imread("image.jpg")
blur = cv.GaussianBlur(img, (7,7), 0)
# color space change
mSource_Hsv = cv.cvtColor(blur, cv.COLOR_BGR2HSV)
mMask = cv.inRange(mSource_Hsv, np.array([0,0,0]), np.array([80,255,255]))
output = cv.bitwise_and(img, img, mask=mMask)
# grayscale
img_grey = cv.cvtColor(output, cv.COLOR_BGR2GRAY)
# thresholding
ret, th1 = cv.threshold(img_grey, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
# distance transform
D = ndimage.distance_transform_edt(th1)
# markers
localMax = peak_local_max(D, indices=False, min_distance=20, labels=th1)
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
# apply watershed
labels = watershed(-D, markers, mask=th1)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))

# loop over the unique labels
for label in np.unique(labels):
    if label == 0:
        continue
    # draw label on the mask
    mask = np.zeros(img_grey.shape, dtype="uint8")
    mask[labels == label] = 255
    # detect contours in the mask and grab the largest one
    cnts = cv.findContours(mask.copy(), cv.RETR_EXTERNAL,
                           cv.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv.contourArea)
    cv.drawContours(img, cnts, -1, (0, 255, 0), 2)

cv.imshow("segmented", img)
cv.waitKey(0)
You can merge every two contours that satisfy the following condition:
the area of the convex hull of the merged contours is close to the sum of the areas of the two individual contours.
The following solution uses a kind of "brute force" approach that tries merging every contour with every other contour (not very efficient).
Here is a working code sample (please read the comments):
from skimage.feature import peak_local_max
from skimage.segmentation import watershed
import matplotlib.pyplot as plt
from scipy import ndimage
import cv2 as cv
import imutils
import numpy as np

img = cv.imread("image.jpg")
blur = cv.GaussianBlur(img, (7,7), 0)
# color space change
mSource_Hsv = cv.cvtColor(blur, cv.COLOR_BGR2HSV)
mMask = cv.inRange(mSource_Hsv, np.array([0,0,0]), np.array([80,255,255]))
output = cv.bitwise_and(img, img, mask=mMask)
# grayscale
img_grey = cv.cvtColor(output, cv.COLOR_BGR2GRAY)
# thresholding
ret, th1 = cv.threshold(img_grey, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
# distance transform
D = ndimage.distance_transform_edt(th1)
# markers
localMax = peak_local_max(D, indices=False, min_distance=20, labels=th1)
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
# apply watershed
labels = watershed(-D, markers, mask=th1)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))

contours = []

# loop over the unique labels, and append convex hulls to the contours list
for label in np.unique(labels):
    if label == 0:
        continue
    # draw label on the mask
    mask = np.zeros(img_grey.shape, dtype="uint8")
    mask[labels == label] = 255
    # detect contours in the mask and grab the largest one
    cnts = cv.findContours(mask.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv.contourArea)
    ## Ignore small contours
    #if c.shape[0] < 20:
    #    continue
    # Get the convex hull of the contour - it's going to help when merging contours
    hull = cv.convexHull(c)
    #cv.drawContours(img, c, -1, (0, 255, 0), 2)
    cv.drawContours(img, [hull], -1, (0, 255, 0), 2, 1)
    # Append hull to contours list
    contours.append(hull)

# Merge contours that do not increase the convex hull by much.
# Note: this is a kind of "brute force" solution and could be improved.
################################################################################
for i in range(len(contours)):
    c = contours[i]
    area = cv.contourArea(c)
    # Iterate over all contours from i+1 to the end of the list
    for j in range(i+1, len(contours)):
        c2 = contours[j]
        area2 = cv.contourArea(c2)
        area_sum = area + area2
        # Merge the two contours together
        tmp = np.vstack((c, c2))
        merged_c = cv.convexHull(tmp)
        merged_area = cv.contourArea(merged_c)
        # Replace contours c and c2 with the convex hull of the merged pair,
        # if the total area is increased by no more than 10%
        if merged_area < area_sum*1.1:
            contours[i] = merged_c
            contours[j] = merged_c
            c = merged_c
            area = merged_area
################################################################################

# Draw the merged contours in red
for c in contours:
    # Ignore small contours
    if cv.contourArea(c) > 100:
        cv.drawContours(img, [c], -1, (0, 0, 255), 2, 1)

cv.imshow("segmented", img)
cv.waitKey(0)
cv.destroyAllWindows()
Result:
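Since the question also asks for cropping the seeds, here's a short follow-up sketch (my addition, not part of the answer above) that crops a bounding box for every merged contour. Note that after merging, the contours list can hold the same hull more than once, so duplicate crops may be written; cropping from img will also include the drawn outlines, so in practice crop from an untouched copy of the image:

# Crop each sufficiently large merged contour to its own file
for i, c in enumerate(contours):
    if cv.contourArea(c) > 100:
        x, y, w, h = cv.boundingRect(c)
        cv.imwrite('seed_{}.png'.format(i), img[y:y+h, x:x+w])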

Extract car license plate number in image

I want to find the car plate number to search for it in a database. Since Saudi plates are different, I face this problem
The result of the code
My current approach is to search for the cross in OpenCV using edge detection. How can I find the cross and take the character below it (using contours and edge detection)?
import numpy as np
import pytesseract
from PIL import Image
import cv2
import imutils
import matplotlib.pyplot as plt

img = cv2.imread('M4.png')
img = cv2.resize(img, (820,680))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
gray = cv2.blur(gray, (3,3))  # blur to reduce noise
edged = cv2.Canny(gray, 10, 100)  # perform edge detection

# find contours in the edged image, keep only the largest
# ones, and initialize our screen contour
cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:10]
screenCnt = None

# loop over our contours
for c in cnts:
    # approximate the contour
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.1 * peri, True)
    # if our approximated contour has four points, then
    # we can assume that we have found our screen
    if len(approx) == 4:
        screenCnt = approx
        break

if screenCnt is None:
    detected = 0
    print("No contour detected")
else:
    detected = 1

if detected == 1:
    cv2.drawContours(img, [screenCnt], -1, (0, 255, 0), 3)
    # Mask the part other than the number plate
    imgs = img
    mask = np.zeros(gray.shape, np.uint8)
    new_image = cv2.drawContours(mask, [screenCnt], 0, 255, -1)
    new_image = cv2.bitwise_and(imgs, imgs, mask=mask)
    # Now crop
    (x, y) = np.where(mask == 255)
    (topx, topy) = (np.min(x), np.min(y))
    (bottomx, bottomy) = (np.max(x), np.max(y))
    Cropped = gray[topx:bottomx+1, topy:bottomy+1]
    # Read the number plate
    text = pytesseract.image_to_string(Cropped, config='--psm 11')
    print("Detected Number is:", text)
    plt.title(text)

plt.subplot(1,4,1), plt.imshow(img, cmap='gray')
plt.title('Original'), plt.xticks([]), plt.yticks([])
plt.subplot(1,4,2), plt.imshow(gray, cmap='gray')
plt.title('gray'), plt.xticks([]), plt.yticks([])
plt.subplot(1,4,3), plt.imshow(Cropped, cmap='gray')
plt.title('Cropped'), plt.xticks([]), plt.yticks([])
plt.subplot(1,4,4), plt.imshow(edged, cmap='gray')
plt.title('edged'), plt.xticks([]), plt.yticks([])
plt.show()
# check the database
# record the entry
cv2.waitKey(0)
cv2.destroyAllWindows()
Thanks for your help
Here's an approach:
Convert image to grayscale and Gaussian blur
Otsu's threshold to get a binary image
Find contours and sort contours from left-to-right to maintain order
Iterate through contours and filter for the bottom two rectangles
Extract ROI and OCR
After converting to grayscale and Gaussian blurring, we apply Otsu's threshold to get a binary image. We find contours, then sort them using imutils.contours.sort_contours() with the left-to-right parameter; this keeps the contours in a consistent order. From here we iterate through the contours and filter using these three conditions:
The contour must be larger than some specified threshold area (3000)
The width must be larger than the height
The center of each ROI must be in the bottom half of the image. We find the center of each contour and compare it to where it is located on the image.
If a ROI passes these filtering conditions, we extract the ROI using NumPy slicing and then throw it into Pytesseract. Here are the detected ROIs that pass the filter, highlighted in green.
Since we already have the bounding box, we extract each ROI.
We throw each individual ROI into Pytesseract one at a time to build up the license plate string. Here's the result:
License plate: 430SRU
Code
import cv2
import pytesseract
from imutils import contours

pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"

image = cv2.imread('1.png')
height, width, _ = image.shape
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts, _ = contours.sort_contours(cnts, method="left-to-right")

plate = ""
for c in cnts:
    area = cv2.contourArea(c)
    x, y, w, h = cv2.boundingRect(c)
    center_y = y + h/2
    # Filter: large enough, wider than tall, and in the bottom half of the image
    if area > 3000 and (w > h) and center_y > height/2:
        ROI = image[y:y+h, x:x+w]
        data = pytesseract.image_to_string(ROI, lang='eng', config='--psm 6')
        plate += data

print('License plate:', plate)

Count number of cells in the image

I need code for counting the number of cells in the image; only the cells that are pink in color should be counted. I have used thresholding and the watershed method.
import cv2
from skimage.feature import peak_local_max
from skimage.morphology import watershed  # in newer scikit-image: skimage.segmentation
from scipy import ndimage
import numpy as np
import imutils

image = cv2.imread("cellorigin.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255,
                       cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
cv2.imshow("Thresh", thresh)

D = ndimage.distance_transform_edt(thresh)
localMax = peak_local_max(D, indices=False, min_distance=20,
                          labels=thresh)
cv2.imshow("D image", D)

markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
labels = watershed(-D, markers, mask=thresh)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))

for label in np.unique(labels):
    # if the label is zero, we are examining the 'background'
    # so simply ignore it
    if label == 0:
        continue
    # otherwise, allocate memory for the label region and draw
    # it on the mask
    mask = np.zeros(gray.shape, dtype="uint8")
    mask[labels == label] = 255
    # detect contours in the mask and grab the largest one
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv2.contourArea)
    # draw a circle enclosing the object
    ((x, y), r) = cv2.minEnclosingCircle(c)
    cv2.circle(image, (int(x), int(y)), int(r), (0, 255, 0), 2)
    cv2.putText(image, "#{}".format(label), (int(x) - 10, int(y)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

cv2.imshow("input", image)  # fixed: missing closing parenthesis
cv2.waitKey(0)
I am not able to segment the pink cells properly. In some places two pink cells are attached together; those should also be separated.
output:
Since the cells seem to be visibly different from the nucleus (dark purple) and the background (light pink), color thresholding should work here. The idea is to convert the image to HSV format, then use a lower and upper color threshold to isolate the cells. This gives us a binary mask which we can use to count the cells.
We begin by converting the image to HSV format and creating a binary mask with a lower/upper color threshold. From here we perform morphological operations to smooth the image and remove small bits of noise.
Now that we have the mask, we find contours with the cv2.RETR_EXTERNAL parameter to ensure that we only take the outer contours. We define several area thresholds to filter out the cells:
minimum_area = 200
average_cell_area = 650
connected_cell_area = 1000
The minimum_area threshold ensures that we do not count tiny sections of a cell. Since some of the cells are connected, a single contour may represent multiple touching cells, so to estimate the count better we define an average_cell_area parameter that approximates the area of one cell. The connected_cell_area parameter flags connected cells, for which we use math.ceil() on the contour area to estimate the number of cells in that contour; for example, a connected contour of area 1900 counts as ceil(1900 / 650) = 3 cells. To count the total, we iterate through the contours and sum the counts based on their area. Here are the detected cells highlighted in green:
Cells: 75
Code
import cv2
import numpy as np
import math

image = cv2.imread("1.jpg")
original = image.copy()
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

hsv_lower = np.array([156,60,0])
hsv_upper = np.array([179,115,255])
mask = cv2.inRange(hsv, hsv_lower, hsv_upper)

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
close = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel, iterations=2)

cnts = cv2.findContours(close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

minimum_area = 200
average_cell_area = 650
connected_cell_area = 1000
cells = 0
for c in cnts:
    area = cv2.contourArea(c)
    if area > minimum_area:
        cv2.drawContours(original, [c], -1, (36,255,12), 2)
        if area > connected_cell_area:
            # Estimate the number of cells in a connected blob
            cells += math.ceil(area / average_cell_area)
        else:
            cells += 1

print('Cells: {}'.format(cells))
cv2.imshow('close', close)
cv2.imshow('original', original)
cv2.waitKey()
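If the pink tones vary across images, the HSV bounds can be re-derived interactively. A minimal sketch of my own (not from the original answer), using OpenCV trackbars and the same '1.jpg' path assumed above:

import cv2
import numpy as np

def nothing(x):
    pass  # trackbar callback; values are polled in the loop instead

image = cv2.imread('1.jpg')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
cv2.namedWindow('mask')
# One trackbar per HSV bound; initial values match the answer above
for name, val, max_val in [('H lo',156,179), ('S lo',60,255), ('V lo',0,255),
                           ('H hi',179,179), ('S hi',115,255), ('V hi',255,255)]:
    cv2.createTrackbar(name, 'mask', val, max_val, nothing)
while True:
    lo = np.array([cv2.getTrackbarPos(n, 'mask') for n in ('H lo','S lo','V lo')])
    hi = np.array([cv2.getTrackbarPos(n, 'mask') for n in ('H hi','S hi','V hi')])
    cv2.imshow('mask', cv2.inRange(hsv, lo, hi))
    if cv2.waitKey(30) & 0xFF == 27:  # press Esc to quit
        break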
