OpenCV watershed not segmenting oval objects properly - Python

Attempting to create a way to process images to count different types of tablets. The following code has been working well for circular objects, but oval shapes are creating issues that I can't find a workaround for.
import cv2
import numpy as np
import imutils
from scipy import ndimage
from skimage.feature import peak_local_max
from skimage.morphology import watershed

kernel = np.ones((5, 5), np.uint8)
image = cv2.imread('sample.jpg')

# Smooth and simplify the image before thresholding
shifted = cv2.GaussianBlur(image, (15, 15), 1)
shifted = cv2.pyrMeanShiftFiltering(shifted, 21, 51)
shifted = cv2.erode(shifted, kernel, iterations=1)
shifted = cv2.dilate(shifted, kernel, iterations=1)
cv2.imwrite("step1.jpg", shifted)

# Otsu threshold, then clean up the mask
gray = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255,
                       cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
cv2.imwrite("step2.jpg", thresh)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
cv2.imwrite("step3.jpg", thresh)
thresh = cv2.bitwise_not(thresh)
thresh = cv2.erode(thresh, kernel, iterations=1)
cv2.imwrite("step4.jpg", thresh)

# Distance transform, peak detection, and marker-based watershed
D = ndimage.distance_transform_edt(thresh)
localMax = peak_local_max(D, indices=False, min_distance=10,
                          labels=thresh)
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
labels = watershed(-D, markers, mask=thresh)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))

# Draw a circle and label for each segment
for label in np.unique(labels):
    if label == 0:
        continue
    mask = np.zeros(gray.shape, dtype="uint8")
    mask[labels == label] = 255
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv2.contourArea)
    ((x, y), r) = cv2.minEnclosingCircle(c)
    cv2.circle(image, (int(x), int(y)), int(r), (0, 255, 0), 2)
    cv2.putText(image, "#{}".format(label), (int(x) - 10, int(y)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

cv2.imwrite("step5.jpg", image)
cv2.waitKey(0)
The image being used is:
https://imgur.com/a/1U49DeT
The output after filtering is:
https://imgur.com/a/vXwrWlG
Any teaching points as to how to fix this would be greatly appreciated.

I think there is a better way to use the watershed operator.
It relies on having a good gradient, but if your images are similar to this one, you should be able to do this effectively. Also, there are very powerful edge detectors available today, much better than the one I used in this demo.
import cv2
import numpy as np
import higra as hg
from skimage.segmentation import relabel_sequential
import matplotlib.pyplot as plt

def main():
    img_path = "pills.jpg"
    img = cv2.imread(img_path)
    img = cv2.resize(img, (256, 256))
    img = cv2.GaussianBlur(img, (9, 9), 0)

    # The edge map acts as the gradient for the watershed hierarchy
    edges = cv2.Canny(img, 100, 100)

    size = img.shape[:2]
    graph = hg.get_4_adjacency_graph(size)
    edge_weights = hg.weight_graph(graph, edges, hg.WeightFunction.mean)

    tree, altitudes = hg.watershed_hierarchy_by_area(graph, edge_weights)
    segments = hg.labelisation_horizontal_cut_from_threshold(tree, altitudes, 500)
    segments, _, _ = relabel_sequential(segments)
    print('The number of pills is', segments.max() - 1)

    plt.imshow(segments)
    plt.show()

if __name__ == "__main__":
    main()
Initially, I resize the image to speed up the computation and apply a blur to reduce the background gradient. I then detect the edges (gradient) and create a graph with them as edge weights; finally, I compute the watershed hierarchy ordered by area and threshold it, obtaining the connected components at that level. From this you can count the number of segments.
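If the area threshold of 500 doesn't carry over to your images, one quick check is to sweep a few candidate thresholds, reusing the tree and altitudes already computed above, and look for a plateau in the segment count. A minimal sketch; the candidate values are assumptions to tune, not tuned settings:
# Sketch, assuming tree and altitudes from the code above are in scope.
for t in (100, 250, 500, 1000, 2000):
    seg = hg.labelisation_horizontal_cut_from_threshold(tree, altitudes, t)
    seg, _, _ = relabel_sequential(seg)
    print('threshold={}: {} segments'.format(t, seg.max() - 1))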

Related

How to detect a white region in an image with OpenCV & Python?

I'm trying to extract the coordinates of a big white region in an image as follows:
Here's the original image:
Using a small square kernel, I applied a closing operation to fill small holes and help identify larger structures in the image as follows:
import cv2
import numpy as np
import imutils
original = cv2.imread("Plates\\24.png")
original = cv2.resize(original, None, fx=3, fy=3, interpolation=cv2.INTER_CUBIC)
gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
# next, find regions in the image that are light
squareKern = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
light = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, squareKern)
light = cv2.threshold(light, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
The resulting image is as follows:
Here's another example:
What I wish to be able to do is detect the large white region in the plate, as follows:
Keep in mind that contours will not work well with many examples.
With the one image you provided, I came up with 2 approaches to how this problem can be solved:
Approach 1
Contour Area Comparison
As you can see, there are 3 large contours in the image: the top rectangle and the two rectangles below it, which you want to detect as a whole.
So I thresholded your image, detected the contours of the thresholded image, and indexed the second and third largest contours (the largest is the top rectangle, which you want to ignore).
Here is the thresholded image:
I stacked the two contours together and detected the bounding box of the two contours:
import cv2
import numpy as np

img = cv2.imread("image.png")

def process(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(img_gray, 128, 255, cv2.THRESH_BINARY)
    img_blur = cv2.GaussianBlur(thresh, (5, 5), 2)
    img_canny = cv2.Canny(img_blur, 0, 0)
    return img_canny

def get_contours(img):
    contours, _ = cv2.findContours(process(img), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Take the 2nd and 3rd largest contours (the largest is the top rectangle)
    r1, r2 = sorted(contours, key=cv2.contourArea)[-3:-1]
    # Bounding box of the two contours stacked together
    x, y, w, h = cv2.boundingRect(np.r_[r1, r2])
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)

get_contours(img)
cv2.imshow("img_processed", img)
cv2.waitKey(0)
Output:
Approach 2
Threshold Masking
As the 2 bottom rectangles are whiter than the top rectangle of the plate, I used a threshold to mask out the top of the plate:
I then used the Canny edge detector on the mask shown above.
import cv2

def process(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(img_gray, 163, 255, cv2.THRESH_BINARY)
    img_canny = cv2.Canny(thresh, 0, 0)
    img_dilate = cv2.dilate(img_canny, None, iterations=7)
    return cv2.erode(img_dilate, None, iterations=7)

def get_contours(img):
    contours, _ = cv2.findContours(process(img), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    x, y, w, h = cv2.boundingRect(max(contours, key=cv2.contourArea))
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)

img = cv2.imread("egypt.png")
get_contours(img)
cv2.imshow("img_processed", img)
cv2.waitKey(0)
Output:
Of course, this method may not work properly if the top of the plate isn't brighter than the bottom.
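If you need to choose between the two approaches automatically, a minimal hedged check is to compare the mean gray level of the top and bottom halves of the plate image; the halves split and the file name here are assumptions:
import cv2

# Sketch (assumption): pick Approach 2 only when the bottom half of the
# plate really is brighter than the top half.
img = cv2.imread("plate.png")  # hypothetical file name
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
h = gray.shape[0]
top_mean = gray[:h // 2].mean()
bottom_mean = gray[h // 2:].mean()
use_threshold_masking = bottom_mean > top_mean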

Human fingernail segmentation without training a model

I want to mask human fingernails (fingernails white and everything else, including the hand, black). I do simple image operations, then Canny edge detection; after I smooth the image, I find contours and give the internal contours a white color, which should be the fingernails.
My problem is that when the fingernails are painted it is quite easy to detect them, but when there is no paint it becomes really complicated. The program has to process 50 images and save the outputs to a certain folder.
I am confused about how to proceed; if anybody has done something similar I would appreciate some help.
import cv2
import numpy as np
import matplotlib.pyplot as plt

def display_img(img):
    fig = plt.figure(figsize=(12, 10))
    ax = fig.add_subplot(111)
    plt.imshow(img, cmap='gray')

img = cv2.imread('nail2.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.blur(gray, ksize=(1, 1))
kernel = np.ones((5, 5), np.uint8)
display_img(blur)

med = np.median(gray)
gradient = cv2.Laplacian(blur, cv2.CV_64F)
gradient = cv2.convertScaleAbs(gradient)
plt.imshow(gradient, 'gray')

lower = int(max(0, 0.7 * med))
upper = int(min(255, 1.3 * med))
edges = cv2.Canny(blur, lower, upper)
display_img(edges)

edges = cv2.GaussianBlur(edges, (11, 11), 0)  # smoothing before applying threshold
display_img(edges)

image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)

# Create an empty array to hold the internal contours
image_internal = np.zeros(image.shape)

# Iterate through the list of contour arrays
for i in range(len(contours)):
    # If the fourth value (the parent index) is NOT -1, the contour is internal
    if hierarchy[0][i][3] != -1:
        # Draw the contour
        cv2.drawContours(image_internal, contours, i, 255, -1)

display_img(image_internal)
Below is a good result:
And here is a bad result, even though the fingers have pink paint:
Well, you have big lighting and scale problems in these two images. But a possible solution is to segment the color channels and look for blobs, then filter the blobs by their parameters.
Here is code you can try:
import cv2
import numpy as np
fra = cv2.imread('nails.png')
height, width, channels = fra.shape
src = cv2.medianBlur(fra, 21)
hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV_FULL)
mask = cv2.inRange(hsv, np.array([0, 0, 131]), np.array([62, 105, 255]))
mask = cv2.erode(mask, None, iterations=8)
mask = cv2.dilate(mask, None, iterations=8)
params = cv2.SimpleBlobDetector_Params()
params.filterByArea = True
params.minArea = int((height * width) / 500)
params.maxArea = int((height * width) / 10)
params.filterByCircularity = True
params.minCircularity = 0.5
params.filterByConvexity = True
params.minConvexity = 0.5
params.filterByInertia = True
params.minInertiaRatio = 0.01
detector = cv2.SimpleBlobDetector_create(params)
key_points = detector.detect(255 - mask)
vis = cv2.bitwise_and(hsv, hsv, mask=mask)
vis = cv2.addWeighted(src, 0.2, vis, 0.8, 0)
cv2.drawKeypoints(vis, key_points, vis, (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
for kp in key_points:
    cv2.drawMarker(vis, (int(kp.pt[0]), int(kp.pt[1])), color=(0, 255, 0), markerType=cv2.MARKER_CROSS, thickness=3)
cv2.imshow("VIS", vis)
cv2.imwrite('nails_detected.png', vis)
cv2.waitKey(0)
cv2.destroyAllWindows()
Good luck!
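Since the question mentions processing 50 images and saving the outputs to a folder, a batch wrapper along these lines could drive the pipeline above; the folder names and the process_nails() wrapper are hypothetical:
import os
import glob
import cv2

input_dir = "nails_in"    # hypothetical input folder
output_dir = "nails_out"  # hypothetical output folder
os.makedirs(output_dir, exist_ok=True)

for path in glob.glob(os.path.join(input_dir, "*.png")):
    img = cv2.imread(path)
    if img is None:
        continue
    vis = process_nails(img)  # hypothetical wrapper around the pipeline above
    cv2.imwrite(os.path.join(output_dir, os.path.basename(path)), vis)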

How to remove the jagged edges of an object in an image with Python and OpenCV

I have searched and found two solutions that could solve my problem, but they are not working for me. I want to remove the jagged edges of the attached image, or smooth them. I have tried to implement the following code:
import cv2
import numpy as np
import os
os.chdir("F:/Examples")
image = cv2.imread("image1.jpeg")
blur = cv2.GaussianBlur(image, (21, 21), 0)
mask = np.zeros(image.shape[:2], dtype=np.uint8)
output = np.where(mask==np.array([255, 255, 255]), blur, image)
cv2.imwrite("", output)
The above code returns an unchanged image, as if the applied mask didn't work. Here is the input image:
I also tried to implement some solutions from different links, including (How to blur the image according to segmentation map), without success. Any help will be appreciated.
The edges or the external contour should be clear, as in this image:
I almost managed to solve it.
The main problem is that the contour is partially curved and partially made of straight lines.
Explanations are in the comments:
import numpy as np
import cv2
from scipy.interpolate import splprep, splev

im = cv2.imread('image1.jpeg')
bk = im.copy()

# Fill background with black color
cv2.floodFill(im, None, seedPoint=(1, 1), newVal=(0, 0, 0), loDiff=(5, 5, 5), upDiff=(5, 5, 5))

gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
ret, thresh_gray = cv2.threshold(gray, 5, 255, cv2.THRESH_BINARY)

# Use "open" morphological operation for removing small contours (noise)
thresh_gray = cv2.morphologyEx(thresh_gray, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)))

# Background
bk[(thresh_gray > 0)] = 0
bk = cv2.morphologyEx(bk, cv2.MORPH_DILATE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20)))
#cv2.imshow('bk', bk)

# Foreground
fg = im.copy()
tmm_fg = cv2.morphologyEx(fg, cv2.MORPH_DILATE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20)))
fg_gray = cv2.cvtColor(fg, cv2.COLOR_RGB2GRAY)
fg[(fg_gray == 0)] = tmm_fg[(fg_gray == 0)]
#thresh_gray = cv2.morphologyEx(thresh_gray, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (50, 50)))

# Find contours (there is only one contour).
# [-2:] keeps this compatible with both OpenCV 3 (3 return values) and OpenCV 4 (2 return values):
# https://stackoverflow.com/questions/48291581/how-to-use-cv2-findcontours-in-different-opencv-versions
contours, _ = cv2.findContours(thresh_gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
c = contours[0]

# Smooth the contour with a periodic B-spline
# https://agniva.me/scipy/2016/10/25/contour-smoothing.html
x, y = c.T
x = x.tolist()[0]
y = y.tolist()[0]
tck, u = splprep([x, y], u=None, s=1.0, per=1)
u_new = np.linspace(u.min(), u.max(), 150)
x_new, y_new = splev(u_new, tck, der=0)
res_array = [[[int(i[0]), int(i[1])]] for i in zip(x_new, y_new)]
smoothened = np.asarray(res_array, dtype=np.int32)

# Build a mask
mask = np.zeros_like(thresh_gray)
cv2.drawContours(mask, [smoothened], -1, 255, -1)

# For testing
test_im = cv2.cvtColor(thresh_gray, cv2.COLOR_GRAY2RGB)
cv2.drawContours(test_im, [smoothened], 0, (0, 255, 0), 1)

res = bk
res[(mask > 0)] = fg[(mask > 0)]

cv2.imshow('test_im', test_im)
cv2.imshow('res', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
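As a usage note on the spline step: in splprep, the smoothing factor s trades fidelity to the original contour points for smoothness, and the sample count passed to np.linspace sets how many points the smoothed contour gets. A small sketch for experimenting; the values are assumptions, not tuned settings:
# Sketch: larger s smooths more aggressively; more samples give a finer polygon.
for s in (0.5, 1.0, 5.0, 20.0):
    tck, u = splprep([x, y], u=None, s=s, per=1)
    u_new = np.linspace(u.min(), u.max(), 300)
    x_new, y_new = splev(u_new, tck, der=0)
    # ...rasterize with cv2.drawContours and compare masks as above...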

Detecting apples by thresholding

I want to detect apples by thresholding. To do this, I've converted the image to HSV and calculated the lower and upper limits for the inRange() function, getting a binary mask out of it. Since the apples will be touching each other, I'm using the watershed algorithm to separate them.
The input image looks like this:
After InRange() operation and erosion, the gray image looks like this:
Applying the watershed algorithm, the output looks like this:
The problem is that the bottom-left apples are wrongly detected: there are only 2 apples, but three contours are shown, and one of the circles is way too big. Any help?
Here is my code:
import cv2
import numpy as np
import imutils
from scipy import ndimage
from skimage.feature import peak_local_max
from skimage.morphology import watershed

img = cv2.imread('4.jpg')
img = imutils.resize(img, width=640)
# img = cv2.pyrMeanShiftFiltering(img, 21, 51)

hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

lower_1 = np.array([0, 50, 20])
upper_1 = np.array([80, 255, 255])
mask1 = cv2.inRange(hsv, lower_1, upper_1)

lower_2 = np.array([160, 50, 20])
upper_2 = np.array([179, 255, 255])
mask2 = cv2.inRange(hsv, lower_2, upper_2)

gray = mask1 + mask2

kernel = np.ones((7, 7), np.uint8)
gray = cv2.erode(gray, kernel, iterations=1)
# gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)

thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

D = ndimage.distance_transform_edt(thresh)
localMax = peak_local_max(D, indices=False, min_distance=20,
                          labels=thresh)
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
labels = watershed(-D, markers, mask=thresh)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))

for label in np.unique(labels):
    if label == 0:
        continue
    mask = np.zeros(gray.shape, dtype="uint8")
    mask[labels == label] = 255
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv2.contourArea)
    ((x, y), r) = cv2.minEnclosingCircle(c)
    if 25 < r < 55:
        cv2.circle(img, (int(x), int(y)), int(r), (0, 255, 0), 2)
        cv2.putText(img, "{}".format(round(r)), (int(x) - 10, int(y)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

cv2.imshow('thresh', thresh)
cv2.imshow('gray', gray)
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
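One knob worth noting in the code above: min_distance in peak_local_max sets how far apart two watershed markers must be, so a value that is too small can split a single apple into several segments. A hedged sketch for experimenting with it, assuming D and thresh from the code above are in scope; the candidate values are assumptions:
# Sketch: a larger min_distance merges nearby peaks into one marker,
# which can prevent one apple from being split in two.
for d in (20, 30, 40):
    localMax = peak_local_max(D, indices=False, min_distance=d, labels=thresh)
    markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
    labels = watershed(-D, markers, mask=thresh)
    print("min_distance={}: {} segments".format(d, len(np.unique(labels)) - 1))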

Python OpenCV - Extrapolating the largest rectangle off of a set of contour points

I'm trying to make OpenCV detect a bed in an image. I am running the usual grayscale, blur, Canny, and I've tried convex hull. However, there is quite a lot of "noise" which gives extra contours and messes up the object detection. Because of this, I am unable to detect the bed properly.
Here is the input image as well as the Canny Edge Detection result:
As you can see, it's almost there. I have the outline of the bed already, albeit, that the upper right corner has a gap - which is preventing me from detecting a closed rectangle.
Here's the code I'm running:
import cv2
import numpy as np

def contoursConvexHull(contours):
    print("contours length = ", len(contours))
    print("contours length of first item = ", len(contours[1]))
    pts = []
    for i in range(0, len(contours)):
        for j in range(0, len(contours[i])):
            pts.append(contours[i][j])
    pts = np.array(pts)
    result = cv2.convexHull(pts)
    print(len(result))
    return result

def auto_canny(image, sigma=0.35):
    # compute the median of the single channel pixel intensities
    v = np.median(image)
    # apply automatic Canny edge detection using the computed median
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    edged = cv2.Canny(image, lower, upper)
    # return the edged image
    return edged

# Get our image in color mode (1)
src = cv2.imread("bed_cv.jpg", 1)

# Convert the color from BGR to Gray
srcGray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)

# Use Gaussian Blur
srcBlur = cv2.GaussianBlur(srcGray, (3, 3), 0)

# ret is the returned value, otsu is an image
##ret, otsu = cv2.threshold(srcBlur, 0, 255,
##                          cv2.THRESH_BINARY+cv2.THRESH_OTSU)

# Use canny
##srcCanny = cv2.Canny(srcBlur, ret, ret*2, 3)
srcCanny1 = auto_canny(srcBlur, 0.70)

# im is the output image, contours is the contour list,
# hierarchy encodes how the contours are nested
im, contours, hierarchy = cv2.findContours(srcCanny1,
                                           cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)

##cv2.drawContours(src, contours, -1, (0, 255, 0), 3)
ConvexHullPoints = contoursConvexHull(contours)
##cv2.polylines(src, [ConvexHullPoints], True, (0, 0, 255), 3)

cv2.imshow("Source", src)
cv2.imshow("Canny1", srcCanny1)
cv2.waitKey(0)
Since the contour of the bed isn't closed, I can't fit a rectangle or detect the contour with the largest area.
The solution I can think of is to extrapolate the largest possible rectangle using the contour points, in the hope of bridging that small gap, but I'm not too sure how to proceed since the rectangle is incomplete.
Since you haven't provided any other examples, I'm providing an algorithm that works with this case. But bear in mind that you will have to find ways of adapting it to however the light and background change in other samples.
Since there is a lot of noise and a relatively high dynamic range, I suggest not using Canny and instead using adaptive thresholding and findContours on that (it doesn't need edges as an input); this helps with choosing different threshold values for different parts of the image.
My result:
Code:
import cv2
import numpy as np

def clahe(img, clip_limit=2.0, grid_size=(8, 8)):
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=grid_size)
    return clahe.apply(img)

src = cv2.imread("bed.png")

# HSV thresholding to get rid of as much background as possible
hsv = cv2.cvtColor(src.copy(), cv2.COLOR_BGR2HSV)
lower_blue = np.array([0, 0, 120])
upper_blue = np.array([180, 38, 255])
mask = cv2.inRange(hsv, lower_blue, upper_blue)
result = cv2.bitwise_and(src, src, mask=mask)
b, g, r = cv2.split(result)
g = clahe(g, 5, (3, 3))

# Adaptive Thresholding to isolate the bed
img_blur = cv2.blur(g, (9, 9))
img_th = cv2.adaptiveThreshold(img_blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY, 51, 2)

im, contours, hierarchy = cv2.findContours(img_th,
                                           cv2.RETR_CCOMP,
                                           cv2.CHAIN_APPROX_SIMPLE)

# Filter the rectangles by keeping only the big ones,
# and choose the brightest rectangle as the bed
max_brightness = 0
canvas = src.copy()
for cnt in contours:
    rect = cv2.boundingRect(cnt)
    x, y, w, h = rect
    if w * h > 40000:
        mask = np.zeros(src.shape, np.uint8)
        mask[y:y + h, x:x + w] = src[y:y + h, x:x + w]
        brightness = np.sum(mask)
        if brightness > max_brightness:
            brightest_rectangle = rect
            max_brightness = brightness
        cv2.imshow("mask", mask)
        cv2.waitKey(0)

x, y, w, h = brightest_rectangle
cv2.rectangle(canvas, (x, y), (x + w, y + h), (0, 255, 0), 1)
cv2.imshow("canvas", canvas)
cv2.imwrite("result.jpg", canvas)
cv2.waitKey(0)
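One design note: the fixed w * h > 40000 cutoff in the loop above is tied to this image's resolution. Expressing it as a fraction of the frame area makes the filter more portable; the 5% figure below is an assumption to tune:
# Sketch: area cutoff relative to image size instead of a fixed pixel count.
min_area = 0.05 * src.shape[0] * src.shape[1]  # 5% of the frame is an assumption
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    if w * h > min_area:
        pass  # same brightness comparison as in the loop above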
