Detecting apples by thresholding - Python

I want to detect apples by thresholding. To do this, I convert the image to HSV, compute lower and upper limits for the inRange() function, and get a binary mask out of it. Since the apples touch each other, I use the watershed algorithm to separate them.
The input image looks like this:
After InRange() operation and erosion, the gray image looks like this:
Applying the watershed algorithm, the output looks like this:
The problem is that the apples at the bottom left are detected incorrectly: there are only two apples, yet three contours are drawn, and one of the circles is far too big. Any help?
Here is my code:
import cv2
import numpy as np
import imutils
from scipy import ndimage
from skimage.feature import peak_local_max
from skimage.morphology import watershed
img = cv2.imread('4.jpg')
img = imutils.resize(img, width=640)
# img = cv2.pyrMeanShiftFiltering(img, 21, 51)
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
lower_1 = np.array([0,50,20])
upper_1 = np.array([80,255,255])
mask1 = cv2.inRange(hsv, lower_1, upper_1)
lower_2 = np.array([160,50,20])
upper_2 = np.array([179,255,255])
mask2 = cv2.inRange(hsv, lower_2, upper_2)
gray = mask1+mask2
kernel = np.ones((7,7),np.uint8)
gray = cv2.erode(gray,kernel,iterations = 1)
# gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
D = ndimage.distance_transform_edt(thresh)
localMax = peak_local_max(D, indices=False, min_distance=20,
                          labels=thresh)
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
labels = watershed(-D, markers, mask=thresh)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))
for label in np.unique(labels):
    if label == 0:
        continue
    mask = np.zeros(gray.shape, dtype="uint8")
    mask[labels == label] = 255
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv2.contourArea)
    ((x, y), r) = cv2.minEnclosingCircle(c)
    if r > 25 and r < 55:
        cv2.circle(img, (int(x), int(y)), int(r), (0, 255, 0), 2)
        cv2.putText(img, "{}".format(round(r)), (int(x) - 10, int(y)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
cv2.imshow('thresh', thresh)
cv2.imshow('gray', gray)
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
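A possible explanation for the split apple (not verified on this image): when min_distance in peak_local_max is small relative to the apple radius, a single apple can yield several distance-transform peaks and therefore several watershed markers. A quick sketch to probe this, reusing D and thresh from the code above:

# Sweep min_distance and count the resulting watershed markers; if the
# marker count drops to the true apple count at a larger value, the
# over-segmentation came from extra peaks inside a single apple.
for md in (20, 30, 40):
    peaks = peak_local_max(D, indices=False, min_distance=md, labels=thresh)
    n_markers = ndimage.label(peaks, structure=np.ones((3, 3)))[1]
    print("min_distance={}: {} markers".format(md, n_markers))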

Related

Get Edges/boundary for overlapping image

I have a mask for a dental x-ray here in which all the teeth overlap each other. I want to count the number of teeth in the image. To do that, I want to separate the overlapping teeth so I can use a contour-based approach to count them. I tried the approach below, but it gives a result like this. How can I extract the boundary of each tooth?
from skimage.feature import peak_local_max
from skimage.morphology import watershed
import matplotlib.pyplot as plt
from scipy import ndimage
import numpy as np
import cv2
def getImageEdge(input_image):
    img_gray = input_image
    image_black = np.zeros(shape=input_image.shape, dtype="uint8")
    thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    thresh_copy = thresh.copy()
    D = ndimage.distance_transform_edt(thresh_copy)
    localMax = peak_local_max(D, indices=False, min_distance=12, labels=thresh)
    markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
    labels = watershed(-D, markers, mask=thresh_copy)
    for label in np.unique(labels):
        if label == 0:
            continue
        mask = np.zeros(img_gray.shape, dtype="uint8")
        mask[labels == label] = 255
        contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(image_black, contours, -1, (255, 255, 255), 1)
    return image_black
inputImage = cv2.imread("/content/dentalMask.bmp")
inputImage = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)
outputImage = getImageEdge(inputImage)
plt.imshow(inputImage)
plt.show()
plt.imshow(outputImage)
plt.show()
EDITED:
Based on the answer from fmw42, I have added one more image that shows more overlap; on it, simple thresholding and the contour-based approach fail.
input
output
Given your example, the following works for me in Python/OpenCV by simply thresholding and getting the contours.
Input:
import cv2
import numpy as np
# read image
img = cv2.imread("teeth.png")
# convert img to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold gray image
#thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
# Get contours
cntrs = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cntrs = cntrs[0] if len(cntrs) == 2 else cntrs[1]
result = img.copy()
for c in cntrs:
    cv2.drawContours(result, [c], -1, (0, 0, 255), 1)
count = len(cntrs)
print("")
print("count =",count)
print("")
# write results to disk
cv2.imwrite("teeth_thresh.png", thresh)
cv2.imwrite("tide_contours.png", result)
# display it
cv2.imshow("thresh", thresh)
cv2.imshow("result", result)
cv2.waitKey(0)
Contours:
Resulting Count:
count = 32
Without knowledge of the exact layout of teeth in a mouth, this task is impossible; no image processing technique can help, because where teeth touch you cannot tell two touching teeth apart from a single two-rooted tooth.

OpenCV Watershed not segmenting oval objects properly

Attempting to create a way to process images to count different types of tablets. The following code has been working well for circular objects, but oval shapes are creating issues that I can't find a workaround for.
# imports needed to run this snippet
import cv2
import numpy as np
import imutils
from scipy import ndimage
from skimage.feature import peak_local_max
from skimage.morphology import watershed

kernel = np.ones((5, 5), np.uint8)
image = cv2.imread('sample.jpg')
shifted = cv2.GaussianBlur(image, (15, 15), 1)
shifted = cv2.pyrMeanShiftFiltering(shifted, 21, 51)
shifted = cv2.erode(shifted,kernel,iterations=1)
shifted = cv2.dilate(shifted,kernel,iterations=1)
cv2.imwrite("step1.jpg", shifted)
gray = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255,
                       cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
cv2.imwrite("step2.jpg", thresh)
thresh = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)
cv2.imwrite("step3.jpg", thresh)
thresh = cv2.bitwise_not(thresh)
thresh = cv2.erode(thresh,kernel,iterations=1)
cv2.imwrite("step4.jpg", thresh)
D = ndimage.distance_transform_edt(thresh)
localMax = peak_local_max(D, indices=False, min_distance=10,
                          labels=thresh)
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
labels = watershed(-D, markers, mask=thresh)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))
for label in np.unique(labels):
    if label == 0:
        continue
    mask = np.zeros(gray.shape, dtype="uint8")
    mask[labels == label] = 255
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv2.contourArea)
    ((x, y), r) = cv2.minEnclosingCircle(c)
    cv2.circle(image, (int(x), int(y)), int(r), (0, 255, 0), 2)
    cv2.putText(image, "#{}".format(label), (int(x) - 10, int(y)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
cv2.imwrite("step5.jpg", image)
cv2.waitKey(0)
The image being used is:
https://imgur.com/a/1U49DeT
The output after filtering yields:
https://imgur.com/a/vXwrWlG
Any teaching points as to how to fix this would be greatly appreciated.
I think there is a better way to use the watershed operator.
It relies on having a good gradient, but if your images are similar to this one, you should be able to do this effectively. Also, there are far more powerful edge detectors available today than the one used in this demo.
import cv2
import numpy as np
import higra as hg
from skimage.segmentation import relabel_sequential
import matplotlib.pyplot as plt
def main():
    img_path = "pills.jpg"
    img = cv2.imread(img_path)
    img = cv2.resize(img, (256, 256))
    img = cv2.GaussianBlur(img, (9, 9), 0)
    edges = cv2.Canny(img, 100, 100)
    size = img.shape[:2]
    graph = hg.get_4_adjacency_graph(size)
    edge_weights = hg.weight_graph(graph, edges, hg.WeightFunction.mean)
    tree, altitudes = hg.watershed_hierarchy_by_area(graph, edge_weights)
    segments = hg.labelisation_horizontal_cut_from_threshold(tree, altitudes, 500)
    segments, _, _ = relabel_sequential(segments)
    print('The number of pills is ', segments.max() - 1)
    plt.imshow(segments)
    plt.show()

if __name__ == "__main__":
    main()
Initially, I resize the image to speed up the computation and apply a blur to reduce the background gradient. I then detect its edges (the gradient) and create a graph with them as edge weights; finally, I compute the watershed hierarchy ordered by area and threshold it, obtaining the connected components at that level, from which you can count the number of segments.
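As a sketch of the point about better edge detectors (my substitution, not part of the original demo): any gradient image can serve as the edge weights, for example a Sobel gradient magnitude instead of the binary Canny edge map:

# Drop-in replacement for the Canny step above; img and graph are the
# variables from main(). A smooth Sobel magnitude often gives the
# watershed hierarchy more to work with than a hard 0/255 edge map.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gx = cv2.Sobel(gray, cv2.CV_32F, 1, 0, ksize=3)
gy = cv2.Sobel(gray, cv2.CV_32F, 0, 1, ksize=3)
grad = cv2.magnitude(gx, gy)
edge_weights = hg.weight_graph(graph, grad, hg.WeightFunction.mean)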

Difficulty extracting characters using MSER in images which have borders around the text

I am trying to develop an OCR system. I am trying to use MSER to extract characters from an image and then pass the characters into a CNN to recognize them. Here is my code for character extraction:
import cv2
import numpy as np
# create MSER object
mser = cv2.MSER_create()
# read the image
img = cv2.imread('textArea01.png')
# convert to gray scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# store copy of the image
vis = img.copy()
# detect regions in the image
regions,_ = mser.detectRegions(gray)
# find convex hulls of the regions and draw them onto the original image
hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
cv2.polylines(vis, hulls, 1, (0, 255, 0))
# create mask for the detected region
mask = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.uint8)
mask = cv2.dilate(mask, np.ones((150, 150), np.uint8))  # note: mask is still all zeros here, so this dilation is a no-op
for contour in hulls:
    cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)
# this is used to keep only the text regions; everything else is ignored
text_only = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow('img', vis)
cv2.waitKey(0)
cv2.imshow('mask', mask)
cv2.waitKey(0)
cv2.imshow('text', text_only)
cv2.waitKey(0)
This works fine for most images, but for some images like this:
the outer border is also detected as a region, and its contour is drawn into the mask so that the whole area inside the border is treated as a text region; the inner contours therefore have no effect. How do I prevent this so that only the text is detected?
Hulls detected:
and the resulting mask:
My result using this code:
import cv2
import numpy as np
img = cv2.imread("img.png")
# grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray', gray)
# binary
# ret, thresh = cv2.threshold(gray, 250, 255, cv2.THRESH_BINARY_INV)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 35, 180)
cv2.imshow('threshold', thresh)
# dilation
kernel = np.ones((1, 1), np.uint8)
img_dilation = cv2.dilate(thresh, kernel, iterations=1)
cv2.imshow('dilated', img_dilation)
# find contours
# cv2.findContours() changed from OpenCV 3 to OpenCV 4: it now returns only two values instead of three
cv2MajorVersion = cv2.__version__.split(".")[0]
# check for contours on thresh
if int(cv2MajorVersion) >= 4:
    ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
else:
    im2, ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# sort contours
sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])
for i, ctr in enumerate(sorted_ctrs):
    # get the bounding box
    x, y, w, h = cv2.boundingRect(ctr)
    # get the ROI
    roi = img[y:y + h, x:x + w]
    # show ROI
    # cv2.imshow('segment no:' + str(i), roi)
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 1)
    # to save the letters without the green bounding box, comment out the line above
    if w > 5:
        cv2.imwrite('C:\\Users\\PC\\Desktop\\output\\{}.png'.format(i), roi)
cv2.imshow('marked areas', img)
cv2.waitKey(0)
You can put a threshold on the contour area so that any shape covering more than a certain area of the image is ignored:
for contour in hulls:
    if cv2.contourArea(contour) > ThresholdArea:  # skip hulls as large as the border
        continue
    cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)
# this is used to keep only the text regions; everything else is ignored
text_only = cv2.bitwise_and(img, img, mask=mask)
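ThresholdArea above is a placeholder. One option (an assumption on my part, not from the original answer) is to derive it from the image size, since the border hull covers nearly the whole image while individual characters are small:

# Hypothetical choice: skip any hull covering more than half the image.
ThresholdArea = 0.5 * img.shape[0] * img.shape[1]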

Detect dark-colored lines and end lines using the Hough Transform

I'm trying to detect vertical lines whose pixels are dark, i.e. every RGB channel is below about 100; an example is RGB (100, 100, 100).
import numpy as np
import cv2
img = cv2.imread('testD2.png')
lower = np.array([0, 0, 0], dtype = "uint8")
upper = np.array([100,100,100], dtype = "uint8")
mask = cv2.inRange(img, lower, upper)
img = cv2.bitwise_and(img, img, mask = mask)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
minLineLength=img.shape[1]-300
lines = cv2.HoughLinesP(image=edges,rho=0.02,theta=np.pi/500, threshold=10,lines=np.array([]), minLineLength=minLineLength,maxLineGap=100)
if lines is not None:
    a, b, c = lines.shape
    for i in range(a):
        cv2.line(img, (lines[i][0][0], lines[i][0][1]), (lines[i][0][2], lines[i][0][3]), (0, 0, 255), 3, cv2.LINE_AA)
cv2.imshow('edges', edges)
cv2.imshow('result', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
I also have to change the color of the end lines, i.e. the first and the last line.
Using cv2.findContours() may work better:
You can use cv2.findContours() and cv2.boundingRect() to identify the bars and return the (x, y, w, h) information that describes these rectangles. Here are a few examples.
If you want to only identify the lines and mark them you can do:
import cv2
import numpy as np
img = cv2.imread('oVKlP.png')
g = cv2.imread('oVKlP.png',0)
(T, mask) = cv2.threshold(g, 100, 255, cv2.THRESH_BINARY_INV)
_, contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
img = cv2.drawContours(img.copy(), contours, -1, (0,255,0), 2)
cv2.imwrite('just_contours.png',img)
Result:
If you want to display some of the line info, like the x value for one side of each bar, you can do:
import cv2
import numpy as np
img = cv2.imread('oVKlP.png')
g = cv2.imread('oVKlP.png',0)
(T, mask) = cv2.threshold(g, 100, 255, cv2.THRESH_BINARY_INV)
_, contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# bounding boxes (x, y, w, h) for each bar
bounds = [cv2.boundingRect(i) for i in contours]
bounds.reverse()
img = cv2.drawContours(img.copy(), contours, -1, (0,0,255), 2)
font = cv2.FONT_HERSHEY_SIMPLEX
n = 20
b = 0
for (x, y, w, h) in bounds:
    cv2.circle(img, (x, y + n + 10), 5, (0, 255, 0), -1, cv2.LINE_AA)
    cv2.putText(img, '{0}'.format(x), (x - b, y + n), font, .6, (255, 0, 255), 2, cv2.LINE_AA)
    n += 33
    b += 3
cv2.imwrite('fancy_marks.png',img)
Result:
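The question also asks to recolor the end lines (the first and the last bar). A minimal sketch building on the contours from the code above, sorting the bars left to right by bounding-box x and drawing the two outermost ones in a different color (the output filename is my own placeholder):

# Sort bars left-to-right, then draw the first and last in blue
# and the rest in green.
ordered = sorted(contours, key=lambda c: cv2.boundingRect(c)[0])
for i, c in enumerate(ordered):
    color = (255, 0, 0) if i in (0, len(ordered) - 1) else (0, 255, 0)
    cv2.drawContours(img, [c], -1, color, 2)
cv2.imwrite('end_lines_marked.png', img)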

OpenCV finds the middle of a red contour (Python)

My idea is to filter out my body with color-keying. If I wear a red t-shirt, I would like to find the center of the red t-shirt using contours. Is there a good way to do this?
Here is my code:
import numpy as np
import cv2
import imutils
cap = cv2.VideoCapture(0)
while True:
    # Take each frame
    _, frame = cap.read()
    frame = imutils.resize(frame, width=400)
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define ranges of red and skin color in HSV
    lower_red = np.array([0, 50, 50])
    upper_red = np.array([10, 255, 255])
    lower_skin = np.array([0, 48, 80])
    upper_skin = np.array([20, 255, 255])
    skin_mask = cv2.inRange(hsv, lower_skin, upper_skin)
    # Threshold the HSV image to get only red colors
    red_mask = cv2.inRange(hsv, lower_red, upper_red)
    mask = red_mask + skin_mask
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
    mask = cv2.erode(mask, kernel, iterations=2)
    mask = cv2.dilate(mask, kernel, iterations=2)
    mask = cv2.GaussianBlur(mask, (3, 3), 0)
    skin = cv2.bitwise_and(frame, frame, mask=mask)
    gray = cv2.cvtColor(skin, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    # loop over the contours
    for c in cnts:
        cv2.drawContours(skin, [c], -1, (0, 255, 0), 2)
    cv2.imshow("Image", skin)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cv2.destroyAllWindows()
cv2.destroyAllWindows()
So at the moment I only draw the contours of the t-shirt and the skin, but I would like to find the center of the red t-shirt contour. Is this possible? Do I have to filter red on its own rather than adding it to the skin mask? Maybe my current code is not able to do this; I would be happy for any suggestions! :)
Find the t-shirt using your red mask and cv2.findContours(), then find its center using cv2.moments(contour) as described in the docs:
for cnt in contours:
    M = cv2.moments(cnt)
    center_x = int(M['m10'] / M['m00'])
    center_y = int(M['m01'] / M['m00'])
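Putting that together with the code from the question, a minimal sketch (an assumption on my part: using only red_mask rather than the combined red+skin mask, so the t-shirt is isolated from the skin) could look like this inside the while loop:

cnts = cv2.findContours(red_mask.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
if cnts:
    c = max(cnts, key=cv2.contourArea)  # largest red blob = the t-shirt
    M = cv2.moments(c)
    if M['m00'] > 0:  # guard against division by zero on empty moments
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        cv2.circle(frame, (cx, cy), 5, (255, 0, 0), -1)  # mark the center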
