How to calculate the distance between 2 red pixels - Python

I have a video file with 2 laser dots and I want to calculate the distance in pixels between them. I tried this code with OpenCV, but it is not working:
Image
import cv2
import numpy as np

cap = cv2.VideoCapture(r'D:\Books\Pav Man\PICS\Test\VID_20200609_195155.mp4')
#cap = cv2.VideoCapture(0)
old = 0
while (1):
    # Take each frame
    ret, frame = cap.read()
    if not ret:
        # stop when the video runs out of frames
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_red = np.array([0, 0, 255])
    upper_red = np.array([255, 255, 255])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    cv2.imshow('mask', mask)
    # cv2.imshow('Track Laser', frame)
    moments = cv2.moments(hsv[:, :, 2])
    output = cv2.connectedComponentsWithStats(mask, 8, cv2.CV_32S)
    print(output[3])
    print("----**----")
    if moments["m00"] > 0:
        x = (moments['m10'] / moments['m00'])
        y = (moments['m01'] / moments['m00'])
        #print(moments['m00'], moments['m01'], moments['m10'])
        #print(x, y)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
This code, output = cv2.connectedComponentsWithStats(mask, 8, cv2.CV_32S), gives me the centroids of the points, but how do I get each point (laser dot) separately? If I can get the centroids, I can measure the distance between these points.
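For reference, here is a minimal sketch of what I mean (assuming the mask isolates exactly the two dots, so the components are the background plus the two spots):
nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(mask, 8, cv2.CV_32S)
# centroids[0] belongs to the background component; the dots start at index 1
if nlabels >= 3:
    dx, dy = centroids[1] - centroids[2]
    print('distance in pixels:', (dx**2 + dy**2) ** 0.5)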

Here is another way to do that in Python/OpenCV using connectedComponentsWithStats.
Read the input
Set min and max red colors
Use cv2.inRange() to threshold on the color range
Apply morphology open and close to clean up small spots and holes
Process with connectedComponentsWithStats to get the labels and centroids
Extract all the areas corresponding to the labels and centroids
For all labels, test if the area is smaller than some estimate of the background, as we want to exclude that centroid. Draw the label as a filled yellow region on a copy of the input. Extract the centroids of small regions and store in pts array. Draw the centroids as small green squares on the copy of the input.
For all saved points, compute the distance between successive ones.
Save the output
Input:
import cv2
import numpy as np
import math
# read image
frame = cv2.imread('red_spots.jpg')
hh, ww = frame.shape[:2]
# convert to hsv
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# threshold image
lower_red = np.array([0, 0, 225])
upper_red = np.array([255, 255, 255])
thresh = cv2.inRange(hsv, lower_red, upper_red)
# apply close and open morphology to smooth
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9,9))
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
morph = cv2.morphologyEx(morph, cv2.MORPH_OPEN, kernel)
# do connected components processing
nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(morph, None, None, None, 8, cv2.CV_16U)
# get all areas from stats[label_start_id:label_stop_id, area_flag]
areas = stats[0:, cv2.CC_STAT_AREA]
# draw labels and get centroids and draw centroids
result = frame.copy()
pts = []
for i in range(0, nlabels):
    # skip any region as large as the background (label 0)
    if areas[i] <= ww*hh/5:
        result[labels == i] = (0,255,255)
        pt = centroids[i]
        pts.append(pt)
        cx = pt[0]
        cy = pt[1]
        x = int(round(cx))
        y = int(round(cy))
        # draw small square at centroids
        result[y-2:y+3, x-2:x+3] = (0,255,0)
        print('centroid =', cx, ",", cy)
number = len(pts)
for i in range(number-1):
    pt1 = pts[i]
    x1 = pt1[0]
    y1 = pt1[1]
    pt2 = pts[i+1]
    x2 = pt2[0]
    y2 = pt2[1]
    dist = math.sqrt( (x2-x1)**2 + (y2-y1)**2 )
    print('distance =', dist)
    print('')
print('')
#save images
cv2.imwrite('red_spots_thresh2.jpg',thresh)
cv2.imwrite('red_spots_morph2.jpg',morph)
cv2.imwrite('red_spots_centroids2.jpg',result)
# show images
cv2.imshow("thresh", thresh)
cv2.imshow("morph", morph)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Morphology cleaned image:
Result image with colored region labels and centroids:
Centroids and Distances:
centroid = 1006.7307283673711 , 433.70499350726004
centroid = 1036.418693371483 , 750.4024797329519
distance = 318.08595229553544

Distance is the square root of the sum of the squares of the x difference and the y difference. So
import math
dist = math.sqrt( (x1-x2)**2 + (y1-y2)**2 )
for points (x1, y1) and (x2, y2).
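Equivalently, the standard library can do this in one call (math.dist requires Python 3.8+):
import math
dist = math.hypot(x1 - x2, y1 - y2)
# or, on Python 3.8+:
dist = math.dist((x1, y1), (x2, y2))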

Here is one way to do your processing in Python/OpenCV using contours.
Read the input
Set min and max red colors
Use cv2.inRange() to threshold on the color range
Apply morphology open and close to clean up small spots and holes
Find contours
For each contour, draw the contour, use moments to compute the centroid, save the centroids in a list and draw a small square at the centroid location
For each successive pair of centroid points, compute the distance between them in pixels
Save the results
Input:
import cv2
import numpy as np
import math
# read image
frame = cv2.imread('red_spots.jpg')
# convert to hsv
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# threshold image
lower_red = np.array([0, 0, 225])
upper_red = np.array([255, 255, 255])
thresh = cv2.inRange(hsv, lower_red, upper_red)
# apply close and open morphology to smooth
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9,9))
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
morph = cv2.morphologyEx(morph, cv2.MORPH_OPEN, kernel)
# draw contours and get centroids
spots = frame.copy()
contours = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
pts = []
count = 0
for c in contours:
    cv2.drawContours(spots, [c], -1, (0,255,0), 2)
    M = cv2.moments(c)
    cx = M["m10"] / M["m00"]
    cy = M["m01"] / M["m00"]
    pt = (cx,cy)
    pts.append(pt)
    x = round(cx)
    y = round(cy)
    # draw small square at centroids
    spots[y-2:y+3, x-2:x+3] = (255,0,0)
    print('centroid =', cx, ",", cy)
    count = count + 1
for i in range(count-1):
    pt1 = pts[i]
    x1 = pt1[0]
    y1 = pt1[1]
    pt2 = pts[i+1]
    x2 = pt2[0]
    y2 = pt2[1]
    dist = math.sqrt( (x2-x1)**2 + (y2-y1)**2 )
    print('distance =', dist)
    print('')
#save images
cv2.imwrite('red_spots_thresh.png',thresh)
cv2.imwrite('red_spots_morph.png',morph)
cv2.imwrite('red_spots_centroids.png',spots)
# show images
cv2.imshow("thresh", thresh)
cv2.imshow("morph", morph)
cv2.imshow("spots", spots)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Morphology cleaned image:
Contours and centroids image:
Centroids and Distances:
centroid = 1036.4038142620232 , 750.3941127694858
centroid = 1006.6605586230609 , 433.9662237323787
distance = 317.8227024875417

You can do this:
The function cv2.inRange() to find the two red dots.
The function cv2.connectedComponentsWithStats() to find the centroids of these red points.
Calculate the Euclidean distance between centroids.
Also, you can select points by brightness only, without even using their color.
import cv2
img = cv2.imread('HAgbc.jpg')
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
cv2.normalize(gray, gray, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)
points=cv2.threshold(gray, 230, 255, cv2.THRESH_BINARY )[1]
output = cv2.connectedComponentsWithStats(points, 8, cv2.CV_32S)
centroids = output[3]
x,y=(centroids[1]-centroids[2])
dist=cv2.magnitude(x, y)[0]
print('distance is: ', *dist)
Or this code (find two brightness maximum):
import cv2
img = cv2.imread('HAgbc.jpg')
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
pos1=cv2.minMaxLoc(gray)[3]
cv2.circle(gray, pos1, 30, 0, -1) # masking first spot
pos2=cv2.minMaxLoc(gray)[3]
x=pos1[0]-pos2[0]
y=pos1[1]-pos2[1]
dist=cv2.magnitude(x, y)[0]
print('distance is: ', *dist)
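If cv2.magnitude complains about plain scalar inputs, math.hypot computes the same Euclidean distance (a minimal equivalent using the pos1 and pos2 found above):
import math
dist = math.hypot(pos1[0] - pos2[0], pos1[1] - pos2[1])
print('distance is:', dist)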


How to measure average thickness of labeled segmented image

I have an image and I've done some pre-processing on that image. Below I show my preprocessing:
import cv2
import numpy as np

img = cv2.imread("...my_drive...\\image_69.tif", 0)
median = cv2.medianBlur(img, 13)
ret, th = cv2.threshold(median, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
kernel = np.ones((3,15), np.uint8)
closing1 = cv2.morphologyEx(th, cv2.MORPH_CLOSE, kernel, iterations=2)
kernel = np.ones((1,31), np.uint8)
closing2 = cv2.morphologyEx(closing1, cv2.MORPH_CLOSE, kernel)
kernel = np.ones((1,13), np.uint8)
opening1 = cv2.morphologyEx(closing2, cv2.MORPH_OPEN, kernel, iterations=2)
So basically I used threshold filtering, closing, and opening, and the result looks like this:
Please note that when I used type(opening1), I got numpy.ndarray. So the image at this step is numpy array with 1021 x 1024 size.
Then I labeled my image:
from skimage import measure

label_image = measure.label(opening1, connectivity=opening1.ndim)
props = measure.regionprops_table(label_image, properties=['label', "area", "coords"])
and the result looks like this
Please note that when I used type(label_image), I got numpy.ndarray. So the image at this step is numpy array with 1021 x 1024 size.
As you can see, the image currently has 6 labels. Some of these labels are short, small pieces, so I tried to keep the top 2 labels based on area:
from skimage.measure import regionprops

slc = label_image
rps = regionprops(slc)
areas = [r.area for r in rps]
id = np.argsort(props["area"])[::-1]
new_slc = np.zeros_like(slc)
for i in id[:2]:
    new_slc[tuple(rps[i].coords.T)] = i+1
Now the result looks like this:
It looks like I was successful in keeping the top 2 regions (note that by changing id[:2] you can select the thicker white layer or the thin one). Now:
What I want to do: I want to find the average thickness of these two regions
Also, please note that I know each of my pixels is 314 nm
Can anyone here advise how I can do this task?
Original photo: Below I showed low quality of my original image, so you have better understanding as why I did all the pre-processing
you can also access the original photo here : https://www.mediafire.com/file/20h66aq83edy1h7/img.7z/file
Here is one way to do that in Python/OpenCV.
Read the input
Convert to gray
Threshold to binary
Get the contours and filter on area so that we have only the two primary lines
Sort by area
Select the first (smaller and thinner) contour
Draw it white filled on a black background
Get its skeleton
Get the points of the skeleton
Fit a line to the points and get the rotation angle of the skeleton
Loop over each of the two contours and draw them white filled on a black background. Then rotate to horizontal lines. Then get the vertical thickness of the lines from the average thickness along each column using np.count_nonzero() and print the value.
Save intermediate images
Input:
import cv2
import numpy as np
import skimage.morphology
import skimage.transform
import math
# read image
img = cv2.imread('lines.jpg')
# convert to grayscale
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# threshold
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
# get contours
new_contours = []
img2 = np.zeros_like(thresh, dtype=np.uint8)
contour_img = thresh.copy()
contour_img = cv2.merge([contour_img,contour_img,contour_img])
contours = cv2.findContours(thresh , cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
for cntr in contours:
    area = cv2.contourArea(cntr)
    if area > 1000:
        cv2.drawContours(contour_img, [cntr], 0, (0,0,255), 1)
        cv2.drawContours(img2, [cntr], 0, (255), -1)
        new_contours.append(cntr)
# sort contours by area
cnts_sort = sorted(new_contours, key=lambda x: cv2.contourArea(x), reverse=False)
# select first (smaller) sorted contour
first_contour = cnts_sort[0]
contour_first_img = np.zeros_like(thresh, dtype=np.uint8)
cv2.drawContours(contour_first_img, [first_contour], 0, (255), -1)
# thin smaller contour
thresh1 = (contour_first_img/255).astype(np.float64)
skeleton = skimage.morphology.skeletonize(thresh1)
skeleton = (255*skeleton).clip(0,255).astype(np.uint8)
# get skeleton points
pts = np.column_stack(np.where(skeleton.transpose()==255))
# fit line to pts
(vx,vy,x,y) = cv2.fitLine(pts, cv2.DIST_L2, 0, 0.01, 0.01)
#print(vx,vy,x,y)
x_axis = np.array([1, 0]) # unit vector in the same direction as the x axis
line_direction = np.array([vx, vy]) # unit vector in the same direction as your line
dot_product = np.dot(x_axis, line_direction)
[angle_line] = (180/math.pi)*np.arccos(dot_product)
print("angle:", angle_line)
# loop over each sorted contour
# draw contour filled on black background
# rotate
# get mean thickness from np.count_nonzero
black = np.zeros_like(thresh, dtype=np.uint8)
i = 1
for cnt in cnts_sort:
    cnt_img = black.copy()
    cv2.drawContours(cnt_img, [cnt], 0, (255), -1)
    cnt_img_rot = skimage.transform.rotate(cnt_img, angle_line, resize=False)
    thickness = np.mean(np.count_nonzero(cnt_img_rot, axis=0))
    print("line ", i, "=", thickness)
    i = i + 1
# save resulting images
cv2.imwrite('lines_thresh.jpg',thresh)
cv2.imwrite('lines_filtered.jpg',img2)
cv2.imwrite('lines_small_contour_skeleton.jpg',skeleton )
# show thresh and result
cv2.imshow("thresh", thresh)
cv2.imshow("contours", contour_img)
cv2.imshow("lines_filtered", img2)
cv2.imshow("first_contour", contour_first_img)
cv2.imshow("skeleton", skeleton)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Contour image:
Filtered contour image:
Skeleton image:
Angle (in degrees) and Thicknesses (in pixels):
angle: 3.1869032185349733
line 1 = 8.79219512195122
line 2 = 49.51609756097561
To get the thickness in nm, multiply thickness in pixels by your 314 nm/pixel.
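For example, applying that conversion to the two thicknesses above:
nm_per_pixel = 314
print(8.79219512195122 * nm_per_pixel)   # about 2761 nm, i.e. roughly 2.8 microns (line 1)
print(49.51609756097561 * nm_per_pixel)  # about 15548 nm, i.e. roughly 15.5 microns (line 2)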
ADDITION
If I start with your tiff image, the following shows my preprocessing, which is similar to yours.
import cv2
import numpy as np
import skimage.morphology
import skimage.transform
import math
# read image
img = cv2.imread('lines.tif')
# convert to grayscale
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# threshold
thresh = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)[1]
# apply morphology
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1,5))
morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (29,1))
morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)
# get contours
new_contours = []
img2 = np.zeros_like(gray, dtype=np.uint8)
contour_img = gray.copy()
contour_img = cv2.merge([contour_img,contour_img,contour_img])
contours = cv2.findContours(morph , cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
for cntr in contours:
    area = cv2.contourArea(cntr)
    if area > 1000:
        cv2.drawContours(contour_img, [cntr], 0, (0,0,255), 1)
        cv2.drawContours(img2, [cntr], 0, (255), -1)
        new_contours.append(cntr)
# sort contours by area
cnts_sort = sorted(new_contours, key=lambda x: cv2.contourArea(x), reverse=False)
# select first (smaller) sorted contour
first_contour = cnts_sort[0]
contour_first_img = np.zeros_like(morph, dtype=np.uint8)
cv2.drawContours(contour_first_img, [first_contour], 0, (255), -1)
# thin smaller contour
thresh1 = (contour_first_img/255).astype(np.float64)
skeleton = skimage.morphology.skeletonize(thresh1)
skeleton = (255*skeleton).clip(0,255).astype(np.uint8)
# get skeleton points
pts = np.column_stack(np.where(skeleton.transpose()==255))
# fit line to pts
(vx,vy,x,y) = cv2.fitLine(pts, cv2.DIST_L2, 0, 0.01, 0.01)
#print(vx,vy,x,y)
x_axis = np.array([1, 0]) # unit vector in the same direction as the x axis
line_direction = np.array([vx, vy]) # unit vector in the same direction as your line
dot_product = np.dot(x_axis, line_direction)
[angle_line] = (180/math.pi)*np.arccos(dot_product)
print("angle:", angle_line)
# loop over each sorted contour
# draw contour filled on black background
# rotate
# get mean thickness from np.count_nonzero
black = np.zeros_like(thresh, dtype=np.uint8)
i = 1
for cnt in cnts_sort:
    cnt_img = black.copy()
    cv2.drawContours(cnt_img, [cnt], 0, (255), -1)
    cnt_img_rot = skimage.transform.rotate(cnt_img, angle_line, resize=False)
    thickness = np.mean(np.count_nonzero(cnt_img_rot, axis=0))
    print("line ", i, "=", thickness)
    i = i + 1
# save resulting images
cv2.imwrite('lines_thresh2.jpg',thresh)
cv2.imwrite('lines_morph2.jpg',morph)
cv2.imwrite('lines_filtered2.jpg',img2)
cv2.imwrite('lines_small_contour_skeleton2.jpg',skeleton )
# show thresh and result
cv2.imshow("thresh", thresh)
cv2.imshow("morph", morph)
cv2.imshow("contours", contour_img)
cv2.imshow("lines_filtered", img2)
cv2.imshow("first_contour", contour_first_img)
cv2.imshow("skeleton", skeleton)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Morphology image:
Filtered Lines image:
Skeleton image:
Angle (degrees) and Thickness (pixels):
angle: 3.206927978669998
line 1 = 9.26171875
line 2 = 49.693359375
Use Deskew to straighten up the image.
Then, for the label you want to measure, count its pixels in each column and divide the total by the number of columns to get the average thickness.
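A minimal sketch of that counting step (assuming mask is a deskewed binary image of a single label, white on black):
import numpy as np

def average_thickness(mask):
    # white-pixel count in each column, averaged over the columns the label actually crosses
    col_counts = np.count_nonzero(mask, axis=0)
    col_counts = col_counts[col_counts > 0]
    return col_counts.mean()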
This can be done with various tools in scipy. Assume you have the image here:
import numpy as np
import PIL.Image
from matplotlib.pyplot import imshow, plot, hlines, legend

I = PIL.Image.open("input.jpg")
img = np.array(I).mean(axis=2)
mask = img == 255  # or some kind of thresholding
imshow(mask)  # note this is a binary image; the green coloring is due to some kind of rendering artifact or aliasing
If one zooms in they can see split up regions
To get around that we can dilate the mask
from scipy import ndimage as ni
mask1 = ni.binary_dilation(mask, iterations=2)
imshow(mask1)
Now, we can find connected regions, and find the top regions with the most pixels, which should be the two lines of interest:
lab, nlab = ni.label(mask1)
max_labs = np.argsort([ (lab==i).sum() for i in range(1, nlab+1)])[::-1]+1
imshow(lab==max_labs[0])
and imshow(lab==max_labs[1])
Working with the first line as an example:
from scipy.stats import linregress
y0,x0 = np.where(lab==max_labs[0])
l0 = linregress( x0, y0)
xi, yi = np.arange(img.shape[1]), np.arange(img.shape[1])*l0.slope + l0.intercept
plot( xi, yi, 'r--')
Interpolate along this region at different y-intercepts and compute the average signal along each line
from scipy.interpolate import RectBivariateSpline
img0 = img.copy()
img0[~(lab==max_labs[0])] = 0 # set everything outside this line region to 0
rbv = RectBivariateSpline(np.arange(img.shape[0]), np.arange(img.shape[1]), img0)
prof0 = [rbv.ev(yi+i, xi).mean() for i in np.arange(-300,300)] # pick a wide window here (-300,300), can be more technical, but not necessary
plot(prof0)
Use your favorite method to compute the FWHM of this profile, then multiply by your pixel-to-nanometers factor.
I would just use a Gaussian fit to compute fwhm
xvals = np.arange(len(prof0))
yvals = np.array(prof0)
def func(p, xvals, yvals):
    mu, var, amp = p
    model = np.exp(-(xvals-mu)**2/2/var)*amp
    resid = (model-yvals)**2
    return resid.sum()
from scipy.optimize import minimize
x0 = 300,200,255 # initial estimate of mu, variance, amplitude
fit_gauss = minimize(func, x0=x0, args=(xvals, yvals), method='Nelder-Mead')
mu, var, amp = fit_gauss.x
fwhm = 2.355 * np.sqrt(var)
# display using matplotlib plot /hlines
plot( xvals, yvals)
plot( xvals, amp*np.exp(-(xvals-mu)**2/2/var) )
hlines(amp*0.5, mu-fwhm/2., mu+fwhm/2, color='r')
legend(("profile","fit gauss","fwhm=%.2f pix" % fwhm))
Finally, thickness=fwhm*314, or about 13 microns.
Following the exact same approach for the second line (lab==max_labs[1]) gives a thickness of about 2.2 microns:
Note: I was using interactive plotting for this example, so the calls to imshow, plot, etc. are meant mostly as a reference for the reader. One may need to take extra steps to recreate the exact images I've uploaded (zooming, etc.).

How to find the average diameter of drops superimposed on a strip of size 260 mm x 6.2 mm using image processing?

How can I find the average diameter of the drops in an image? The drops are raindrops, and the strip on which they are superimposed has a size of 260 mm x 6.2 mm. The strip's length and breadth can be treated as a reference (perspective) size for measuring each drop's diameter, or its extent along the x and y axes, in mm.
Also, how to label each drop with its diameter?
image = cv2.imread("1.png")
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# create a binary thresholded image
_, binary = cv2.threshold(gray, 225, 255, cv2.THRESH_BINARY_INV)
# show it
# find the contours from the thresholded image
contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# draw all contours
for contour in contours:
if (cv2.contourArea(contour) < 3000) & (cv2.contourArea(contour) > 4):
image = cv2.drawContours(image, contours, -1, (1, 1, 1), 1)
circles = image.copy()
num_circles = len(contours)
ave = 0
rds = []
center, radius = cv2.minEnclosingCircle(contour)
cx = int(round(center[0]))
cy = int(round(center[1]))
rr = int(round(radius))
# draw enclosing circle over beads
# cv2.circle(circles, (cx,cy), rr, (1,1,1), 1)
# cumulate radii for average
ave = ave + radius
rds.append(rr)
# print average radius
ave_radius = ave / num_circles
print("average radius:", ave_radius)
print ("number of circles:", num_circles)
cv2.imwrite('beads_circles.jpg', circles)
plt.imshow(circles)
plt.show()
Here is the output
import numpy as np
import matplotlib.pyplot as plt
import cv2
import glob
import os
img = cv2.imread("pip_ims/52.png")
image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# create a binary thresholded image
_, binary = cv2.threshold(gray, 225, 255, cv2.THRESH_BINARY_INV)
# find the contours from the thresholded image
contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
dia_mm = []
for contour in contours:
    #image = cv2.drawContours(image, contours, -1, (1, 1, 1), 3)
    area = cv2.contourArea(contour)
    dia = 2*(area/np.pi)**(0.5)  # calculating diameter from area
    print(dia)
    diamm = dia*(6.2/64)  # converting diameter from pixels to mm
    dia_mm.append(diamm)
    print(diamm)
diameter = list(filter(lambda num: num != 0, dia_mm)) # removing zeros
diameter = np.array(diameter)
print(diameter.mean(), diameter.max(), diameter.min())
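To also label each drop with its diameter, as asked in the question, one option is to draw the value at each contour with cv2.putText; a hedged sketch reusing the contours and the 6.2/64 mm-per-pixel factor from above:
labeled = image.copy()
for contour in contours:
    area = cv2.contourArea(contour)
    if area > 0:
        diamm = 2*(area/np.pi)**0.5 * (6.2/64)  # diameter in mm, as above
        x, y, w, h = cv2.boundingRect(contour)
        cv2.putText(labeled, "%.2f mm" % diamm, (x, max(y - 3, 0)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 0), 1)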

Drawing circles around cells after watershed segmentation - openCV/Python

I followed a tutorial on watershed segmentation and used it to segment each red blood cell in an image. I'm new to OpenCV and would like to know whether it is possible to draw circles around the cells using watershed segmentation. If so, could you please show how it is done?
Original image
Output of Watershed segmentation
Code is given below
import numpy as np
import cv2
from matplotlib import pyplot as plt
def fillHoles(otsuImg):
    # find contours
    contours, _ = cv2.findContours(otsuImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # filter out contours by size
    small_cntrs = []
    for con in contours:
        area = cv2.contourArea(con)
        # print(area)
        if area < 1000:  # size threshold
            small_cntrs.append(con)
    cv2.drawContours(otsuImg, small_cntrs, -1, 0, -1)
# load the image
img = cv2.imread('resources/rbc2.png')
img_pyr = cv2.pyrMeanShiftFiltering(img, 21, 51)
img_median = cv2.medianBlur(img_pyr, 9)
img_gray = cv2.cvtColor(img_median, cv2.COLOR_BGR2GRAY)
ret, img_thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# fill holes of RBC
fillHoles(img_thresh)
# invert the image
img_thresh = cv2.bitwise_not(img_thresh)
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(img_thresh,cv2.MORPH_OPEN,kernel, iterations=2)
# sure background area
sure_bg = cv2.dilate(opening,kernel,iterations=3)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2, 5)
ret, sure_fg = cv2.threshold(dist_transform,0.1*dist_transform.max(),255,0)
# _, sure_fg = cv2.threshold(np.uint8(dist_transform), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
markers = cv2.watershed(img,markers)
img[markers == -1] = [255,0,0]
cv2.imshow('markers2', np.uint8(markers))
cv2.imshow('Final output', img)
cv2.waitKey(0)
I had the same problem and ended up using skimage.segmentation.watershed for the last step, to get the labels I could use to calculate the contours of the watershed cells. Once you have the contours, you can calculate and plot the enclosing circle as usual:
# your code above
ret, markers = cv2.connectedComponents(sure_fg)
from skimage.segmentation import watershed
labels = watershed(-dist_transform,
                   markers,
                   mask=img_thresh,
                   watershed_line=False)
watershed_contours = list(map(lambda l: cv2.findContours((labels == l).astype(np.uint8),
                                                         cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0][0],
                              np.unique(labels)[1:]))
output = img.copy()
for contour in watershed_contours:
    (x,y), radius = cv2.minEnclosingCircle(contour)
    center = (int(x), int(y))
    radius = int(radius)
    cv2.circle(output, center, radius, (0,255,0), 2)
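Viewing or saving the drawn circles then works as usual, for example (the filename here is just a placeholder):
cv2.imshow("output", output)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite("watershed_circles.png", output)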

Not enough background filtering

I am trying to filter the background of images showing electric cables. I tried the following:
Transform from color to gray
Apply cv2.Laplacian, or cv2.Sobel twice (once per direction), to find edges in both directions.
Apply thresholding cv2.THRESH_BINARY(_INV), cv2.THRESH_OTSU
Lastly, I tried to find edges with 'filtered' images using cv2.Canny together with cv2.HoughLinesP
Overall, the results aren't satisfying at all. I will give an example of 2 images:
And the output of my script:
I also played with the values in config, but the results weren't much different.
Here's the little script I managed to do:
import cv2
import matplotlib.pyplot as plt
import numpy as np

def img_show(images, cmap=None):
    fig = plt.figure(figsize=(17, 10))
    root = 3  # len(images) ** 0.5
    for i, img in enumerate(images):
        ax = fig.add_subplot(root, root, i + 1)
        ax.imshow(img, cmap=cmap[i])
    plt.show()

class Config:
    scale = 0.4
    min_threshold = 120
    max_threshold = 200
    canny_min_threshold = 100
    canny_max_threshold = 200

config = Config()

def find_lines(img, rgb_img):
    dst = cv2.Canny(img, config.canny_min_threshold, config.canny_max_threshold)
    cdstP = np.copy(rgb_img)
    lines = cv2.HoughLinesP(dst, 1, np.pi / 180, 150, None, 0, 0)
    lines1 = lines[:, 0, :]
    for x1, y1, x2, y2 in lines1[:]:
        cv2.line(cdstP, (x1, y1), (x2, y2), (255, 0, 0), 5)
    return cdstP

if __name__ == "__main__":
    bgr_img = cv2.imread('DJI_0009.JPG')
    bgr_img = cv2.resize(bgr_img, (0, 0), bgr_img, config.scale, config.scale)
    rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
    gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
    # _, threshold = cv2.threshold(gray_img, config.min_threshold, config.max_threshold, cv2.THRESH_BINARY)
    # laplacian = cv2.Laplacian(rgb_img, cv2.CV_8UC1)
    sobelx = cv2.Sobel(gray_img, cv2.CV_8UC1, 1, 0)
    sobely = cv2.Sobel(gray_img, cv2.CV_8UC1, 0, 1)
    blended = cv2.addWeighted(src1=sobelx, alpha=0.5, src2=sobely, beta=0.5, gamma=0)
    _, threshold = cv2.threshold(blended, config.min_threshold, config.max_threshold,
                                 cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    p1 = find_lines(threshold, rgb_img)
    p2 = find_lines(blended, rgb_img)
    p3 = find_lines(gray_img, rgb_img)
    plots = [rgb_img, p1, p2, p3]
    cmaps = [None] + ['gray'] * (len(plots) - 1)
    img_show(plots, cmaps)
I am assuming I need to do much better filtering. However, I also tried image segmentation, but the results weren't promising at all.
Any ideas on how to improve this?
Thanks
Here is one way to do that in Python/OpenCV. I threshold, then optionally clean with morphology. Then I get the contours and, for each contour, compute its rotated rectangle. From the rotated rectangle I get its dimensions and compute the aspect ratio (largest dimension / smallest dimension) and optionally the area. Then I threshold on the aspect ratio (and optionally the area) and keep only the contours that pass.
Input:
import cv2
import numpy as np
image = cv2.imread("DCIM-100-MEDIA-DJI-0009-JPG.jpg")
hh, ww = image.shape[:2]
# convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# create a binary thresholded image
thresh = cv2.threshold(gray, 64, 255, cv2.THRESH_BINARY)[1]
# invert so line is white on black background
thresh = 255 - thresh
# apply morphology
kernel = np.ones((11,11), np.uint8)
clean = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# get external contours
contours = cv2.findContours(clean, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
area_thresh = ww / 2
aspect_thresh = ww / 30
print(area_thresh,aspect_thresh)
print('')
result = image.copy()
for c in contours:
    # get rotated rectangle from contour
    # get its dimensions
    rotrect = cv2.minAreaRect(c)
    (center), (dim1,dim2), angle = rotrect
    maxdim = max(dim1,dim2)
    mindim = min(dim1,dim2)
    area = dim1 * dim2
    if mindim != 0:
        aspect = maxdim / mindim
        #print(area, aspect)
        #if area > area_thresh and aspect > aspect_thresh:
        if aspect > aspect_thresh:
            # draw contour on input
            cv2.drawContours(result, [c], 0, (0,0,255), 3)
            print(area, aspect)
# save result
cv2.imwrite("DCIM-100-MEDIA-DJI-0009-JPG_thresh.jpg",thresh)
cv2.imwrite("DCIM-100-MEDIA-DJI-0009-JPG_clean.jpg",clean)
cv2.imwrite("DCIM-100-MEDIA-DJI-0009-JPG_result.jpg",result)
# display result
cv2.imshow("thresh", thresh)
cv2.imshow("clean", clean)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Thresholded image:
Morphology cleaned image:
Result image:

Calculate the white pixel inside cv2.circle

I'm trying to implement identification of optic nerve glioma using Python and OpenCV.
I need to do the following steps in order to classify optic nerve glioma successfully.
Find the brightest part of an image and put a circle on it using cv2.circle - Done
Calculate the white part on the image inside cv2.circle - Needs help
Here's my code for identifying the brightest part of the image
# gray and orig come from loading the input image earlier in the script
gray = cv2.GaussianBlur(gray, (371, 371), 0)
(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(gray)
image = orig.copy()
cv2.circle(image, maxLoc, 371, (255, 0, 0), 2)
sought = (254, 254, 254)
amount = 0
for x in range(image.shape[0]):
    for y in range(image.shape[1]):
        b, g, r = image[x, y]
        if (b, g, r) == sought:
            amount += 1
print(amount)
image = imutils.resize(image, width=400)
# display the results of our newly improved method
cv2.imshow("Optic Image", image)
cv2.waitKey(0)
The code above returns the following output
What I'm trying to do now is to identify the size of the white region of the image inside the cv2.circle.
Thank you so much!
I am not sure what you consider as "white", but here is one way to do the counting in Python/OpenCV. Simply read the image. Convert to grayscale. Threshold it at some level. Then just count the number of white pixels in the thresholded image.
If I use your output image for my input (after removing your white border):
import cv2
import numpy as np
# read image
img = cv2.imread('optic.png')
# convert to HSV and extract saturation channel
# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# threshold
thresh = cv2.threshold(gray, 175, 255, cv2.THRESH_BINARY)[1]
# count number of white pixels
count = np.sum(np.where(thresh == 255))
print("count =",count)
# write result to disk
cv2.imwrite("optic_thresh.png", thresh)
# display it
cv2.imshow("IMAGE", img)
cv2.imshow("THRESH", thresh)
cv2.waitKey(0)
Thresholded image:
Count of white pixels in threshold:
count = 1025729
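Note that np.sum(np.where(thresh == 255)) adds up the pixel coordinates returned by np.where; if the number of white pixels itself is wanted, np.count_nonzero is the direct route (this will print a different, smaller number):
count = int(np.count_nonzero(thresh == 255))
print("count =", count)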
I am still not sure what you consider as white and what you consider as the yellow circle. But here is another attempt using Python/OpenCV.
Read the input
Convert the input to the range 0 to 1 as 1D data
Use kmeans clustering to reduce the number of colors and convert back to range 0 to 255 as 2D image
Use inRange color thresholding to isolate the "yellow" area
Clean it up with morphology and get the contour
Get the minimum enclosing circle center and radius and bias the center a little
Draw an unfilled white circle on the input
Draw a white filled circle on a black background as a circle mask for the yellow area
Convert the input to grayscale
Threshold the grayscale image
Apply the mask to the thresholded grayscale image
Count the number of white pixels
Input:
import cv2
import numpy as np
from sklearn import cluster
# read image
img = cv2.imread('optic.png')
h, w, c = img.shape
# convert to range 0 to 1
image = img.copy()/255
# reshape to 1D array
image_1d = image.reshape(h*w, c)
# do kmeans processing
kmeans_cluster = cluster.KMeans(n_clusters=int(5))
kmeans_cluster.fit(image_1d)
cluster_centers = kmeans_cluster.cluster_centers_
cluster_labels = kmeans_cluster.labels_
# need to scale result back to range 0-255
newimage = cluster_centers[cluster_labels].reshape(h, w, c)*255.0
newimage = newimage.astype('uint8')
# threshold brightest region
lowcolor = (150,180,230)
highcolor = (170,200,250)
thresh1 = cv2.inRange(newimage, lowcolor, highcolor)
# apply morphology open and close
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
thresh1 = cv2.morphologyEx(thresh1, cv2.MORPH_OPEN, kernel, iterations=1)
thresh1 = cv2.morphologyEx(thresh1, cv2.MORPH_CLOSE, kernel, iterations=1)
# get contour
cntrs = cv2.findContours(thresh1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cntrs = cntrs[0] if len(cntrs) == 2 else cntrs[1]
c = cntrs[0]
# get enclosing circle and bias center, if desired, since it is slightly offset (or alternately, increase the radius)
bias = 5
center, radius = cv2.minEnclosingCircle(c)
cx = int(round(center[0]))-bias
cy = int(round(center[1]))+bias
rr = int(round(radius))
# draw filled circle over black and also outline circle over input
mask = np.zeros_like(img)
cv2.circle(mask, (cx,cy), rr, (255, 255, 255), -1)
circle = img.copy()
cv2.circle(circle, (cx,cy), rr, (255, 255, 255), 1)
# convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# threshold gray image
thresh2 = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)[1]
# apply mask to thresh2
thresh2 = cv2.bitwise_and(thresh2, mask[:,:,0])
# count number of white pixels
count = np.sum(np.where(thresh2 == 255))
print("count =",count)
# write result to disk
#cv2.imwrite("optic_thresh.png", thresh)
cv2.imwrite("optic_kmeans.png", newimage)
cv2.imwrite("optic_thresh1.png", thresh1)
cv2.imwrite("optic_mask.png", mask)
cv2.imwrite("optic_circle.png", circle)
cv2.imwrite("optic_thresh2.png", thresh2)
# display it
cv2.imshow("IMAGE", img)
cv2.imshow("KMEANS", newimage)
cv2.imshow("THRESH1", thresh1)
cv2.imshow("MASK", mask)
cv2.imshow("CIRCLE", circle)
cv2.imshow("GRAY", gray)
cv2.imshow("THRESH2", thresh2)
cv2.waitKey(0)
kmeans image:
inRange threshold image:
Circle on input:
Circle mask image:
Masked threshold image:
Count Results:
count = 443239
