How to detect shapes in a cytology image with Python and OpenCV - python

I am trying to detect shapes in a cytology image. My current result and my input image are attached, but the result does not look good. Can anyone help me?
My steps were:
Color the image with COLORMAP_HOT
Convert to a grayscale image
Apply the Canny filter
Find the contours
Test each contour
I am using Python 3.5 and OpenCV 3.
My code:
#!/usr/bin/env python
import cv2
import numpy as np
from pyimagesearch.shapedetector import ShapeDetector
import argparse
import imutils
from scipy import ndimage
import math
import matplotlib.pyplot as plt

if __name__ == '__main__':
    im = cv2.imread("23.png")
    #im_out = np.zeros((670, 543, 3), np.uint8)
    #resized = imutils.resize(im, width=300)
    #ratio = im.shape[0] / float(resized.shape[0])

    # coloration
    im_color = cv2.applyColorMap(im, cv2.COLORMAP_HOT)
    imgg = im_color[:, :, 1]
    #cv2.putText(im_color, colormap_name(k), (30, 180), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255), 1)
    im_out = im_color

    # cv2.imread returns BGR, so convert with COLOR_BGR2GRAY rather than COLOR_RGB2GRAY
    gray = cv2.cvtColor(im_color, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (3, 3), 0)
    canny = cv2.Canny(blurred, 120, 200)
    kernel = np.ones((5, 5), np.uint8)

    # morphology: close small gaps in the edge map
    dilation = cv2.dilate(canny, kernel, iterations=1)
    erosion = cv2.erode(dilation, kernel, iterations=1)
    dilation = cv2.dilate(erosion, kernel, iterations=1)
    erosion = cv2.erode(dilation, kernel, iterations=1)
    blurred = cv2.GaussianBlur(erosion, (3, 3), 0)
    canny = cv2.Canny(blurred, 200, 200)

    cv2.imshow("dilation", dilation)
    cv2.imshow("canny", canny)
    cv2.imshow("erosion", erosion)
    #(thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    cv2.imshow("im_out", im_out)
    cv2.imshow("gray", gray)

    # contours (OpenCV 2 returns (contours, hierarchy); OpenCV 3 returns (image, contours, hierarchy))
    cnts = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]

    # loop over the contours
    for c in cnts:
        M = cv2.moments(c)
        if M["m00"] == 0:
            M["m00"] = 1
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        #shape = detect(c)
        #cv2.drawContours(im, [c], -1, (0, 255, 0), 2)
        #cv2.putText(im, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
        area = cv2.contourArea(c)
        perimeter = cv2.arcLength(c, True)

        # initialize the shape name and approximate the contour
        shape = " "
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.05 * peri, True)
        (x, y, w, h) = cv2.boundingRect(approx)
        radius = w / 2

        if len(approx) == 3:
            shape = ""  # triangles are left unlabeled
        # if the shape has 4 vertices, it is either a square or a rectangle
        elif len(approx) == 4:
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            # compute the bounding box of the contour and use it to get the aspect ratio;
            # (x, y) is the top-left corner of the box, (w, h) its width and height
            (x, y, w, h) = cv2.boundingRect(approx)
            print("area", area, "perimeter", perimeter, "cx", cx, "cy", cy, "x", x, "y", y, "w", w, "h", h)
            #fichier.write("area",area,"perimeter",perimeter,"cx",cx,"cy",cy)
            print()
            ar = w / float(h)
            shape = "square" if 0.95 <= ar <= 1.05 else "rectangle"
            cv2.drawContours(im, [c], -1, (255, 0, 0), 2)
            cv2.putText(im, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
        # if the shape has 6 vertices (cystine crystals are hexagonal)
        elif len(approx) == 6:
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            print("area", area, "perimeter", perimeter, "cx", cx, "cy", cy)
            print()
            shape = "HEXA"
            cv2.drawContours(im, [c], -1, (255, 0, 0), 2)
            cv2.putText(im, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
        # otherwise, assume the shape is a circle
        elif abs(1 - (float(w) / h)) <= 2 and abs(1 - (area / (math.pi * radius * radius))) <= 0.2:
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            print("area", area, "perimeter", perimeter, "cx", cx, "cy", cy)
            print()
            shape = "circle"
            cv2.drawContours(im, [c], -1, (255, 0, 0), 2)
            cv2.putText(im, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)

    # show the output image
    cv2.imshow("Image", im)
    cv2.waitKey(0)
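
One possible reason the result looks noisy is that Canny on a blurred, colormapped image produces broken, doubled edges, so findContours returns many fragments rather than whole cells. A minimal sketch of an alternative, assuming the cells appear as roughly round blobs that contrast with the background (the filename, Otsu thresholding, minimum area, and circularity cutoff below are assumptions, not part of the original code):

import cv2
import math
import numpy as np

im = cv2.imread("23.png")  # assumed input file, as in the question
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
# Otsu picks a global threshold automatically; invert if the cells are darker than the background
_, binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# close small holes so each cell becomes one solid blob
binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))

cnts = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]  # handle both findContours signatures
for c in cnts:
    area = cv2.contourArea(c)
    peri = cv2.arcLength(c, True)
    if area < 50 or peri == 0:  # assumed minimum area, drops speckle noise
        continue
    circularity = 4 * math.pi * area / (peri * peri)  # 1.0 for a perfect circle
    label = "circle" if circularity > 0.8 else "other"
    x, y, w, h = cv2.boundingRect(c)
    cv2.drawContours(im, [c], -1, (255, 0, 0), 2)
    cv2.putText(im, label, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)

cv2.imshow("threshold result", im)
cv2.waitKey(0)

Whether Otsu or a fixed threshold separates the cells better depends on the staining of the particular image, so the threshold step is the part to experiment with first.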

Related

Detect white label boundary opencv python

I have a series of images with shipping labels on boxes and I need to extract the whole white area of the label.
I'm extremely new to OpenCV, and using these answers (detect rectangle in image and crop) I managed to put together the following code (it extracts only the topmost part of the label):
import cv2
import numpy as np

path_to_image = 'IMG_0184b.jpg'

img = cv2.imread(path_to_image)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 0, 17, 17)
kernel = np.ones((5, 5), np.uint8)
erosion = cv2.erode(gray, kernel, iterations=2)
kernel = np.ones((4, 4), np.uint8)
dilation = cv2.dilate(erosion, kernel, iterations=2)
edged = cv2.Canny(dilation, 30, 200)
cnt, h = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

largestArea = []
for contour in cnt:
    largestArea.append(cv2.contourArea(contour))
print(sorted(largestArea, reverse=True)[0:3])

for contour in cnt:
    approx = cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True), True)
    area = cv2.contourArea(contour)
    if area == 612144.5:
        cv2.drawContours(img, [approx], 0, (0, 0, 0), 5)
        x = approx.ravel()[0]
        y = approx.ravel()[1] - 5
    if len(approx) == 4:
        x, y, w, h = cv2.boundingRect(approx)
        aspectRatio = float(w) / h
        #print(aspectRatio)
        if aspectRatio >= 0.95 and aspectRatio < 1.05:
            cv2.putText(img, "square", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
        else:
            cv2.putText(img, "rectangle", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))

cv2.namedWindow('custom window', cv2.WINDOW_KEEPRATIO)
cv2.imshow('custom window', img)
cv2.resizeWindow('custom window', 800, 800)
cv2.waitKey(0)
cv2.destroyAllWindows()
How do I capture the whole white area of the label just like in the example below?
Original picture
Desired result
Many thanks
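
One detail worth pointing out: the check if area == 612144.5: only ever matches one specific contour in one specific image, which is why only part of the label is found. A rough sketch of a more general approach, assuming the label is the largest bright, roughly rectangular region in the photo (the threshold value and output filename are assumptions):

import cv2
import numpy as np

img = cv2.imread('IMG_0184b.jpg')  # filename from the question
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# the label is much brighter than the box, so a plain threshold often isolates it
_, thresh = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)  # assumed threshold value
# close gaps caused by printed text so the label becomes one solid region
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, np.ones((15, 15), np.uint8))

cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
if cnts:
    label = max(cnts, key=cv2.contourArea)  # largest white region
    x, y, w, h = cv2.boundingRect(label)
    crop = img[y:y + h, x:x + w]  # the whole label area
    cv2.imwrite('label_crop.jpg', crop)  # assumed output filename

If the label is photographed at an angle, cv2.minAreaRect plus a perspective warp would give a straightened crop, but the idea of keying on the largest bright contour stays the same.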

Fitting ellipses to randomly distributed uniform regular shapes

We can think of the shapes in the representative picture as randomly scattered pencils or sticks on a table. I've been trying to find the area of each shape by fitting ellipses, but I haven't been able to fit the ellipses properly. Can you help me? Thanks.
The first image is the input image.
The code that I tried:
import cv2
import numpy as np
import random as rng
import math

img = cv2.imread('sticks.png', 1)
imge = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
gray = cv2.cvtColor(imge, cv2.COLOR_BGR2GRAY)
blur = cv2.blur(gray, (2, 2), 3)
rng.seed(1)

def thresh_callback(val):
    threshold = val
    canny_output = cv2.Canny(blur, threshold, threshold * 4)
    contours, _ = cv2.findContours(canny_output, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    minRect = [None] * len(contours)
    minEllipse = [None] * len(contours)
    for i, c in enumerate(contours):
        minRect[i] = cv2.minAreaRect(c)
        if c.shape[0] > 5:
            minEllipse[i] = cv2.fitEllipse(c)
            (x, y), (minor_axis, major_axis), angle = minEllipse[i]
            half_major = major_axis / 2
            half_minor = minor_axis / 2
            pixel = 37.795275591  # pixels per centimetre at 96 dpi
            half_major1 = half_major / pixel
            half_minor1 = half_minor / pixel
            # ellipse area is pi * a * b (semi-major times semi-minor axis)
            area = math.pi * half_major1 * half_minor1
            print(area)
    # the drawing canvas should match the output's height and width
    drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
    for i, c in enumerate(contours):
        color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))
        cv2.drawContours(drawing, contours, i, color)
        if c.shape[0] > 5:
            cv2.ellipse(drawing, minEllipse[i], color, 1)
    cv2.imshow('Fitting Ellips', drawing)

source_window = 'Source'
cv2.namedWindow(source_window)
cv2.imshow(source_window, img)
max_thresh = 255
thresh = 100
cv2.createTrackbar('Canny Thresh:', source_window, thresh, max_thresh, thresh_callback)
thresh_callback(thresh)
cv2.waitKey()
The second image is the expected result (an ellipse fitted to each line like this).
This is not a final result and it definitely has errors; you will need to take the time to reach the desired output. But it can be a good starting point:
import sys
import cv2
import math
import numpy as np

# Check if there is a black area at a specific position of an image
def checkPointArea(im, pt):
    x, y = pt[0], pt[1]
    return im[y, x, 0] == 0 or im[y, x+1, 0] == 0 or im[y, x-1, 0] == 0 or im[y+1, x, 0] == 0 or im[y-1, x, 0] == 0

# Load image
pth = sys.path[0]
im = cv2.imread(pth + '/im.jpg')
H, W = im.shape[:2]

# Make grayscale and black-and-white versions
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
bw = cv2.threshold(im, 110, 255, cv2.THRESH_BINARY)[1]

# Try to separate the parts of the image that are stuck together
bw = cv2.dilate(bw, np.ones((5, 5), np.uint8))

# Convert im back to BGR
im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)

# Make some copies
org = im.copy()
empty = im.copy()
empty[:] = 255

# Find contours and sort them by position
cnts, _ = cv2.findContours(bw, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts = sorted(cnts, key=lambda x: cv2.boundingRect(x)[0])

# Thickness of the random lines
thickness = 5

# Find and draw ellipses
for cnt in cnts:
    x, y, w, h = cv2.boundingRect(cnt)
    if w < W:
        cv2.rectangle(im, (x, y), (x+w, y+h), (10, 230, 0)
                      if w < h else (200, 0, 128), 1)
        hw, hh = w//2, h//2
        cx, cy = x+hw, y+hh
        r = int(math.sqrt(w**2 + h**2))
        t, c = math.atan(hw/hh), (255, 0, 0)
        if checkPointArea(org, (x, y)) and checkPointArea(org, (x+w-1, y+h-1)):
            t, c = math.atan(hw/-hh), (100, 0, 200)
        deg = math.degrees(t)
        if w <= thickness*2:
            deg = 0
        if h <= thickness*2:
            deg = 90
        cv2.ellipse(im, (x, y), (1, 1), 0, 0, 360, c, 4)
        cv2.ellipse(im, (cx, cy), (thickness, r//2),
                    deg, 0, 360, (40, 0, 255), 2, lineType=cv2.LINE_AA)
        #cv2.ellipse(empty, (x, y), (1, 1), 0, 0, 360, c, 2)
        cv2.ellipse(empty, (cx, cy), (thickness, r//2),
                    deg, 0, 360, c, 2, lineType=cv2.LINE_AA)

# Save output
bw = cv2.cvtColor(bw, cv2.COLOR_GRAY2BGR)
top = np.hstack((org, empty))
btm = np.hstack((bw, im))
cv2.imwrite(pth + '/im_.png', np.vstack((top, btm)))
Each section:
Final Result:
Errors:
You will have to spend more time on two problem areas: the first comes from my rough code and can be removed with more effort; the second comes from the overlap of two lines, which clearing the image did not resolve. You may be able to prevent that kind of interference from occurring earlier in the pipeline.

Automatic scaling when the image is resized

I am having a problem when I change the size of the image: the proportions of the line no longer match those of the image, i.e. the line becomes smaller than expected.
Any idea how I can resize the line proportionally to the image?
Here is the original image:
Here is the result that I want:
But this is the result that I get after resizing (it gives the same value as in the second picture):
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import numpy as np
import imutils
import cv2

# Method to find the midpoint
def midpoint(ptA, ptB):
    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)

img = cv2.imread('banana4.jpg')

# Gaussian blur
blur1 = cv2.GaussianBlur(img, (3, 3), 1)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_blue = np.array([5, 25, 25])
upper_blue = np.array([70, 255, 255])
thresh = cv2.inRange(hsv, lower_blue, upper_blue)

# Find contours and sort for largest contour
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    box = cv2.minAreaRect(c)
    box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
    box = np.array(box, dtype="int")
    box = perspective.order_points(box)
    orig = img.copy()
    cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 3)
    (tl, tr, br, bl) = box
    (tltrX, tltrY) = midpoint(tl, tr)
    (blbrX, blbrY) = midpoint(bl, br)
    (tlblX, tlblY) = midpoint(tl, bl)
    (trbrX, trbrY) = midpoint(tr, br)

    # draw and write the midpoints on the image
    cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
    cv2.putText(orig, "({},{})".format(tltrX, tltrY), (int(tltrX - 50), int(tltrY - 10) - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
    # note the hard-coded 90-pixel offset below: it does not scale when the image is resized
    cv2.circle(orig, (int(blbrX), int(blbrY - 90)), 5, (255, 0, 0), -1)
    cv2.putText(orig, "({},{})".format(blbrX, blbrY - 90), (int(blbrX - 50), int(blbrY - 10) - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)

    # draw lines between the midpoints
    cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY - 90)),
             (255, 0, 255), 2)

    # compute the Euclidean distance between the midpoints
    dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
    dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))

    cv2.imshow("Image", orig)
    break
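
A minimal sketch of the usual fix, assuming the image is resized with imutils.resize (the question does not show how the resize is done) and that the 90-pixel offset was tuned on the original resolution: compute the resize ratio once and scale every hand-tuned pixel value by it, instead of hard-coding offsets such as blbrY - 90.

import cv2
import imutils

img = cv2.imread('banana4.jpg')  # filename from the question
orig_h = img.shape[0]

resized = imutils.resize(img, width=600)  # assumed target width
ratio = resized.shape[0] / float(orig_h)  # how much the image shrank or grew

offset = int(90 * ratio)  # the hard-coded 90 px offset, scaled with the image
# any point measured on the original image scales the same way:
# (x_resized, y_resized) = (int(x * ratio), int(y * ratio))

The same ratio works in the other direction: if you measure distances on the resized image, divide them by ratio before converting to real-world units, so the reported size stays the same at every display resolution.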

Finding the center line and center point of rectangular region

I ran the following code to create a rectangle contour:
#import the necessary packages
import argparse
import imutils
import cv2
import numpy as np
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
help="path to the input image")
args = vars(ap.parse_args())
# load the image, convert it to grayscale, blur it slightly, and threshold it
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
# threshold the image, then perform a series of erosions + dilations to remove any small regions of noise
thresh = cv2.threshold(gray, 45, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.erode(thresh, None, iterations=2)
thresh = cv2.dilate(thresh, None, iterations=2)
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# Find the index of the largest contour
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cnt=contours[max_index]
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0)
I would like to find the center line and center point of the rectangular contour. Please advise.
Since you already have (x, y, w, h) of the desired contour from x,y,w,h = cv2.boundingRect(cnt) in the code above, the center of the vertical mid line is given by (x+w//2, y+h//2), and the vertical line can be drawn with the code below:
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
# center line
cv2.line(image, (x+w//2, y), (x+w//2, y+h), (0, 0, 255), 2)
# below circle to denote mid point of center line
center = (x+w//2, y+h//2)
radius = 2
cv2.circle(image, center, radius, (255, 255, 0), 2)
output:
Since you already have the bounding box, you can use cv2.moments() to find the center coordinates. This gives us the centroid (i.e., the center (x, y)-coordinates of the object)
M = cv2.moments(cnt)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
The center point is simply (cX, cY) and you can draw this with cv2.circle()
cv2.circle(image, (cX, cY), 5, (36, 255, 12), -1)
Similarly, we can draw the center line using cv2.line() or Numpy slicing
cv2.line(image, (x + int(w/2), y), (x + int(w/2), y+h), (0, 0, 255), 3)
image[int(cY - h/2):int(cY+h/2), cX] = (0, 0, 255)
import imutils
import cv2
import numpy as np
# load the image, convert it to grayscale, blur it slightly, and threshold it
image = cv2.imread('1.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
# threshold the image, then perform a series of erosions + dilations to remove any small regions of noise
thresh = cv2.threshold(gray, 45, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.erode(thresh, None, iterations=2)
thresh = cv2.dilate(thresh, None, iterations=2)
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# Find the index of the largest contour
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cnt=contours[max_index]
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
M = cv2.moments(cnt)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
cv2.circle(image, (cX, cY), 5, (36, 255, 12), -1)
# To draw line you can use cv2.line or numpy slicing
cv2.line(image, (x + int(w/2), y), (x + int(w/2), y+h), (0, 0, 255), 3)
# image[int(cY - h/2):int(cY+h/2), cX] = (36, 255, 12)
# show the output image
cv2.imshow("Image", image)
cv2.imwrite("Image.png", image)
cv2.waitKey(0)

Video/image analysis to acquire distances between contours

New image: test image
I'm trying to quantify the distance between two contours in a video of a microvessel (see snapshot)
Image analysis structure
Right now I'm only able to select for one contour (which is outlined) and I'm acquiring dimensions from this outline, but what I'd like to select for is the top and bottom contour of the structure and measure the distance (labeled with an orange line and A in the snapshot).
Any suggestions on how to do this? My code for this video analysis is below. Thanks in advance for the help:
import cv2
import pandas as pd
import numpy as np
import imutils
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours

videocapture = cv2.VideoCapture('RTMLV.mp4')

def safe_div(x, y):
    if y == 0:
        return 0
    return x / y

def nothing(x):
    pass

def rescale_frame(frame, percent=100):  # make the video windows a bit smaller
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    dim = (width, height)
    return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)

if not videocapture.isOpened():
    print("Unable to open video")
    exit()

windowName = "Vessel Tracking"
cv2.namedWindow(windowName)

# Sliders to adjust image
cv2.createTrackbar("Threshold", windowName, 75, 255, nothing)
cv2.createTrackbar("Kernel", windowName, 5, 30, nothing)
cv2.createTrackbar("Iterations", windowName, 1, 10, nothing)

showLive = True
while showLive:
    ret, frame = videocapture.read()
    if not ret:
        print("Cannot capture the frame")
        exit()
    frame_resize = rescale_frame(frame)

    thresh = cv2.getTrackbarPos("Threshold", windowName)
    ret, thresh1 = cv2.threshold(frame_resize, thresh, 255, cv2.THRESH_BINARY)
    kern = cv2.getTrackbarPos("Kernel", windowName)
    kernel = np.ones((kern, kern), np.uint8)  # square image kernel used for erosion
    itera = cv2.getTrackbarPos("Iterations", windowName)
    dilation = cv2.dilate(thresh1, kernel, iterations=itera)
    erosion = cv2.erode(dilation, kernel, iterations=itera)  # refines all edges in the binary image
    opening = cv2.morphologyEx(erosion, cv2.MORPH_OPEN, kernel)
    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
    closing = cv2.cvtColor(closing, cv2.COLOR_BGR2GRAY)

    # find contours (CHAIN_APPROX_NONE keeps every contour point)
    contours, hierarchy = cv2.findContours(closing, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    closing = cv2.cvtColor(closing, cv2.COLOR_GRAY2RGB)
    cv2.drawContours(closing, contours, -1, (128, 255, 0), 1)

    # focus on only the largest outline by area
    areas = []  # list to hold all areas
    for contour in contours:
        ar = cv2.contourArea(contour)
        areas.append(ar)
    max_area = max(areas)
    max_area_index = areas.index(max_area)  # index of the list element with largest area
    cnt = contours[max_area_index - 1]  # largest area contour is usually the viewing window itself, why?
    cv2.drawContours(closing, [cnt], 0, (0, 0, 255), 1)

    def midpoint(ptA, ptB):
        return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)

    # compute the rotated bounding box of the contour
    orig = frame_resize.copy()
    box = cv2.minAreaRect(cnt)
    box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
    box = np.array(box, dtype="int")

    # order the points in the contour such that they appear
    # in top-left, top-right, bottom-right, and bottom-left
    # order, then draw the outline of the rotated bounding box
    box = perspective.order_points(box)
    cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 1)

    # loop over the original points and draw them
    for (x, y) in box:
        cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)

    # unpack the ordered bounding box, then compute the midpoint
    # between the top-left and top-right coordinates, followed by
    # the midpoint between bottom-left and bottom-right coordinates
    (tl, tr, br, bl) = box
    (tltrX, tltrY) = midpoint(tl, tr)
    (blbrX, blbrY) = midpoint(bl, br)

    # compute the midpoint between the top-left and bottom-left points,
    # followed by the midpoint between the top-right and bottom-right
    (tlblX, tlblY) = midpoint(tl, bl)
    (trbrX, trbrY) = midpoint(tr, br)

    # draw the midpoints on the image
    cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
    cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)

    # draw lines between the midpoints
    cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)), (255, 0, 255), 1)
    cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)), (255, 0, 255), 1)
    cv2.drawContours(orig, [cnt], 0, (0, 0, 255), 1)

    # compute the Euclidean distance between the midpoints
    dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
    dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))

    # compute the size of the object
    P2M4x = 1.2
    P2M10x = 3.2
    P2M20x = 6
    pixelsPerMetric = P2M10x  # pixel-to-micron conversion
    dimA = dA / pixelsPerMetric
    dimB = dB / pixelsPerMetric
    dimensions = [dimA, dimB]

    # draw the object sizes on the image
    cv2.putText(orig, "{:.1f}um".format(dimA), (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
    cv2.putText(orig, "{:.1f}um".format(dimB), (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)

    # compute the center of the contour
    M = cv2.moments(cnt)
    cX = int(safe_div(M["m10"], M["m00"]))
    cY = int(safe_div(M["m01"], M["m00"]))

    # draw the contour and center of the shape on the image
    cv2.circle(orig, (cX, cY), 5, (255, 255, 255), -1)
    cv2.putText(orig, "center", (cX - 20, cY - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

    cv2.imshow(windowName, orig)
    cv2.imshow('', closing)
    if cv2.waitKey(30) >= 0:
        showLive = False

videocapture.release()
cv2.destroyAllWindows()
Edits have been made to this answer in response to the new test image that was added to the post.
I was unable to segment the blood vessel in the test image using the code that you uploaded. I segmented the image by using manual annotation and the GrabCut algorithm.
This is the code that I used for the manual segmentation:
import cv2, os, numpy as np
import time
# Plot with Matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

img_path = '/home/stephen/Desktop/0lszR.jpg'
img = cv2.imread(img_path)
img = img[420:1200, :]
h, w, _ = img.shape
mask = np.zeros((h, w), np.uint8)
mask[:] = 2
src = img.copy()
h, w, _ = img.shape
drawing = src.copy()

# Mouse callback function
global k, px, py
k = 0
px, py = 0, 0

def callback(event, x, y, flags, param):
    global k, px, py
    print(x, y, k, px, py)
    if k == 115:  # 's' for sure background
        if px + py != 0:
            cv2.line(img, (x, y), (px, py), (255, 255, 0), 8)
            cv2.line(mask, (x, y), (px, py), 0, 8)
    if k == 116:  # 't' for sure foreground
        if px + py != 0:
            cv2.line(img, (x, y), (px, py), (0, 255, 255), 8)
            cv2.line(mask, (x, y), (px, py), 1, 8)
        else:
            print(px, py)
    px, py = x, y
    #if k != 115 or 116: px, py = 0,0

cv2.namedWindow('img')
cv2.setMouseCallback('img', callback)
while k != 27:
    cv2.imshow('img', img)
    k_temp = cv2.waitKey(1)
    if k_temp != -1:
        k = k_temp
cv2.destroyAllWindows()
After I had found the segmented image, I used the function np.nonzero() to find the tops and bottoms of the columns:
This is the code that I used to find the width:
# Initialize parameters for the GrabCut algorithm
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)

# Apply GrabCut
out_mask = mask.copy()
out_mask, _, _ = cv2.grabCut(src, out_mask, None, bgdModel, fgdModel, 1, cv2.GC_INIT_WITH_MASK)
out_mask = np.where((out_mask == 2) | (out_mask == 0), 0, 1).astype('uint8')

# Apply the mask to the source image
out_img = src * out_mask[:, :, np.newaxis]
flip_mask = cv2.flip(out_mask, 0)

# Find the distance between the top and bottom of each column
distances = []
for col_num in range(src.shape[1] - 1):
    col = out_mask[:, col_num:col_num + 1]
    flip_col = flip_mask[:, col_num:col_num + 1]
    top = np.nonzero(col)[0][0]
    bottom = h - np.nonzero(flip_col)[0][0]
    if col_num % 12 == 0:
        cv2.line(drawing, (col_num, top), (col_num, bottom), (234, 345, 34), 4)
    distances.append(bottom - top)

f, axarr = plt.subplots(2, 3, sharex=True)
axarr[0, 0].imshow(src)
axarr[0, 1].imshow(out_mask)
axarr[0, 2].imshow(drawing)
axarr[1, 0].imshow(img)
axarr[1, 1].imshow(out_img)
axarr[1, 2].plot(distances)
axarr[0, 0].set_title("Source")
axarr[0, 1].set_title('Mask from GrabCut')
axarr[0, 2].set_title('Widths')
axarr[1, 0].set_title('Manual Annotation')
axarr[1, 1].set_title('GrabCut Mask')
axarr[1, 2].set_title('Graph of Width')
axarr[0, 0].axis('off')
axarr[0, 1].axis('off')
axarr[1, 0].axis('off')
axarr[1, 1].axis('off')
axarr[1, 2].axis('off')
axarr[0, 2].axis('off')
plt.show()
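
As a small follow-up, not part of the original answer: distances is measured in pixels of the cropped frame, so dividing by the pixels-per-micron constant defined in the question (P2M10x = 3.2 at 10x magnification) gives the vessel width profile in microns.

# continuing from the loop above: distances holds one pixel width per column
P2M10x = 3.2  # pixels per micron at 10x, taken from the question's code
widths_um = [d / P2M10x for d in distances]
print("mean vessel width: {:.1f} um".format(sum(widths_um) / len(widths_um)))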
