Estimating the length of overlapping fibers in an image with Python - python

I need help estimating accurate fiber lengths from an image. I have developed Python code that estimates the length to some extent.
I have used the Skan open-source library to get the diameter and length of fiber segments from a skeletonized image of the fibers. The challenge I am facing is tracing a fiber through overlapping points or junctions when estimating its length. Currently the estimated length is much smaller than the actual fiber length, because only the segment lengths from a fiber endpoint up to the junction point are measured. It would be helpful if anyone could help with estimating the full length of all overlapping fibers. I am sharing the code and original image for reference.
import cv2
import numpy as np
from matplotlib import pyplot as plt
from skimage.morphology import skeletonize
from skimage import morphology
img00 = cv2.imread(r'original_img.jpg')
img_01 = cv2.cvtColor(img00, cv2.COLOR_BGR2GRAY)
img0 = cv2.cvtColor(img00, cv2.COLOR_BGR2GRAY)
i_size = min(np.size(img_01,1),600) # image size for imshow
# Creating kernel
kernel = np.ones((2, 2), np.uint8)
# Using cv2.dilate() method
img01 = cv2.dilate(img0, kernel, iterations=2)
cv2.imwrite('Img1_Filtered.jpg',img01)
ret,thresh1 = cv2.threshold(img01,245,255,cv2.THRESH_BINARY)
thresh = (thresh1/255).astype(np.uint8)
cv2.imwrite('Img2_Binary.jpg',thresh1)
# skeleton based on Lee's method
skeleton1 = (skeletonize(thresh, method='lee')/255).astype(bool)
skeleton1 = morphology.remove_small_objects(skeleton1, 100, connectivity=2)
# fiber Detection through skeletonization and its characterization
from skan import draw, Skeleton, summarize
spacing_nm = 1 # pixel
fig, ax = plt.subplots()
draw.overlay_skeleton_2d(img_01, skeleton1, dilate=1, axes=ax);
from skan.csr import skeleton_to_csgraph
pixel_graph, coordinates0 = skeleton_to_csgraph(skeleton1, spacing=spacing_nm)
skel_analysis = Skeleton(skeleton1, spacing=spacing_nm,source_image=img00)
branch_data = summarize(skel_analysis)
branch_data.hist(column='branch-distance', bins=100);
draw.overlay_euclidean_skeleton_2d(img_01, branch_data,skeleton_color_source='branch-type');
from scipy import ndimage
dd = ndimage.distance_transform_edt(thresh)
radii = np.multiply(dd, skeleton1);
Fiber_D_mean = np.mean(2*radii[radii>0]);
criteria = 2 * Fiber_D_mean; # Remove branches smaller than this length for characterization
aa = branch_data[(branch_data['branch-distance']>criteria)];
CNT_L_count, CNT_L_mean, CNT_L_stdev = aa['branch-distance'].describe().loc[['count','mean','std']]
print("Fiber Length (px[enter image description here][1]) : Count, Average, Stdev:",int(CNT_L_count),round(CNT_L_mean,2),round(CNT_L_stdev,2))

Starting with the skeleton I would proceed as follows:
convert the skeleton to a path graph
for each pair of paths identify valid junctions
calculate the angle between each adjacent path
merge paths that go nearly straight through the junction
Here is a sketch that can find overlapping fibers in the skeleton. I leave it to you to optimize it, make it robust against real-life images, and derive statistics from the results.
import cv2
import numpy as np
from skimage import morphology, graph
from skan import Skeleton

MAX_JUNCTION = 4    # maximal size of junctions
MAX_ANGLE = 80      # maximal angle in junction
DELTA = 3           # distance from endpoint to inner point to estimate direction at endpoint

def angle(v1, v2):
    rad = np.arctan2(v2[0], v2[1]) - np.arctan2(v1[0], v1[1])
    return np.abs((np.rad2deg(rad) % 360) - 180)

img = cv2.imread('img.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# dilate and threshold
kernel = np.ones((2, 2), np.uint8)
dilated = cv2.dilate(gray, kernel, iterations=1)
ret, thresh = cv2.threshold(dilated, 245, 255, cv2.THRESH_BINARY)

# skeletonize
skeleton = morphology.skeletonize(thresh, method='lee')
skeleton = morphology.remove_small_objects(skeleton.astype(bool), 100, connectivity=2)

# split skeleton into paths; for each path longer than MAX_JUNCTION get the list of point coordinates
g = Skeleton(skeleton)
lengths = np.array(g.path_lengths())
paths = [list(np.array(g.path_coordinates(i)).astype(int))
         for i in range(g.n_paths) if lengths[i] > MAX_JUNCTION]

# get the endpoints of each path and a vector to an inner point to estimate the direction at the endpoint
endpoints = [[p[0], np.subtract(p[0], p[DELTA]), i] for i, p in enumerate(paths)] + \
            [[p[-1], np.subtract(p[-1], p[-1 - DELTA]), i] for i, p in enumerate(paths)]

# for each pair of distinct endpoints at the same junction, calculate the deviation of angle
angles = []
costs = np.where(skeleton, 1, 255)  # cost array for route_through_array

for i1 in range(len(endpoints)):
    for i2 in range(i1 + 1, len(endpoints)):
        e1, d1, p1 = endpoints[i1]
        e2, d2, p2 = endpoints[i2]
        if p1 != p2:
            p, c = graph.route_through_array(costs, e1, e2)  # check connectivity of endpoints at junction
            if c <= MAX_JUNCTION:
                deg = angle(d1, d2)  # get deviation of directions at junction
                if deg <= MAX_ANGLE:
                    angles.append((deg, i1, i2, p))

# merge paths, with least deviation of angle first
angles.sort(key=lambda a: a[0])

for deg, i1, i2, p in angles:
    e1, e2 = endpoints[i1], endpoints[i2]
    if e1 and e2:
        p1, p2 = e1[2], e2[2]
        paths[p1] = paths[p1] + paths[p2] + p  # merge path 2 into path 1, add junction from route_through_array
        for i, e in enumerate(endpoints):      # switch path 2 at the other endpoint to the new merged path 1
            if e and e[2] == p2:
                endpoints[i][2] = p1
        paths[p2], endpoints[i1], endpoints[i2] = [], [], []  # disable merged path and endpoints

# display results
for p in paths:
    if p:
        img1 = img.copy()
        for v in p:
            img1[v[0], v[1]] = [0, 0, 255]
        cv2.imshow('fiber', img1)
        cv2.waitKey(0)

cv2.destroyAllWindows()

Related

How do I use Piecewise Affine Transformation to straighten curved text line/ contour?

Consider the following image:
and the following bounding contour (which is a smoothed version of the output of a text-detection neural network on the above image), so this contour is a given.
I need to warp both images so that I end up with a straight enough text line that can be fed to a text-recognition neural network, using Piecewise Affine Transformation or some other method, with an implementation if possible, or the key points of an implementation in Python.
I know how to find the medial axis, order its points, simplify it (e.g. using the Douglas-Peucker algorithm), and find the corresponding points on a straight line.
EDIT: the question can be rephrased, naively, as follows: have you tried the "puppet warp" feature in Adobe Photoshop? You specify "joint" points on an image and move these points to the desired places to perform the image warping. We can calculate the source points using a simplified medial axis (e.g. 20 points instead of 200 points) and calculate the corresponding target points on a straight line. How do we perform a Piecewise Affine Transformation using these two sets of points (source and target)?
EDIT: modified the images, my bad
Papers
Here's a paper that achieves the needed result:
A Novel Technique for Unwarping Curved Handwritten Texts Using Mathematical Morphology and Piecewise Affine Transformation
Another paper: A novel method for straightening curved text-lines in stylistic documents
Similar questions:
Straighten B-Spline
Challenge : Curved text extraction using python
How to convert curves in images to lines in Python?
Deforming an image so that curved lines become straight lines
Straightening a curved contour
Full code is also available in this notebook; runtime -> run all to reproduce the result.
import cv2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from scipy import interpolate
from scipy.spatial import distance
from shapely.geometry import LineString, GeometryCollection, MultiPoint
from skimage.morphology import skeletonize
from sklearn.decomposition import PCA
from warp import PiecewiseAffineTransform  # https://raw.githubusercontent.com/TimSC/image-piecewise-affine/master/warp.py

# Helper functions
def extendline(line, length):
    a = line[0]
    b = line[1]
    lenab = distance.euclidean(a, b)
    cx = b[0] + ((b[0] - a[0]) / lenab * length)
    cy = b[1] + ((b[1] - a[1]) / lenab * length)
    return [cx, cy]

def XYclean(x, y):
    xy = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1)), axis=1)
    # make PCA object
    pca = PCA(2)
    # fit on data
    pca.fit(xy)
    # transform into pca space
    xypca = pca.transform(xy)
    newx = xypca[:, 0]
    newy = xypca[:, 1]
    # sort
    indexSort = np.argsort(x)
    newx = newx[indexSort]
    newy = newy[indexSort]
    # add some more points (optional)
    f = interpolate.interp1d(newx, newy, kind='linear')
    newX = np.linspace(np.min(newx), np.max(newx), 100)
    newY = f(newX)
    # #smooth with a filter (optional)
    # window = 43
    # newY = savgol_filter(newY, window, 2)
    # return back to old coordinates
    xyclean = pca.inverse_transform(np.concatenate((newX.reshape(-1, 1), newY.reshape(-1, 1)), axis=1))
    xc = xyclean[:, 0]
    yc = xyclean[:, 1]
    return np.hstack((xc.reshape(-1, 1), yc.reshape(-1, 1))).astype(int)

def contour2skeleton(cnt):
    x, y, w, h = cv2.boundingRect(cnt)
    cnt_trans = cnt - [x, y]
    bim = np.zeros((h, w))
    bim = cv2.drawContours(bim, [cnt_trans], -1, color=255, thickness=cv2.FILLED) // 255
    sk = skeletonize(bim > 0)
    skeleton_yx = np.argwhere(sk > 0)
    skeleton_xy = np.flip(skeleton_yx, axis=None)
    xx, yy = skeleton_xy[:, 0], skeleton_xy[:, 1]
    skeleton_xy = XYclean(xx, yy)
    skeleton_xy = skeleton_xy + [x, y]
    return skeleton_xy

mm = cv2.imread('cont.png', cv2.IMREAD_GRAYSCALE)
plt.imshow(mm)

cnts, _ = cv2.findContours(mm.astype('uint8'), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cont = cnts[0].reshape(-1, 2)

# find skeleton
sk = contour2skeleton(cont)
mm = np.zeros_like(mm)
cv2.polylines(mm, [sk], False, 255, 2)
plt.imshow(mm)

# simplify the skeleton
ln = LineString(sk).simplify(2)
sk_simp = np.int0(ln.coords)
mm = np.zeros_like(mm)
for pt in sk_simp:
    cv2.circle(mm, pt, 5, 255, -1)
plt.imshow(mm)

# extend both ends of the skeleton
print(len(sk_simp))
a, b = sk_simp[1], sk_simp[0]
c1 = np.int0(extendline([a, b], 50))
sk_simp = np.vstack([c1, sk_simp])
a, b = sk_simp[-2], sk_simp[-1]
c2 = np.int0(extendline([a, b], 50))
sk_simp = np.vstack([sk_simp, c2])
print(len(sk_simp))
cv2.circle(mm, c1, 10, 255, -1)
cv2.circle(mm, c2, 10, 255, -1)
plt.imshow(mm)

########
# find the target points
########
pts1 = sk_simp.copy()
dists = [distance.euclidean(p1, p2) for p1, p2 in zip(pts1[:-1], pts1[1:])]
zip1 = list(zip(pts1[:-1], dists))

# find the first 2 target points
a = pts1[0]
b = a - (dists[0], 0)
pts2 = [a, b, ]
for z in zip1[1:]:
    lastpt = pts2[-1]
    pt, dst = z
    ln = [a, lastpt]
    c = extendline(ln, dst)
    pts2.append(c)
pts2 = np.int0(pts2)

ln1 = LineString(pts1)
ln2 = LineString(pts2)
GeometryCollection([ln1.buffer(5), ln2.buffer(5),
                    MultiPoint(pts2), MultiPoint(pts1)])

########
# create translated copies of source and target points
# 50 is arbitrary
pts1 = np.vstack([pts1 + [0, 50], pts1 + [0, -50]])
pts2 = np.vstack([pts2 + [0, 50], pts2 + [0, -50]])
MultiPoint(pts1)

########
# perform the warping
im = Image.open('orig.png')
dstIm = Image.new(im.mode, im.size, color=(255, 255, 255))
# Perform transform
PiecewiseAffineTransform(im, pts1, dstIm, pts2)
plt.figure(figsize=(10, 10))
plt.imshow(dstIm)
1- Find the medial axis, e.g. using skimage.morphology.skeletonize, and simplify it, e.g. using shapely's object.simplify; I used a tolerance of 2. The medial axis points are in white:
2- Find the corresponding points on a straight line, using the distance between each point and the next:
3- Also add extra points at the ends, colored blue, so that the points fit the entire contour length.
4- Create 2 copies of the source and target points, one copy translated up and the other translated down (I chose an offset of 50 here), so the source points now look like this. Please note that a simple upward/downward displacement may not be the best approach for all contours, e.g. if the contour curves by more than 45 degrees:
5- Using the code here, perform the PiecewiseAffineTransform using the source and target points. Here's the result; it's straight enough:
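If you'd rather not depend on the external warp.py, scikit-image ships its own transform.PiecewiseAffineTransform. A minimal sketch, under the assumption that pts1 (curved source) and pts2 (straight target) are the (x, y) point arrays built above; note that skimage's warp() maps output coordinates back to input, so the transform is estimated from target to source:

import numpy as np
from skimage import io, transform
from skimage.util import img_as_ubyte

im = io.imread('orig.png')

# estimate target -> source, because warp() needs the inverse mapping
tform = transform.PiecewiseAffineTransform()
tform.estimate(pts2.astype(float), pts1.astype(float))

straightened = transform.warp(im, tform, cval=1.0)  # fill outside pixels with white
io.imsave('straightened.png', img_as_ubyte(straightened))

The result should be comparable to the warp.py output, although the two libraries triangulate the control points differently.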
If the goal is to just unshift each column, then:
import numpy as np
from PIL import Image

source_img = Image.open("73614379-input-v2.png")
contour_img = Image.open("73614379-map-v3.png").convert("L")
assert source_img.size == contour_img.size

contour_arr = np.array(contour_img) != 0  # convert to boolean array
col_offsets = np.argmax(contour_arr, axis=0)  # find the first non-zero row for each column
assert len(col_offsets) == source_img.size[0]  # sanity check

min_nonzero_col_offset = np.min(col_offsets[col_offsets > 0])  # find the minimum non-zero row

target_img = Image.new("RGB", source_img.size, (255, 255, 255))
for x, col_offset in enumerate(col_offsets):
    offset = col_offset - min_nonzero_col_offset if col_offset > 0 else 0
    target_img.paste(source_img.crop((x, offset, x + 1, source_img.size[1])), (x, 0))
target_img.save("unshifted3.png")
With the new input and the new contour from the OP, this outputs the following image:

How to detect a grainy line?

I am trying to detect a grainy printed line on paper with cv2. I need the angle of the line. I don't have much knowledge of image processing and I only need to detect the line. I tried to play with the parameters but the angle is always detected wrong. Could someone help me? This is my code:
import cv2
import numpy as np
import matplotlib.pylab as plt
from matplotlib.pyplot import figure

img = cv2.imread('CamXY1_1.bmp')
crop_img = img[100:800, 300:900]
blur = cv2.GaussianBlur(crop_img, (1, 1), 0)
ret, thresh = cv2.threshold(blur, 150, 255, cv2.THRESH_BINARY)
gray = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 60, 150)
figure(figsize=(15, 15), dpi=150)
plt.imshow(edges, 'gray')

lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)
for rho, theta in lines[0]:
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 3000 * (-b))
    y1 = int(y0 + 3000 * (a))
    x2 = int(x0 - 3000 * (-b))
    y2 = int(y0 - 3000 * (a))
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
[image: the line to be detected]
Here's a possible solution to estimate the line (and its angle) without using the Hough line transform. The idea is to locate the start and end points of the line using the reduce function, which can reduce an image to a single column or row. As we reduce the image we can also get the total SUM of all the pixels across the reduced image. Using this info we can estimate the extreme points of the line and calculate its angle. These are the steps:
Resize your image because it is way too big
Get a binary image via adaptive thresholding
Define two extreme regions of the image and crop them
Reduce the ROIs to a column using the SUM mode, which is the sum of all rows
Accumulate the total values above a threshold value
Estimate the starting and ending points of the line
Get the angle of the line
Here's the code:
# imports:
import cv2
import numpy as np
import math
# image path
path = "D://opencvImages//"
fileName = "mmCAb.jpg"
# Reading an image in default mode:
inputImage = cv2.imread(path + fileName)
# Scale your BIG image into a small one:
scalePercent = 0.3
# Calculate the new dimensions
width = int(inputImage.shape[1] * scalePercent)
height = int(inputImage.shape[0] * scalePercent)
newSize = (width, height)
# Resize the image:
inputImage = cv2.resize(inputImage, newSize, None, None, None, cv2.INTER_AREA)
# Deep copy for results:
inputImageCopy = inputImage.copy()
# Convert BGR to grayscale:
grayInput = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)
# Adaptive Thresholding:
windowSize = 51
windowConstant = 11
binaryImage = cv2.adaptiveThreshold(grayInput, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, windowSize, windowConstant)
The first step is to get the binary image. Note that I previously downscaled your input because it is too big and we don't need all that info. This is the binary mask:
Now, we don't need most of the image. In fact, since the line spans the whole image, we only need to "trim" the first and last columns and check where the white pixels begin. I'll crop columns a little wider, though, so we can ensure we have enough data and as little noise as possible. I'll define two Regions of Interest (ROIs) and crop them. Then, I'll reduce each ROI to a column using the SUM mode; this will give me the summation of all intensities across each row. After that, I can accumulate the locations where the sum exceeds a certain threshold and approximate the location of the line, like this:
# Define the regions that will be cropped
# from the original image:
lineWidth = 5
cropPoints = [(0, 0, lineWidth, height), (width - lineWidth, 0, lineWidth, height)]

# Store the line points here:
linePoints = []

# Loop through the crop points and
# crop the ROI:
for p in range(len(cropPoints)):
    # Get the ROI:
    (x, y, w, h) = cropPoints[p]
    # Crop the ROI:
    imageROI = binaryImage[y:y + h, x:x + w]
    # Reduce the ROI to an n rows x 1 column matrix:
    reducedImg = cv2.reduce(imageROI, 1, cv2.REDUCE_SUM, dtype=cv2.CV_32S)
    # Get the height (or length) of the array:
    reducedHeight = reducedImg.shape[0]
    # Define a threshold and accumulate
    # the coordinates of the points:
    threshValue = 100
    pointSum = 0
    pointCount = 0
    for i in range(reducedHeight):
        currentValue = reducedImg[i]
        if currentValue > threshValue:
            pointSum = pointSum + i
            pointCount = pointCount + 1
    # Get the average coordinate of the line:
    y = int(pointSum / pointCount)
    # Store in list:
    linePoints.append((x, y))
The red rectangles show the regions I cropped from the input image:
Note that I've stored both points in the linePoints list. Let's check out our approximation by drawing a line that connects both points:
# Get the two points:
p0 = linePoints[0]
p1 = linePoints[1]
# Draw the line:
cv2.line(inputImageCopy, (p0[0], p0[1]), (p1[0], p1[1]), (255, 0, 0), 1)
cv2.imshow("Line", inputImageCopy)
cv2.waitKey(0)
Which yields:
Not bad, huh? Now that we have both points, we can estimate the angle of this line:
# Get angle:
adjacentSide = p1[0] - p0[0]
oppositeSide = p0[1] - p1[1]
# Compute the angle alpha:
alpha = math.degrees(math.atan(oppositeSide / adjacentSide))
print("Angle: "+str(alpha))
This prints:
Angle: 0.534210901840831

How to compute maximum width and length of worms?

I tried to find the width of each contour but it returns infinite width. Does anybody have an idea for this image? First find all contours, then calculate the distance using the Hausdorff distance.
My code is as follows:
Read Image
# imports used by the snippets below
import numpy as np
import matplotlib.pyplot as plt
from skimage import measure, metrics
from skimage.io import imread
from skimage.measure import label

img = imread('M2 Output.jpg')
gray = img[:, :, 0]
print('gray', gray.shape)
Binary = gray / 255
mask = np.zeros_like(img)
Find contours
contours = measure.find_contours(Binary, 0.8)
def drawShape(img, coordinates, color):
    # In order to draw our line in red
    # img = color.gray2rgb(img)
    # Make sure the coordinates are expressed as integers
    coordinates = coordinates.astype(int)
    img[coordinates[:, 0], coordinates[:, 1]] = color
    return img

Centroid Function

def centeroidnp(arr):
    length = len(arr[0])
    sum_x = np.sum(arr[0])
    sum_y = np.sum(arr[1])
    return (sum_x // length), (sum_y // length)

Manhattan Distance

def manhattan(p1, p2):
    dist = abs(p1[0] - p2[0]) + abs(p1[1] - p2[1])
    return dist
Width Calculation for each contour
for contour in contours:
    contouri = contour.astype(int)
    # print(contouri)
    mask = np.zeros_like(img)
    imge = drawShape(mask, contouri, [255, 255, 255])
    print('Image', imge)
    orig = imge.copy()
    plt.figure(figsize=(10, 10))
    plt.title('Contour')
    plt.imshow(imge)
    plt.show()
    centeroid = centeroidnp(contouri)
    print(centeroid)
    # Manual Threshold Limit
    thresh = 0.0
    dist = []
    for i in range(len(contouri[0])):
        # Calculate the distance from the centroid
        print(contouri[0][i], contouri[1][i])
        dist.append(manhattan((contouri[0][i], contouri[1][i]), centeroid))
    print(dist)
    # Get Worm Ends Location
    ends_index = np.argwhere(np.array(dist) > thresh * max(dist)).astype(int)
    print('endix', ends_index)
    # Padding of the ends
    imge[contouri[0][ends_index], contouri[1][ends_index]] = 0
    # Label each thread
    lab = label(imge)
    # Thread 1
    u = lab.copy()
    u[u == 1] = 0
    u[u > 0] = 1
    print('u', u)
    # Thread 2
    v = lab.copy()
    v[v == 2] = 0
    v[v > 0] = 1
    # Hausdorff Distance
    # width = round(metrics.hausdorff_distance(u, v))
    width = metrics.hausdorff_distance(u, v)
    print('width:', width)
If you can easily generate correct skeletons, then the skan library can measure the skeleton branch lengths for you:
https://jni.github.io/skan
In this case:
import skan
skel_obj = skan.Skeleton(skel)
skel_obj.path_lengths(0)
Here is the documentation for the Skeleton object API:
https://jni.github.io/skan/api/skan.csr.html#skan.csr.Skeleton
and the related function skan.summarize, which takes the skeleton object as input and produces some summary statistics:
https://jni.github.io/skan/api/skan.csr.html#skan.csr.summarize
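For example, a minimal sketch building on the snippet above (assuming skel is the boolean skeleton image; 'branch-distance' and 'branch-type' are the column names described in the skan docs):

import skan

skel_obj = skan.Skeleton(skel)
branch_data = skan.summarize(skel_obj)  # pandas DataFrame, one row per skeleton branch

# overall branch-length statistics
print(branch_data['branch-distance'].describe())

# mean length broken down by branch type (endpoint-to-junction, junction-to-junction, ...)
print(branch_data.groupby('branch-type')['branch-distance'].mean())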

Extract a fixed number of squares from an image with Python/OpenCV

I have several scanned images I would like to process with Python/OpenCV. Each of these images (see an example below) contains n rows of coloured squares. Each of these squares has the same size. The goal is to crop each of these squares and to extract the data from it.
I found code there which is able to extract squares from an image.
Here is my code where I have used it:
import numpy as np
import cv2
from matplotlib import pyplot as plt

def angle_cos(p0, p1, p2):
    d1, d2 = (p0 - p1).astype('float'), (p2 - p1).astype('float')
    return abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2)))

def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in range(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                _retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            contours, _hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in range(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    print(len(squares))
    return squares

img = cv2.imread("test_squares.jpg", 1)
plt.axis("off")
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()

squares = find_squares(img)
cv2.drawContours(img, squares, -1, (0, 255, 0), 1)
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
However, it finds too many squares (100 instead of 15!). Looking at the image, it seems that OpenCV finds a lot of contours for each square.
I'm pretty sure this can be optimized, since the squares are roughly the same size and far from each other. As a very beginner in OpenCV, I haven't yet found a way to give more criteria to the find_squares function so that only 15 squares come out at the end of the routine. Maybe the contour area can be constrained?
I have also found there a more detailed code (very close to the previous one), but it seems to have been developed for an old version of OpenCV. I haven't managed to make it work (and so to modify it).
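Note that find_squares scans three colour channels at ten threshold levels, so each physical square is usually detected many times; collapsing near-duplicate detections already gets the count down. A minimal sketch of such a de-duplication step, assuming the squares list returned by find_squares above (the 20 px centre distance is an arbitrary choice):

import numpy as np

def dedupe_squares(squares, min_center_dist=20):
    # keep one square per cluster of detections with nearby centres
    kept = []
    for sq in squares:
        center = sq.reshape(-1, 2).mean(axis=0)
        if all(np.linalg.norm(center - k.reshape(-1, 2).mean(axis=0)) >= min_center_dist
               for k in kept):
            kept.append(sq)
    return kept

squares = dedupe_squares(squares)
print(len(squares))  # ideally close to the 15 real squares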
This is another more robust method.
I used this code to find the contours in the image (the full code can be found in this gist):
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Define square size
min_square_size = 987
# Read Image
img = cv2.imread('/home/stephen/Desktop/3eY0k.jpg')
# Threshold and find edges
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Threshold the image - segment white background from post it notes
_, thresh = cv2.threshold(gray, 250, 255, cv2.THRESH_BINARY_INV);
# Find the contours
_, contours, _ = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
I iterated through the contours. I only looked at the contours that were a reasonable size. I found the four corners of each contour.
# Create a list for post-it images
images = []

# Iterate through the contours in the image
for contour in contours:
    area = cv2.contourArea(contour)
    # If the contour is not really small, or really big
    h, w = img.shape[0], img.shape[1]
    if area > min_square_size and area < h * w - (2 * (h + w)):
        # Get the four corners of the contour
        epsilon = .1 * cv2.arcLength(contour, True)
        approx = cv2.approxPolyDP(contour, epsilon, True)
        # Draw the points
        for point in approx:
            cv2.circle(img, tuple(point[0]), 2, (255, 0, 0), 2)
        # Warp it to a square
        pts1 = np.float32(approx)
        pts2 = np.float32([[0, 0], [300, 0], [300, 300], [0, 300]])
        M = cv2.getPerspectiveTransform(pts1, pts2)
        dst = cv2.warpPerspective(img, M, (300, 300))
        # Add the square to the list of images
        images.append(dst.copy())
The post-it notes are squares, but because the camera warps the objects in the image they do not appear as squares. I used warpPerspective to make the post-it notes square shapes. Only a few of them are shown in this plot (there are more that didn't fit):
If your problem is that too many contours (edges) are found in the image, my suggestion is to modify the edge-finding part first. It'll be by far the easiest modification to make.
In particular, you'll need to change this call:
bin = cv.Canny(gray, 0, 50, apertureSize=5)
The cv.Canny() function takes as arguments two threshold values, the aperture size, and a boolean to indicate whether a precise form of gradient is used. Play with those parameters and, my guess is, you'll get much better results.
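As a starting point, here is a small sketch that sweeps a few threshold pairs on the question's image and shows the edge maps side by side (the threshold pairs are arbitrary picks to adjust):

import cv2 as cv
import matplotlib.pyplot as plt

gray = cv.cvtColor(cv.imread('test_squares.jpg'), cv.COLOR_BGR2GRAY)

fig, axes = plt.subplots(2, 3, figsize=(12, 6))
pairs = [(0, 50), (50, 100), (50, 150), (100, 200), (150, 250), (200, 300)]
for ax, (lo, hi) in zip(axes.ravel(), pairs):
    edges = cv.Canny(gray, lo, hi, apertureSize=5)  # same aperture as find_squares
    ax.imshow(edges, cmap='gray')
    ax.set_title(f'({lo}, {hi})')
    ax.axis('off')
plt.show()

Pick the pair where each square contributes one clean outline rather than a band of parallel edges.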

Finding an edge in a tilted image with Canny

I'm trying to find the tilt angle in a series of images which look like the created example data below. There should be a clear edge that is visible by eye. However, I'm struggling to extract the edge so far. Is Canny the right way of finding the edge here, or is there a better way?
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
# create data
xvals = np.arange(0,2000)
yvals = 10000 * np.exp((xvals - 1600)/200) + 100
yvals[1600:] = 100
blurred = gaussian_filter(yvals, sigma=20)
# create image
img = np.tile(blurred,(2000,1))
img = np.swapaxes(img,0,1)
# rotate image
rows,cols = img.shape
M = cv.getRotationMatrix2D((cols/2,rows/2),3.7,1)
img = cv.warpAffine(img,M,(cols,rows))
# convert to uint8 for Canny
img_8 = cv.convertScaleAbs(img,alpha=(255.0/65535.0))
fig,ax = plt.subplots(3)
ax[0].plot(xvals,blurred)
ax[1].imshow(img)
# find edge
ax[2].imshow(cv.Canny(img_8, 20, 100, apertureSize=5))
You can find the angle by converting your image to binary (cv2.threshold with cv2.THRESH_BINARY) and then searching for contours.
When you locate your contour (the line), you can fit a line to it with cv2.fitLine() and get two points of your line. My math is not very good, but I think the linear equation goes f(x) = k*x + n; you can get k from those two points (k = (y2-y1)/(x2-x1)) and finally the angle phi = arctan(k). (If I'm wrong, please correct it.)
You can also use the rotated bounding rectangle, cv2.minAreaRect(), which already returns the angle of the rectangle (rect = cv2.minAreaRect() --> rect[2]). Hope it helps. Cheers!
Here is an example:
import cv2
import numpy as np
import math

img = cv2.imread('angle.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, threshold = cv2.threshold(gray, 170, 255, cv2.THRESH_BINARY)
im, contours, hierarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

for c in contours:
    area = cv2.contourArea(c)
    perimeter = cv2.arcLength(c, False)
    if area < 10001 and 100 < perimeter < 1000:
        # first approach - fit a line and calculate with y=kx+n --> angle=tan^(-1)k
        rows, cols = img.shape[:2]
        [vx, vy, x, y] = cv2.fitLine(c, cv2.DIST_L2, 0, 0.01, 0.01)
        lefty = int((-x * vy / vx) + y)
        righty = int(((cols - x) * vy / vx) + y)
        cv2.line(img, (cols - 1, righty), (0, lefty), (0, 255, 0), 2)
        (x1, y1) = (cols - 1, righty)
        (x2, y2) = (0, lefty)
        k = (y2 - y1) / (x2 - x1)
        angle = math.atan(k) * 180 / math.pi
        print(angle)
        # second approach - cv2.minAreaRect --> returns (center (x,y), (width, height), angle of rotation)
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
        print(rect[2])

cv2.imshow('img2', img)
Original image:
Output:
-3.8493663478518627
-3.7022125720977783
tribol, it seems like you can take the gradient image G = |Gx| + |Gy| (normalize it to some known range), compute its histogram, and take the top bins of it. That will give you an approximate mask of the line. Then you can do line fitting. It'll give you a good initial guess.
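A minimal sketch of that idea, assuming img_8 is the uint8 image built in the question; the 99.5th-percentile cut is an arbitrary stand-in for "taking the top bins" of the histogram:

import cv2 as cv
import numpy as np

# gradient magnitude G = |Gx| + |Gy|, normalized to 0-255
gx = cv.Sobel(img_8, cv.CV_32F, 1, 0)
gy = cv.Sobel(img_8, cv.CV_32F, 0, 1)
g = cv.normalize(np.abs(gx) + np.abs(gy), None, 0, 255, cv.NORM_MINMAX)

# keep only the strongest gradients as an approximate mask of the line
mask = g > np.percentile(g, 99.5)

# fit a line to the mask pixels for an initial angle guess
ys, xs = np.nonzero(mask)
pts = np.column_stack([xs, ys]).astype(np.float32)
vx, vy, x0, y0 = cv.fitLine(pts, cv.DIST_L2, 0, 0.01, 0.01).ravel()
print(np.degrees(np.arctan2(vy, vx)))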
A very simple way of doing it is as follows... adjust my numbers to suit your knowledge of the data.
Normalise your image to a scale of 0-255.
Choose two points A and B, where A is 10% of the image width in from the left side and B is 10% in from the right side. The distance AB is now 0.8 x 2000, or 1600 px.
Go North from point A, sampling your image till you exceed some sensible threshold that means you have met the tilted line. Note the Y value at this point as YA.
Do the same, going North from point B till you meet the tilted line. Note the Y value at this point as YB.
The angle you seek is:
arctan((YB - YA) / 1600)
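A rough sketch of these steps, assuming img_8 is the uint8 image from the question, that the bright side of the edge is reached by scanning upwards from the bottom, and that 128 is a sensible threshold; adjust the scan direction and threshold to your data:

import numpy as np

h, w = img_8.shape
xa, xb = int(0.1 * w), int(0.9 * w)  # points A and B, 10% in from each side
threshold = 128                      # assumed value for "meeting the tilted line"

def first_crossing(col):
    # go "North" (from the bottom row upwards) until the threshold is exceeded
    for y in range(h - 1, -1, -1):
        if img_8[y, col] > threshold:
            return y
    return None

ya, yb = first_crossing(xa), first_crossing(xb)
print(np.degrees(np.arctan2(yb - ya, xb - xa)))  # the angle you seek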
Thresholding as suggested by kavko didn't work that well, as the intensity varied from image to image (I could, of course, consider the histogram of each image to improve this approach). I ended up taking the maximum of the gradient in the y-direction:
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage, stats

def rotate_image(image):
    blur = ndimage.gaussian_filter(image, sigma=10)  # blur image first
    grad = np.gradient(blur, axis=0)  # take gradient along y-axis
    grad[grad > 10000] = 0  # filter unreasonably high values
    idx_maxline = np.argmax(grad, axis=0)  # get y-indices of max slope = indices of edge
    mean = np.mean(idx_maxline)
    std = np.std(idx_maxline)
    idx = np.arange(idx_maxline.shape[0])
    # filter positions where the highest slope is at a different position (blobs)
    idx_filtered = idx[(idx_maxline < mean + std) & (idx_maxline > mean - std)]
    slope, intercept, r_value, p_value, std_err = stats.linregress(idx_filtered, idx_maxline[idx_filtered])
    # the tilt angle is arctan(slope); slope*180/pi would only be a small-angle approximation
    out = ndimage.rotate(image, np.degrees(np.arctan(slope)), reshape=False)
    return out
out = rotate_image(img)
plt.imshow(out)
