I'm interested in calculating the average minimum distance between elements of two sets of contours.
Here is a sample image:
Here's my code so far:
import cv2
import numpy as np
def contours(layer):
    gray = cv2.cvtColor(layer, cv2.COLOR_BGR2GRAY)
    ret, binary = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
    image, contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    drawn = cv2.drawContours(image, contours, -1, (150, 150, 150), 3)
    return contours, drawn
def minDistance(contour, contourOther):
    distanceMin = 99999999
    for xA, yA in contour[0]:
        for xB, yB in contourOther[0]:
            distance = ((xB-xA)**2 + (yB-yA)**2)**(1/2)  # distance formula
            if (distance < distanceMin):
                distanceMin = distance
    return distanceMin
def cntDistanceCompare(contoursA, contoursB):
    cumMinDistList = []
    for contourA in contoursA:
        indMinDistList = []
        for contourB in contoursB:
            minDist = minDistance(contourA, contourB)
            indMinDistList.append(minDist)
        cumMinDistList.append(indMinDistList)
    l = cumMinDistList
    return sum(l)/len(l)  # returns mean distance
def maskBuilder(bgr, hl, hh, sl, sh, vl, vh):
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    lower_bound = np.array([hl, sl, vl], dtype=np.uint8)
    upper_bound = np.array([hh, sh, vh], dtype=np.uint8)
    return cv2.inRange(hsv, lower_bound, upper_bound)
img = cv2.imread("sample.jpg")
maskA=maskBuilder(img, 150,185, 40,220, 65,240)
maskB=maskBuilder(img, 3,20, 50,180, 20,250)
layerA = cv2.bitwise_and(img, img, mask = maskA)
layerB = cv2.bitwise_and(img, img, mask = maskB)
contoursA = contours(layerA)[0]
contoursB = contours(layerA)[1]
print cntDistanceCompare(contoursA, contoursB)
As you can see from these images, the masking and thresholding work (shown for the first set of contours):
The cntDistanceCompare() function loops through each contour of set A and set B, outputting the average minimum distance between contours. Within this function, minDistance() computes, from the (x,y) points on each pair of contours A and B, the minimum Euclidean distance (using the distance formula).
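In other words, for each pair of contours I want the smallest pairwise point distance. Here is a vectorized sketch of that computation (the scipy call is my assumption, not part of my code; each contour from cv2.findContours() is an (N, 1, 2) array of (x, y) points):

import numpy as np
from scipy.spatial.distance import cdist

def min_distance_vec(contour, contourOther):
    ptsA = contour.reshape(-1, 2)       # (N, 2) array of (x, y) points
    ptsB = contourOther.reshape(-1, 2)  # (M, 2) array of (x, y) points
    return cdist(ptsA, ptsB).min()      # smallest pairwise Euclidean distance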
The following error is thrown:
Traceback (most recent call last):
File "mindistance.py", line 46, in
cntDistanceCompare(contoursA, contoursB)
File "mindistance.py", line 26, in cntDistanceCompare
minDist = minDistance(contourA,contourB)
File "mindistance.py:, line 15, in minDistance
for xB, yB in contourOther[0]:
TypeError: 'numpy.uint8' object is not iterable
I suspect this problem arises from my lack of knowledge of how to reference the x,y coordinates of each contour vertex within the data structure given by cv2.findContours().
I am using an older version of OpenCV where findContours only returns two values, but hopefully the important part of this code makes sense. I didn't test your functions, but I did show how to get the contour centers. You have to do some stuff with "moments."
import cv2
import numpy as np
def contours(layer):
    gray = cv2.cvtColor(layer, cv2.COLOR_BGR2GRAY)
    ret, binary = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    #drawn = cv2.drawContours(image, contours, -1, (150, 150, 150), 3)
    return contours #, drawn
def minDistance(contour, contourOther):
    distanceMin = 99999999
    for xA, yA in contour[0]:
        for xB, yB in contourOther[0]:
            distance = ((xB-xA)**2 + (yB-yA)**2)**(1/2)  # distance formula
            if (distance < distanceMin):
                distanceMin = distance
    return distanceMin
def cntDistanceCompare(contoursA, contoursB):
    cumMinDistList = []
    for contourA in contoursA:
        indMinDistList = []
        for contourB in contoursB:
            minDist = minDistance(contourA, contourB)
            indMinDistList.append(minDist)
        cumMinDistList.append(indMinDistList)
    l = cumMinDistList
    return sum(l)/len(l)  # returns mean distance
def maskBuilder(bgr, hl, hh, sl, sh, vl, vh):
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    lower_bound = np.array([hl, sl, vl], dtype=np.uint8)
    upper_bound = np.array([hh, sh, vh], dtype=np.uint8)
    return cv2.inRange(hsv, lower_bound, upper_bound)
def getContourCenters(contourData):
    contourCoordinates = []
    for contour in contourData:
        moments = cv2.moments(contour)
        contourX = int(moments['m10'] / float(moments['m00']))
        contourY = int(moments['m01'] / float(moments['m00']))
        contourCoordinates += [[contourX, contourY]]
    return contourCoordinates
img = cv2.imread("sample.jpg")
maskA=maskBuilder(img, 150,185, 40,220, 65,240)
maskB=maskBuilder(img, 3,20, 50,180, 20,250)
layerA = cv2.bitwise_and(img, img, mask = maskA)
layerB = cv2.bitwise_and(img, img, mask = maskB)
contoursA = contours(layerA)
contoursB = contours(layerB)
print getContourCenters(contoursA)
print getContourCenters(contoursB)
#print cntDistanceCompare(contoursA, contoursB)
Edit: I'm playing with your functions now and I fear I misread the question. Let me know and I'll delete my answer.
I need help estimating accurate fiber length from an image. I have developed code in Python that estimates the length to some extent.
I have used the Skan open-source library to get the diameter and length of fiber segments from a skeletonized image of the fibers. I am facing a challenge in tracing a fiber through overlapping points or junctions for length estimation. Currently the estimated length is much smaller than in the actual image, because only the length of each segment from a fiber endpoint up to the junction point is estimated. It would be helpful if anyone could help with estimating the length of all overlapping fibers. Sharing the code and original image for reference.
import cv2
import numpy as np
from matplotlib import pyplot as plt
from skimage.morphology import skeletonize
from skimage import morphology
img00 = cv2.imread(r'original_img.jpg')
img_01 = cv2.cvtColor(img00, cv2.COLOR_BGR2GRAY)
img0 = cv2.cvtColor(img00, cv2.COLOR_BGR2GRAY)
i_size = min(np.size(img_01,1),600) # image size for imshow
# Creating kernel
kernel = np.ones((2, 2), np.uint8)
# Using cv2.dilate() method
img01 = cv2.dilate(img0, kernel, iterations=2)
cv2.imwrite('Img1_Filtered.jpg',img01)
ret,thresh1 = cv2.threshold(img01,245,255,cv2.THRESH_BINARY)
thresh = (thresh1/255).astype(np.uint8)
cv2.imwrite('Img2_Binary.jpg',thresh1)
# skeleton based on Lee's method
skeleton1 = (skeletonize(thresh, method='lee')/255).astype(bool)
skeleton1 = morphology.remove_small_objects(skeleton1, 100, connectivity=2)
# fiber Detection through skeletonization and its characterization
from skan import draw, Skeleton, summarize
spacing_nm = 1 # pixel
fig, ax = plt.subplots()
draw.overlay_skeleton_2d(img_01, skeleton1, dilate=1, axes=ax);
from skan.csr import skeleton_to_csgraph
pixel_graph, coordinates0 = skeleton_to_csgraph(skeleton1, spacing=spacing_nm)
skel_analysis = Skeleton(skeleton1, spacing=spacing_nm,source_image=img00)
branch_data = summarize(skel_analysis)
branch_data.hist(column='branch-distance', bins=100);
draw.overlay_euclidean_skeleton_2d(img_01, branch_data,skeleton_color_source='branch-type');
from scipy import ndimage
dd = ndimage.distance_transform_edt(thresh)
radii = np.multiply(dd, skeleton1);
Fiber_D_mean = np.mean(2*radii[radii>0]);
criteria = 2 * Fiber_D_mean; # Remove branches smaller than this length for characterization
aa = branch_data[(branch_data['branch-distance']>criteria)];
CNT_L_count, CNT_L_mean, CNT_L_stdev = aa['branch-distance'].describe().loc[['count','mean','std']]
print("Fiber Length (px[enter image description here][1]) : Count, Average, Stdev:",int(CNT_L_count),round(CNT_L_mean,2),round(CNT_L_stdev,2))
Starting with the skeleton I would proceed as follows:
- convert the skeleton to a path graph
- for each pair of paths, identify valid junctions
- calculate the angle between adjacent paths
- merge paths that go nearly straight through the junction
Here is a sketch that can find overlapping fibers in the skeleton. I leave it to you to optimize it, make it robust against real-life images, and derive statistics from the results.
import cv2
import numpy as np
from skimage import morphology, graph
from skan import Skeleton
MAX_JUNCTION = 4 # maximal size of junctions
MAX_ANGLE = 80 # maximal angle in junction
DELTA = 3 # distance from endpoint to inner point to estimate direction at endpoint
def angle(v1, v2):
    rad = np.arctan2(v2[0], v2[1]) - np.arctan2(v1[0], v1[1])
    return np.abs((np.rad2deg(rad) % 360) - 180)
img = cv2.imread('img.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# dilate and threshold
kernel = np.ones((2, 2), np.uint8)
dilated = cv2.dilate(gray, kernel, iterations=1)
ret, thresh = cv2.threshold(dilated, 245, 255, cv2.THRESH_BINARY)
# skeletonize
skeleton = morphology.skeletonize(thresh, method='lee')
skeleton = morphology.remove_small_objects(skeleton.astype(bool), 100, connectivity=2)
# split skeleton into paths, for each path longer than MAX_JUNCTION get list of point coordinates
g = Skeleton(skeleton)
lengths = np.array(g.path_lengths())
paths = [list(np.array(g.path_coordinates(i)).astype(int)) for i in range(g.n_paths) if lengths[i] > MAX_JUNCTION]
# get endpoints of path and vector to inner point to estimate direction at endpoint
endpoints = [[p[0], np.subtract(p[0], p[DELTA]), i] for i, p in enumerate(paths)] +\
[[p[-1], np.subtract(p[-1], p[-1 - DELTA]), i] for i, p in enumerate(paths)]
# get each pair of distinct endpoints with the same junction and calculate deviation of angle
angles = []
costs = np.where(skeleton, 1, 255) # cost array for route_through_array
for i1 in range(len(endpoints)):
    for i2 in range(i1 + 1, len(endpoints)):
        e1, d1, p1 = endpoints[i1]
        e2, d2, p2 = endpoints[i2]
        if p1 != p2:
            p, c = graph.route_through_array(costs, e1, e2)  # check connectivity of endpoints at junction
            if c <= MAX_JUNCTION:
                deg = angle(d1, d2)  # get deviation of directions at junction
                if deg <= MAX_ANGLE:
                    angles.append((deg, i1, i2, p))
# merge paths, with least deviation of angle first
angles.sort(key=lambda a: a[0])
for deg, i1, i2, p in angles:
    e1, e2 = endpoints[i1], endpoints[i2]
    if e1 and e2:
        p1, p2 = e1[2], e2[2]
        paths[p1] = paths[p1] + paths[p2] + p  # merge path 2 into path 1, add junction from route_through_array
        for i, e in enumerate(endpoints):  # switch path 2 at other endpoint to new merged path 1
            if e and e[2] == p2:
                endpoints[i][2] = p1
        paths[p2], endpoints[i1], endpoints[i2] = [], [], []  # disable merged path and endpoints
# display results
for p in paths:
    if p:
        img1 = img.copy()
        for v in p:
            img1[v[0], v[1]] = [0, 0, 255]
        cv2.imshow('fiber', img1)
        cv2.waitKey(0)
cv2.destroyAllWindows()
I used the following code to select the nose in OpenCV and Python. I searched a lot to find a way to change the size of the nose and save the result as another image, but I didn't find anything. Is there anybody who can help me do this?
import cv2
import numpy as np
import dlib
img = cv2.imread('1.jpg')
img = cv2.resize(img,(0,0),None,0.5,0.5)
imgOriginal = img.copy()
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
def createBox(img, points, scale=5):
    bbox = cv2.boundingRect(points)
    x, y, w, h = bbox
    imgCrop = img[y:y+h, x:x+w]
    imgCrop = cv2.resize(imgCrop, (0, 0), None, scale, scale)
    return imgCrop
imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces = detector(imgGray)
for face in faces:
    x1, y1 = face.left(), face.top()
    x2, y2 = face.right(), face.bottom()
    imgOriginal = cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 1)
    landmarks = predictor(imgGray, face)
    myPoints = []
    for n in range(68):
        x = landmarks.part(n).x
        y = landmarks.part(n).y
        myPoints.append([x, y])
        #cv2.circle(imgOriginal, (x, y), 5, (50, 50, 255), cv2.FILLED)
        #cv2.putText(imgOriginal, str(n), (x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 0, 255), 1)
    myPoints = np.array(myPoints)
    #nose points to select
    #nose_points = myPoints[27:35]
    print(myPoints)
cv2_imshow(imgOriginal)
cv2.waitKey(0)
thanks in advance
Here is one way using a spherical (bubble) warp in a local region in Python/OpenCV.
- Define region center and radius and amount of spherical distortion
- Crop the image for that center and radius
- Compute the spherical distortion x and y displacement maps and a binary mask
- Apply the distortion maps using cv2.remap
- Antialias the mask
- Merge the distorted and cropped image using the mask
- Insert that merged image into the original image
- Save the results
Input:
import numpy as np
import cv2
import math
import skimage.exposure
img = cv2.imread("portrait_of_mussorgsky2.jpg")
# set location and radius
cx = 130
cy = 109
radius = 30
# set distortion gain
gain = 1.5
# crop image
crop = img[cy-radius:cy+radius, cx-radius:cx+radius]
# get dimensions
ht, wd = crop.shape[:2]
xcent = wd / 2
ycent = ht / 2
rad = min(xcent,ycent)
# set up the x and y maps as float32
map_x = np.zeros((ht, wd), np.float32)
map_y = np.zeros((ht, wd), np.float32)
mask = np.zeros((ht, wd), np.uint8)
# create map with the spherize distortion formula --- arcsin(r)
# xcomp = arcsin(r)*x/r; ycomp = arcsin(r)*y/r
for y in range(ht):
    Y = (y - ycent)/ycent
    for x in range(wd):
        X = (x - xcent)/xcent
        R = math.hypot(X, Y)
        if R == 0:
            map_x[y, x] = x
            map_y[y, x] = y
            mask[y, x] = 255
        elif R >= .90:  # avoid extreme blurring near R = 1
            map_x[y, x] = x
            map_y[y, x] = y
            mask[y, x] = 0
        elif gain >= 0:
            map_x[y, x] = xcent*X*math.pow((2/math.pi)*(math.asin(R)/R), gain) + xcent
            map_y[y, x] = ycent*Y*math.pow((2/math.pi)*(math.asin(R)/R), gain) + ycent
            mask[y, x] = 255
        elif gain < 0:
            gain2 = -gain
            map_x[y, x] = xcent*X*math.pow((math.sin(math.pi*R/2)/R), gain2) + xcent
            map_y[y, x] = ycent*Y*math.pow((math.sin(math.pi*R/2)/R), gain2) + ycent
            mask[y, x] = 255
# remap using map_x and map_y
bump = cv2.remap(crop, map_x, map_y, cv2.INTER_LINEAR, borderMode = cv2.BORDER_CONSTANT, borderValue=(0,0,0))
# antialias edge of mask
# (pad so blur does not extend to edges of image, then crop later)
blur = 7
mask = cv2.copyMakeBorder(mask, blur,blur,blur,blur, borderType=cv2.BORDER_CONSTANT, value=(0))
mask = cv2.GaussianBlur(mask, (0,0), sigmaX=blur, sigmaY=blur, borderType = cv2.BORDER_DEFAULT)
h, w = mask.shape
mask = mask[blur:h-blur, blur:w-blur]
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
mask = skimage.exposure.rescale_intensity(mask, in_range=(127.5,255), out_range=(0,1))
# merge bump with crop using grayscale (not binary) mask
bumped = (bump * mask + crop * (1-mask)).clip(0,255).astype(np.uint8)
# insert bumped image into original
result = img.copy()
result[cy-radius:cy+radius, cx-radius:cx+radius] = bumped
# save results
cv2.imwrite("portrait_of_mussorgsky2_bump.jpg", result)
# display images
cv2.imshow('img', img)
cv2.imshow('crop', crop)
cv2.imshow('bump', bump)
cv2.imshow('mask', mask)
cv2.imshow('bumped', bumped)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Resulting Image:
I think you need "Bulge" effects such as implode and explode. There is no implementation of these filters in OpenCV, but you can find other tools, such as Wand (a Python binding for ImageMagick), that have implode/explode.
Example (wand):
from wand.image import Image
with Image(filename="test.jpg") as img:
img.implode(amount = -0.2)
img.save(filename="destination.jpg")
# img_array = numpy.asarray(img) --> you can convert wand.image.Image to numpy array for further uses
Passing negative values into the implode function is equal to doing an explode, so for a magnifying effect use negative values.
There is one problem though: img.implode operates on the center of the image, so after you've found the face features (eyes, nose, ...) you need to shift your picture somehow so that the eye or nose lies at the center of the image. After that you can simply use the implode function.
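Here is a rough, untested sketch of that recentering idea: crop a square patch centered on the feature, explode just that patch, and paste it back. The center (cx, cy), radius r, and implode amount are hypothetical values you would derive from the dlib landmarks (e.g. point 30 for the nose tip):

import cv2
import numpy as np
from wand.image import Image

img = cv2.imread('1.jpg')
cx, cy, r = 150, 200, 40              # hypothetical nose center and patch radius

crop = img[cy-r:cy+r, cx-r:cx+r]      # the nose tip is now the patch center

with Image.from_array(crop) as patch:  # requires wand >= 0.5.3
    patch.implode(amount=-0.3)         # negative amount = explode (magnify)
    warped = np.asarray(patch)[..., :3]  # back to a numpy array, drop any alpha

result = img.copy()
result[cy-r:cy+r, cx-r:cx+r] = warped  # paste the magnified patch back
cv2.imwrite('nose_bigger.jpg', result)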
I tried to find the width of each contour, but it returns an infinite width. Does anybody have an idea about this image? I first find all contours and then calculate the distance using the Hausdorff distance.
My code is as follows:
Read Image
# imports assumed from the functions used below
import numpy as np
import matplotlib.pyplot as plt
from skimage import measure, metrics
from skimage.io import imread
from skimage.measure import label

img = imread('M2 Output.jpg')
gray = img[:,:,0]
print('gray', gray.shape)
Binary = gray / 255
mask = np.zeros_like(img)
Find contours
contours = measure.find_contours(Binary, 0.8)
def drawShape(img, coordinates, color):
    # In order to draw our line in red
    #img = color.gray2rgb(img)
    # Make sure the coordinates are expressed as integers
    coordinates = coordinates.astype(int)
    img[coordinates[:, 0], coordinates[:, 1]] = color
    return img
Centeroid Function
def centeroidnp(arr):
    length = len(arr[0])
    sum_x = np.sum(arr[0])
    sum_y = np.sum(arr[1])
    return (sum_x//length), (sum_y//length)
Manhattan Distance
def manhattan(p1, p2):
    dist = abs(p1[0] - p2[0]) + abs(p1[1] - p2[1])
    return dist
Width Calculation for each contour
for contour in contours:
    contouri = contour.astype(int)
    #print(contouri)
    mask = np.zeros_like(img)
    imge = drawShape(mask, contouri, [255, 255, 255])
    print('Image', imge)
    orig = imge.copy()
    plt.figure(figsize=(10, 10))
    plt.title('Contour')
    plt.imshow(imge)
    plt.show()
    centeroid = centeroidnp(contouri)
    print(centeroid)
    # Manual Threshold Limit
    thresh = 0.0
    dist = []
    # Get Worm Ends Location
    for i in range(len(contouri[0])):
        # Calculate the distance from the centroid
        print(contouri[0][i], contouri[1][i])
        dist.append(manhattan((contouri[0][i], contouri[1][i]), centeroid))
    print(dist)
    # Get Worm Ends Location
    ends_index = (np.argwhere(dist > thresh * max(dist))).astype(int)
    print('endix', ends_index)
    # Padding of the ends
    imge[contouri[0][ends_index], contouri[1][ends_index]] = 0
    # Label each thread
    lab = label(imge)
    # Thread 1
    u = lab.copy()
    u[u==1] = 0
    u[u>0] = 1
    print('u', u)
    # Thread 2
    v = lab.copy()
    v[v==2] = 0
    v[v>0] = 1
    # Hausdorff Distance
    #width = round(metrics.hausdorff_distance(u, v))
    width = metrics.hausdorff_distance(u, v)
    print('width:', width)
If you can easily generate correct skeletons, then the skan library can measure the skeleton branch lengths for you:
https://jni.github.io/skan
In this case:
import skan
skel_obj = skan.Skeleton(skel)
skel_obj.path_lengths(0)
Here is the documentation for the Skeleton object API:
https://jni.github.io/skan/api/skan.csr.html#skan.csr.Skeleton
and the related function skan.summarize, which takes the skeleton object as input and produces some summary statistics:
https://jni.github.io/skan/api/skan.csr.html#skan.csr.summarize
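Putting those together, a short usage sketch (assuming skel is a boolean skeleton image, e.g. from skimage.morphology.skeletonize):

import skan

skel_obj = skan.Skeleton(skel)              # skel: boolean skeleton array
stats = skan.summarize(skel_obj)            # pandas DataFrame, one row per branch
print(stats['branch-distance'].describe())  # summary statistics of branch lengths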
First of all, I am passing values into hough_lines such as rho = 2, theta = np.pi/180, threshold = 15, min_line_len = 40, max_line_gap = 20:
lines = hough_lines(masked_edges, rho, theta, threshold, min_line_len, max_line_gap)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    #This function is used for drawing lines when we give specific criteria for the pixels we want to pick up.
    #This function is used with the draw_lines function to draw the lines in specific pixels,
    #which are selected by the region_of_interest function.
    """`img` should be the output of a Canny transform.
    Returns an image with hough lines drawn.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(line_img, lines)
    return line_img
Above is my code, and I am getting the following error:
File "auto.py", line 68, in <module>
output = lane.process_image(frame)
File "/home/pi/test/lane.py", line 227, in process_image
lines = hough_lines(masked_edges, rho, theta, threshold, min_line_len, max_line_gap)
File "/home/pi/test/lane.py", line 154, in hough_lines
draw_lines(line_img, lines)
File "/home/pi/test/lane.py", line 97, in draw_lines
for line in lines:
TypeError: 'NoneType' object is not iterable
In the main function, I am trying to feed the frame taken by the video capture into the process_image function in the lane file.
cam = cv2.VideoCapture(0)
while True:
    print('Succeed to connect...')
    data = ''
    data = sys.stdin.read(1)[0]
    print("data input=", data)
    while True:
        if not cam.isOpened():
            print("Wait for the header")
        else:
            flag, frame = cam.read()
            frame = cv2.flip(frame, 1)
            cv2.imshow('video', frame)
            #print(type(frame))
            output = lane.process_image(frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            exit()
I tried many things, such as changing the parameter values for the hough_lines function, and tried to see which function produces the NoneType value in terms of the image. However, up to the cv2.HoughLinesP call, every function returns a value of ndarray type.
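I suspect cv2.HoughLinesP returns None when it finds no line segments in a frame, so a guard like this sketch inside hough_lines would at least avoid the crash (returning a black line image for such frames is my assumption of sensible behavior):

def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    if lines is not None:  # HoughLinesP returns None when nothing is found
        draw_lines(line_img, lines)
    return line_img        # stays black when no lines were detected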
--edit
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import os
def grayscale(img):  # It converts the original picture to a grayscale picture.
    """Applies the Grayscale transform
    This will return an image with only one color channel
    but NOTE: to see the returned image as grayscale
    (assuming your grayscaled image is called 'gray')
    you should call plt.imshow(gray, cmap='gray')"""
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Or use BGR2GRAY if you read an image with cv2.imread()
    # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
    #After the grayscale function is applied, this converts the grayscale image to edges,
    #which is a binary image with white pixels.
    """Applies the Canny transform"""
    return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    #Gaussian blur is applied to suppress noise and nonlogical gradients by averaging.
    """Applies a Gaussian Noise kernel"""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
    # This function is used for tracing a specific line of the road,
    # utilizing vertices (4 integer points here) and ignore_mask_color, which ignores
    # pixels that do not meet the criteria.
    """
    Applies an image mask.
    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    `vertices` should be a numpy array of integer points.
    """
    #defining a blank mask to start with
    mask = np.zeros_like(img)
    #defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255
    #filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    #returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """
    NOTE: this is the function you might want to use as a starting point once you want to
    average/extrapolate the line segments you detect to map out the full
    extent of the lane (going from the result shown in raw-lines-example.mp4
    to that shown in P1_example.mp4).
    Think about things like separating line segments by their
    slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
    line vs. the right line. Then, you can average the position of each of
    the lines and extrapolate to the top and bottom of the lane.
    This function draws `lines` with `color` and `thickness`.
    Lines are drawn on the image inplace (mutates the image).
    If you want to make the lines semi-transparent, think about combining
    this function with the weighted_img() function below
    """
    """
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
    """
    right_slopes = []
    right_intercepts = []
    left_slopes = []
    left_intercepts = []
    left_points_x = []
    left_points_y = []
    right_points_x = []
    right_points_y = []
    y_max = img.shape[0]
    y_min = img.shape[0]
    for line in lines:
        for x1, y1, x2, y2 in line:
            slope = (y2-y1)/(x2-x1)
            if slope < 0.0 and slope > -math.inf:  # math.inf = floating point positive infinity
                left_slopes.append(slope)  # left line
                left_points_x.append(x1)
                left_points_x.append(x2)
                left_points_y.append(y1)
                left_points_y.append(y2)
                left_intercepts.append(y1 - slope*x1)
            if slope > 0.0 and slope < math.inf:
                right_slopes.append(slope)  # right line
                right_points_x.append(x1)
                right_points_x.append(x2)
                right_points_y.append(y1)
                right_points_y.append(y2)
                right_intercepts.append(y1 - slope*x1)
            y_min = min(y1, y2, y_min)
    if len(left_slopes) > 0:
        left_slope = np.mean(left_slopes)
        left_intercept = np.mean(left_intercepts)
        x_min_left = int((y_min - left_intercept)/left_slope)
        x_max_left = int((y_max - left_intercept)/left_slope)
        cv2.line(img, (x_min_left, y_min), (x_max_left, y_max), [255, 0, 0], 8)
    if len(right_slopes) > 0:
        right_slope = np.mean(right_slopes)
        right_intercept = np.mean(right_intercepts)
        x_min_right = int((y_min - right_intercept)/right_slope)
        x_max_right = int((y_max - right_intercept)/right_slope)
        cv2.line(img, (x_min_right, y_min), (x_max_right, y_max), [255, 0, 0], 8)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    #This function is used for drawing lines when we give specific criteria for the pixels we want to pick up.
    #This function is used with the draw_lines function to draw the lines in specific pixels,
    #which are selected by the region_of_interest function.
    """
    `img` should be the output of a Canny transform.
    Returns an image with hough lines drawn.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(line_img, lines)
    return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    # By applying the "color" binary image, it finally draws the lines on the edge image.
    """
    `img` is the output of the hough_lines(), an image with lines drawn on it.
    Should be a blank image (all black) with lines drawn on it.
    `initial_img` should be the image before any processing.
    The result image is computed as follows:
    initial_img * α + img * β + γ
    NOTE: initial_img and img must be the same shape!
    """
    return cv2.addWeighted(initial_img, α, img, β, γ)
def process_image(image):
    # NOTE: The output you return should be a color image (3 channel) for processing video below
    # TODO: put your pipeline here,
    # you should return the final output (image where lines are drawn on lanes)
    # Read in and grayscale the image
    gray = grayscale(image)
    # Define a kernel size and apply Gaussian smoothing
    kernel_size = 5
    blur_gray = gaussian_blur(gray, kernel_size)
    # Define our parameters for Canny and apply
    low_threshold = 50
    high_threshold = 150
    edges = canny(blur_gray, low_threshold, high_threshold)
    # Next we'll create a masked edges image using cv2.fillPoly()
    imshape = image.shape
    vertices = np.array([[(120, imshape[0]), (450, 320), (500, 320), (imshape[1], imshape[0])]], dtype=np.int32)
    masked_edges = region_of_interest(edges, vertices)
    # Define the Hough transform parameters
    # Make a blank the same size as our image to draw on
    rho = 2              # distance resolution in pixels of the Hough grid
    theta = np.pi/180    # angular resolution in radians of the Hough grid
    threshold = 15       # minimum number of votes (intersections in Hough grid cell)
    min_line_len = 40    # minimum number of pixels making up a line
    max_line_gap = 20    # maximum gap in pixels between connectable line segments
    line_image = np.copy(image)*1  # creating a blank to draw lines on
    # Run Hough on edge detected image
    lines = hough_lines(masked_edges, rho, theta, threshold, min_line_len, max_line_gap)
    # Create a "color" binary image to combine with line image
    color_edges = np.dstack((edges, edges, edges))
    # Draw the lines on the edge image
    lines_edges = weighted_img(lines, line_image)
    return lines_edges
I'm trying to find the tilt angle in a series of images which look like the created example data below. There should be a clear edge which is visible by eye. However, I'm struggling to extract the edge so far. Is Canny the right way of finding the edge here, or is there a better way?
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
# create data
xvals = np.arange(0,2000)
yvals = 10000 * np.exp((xvals - 1600)/200) + 100
yvals[1600:] = 100
blurred = gaussian_filter(yvals, sigma=20)
# create image
img = np.tile(blurred,(2000,1))
img = np.swapaxes(img,0,1)
# rotate image
rows,cols = img.shape
M = cv.getRotationMatrix2D((cols/2,rows/2),3.7,1)
img = cv.warpAffine(img,M,(cols,rows))
# convert to uint8 for Canny
img_8 = cv.convertScaleAbs(img,alpha=(255.0/65535.0))
fig,ax = plt.subplots(3)
ax[0].plot(xvals,blurred)
ax[1].imshow(img)
# find edge
ax[2].imshow(cv.Canny(img_8, 20, 100, apertureSize=5))
You can find the angle by transforming your image to binary (cv2.threshold() with cv2.THRESH_BINARY) and then searching for contours.
When you locate your contour (line), you can fit a line on your contour with cv2.fitLine() and get two points of your line. My math is not very good, but I think that in a linear equation the formula goes f(x) = k*x + n, and you can get k out of those two points (k = (y2-y1)/(x2-x1)) and finally the angle phi = arctan(k). (If I'm wrong, please correct it.)
You can also use the rotated bounding rectangle - cv2.minAreaRect() - which already returns the angle of the rectangle (rect = cv2.minAreaRect() --> rect[2]). Hope it helps. Cheers!
Here is an example code:
import cv2
import numpy as np
import math
img = cv2.imread('angle.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, threshold = cv2.threshold(gray,170,255,cv2.THRESH_BINARY)
im, contours, hierarchy = cv2.findContours(threshold,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
for c in contours:
    area = cv2.contourArea(c)
    perimeter = cv2.arcLength(c, False)
    if area < 10001 and 100 < perimeter < 1000:
        # first approach - fitting a line and calculating with y=kx+n --> angle=tan^(-1)k
        rows, cols = img.shape[:2]
        [vx, vy, x, y] = cv2.fitLine(c, cv2.DIST_L2, 0, 0.01, 0.01)
        lefty = int((-x*vy/vx) + y)
        righty = int(((cols-x)*vy/vx) + y)
        cv2.line(img, (cols-1, righty), (0, lefty), (0, 255, 0), 2)
        (x1, y1) = (cols-1, righty)
        (x2, y2) = (0, lefty)
        k = (y2-y1)/(x2-x1)
        angle = math.atan(k)*180/math.pi
        print(angle)
        # second approach - cv2.minAreaRect --> returns (center (x,y), (width, height), angle of rotation)
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
        print(rect[2])
cv2.imshow('img2', img)
Original image:
Output:
-3.8493663478518627
-3.7022125720977783
tribol,
it seems like you can take the gradient image G = |Gx| + |Gy| (normalize it to some known range), calculate its histogram, and take the top bins of it. That will give you an approximate mask of the line. Then you can do line fitting. It will give you a good initial guess.
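A minimal sketch of that idea (the percentile cut-off standing in for "top bins" is an assumption):

import cv2
import numpy as np

gray = cv2.imread('img.png', cv2.IMREAD_GRAYSCALE).astype(np.float32)
gx = cv2.Sobel(gray, cv2.CV_32F, 1, 0)
gy = cv2.Sobel(gray, cv2.CV_32F, 0, 1)
g = np.abs(gx) + np.abs(gy)                        # gradient image G = |Gx| + |Gy|
g = cv2.normalize(g, None, 0, 255, cv2.NORM_MINMAX)

mask = g > np.percentile(g, 99)                    # keep only the strongest gradients

ys, xs = np.nonzero(mask)                          # fit a line through the masked points
pts = np.column_stack([xs, ys]).astype(np.float32)
vx, vy, x0, y0 = cv2.fitLine(pts, cv2.DIST_L2, 0, 0.01, 0.01).ravel()
angle = np.degrees(np.arctan2(vy, vx))             # initial guess of the tilt angle
print(angle)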
A very simple way of doing it is as follows... adjust my numbers to suit your knowledge of the data.
Normalise your image to a scale of 0-255.
Choose two points A and B, where A is 10% of the image width in from the left side and B is 10% in from the right side. The distance AB is now 0.8 x 2000, or 1600 px.
Go North from point A sampling your image till you exceed some sensible threshold that means you have met the tilted line. Note the Y value at this point, as YA.
Do the same, going North from point B till you meet the tilted line. Note the Y value at this point, as YB.
The angle you seek is:
arctan((YB - YA) / 1600)
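A minimal sketch of this recipe (the normalization, the threshold, and scanning each column from the bottom upwards are assumptions about the data):

import numpy as np

# img is the 2000x2000 example image from the question
img_n = (255 * (img - img.min()) / img.ptp()).astype(np.uint8)  # normalize to 0-255
h, w = img_n.shape
xa, xb = int(0.1 * w), int(0.9 * w)   # points A and B, 10% in from each side
thresh = 128                          # sensible threshold for meeting the tilted line

def first_crossing(col):
    for y in range(h - 1, -1, -1):    # go "north": from the bottom row upwards
        if col[y] > thresh:
            return y
    return None                       # no crossing found in this column

ya = first_crossing(img_n[:, xa])
yb = first_crossing(img_n[:, xb])
angle = np.degrees(np.arctan((yb - ya) / (xb - xa)))
print(angle)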
Thresholding as suggested by kavko didn't work that well, as the intensity varied from image to image (I could of course consider the histogram of each image to improve this approach). I ended up taking the maximum of the gradient in the y-direction:
def rotate_image(image):
    blur = ndimage.gaussian_filter(image, sigma=10)  # blur image first
    grad = np.gradient(blur, axis=0)                 # take gradient along y-axis
    grad[grad>10000] = 0                             # filter unreasonably high values
    idx_maxline = np.argmax(grad, axis=0)            # get y-indices of max slope = indices of edge
    mean = np.mean(idx_maxline)
    std = np.std(idx_maxline)
    idx = np.arange(idx_maxline.shape[0])
    idx_filtered = idx[(idx_maxline < mean+std) & (idx_maxline > mean-std)]  # filter positions where the highest slope is at a different position (blobs)
    slope, intercept, r_value, p_value, std_err = stats.linregress(idx_filtered, idx_maxline[idx_filtered])
    out = ndimage.rotate(image, slope*180/np.pi, reshape=False)
    return out
out = rotate_image(img)
plt.imshow(out)