Detect lines in Python OpenCV without applying Gaussian Blur

I am detecting lines in a noiseless, programmatically generated PNG file. I would normally use Hough lines, which requires me to first provide edges from a Canny detection, but the first step of Canny detection is to apply a Gaussian blur to eliminate noise. Is there a way I can do edge detection on my original image without ever intentionally blurring it? I suspect this will yield better results than blurring first, since the lines are already perfectly clean and high-contrast.
Here is a simple example using Canny detection on an image. The lines in each group start at 5 pixels wide, then the next line is 4, then 3, 2, and 1. As you can see, the Canny detection doesn't work perfectly (the 2-pixel lines appear smaller than the 1-pixel ones):
Original image:
Edges (Result of canny detection):
Sample code:
import cv2
import numpy as np
import matplotlib.pylab as plot
# img = cv2.imread("8px_and_2px_lines.png")
img = cv2.imread("5-1px_lines.png")
crop_size = 520
img = img[100:crop_size, 100:crop_size]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imwrite("5-1px_lines_cropped.png", img)
cv2.imshow("start", img)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
cv2.imshow("canny", edges)
cv2.imwrite("5-1px_lines_cropped_canny.png", edges)
# plot.imshow(edges, cmap="gray")
# plot.show()
lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)
line_length = 3000
for line in lines:
    rho, theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + line_length * (-b))
    y1 = int(y0 + line_length * (a))
    x2 = int(x0 - line_length * (-b))
    y2 = int(y0 - line_length * (a))
    cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imshow("lines", img)
cv2.waitKey()
Any ideas on how I can do better line detection on these images? I think the Gaussian blur built into the Canny detector is making the lines harder to detect.

One simple way is to threshold, invert so the lines are white, and then skeletonize. Here is code for Python/OpenCV/Skimage.
Input:
import cv2
import numpy as np
import skimage.morphology
img = cv2.imread("lines_horizontal.png")
ht, wd = img.shape[:2]
# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# create a binary thresholded image
thresh = cv2.threshold(gray, 0, 1, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
# invert so lines are white
thresh = 1 - thresh
# apply skeletonization
skeleton = skimage.morphology.skeletonize(thresh)
skeleton = (255*skeleton).clip(0,255).astype(np.uint8)
# save result
cv2.imwrite("lines_horizontal_skeleton.png", skeleton)
# show results
cv2.imshow("skeleton", skeleton)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
Note that there will be some distortion right at the ends from the skeletonization at endpoints.
Note also that the opencv-contrib-python package has a thinning method (cv2.ximgproc.thinning) that is similar to skeletonization, sketched below.
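For reference, a minimal sketch of that thinning alternative; it assumes opencv-contrib-python is installed and reuses the inverted 0/1 thresh from above (cv2.ximgproc.thinning expects a 0/255 uint8 image, hence the scaling):
import cv2
# Sketch: thinning via opencv-contrib-python instead of skimage skeletonize
thin_input = (255 * thresh).astype("uint8")  # scale the 0/1 mask to 0/255
thinned = cv2.ximgproc.thinning(thin_input)  # Zhang-Suen thinning by default
cv2.imwrite("lines_horizontal_thinned.png", thinned)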

Presumably, the posted image does not represent the general case, so my answer is probably inappropriate.
If you get the pixels of a vertical column, nothing is easier than detecting the transitions from white to black and conversely. As the lines are perfectly horizontal, it is enough to do this for a single column (but you can repeat it for every column if you want)!
By the above method, you get both sides of the lines, with their original spacing. If you need a single trace, average the ordinates in pairs, as in the sketch below.
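For completeness, a minimal sketch of that column-scan idea; the mid-gray cutoff of 128 is an assumption, and it presumes dark lines on a white background, as in the question's image:
import cv2
import numpy as np
# Sketch: find the white-to-black (and back) transitions along one column
gray = cv2.imread("5-1px_lines.png", cv2.IMREAD_GRAYSCALE)
col = gray[:, gray.shape[1] // 2] < 128  # True where the pixel is dark (a line)
transitions = np.flatnonzero(np.diff(col.astype(np.int8)))  # rows where the value flips
# each consecutive pair is the top and bottom edge of one line;
# average the pairs to get a single trace per line
centers = [(transitions[i] + transitions[i + 1]) / 2
           for i in range(0, len(transitions) - 1, 2)]
print(centers)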

Related

How to detect clock hands with hough lines detection

I want to get the time from an analog clock. Right now I'm stuck a bit: I managed to get the segmented image (although I couldn't remove the bottom part of it...) and did a Canny detection. The problems I have are the bottom part I couldn't remove, and the detection of the clock hands. My goal is to detect the hands in a way that lets me calculate the angles, and then the time from those angles. I know that I need the Hough Line Transform, but I don't really understand how it works or how to set the parameters.
The original, segmented and the Canny detected pictures:
This is the code I'm using to get there:
import cv2
import numpy as np

img = cv2.imread('clock.jpg')
cv2.imshow('img', img)
cv2.waitKey(0)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.medianBlur(gray, 5)
cv2.imshow('blur', blur)
cv2.waitKey(0)
circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, 1, 20, param1=20, param2=100, minRadius=0, maxRadius=0)
detected_circles = np.uint16(np.around(circles))
circle = detected_circles[0][0]
x = circle[0]
y = circle[1]
r = circle[2]
rect = (x - r, y - r, x+r, y+(r-10))
mask = np.zeros(img.shape[:2], dtype = np.uint8)
bgdModel = np.zeros((1,65), np.float64)
fgdModel = np.zeros((1,65), np.float64)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 1, cv2.GC_INIT_WITH_RECT)
mask2 = np.where((mask == 1) + (mask == 3), 255, 0).astype('uint8')
segmented = cv2.bitwise_and(img, img, mask=mask2)
cv2.imshow('segmented', segmented)
cv2.waitKey(0)
blur = cv2.GaussianBlur(segmented, (11,11), 0)
cv2.imshow('blur2', blur)
cv2.waitKey(0)
canny = cv2.Canny(blur, 30, 150, None, 3)
cv2.imshow('canny', canny)
cv2.waitKey(0)
Here is one way using HoughLinesP in Python/OpenCV. The approach uses thresholding, contours and thinning before getting the Hough Lines. I will leave it to you to compute the angles from the line end points.
Input:
import cv2
import numpy as np
from skimage.morphology import skeletonize
# Read image
img = cv2.imread('clock.jpg')
hh, ww = img.shape[:2]
# convert to gray
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# threshold
thresh = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)[1]
# invert so shapes are white on black background
thresh = 255 - thresh
# get contours and save area
cntrs_info = []
contours = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
index=0
for cntr in contours:
    area = cv2.contourArea(cntr)
    cntrs_info.append((index, area))
    index = index + 1
# sort contours by area
def takeSecond(elem):
    return elem[1]
cntrs_info.sort(key=takeSecond, reverse=True)
# get third largest contour
arms = np.zeros_like(thresh)
index_third = cntrs_info[2][0]
cv2.drawContours(arms,[contours[index_third]],0,(1),-1)
#arms=cv2.ximgproc.thinning(arms)
arms_thin = skeletonize(arms)
arms_thin = (255*arms_thin).clip(0,255).astype(np.uint8)
# get hough lines and draw on copy of input
result = img.copy()
lineThresh = 15
minLineLength = 20
maxLineGap = 100
lines = cv2.HoughLinesP(arms_thin, 1, np.pi/180, lineThresh, None, minLineLength, maxLineGap)
for [line] in lines:
    x1 = line[0]
    y1 = line[1]
    x2 = line[2]
    y2 = line[3]
    cv2.line(result, (x1, y1), (x2, y2), (0, 0, 255), 2)
# save results
cv2.imwrite('clock_thresh.jpg', thresh)
cv2.imwrite('clock_arms.jpg', (255*arms).clip(0,255).astype(np.uint8))
cv2.imwrite('clock_arms_thin.jpg', arms_thin)
cv2.imwrite('clock_lines.jpg', result)
cv2.imshow('thresh', thresh)
cv2.imshow('arms', (255*arms).clip(0,255).astype(np.uint8))
cv2.imshow('arms_thin', arms_thin)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Thresholded image:
Contour of arms:
Thinned (skeleton):
Hough Line Segments on input:
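Since the answer leaves the angle computation to the reader, here is one hedged way to derive an angle from each segment's end points (a sketch, assuming the lines array from the code above; remember that the image y-axis points down):
import math
# Sketch: angle of each detected segment relative to the positive x-axis
for [line] in lines:
    x1, y1, x2, y2 = line
    angle = math.degrees(math.atan2(y2 - y1, x2 - x1))
    print(angle)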
Here's another possible solution. We will try to segment the clock hands and run them through Hough's line transform to detect the lines. This detection will yield all the possible straight lines that pass through the clock hands' pixels, producing multiple lines. You can try to play with the line transform parameters to narrow the result down to the target lines, but you will probably still end up with a cluster of lines. I will cluster these lines using K-Means to get only two lines, regardless of the output of Hough's line transform. These are the steps:
Get a binary mask of the image to isolate the clock hands
Apply some morphology to get rid of the noise
Run the binary mask through Hough's line detection
Use K-means on the multiple lines to get only 2 (average) lines (one per clock hand)
Let's see the code:
# Imports
import cv2
import numpy as np
# Read image
imagePath = "D://opencvImages//"
inputImage = cv2.imread(imagePath+"orFGl.jpg")
# Store deep copy for results:
originalImg = inputImage.copy()
# Convert BGR back to grayscale:
grayInput = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)
# Threshold via Otsu + bias adjustment:
threshValue, binaryImage = cv2.threshold(grayInput, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
The first bit is trivial and produces this binary mask:
We can get rid of the small elements via some morphology. Let's apply an erosion followed by a dilation (a morphological opening) to filter out everything but the larger components, i.e., the clock hands:
# Set morph operation iterations:
opIterations = 1
# Get the structuring element:
structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
# Perform Erode:
erodeImg = cv2.morphologyEx(binaryImage, cv2.MORPH_ERODE, structuringElement, None, None, opIterations, cv2.BORDER_REFLECT101)
# Perform Dilate:
dilateImg = cv2.morphologyEx(erodeImg, cv2.MORPH_DILATE, structuringElement, None, None, opIterations, cv2.BORDER_REFLECT101)
This produces this image:
Very nice, almost all the noise is gone. Let's run this directly through the line detection and check out what kind of results we get. Additionally, I've prepared some lists to store every starting (x1, y1) and ending (x2, y2) point of the lines:
# Set HoughLinesP parameters:
lineThresh = 50
minLineLength = 20
maxLineGap = 100
# Run the line detection:
lines = cv2.HoughLinesP(dilateImg, 1, np.pi/180, lineThresh, None, minLineLength, maxLineGap)
# Prepare some lists to store every coordinate of the detected lines:
X1 = []
X2 = []
Y1 = []
Y2 = []
# Store and draw the lines:
for [currentLine] in lines:
    # First point:
    x1 = currentLine[0]
    y1 = currentLine[1]
    X1.append(x1)
    Y1.append(y1)
    # Second point:
    x2 = currentLine[2]
    y2 = currentLine[3]
    X2.append(x2)
    Y2.append(y2)
    # Draw the lines:
    cv2.line(originalImg, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imshow("Lines", originalImg)
cv2.waitKey(0)
This is the result:
As you can see, there are multiple lines. Luckily, these lines are clustered in two very discernible groups: the left hand and the right hand. If we cluster the four coordinates into two groups, we can get the average starting and ending points of each hand. This can be done by applying a clustering algorithm, in this case K-Means. K-Means needs the four coordinate arrays stacked into one data matrix to produce the two cluster centers, so before handing over our data we need to reshape it the way K-Means expects:
# Reshape the arrays for K-means
X1 = np.array(X1)
Y1 = np.array(Y1)
X2 = np.array(X2)
Y2 = np.array(Y2)
X1dash = X1.reshape(-1,1)
Y1dash = Y1.reshape(-1,1)
X2dash = X2.reshape(-1,1)
Y2dash = Y2.reshape(-1,1)
# Stack the data
Z = np.hstack((X1dash, Y1dash, X2dash, Y2dash))
# K-means operates on 32-bit float data:
floatPoints = np.float32(Z)
# Set the convergence criteria and call K-means:
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
# Set the desired number of clusters
K = 2
ret, label, center = cv2.kmeans(floatPoints, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
The results are in the center array. Here we get our final pair of lines. Let's loop through it and draw them on the original image:
# Loop through the center points
# and draw the lines:
for p in range(len(center)):
    # Get line points:
    print(center[p])
    x1 = int(center[p][0])
    y1 = int(center[p][1])
    x2 = int(center[p][2])
    y2 = int(center[p][3])
    cv2.line(originalImg, (x1, y1), (x2, y2), (0, 255, 0), 1)
cv2.imshow("Lines", originalImg)
cv2.waitKey(0)
This is the final pair of lines (in green):
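To go from these two averaged lines to clock-hand angles, a hedged sketch (assuming the center array from the K-Means step, and that each line's first point (x1, y1) is the end nearer the clock's pivot):
import math
# Sketch: angle of each averaged hand, measured clockwise from 12 o'clock
for p in range(len(center)):
    x1, y1, x2, y2 = center[p]
    # atan2(dx, -dy) because the image y-axis grows downward
    ang = math.degrees(math.atan2(x2 - x1, -(y2 - y1))) % 360
    print("hand angle:", ang)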

Rotate Image to align features with X-axis in OpenCV Python

In the following microscopy image, I extracted the horizontal white line grid using morphological operators in OpenCV. I couldn't completely get rid of the noise, which is why there are some white lines in between. The grid lines need to be parallel to the x-axis. During the microscopic reading process, perfect parallelism cannot be ensured; in this case, the lines are drifting slightly upwards from left to right.
How can I realign the lines to the x-axis so that they are parallel to the lower and upper edges of the image using OpenCV or any other Python package?
I'm relatively new to OpenCV, so if anyone could give me a hint about which operations or functions would help tackle this problem, I'd be grateful.
Thanks!
You may fit lines, get the mean angle and rotate the image.
The suggested solution uses the following stages:
Threshold (binarize) the image.
Apply closing morphological operation for connecting the lines.
Find contours.
Iterate the contours and fit a line for each contour.
Compute the angle of each line, and build a list of angles.
Compute the mean angle of the angles that are "close to the median angle".
Rotate the image by the mean angle.
Here is the code:
import cv2
import numpy as np
import math
img = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE) # Read input image as grayscale.
threshed = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)[1] # threshold (binarize) the image
# Apply closing for connecting the lines
threshed = cv2.morphologyEx(threshed, cv2.MORPH_CLOSE, np.ones((1, 10)))
# Find contours
contours = cv2.findContours(threshed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2] # [-2] indexing takes return value before last (due to OpenCV compatibility issues).
img2 = cv2.cvtColor(threshed, cv2.COLOR_GRAY2BGR) # BGR image - used for drawing
angles = [] # List of line angles.
# Iterate the contours and fit a line for each contour
# Remark: consider ignoring small contours
for c in contours:
    vx, vy, cx, cy = cv2.fitLine(c, cv2.DIST_L2, 0, 0.01, 0.01) # Fit line
    w = img.shape[1]
    cv2.line(img2, (int(cx-vx*w), int(cy-vy*w)), (int(cx+vx*w), int(cy+vy*w)), (0, 255, 0)) # Draw the line for testing
    ang = (180/np.pi)*math.atan2(vy, vx) # Compute the angle of the line.
    angles.append(ang)
angles = np.array(angles) # Convert angles to NumPy array.
# Remove outliers: average only the angles between the 40th and 60th percentiles
lo_val, up_val = np.percentile(angles, (40, 60)) # Get the 40th and 60th percentile values of the angles
mean_ang = np.mean(angles[np.where((angles >= lo_val) & (angles <= up_val))])
print(f'mean_ang = {mean_ang}') # -0.2424
M = cv2.getRotationMatrix2D((img.shape[1]//2, img.shape[0]//2), mean_ang, 1) # Get transformation matrix - for rotating by mean_ang
img = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]), flags=cv2.INTER_CUBIC) # Rotate the image (flags must be passed by keyword; the 4th positional argument is dst)
# Display results
cv2.imshow('img2', img2)
cv2.imshow('img', img)
cv2.waitKey()
cv2.destroyAllWindows()
Result:
img2 (for testing):
img (after rotating):
Note:
The code is just an example - I don't expect it to solve all of your microscopy images.

Why is Opencv/Hough Transform not finding the whole line?

I finished a tutorial on OpenCV for finding lanes, and I am trying to apply it to finding a piece of tape on the floor. I got the code running and set the region of interest, but it only finds a few edges of the tape. I think it has to do with the thickness, but I am not 100% sure. Any help would be appreciated.
import cv2
import numpy as np
import matplotlib.pyplot as plt
def canny(image):
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    canny = cv2.Canny(blur, 50, 150)
    return canny

def display_lines(image, lines):
    line_image = np.zeros_like(image)
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line.reshape(4)
            cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)
    return line_image

def region_of_interest(image):
    height = image.shape[0]
    polygons = np.array([
        [(200, height), (400, height), (355, 0)]
    ])
    mask = np.zeros_like(image)
    cv2.fillPoly(mask, polygons, 255)
    masked_image = cv2.bitwise_and(image, mask)
    return masked_image
image = cv2.imread('tape3.jpg')
lane_image = np.copy(image)
canny_image = canny(image)
cropped_image = region_of_interest(canny_image)
lines = cv2.HoughLinesP(cropped_image, 2, np.pi/180, 100, np.array([]), minLineLength=40, maxLineGap=5)
line_image = display_lines(lane_image, lines)
combo_image = cv2.addWeighted(lane_image, 0.8, line_image, 1, 1)
# cv2 print image
print(region_of_interest(image))
cv2.imshow("result", combo_image)
cv2.waitKey(0)
This may not answer your original question, but this could be an alternate way to achieve what you're looking for.
I started by thresholding the grayscale of the image to try to isolate the tape.
Then I used OpenCV's findContours to get the segmentation points of each white blob.
The thresholding method I used is sensitive to light and shadow, so you may have to find some other thresholding method if this isn't a workable constraint. If different-colored tape is a concern, you can threshold off other values (convert to HSV or LAB and threshold off the H or B channels respectively to look for red).
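A minimal sketch of those two steps, reusing the question's image; the fixed threshold of 200 is an assumption you would tune (or replace with Otsu) for your lighting:
import cv2
# Sketch: isolate the tape by thresholding, then find each white blob
img = cv2.imread('tape3.jpg')  # image from the question
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)[1]  # 200 is a guess; tune for your lighting
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
cv2.drawContours(img, contours, -1, (0, 0, 255), 2)
cv2.imshow("tape contours", img)
cv2.waitKey(0)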
Edit:
If you still want to use HoughLinesP, here's a working example with your picture.
First I applied canny:
Then I used the HoughLinesP function:
I've never used HoughLinesP before, so I'm not sure of the potential pitfalls, but it seems to work. It does create a bunch of overlapping lines with these parameters; you'll have to play around with it a bit.
Relevant Code:
import cv2
import numpy as np

# read the image and convert to grayscale (setup assumed from the question)
img = cv2.imread('tape3.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# canny
canned = cv2.Canny(gray, 591, 269)
# dilate so the thin Canny edges are thick enough for the Hough vote
kernel = np.ones((3, 3), np.uint8)
canned = cv2.dilate(canned, kernel, iterations=1)
# hough
lines = cv2.HoughLinesP(canned, rho=1, theta=1*np.pi/180, threshold=30, minLineLength=10, maxLineGap=20)
Edit 2:
I looked at the documentation for the function, and the third parameter (theta) refers to the angle resolution. I think it might not have worked in your code because you didn't run dilation on the image after Canny. With a one-degree search resolution, it's not hard to imagine that we could miss the very thin line that Canny returns. It might even be worth dilating the lines more than I did in the example, by using a larger kernel or dilating multiple times.
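If the single 3x3 dilation is not enough, here is a hedged variant of that idea; the kernel size and iteration count are guesses to tune, and canned is the Canny output from the snippet above:
import cv2
import numpy as np
# Sketch: thicken the Canny edges more aggressively before the Hough step
kernel = np.ones((5, 5), np.uint8)  # larger kernel than the 3x3 used above
canned = cv2.dilate(canned, kernel, iterations=2)  # or keep 3x3 and raise iterations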

How to apply the proper threshold to remove edges on binary plates

I am facing a contour problem. So that the license plate can be read correctly, it is best to remove the contours, and then perhaps apply some OCR.
For example, if I want to use this photo, you can see that along the edges it has a white outline. How could I eliminate those white outlines in a generic way, so it can be used on more license plates?
I am thinking of applying a threshold along the two axes (horizontally and vertically) again to clean possible white borders. Any ideas?
Here is a little of what I have made:
# Creating copies of the original images
output_cp = output.copy()
img_cp = straightened.copy()
# threshold
ret,thresh = cv2.threshold(output_cp, 215, 255, cv2.THRESH_BINARY_INV)
imshow(thresh)
But then, when it is supposed to work, it doesn't, because it is a binary image.
Pseudo-code, where xxx is the contour (I don't know which contour to apply):
mask = np.zeros(image.shape, np.uint8)
mask_cnt = cv2.drawContours(mask.copy(), [xxx], 0, (255,255,255,255), -1)
removed = cv2.subtract(mask_cnt, image)
plt.figure()
plt.imshow(removed, cmap='gray')
Any help is welcome!
To remove the white margin around the plate, you can:
find the external contours of the image with findContours
take its rotated bounding box with minAreaRect
compute a transformation to correct the orientation with getPerspectiveTransform
then apply it with warpPerspective
This removes the margin and also corrects the orientation of the plate, which should make it much easier to read by any OCR.
Here is a python implementation of my solution:
#!/usr/bin/env python3
import numpy as np
import cv2
img = cv2.imread("plate.png")
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# invert image and detect contours
inverted = cv2.bitwise_not(gray)
contours, hierarchy = cv2.findContours(inverted,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
# get the biggest contour
biggest_index = -1
biggest_area = -1
i = 0
for c in contours:
    area = cv2.contourArea(c)
    if area > biggest_area:
        biggest_area = area
        biggest_index = i
    i = i+1
print("biggest area: " + str(biggest_area) + " index: " + str(biggest_index))
cv2.drawContours(img, contours, biggest_index, [0,0,255])
center, size, angle = cv2.minAreaRect(contours[biggest_index])
rot_mat = cv2.getRotationMatrix2D(center, angle, 1.)
#cv2.warpPerspective()
print(size)
dst = cv2.warpAffine(gray, rot_mat, (int(size[0]), int(size[1])))
mask = dst * 0
x1 = max([int(center[0] - size[0] / 2)+1, 0])
y1 = max([int(center[1] - size[1] / 2)+1, 0])
x2 = int(center[0] + size[0] / 2)-1
y2 = int(center[1] + size[1] / 2)-1
point1 = (x1, y1)
point2 = (x2, y2)
print(point1)
print(point2)
cv2.rectangle(dst, point1, point2, [0,0,0])
cv2.rectangle(mask, point1, point2, [255,255,255], cv2.FILLED)
masked = cv2.bitwise_and(dst, mask)
cv2.imshow("img", img)
cv2.imshow("dst", dst)
cv2.imshow("masked", masked)
cv2.imshow("mask", mask)
key = -1
while key != 27:
    key = cv2.waitKey(1)
And the resulting image :
This is not perfect, but I think it is a good start, and a slightly different approach than thresholding.
You might also try to apply some morphological operators to close gaps or remove dirty parts.
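The step list above mentions getPerspectiveTransform and warpPerspective, while the code uses getRotationMatrix2D with warpAffine instead. For reference, here is a hedged sketch of the perspective variant, reusing contours, biggest_index, and gray from the code above; the corner order returned by cv2.boxPoints should be verified against your image:
import cv2
import numpy as np
# Sketch: warp the rotated bounding box of the biggest contour to an
# axis-aligned rectangle in one step
rect = cv2.minAreaRect(contours[biggest_index])
box = cv2.boxPoints(rect).astype(np.float32)  # 4 corners of the rotated rect
w, h = int(rect[1][0]), int(rect[1][1])
# boxPoints typically returns bottom-left, top-left, top-right, bottom-right;
# verify this order for your input
dst_pts = np.float32([[0, h - 1], [0, 0], [w - 1, 0], [w - 1, h - 1]])
M = cv2.getPerspectiveTransform(box, dst_pts)
plate = cv2.warpPerspective(gray, M, (w, h))
cv2.imshow("plate", plate)
cv2.waitKey(0)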

Hough transformation with Open CV python

I'm trying to apply the probabilistic Hough transform to a tube, and I already have a well-filtered image (edges).
My need is to recognize any of these straight lines (attached figure) that are in the middle of the tube so I can detect the liquid level, but I cannot do this. Does anyone know how I can solve this?
import cv2
import numpy as np
img = cv2.imread('tube.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imwrite('gray.png',gray)
edges = cv2.Canny(gray,350,720,apertureSize = 3)
cv2.imwrite('edges.png',edges)
minLineLength = 30
maxLineGap = 0
lines = cv2.HoughLinesP(edges,1,np.pi/180,10,minLineLength,maxLineGap)
for x1, y1, x2, y2 in lines[0]:
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 4)
cv2.imwrite('houghlines.png',img)
My actual results are in the 'houghlines' attached figure. What appears is a green, vertical line, but I need a horizontal one so I can detect the liquid level.
Thanks in advance.
tube
edges
houghlines
I reviewed your code, modified some things, and looked over a little of the OpenCV documentation.
I have this result, I don't know if it's what you need.
import cv2
import numpy as np
img = cv2.imread('tube.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imwrite('gray.png',gray)
edges = cv2.Canny(gray,350,720, apertureSize = 3)
cv2.imwrite('edges.png',edges)
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi / 180 # angular resolution in radians of the Hough grid
threshold = 10 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 50 # minimum number of pixels making up a line
max_line_gap = 20 # maximum gap in pixels between connectable line segments
line_image = np.copy(img) * 0 # creating a blank to draw lines on
# Run Hough on edge detected image
# Output "lines" is an array containing endpoints of detected line segments
lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),
                        min_line_length, max_line_gap)
for line in lines:
    for x1, y1, x2, y2 in line:
        cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 5)
lines_edges = cv2.addWeighted(img, 0.8, line_image, 1, 0)
cv2.imwrite('houghlines.png',lines_edges)
houghlines.png
Good luck.
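Since the goal is the horizontal liquid level, it may also help to filter the detected segments by slope and keep only the near-horizontal ones. A sketch, assuming the lines array and line_image from the code above (the 10-degree tolerance is a guess to tune):
import math
# Sketch: keep only near-horizontal segments
for line in lines:
    for x1, y1, x2, y2 in line:
        angle = abs(math.degrees(math.atan2(y2 - y1, x2 - x1)))
        if angle < 10 or angle > 170:  # within ~10 degrees of horizontal
            cv2.line(line_image, (x1, y1), (x2, y2), (0, 255, 0), 5)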
Check if it's what you need, regards.
import cv2
import numpy as np
import math
img = cv2.imread('tube.png')
#img = cv2.resize(img,(360,480))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,350,720, apertureSize = 3)
#cv2.imshow("edges", edges)
rho = 1
#theta = np.pi / 180 #CHANGE FOR MATH.pi/1
threshold = 10 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 2 # minimum number of pixels making up a line
max_line_gap = 480 # maximum gap in pixels between connectable line segments
line_image = np.copy(img) * 0 # creating a blank to draw lines on
lines = cv2.HoughLinesP(edges, rho, math.pi/1, threshold, np.array([]),
                        min_line_length, max_line_gap)
#coordinates
dot1 = (lines[0][0][0],lines[0][0][1])
dot2 = (lines[0][0][2],lines[0][0][3])
dot3 = (lines[0][0][1],lines[0][0][1])
cv2.line(img, dot1, dot2, (255,0,0), 3)
cv2.line(img, dot1, dot3, (0,255,0), 3)
cv2.imshow("output", img)
length = lines[0][0][1] - lines[0][0][3]
print ('Pixels Level', length)
if cv2.waitKey(0) & 0xFF == 27:
    cv2.destroyAllWindows()
lines img
terminal output
Good luck.
