Rotate (align) an image by a line along the y-axis - python

I am trying to rotate (align) an image, which contains a line (with two points P1 and P2), along the y-axis.
Original image:
Note: the green area represents the original image
Result should be:
Note: the red area represents the original image after rotation
So I need to calculate the angle between the line defined by P1(x1,y1) and P2(x2,y2) and the y-axis.
Note: the green line represents the y-axis
My code is:
import cv2
import numpy as np
from math import *
import math
import imutils
height = 500
width = 500
original_image = np.zeros((height,width,3), np.uint8)
original_image[:] = (0,255,0)
x1 = 400
y1 = 50
P1 = (x1, y1)
x2 = 100
y2 = 300
P2 = (x2, y2)
cv2.line(original_image, P1, P2, (0, 0, 0), 3)
deltaY = y1 - y2
deltaX = x1 - x2
angleInDegrees = atan2(deltaY, deltaX) * 180 / math.pi
print(angleInDegrees)
rotated_image = imutils.rotate_bound(original_image, angleInDegrees)
cv2.imshow("Original", original_image)
cv2.imshow("Rotated", rotated_image)
cv2.waitKey(0)
But my rotated_image is not properly aligned.
Result looks like:
How should I fix it?

First off, you are calculating the wrong angle. The angle you are calculating is between a vector originating at the origin and ending at P1 and a vector originating at the origin and ending at P2.
The angle you need is between the vector starting at P1 and ending at P2 [P2 - P1] and a vector that represents the direction of the y-axis, which would be [0, 1].
Secondly, you have to take into account that your origin is in the top-left corner, so you need to reflect the angle once calculated.
import cv2
import numpy as np
import math
import imutils

height = 500
width = 500
original_image = np.zeros((height, width, 3), np.uint8)
original_image[:] = (0, 255, 0)

x1 = 400
y1 = 50
P1 = np.array([x1, y1])
x2 = 100
y2 = 300
P2 = np.array([x2, y2])

# checks orientation of p vector & selects appropriate y_axis_vector
if (P2[1] - P1[1]) < 0:
    y_axis_vector = np.array([0, -1])
else:
    y_axis_vector = np.array([0, 1])
if (P2[0] - P1[0]) < 0 and (P2[1] - P1[1]) < 0:
    y_axis_vector = np.array([0, 1])

p_unit_vector = (P2 - P1) / np.linalg.norm(P2 - P1)
angle_p_y = np.arccos(np.dot(p_unit_vector, y_axis_vector)) * 180 / math.pi

cv2.line(original_image, tuple(P1), tuple(P2), (0, 0, 0), 3)
print(angle_p_y)
print(P2 - P1)

rotated_image = imutils.rotate_bound(original_image, -angle_p_y)
cv2.imshow("Original", original_image)
cv2.imshow("Rotated", rotated_image)
cv2.waitKey(0)

Related

Cannot read the second arm of the Analog Clock correctly by using opencv python

Thank you https://github.com/HassanAdamm, with your help I was able to continue the code further, but I still cannot display the correct second hand of the analog clock with OpenCV. The hour and minute hands are successfully detected with HoughLinesP(); I am unable to separate the second hand from the image. Below is my working code, and I hope you guys can help me with this!
import cv2
import math
import numpy as np
import tkinter as tk
from matplotlib import pyplot as plt
from math import sqrt, acos, degrees

# Reading the input image and converting the original RGB to a grayscale image
kernel = np.ones((5, 5), np.uint8)
img1 = cv2.imread('input1.jpg')
img = cv2.imread('input1.jpg', 0)
img_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)

# Applying a binary threshold to the image
ret, thresh = cv2.threshold(img_gray, 50, 255, cv2.THRESH_BINARY)

# Create mask
height, width = img.shape
mask = np.zeros((height, width), np.uint8)
edges = cv2.Canny(thresh, 100, 200)

# Circle detection
cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
circles = cv2.HoughCircles(img_gray, cv2.HOUGH_GRADIENT, 1.2, 100)
for i in circles[0, :]:
    i[2] = i[2] + 4
    # cv2.circle(image, center_coordinates, radius, color, thickness)
    cv2.circle(mask, (int(i[0]), int(i[1])), int(i[2]), (255, 255, 255), thickness=-1)

# Copy that image using that mask
masked_data = cv2.bitwise_and(img1, img1, mask=mask)

# Apply threshold
_, thresh = cv2.threshold(mask, 1, 255, cv2.THRESH_BINARY)

# Find contour
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
x, y, w, h = cv2.boundingRect(contours[0])

# Crop masked_data
crop = masked_data[y + 30:y + h - 30, x + 30:x + w - 30]
height, width, channel = crop.shape
blur_crop = cv2.GaussianBlur(crop, (5, 5), 0)
edges = cv2.Canny(blur_crop, 50, 150)

# Line segments
line_image = np.copy(crop) * 0
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 15, np.array([]), 100, 10)
l = []
xl1, xl2, yl1, yl2 = 0, 0, 0, 0  # long   -> l
xm1, xm2, ym1, ym2 = 0, 0, 0, 0  # medium -> m
xs1, xs2, ys1, ys2 = 0, 0, 0, 0  # short  -> s

# Getting the values from the lines
for line in lines:
    x1, y1, x2, y2 = line[0]
    dx = x2 - x1
    if (dx < 0):
        dx = dx * (-1)
    dy = y2 - y1
    if (dy < 0):
        dy = dy * (-1)
    hypo = sqrt(dx**2 + dy**2)
    l.append(hypo)
l.sort(reverse=True)

s, m, h = 0, 0, 0
for f in range(len(l)):
    for line in lines:
        # getting the values from the line
        x1, y1, x2, y2 = line[0]
        # cv2.line(crop, (x1, y1), (x2, y2), (0, 255, 0), 3)
        dx = x2 - x1
        if (dx < 0):
            dx = dx * (-1)
        dy = y2 - y1
        if (dy < 0):
            dy = dy * (-1)
        hypo2 = sqrt(dx**2 + dy**2)
        if (hypo2 == l[0]):
            m = hypo2
            xl1 = x1
            xl2 = x2
            yl1 = y1
            yl2 = y2
            # getting line region
            cv2.line(crop, (xl1, yl1), (xl2, yl2), (255, 0, 0), 3)
        if (m == l[0]):
            if (hypo2 == l[f]):
                if ((sqrt((xl2 - x2)**2 + (yl2 - y2)**2)) > 20):
                    if ((sqrt((xl1 - x1)**2 + (yl1 - y1)**2)) > 20):
                        xs1 = x1
                        xs2 = x2
                        ys1 = y1
                        ys2 = y2
                        # getting line region
                        cv2.line(crop, (xl1, yl1), (xl2, yl2), (0, 255, 0), 5)
                        h = 1
                        break

# Calculate center point
xcenter = width / 2
ycenter = height / 2

# Determine the coordinates of the end point (farther from the center)
def coordinates(x1, y1, x2, y2):
    a = abs(xcenter - x1)
    b = abs(xcenter - x2)
    if (a > b):
        x_coor = x1
        y_coor = y1
    else:
        x_coor = x2
        y_coor = y2
    return x_coor, y_coor

xhour, yhour = coordinates(xs1, ys1, xs2, ys2)
xmin, ymin = coordinates(xl1, yl1, xl2, yl2)
xsec, ysec = coordinates(xl1, yl1, xl2, yl2)
cv2.line(crop, (xs1, ys1), (xs2, ys2), (0, 255, 0), 5)

# Calculate the hour, minute, second hands by the law of cosines
def law_of_cosines(x, y):
    l1 = sqrt(((xcenter - x)**2) + ((ycenter - y)**2))
    l2 = ycenter
    l3 = sqrt(((xcenter - x)**2) + ((0 - y)**2))
    cos_theta = ((l1**2) + (l2**2) - (l3**2)) / (2 * l1 * l2)
    theta_radian = acos(cos_theta)
    theta = math.degrees(theta_radian)
    return theta

theta_hour = law_of_cosines(xhour, yhour)
theta_min = law_of_cosines(xmin, ymin)
theta_sec = law_of_cosines(xsec, ysec)

def right_or_not(x):
    if (x > xcenter):
        right = 1
    else:
        right = 0
    return right

hour_right = right_or_not(xhour)
min_right = right_or_not(xmin)
sec_right = right_or_not(xsec)

def time_cal(x, y, z):
    if (z == xhour):
        if (x == 1):
            a = int(y / 30)
        else:
            a = 12 - int(y / 30)
        if a == 0:
            a = 12
    else:
        if (x == 1):
            a = int(y / 6)
        else:
            a = 60 - int(y / 6)
    if (z == xcenter):
        a = 30
    return a

hour = time_cal(hour_right, theta_hour, xhour)
minute = time_cal(min_right, theta_min, xmin)
sec = time_cal(sec_right, theta_sec, xsec)

# Display window
canvas = tk.Tk()
canvas.title("Analog to Digital")
canvas.geometry("500x250")
digit = tk.Label(canvas, font=("ds-digital", 65, "bold"), bg="white", fg="blue", bd=80)
digit.grid(row=0, column=1)

# Display result
def display(hour, minute, sec):
    value = "{0:0=2d}:{1:0=2d}:{2:0=2d}".format(hour, minute, sec)
    digit.config(text=value)
    print(value)

display(hour, minute, sec)
canvas.mainloop()

for line in lines:
    for x1, y1, x2, y2 in line:
        cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 1)
lines_edges = cv2.addWeighted(crop, 0.8, line_image, 1, 0)
cv2.imshow('Line Image', line_image)
cv2.imshow('Crop', crop)
cv2.waitKey(0)
There are a lot of possible traps in this kind of thing, because each hand generates two lines that are not exactly parallel, some interactions may make them appear shorter, etc.
But in your case, I bet the problem is far simpler:
xhour, yhour = coordinates(xs1, ys1, xs2, ys2)
xmin, ymin = coordinates(xl1, yl1, xl2, yl2)
xsec, ysec = coordinates(xl1, yl1, xl2, yl2)
I am pretty sure one of those should be coordinates(xm1, ym1, xm2, ym2).
Edit after your comment: so, we are in a worse place, because what you have is a computer vision problem, not just a Python problem.
There is no clear-cut solution for that, but here are a few hints about what you could do.
You could identify the center of the clock (you've already done it, to draw a circle, I think), and use the distance to the center rather than the length of the line.
You can take advantage of that to filter out lines that don't go through the center, or almost through it.
Since the lines are the borders of the hands, and the hands are slightly triangular, how close a line comes to the center is an indication of which hand it belongs to. The hour and minute hand lines don't cross exactly at the center of the circle; the second hand's lines come closer to it.
Besides, you should expect at least 2 lines per hand (more in reality; that's how Hough works), one on each side of the hand. So you can take advantage of that to make the angle computation more reliable (by computing the median line, which goes through the center) and the length computation as well, and to avoid counting the same hand twice.
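For instance, here is a minimal sketch of such a center-distance filter (my own illustration, not taken from your code): it assumes lines comes from cv2.HoughLinesP as in your script, that (xcenter, ycenter) is the clock center, and that max_dist is a threshold you tune per image.

import numpy as np

def distance_to_center(segment, xcenter, ycenter):
    # perpendicular distance from the clock center to the infinite line
    # through (x1, y1)-(x2, y2)
    x1, y1, x2, y2 = segment
    dx, dy = x2 - x1, y2 - y1
    return abs(dy * xcenter - dx * ycenter + x2 * y1 - y2 * x1) / np.hypot(dx, dy)

def keep_center_lines(lines, xcenter, ycenter, max_dist=15):
    # keep only segments whose supporting line passes near the center
    return [l[0] for l in lines if distance_to_center(l[0], xcenter, ycenter) <= max_dist]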
Also, you could compute angles from all lines: if there are 3 clearly separated angles, you know that all the angles you are looking for are there. The minute and second angles come from the long hands (and you can discriminate between those because of the thicker, more triangular shape of the minute hand and the narrower shape of the second hand, which results in bigger variability of line directions for the minute hand than for the second hand). The hour angle comes from the short one.
You can also try to take advantage of the "tail" of the second hand: check whether you find some dark pixels in the opposite direction of a hand. If you don't, it is not the second hand.
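A rough sketch of that tail test (again my own, assumption-laden illustration: img_gray is the grayscale clock image from your code, center the clock center, tip the far endpoint of a hand, and the thresholds are guesses to tune):

def has_tail(img_gray, center, tip, tail_len=20, dark_thresh=100):
    # walk from the center away from the tip and count dark pixels
    cx, cy = center
    dx, dy = tip[0] - cx, tip[1] - cy
    norm = max(np.hypot(dx, dy), 1e-6)
    ux, uy = dx / norm, dy / norm
    dark = 0
    for t in range(5, tail_len):
        px, py = int(cx - t * ux), int(cy - t * uy)
        if 0 <= py < img_gray.shape[0] and 0 <= px < img_gray.shape[1] and img_gray[py, px] < dark_thresh:
            dark += 1
    return dark > (tail_len - 5) // 2  # mostly dark -> likely the second hand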
You could also use morphological operators to erode the black pixels before Canny/Hough, so that you know the second hand has disappeared, because it is too narrow. You'll find 2 hands from there. Then redo the job without erosion: the extra hand you find is the second hand.
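A sketch of that erosion trick, reusing img_gray from your code (the kernel size and iteration count are guesses to tune against the actual hand widths):

_, inv = cv2.threshold(img_gray, 50, 255, cv2.THRESH_BINARY_INV)  # hands become white
kernel = np.ones((5, 5), np.uint8)
thick_only = cv2.erode(inv, kernel, iterations=1)  # the thin second hand vanishes
lines_thick = cv2.HoughLinesP(cv2.Canny(thick_only, 50, 150), 1, np.pi / 180, 15, None, 100, 10)
lines_all = cv2.HoughLinesP(cv2.Canny(inv, 50, 150), 1, np.pi / 180, 15, None, 100, 10)
# segments present in lines_all but with no close match in lines_thick
# belong to the second hand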
Of course, there is the case where some hands are superimposed to deal with. If you are confident that, after trying some of these ideas, you would have found 3 hands if there were 3, then you can trust that 2 hands are at the same position. You could also use your knowledge of the previous detection (you know how the hands are supposed to move).
At last, if you are not specially set on using line detection only, you could also simply watch the pixels on some circles. A circle whose center is the center of the clock, and whose radius is as big as possible but not big enough to include the digits, should be crossed by two hands (minutes and seconds), and it will be quite easy to spot that one (minutes) is thicker than the other (seconds). If there is only one, then you know that the minute and second hands are superimposed. A smaller circle should be crossed by 3 hands; the extra one is the hour hand. If you can't find an extra one, and have 2 hands (the same as on the bigger circle), then the hour hand is superimposed with either the minute hand or the second hand. If it is the second hand, then it should appear a lot thicker.
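That circle-sampling idea could look roughly like this (a sketch under the assumptions that img_gray is the grayscale image, the radius stays inside the dial, and 100 is a usable darkness threshold):

def hands_on_circle(img_gray, center, radius, dark_thresh=100):
    # collect dark runs along a circle; each run is a hand crossing and
    # its angular width reflects the thickness of that hand
    runs, in_run, start = [], False, 0
    for deg in range(360):
        a = np.deg2rad(deg)
        px = int(center[0] + radius * np.cos(a))
        py = int(center[1] + radius * np.sin(a))
        dark = img_gray[py, px] < dark_thresh
        if dark and not in_run:
            in_run, start = True, deg
        elif not dark and in_run:
            in_run = False
            runs.append((start, deg - start))
    return runs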

How to rotate a rectangle shape with cv2 - python

I have a simple rectangle that I just want to rotate by any input angle.
My code is:
import cv2
import numpy as np
imgc = np.zeros((500, 500, 3), np.uint8)
p0 = (100, 100)
p1 = (100 , 150)
p2 = (150, 150)
p3 = (150, 100)
pp = np.array([p0, p1, p2, p3])
cv2.drawContours(imgc, [pp], 0, (155, 155, 155), -1, cv2.LINE_AA)
cv2.imshow("image",imgc)
cv2.waitKey()
What you need are rotation matrices. But you need to remember that this rotates a point by a given angle (in radians) around the ORIGIN.
You need to move your points to the origin, rotate them, and move them back by the same amount.
Here is what it looks like when you break all the dot-product steps down into one equation:
def rotate(points, angle):
    ANGLE = np.deg2rad(angle)
    c_x, c_y = np.mean(points, axis=0)
    return np.array(
        [
            [
                c_x + np.cos(ANGLE) * (px - c_x) - np.sin(ANGLE) * (py - c_y),
                c_y + np.sin(ANGLE) * (px - c_x) + np.cos(ANGLE) * (py - c_y)
            ]
            for px, py in points
        ]
    ).astype(int)
Please notice: drawContours expects points as integers, not floats, so you need to convert your arrays to int.
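For example, applied to the rectangle from the question (this just reuses pp and imgc from your snippet):

rotated_pp = rotate(pp, 45)
cv2.drawContours(imgc, [rotated_pp], 0, (255, 0, 255), -1, cv2.LINE_AA)
cv2.imshow("image", imgc)
cv2.waitKey()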
Here the blue rectangle was the original one and the magenta rectangle is the one rotated by 45 degrees:

Difficulty calculating slope for a set of rotated and shifted ellipses, sometimes inverted, sometimes completely wrong

I am using OpenCV-Python to fit an ellipse to the shape of a droplet.
Then I choose a line, which represents the surface the droplet is resting on.
I calculate the tangents at the intersection of the surface and the ellipse to get the contact angle of the droplet.
It works most of the time, but in some cases, the tangents are flipped upside down or just wrong.
It seems that the calculation for the slope of the tangent fails.
Can someone tell me why this happens?
Here you can see how it should look (surface at y=250):
And this is the result when I choose a surface level of y=47:
I did some research: it seems I need to detect which of the two axes maj_ax, min_ax was parallel to the x-axis before the ellipse got rotated by phi, or else the slope calculation algorithm fails.
What am I doing wrong?
Here is a minimal reproducible example:
from math import cos, sin, pi, sqrt, tan, atan2, radians
import cv2

class Droplet():
    def __init__(self):
        self.is_valid = False
        self.angle_l = 0
        self.angle_r = 0
        self.center = (0, 0)
        self.maj = 0
        self.min = 0
        self.phi = 0.0
        self.tilt_deg = 0
        self.foc_pt1 = (0, 0)
        self.foc_pt2 = (0, 0)
        self.tan_l_m = 0
        self.int_l = (0, 0)
        self.line_l = (0, 0, 0, 0)
        self.tan_r_m = 0
        self.int_r = (0, 0)
        self.line_r = (0, 0, 0, 0)
        self.base_diam = 0

def evaluate_droplet(img, y_base) -> Droplet:
    drplt = Droplet()
    crop_img = img[:y_base, :]
    shape = img.shape
    height = shape[0]
    width = shape[1]
    # values only for 8bit images!
    bw_edges = cv2.Canny(crop_img, 76, 179)
    contours, hierarchy = cv2.findContours(bw_edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if len(contours) == 0:
        raise ValueError('No contours found!')
    edge = max(contours, key=cv2.contourArea)
    (x0, y0), (maj_ax, min_ax), phi_deg = cv2.fitEllipse(edge)
    phi = radians(phi_deg)  # to radians
    a = maj_ax / 2
    b = min_ax / 2
    intersection = calc_intersection_line_ellipse((x0, y0, a, b, phi), (0, y_base))
    if intersection is None:
        raise ValueError('No intersections found')
    # select left and right intersection points
    x_int_l = min(intersection)
    x_int_r = max(intersection)
    foc_len = sqrt(abs(a**2 - b**2))
    # calc slope and angle of tangent
    m_t_l = calc_slope_of_ellipse((x0, y0, a, b, phi), x_int_l, y_base)
    angle_l = pi - atan2(m_t_l, 1)
    m_t_r = calc_slope_of_ellipse((x0, y0, a, b, phi), x_int_r, y_base)
    angle_r = atan2(m_t_r, 1) + pi
    drplt.angle_l = angle_l
    drplt.angle_r = angle_r
    drplt.maj = maj_ax
    drplt.min = min_ax
    drplt.center = (x0, y0)
    drplt.phi = phi
    drplt.tilt_deg = phi_deg
    drplt.tan_l_m = m_t_l
    drplt.tan_r_m = m_t_r
    drplt.line_l = (int(round(x_int_l - (int(round(y_base)) / m_t_l))), 0, int(round(x_int_l + ((height - int(round(y_base))) / m_t_l))), int(round(height)))
    drplt.line_r = (int(round(x_int_r - (int(round(y_base)) / m_t_r))), 0, int(round(x_int_r + ((height - int(round(y_base))) / m_t_r))), int(round(height)))
    drplt.int_l = (x_int_l, y_base)
    drplt.int_r = (x_int_r, y_base)
    drplt.foc_pt1 = (x0 + foc_len * cos(phi), y0 + foc_len * sin(phi))
    drplt.foc_pt2 = (x0 - foc_len * cos(phi), y0 - foc_len * sin(phi))
    drplt.base_diam = x_int_r - x_int_l
    drplt.is_valid = True
    # draw ellipse and lines
    img = cv2.drawContours(img, contours, -1, (100, 100, 255), 2)
    img = cv2.drawContours(img, edge, -1, (255, 0, 0), 2)
    img = cv2.ellipse(img, (int(round(x0)), int(round(y0))), (int(round(a)), int(round(b))), int(round(phi * 180 / pi)), 0, 360, (255, 0, 255), thickness=1, lineType=cv2.LINE_AA)
    y_int = int(round(y_base))
    img = cv2.line(img, (int(round(x_int_l - (y_int / m_t_l))), 0), (int(round(x_int_l + ((height - y_int) / m_t_l))), int(round(height))), (255, 0, 255), thickness=1, lineType=cv2.LINE_AA)
    img = cv2.line(img, (int(round(x_int_r - (y_int / m_t_r))), 0), (int(round(x_int_r + ((height - y_int) / m_t_r))), int(round(height))), (255, 0, 255), thickness=1, lineType=cv2.LINE_AA)
    img = cv2.ellipse(img, (int(round(x_int_l)), y_int), (20, 20), 0, 0, -int(round(angle_l * 180 / pi)), (255, 0, 255), thickness=1, lineType=cv2.LINE_AA)
    img = cv2.ellipse(img, (int(round(x_int_r)), y_int), (20, 20), 0, 180, 180 + int(round(angle_r * 180 / pi)), (255, 0, 255), thickness=1, lineType=cv2.LINE_AA)
    img = cv2.line(img, (0, y_int), (width, y_int), (255, 0, 0), thickness=2, lineType=cv2.LINE_AA)
    img = cv2.putText(img, '<' + str(round(angle_l * 180 / pi, 1)), (5, y_int - 5), cv2.FONT_HERSHEY_COMPLEX, .5, (0, 0, 0))
    img = cv2.putText(img, '<' + str(round(angle_r * 180 / pi, 1)), (width - 80, y_int - 5), cv2.FONT_HERSHEY_COMPLEX, .5, (0, 0, 0))
    cv2.imshow('Test', img)
    cv2.waitKey(0)
    return drplt

def calc_intersection_line_ellipse(ellipse_pars, line_pars):
    """
    Calculates intersection(s) of an ellipse with a line.
    :param ellipse_pars: tuple (x0, y0, a, b, phi): x0, y0 center of the ellipse; a, b semi-axes of the ellipse; phi tilt relative to the x-axis
    :param line_pars: tuple (m, t): m is the slope and t the intercept of the intersecting line
    :returns: x-coordinate(s) of the intersection(s) as a tuple or float, or None if none are found
    """
    # -->> http://quickcalcbasic.com/ellipse%20line%20intersection.pdf
    (x0, y0, h, v, phi) = ellipse_pars
    (m, t) = line_pars
    y = t - y0
    try:
        a = v**2 * cos(phi)**2 + h**2 * sin(phi)**2
        b = 2 * y * cos(phi) * sin(phi) * (v**2 - h**2)
        c = y**2 * (v**2 * sin(phi)**2 + h**2 * cos(phi)**2) - (h**2 * v**2)
        det = b**2 - 4 * a * c
        if det > 0:
            x1 = int(round((-b - sqrt(det)) / (2 * a) + x0))
            x2 = int(round((-b + sqrt(det)) / (2 * a) + x0))
            return x1, x2
        elif det == 0:
            x = int(round(-b / (2 * a)))
            return x
        else:
            return None
    except Exception as ex:
        raise ex

def calc_slope_of_ellipse(ellipse_pars, x, y):
    """
    Calculates the slope of the tangent at the point (x, y); the point needs to be on the ellipse!
    :param ellipse_pars: tuple (x0, y0, a, b, phi): x0, y0 center of the ellipse; a, b semi-axes of the ellipse; phi tilt relative to the x-axis
    :param x: x-coordinate where the slope will be calculated
    :returns: the slope of the tangent
    """
    (x0, y0, a, b, phi) = ellipse_pars
    # transform to non-rotated ellipse
    x_rot = (x - x0) * cos(phi) + (y - y0) * sin(phi)
    y_rot = (x - x0) * sin(phi) + (y - y0) * cos(phi)
    m_rot = -(b**2 * x_rot) / (a**2 * y_rot)  # slope of tangent to unrotated ellipse
    # rotate tangent line back to angle of the rotated ellipse
    m_tan = tan(atan2(m_rot, 1) + phi)
    return m_tan

if __name__ == "__main__":
    im = cv2.imread('untitled1.png')
    # any value below 250 is just the droplet without the substrate
    drp = evaluate_droplet(im, 250)
Original image:
I made a mistake in calc_slope_of_ellipse:
x_rot = (x - x0)*cos(phi) + (y - y0)*sin(phi)
should be
x_rot = (x - x0)*cos(phi) - (y - y0)*sin(phi)
This fixes the wrong sign of the slope at y=47.
I replaced the atan2:
m_rot = -(b**2 * x_rot)/(a**2 * y_rot) # slope of tangent to unrotated ellipse
#rotate tangent line back to angle of the rotated ellipse
m_tan = tan(atan2(m_rot,1) + phi)
with
tan_a = x_rot/a**2
tan_b = y_rot/b**2
#rotate tangent line back to angle of the rotated ellipse
tan_a_r = tan_a*cos(phi) + tan_b*sin(phi)
tan_b_r = tan_b*cos(phi) - tan_a*sin(phi)
m_tan = - (tan_a_r / tan_b_r)
This fixes the weird behaviour for certain cases (y=62).
The complete function:
def calc_slope_of_ellipse(ellipse_pars, x, y):
    (x0, y0, a, b, phi) = ellipse_pars
    # transform to non-rotated ellipse centered at the origin
    x_rot = (x - x0) * cos(phi) - (y - y0) * sin(phi)
    y_rot = (x - x0) * sin(phi) + (y - y0) * cos(phi)
    # Ax + By = C
    tan_a = x_rot / a**2
    tan_b = y_rot / b**2
    # rotate tangent line back to angle of the rotated ellipse
    tan_a_r = tan_a * cos(phi) + tan_b * sin(phi)
    tan_b_r = tan_b * cos(phi) - tan_a * sin(phi)
    m_tan = -(tan_a_r / tan_b_r)
    return m_tan
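As a quick sanity check (my own, not from the original post): for an axis-aligned ellipse (phi = 0), implicit differentiation of x^2/a^2 + y^2/b^2 = 1 gives dy/dx = -(b^2 * x)/(a^2 * y), so the tangent at the topmost point (0, b) of an ellipse centered at the origin must be horizontal:

m = calc_slope_of_ellipse((0, 0, 5, 3, 0), 0, 3)  # center (0,0), a=5, b=3, no tilt
print(m)  # expected: 0.0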

Distance between cv2 lines and centre of screen - python

I'm using openCV to detect the distance between two lines and their position relative to the centre point of an image. Doesn't need to be an exact distance - just a contextual value of some sort (pixels would be fine)
My code, which I have working to detect the two lines, is this:
import io
import time
import picamera
import cv2
import numpy as np

image_counter = 0
with picamera.PiCamera() as camera:
    camera.start_preview()
    camera.resolution = (340, 240)
    time.sleep(2)
    while True:
        try:
            stream = io.BytesIO()
            image_counter += 1
            camera.capture(stream, format='png')
            data = np.fromstring(stream.getvalue(), dtype=np.uint8)
            image = cv2.imdecode(data, 1)
            grey_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            edge_image = cv2.Canny(grey_image, 50, 150, apertureSize=3)
            lines = cv2.HoughLines(edge_image, 1, np.pi / 180, 95)
            if lines is not None:
                for rho, theta in lines[0]:
                    a = np.cos(theta)
                    b = np.sin(theta)
                    x0 = a * rho
                    y0 = b * rho
                    x1 = int(x0 + 1000 * (-b))
                    y1 = int(y0 + 1000 * a)
                    x2 = int(x0 - 1000 * (-b))
                    y2 = int(y0 - 1000 * a)
                    cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)
            cv2.imwrite('lined_image_' + str(image_counter) + '.png', image)
        except Exception:
            print('loop error')
It detects lines such as in this image;
I've been trying to work out how to do this numerically, but it's convoluted and probably wrong; there must be an easier way, but I can't see it with my inexperience using OpenCV.
How can I find the distance between the centre point of the image and the innermost red lines you see (at the point where each line intersects the horizontal line that passes through the image's centre point)?
Thanks!
If you were to use HoughLinesP, you'd directly get the start and end points of the lines. Then, with Dx = (x2 - x1) and Dy = (y2 - y1), your required distance d from the centre point (x0, y0) is:
d = |Dy*x0 - Dx*y0 + x2*y1 - y2*x1| / sqrt(Dx^2 + Dy^2)
If you intend to stick with HoughLines, you can easily transform rho and theta to get the equation of a line, and use one of the many formulae described here, which is also where the above formula has been borrowed from.
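A minimal sketch of that computation (assuming lines comes from cv2.HoughLinesP and the centre point is the image centre; the helper name is mine):

import numpy as np

def point_line_distance(x0, y0, x1, y1, x2, y2):
    # perpendicular distance from (x0, y0) to the line through (x1, y1)-(x2, y2)
    dx, dy = x2 - x1, y2 - y1
    return abs(dy * x0 - dx * y0 + x2 * y1 - y2 * x1) / np.hypot(dx, dy)

# e.g. for each detected segment:
# for x1, y1, x2, y2 in lines[:, 0]:
#     d = point_line_distance(width / 2, height / 2, x1, y1, x2, y2)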

Is it possible in OpenCV to plot local curvature as a heat-map representing an object's "pointiness"?

Given a thresholded image of blobs that you can detect and draw contours around, is it possible when drawing the contour to represent the local curvature as a heat-map?
i.e. is it possible to (1) determine local curvature on an OpenCV contour, (2) map this curvature to a heat-map color space, and (3) draw the contour as a heat-map?
My goal is to measure the "pointiness" of an object so that I can draw a vector from the pointy side to the opposite non-pointy side. For my objects, I happen to know that the pointy side is the top.
If other techniques would be more effective at representing "pointiness" than curvature feel free to suggest.
EDIT: Fixed a bug in the previous version.
I used the angle between the gradient vectors at the ith and (i + n)th points on the contour as the score to determine the pointiness of a point. Code and results below.
import numpy as np
import cv2
import pylab as pl

def compute_pointness(I, n=5):
    # Compute gradients
    # GX = cv2.Sobel(I, cv2.CV_32F, 1, 0, ksize=5, scale=1)
    # GY = cv2.Sobel(I, cv2.CV_32F, 0, 1, ksize=5, scale=1)
    GX = cv2.Scharr(I, cv2.CV_32F, 1, 0, scale=1)
    GY = cv2.Scharr(I, cv2.CV_32F, 0, 1, scale=1)
    GX = GX + 0.0001  # Avoid div by zero
    # Threshold and invert image for finding contours
    _, I = cv2.threshold(I, 100, 255, cv2.THRESH_BINARY_INV)
    # Pass in copy of image because findContours apparently modifies input.
    C, H = cv2.findContours(I.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    heatmap = np.zeros_like(I, dtype=float)
    pointed_points = []
    for contour in C:
        contour = contour.squeeze()
        measure = []
        N = len(contour)
        for i in range(N):
            x1, y1 = contour[i]
            x2, y2 = contour[(i + n) % N]
            # Angle between gradient vectors (gx1, gy1) and (gx2, gy2)
            gx1 = GX[y1, x1]
            gy1 = GY[y1, x1]
            gx2 = GX[y2, x2]
            gy2 = GY[y2, x2]
            cos_angle = gx1 * gx2 + gy1 * gy2
            cos_angle /= (np.linalg.norm((gx1, gy1)) * np.linalg.norm((gx2, gy2)))
            angle = np.arccos(cos_angle)
            if cos_angle < 0:
                angle = np.pi - angle
            x1, y1 = contour[((2 * i + n) // 2) % N]  # Get the middle point between i and (i + n)
            heatmap[y1, x1] = angle  # Use angle between gradient vectors as score
            measure.append((angle, x1, y1, gx1, gy1))
        _, x1, y1, gx1, gy1 = max(measure)  # Most pointed point for each contour
        # Possible to filter for those blobs with measure > val in heatmap instead.
        pointed_points.append((x1, y1, gx1, gy1))
    heatmap = cv2.GaussianBlur(heatmap, (3, 3), heatmap.max())
    return heatmap, pointed_points

def plot_points(image, pointed_points, radius=5, color=(255, 0, 0)):
    for (x1, y1, _, _) in pointed_points:
        cv2.circle(image, (int(x1), int(y1)), radius, color, -1)

def main():
    I = cv2.imread("glLqt.jpg", 0)
    heatmap, pointed_points = compute_pointness(I, n=5)
    pl.figure()
    pl.imshow(heatmap, cmap=pl.cm.jet)
    pl.colorbar()
    I_color = cv2.cvtColor(I, cv2.COLOR_GRAY2RGB)
    plot_points(I_color, pointed_points)
    pl.figure()
    pl.imshow(I_color)
    pl.show()  # display both figures

if __name__ == '__main__':
    main()
Notice that sharper points are brighter in the heatmap.
The point is that "if you approximate the contour with continuous line segments, the pointiness is the point where the maximum angle deviation between consecutive segments occurs"; based on this you can develop your algorithm.
You need to:
1. Find the contour.
2. Find approxPolyDP() for the contour.
3. Calculate the angle for each pair of consecutive segments and store the point where the maximum deviation occurs (see the sketch after the angle equation below).
You can calculate the angle of a line using the equation
double Angle = atan2(P2.y - P1.y, P2.x - P1.x) * 180.0 / CV_PI;
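Here is a short Python sketch of those three steps (my own illustration; the approxPolyDP epsilon of 0.01 * arc length and the two-value OpenCV 4 findContours return signature are assumptions to adapt):

import cv2
import numpy as np

def pointiest_vertex(binary_img):
    # 1. find the external contours and keep the largest blob
    contours, _ = cv2.findContours(binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnt = max(contours, key=cv2.contourArea)
    # 2. approximate the contour with line segments
    approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True).squeeze()
    # 3. find the vertex with the maximum deviation between consecutive segments
    best, best_dev = None, -1.0
    n = len(approx)
    for i in range(n):
        p0, p1, p2 = approx[i - 1], approx[i], approx[(i + 1) % n]
        a1 = np.degrees(np.arctan2(p1[1] - p0[1], p1[0] - p0[0]))
        a2 = np.degrees(np.arctan2(p2[1] - p1[1], p2[0] - p1[0]))
        dev = abs(a2 - a1) % 360
        dev = min(dev, 360 - dev)  # deviation between consecutive segments
        if dev > best_dev:
            best_dev, best = dev, p1
    return best, best_dev  # pointiest vertex and its angle deviation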
