Detect angle and rotate image using HoughLine Transform - python

I'm trying to straighten an image that is visibly rotated.
I'm using HoughLines with OpenCV.
Here is the image, with the code below (running in Google Colab):
import numpy as np
import cv2
from scipy import ndimage
from google.colab.patches import cv2_imshow
image1 = cv2.imread('/content/rotate.png')
gray=cv2.cvtColor(image1,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
canimg = cv2.Canny(gray, 50, 200)
lines= cv2.HoughLines(canimg, 1, np.pi/180.0, 250, np.array([]))
#lines= cv2.HoughLines(edges, 1, np.pi/180, 80, np.array([]))
for line in lines:
    rho, theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a*rho
    y0 = b*rho
    x1 = int(x0 + 1000*(-b))
    y1 = int(y0 + 1000*(a))
    x2 = int(x0 - 1000*(-b))
    y2 = int(y0 - 1000*(a))
    cv2.line(image1,(x1,y1),(x2,y2),(0,0,255),2)
    print(theta)
    print(rho)
cv2_imshow(image1)
cv2_imshow(edges)
This is the output:
theta: 0.9773844
rho: 311.0
So, when I try to rotate this image with this line and then show it:
img_rotated = ndimage.rotate(image1, theta)
cv2_imshow(img_rotated)
This is the output:
This result does not match the rotation needed to make the frame horizontal.
Any advice? What am I doing wrong?

In ndimage.rotate the angle is in degrees, but HoughLines returns theta in radians, so convert first:
img_rotated = ndimage.rotate(image1, 180*theta/3.1415926)
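Equivalently, a minimal sketch using numpy's built-in conversion (reusing image1 and theta from the question's code):

import numpy as np
from scipy import ndimage

# HoughLines returns theta in radians; ndimage.rotate expects degrees.
img_rotated = ndimage.rotate(image1, np.degrees(theta))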

Related

Why Does Hough Transform Only Produce One Line When There Are Multiple Lines?

I'm attempting to use the Hough transform to find the lines I need. I'm working with multiple images, and an example of one is this:
https://i.stack.imgur.com/KmGdP.jpg
However when using the hough transform the result comes out as
https://i.stack.imgur.com/DJvbg.jpg
I don't know why only one line ends up on the sign when I want the Hough lines to cover all four of its sides, and with some of my other images no Hough lines show up at all. What am I doing wrong? Are there any corrections to put in place? This is the code I'm running:
import numpy as np
import cv2 as cv
import os
images = []
edges = []
light_range = (0, 0, 0)
dark_range = (80, 80, 80)
#Importing testing images
for i in os.listdir("Directional Signage/Train"):
    img = cv.imread(os.path.join("Directional Signage/Train", i))
    if img is None:
        print("Couldn't read this image" + str(i))
    else:
        images.append(img)
for i in range(0, len(images)):
    #Preprocessing
    #mask = cv.inRange(images[i], light_range, dark_range)
    img = np.float32(images[i]) / 255.0
    gx = cv.Sobel(img, cv.CV_32F, 1, 0, ksize=1)
    gy = cv.Sobel(img, cv.CV_32F, 0, 1, ksize=1)
    mag, angle = cv.cartToPolar(gx, gy, angleInDegrees=True)
    gray = cv.cvtColor(mag,cv.COLOR_BGR2GRAY)
    gray = np.uint8(gray * 255.0)
    edges = cv.Canny(gray,50,150,apertureSize = 3)
    lines = cv.HoughLines(edges,2,np.pi/90,200)
    #edges.append(lines)
    for rho,theta in lines[0]:
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))
        cv.line(images[i],(x1,y1),(x2,y2),(0,0,255),2)
for i in range(0, len(images)):
    cv.imshow("blobby " + str(i), images[i])
l = cv.waitKey(0)
You're only iterating over the first line (lines[0]), so naturally you get one line from each image. Do this instead:
[...]
for line in lines:
    for rho,theta in line:
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))
        cv.line(images[i],(x1,y1),(x2,y2),(0,0,255),2)
[...]
Update: now, rather than getting a single red line, my entire image goes red.
The reason is that the Hough function finds every candidate line, ordered by priority: the lines most likely to be real lines come first in the list.
The first two lines of the list may be what you're looking for, so try this:
[...]
for line in lines[:2]:
    for rho,theta in line:
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))
        cv.line(images[i],(x1,y1),(x2,y2),(0,0,255),2)
[...]
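If the first two lines are still near-duplicates, a common follow-up (a sketch with illustrative tolerances, not tested on these images) is to suppress any line whose rho and theta are both close to an already-accepted line, and draw only the survivors:

import numpy as np

def dedupe_lines(lines, rho_tol=20.0, theta_tol=np.pi / 36):
    # Keep a line only if no already-kept line has both a similar rho
    # (pixels) and a similar theta (radians).
    kept = []
    for line in lines:
        rho, theta = line[0]
        if all(abs(rho - r) > rho_tol or abs(theta - t) > theta_tol
               for r, t in kept):
            kept.append((rho, theta))
    return kept

# Usage: for rho, theta in dedupe_lines(lines): ...draw as before...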

Straighten Image and make it flat as well

Looking for a solution for the following type of image: I want to flatten and straighten it for further comparison. I tried searching, but some of the solutions here on Stack Overflow didn't work.
import numpy as np
import cv2
from scipy import ndimage
image1 = cv2.imread('image.jpg')
gray=cv2.cvtColor(image1,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
canimg = cv2.Canny(gray, 50, 200)
lines= cv2.HoughLines(canimg, 1, np.pi/180.0, 250, np.array([]))
#lines= cv2.HoughLines(edges, 1, np.pi/180, 80, np.array([]))
for line in lines:
    rho, theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a*rho
    y0 = b*rho
    x1 = int(x0 + 1000*(-b))
    y1 = int(y0 + 1000*(a))
    x2 = int(x0 - 1000*(-b))
    y2 = int(y0 - 1000*(a))
    cv2.line(image1,(x1,y1),(x2,y2),(0,0,255),2)
    print(theta)
    print(rho)
img_rotated = ndimage.rotate(image1, 180*theta/3.1415926 + 180)
cv2.imshow("img",img_rotated)
cv2.waitKey(0)
#closing all open windows
cv2.destroyAllWindows()
This did only very slight straightening. I am not an expert at OpenCV, but it didn't work: the above code makes a very slight adjustment, and I know it doesn't do any of the flattening work.
I am looking for something related to this.

Rotate (alignment) an image by line along the y-axis

I am trying to rotate (align) an image, which contains a line (with two points P1 and P2), along the y-axis.
Original image:
Note: the green area represents the original image
Result should be:
Note: the red area represents the original image after rotation
So I need to calculate the angle between the line defined by P1(x1,y1) and P2(x2,y2) and the y-axis.
Note: the green line represents the y-axis
My code is:
import cv2
import numpy as np
from math import *
import math
import imutils
height = 500
width = 500
original_image = np.zeros((height,width,3), np.uint8)
original_image[:] = (0,255,0)
x1 = 400
y1 = 50
P1 = (x1, y1)
x2 = 100
y2 = 300
P2 = (x2, y2)
cv2.line(original_image, P1, P2, (0, 0, 0), 3)
deltaY = y1 - y2
deltaX = x1 - x2
angleInDegrees = atan2(deltaY, deltaX) * 180 / math.pi
print(angleInDegrees)
rotated_image = imutils.rotate_bound(original_image, angleInDegrees)
cv2.imshow("Original", original_image)
cv2.imshow("Rotated", rotated_image)
cv2.waitKey(0)
But my rotated_image is not properly aligned
Result looks like:
How should I fix it?
First off, you are calculating the wrong angle. The angle you are calculating is between a vector originating at the origin and ending at P1 and a vector originating at the origin and ending at P2.
The angle you need is between the vector starting at P1 and ending at P2 [P2-P1] and a vector that represents the direction of the y-axis, which would be [0, 1].
Secondly, you have to take into account that your origin is in the top-left corner, so you need to reflect the angle once it is calculated.
import cv2
import numpy as np
from math import *
import math
import imutils
height = 500
width = 500
original_image = np.zeros((height,width,3), np.uint8)
original_image[:] = (0,255,0)
x1 = 400
y1 = 50
P1 = np.array([x1, y1])
x2 = 100
y2 = 300
P2 = np.array([x2, y2])
# checks orientation of p vector & selects appropriate y_axis_vector
if (P2[1] - P1[1]) < 0:
    y_axis_vector = np.array([0, -1])
else:
    y_axis_vector = np.array([0, 1])
if (P2[0] - P1[0]) < 0 and (P2[1] - P1[1]):
    y_axis_vector = np.array([0, 1])
p_unit_vector = (P2 - P1) / np.linalg.norm(P2-P1)
angle_p_y = np.arccos(np.dot(p_unit_vector, y_axis_vector)) * 180 /math.pi
cv2.line(original_image, tuple(P1), tuple(P2), (0, 0, 0), 3)
print(angle_p_y)
print (P2-P1)
rotated_image = imutils.rotate_bound(original_image, -angle_p_y)
cv2.imshow("Original", original_image)
cv2.imshow("Rotated", rotated_image)
cv2.waitKey(0)
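As a possible simplification (an untested sketch), the signed angle between the segment and the y-axis can be computed in one call with arctan2, avoiding the orientation checks above; since image coordinates grow downward in y, the sign may still need flipping for a given setup:

import numpy as np

# arctan2(dx, dy) gives the signed rotation of the vector (dx, dy)
# away from the +y axis, in radians.
dx, dy = P2[0] - P1[0], P2[1] - P1[1]
angle_p_y = np.degrees(np.arctan2(dx, dy))
rotated_image = imutils.rotate_bound(original_image, -angle_p_y)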

Hough Line transform is not correctly identifying any lines

I am having trouble with the Hough line transform. I am trying to identify the major lines in a kitchen. I first just used Canny, but it was picking up more noise than I wanted and wasn't picking up the meeting of the wall and ceiling. However, the Hough line transform is identifying only one line, and it is one that should not be identified at all. Any help would be appreciated.
My input:
kitchen_sample.jpg
My output:
kitchen_lines.jpg
And here is my code:
import cv2
import numpy as np
image = cv2.imread('kitchen_sample.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)
for rho, theta in lines[0]:
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * a)
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * a)
    cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imwrite('kitchen_lines.jpg', image)
You were probably looking at an old OpenCV tutorial page that has a mistake in it (or something changed between versions; I haven't tracked opencv-python).
Here's a new & correct one
All you need to change is replace
for rho, theta in lines[0]:
with
for line in lines:
    rho,theta = line[0]
But even then it would take you some time to get the desired output.
What I would recommend is using HoughLinesP, which would easily give you what you likely need:
lines = cv2.HoughLinesP(edges,1,np.pi/180,100,minLineLength=100,maxLineGap=10)
for line in lines:
    x1,y1,x2,y2 = line[0]
    cv2.line(image,(x1,y1),(x2,y2),(0,255,0),2)
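For context: minLineLength rejects detected segments shorter than that many pixels, and maxLineGap joins collinear segments separated by gaps of at most that many pixels, so both typically need tuning per image.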

Distance between cv2 lines and centre of screen - python

I'm using openCV to detect the distance between two lines and their position relative to the centre point of an image. Doesn't need to be an exact distance - just a contextual value of some sort (pixels would be fine)
My code, which I have working to detect the two lines, is this:
import PIL
import time
import io
import picamera
import cv2
import numpy as np

image_counter = 0
with picamera.PiCamera() as camera:
    camera.start_preview()
    camera.resolution = (340, 240)
    time.sleep(2)
    while True:
        try:
            stream = io.BytesIO()
            image_counter += 1
            camera.capture(stream, format='png')
            # decode the in-memory PNG into an OpenCV image
            data = np.frombuffer(stream.getvalue(), dtype=np.uint8)
            image = cv2.imdecode(data, 1)
            grey_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            edge_image = cv2.Canny(grey_image, 50, 150, apertureSize = 3)
            lines = cv2.HoughLines(edge_image, 1, np.pi/180, 95)
            # HoughLines returns None when no lines are found
            if lines is not None:
                for rho, theta in lines[0]:
                    a = np.cos(theta)
                    b = np.sin(theta)
                    x0 = a*rho
                    y0 = b*rho
                    x1 = int(x0 + 1000*(-b))
                    y1 = int(y0 + 1000*(a))
                    x2 = int(x0 - 1000*(-b))
                    y2 = int(y0 - 1000*(a))
                    cv2.line(image, (x1, y1), (x2, y2), (0,0,255), 2)
            cv2.imwrite('lined_image_' + str(image_counter) + '.png', image)
        except:
            print('loop error')
It detects lines such as in this image:
I've been trying to work out how to do this numerically, but my approach is convoluted and probably wrong; there must be an easier way, but I can't see it given my inexperience with OpenCV.
How can I find the distance between the centre point of the image and the innermost red lines you see (at the points where those lines cross the horizontal line that passes through the image's centre point)?
Thanks!
If you were to use HoughLinesP, you'd directly get the start and end points of the lines. Then, with Dx = (x2 - x1) and Dy = (y2 - y1), your required distance d from the centre point (x0, y0) is
d = |Dy*x0 - Dx*y0 + x2*y1 - y2*x1| / sqrt(Dx^2 + Dy^2)
If you intend to stick with HoughLines, you can easily transform rho and theta to get the equation of a line, and use one of the many formulae described here, which is also where the above formula has been borrowed from.
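For instance, a small helper implementing that formula might look like this (a sketch; the function name is illustrative):

import numpy as np

def point_line_distance(x1, y1, x2, y2, x0, y0):
    # Perpendicular distance (in pixels) from (x0, y0) to the infinite
    # line through (x1, y1) and (x2, y2).
    dx, dy = x2 - x1, y2 - y1
    return abs(dy * x0 - dx * y0 + x2 * y1 - y2 * x1) / np.hypot(dx, dy)

The image centre itself is just (width // 2, height // 2), available from image.shape.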
