I have an image that is rotated by 30 degrees.
However, I need to rotate its bounding box too. The coordinates of the bounding box are [xmin, ymin, xmax, ymax] = [101, 27, 270, 388], where (xmin, ymin) is the top-left corner and (xmax, ymax) is the bottom-right corner.
Now I wanted to rotate these points by applying the rotation matrix:
theta = np.radians(30)
c, s = np.cos(theta), np.sin(theta)
r = np.array(((c,-s), (s, c)))
Using
labels = np.array([[101,270],[27,388]])
print(np.dot(r,labels))
But this returns incorrect values. If I am not mistaken, the linear transformation should be correct. Did I overlook something, or did I make a mistake somewhere? Thanks for the help.
Option 1
you can simply use the angle parameter of patches.Rectangle (note that it rotates the rectangle anticlockwise about its anchor point xy, not about its center):
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
from skimage import data
im = np.array(data.coins(), dtype=np.uint8)
# Create figure and axes
fig,ax = plt.subplots(1)
# Display the image
ax.imshow(im)
# Create a Rectangle patch
rect = patches.Rectangle((50,100),100,80,linewidth=1,edgecolor='r',facecolor='none')
rect_2 = patches.Rectangle((50,100),100,80,linewidth=1,edgecolor='r',facecolor='none', angle=30)
# Add the patch to the Axes
ax.add_patch(rect)
ax.add_patch(rect_2)
plt.show()
Option 2
Or, if you want to do this in a mathematical way, use a rotation matrix. I will just show how to calculate the corner points of your rotated box.
Previous corner points
First I set up the unrotated points:
x = [101, 101, 270, 270]
y = [27, 388, 27, 388]
Now we create the rotation matrix (np.pi/6 is 30 degrees in radians):
import numpy as np
rot_mat = np.array([[np.cos(np.pi/6), -np.sin(np.pi/6)],
                    [np.sin(np.pi/6),  np.cos(np.pi/6)]])
Now we center x and y by shifting them so that the center of the rectangle coincides with the origin:
x_cen = np.array(x) - np.mean(x)
y_cen = np.array(y) - np.mean(y)
Apply the rotation matrix to the centered arrays and shift back (note that y must be shifted back by np.mean(y), not np.mean(x)):
x_rot = np.dot(rot_mat, np.array((x_cen, y_cen)))[0, :] + np.mean(x)
y_rot = np.dot(rot_mat, np.array((x_cen, y_cen)))[1, :] + np.mean(y)
Rotated corner points:
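Putting the steps together, here is a minimal runnable sketch. It rotates about the box center, which is an assumption; use the image center as the pivot if that is what your image rotation did:

import numpy as np

# corners of [xmin, ymin, xmax, ymax] = [101, 27, 270, 388]
x = np.array([101, 101, 270, 270])
y = np.array([27, 388, 27, 388])

theta = np.pi / 6  # 30 degrees
rot_mat = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta),  np.cos(theta)]])

# shift the box center to the origin, rotate, then shift back
centered = np.vstack((x - x.mean(), y - y.mean()))
x_rot, y_rot = rot_mat.dot(centered) + [[x.mean()], [y.mean()]]

print(np.column_stack((x_rot, y_rot)))  # one (x, y) row per corner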
In PIL I have rotated a rectangle by an angle and pasted it back onto a background image.
I'd like to know what the new coordinates of a specific corner will be, i.e. a function such that
function(old_coord) => new coordinates
I have read in the documentation that the center of rotation by default is the center of the image.
Here is my code:
import math
from PIL import Image, ImageDraw
background = Image.open('background.jpg')
rectangle = Image.open("rectangle.png")
my_angle = 30
rectangle_rotate = rectangle.rotate(angle=my_angle, resample=Image.BICUBIC, expand=True)
# box: 2-tuple giving the upper left corner
px = int(background.size[0] / 2)
py = int(background.size[1] / 2)
background.paste(im=rectangle_rotate,
                 box=(px, py),
                 mask=rectangle_rotate)
# The new position I'm getting is wrong. How come?
pos_xNew = px * math.cos(math.radians(my_angle)) + py * math.sin(math.radians(my_angle))
pos_yNew = -px * math.sin(math.radians(my_angle)) + py * math.cos(math.radians(my_angle))
print('pos_xNew:', pos_xNew)
print('pos_yNew:', pos_yNew)
draw_img_pts = ImageDraw.Draw(background)
r = 10
# Drawing a simple small circle circle for visualization
draw_img_pts.ellipse((pos_xNew - r, pos_yNew - r, pos_xNew + r, pos_yNew + r), fill='red')
background.save('example_with_rotation.png')
How can I find the new coordinate values? I keep getting the wrong values.
Background image (input):
Rectangle image (input):
The Output I got with zero rotation as expected:
Output I got after 30 degree rotation:
Comments inline
import math
import os
import numpy as np # many operations are more concise in matrix form
import PIL
from PIL import Image, ImageDraw
def get_rotation_matrix(angle):
    """ For background, see https://en.wikipedia.org/wiki/Rotation_matrix
    Applied to row vectors, this rotates clockwise in traditional Cartesian
    coordinates, and counterclockwise if y goes down (as in picture coordinates)
    """
    return np.array([
        [np.cos(angle), -np.sin(angle)],
        [np.sin(angle), np.cos(angle)]])

def rotate(points, pivot, angle):
    """ Get coordinates of points rotated by a given angle counterclockwise
    Args:
        points (np.array): point coordinates shaped (n, 2)
        pivot (np.array): [x, y] coordinates of the rotation center
        angle (float): counterclockwise rotation angle in radians
    Returns:
        np.array of new coordinates shaped (n, 2)
    """
    relative_points = points - pivot
    return relative_points.dot(get_rotation_matrix(angle)) + pivot
background = Image.open('background.jpg')
rectangle = Image.open("rectangle.png")
my_angle_deg = 30
my_angle = math.radians(my_angle_deg)
rsize_x, rsize_y = rectangle.size
# to get shift introduced by rotation+clipping we'll need to rotate all four corners
# starting from the top-right corner, counter-clockwise
rectangle_corners = np.array([
    [rsize_x, 0],        # top-right
    [0, 0],              # top-left
    [0, rsize_y],        # bottom-left
    [rsize_x, rsize_y],  # bottom-right
])
# rectangle_corners now are:
# array([[262, 0],
# [ 0, 0],
# [ 0, 67],
# [262, 67]])
rotated_corners = rotate(rectangle_corners, rectangle_corners[0], my_angle)
# as a result of rotation, one of the corners might end up left from 0,
# e.g. if the rectangle is really tall and rotated 90 degrees right
# or, leftmost corner is no longer at 0, so part of the canvas is clipped
shift_introduced_by_rotation_clip = rotated_corners.min(axis=0)
rotated_shifted_corners = rotated_corners - shift_introduced_by_rotation_clip
# finally, demo
# this part is just copied from the question
rectangle_rotate = rectangle.rotate(angle=my_angle_deg, resample=Image.BICUBIC, expand=True)
# box: 2-tuple giving the upper left corner
px = int(background.size[0] / 2)
py = int(background.size[1] / 2)
background.paste(im=rectangle_rotate,
                 box=(px, py),
                 mask=rectangle_rotate)
# let's see if dots land right after these translations:
draw_img_pts = ImageDraw.Draw(background)
r = 10
for point in rotated_shifted_corners:
    pos_xNew, pos_yNew = point + [px, py]
    draw_img_pts.ellipse((pos_xNew - r, pos_yNew - r, pos_xNew + r, pos_yNew + r), fill='red')
I am trying to create an occupancy grid map by exporting a higher-resolution image of the map at a very low resolution.
In its most basic form, an occupancy grid is a two-dimensional binary array. The values stored in the array denote free (0) or occupied (1). Each value corresponds to a discrete location on the physical map (the following image depicts an area).
As seen in the above image, each array location is a cell of the physical world.
I have a 5 meter x 5 meter world, which is discretized into cells of 5 cm x 5 cm. The world is thus 100 x 100 cells, corresponding to the 5 m x 5 m physical world.
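(For illustration only, the world-to-cell mapping is just a division by the cell size; this helper is hypothetical and not part of my code:)

# hypothetical helper: map world coordinates in meters to grid indices,
# assuming a 5 m x 5 m world discretized at 0.05 m per cell
def world_to_cell(x_m, y_m, resolution=0.05):
    return int(x_m // resolution), int(y_m // resolution)

print(world_to_cell(2.37, 4.99))  # (47, 99)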
The obstacles are randomly generated circular disks at locations (x, y) with random radii r, as follows:
I need to convert this (above) image into an array of size 100x100. That means evaluating whether each cell is actually in the region of an obstacle or free.
To speed things up, I have found the following workaround:
Create a matplotlib figure populated with the obstacles with figsize=(5,5) and save the image with dpi=20 in bmp format, then import the bmp image as a numpy array. Alas, matplotlib does not support bmp. If I save the image as jpeg using plt.savefig('map.jpg', dpi=20, quality=100), or in other formats, then the cell boundaries become blurred and bleed into other cells, as shown in this image:
So my question: how do I save a scaled-down image from matplotlib that preserves the cell sharpness of the image (akin to bmp)?
Nice hack. However, I would rather compute the boolean mask corresponding to your discretized circles explicitly. One simple way to get such a boolean map is by using the contains_points method of matplotlib artists such as a Circle patch.
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
world_width = 100 # x
world_height = 100 # y
minimum_radius = 1
maximum_radius = 10
total_circles = 5
# create circle patches
x = np.random.randint(0, world_width, size=total_circles)
y = np.random.randint(0, world_height, size=total_circles)
r = minimum_radius + (maximum_radius - minimum_radius) * np.random.rand(total_circles)
circles = [Circle((xx,yy), radius=rr) for xx, yy, rr in zip(x, y, r)]
# for each circle, create a boolean mask where each cell element is True
# if its center is within that circle and False otherwise
X, Y = np.meshgrid(np.arange(world_width) + 0.5, np.arange(world_height) + 0.5)
masks = np.zeros((total_circles, world_width, world_height), dtype=bool)
for ii, circle in enumerate(circles):
    masks[ii] = circle.contains_points(np.c_[X.ravel(), Y.ravel()]).reshape(world_width, world_height)
combined_mask = np.sum(masks, axis=0)
plt.imshow(combined_mask, cmap='gray_r')
plt.show()
If I have understood correctly, I think this can be done quite simply with PIL, specifically with the Image.resize function. For example, does this do what you asked?
from PIL import Image, ImageDraw
# Make a dummy image with some black circles on a white background
image = Image.new('RGBA', (1000, 1000), color="white")
draw = ImageDraw.Draw(image)
draw.ellipse((20, 20, 180, 180), fill = 'black', outline ='black')
draw.ellipse((500, 500, 600, 600), fill = 'black', outline ='black')
draw.ellipse((100, 800, 250, 950), fill = 'black', outline ='black')
draw.ellipse((750, 300, 800, 350), fill = 'black', outline ='black')
image.save('circles_full_res.png')
# Resize the image with nearest-neighbour interpolation to preserve cell sharpness
image_lo = image.resize((100, 100), resample=Image.NEAREST)  # Image.NEAREST == 0
image_lo.save("circles_low_res.png")
I'm trying to find the tilt angle in a series of images which look like the example data created below. There should be a clear edge which is visible by eye. However, I'm struggling to extract that edge so far. Is Canny the right way of finding the edge here, or is there a better way?
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter
# create data
xvals = np.arange(0,2000)
yvals = 10000 * np.exp((xvals - 1600)/200) + 100
yvals[1600:] = 100
blurred = gaussian_filter(yvals, sigma=20)
# create image
img = np.tile(blurred,(2000,1))
img = np.swapaxes(img,0,1)
# rotate image
rows,cols = img.shape
M = cv.getRotationMatrix2D((cols/2,rows/2),3.7,1)
img = cv.warpAffine(img,M,(cols,rows))
# convert to uint8 for Canny
img_8 = cv.convertScaleAbs(img,alpha=(255.0/65535.0))
fig,ax = plt.subplots(3)
ax[0].plot(xvals,blurred)
ax[1].imshow(img)
# find edge
ax[2].imshow(cv.Canny(img_8, 20, 100, apertureSize=5))
You can find the angle by converting your image to binary (cv2.threshold with cv2.THRESH_BINARY) and then searching for contours.
Once you have located your contour (the line), you can fit a line to it with cv2.fitLine() and get two points on that line. My math is not very good, but for a linear equation the formula goes f(x) = k*x + n, and you can get k from those two points (k = (y2-y1)/(x2-x1)) and finally the angle phi = arctan(k). (If I'm wrong, please correct me.)
You can also use the rotated bounding rectangle, cv2.minAreaRect(), which already returns the angle of the rectangle (rect = cv2.minAreaRect() --> rect[2]). Hope it helps. Cheers!
Here is an example code:
import cv2
import numpy as np
import math
img = cv2.imread('angle.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, threshold = cv2.threshold(gray,170,255,cv2.THRESH_BINARY)
# OpenCV 3.x returns three values here; on OpenCV 4.x, drop the first one
im, contours, hierarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for c in contours:
    area = cv2.contourArea(c)
    perimeter = cv2.arcLength(c, False)
    if area < 10001 and 100 < perimeter < 1000:
        # first approach - fit a line and calculate with y=kx+n --> angle=tan^(-1)(k)
        rows, cols = img.shape[:2]
        [vx, vy, x, y] = cv2.fitLine(c, cv2.DIST_L2, 0, 0.01, 0.01)
        lefty = int((-x*vy/vx) + y)
        righty = int(((cols-x)*vy/vx) + y)
        cv2.line(img, (cols-1, righty), (0, lefty), (0, 255, 0), 2)
        (x1, y1) = (cols-1, righty)
        (x2, y2) = (0, lefty)
        k = (y2-y1)/(x2-x1)
        angle = math.atan(k)*180/math.pi
        print(angle)
        # second approach - cv2.minAreaRect --> returns (center (x,y), (width, height), angle of rotation)
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
        print(rect[2])
cv2.imshow('img2', img)
Original image:
Output:
-3.8493663478518627
-3.7022125720977783
tribol,
it seems like you can take the gradient image G = |Gx| + |Gy| (normalized to some known range), compute its histogram, and take the top bins of it. That will give you an approximate mask of the line. Then you can do line fitting. It will give you a good initial guess.
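A rough sketch of that idea, assuming img is a 2-D grayscale array (the 95th-percentile cutoff standing in for "top bins of the histogram" is an arbitrary choice to tune):

import numpy as np

def edge_mask_from_gradient(img, percentile=95):
    # gradient image G = |Gx| + |Gy|
    gy, gx = np.gradient(img.astype(float))
    g = np.abs(gx) + np.abs(gy)
    # normalize to a known range, here 0..1
    g = (g - g.min()) / (g.max() - g.min() + 1e-12)
    # keep only the strongest gradients ("top bins" of the histogram)
    return g > np.percentile(g, percentile)

The True pixels approximate the line; their coordinates can then be passed to a line fit (for example np.polyfit or cv2.fitLine) as the initial guess.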
A very simple way of doing it is as follows... adjust my numbers to suit your knowledge of the data.
Normalise your image to a scale of 0-255.
Choose two points A and B, where A is 10% of the image width in from the left side and B is 10% in from the right side. The distance AB is now 0.8 x 2000, or 1600 px.
Go North from point A, sampling your image, until you exceed some sensible threshold; that means you have met the tilted line. Note the Y value at this point as YA.
Do the same, going North from point B, until you meet the tilted line. Note the Y value at this point as YB.
The angle you seek is:
arctan((YB - YA) / 1600)
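A minimal sketch of this scheme, assuming img is a 2-D array with values 0-255 and rows counted from the top; the threshold of 128 stands in for the "sensible threshold" and should be tuned:

import numpy as np

def tilt_angle_deg(img, threshold=128, margin=0.10):
    h, w = img.shape
    xa = int(margin * w)        # column A, 10% in from the left
    xb = int((1 - margin) * w)  # column B, 10% in from the right

    def go_north(col):
        # scan the column from the bottom upwards until the intensity
        # first exceeds the threshold, i.e. until we meet the tilted line
        for row in range(h - 1, -1, -1):
            if img[row, col] > threshold:
                return row
        return None

    ya, yb = go_north(xa), go_north(xb)
    # in image coordinates y grows downwards, so the sign of the angle is
    # flipped relative to the usual Cartesian convention
    return np.degrees(np.arctan2(yb - ya, xb - xa))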
Thresholding as suggested by kavko didn't work that well, as the intensity varied from image to image (I could of course consider the histogram of each image to improve this approach). I ended up taking the maximum of the gradient in the y-direction:
import numpy as np
from scipy import ndimage, stats

def rotate_image(image):
    blur = ndimage.gaussian_filter(image, sigma=10)  # blur image first
    grad = np.gradient(blur, axis=0)  # take gradient along y-axis
    grad[grad > 10000] = 0  # filter unreasonably high values
    idx_maxline = np.argmax(grad, axis=0)  # get y-indices of max slope = indices of edge
    mean = np.mean(idx_maxline)
    std = np.std(idx_maxline)
    idx = np.arange(idx_maxline.shape[0])
    idx_filtered = idx[(idx_maxline < mean + std) & (idx_maxline > mean - std)]  # filter positions where the highest slope is at a different position (blobs)
    slope, intercept, r_value, p_value, std_err = stats.linregress(idx_filtered, idx_maxline[idx_filtered])
    angle = np.arctan(slope) * 180 / np.pi  # convert the fitted slope to degrees via arctan
    out = ndimage.rotate(image, angle, reshape=False)
    return out
out = rotate_image(img)
plt.imshow(out)
I have a numpy array for an image that I read in from a FITS file. I rotated it by N degrees using scipy.ndimage.interpolation.rotate. Then I want to figure out where some point (x, y) in the original, non-rotated frame ends up in the rotated image, i.e. what the rotated-frame coordinates (x', y') are.
This should be a very simple rotation-matrix problem, but if I apply the usual mathematical or programming-based rotation equations, the computed (x', y') does not land on the original feature. I suspect this has something to do with needing a translation matrix as well, because the scipy rotate function rotates about the origin (0, 0) rather than the actual center of the image array.
Can someone please tell me how to get the rotated frame (x',y')? As an example, you could use
from scipy import misc
from scipy.ndimage import rotate
data_orig = misc.face()
data_rot = rotate(data_orig,66) # data array
x0,y0 = 580,300 # left eye; (xrot,yrot) should point there
P.S. The following two related questions' answers do not help me:
Find new coordinates of a point after rotation
New coordinates after image rotation using scipy.ndimage.rotate
As usual with rotations, one needs to translate to the origin, then rotate, then translate back. Here, we can take the center of the image as the origin.
import numpy as np
import matplotlib.pyplot as plt
from scipy import misc
from scipy.ndimage import rotate
data_orig = misc.face()
x0,y0 = 580,300 # left eye; (xrot,yrot) should point there
def rot(image, xy, angle):
    im_rot = rotate(image, angle)
    org_center = (np.array(image.shape[:2][::-1]) - 1) / 2.
    rot_center = (np.array(im_rot.shape[:2][::-1]) - 1) / 2.
    org = xy - org_center
    a = np.deg2rad(angle)
    new = np.array([org[0]*np.cos(a) + org[1]*np.sin(a),
                    -org[0]*np.sin(a) + org[1]*np.cos(a)])
    return im_rot, new + rot_center
fig,axes = plt.subplots(2,2)
axes[0,0].imshow(data_orig)
axes[0,0].scatter(x0,y0,c="r" )
axes[0,0].set_title("original")
for i, angle in enumerate([66, -32, 90]):
    data_rot, (x1, y1) = rot(data_orig, np.array([x0, y0]), angle)
    axes.flatten()[i+1].imshow(data_rot)
    axes.flatten()[i+1].scatter(x1, y1, c="r")
    axes.flatten()[i+1].set_title("Rotation: {}deg".format(angle))
plt.show()
I need to convert map coordinates into pixels (in order to make a clickable map in html).
Here is a sample map (made using the Basemap package from matplotlib). I have put some labels on it and attempted to calculate the midpoints of the labels in pixels:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## Step 0: some points to plot
names = [u"Reykjavík", u"Höfn", u"Húsavík"]
lats = [64.133333, 64.25, 66.05]
lons = [-21.933333, -15.216667, -17.316667]
## Step 1: draw a map using matplotlib/Basemap
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
M = Basemap(projection='merc', resolution='c',
            llcrnrlat=63, urcrnrlat=67,
            llcrnrlon=-24, urcrnrlon=-13)
x, y = M(lons, lats) # transform coordinates according to projection
boxes = []
for xa, ya, name in zip(x, y, names):
    box = plt.text(xa, ya, name,
                   bbox=dict(facecolor='white', alpha=0.5))
    boxes.append(box)
M.bluemarble() # a bit fuzzy at this resolution...
plt.savefig('test.png', bbox_inches="tight", pad_inches=0.01)
# Step 2: get the coordinates of the textboxes in pixels and calculate the
# midpoints
F = plt.gcf() # get current figure
R = F.canvas.get_renderer()
midpoints = []
for box in boxes:
    bb = box.get_window_extent(renderer=R)
    midpoints.append((int((bb.p0[0] + bb.p1[0]) / 2),
                      int((bb.p0[1] + bb.p1[1]) / 2)))
These calculated points are approximately in the correct relation to each other, but do not coincide with the true points. The following code snippet should put a red dot on the midpoint of each label:
# Step 3: use PIL to draw dots on top of the labels
from PIL import Image, ImageDraw
im = Image.open("test.png")
draw = ImageDraw.Draw(im)
for x, y in midpoints:
    y = im.size[1] - y  # PIL counts rows from top, not bottom
    draw.ellipse((x-5, y-5, x+5, y+5), fill="#ff0000")
im.save("test.png", "PNG")
Red dots should be in the middle of the labels.
I guess that the error comes in where I extract the coordinates of the text boxes (in Step #2). Any help much appreciated.
Notes
Perhaps the solution is something along the lines of this answer?
Two things are happening that cause your pixel positions to be off.
The dpi used to calculate the text positions is different from the dpi used to save the figure.
When you use the bbox_inches option in the savefig call, it eliminates a lot of white space. You don't take this into account when you draw your circles with PIL (or when checking where someone clicked). You also add padding in this savefig call, which you may need to account for if it is very large (as I show in my example below); it probably will not matter if you keep using 0.01.
To fix the first issue, just force the figure and the savefig call to use the same DPI.
To fix the second issue, record the pixel position of the (0,0) point of the axes (in Axes units), and shift your text positions accordingly.
Here's a slightly modified version of your code:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## Step 0: some points to plot
names = [u"Reykjavík", u"Höfn", u"Húsavík"]
lats = [64.133333, 64.25, 66.05]
lons = [-21.933333, -15.216667, -17.316667]
## Step 1: draw a map using matplotlib/Basemap
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
# predefined dpi
FIGDPI=80
# set dpi of figure, so that all calculations use this value
plt.gcf().set_dpi(FIGDPI)
M = Basemap(projection='merc', resolution='c',
            llcrnrlat=63, urcrnrlat=67,
            llcrnrlon=-24, urcrnrlon=-13)
x, y = M(lons, lats) # transform coordinates according to projection
boxes = []
for xa, ya, name in zip(x, y, names):
    box = plt.text(xa, ya, name,
                   bbox=dict(facecolor='white', alpha=0.5))
    boxes.append(box)
M.bluemarble() # a bit fuzzy at this resolution...
# predefine padding in inches
PADDING = 2
# force dpi to same value you used in your calculations
plt.savefig('test.png', bbox_inches="tight", pad_inches=PADDING,dpi=FIGDPI)
# document shift due to loss of white space and added padding
origin = plt.gca().transAxes.transform((0,0))
padding = [FIGDPI*PADDING,FIGDPI*PADDING]
Step #2 is unchanged.
Step #3 takes the origin into account:
# Step 3: use PIL to draw dots on top of the labels
from PIL import Image, ImageDraw
im = Image.open("test.png")
draw = ImageDraw.Draw(im)
for x, y in midpoints:
    # deal with the shift
    x = x - origin[0] + padding[0]
    y = y - origin[1] + padding[1]
    y = im.size[1] - y  # PIL counts rows from top, not bottom
    draw.ellipse((x-5, y-5, x+5, y+5), fill="#ff0000")
im.save("test.png", "PNG")
This results in:
Notice that I used an exaggerated PADDING value to test that everything still works; a value of 0.01 would reproduce your original figure.