Generating equidistant points along the boundary of a polygon (CW/CCW) - python

Suppose I have the vertices of a polygon and they are all oriented CCW. I wish to generate n equidistant points along the boundary of this polygon. Does anyone know of an existing package that does this and, if not, an algorithm one can use? I am working in Python. For example, here is what I would like if the polygon in question is a rectangle:
[image: a rectangle with equally spaced points along its boundary]

Using shapely:
import shapely.geometry as sg
import shapely.affinity as sa
import matplotlib.pyplot as P
import numpy as np

n = 7
k = 11
ori = sg.Point([0, 0])
p = [sg.Point([0, 1])]
for j in range(1, n):
    p.append(sa.rotate(p[-1], 360 / n, origin=ori))
ngon = sg.Polygon(p)

P.figure()
P.plot(*ngon.exterior.xy, "-k")
P.scatter(*np.transpose([ngon.exterior.interpolate(t).xy
                         for t in np.linspace(0, ngon.length, k, False)])[0])
P.axis("equal"); P.box("off"); P.axis("off")
P.show(block=0)
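The same interpolate() idea generalizes to any polygon; here is a small wrapper sketch (the helper name boundary_points is mine, not part of shapely):

import numpy as np
import shapely.geometry as sg

def boundary_points(polygon, k):
    # k points equally spaced along the exterior ring, measured by arc length
    dists = np.linspace(0, polygon.exterior.length, k, endpoint=False)
    return np.array([polygon.exterior.interpolate(d).coords[0] for d in dists])

# e.g. for the rectangle case from the question:
# boundary_points(sg.Polygon([(0, 0), (3, 0), (3, 2), (0, 2)]), 10)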

To add to the possible solutions, and so that I have a record: this incarnation just uses numpy to densify the boundary of polygons or polylines.
def _pnts_on_line_(a, spacing=1, is_percent=False):  # densify by distance
    """Add points, at a fixed spacing, to an array representing a line.

    **See** `densify_by_distance` for documentation.

    Parameters
    ----------
    a : array
        A sequence of `points`, x,y pairs, representing the bounds of a polygon
        or polyline object.
    spacing : number
        Spacing between the points to be added to the line.
    is_percent : boolean
        Express the densification as a percent of the total length.

    Notes
    -----
    Called by `pnt_on_poly`.
    """
    N = len(a) - 1                                     # segments
    dxdy = a[1:, :] - a[:-1, :]                        # coordinate differences
    leng = np.sqrt(np.einsum('ij,ij->i', dxdy, dxdy))  # segment lengths
    if is_percent:                                     # as percentage
        spacing = abs(spacing)
        spacing = min(spacing / 100, 1.)
        steps = (sum(leng) * spacing) / leng           # step distance
    else:
        steps = leng / spacing                         # step distance
    deltas = dxdy / (steps.reshape(-1, 1))             # coordinate steps
    pnts = np.empty((N,), dtype='O')                   # construct an `O` array
    for i in range(N):                                 # cycle through the segments and make
        num = np.arange(steps[i])                      # the new points
        pnts[i] = np.array((num, num)).T * deltas[i] + a[i]
    a0 = a[-1].reshape(1, -1)                          # add the final point and concatenate
    return np.concatenate((*pnts, a0), axis=0)
Results for a hexagon densified at 4 unit point spacing.
h = np.array([[-8.66,  5.00], [ 0.00, 10.00], [ 8.66,  5.00], [ 8.66, -5.00],
              [ 0.00, -10.00], [-8.66, -5.00], [-8.66,  5.00]])  # ---- hexagon
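For reference, the call that produces those results would look like this (my reconstruction; the original post shows only the input and the spacing):

dense = _pnts_on_line_(h, spacing=4)  # insert points at a 4-unit spacing along each edge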
Addendum
As suggested in a comment, I will add the densify-by-factor incarnation:
h = np.array([[-8.66, 5.00], [-2.5, 5.0], [0.00, 10.00], [2.5, 5.0], [8.66, 5.00],
              [8.66, -5.00], [0.00, -10.00], [-8.66, 5.00]])  # ---- a polygon
_densify_by_factor(h, factor=3)
def _densify_by_factor(a, factor=2):
    """Densify a 2D array using np.interp.

    Parameters
    ----------
    a : array
        A 2D array of points representing a polyline/polygon boundary.
    factor : number
        The factor to densify the line segments by.
    """
    a = np.squeeze(a)
    n_fact = len(a) * factor
    b = np.arange(0, n_fact, factor)
    b_new = np.arange(n_fact - 1)  # where you want to interpolate
    c0 = np.interp(b_new, b, a[:, 0])
    c1 = np.interp(b_new, b, a[:, 1])
    n = c0.shape[0]
    c = np.zeros((n, 2))
    c[:, 0] = c0
    c[:, 1] = c1
    return c
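A quick shape check of the call shown above (my addition): with len(h) = 8 and factor = 3, the output has len(h) * factor - 1 interpolated points.

dense = _densify_by_factor(h, factor=3)
print(dense.shape)  # (23, 2)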

Related

Voronoi diagram using python to make exact hexagons

I am trying to make a hexagonal fill using a Voronoi diagram. One problem I find is that although the plot it produces is a hexagonal diagram, the distances between the points vary.
The first function gives a Voronoi diagram of exact hexagons. Then I try to assign a universal initial distance between the cells as a spring rest length between them.
Now my problem is that the initial hexagonal diagram gives non-universal lengths between cells. We can see this in the result printed by the "print(a)" line in the code. However, I assigned the coordinates of the points by 'x = (col + (0.5 * (row % 2))) * np.sqrt(3)' and 'y = row * 0.5', which should give exact hexagons. I don't understand how I am getting different distances between points.
The following is my code; the second function is mostly about finding the neighbours of each cell and computing the distances between each cell and its neighbours. I print the distances with the 'print(a)' line.
import numpy as np
import freud
import matplotlib.pyplot as plt
from scipy.spatial import Delaunay
from collections import defaultdict
import itertools

# Source: https://freud.readthedocs.io/en/v2.10.0/gettingstarted/examples/module_intros/locality.Voronoi.html
def hexagonal_lattice(rows=3, cols=3, noise=.0, seed=None):
    if seed is not None:
        np.random.seed(seed)
    # Assemble a hexagonal lattice
    points = []
    for row in range(rows * 2):
        for col in range(cols):
            x = (col + (0.5 * (row % 2))) * np.sqrt(3)
            y = row * 0.5  # These x,y are allocated to produce exact hexagons
            points.append((x, y, 0))
    points = np.asarray(points)
    points += np.random.multivariate_normal(
        mean=np.zeros(3), cov=np.eye(3) * noise, size=points.shape[0]
    )
    # Set z=0 again for all points after adding Gaussian noise
    # points[:, 2] = 0  # do not see the need; wrap later seems to set the z coordinate to 0
    # Wrap the points into the box
    box = freud.box.Box(Lx=cols * np.sqrt(3), Ly=rows, is2D=True)
    points = box.wrap(points)  # wraps the points into the given box using periodic boundaries
    return box, points
# Compute the Voronoi diagram and plot
box1, pts1 = hexagonal_lattice(rows=12, cols=12, seed=2) # Noise = 0
voro = freud.locality.Voronoi()
voro.compute((box1, pts1))
plt.figure()
ax = plt.gca()
voro.plot(ax=ax, cmap="RdBu")
ax.scatter(pts1[:, 0], pts1[:, 1], s=2, c='k')
plt.show()
# This part is for the stability check of the initial exact-hexagons diagram
def cell_movement(box, points, time_length, Lambda=0.01):
    time = 1
    while time <= time_length:
        # 2D projection + neighbouring cells
        points_2d = []
        for point in points:
            points_2d.append([point[0], point[1]])  # projection to 2D for the neighbour list
        points_2d = np.asarray(points_2d)
        tri = Delaunay(points_2d)
        neiList = defaultdict(set)  # neighbour list for each cell
        for p in tri.vertices:
            for i, j in itertools.combinations(p, 2):
                neiList[i].add(j)
                neiList[j].add(i)
        neiborList = sorted(neiList.items())  # sorted neighbour array
        spring = np.ones((len(points[:, 0]), len(points[:, 0])))  # initial spring rest length
        rintervec = np.empty((len(points[:, 0]), len(points[:, 0]), 2))  # spring length array
        for i in range(len(neiborList)):
            for j in list(neiborList[i][1]):
                j = int(j)
                rintervec[i, j] = points_2d[i] - points_2d[j]  # distance vector between cells i, j
                a = np.linalg.norm(rintervec[i, j])  # distances between neighbouring cells
                if a != 0:
                    print(a)  # These are the printed numbers
                    spring[i, j] = np.linalg.norm(rintervec[i, j])  # assign a as the spring rest length
                    points_2d[i] += Lambda * rintervec[i, j] * (  # moves points by equation (8)
                        spring[i, j] - np.linalg.norm(rintervec[i, j])) / np.linalg.norm(rintervec[i, j])
            points[i] = np.append(points_2d[i], np.array([0]))
        # diagram
        points = box.wrap(points)  # wraps the points into the given box using periodic boundaries
        voro.compute((box, points))  # computing the Voronoi diagram
        # figure
        plt.figure()
        ax = plt.gca()
        voro.plot(ax=ax, cmap="RdBu")
        ax.scatter(points[:, 0], points[:, 1], s=2, c='k')
        plt.savefig("C:\\doit\\pythonPractice\\At time %s.png" % time)  # saves diagrams
        plt.show()
        time = time + 1

cell_movement(box1, pts1, time_length=5)
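As a quick check of the expectation (my addition, not part of the original post): the noise-free lattice formula really does give a uniform nearest-neighbour distance of 1, so the varying printed values are likely introduced by the later processing (e.g. the periodic wrap feeding a non-periodic Delaunay), not by the formula itself.

import numpy as np
from scipy.spatial import cKDTree

rows, cols = 12, 12
pts = np.array([((c + 0.5 * (r % 2)) * np.sqrt(3), r * 0.5)
                for r in range(rows * 2) for c in range(cols)])
d, _ = cKDTree(pts).query(pts, k=2)  # d[:, 1] is each point's nearest-neighbour distance
print(d[:, 1].min(), d[:, 1].max())  # both ~1.0 (up to floating point): the raw lattice is uniform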

finding all the points that belong to a plane using python

I have an mx3 array that is used to create a 3D model. Is there a fast way to extract all the points that belong to a given plane using numpy or other Python functions? The plane will take the Ax+By+Cz+D=0 form. I'm currently looping through all the points in the array to find the points that satisfy this equation.
plane1 = []
for i in pcd_array:
    if (normal_vector[0]*(i[0]-point1[0]) + normal_vector[1]*(i[1]-point1[1])
            + normal_vector[2]*(i[2]-point1[2])) == 0:
        plane1.append(i)
I'm wondering: is there a numpythonic way to do it to make it faster?
Vectorization will be much faster. In the example below, all points below lie on integer values in the region -100 < x,y,z < 100. The matrix p contains one million points; we calculate all points that lie on a given plane (almost instantaneously):
# define 1M points at random:
p = np.random.randint(-100,100, size=(1000000,3))
# A,B,C (c0) are arbitrary values; define D so plane intersects first point:
c0 = np.array([3,5,7])
D = -p[0].dot(c0)
# return all points in plane Ax + By + Cz + D = 0
p_in_plane = p[p.dot(c0) + D == 0]
Does the following help? I'm assuming it is fast because it does not use any for loops. My answer is based on this.
import numpy as np
mat = np.arange(18).reshape(6,3)
mat[5,:] = [0,1,2]
aa = 1
bb = 2
cc = 3
dd = -8
mask = mat[:,0]*aa + mat[:,1]*bb + mat[:,2]*cc + dd == 0
selected = mat[mask,:]
Using numpy where to find all points that match a condition
Code
import numpy as np

def get_planer_indexes(pts, plane):
    '''
    :param pts - array of 3D points
    :param plane - coefficients of the plane (i.e. A, B, C, D)
    :returns - indexes of points which are in the plane
    '''
    # Compute A*pt[0] + B*pt[1] + C*pt[2] + D for each point pt in pts
    # Check that abs(...) is below a threshold (1e-6) to allow for floating point error
    return np.where(np.abs(pts.dot(plane[:3]) + plane[3]) <= 1e-6)
Example Usage
# Create 3 points which lie in a plane
P1 = [1, -2, 0]
P2 = [3, 1, 4]
P3 = [0, -1, 2]
planar_pts = np.array([P1, P2, P3])
# Plane that P1, P2, P3 lie within
plane = np.array([2, -8, 5, -18]) # i.e. A = 2, B = -8, C = 5, D = -18
# Random 3D points (100 points)
rand_points = np.random.rand(100, 3)
# Stack onto planar points
points = np.vstack((planar_pts, rand_points))
# Shuffle the points (so planar points are in random locations)
np.random.shuffle(points)
# Find planar points
indexes = get_planer_indexes(points, plane)
print(points[indexes])
Output
[[ 3. 1. 4.]
[ 0. -1. 2.]
[ 1. -2. 0.]]

Fitting an Orthogonal Grid to Noisy Coordinates

Problem
I have a list of coordinates that are meant to form a grid. Each coordinate has a random error component and some of the coordinates are missing. The grid could be rotated (update). I want to fit an orthogonal grid to the data points and return a list of the grid's vertices. For example:
Application
The purpose is to find a grid in a scanned image. The data points come from the results of contour or edge detection in OpenCV. An example is an image with a grid of photos.
Goal
I wrote some Python code that works, but would like to find a linear algebra algorithm using SciPy, statsmodels or other modules that would be more robust and handle a small rotation of the grid (less than 10°).
Python Code Using Lists Only
# Noisy [x, y] coordinates (origin is upper-left corner)
pts = [[103, 101],
       [198, 103],
       [300,  99],
       [ 97, 205],
       [304, 202],
       [102, 295],
       [200, 303],
       [104, 405],
       [205, 394],
       [298, 401]]
def row_col_avgs(num_list, ratio):
    # Finds the average of each row and column. Coordinates are
    # assigned to a row and column by specifying an error ratio.
    last_num, sum_nums, count_nums, avgs = 0, 0, 0, []
    num_list.sort()
    for num in num_list:
        # Calculate average for last row or column and begin new row or column
        if num > (1 + ratio) * last_num and count_nums != 0:
            avgs.append(int(round(sum_nums / count_nums, 0)))
            sum_nums = num
            count_nums = 1
        # Or continue with current row or column
        else:
            sum_nums += num
            count_nums += 1
        last_num = num
    avgs.append(int(round(sum_nums / count_nums, 0)))
    return avgs
# Split coordinates into two lists of x's and y's
xs, ys = map(list, zip(*pts))

# Find averages of each row and column of the grid
x_avgs = row_col_avgs(xs, 0.1)
y_avgs = row_col_avgs(ys, 0.1)

# Return vertices of completed averaged grid
avg_grid = []
for y_avg in y_avgs:
    avg_row = []
    for x_avg in x_avgs:
        avg_row.append([int(x_avg), int(y_avg)])
    avg_grid.append(avg_row)
print(avg_grid)
Output
[[[102, 101], [201, 101], [301, 101]],
[[102, 204], [201, 204], [301, 204]],
[[102, 299], [201, 299], [301, 299]],
[[102, 400], [201, 400], [301, 400]]]
Parallel Slopes Ordinary Least Squares (OLS) Model:
y = mx + grp + b, where m = slope, b = y-intercept, and grp = categorical variable.
This is an alternative algorithm that can handle a rotated grid. The OLS model includes both the data points in the original orientation and a 90° rotation of the same data points. This is necessary so that all gridlines are parallel and have the same slope.
Algorithm:
1. Find a reference gridline to compare with the remaining points by choosing two neighboring points in the first or last row with a slope closest to zero.
2. Calculate the distances between this reference line and the remaining points.
3. Segment the points into groups w.r.t. the calculated distances (one group per gridline).
4. Repeat steps 1 to 3 for the 90° rotated grid and combine the results.
5. Create a parallel slopes OLS model to determine the linear equations for the gridlines.
6. Rotate the rotated gridlines back to their original orientation.
7. Calculate the intersection points.
Note: this fails if the noise, rotation angle, and/or amount of missing data are too large.
Example:
[image: noisy grid points with fitted gridlines and intersection points]
Python Code to Create Example
def create_random_example():
    # Requires import of numpy and random packages
    # Creates grid with random noise and missing points
    # Example will fail if sd, rotation, pct_remove are too large

    # Parameters
    first_row, last_row = 100, 900
    first_col, last_col = 100, 600
    num_rows = 6
    num_cols = 4
    rotation = 3     # degrees that grid is rotated
    sd = 3           # percent std dev of avg x and avg y coordinates
    pct_remove = 30  # percent of points to randomly remove from data

    # Create grid
    x = np.linspace(first_col, last_col, num_cols)
    y = np.linspace(first_row, last_row, num_rows)
    xx, yy = np.meshgrid(x, y)

    # Add noise
    x = xx.flatten() + sd * np.mean(xx) * np.random.randn(xx.size) / 100
    y = yy.flatten() + sd * np.mean(yy) * np.random.randn(yy.size) / 100

    # Randomly remove points
    random_list = random.sample(range(0, num_cols * num_rows),
                                int(pct_remove * num_cols * num_rows / 100))
    x, y = np.delete(x, random_list), np.delete(y, random_list)
    pts = np.column_stack((x, y))

    # Rotate points
    radians = np.radians(rotation)
    rot_mat = np.array([[np.cos(radians), -np.sin(radians)],
                        [np.sin(radians),  np.cos(radians)]])
    einsum = np.einsum('ji, mni -> jmn', rot_mat, [pts])
    pts = np.squeeze(einsum).T
    return np.rint(pts)
Python Code to Fit Gridlines
import numpy as np
import pandas as pd
import itertools
import math
import random
from statsmodels.formula.api import ols
from scipy.spatial import KDTree
import matplotlib.pyplot as plt
def pt_line_dist(pt, ref_line):
    pt1, pt2 = [ref_line[:2], ref_line[2:]]
    # Distance from point to line defined by two other points
    return np.linalg.norm(np.cross(pt1 - pt2, [pt[0], pt[1]])) \
           / np.linalg.norm(pt1 - pt2)

def segment_pts(amts, grp_var, grp_label):
    # Segment on amounts (distances here) in last column of array
    # Note: need to label groups with a string for the OLS model
    amts = amts[amts[:, -1].argsort()]
    first_amt_in_grp = amts[0][-1]
    group, groups, grp = [], [], 0
    for amt in amts:
        if amt[-1] - first_amt_in_grp > grp_var:
            groups.append(group)
            first_amt_in_grp = amt[-1]
            group = []; grp += 1
        group.append(np.append(amt[:-1], [[grp_label + str(grp)]]))
    groups.append(group)
    return groups

def find_reference_line(pts):
    # Find point with minimum absolute slope relative to both min y and max y
    y = np.hsplit(pts, 2)[1]  # y column of array
    m = []
    for i, y_pt in enumerate([pts[np.argmin(y)], pts[np.argmax(y)]]):
        m.append(np.zeros((pts.shape[0] - 1, 5)))  # dtype default is float64
        m[i][:, 2:4] = np.delete(pts, np.where((pts == y_pt).all(axis=1))[0], axis=0)
        m[i][:, 4] = abs((m[i][:, 3] - y_pt[1]) / (m[i][:, 2] - y_pt[0]))
        m[i][:, :2] = y_pt
    m = np.vstack((m[0], m[1]))
    return m[np.argmin(m[:, 4]), :4]
# Ignore division by zero (slopes of vertical lines)
np.seterr(divide='ignore')

# Create dataset and plot
pts = create_random_example()
plt.scatter(pts[:, 0], pts[:, 1], c='r')  # plot now because the pts array changes

# Average distance to the nearest neighbor of each point
tree = KDTree(pts)
nn_avg_dist = np.mean(tree.query(pts, 2)[0][:, 1])

# Find groups of points representing each gridline
groups = []
for orientation in ['o', 'r']:  # original and rotated orientations
    # Rotate points 90 degrees (note: this moves pts to the 2nd quadrant)
    if orientation == 'r':
        pts[:, 1] = -1 * pts[:, 1]
        pts[:, [1, 0]] = pts[:, [0, 1]]
    # Find reference line to compare remaining points for grouping
    ref_line = find_reference_line(pts)  # line is defined by two points
    # Distances between points and the reference line
    pt_dists = np.zeros((pts.shape[0], 3))
    pt_dists[:, :2] = pts
    pt_dists[:, 2] = np.apply_along_axis(pt_line_dist, 1, pts, ref_line).T
    # Segment pts into groups w.r.t. distances (one group per gridline)
    # Groups have range less than nn_avg_dist.
    groups += segment_pts(pt_dists, 0.7 * nn_avg_dist, orientation)

# Create dataframe of groups (OLS model requires a dataframe)
df = pd.DataFrame(np.row_stack(groups), columns=['x', 'y', 'grp'])
df['x'] = pd.to_numeric(df['x'])
df['y'] = pd.to_numeric(df['y'])

# Parallel slopes OLS model
ols_model = ols("y ~ x + grp + 0", data=df).fit()

# OLS parameters
grid_lines = ols_model.params[:-1].to_frame()  # pandas Series to DataFrame
grid_lines = grid_lines.rename(columns={0: 'b'})
grid_lines['grp'] = grid_lines.index.str[4:6]
grid_lines['m'] = ols_model.params[-1]  # slope

# Rotate the rotated lines back to their original orientation
grid_lines.loc[grid_lines['grp'].str[0] == 'r', 'b'] = grid_lines['b'] / grid_lines['m']
grid_lines.loc[grid_lines['grp'].str[0] == 'r', 'm'] = -1 / grid_lines['m']

# Find grid intersection points by combinations of gridlines
comb = list(itertools.combinations(grid_lines['grp'], 2))
comb = [i for i in comb if i[0][0] != 'r']
comb = [i for i in comb if i[1][0] != 'o']
df_comb = pd.DataFrame(comb, columns=['grp', 'r_grp'])

# Merge gridline parameters with grid points
grid_pts = df_comb.merge(grid_lines.drop_duplicates('grp'), how='left', on='grp')
grid_lines.rename(columns={'grp': 'r_grp'}, inplace=True)
grid_pts.rename(columns={'b': 'o_b', 'm': 'o_m', 'grp': 'o_grp'}, inplace=True)
grid_pts = grid_pts.merge(grid_lines.drop_duplicates('r_grp'), how='left', on='r_grp')
grid_pts.rename(columns={'b': 'r_b', 'm': 'r_m'}, inplace=True)

# Calculate x, y coordinates of gridline intersection points
grid_pts['x'] = (grid_pts['r_b'] - grid_pts['o_b']) \
                / (grid_pts['o_m'] - grid_pts['r_m'])
grid_pts['y'] = grid_pts['o_m'] * grid_pts['x'] + grid_pts['o_b']

# Results output
print(grid_lines)
print(grid_pts)

plt.scatter(grid_pts['x'], grid_pts['y'], s=8, c='b')  # for setting axes
axes = plt.gca()
axes.invert_yaxis()
axes.xaxis.tick_top()
axes.set_aspect('equal')
axes.set_xlim(axes.get_xlim())
axes.set_ylim(axes.get_ylim())
x_vals = np.array(axes.get_xlim())
for idx in grid_lines.index:
    y_vals = grid_lines['b'][idx] + grid_lines['m'][idx] * x_vals
    plt.plot(x_vals, y_vals, c='gray')
plt.show()
A numpy implementation of your code can be found below. As the size of AvgGrid is known, I pre-allocate the required memory (rather than appending). This should have speed advantages, especially if the number of output vertices is large.
import numpy as np

# Input of [x, y] coordinates of a sparse grid with errors
xys = np.array([[103, 101],
                [198, 103],
                [300,  99],
                [ 97, 205],
                [304, 202],
                [102, 295],
                [200, 303],
                [104, 405],
                [205, 394],
                [298, 401]])

# Function to average
def ColAvgs(CoordinateList, CutoffRatio=1.1):
    # Length of CoordinateList
    L = len(CoordinateList)

    # Sort input
    SortedList = np.sort(CoordinateList)

    # Determine indices to average
    RelativeIncrease = SortedList[-(L-1):] / SortedList[:(L-1)]
    CriticalIndices = np.flatnonzero(RelativeIncrease > CutoffRatio) + 1
    Indices = np.hstack((0, CriticalIndices))
    if (Indices[-1] != L):
        Indices = np.hstack((Indices, L))
    # print(Indices)  # Uncomment to show index construction

    # Compute averages
    Avgs = np.empty((len(Indices) - 1)); Avgs[:] = np.NaN
    for iter in range(len(Avgs)):
        Avgs[iter] = int(round(np.mean(SortedList[Indices[iter]:Indices[(iter+1)]])))

    # Return output
    return Avgs

# Compute x- and y-coordinates of vertices
AvgsXcoord = ColAvgs(xys[:, 0])
AvgsYcoord = ColAvgs(xys[:, 1])

# Return all vertices
AvgGrid = np.empty((len(AvgsXcoord) * len(AvgsYcoord), 2)); AvgGrid[:] = np.NaN
iter = 0
for y in AvgsYcoord:
    for x in AvgsXcoord:
        AvgGrid[iter, :] = np.hstack((x, y))
        iter = iter + 1
print(AvgGrid)
If you project all points onto a vertical or horizontal axis, the problem turns into one of clustering with equally spaced clusters.
To perform these clusterings, you can consider the distances between successive (sorted) points. They form two clusters: short distances corresponding to noise, and longer ones corresponding to the grid spacing. You can solve this two-way clustering with the Otsu method, as in the sketch below.
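A minimal sketch of that idea (my addition; it assumes scikit-image is available for threshold_otsu, and the helper name cluster_1d is mine):

import numpy as np
from skimage.filters import threshold_otsu  # assumed dependency for the Otsu threshold

def cluster_1d(coords):
    # Sort the projected coordinates and look at successive gaps:
    # small gaps are noise within a row/column, large gaps are the grid spacing.
    s = np.sort(np.asarray(coords, dtype=float))
    gaps = np.diff(s)
    t = threshold_otsu(gaps)               # two-way clustering of the gaps
    breaks = np.flatnonzero(gaps > t) + 1  # split where a grid-sized gap occurs
    return [grp.mean() for grp in np.split(s, breaks)]

# x-projection of the noisy grid from the question
print(cluster_1d([103, 198, 300, 97, 304, 102, 200, 104, 205, 298]))
# -> roughly [101.5, 201.0, 300.7]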

Calculate rotation matrix to align two vectors in 3D space?

I have two separate vectors of 3D data points that represent curves and I'm plotting these as scatter data in a 3D plot with matplotlib.
Both vectors start at the origin, and both are of unit length. The curves are similar to each other; however, there is typically a rotation between the two curves (for test purposes, I've actually been using one curve and applying a rotation matrix to it to create the second curve).
I want to align the two curves so that they line up in 3D, e.g. rotate curve b so that its start and end points line up with curve a. I've been trying to do this by subtracting the final point from the first to get a direction vector representing the straight line from the start to the end of each curve, converting these to unit vectors, and then calculating the cross and dot products and using the methodology outlined in this answer (https://math.stackexchange.com/a/476311/357495) to calculate a rotation matrix.
However, when I do this, the calculated rotation matrix is wrong, and I'm not sure why.
My code is below (I'm using Python 2.7):
# curve_1, curve_2 are arrays of 3D points, of the same length (both start at the origin)
curve_vec_1 = (curve_1[0] - curve_1[-1]).reshape(3,1)
curve_vec_2 = (curve_2[index][0] - curve_2[index][-1]).reshape(3,1)
a,b = (curve_vec_1/ np.linalg.norm(curve_vec_1)).reshape(3), (curve_vec_2/ np.linalg.norm(curve_vec_2)).reshape(3)
v = np.cross(a,b)
c = np.dot(a,b)
s = np.linalg.norm(v)
I = np.identity(3)
vXStr = '{} {} {}; {} {} {}; {} {} {}'.format(0, -v[2], v[1], v[2], 0, -v[0], -v[1], v[0], 0)
k = np.matrix(vXStr)
r = I + k + np.square(k) * ((1 -c)/(s**2))
for i in xrange(item.shape[0]):
    item[i] = (np.dot(r, item[i]).reshape(3,1)).reshape(3)
In my test case, curve 2 is simply curve 1 with the following rotation matrix applied:
[[1 0 0 ]
[ 0 0.5 0.866]
[ 0 -0.866 0.5 ]]
(just a 60 degree rotation around the x axis).
The rotation matrix computed by my code to align the two vectors again is:
[[ 1. -0.32264329 0.27572962]
[ 0.53984249 1. -0.35320293]
[-0.20753816 0.64292975 1. ]]
The plot of the direction vectors for the two original curves (a and b in blue and green respectively) and the result of b transformed with the computed rotation matrix (red) is below. I'm trying to compute the rotation matrix to align the green vector to the blue.
Based on Daniel F's correction, here is a function that does what you want:
import numpy as np

def rotation_matrix_from_vectors(vec1, vec2):
    """ Find the rotation matrix that aligns vec1 to vec2
    :param vec1: A 3d "source" vector
    :param vec2: A 3d "destination" vector
    :return mat: A transform matrix (3x3) which when applied to vec1, aligns it with vec2.
    """
    a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
    v = np.cross(a, b)
    c = np.dot(a, b)
    s = np.linalg.norm(v)
    kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
    rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
    return rotation_matrix
Test:
vec1 = [2, 3, 2.5]
vec2 = [-3, 1, -3.4]
mat = rotation_matrix_from_vectors(vec1, vec2)
vec1_rot = mat.dot(vec1)
assert np.allclose(vec1_rot/np.linalg.norm(vec1_rot), vec2/np.linalg.norm(vec2))
Problem is here:
r = I + k + np.square(k) * ((1 -c)/(s**2))
np.square(k) squares each element of the matrix. You want np.matmul(k, k) or k @ k, which is the matrix multiplied by itself.
I'd also implement the side cases (especially s=0) mentioned in the comments of that answer, or you will end up with errors for quite a few cases.
Based off of @Peter's and @Daniel F's work. The above function worked for me, except in cases of the same direction vector, where v would be a zero vector. I catch this here and return the identity matrix instead.
import numpy

def rotation_matrix_from_vectors(vec1, vec2):
    """ Find the rotation matrix that aligns vec1 to vec2
    :param vec1: A 3d "source" vector
    :param vec2: A 3d "destination" vector
    :return mat: A transform matrix (3x3) which when applied to vec1, aligns it with vec2.
    """
    a, b = (vec1 / numpy.linalg.norm(vec1)).reshape(3), (vec2 / numpy.linalg.norm(vec2)).reshape(3)
    v = numpy.cross(a, b)
    if any(v):  # if not all zeros then
        c = numpy.dot(a, b)
        s = numpy.linalg.norm(v)
        kmat = numpy.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
        return numpy.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
    else:
        return numpy.eye(3)  # a cross product of all zeros only occurs for identical directions
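One caveat worth noting (my addition): anti-parallel vectors also produce a zero cross product, but they require a 180° rotation rather than the identity, so the guard above only covers the identical-direction case. A quick check of the handled case:

import numpy
v = numpy.array([0.0, 0.0, 1.0])
assert numpy.allclose(rotation_matrix_from_vectors(v, v), numpy.eye(3))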
One can use scipy for this, reproducing @Peter's answer with scipy's Rotation; see:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.html?highlight=scipy%20spatial%20transform%20rotation#scipy.spatial.transform.Rotation
from scipy.spatial.transform import Rotation as R
import numpy as np

def get_rotation_matrix(vec2, vec1=np.array([1, 0, 0])):
    """Get the rotation matrix between two vectors using scipy."""
    vec1 = np.reshape(vec1, (1, -1))
    vec2 = np.reshape(vec2, (1, -1))
    r = R.align_vectors(vec2, vec1)
    return r[0].as_matrix()

vec1 = np.array([2, 3, 2.5])
vec2 = np.array([-3, 1, -3.4])
mat = get_rotation_matrix(vec1=vec1, vec2=vec2)
print(mat)
vec1_rot = mat.dot(vec1)
assert np.allclose(vec1_rot / np.linalg.norm(vec1_rot), vec2 / np.linalg.norm(vec2))
Regards, Markus
I think that if you do not fix a rotation axis, the rotation matrix is not unique.
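That is correct: any additional spin about vec2, applied after the alignment, still maps vec1 onto vec2. A small demonstration (my addition, reusing rotation_matrix_from_vectors from above):

import numpy as np
from scipy.spatial.transform import Rotation as R

vec1 = np.array([1.0, 0.0, 0.0])
vec2 = np.array([0.0, 1.0, 0.0])
M = rotation_matrix_from_vectors(vec1, vec2)
spin = R.from_rotvec(np.deg2rad(30) * vec2).as_matrix()  # 30° spin about vec2
assert np.allclose(spin @ M @ vec1, vec2)  # both matrices align vec1 with vec2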

Rotating 1D numpy array of radial intensities into 2D array of spacial intensities

I have a numpy array filled with intensity readings at different radii in a uniform circle (for context, this is a 1D radiative transfer project for protostellar formation models: while much better models exist, my supervisor wants me to have the experience of producing one so that I understand how others work).
I want to take that 1D array and "rotate" it through a circle, forming a 2D array of intensities that could then be shown with imshow (or, with a bit of work, aplpy). The final array needs to be 2D, and the projection needs to be Cartesian, not polar.
I can do it with nested for loops, and I can do it with lookup tables, but I have a feeling there must be a neat way of doing it in numpy or something.
Any ideas?
EDIT:
I have had to go back and recreate the (frankly horrible) mess of for loops and if statements that I had before. If I really tried, I could probably get rid of one of the loops and one of the if statements by condensing things down. However, the aim is not to make it work with for loops, but to see whether there is a built-in way to rotate the array.
impB is an array that differs slightly from what I stated it was before. It's actually just a list of radii where particles are detected. I then bin those radii into radius bins to get the intensity (or frequency, if you prefer) at each radius. R is the scale factor for my radius, as I run the model in a dimensionless way. iRes is a resolution scale factor, essentially how often I want to sample my radial bins. Everything else should be clear.
radJ = np.ndarray(shape=(2*iRes, 2*iRes))  # Create array of 2xRadius square
for i in range(iRes):
    n = len(impB[np.where(impB[:] < ((i+1.) * (R / iRes)))])  # Count number of things within this radius +1
    m = len(impB[np.where(impB[:] <= ((i) * (R / iRes)))])    # Count number of things in this radius
    a = (((i + 1) * (R / iRes))**2 - ((i) * (R / iRes))**2) * math.pi  # A normalisation factor based on area.....dont ask
    for x in range(iRes):
        for y in range(iRes):
            if (x**2 + y**2) < (i * iRes)**2:
                if (x**2 + y**2) >= (i * iRes)**2:  # Checks for radius, and puts in cartesian space
                    radJ[x+iRes, y+iRes] = (n-m) / a  # Put in actual intensity bins
                    radJ[x+iRes, -y+iRes] = (n-m) / a
                    radJ[-x+iRes, y+iRes] = (n-m) / a
                    radJ[-x+iRes, -y+iRes] = (n-m) / a
Nested loops are a simple approach for this. With ri_data_r and y containing your radius values (the offset from the middle pixel) and the array to be rotated, respectively, I would suggest:
from scipy import interpolate
import numpy as np

y = np.random.rand(100)
ri_data_r = np.linspace(-len(y)/2, len(y)/2, len(y))
interpol_index = interpolate.interp1d(ri_data_r, y)
xv = np.arange(-1, 1, 0.01)  # adjust your matrix values here
X, Y = np.meshgrid(xv, xv)
profilegrid = np.ones(X.shape, float)
for i, x in enumerate(X[0, :]):
    for k, y in enumerate(Y[:, 0]):
        current_radius = np.sqrt(x ** 2 + y ** 2)
        profilegrid[i, k] = interpol_index(current_radius)
print(profilegrid)
This will give you exactly what you are looking for. You just have to take your array and calculate a symmetric array ri_data_r that has the same length as your data array and contains the distance between each data point and the middle of the array. The code does this automatically.
I stumbled upon this question in a different context and I hope I understood it right. Here are two other ways of doing this. The first uses skimage.transform.warp with interpolation of the desired order (here we use order=0, nearest-neighbor). This method is slower but more precise and needs less memory than the second method.
The second one does not use interpolation and is therefore faster, but also less precise, and it needs far more memory because it stores each 2D array containing one tilt until the end, where they are averaged with np.nanmean().
The difference between the two solutions stems from the problem of handling the center of the final image, where the tilts overlap the most; i.e., the first one would just add values with each tilt, ending up outside the original range. This was "solved" by clipping the matrix in each step to a global_min and global_max (consult the code). The second one solves it by taking the mean of the tilts where they overlap, which forces us to use np.nan.
Please read the Example of usage and Sanity check sections in order to understand the plot titles.
Solution 1:
import numpy as np
from skimage.transform import warp

def rotate_vector(vector, deg_angle):
    # Credit goes to skimage.transform.radon
    assert vector.ndim == 1, 'Pass only 1D vectors, e.g. use array.ravel()'
    center = vector.size // 2
    square = np.zeros((vector.size, vector.size))
    square[center, :] = vector
    rad_angle = np.deg2rad(deg_angle)
    cos_a, sin_a = np.cos(rad_angle), np.sin(rad_angle)
    R = np.array([[cos_a, sin_a, -center * (cos_a + sin_a - 1)],
                  [-sin_a, cos_a, -center * (cos_a - sin_a - 1)],
                  [0, 0, 1]])
    # Approx. 80% of time is spent in this function
    return warp(square, R, clip=False, output_shape=((vector.size, vector.size)))

def place_vectors(vectors, deg_angles):
    matrix = np.zeros((vectors.shape[-1], vectors.shape[-1]))
    global_min, global_max = 0, 0
    for i, deg_angle in enumerate(deg_angles):
        tilt = rotate_vector(vectors[i], deg_angle)
        global_min = tilt.min() if global_min > tilt.min() else global_min
        global_max = tilt.max() if global_max < tilt.max() else global_max
        matrix += tilt
    matrix = np.clip(matrix, global_min, global_max)
    return matrix
Solution 2:
Credit for the idea goes to my colleague Michael Scherbela.
import numpy as np

def rotate_vector(vector, deg_angle):
    assert vector.ndim == 1, 'Pass only 1D vectors, e.g. use array.ravel()'
    square = np.ones([vector.size, vector.size]) * np.nan
    radius = vector.size // 2
    r_values = np.linspace(-radius, radius, vector.size)
    rad_angle = np.deg2rad(deg_angle)
    ind_x = np.round(np.cos(rad_angle) * r_values + vector.size/2).astype(int)
    ind_y = np.round(np.sin(rad_angle) * r_values + vector.size/2).astype(int)
    ind_x = np.clip(ind_x, 0, vector.size - 1)
    ind_y = np.clip(ind_y, 0, vector.size - 1)
    square[ind_y, ind_x] = vector
    return square

def place_vectors(vectors, deg_angles):
    matrices = []
    for deg_angle, vector in zip(deg_angles, vectors):
        matrices.append(rotate_vector(vector, deg_angle))
    matrix = np.nanmean(np.array(matrices), axis=0)
    return np.nan_to_num(matrix, copy=False, nan=0.0)
Example of usage:
r = 100 # Radius of the circle, i.e. half the length of the vector
n = int(np.pi * r / 8) # Number of vectors, e.g. number of tilts in tomography
v = np.ones(2*r) # One vector, e.g. one tilt in tomography
V = np.array([v]*n) # All vectors, e.g. a sinogram in tomography
# Rotate 1D vector to a specific angle (output is 2D)
angle = 45
rotated = rotate_vector(v, angle)
# Rotate each row of a 2D array according to its angle (output is 2D)
angles = np.linspace(-90, 90, num=n, endpoint=False)
inplace = place_vectors(V, angles)
Sanity check:
These are just simple checks which by no means cover all possible edge cases. Depending on your use case you might want to extend the checks and adjust the method.
# I. Sanity check
# Assuming n <= πr and v = np.ones(2r)
# Then sum(inplace) should be approx. equal to (n * (2πr - n)) / π
# which is an area that should be covered by the tilts
desired_area = (n * (2 * np.pi * r - n)) / np.pi
covered_area = np.sum(inplace)
covered_frac = covered_area / desired_area
print(f'This method covered {covered_frac * 100:.2f}% '
      'of the area which should be covered in total.')
# II. Sanity check
# Assuming n <= πr and v = np.ones(2r)
# Then a circle M with radius m <= r should be the largest circle which
# is fully covered by the vectors. I.e. its mean should be no less than 1.
# If n = πr then m = r.
# m = n / π
m = int(n / np.pi)
# Code for circular mask not included
mask = create_circular_mask(2*r, 2*r, center=None, radius=m)
m_area = np.mean(inplace[mask])
print(f'Full radius r={r}, radius m={m}, mean(M)={m_area:.4f}.')
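For completeness, a typical implementation of the create_circular_mask helper referenced above (hypothetical; the original post explicitly omits it):

import numpy as np

def create_circular_mask(h, w, center=None, radius=None):
    # Boolean mask that is True inside a circle of the given radius
    if center is None:
        center = (w // 2, h // 2)
    if radius is None:
        radius = min(center[0], center[1], w - center[0], h - center[1])
    Y, X = np.ogrid[:h, :w]
    dist = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)
    return dist <= radius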
Code for plotting:
import matplotlib.pyplot as plt

plt.figure(figsize=(16, 8))
plt.subplot(121)
rotated = np.nan_to_num(rotated)  # not necessary in case of the first method
plt.title(
    f'Output of rotate_vector(), angle={angle}°\n'
    f'Sum is {np.sum(rotated):.2f} and should be {np.sum(v):.2f}')
plt.imshow(rotated, cmap=plt.cm.Greys_r)

plt.subplot(122)
plt.title(
    f'Output of place_vectors(), r={r}, n={n}\n'
    f'Covered {covered_frac * 100:.2f}% of the area which should be covered.\n'
    f'Mean of the circle M is {m_area:.4f} and should be 1.0.')
plt.imshow(inplace)
circle = plt.Circle((r, r), m, color='r', fill=False)
plt.gcf().gca().add_artist(circle)
plt.gcf().gca().legend([circle], [f'Circle M (m={m})'])
