Polygon from set of points that lie on a boundary - python

I have the following set of points that lie on a boundary and want to create the polygon that connects them. For a person it is quite obvious what path to follow, but I am unable to find an algorithm that does the same, and my own attempts all turned out quite tricky and occasionally ambiguous. What is the best solution for this?
As background: this is the boundary of the Julia set with constant = -0.624+0.435j, where the stable area is defined after 100 iterations. I got these points by setting the stable points to 1 and all others to zero, convolving with the 3x3 all-ones matrix [[1, 1, 1], [1, 1, 1], [1, 1, 1]], and selecting the points where the convolution equals 1. My experimental code is as follows:
import numpy as np
from scipy.signal import convolve2d
import matplotlib.pyplot as plt

r_min, r_max = -1.5, 1.5
c_min, c_max = -2.0, 2.0
dpu = 50  # dots per unit - 50 dots per unit means 200 points per 4 units
max_iterations = 100
cmap = 'hot'
intval = 1 / dpu
r_range = np.arange(r_min, r_max + intval, intval)
c_range = np.arange(c_min, c_max + intval, intval)
constant = -0.624 + 0.435j

def z_func(point, constant):
    z = point
    stable = True
    num_iterations = 1
    while stable and num_iterations < max_iterations:
        z = z**2 + constant
        if abs(z) > max(abs(constant), 2):
            stable = False
            return (stable, num_iterations)
        num_iterations += 1
    return (stable, 0)

points = np.array([])
colors = np.array([])
stables = np.array([], dtype='bool')
progress = 0
for imag in c_range:
    for real in r_range:
        point = complex(real, imag)
        points = np.append(points, point)
        stable, color = z_func(point, constant)
        stables = np.append(stables, stable)
        colors = np.append(colors, color)
    print(f'{100*progress/len(c_range)/len(r_range):3.2f}% completed\r', end='')
    progress += len(r_range)
print(' \r', end='')

rows = len(r_range)
start = len(colors)
orig_field = []
for i_num in range(len(c_range)):
    start -= rows
    real_vals = [color for color in colors[start:start+rows]]
    orig_field.append(real_vals)
orig_field = np.array(orig_field, dtype='int')

rows = len(r_range)
start = len(stables)
stable_field = []
for i_num in range(len(c_range)):
    start -= rows
    real_vals = [1 if val == True else 0 for val in stables[start:start+rows]]
    stable_field.append(real_vals)
stable_field = np.array(stable_field, dtype='int')

kernel = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
stable_boundary = convolve2d(stable_field, kernel, mode='same')

boundary_points = []
cols, rows = stable_boundary.shape
assert cols == len(c_range), "check c_range and cols"
assert rows == len(r_range), "check r_range and rows"
zero_field = np.zeros((cols, rows))
for col in range(cols):
    for row in range(rows):
        if stable_boundary[col, row] in [1]:
            real_val = r_range[row]
            # invert cols as min imag value is highest col and vice versa
            imag_val = c_range[cols-1 - col]
            stable_boundary[col, row] = 1
            boundary_points.append((real_val, imag_val))
        else:
            stable_boundary[col, row] = 0

fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(5, 5))
ax1.matshow(orig_field, cmap=cmap)
ax2.matshow(stable_field, cmap=cmap)
ax3.matshow(stable_boundary, cmap=cmap)
x = [point[0] for point in boundary_points]
y = [point[1] for point in boundary_points]
ax4.plot(x, y, 'o', c='r', markersize=0.5)
ax4.set_aspect(1)
plt.show()
Output with dpu = 200 and max_iterations = 100:
Inspired by this YouTube video: What's so special about the Mandelbrot Set? - Numberphile

Thanks for the input. As it turned out, this is indeed not as easy as it seems. In the end I have used the convex hull and alpha shape algorithms to determine the boundary polygon(s) around the boundary points, as shown in the picture below. Top left is the Julia set where colors represent the number of iterations; top right, black is unstable and white is stable; bottom left is a collection of points representing the boundary between unstable and stable; and bottom right is the collection of boundary polygons around the boundary points.
The code is shown below:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib import patches as mpl_patches
from matplotlib.collections import PatchCollection
import shapely.geometry as geometry
from shapely.ops import cascaded_union, polygonize
from scipy.signal import convolve2d
from scipy.spatial import Delaunay  # pylint: disable-msg=no-name-in-module
from descartes.patch import PolygonPatch

def juliaset_func(point, constant, max_iterations):
    z = point
    stable = True
    num_iterations = 1
    while stable and num_iterations < max_iterations:
        z = z**2 + constant
        if abs(z) > max(abs(constant), 2):
            stable = False
            return (stable, num_iterations)
        num_iterations += 1
    return (stable, num_iterations)

def create_juliaset(r_range, c_range, constant, max_iterations):
    ''' create a juliaset that returns two fields (matrices) - orig_field and
        stable_field, where orig_field contains the number of iterations for
        a point in the complex plane (r, c) and stable_field for each point
        either whether the point is stable (True) or not stable (False)
    '''
    points = np.array([])
    colors = np.array([])
    stables = np.array([], dtype='bool')
    progress = 0
    for imag in c_range:
        for real in r_range:
            point = complex(real, imag)
            points = np.append(points, point)
            stable, color = juliaset_func(point, constant, max_iterations)
            stables = np.append(stables, stable)
            colors = np.append(colors, color)
        print(f'{100*progress/len(c_range)/len(r_range):3.2f}% completed\r', end='')
        progress += len(r_range)
    print(' \r', end='')

    rows = len(r_range)
    start = len(colors)
    orig_field = []
    stable_field = []
    for i_num in range(len(c_range)):
        start -= rows
        real_colors = [color for color in colors[start:start+rows]]
        real_stables = [1 if val == True else 0 for val in stables[start:start+rows]]
        orig_field.append(real_colors)
        stable_field.append(real_stables)
    orig_field = np.array(orig_field, dtype='int')
    stable_field = np.array(stable_field, dtype='int')
    return orig_field, stable_field

def find_boundary_points_of_stable_field(stable_field, r_range, c_range):
    ''' find the boundary points by convolving the stable_field with a 3x3
        kernel of all ones and define the point on the boundary where the
        convolution is 1.
    '''
    kernel = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype='int8')
    stable_boundary = convolve2d(stable_field, kernel, mode='same')

    rows = len(r_range)
    cols = len(c_range)
    boundary_points = []
    for col in range(cols):
        for row in range(rows):
            # Note you can make the boundary 'thicker' by
            # expanding the range of possible values like [1, 2, 3]
            if stable_boundary[col, row] in [1]:
                real_val = r_range[row]
                # invert cols as min imag value is highest col and vice versa
                imag_val = c_range[cols-1 - col]
                boundary_points.append((real_val, imag_val))
            else:
                pass
    return [geometry.Point(val[0], val[1]) for val in boundary_points]

def alpha_shape(points, alpha):
    ''' determine the boundary of a cluster of points whereby 'sharpness' of
        the boundary depends on alpha.
        parameters:
        :points: list of shapely Point objects
        :alpha: scalar
        returns:
        shapely Polygon object or MultiPolygon
        edge_points: list of start and end point of each side of the polygons
    '''
    if len(points) < 4:
        # When you have a triangle, there is no sense
        # in computing an alpha shape.
        return geometry.MultiPoint(list(points)).convex_hull

    def add_edge(edges, edge_points, coords, i, j):
        """ Add a line between the i-th and j-th points,
            if not in the list already
        """
        if (i, j) in edges or (j, i) in edges:
            # already added
            return
        edges.add((i, j))
        edge_points.append((coords[[i, j]]))

    coords = np.array([point.coords[0] for point in points])
    tri = Delaunay(coords)
    edges = set()
    edge_points = []
    # loop over triangles:
    # ia, ib, ic = indices of corner points of the triangle
    for ia, ib, ic in tri.vertices:
        pa = coords[ia]
        pb = coords[ib]
        pc = coords[ic]
        # Lengths of sides of triangle
        a = np.sqrt((pa[0]-pb[0])**2 + (pa[1]-pb[1])**2)
        b = np.sqrt((pb[0]-pc[0])**2 + (pb[1]-pc[1])**2)
        c = np.sqrt((pc[0]-pa[0])**2 + (pc[1]-pa[1])**2)
        # Semiperimeter of triangle
        s = (a + b + c)/2.0
        # Area of triangle by Heron's formula
        area = np.sqrt(s*(s-a)*(s-b)*(s-c))
        circum_r = a*b*c/(4.0*area)
        # Here's the radius filter.
        if circum_r < alpha:
            add_edge(edges, edge_points, coords, ia, ib)
            add_edge(edges, edge_points, coords, ib, ic)
            add_edge(edges, edge_points, coords, ic, ia)

    m = geometry.MultiLineString(edge_points)
    triangles = list(polygonize(m))
    return cascaded_union(triangles), edge_points

def main():
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(5, 5))

    # define limits, range and resolution in the complex plane
    r_min, r_max = -1.5, 1.5
    c_min, c_max = -1.1, 1.1
    dpu = 100  # dots per unit
    intval = 1 / dpu
    r_range = np.arange(r_min, r_max + intval, intval)
    c_range = np.arange(c_min, c_max + intval, intval)

    # create two matrices (orig_field and stable_field) for the juliaset with
    # constant
    constant = -0.76 - 0.10j
    max_iterations = 50
    orig_field, stable_field = create_juliaset(r_range, c_range,
                                               constant,
                                               max_iterations)
    cmap = 'nipy_spectral'
    ax1.matshow(orig_field, cmap=cmap, interpolation='bilinear')
    ax2.matshow(stable_field, cmap=cmap)

    # find points that are on the boundary of the stable field
    boundary_points = find_boundary_points_of_stable_field(stable_field,
                                                           r_range, c_range)
    x = [p.x for p in boundary_points]
    y = [p.y for p in boundary_points]
    ax3.plot(x, y, 'o', c='r', markersize=0.5)
    ax3.set_xlim(r_min, r_max)
    ax3.set_ylim(c_min, c_max)
    ax3.set_aspect(1)

    # find the boundary polygon using alpha_shape where 'sharpness' of the
    # boundary is determined by the factor ALPHA;
    # a green boundary consists of multiple polygons, a red boundary of a
    # single polygon
    alpha = 0.03  # determines shape of the boundary polygon
    bnd_polygon, _ = alpha_shape(boundary_points, alpha)
    patches = []
    if bnd_polygon.geom_type == 'Polygon':
        patches.append(PolygonPatch(bnd_polygon))
        ec = 'red'
    else:
        for poly in bnd_polygon:
            patches.append(PolygonPatch(poly))
        ec = 'green'
    p = PatchCollection(patches, facecolor='none', edgecolor=ec, lw=1)
    ax4.add_collection(p)
    ax4.set_xlim(r_min, r_max)
    ax4.set_ylim(c_min, c_max)
    ax4.set_aspect(1)
    plt.show()

if __name__ == "__main__":
    main()
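A note on library versions (an assumption about your environment, since the post does not pin versions): shapely.ops.cascaded_union was deprecated in Shapely 1.8 and removed in 2.0; unary_union is the drop-in replacement, so the end of alpha_shape would become:

from shapely.ops import unary_union, polygonize

    # inside alpha_shape, unchanged except for the union call
    m = geometry.MultiLineString(edge_points)
    triangles = list(polygonize(m))
    return unary_union(triangles), edge_points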

Related

Voronoi diagram using python to make exact hexagons

I am trying to make a hexagonal fill with a Voronoi diagram. One problem I find is that although the plot it produces is a hexagon diagram, the distances between the points vary.
The first function gives a Voronoi diagram of exact hexagons. Then I am trying to assign a uniform initial distance between the cells as a spring rest length between them.
Now my problem is that the initial hexagonal diagram gives non-uniform lengths between cells. We can see it in the printed result produced by the line "print(a)" in the code. However, I assigned the coordinates of the points by 'x = (col + (0.5 * (row % 2))) * np.sqrt(3)' and 'y = row * 0.5', which should give exact hexagons. I don't understand how I am getting different distances between points.
The following is my code; the second function is mostly about finding the neighbors of each cell and computing the distances between each cell and its neighbors. I am printing the distances in the 'print(a)' line.
import numpy as np
import freud
import matplotlib.pyplot as plt
from scipy.spatial import Delaunay
from collections import defaultdict
import itertools

# Source: https://freud.readthedocs.io/en/v2.10.0/gettingstarted/examples/module_intros/locality.Voronoi.html
def hexagonal_lattice(rows=3, cols=3, noise=.0, seed=None):
    if seed is not None:
        np.random.seed(seed)
    # Assemble a hexagonal lattice
    points = []
    for row in range(rows * 2):
        for col in range(cols):
            x = (col + (0.5 * (row % 2))) * np.sqrt(3)
            y = row * 0.5  # These x,y are allocated to produce exact hexagons
            points.append((x, y, 0))
    points = np.asarray(points)
    points += np.random.multivariate_normal(
        mean=np.zeros(3), cov=np.eye(3) * noise, size=points.shape[0]
    )
    # Set z=0 again for all points after adding Gaussian noise
    # points[:, 2] = 0  # do not see the need; the wrap below seems to set the z coordinate to 0
    # Wrap the points into the box
    box = freud.box.Box(Lx=cols * np.sqrt(3), Ly=rows, is2D=True)
    points = box.wrap(points)  # wraps the points into the box using the periodic boundary
    return box, points

# Compute the Voronoi diagram and plot
box1, pts1 = hexagonal_lattice(rows=12, cols=12, seed=2)  # noise = 0
voro = freud.locality.Voronoi()
voro.compute((box1, pts1))
plt.figure()
ax = plt.gca()
voro.plot(ax=ax, cmap="RdBu")
ax.scatter(pts1[:, 0], pts1[:, 1], s=2, c='k')
plt.show()

# This part is for the stability check of the initial exact hexagons diagram
def cell_movement(box, points, time_length, Lambda=0.01):
    time = 1
    while time <= time_length:
        # 2D projection + neighboring cells
        points_2d = []
        for point in points:
            points_2d.append([point[0], point[1]])  # projection to 2d for neighbor list
        points_2d = np.asarray(points_2d)
        tri = Delaunay(points_2d)
        neiList = defaultdict(set)  # Neighbor list for each cell
        for p in tri.vertices:
            for i, j in itertools.combinations(p, 2):
                neiList[i].add(j)
                neiList[j].add(i)
        neiborList = sorted(neiList.items())  # Sorted neighbor array

        spring = np.ones((len(points[:, 0]), len(points[:, 0])))  # Initial spring rest length
        rintervec = np.empty((len(points[:, 0]), len(points[:, 0]), 2))  # spring length array
        for i in range(len(neiborList)):
            for j in list(neiborList[i][1]):
                j = int(j)
                rintervec[i, j] = points_2d[i] - points_2d[j]  # Distance vector between i,j cells
                a = np.linalg.norm(rintervec[i, j])  # Distances between neighboring cells
                if a != 0:
                    print(a)  # These are the printed numbers
                    spring[i, j] = np.linalg.norm(rintervec[i, j])  # Assign a as spring rest length
                    points_2d[i] += Lambda * rintervec[i, j] * (  # moves points by equation (8)
                        spring[i, j] - np.linalg.norm(rintervec[i, j])) / np.linalg.norm(rintervec[i, j])
                    points[i] = np.append(points_2d[i], np.array([0]))
        # diagram
        points = box.wrap(points)  # wraps the points into the box using the periodic boundary
        voro.compute((box, points))  # Computing the Voronoi diagram
        # figure
        plt.figure()
        ax = plt.gca()
        voro.plot(ax=ax, cmap="RdBu")
        ax.scatter(points[:, 0], points[:, 1], s=2, c='k')
        plt.savefig("C:\\doit\\pythonPractice\\At time %s.png" % time)  # saves diagrams
        plt.show()
        time = time + 1

cell_movement(box1, pts1, time_length=5)
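For reference, a minimal check of the noise-free lattice spacing (same coordinate formula as in hexagonal_lattice above). Nearest neighbors sit at distance exactly 1, while same-row points are sqrt(3) apart; any printed value other than 1 therefore comes from Delaunay edges that are not nearest-neighbor bonds, for example convex-hull edges, or pairs that box.wrap placed on opposite sides of the periodic box, since scipy's Delaunay is not periodic-aware (a likely cause, though I can only go by the code above):

import numpy as np

# spacing check for x = (col + 0.5*(row % 2)) * sqrt(3), y = row * 0.5
p00 = np.array([(0 + 0.5 * (0 % 2)) * np.sqrt(3), 0 * 0.5])  # row 0, col 0
p01 = np.array([(1 + 0.5 * (0 % 2)) * np.sqrt(3), 0 * 0.5])  # row 0, col 1
p10 = np.array([(0 + 0.5 * (1 % 2)) * np.sqrt(3), 1 * 0.5])  # row 1, col 0
print(np.linalg.norm(p10 - p00))  # nearest-neighbor (adjacent-row) distance: 1.0
print(np.linalg.norm(p01 - p00))  # same-row spacing: sqrt(3) ~ 1.732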

Generating random points in a box

I want to generate random points in a box (a=0.2m, b=0.2m, c=1m). These points should have random distances between each other, but the minimum distance between two points should be 0.03m; for this I used random.choice. When I run my code it generates random points, but the distance management is wrong. Also my float-conversion workaround is clumsy, because I don't want to change the random values I generated before, but I couldn't find any other solution. I'm open to suggestions.
Images: graph1, graph2
import random
import matplotlib.pyplot as plt

# BOX a = 0.2m b = 0.2m h = 1m
save = 0  # for saving 3 different plots
for k in range(3):
    pointsX = []  # information of x coordinates of points
    pointsY = []  # information of y coordinates of points
    pointsZ = []  # information of z coordinates of points
    for i in range(100):  # number of the points
        a = random.uniform(0.0, 0.00001)  # so the numbers generated below are floats
        x = random.choice(range(3, 21, 3))  # random coordinates for x
        x1 = x/100 + a
        pointsX.append(x1)
        y = random.choice(range(3, 21, 3))  # random coordinates for y
        y1 = y/100 + a
        pointsY.append(y1)
        z = random.choice(range(3, 98, 3))  # random coordinates for z
        z1 = z/100 + a
        pointsZ.append(z1)
    new_pointsX = list(set(pointsX))  # deleting duplicates, if any
    new_pointsY = list(set(pointsY))
    new_pointsZ = list(set(pointsZ))
    # check whether the max and min values stay within the borders
    print("X-Min", min(new_pointsX))
    print("X-Max", max(new_pointsX))
    print("Y-Min", min(new_pointsY))
    print("Y-Max", max(new_pointsY))
    print("Z-Min", min(new_pointsZ))
    print("Z-Max", max(new_pointsZ))
    if max(new_pointsX) >= 0.2 or max(new_pointsY) >= 0.2:
        print("MAX VALUE GREATER THAN 0.2")
    if max(new_pointsZ) >= 0.97:
        print("MAX VALUE GREATER THAN 0.97")
    # 3D graph
    fig = plt.figure(figsize=(18, 9))
    ax = plt.axes(projection='3d')
    ax.set_xlim([0, 0.2])
    ax.set_ylim([0, 0.2])
    ax.set_zlim([0, 1])
    ax.set_title('title', fontsize=18)
    ax.set_xlabel('X', fontsize=14)
    ax.set_ylabel('Y', fontsize=14)
    ax.set_zlabel('Z', fontsize=14)
    ax.scatter3D(new_pointsX, new_pointsY, new_pointsZ)
    save += 1
    plt.savefig("graph" + str(save) + ".png", dpi=900)
As mentioned in the comments by @user3431635, you can check each point against all previous points before appending the new point to the list. I would do that something like this:
import random
import numpy as np
import matplotlib.pyplot as plt

plt.close("all")

a = 0.2   # x bound
b = 0.2   # y bound
c = 1.0   # z bound
N = 1000  # number of points

def distance(p, points, min_distance):
    """
    Determines if any points in the list are less than the minimum specified
    distance apart.

    Parameters
    ----------
    p : tuple
        `(x,y,z)` point.
    points : ndarray
        Array of points to check against. `x, y, z` points are columnwise.
    min_distance : float
        Minimum allowable distance between any two points.

    Returns
    -------
    bool
        True if point `p` is at least `min_distance` from all points in `points`.
    """
    distances = np.sqrt(np.sum((p - points)**2, axis=1))  # difference, not sum, of coordinates
    distances = np.where(distances < min_distance)
    return distances[0].size < 1

points = np.array([])  # x, y, z columnwise
while points.shape[0] < N:
    x = random.choice(np.linspace(0, a, 100000))
    y = random.choice(np.linspace(0, b, 100000))
    z = random.choice(np.linspace(0, c, 100000))
    p = (x, y, z)
    if len(points) == 0:  # add first point blindly
        points = np.array([p])
    elif distance(p, points, 0.03):  # ensure the minimum distance is met
        points = np.vstack((points, p))

fig = plt.figure(figsize=(18, 9))
ax = plt.axes(projection='3d')
ax.set_xlim([0, a])
ax.set_ylim([0, b])
ax.set_zlim([0, c])
ax.set_title('title', fontsize=18)
ax.set_xlabel('X', fontsize=14)
ax.set_ylabel('Y', fontsize=14)
ax.set_zlabel('Z', fontsize=14)
ax.scatter(points[:, 0], points[:, 1], points[:, 2])
Note, this might not be the randomness you're looking for. I have written it to take the range of x, y, and z values and split it into 100000 increments; a new x, y, or z point is then chosen from those values.
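As a side note (a minor variation, not something the answer above tested): if the discretization into 100000 increments is not important to you, the coordinates can be drawn from a continuous uniform distribution directly:

x = np.random.uniform(0, a)
y = np.random.uniform(0, b)
z = np.random.uniform(0, c)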

Fitting an Orthogonal Grid to Noisy Coordinates

Problem
I have a list of coordinates that are meant to form a grid. Each coordinate has a random error component and some of the coordinates are missing. The grid could be rotated (update). I want to fit an orthogonal grid to the data points and return a list of the grid's vertices. For example:
Application
The purpose is to find a grid in a scanned image. The data points come from the results of contour or edge detection in OpenCV. An example is an image with a grid of photos.
Goal
I wrote some Python code that works, but would like to find a linear algebra algorithm using SciPy, statsmodels or other modules that would be more robust and handle a small rotation of the grid (less than 10°).
Python Code Using Lists Only
# Noisy [x, y] coordinates (origin is upper-left corner)
pts = [[103, 101],
       [198, 103],
       [300,  99],
       [ 97, 205],
       [304, 202],
       [102, 295],
       [200, 303],
       [104, 405],
       [205, 394],
       [298, 401]]

def row_col_avgs(num_list, ratio):
    # Finds the average of each row and column. Coordinates are
    # assigned to a row and column by specifying an error ratio.
    last_num, sum_nums, count_nums, avgs = 0, 0, 0, []
    num_list.sort()
    for num in num_list:
        # Calculate average for last row or column and begin new row or column
        if num > (1+ratio)*last_num and count_nums != 0:
            avgs.append(int(round(sum_nums/count_nums, 0)))
            sum_nums = num
            count_nums = 1
        # Or continue with current row or column
        else:
            sum_nums += num
            count_nums += 1
        last_num = num
    avgs.append(int(round(sum_nums/count_nums, 0)))
    return avgs

# Split coordinates into two lists of x's and y's
xs, ys = map(list, zip(*pts))

# Find averages of each row and column of the grid
x_avgs = row_col_avgs(xs, 0.1)
y_avgs = row_col_avgs(ys, 0.1)

# Return vertices of completed averaged grid
avg_grid = []
for y_avg in y_avgs:
    avg_row = []
    for x_avg in x_avgs:
        avg_row.append([int(x_avg), int(y_avg)])
    avg_grid.append(avg_row)
print(avg_grid)
Output
[[[102, 101], [201, 101], [301, 101]],
[[102, 204], [201, 204], [301, 204]],
[[102, 299], [201, 299], [301, 299]],
[[102, 400], [201, 400], [301, 400]]]
Parallel Slopes Ordinary Least Squares (OLS) Model:
y = m*x + grp + b, where m is the slope, b the y-intercept, and grp a categorical variable.
This is an alternative algorithm that can handle a rotated grid.
The OLS model includes both the data points in the original orientation and a 90° rotation of the same data points. This is necessary so all gridlines are parallel and have the same slope.
Algorithm:
1. Find a reference gridline to compare with the remaining points by choosing two neighboring points in the first or last row whose slope is closest to zero.
2. Calculate the distances between this reference line and the remaining points.
3. Segment the points into groups w.r.t. the calculated distances (one group per gridline).
4. Repeat steps 1 to 3 for the 90-degree-rotated grid and combine the results.
5. Create a parallel slopes OLS model to determine the linear equations of the gridlines.
6. Rotate the rotated gridlines back to their original orientation.
7. Calculate the intersection points.
Note: Fails if noise, angle and/or missing data are too much.
Example: (figure omitted)
Python Code to Create Example
def create_random_example():
    # Requires import of numpy and random packages
    # Creates grid with random noise and missing points
    # Example will fail if std_dev, rotation, pct_removed too large

    # Parameters
    first_row, last_row = 100, 900
    first_col, last_col = 100, 600
    num_rows = 6
    num_cols = 4
    rotation = 3     # degrees that grid is rotated
    sd = 3           # percent std dev of avg x and avg y coordinates
    pct_remove = 30  # percent of points to randomly remove from data

    # Create grid
    x = np.linspace(first_col, last_col, num_cols)
    y = np.linspace(first_row, last_row, num_rows)
    xx, yy = np.meshgrid(x, y)

    # Add noise
    x = xx.flatten() + sd * np.mean(xx) * np.random.randn(xx.size) / 100
    y = yy.flatten() + sd * np.mean(yy) * np.random.randn(yy.size) / 100

    # Randomly remove points
    random_list = random.sample(range(0, num_cols*num_rows),
                                int(pct_remove*num_cols*num_rows/100))
    x, y = np.delete(x, random_list), np.delete(y, random_list)
    pts = np.column_stack((x, y))

    # Rotate points
    radians = np.radians(rotation)
    rot_mat = np.array([[np.cos(radians), -np.sin(radians)],
                        [np.sin(radians),  np.cos(radians)]])
    einsum = np.einsum('ji, mni -> jmn', rot_mat, [pts])
    pts = np.squeeze(einsum).T
    return np.rint(pts)
Python Code to Fit Gridlines
import numpy as np
import pandas as pd
import itertools
import math
import random
from statsmodels.formula.api import ols
from scipy.spatial import KDTree
import matplotlib.pyplot as plt

def pt_line_dist(pt, ref_line):
    pt1, pt2 = [ref_line[:2], ref_line[2:]]
    # Distance from point to line defined by two other points
    return np.linalg.norm(np.cross(pt1 - pt2, [pt[0], pt[1]])) \
           / np.linalg.norm(pt1 - pt2)

def segment_pts(amts, grp_var, grp_label):
    # Segment on amounts (distances here) in last column of array
    # Note: need to label groups with string for OLS model
    amts = amts[amts[:, -1].argsort()]
    first_amt_in_grp = amts[0][-1]
    group, groups, grp = [], [], 0
    for amt in amts:
        if amt[-1] - first_amt_in_grp > grp_var:
            groups.append(group)
            first_amt_in_grp = amt[-1]
            group = []; grp += 1
        group.append(np.append(amt[:-1], [[grp_label + str(grp)]]))
    groups.append(group)
    return groups

def find_reference_line(pts):
    # Find point with minimum absolute slope relative both min y and max y
    y = np.hsplit(pts, 2)[1]  # y column of array
    m = []
    for i, y_pt in enumerate([pts[np.argmin(y)], pts[np.argmax(y)]]):
        m.append(np.zeros((pts.shape[0]-1, 5)))  # dtype default is float64
        m[i][:, 2:4] = np.delete(pts, np.where((pts == y_pt).all(axis=1))[0], axis=0)
        m[i][:, 4] = abs((m[i][:, 3]-y_pt[1]) / (m[i][:, 2]-y_pt[0]))
        m[i][:, :2] = y_pt
    m = np.vstack((m[0], m[1]))
    return m[np.argmin(m[:, 4]), :4]

# Ignore division by zero (slopes of vertical lines)
np.seterr(divide='ignore')

# Create dataset and plot
pts = create_random_example()
plt.scatter(pts[:, 0], pts[:, 1], c='r')  # plot now because pts array changes

# Average distance to the nearest neighbor of each point
tree = KDTree(pts)
nn_avg_dist = np.mean(tree.query(pts, 2)[0][:, 1])

# Find groups of points representing each gridline
groups = []
for orientation in ['o', 'r']:  # original and rotated orientations
    # Rotate points 90 degrees (note: this moves pts to 2nd quadrant)
    if orientation == 'r':
        pts[:, 1] = -1 * pts[:, 1]
        pts[:, [1, 0]] = pts[:, [0, 1]]
    # Find reference line to compare remaining points for grouping
    ref_line = find_reference_line(pts)  # line is defined by two points
    # Distances between points and reference line
    pt_dists = np.zeros((pts.shape[0], 3))
    pt_dists[:, :2] = pts
    pt_dists[:, 2] = np.apply_along_axis(pt_line_dist, 1, pts, ref_line).T
    # Segment pts into groups w.r.t. distances (one group per gridline)
    # Groups have range less than nn_avg_dist.
    groups += segment_pts(pt_dists, 0.7*nn_avg_dist, orientation)

# Create dataframe of groups (OLS model requires a dataframe)
df = pd.DataFrame(np.row_stack(groups), columns=['x', 'y', 'grp'])
df['x'] = pd.to_numeric(df['x'])
df['y'] = pd.to_numeric(df['y'])

# Parallel slopes OLS model
ols_model = ols("y ~ x + grp + 0", data=df).fit()

# OLS parameters
grid_lines = ols_model.params[:-1].to_frame()  # pandas series to dataframe
grid_lines = grid_lines.rename(columns={0: 'b'})
grid_lines['grp'] = grid_lines.index.str[4:6]
grid_lines['m'] = ols_model.params[-1]  # slope

# Rotate the rotated lines back to their original orientation
grid_lines.loc[grid_lines['grp'].str[0] == 'r', 'b'] = grid_lines['b'] / grid_lines['m']
grid_lines.loc[grid_lines['grp'].str[0] == 'r', 'm'] = -1 / grid_lines['m']

# Find grid intersection points by combinations of gridlines
comb = list(itertools.combinations(grid_lines['grp'], 2))
comb = [i for i in comb if i[0][0] != 'r']
comb = [i for i in comb if i[1][0] != 'o']
df_comb = pd.DataFrame(comb, columns=['grp', 'r_grp'])

# Merge gridline parameters with grid points
grid_pts = df_comb.merge(grid_lines.drop_duplicates('grp'), how='left', on='grp')
grid_lines.rename(columns={'grp': 'r_grp'}, inplace=True)
grid_pts.rename(columns={'b': 'o_b', 'm': 'o_m', 'grp': 'o_grp'}, inplace=True)
grid_pts = grid_pts.merge(grid_lines.drop_duplicates('r_grp'), how='left', on='r_grp')
grid_pts.rename(columns={'b': 'r_b', 'm': 'r_m'}, inplace=True)

# Calculate x, y coordinates of gridline interception points
grid_pts['x'] = (grid_pts['r_b'] - grid_pts['o_b']) \
                / (grid_pts['o_m'] - grid_pts['r_m'])
grid_pts['y'] = grid_pts['o_m'] * grid_pts['x'] + grid_pts['o_b']

# Results output
print(grid_lines)
print(grid_pts)

plt.scatter(grid_pts['x'], grid_pts['y'], s=8, c='b')  # for setting axes
axes = plt.gca()
axes.invert_yaxis()
axes.xaxis.tick_top()
axes.set_aspect('equal')
axes.set_xlim(axes.get_xlim())
axes.set_ylim(axes.get_ylim())
x_vals = np.array(axes.get_xlim())
for idx in grid_lines.index:
    y_vals = grid_lines['b'][idx] + grid_lines['m'][idx] * x_vals
    plt.plot(x_vals, y_vals, c='gray')
plt.show()
A numpy implementation of your code can be found below. As the size of AvgGrid is known, I pre-allocate the required memory (rather than appending). This should have speed advantages, especially if the number of output vertices is large.
import numpy as np

# Input of [x, y] coordinates of a sparse grid with errors
xys = np.array([[103, 101],
                [198, 103],
                [300,  99],
                [ 97, 205],
                [304, 202],
                [102, 295],
                [200, 303],
                [104, 405],
                [205, 394],
                [298, 401]])

# Function to average
def ColAvgs(CoordinateList, CutoffRatio=1.1):
    # Length of CoordinateList
    L = len(CoordinateList)

    # Sort input
    SortedList = np.sort(CoordinateList)

    # Determine indices to average
    RelativeIncrease = SortedList[-(L-1):] / SortedList[:(L-1)]
    CriticalIndices = np.flatnonzero(RelativeIncrease > CutoffRatio) + 1
    Indices = np.hstack((0, CriticalIndices))
    if (Indices[-1] != L):
        Indices = np.hstack((Indices, L))
    # print(Indices)  # Uncomment to show index construction

    # Compute averages
    Avgs = np.empty((len(Indices)-1)); Avgs[:] = np.NaN
    for iter in range(len(Avgs)):
        Avgs[iter] = int(round(np.mean(SortedList[Indices[iter]:Indices[(iter+1)]])))

    # Return output
    return Avgs

# Compute x- and y-coordinates of vertices
AvgsXcoord = ColAvgs(xys[:, 0])
AvgsYcoord = ColAvgs(xys[:, 1])

# Return all vertices
AvgGrid = np.empty((len(AvgsXcoord)*len(AvgsYcoord), 2)); AvgGrid[:] = np.NaN
iter = 0
for y in AvgsYcoord:
    for x in AvgsXcoord:
        AvgGrid[iter, :] = np.hstack((x, y))
        iter = iter + 1
print(AvgGrid)
If you project all points onto a vertical or horizontal axis, the problem turns into one of clustering with equally spaced clusters.
To perform these clusterings, you can consider the distances between successive (sorted) points. They form two clusters: short distances corresponding to noise, and longer ones corresponding to the grid size. You can solve the two-way clustering using the Otsu method.
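A minimal sketch of that idea (my own illustration under the assumptions above, not code from the answer): project onto one axis, sort, take the successive gaps, split the gaps into "noise" and "grid spacing" with an Otsu threshold, and average each resulting run of coordinates:

import numpy as np

def otsu_threshold(values, bins=256):
    # Otsu's method on a 1-D sample: choose the threshold that maximizes
    # the between-class variance of the histogram
    hist, edges = np.histogram(values, bins=bins)
    mids = (edges[:-1] + edges[1:]) / 2
    w = hist.cumsum()              # cumulative class-0 weight
    mu = (hist * mids).cumsum()    # cumulative class-0 weighted sum
    total, mu_t = w[-1], mu[-1]
    with np.errstate(divide='ignore', invalid='ignore'):
        between = (mu_t * w - total * mu) ** 2 / (w * (total - w))
    return mids[np.nanargmax(between)]

def cluster_axis(coords):
    # sort the projected coordinates, threshold the gaps, average each cluster
    xs = np.sort(np.asarray(coords, dtype=float))
    gaps = np.diff(xs)
    breaks = np.flatnonzero(gaps > otsu_threshold(gaps)) + 1
    return [grp.mean() for grp in np.split(xs, breaks)]

xs = [103, 198, 300, 97, 304, 102, 200, 104, 205, 298]
print(cluster_axis(xs))  # roughly [101.5, 201.0, 300.7]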

Calculating random sample points using polar coordinates on cartesian map

I'm trying to generate random sample points on a cartesian plane using polar coordinates. I have a cartesian map with polar sectors, and I'd like to put a random sample point within each of the sectors.
Problem Visual Description
I've added a sample point in the first sector. The problem is I don't know how to set the min and max limits for each sector as it's a cartesian plane (using cartesian min and max of the sector corners will give you boxes instead of the entire polar sector).
Code is commented for clarity. Final output posted below.
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [10, 10]
import math
import pylab as pl
from matplotlib import collections as mc
import pprint
from IPython.utils import io
from random import randrange, uniform

# converts cartesian x,y coordinates to polar r,theta coordinates
def cart2pol(x, y):
    rho = np.sqrt(x**2 + y**2)
    phi = np.arctan2(y, x)
    return np.array([rho, phi])

# converts polar r,theta coordinates to cartesian x,y coordinates
def pol2cart(r, theta):  # r is distance
    x = r * np.cos(theta)
    y = r * np.sin(theta)
    return np.array([x, y])

# cooks delicious pie
pi = np.pi

# 100 angle samples around the full circle, used to draw the rings
theta = np.linspace(0, 2*pi, 100)

# x coordinates of a ring with radius r
def x_size(r):
    return r*np.cos(theta)

# y coordinates of a ring with radius r
def y_size(r):
    return r*np.sin(theta)

# calculates distribution of sectors on a circle in radians
# e.g. sub_liner(3) = array([0., 2.0943951, 4.1887902])
def sub_liner(k):
    sub_lines = []
    for c, b in enumerate(range(0, k)):
        sub_lines = np.append(sub_lines, ((12*pi/6)/k)*c)
    return sub_lines

# calculates all distribution sectors for every ring and puts them in a list
def mlp(i):
    master_lines = []
    k = 3
    for a in range(0, i):
        master_lines.append(sub_liner(k))
        k += 3
    return master_lines

# calculates all four corners of each sector for a ring
# (ring, ring points, number of rings)
def cg(r, rp, n):
    return [[[pol2cart(r-1, mlp(n)[r-1][i])[0], pol2cart(r-1, mlp(n)[r-1][i])[1]],
             [pol2cart(r, mlp(n)[r-1][i])[0], pol2cart(r, mlp(n)[r-1][i])[1]]]
            for i in range(0, rp)]

# generates all corners for the ring sectors
def rg(n):
    cgl = []
    k = 3
    for r in range(1, 11):
        cgl.append(cg(r, k, n))
        k += 3
    output = cgl[0]
    for q in range(1, 10):
        output = np.concatenate((output, cgl[q]))
    return output

# print(cg(1,3,10)[0][0][0])
# print(cg(1,3,10))

# randrange gives you an integral value
irand = randrange(0, 10)
# uniform gives you a floating-point value
frand = uniform(0, 10)

# define ring sectors
ring_sectors = rg(10)
# define node points
nx = 0.5
ny = 0.5
# define ring distance
ymin = [0]
ymax = [1]
# generate rings
ring_r = np.sqrt(1.0)
master_array = np.array([[x_size(i), y_size(i)] for i in range(0, 11)])

# plot rings
fig, ax = plt.subplots(1)
[ax.plot(master_array[i][0], master_array[i][1]) for i in range(0, 11)]
ax.set_aspect(1)
size = 10
plt.xlim(-size, size)
plt.ylim(-size, size)

# generate nodes
ax.plot(nx, ny, 'o', color='black')

# ring lines
lc = mc.LineCollection(ring_sectors, color='black', linewidths=2)
ax.add_collection(lc)
plt.grid(linestyle='--')
plt.title('System Generator', fontsize=8)
plt.show()
Sample output can be viewed at.
Edit:
What I've tried:
Based on feedback, I implemented a system that draws uniform random values between the polar coordinate bounds, and it works, but the points aren't as neatly distributed within their sectors as they should be, and I'm not sure why. Maybe my math is off, or I made a mistake in the generator functions. If anyone has any insight, I'm all ears.
Output with points
def ngx(n):
    rmin = 0
    rmax = 1
    nxl = []
    s1 = 0
    s2 = 1
    k = 0
    for i in range(0, n):
        for a in range(0, rmax*3):
            nxl.append(pol2cart(np.random.uniform(rmin, rmax),
                                np.random.uniform(sub_liner(rmax*3)[(s1+k) % (rmax*3)],
                                                  sub_liner(rmax*3)[(s2+k) % (rmax*3)]))[0])
            k += 1
        rmin += 1
        rmax += 1
    return nxl

def ngy(n):
    rmin = 0
    rmax = 1
    nyl = []
    s1 = 0
    s2 = 1
    k = 0
    for i in range(0, n):
        for a in range(0, rmax*3):
            nyl.append(pol2cart(np.random.uniform(rmin, rmax),
                                np.random.uniform(sub_liner(rmax*3)[(s1+k) % (rmax*3)],
                                                  sub_liner(rmax*3)[(s2+k) % (rmax*3)]))[1])
            k += 1
        rmin += 1
        rmax += 1
    return nyl

# define node points
nx = ngx(10)
ny = ngy(10)
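One likely cause of the uneven spread (an assumption, based only on the description above): drawing the radius with np.random.uniform(rmin, rmax) concentrates points toward the inner edge of each ring, because equal radius intervals near the center cover less area. Drawing the radius as the square root of a uniform value on [rmin², rmax²] gives points that are uniform by area within a sector. A minimal sketch:

import numpy as np

def sample_point_in_sector(rmin, rmax, theta_min, theta_max):
    # area-uniform radius: sqrt of a uniform draw on [rmin^2, rmax^2]
    r = np.sqrt(np.random.uniform(rmin**2, rmax**2))
    theta = np.random.uniform(theta_min, theta_max)
    return r * np.cos(theta), r * np.sin(theta)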

Place points with variable density

Assume that you have an NxM matrix with values in the range [0, 100]. What I'd like to do is place points with a density (inversely) proportional to the values in that area.
For example, here's a 2D Gaussian field, inverted s.t. the centroid has a value of 0, and the perimeter is at 100:
I'd like to pack the points so that they appear somewhat similar to this image:
Note how there is a radial spread outwards.
My attempt looks a little different :( ...
What I attempted to do is (i) generate a boolean array of the same shape and size, and (ii) move through the rows and columns. If the value of the boolean array at some point is True, then pass; otherwise, add a [row, col] point to a list and cover the boolean array with True in a radius proportional to the value in the Gaussian array.
The choice of a Gaussian for this example isn't important; the fundamental question is: given a floating-point matrix, how can one place points with a density proportional to those values?
Any help very much appreciated :)
import matplotlib.pyplot as plt
import numpy as np
from math import exp

def gaussian(x, y, x0, y0, A=10.0, sigma_x=10.0, sigma_y=10.0):
    return A - A*exp(-((x-x0)**2/(2*sigma_x**2) + (y-y0)**2/(2*sigma_y**2)))

def generate_grid(width=100, height=100):
    grid = np.empty((width, height))
    for x in range(0, width):
        for y in range(0, height):
            grid[x][y] = gaussian(x, y, width/2, height/2, A=100.0)
    return grid

def cover_array(a, row, col, radius):
    nRows = np.shape(a)[0]  # use the array passed in, not the global grid
    nCols = np.shape(a)[1]
    mid = round(radius / 2)
    half_radius = int(round(radius))
    for x in range(-half_radius, half_radius):
        for y in range(-half_radius, half_radius):
            if row+x >= 0 and x+row < nRows and col+y >= 0 and y+col < nCols:
                if (x-mid)**2 + (y-mid)**2 <= radius**2:
                    a[row+x][col+y] = True

def pack_points(grid):
    points = []
    nRows = np.shape(grid)[0]
    nCols = np.shape(grid)[1]
    maxDist = 50.0
    minDist = 0.0
    maxEdge = 10.0
    minEdge = 5.0
    grid_min = 0.0
    grid_max = 100.0
    row = 0
    col = 0
    arrayCovered = np.zeros((nRows, nCols))
    while True:
        if row >= nRows:
            return np.array(points)
        if arrayCovered[row][col] == False:
            radius = maxEdge * ((grid[row][col] - grid_min) / (grid_max - grid_min))
            cover_array(arrayCovered, row, col, radius)
            points.append([row, col])
        col += 1
        if col >= nCols:
            row += 1
            col = 0

grid = generate_grid()
plt.imshow(grid)
plt.show()
points = pack_points(grid)
plt.scatter(points[:, 0], points[:, 1])
plt.show()
Here is a cheap and simple method, although it requires hand-setting an amount parameter:
import numpy as np
import matplotlib.pyplot as plt

def gaussian(x, y, x0, y0, A=10.0, sigma_x=10.0, sigma_y=10.0):
    return A - A*np.exp(-((x-x0)**2/(2*sigma_x**2) + (y-y0)**2/(2*sigma_y**2)))

def distribute_points(data, amount=1):
    # one Bernoulli trial per cell: the acceptance probability is
    # inversely proportional to the cell value
    p = amount * (1 / data)
    r = np.random.random(p.shape)
    return np.where(p > r)

ii, jj = np.mgrid[-10:10:.1, -10:10:.1]
data = gaussian(ii, jj, 0, 0)
px, py = distribute_points(data, amount=.03)

plt.imshow(data)
plt.scatter(px, py, marker='.', c='#ff000080')
plt.xticks([])
plt.yticks([])
plt.xlim([0, len(ii)])
plt.ylim([0, len(jj)])
Result:
