Full algorithm (math) of natural cubic splines computation in Python?

I'm interested in full Python code (with the math formulas) containing all the computations needed to calculate natural cubic splines from scratch, ideally fast (e.g. Numpy-based).
I created this question only to share the code (as an answer) that I recently programmed from scratch (based on Wikipedia) while learning cubic splines.

I programmed the following code based on the Russian Wikipedia article; as far as I can tell, almost the same description and formulas appear in the English article.
To speed up computation I used both Numpy and Numba.
To check the correctness of the code I compared it against the reference implementation of the natural cubic spline, scipy.interpolate.CubicSpline; see the np.allclose(...) assertion in my code, which confirms that my formulas are correct.
Also, I did timings:
calc (spline_scipy): Timed best=2.712 ms, mean=2.792 +- 0.1 ms
calc (spline_numba): Timed best=916.000 us, mean=938.868 +- 17.9 us
speedup: 2.973
use (spline_scipy): Timed best=5.262 ms, mean=5.320 +- 0.1 ms
use (spline_numba): Timed best=4.745 ms, mean=5.420 +- 0.3 ms
speedup: 0.981
which shows that my spline-parameter computation is around 3x faster than the Scipy version, while evaluating the spline (computing values for given x) is about the same speed as Scipy.
Running the code below requires a one-time install of the following packages: python -m pip install numpy numba scipy timerit (scipy and timerit are only needed for the tests, not for the actual algorithm).
The code draws plots showing the original curve and both spline approximations, for the Scipy and Numba versions; as one can see, the Scipy and Numba lines coincide, meaning the spline computations agree.
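For reference, here are the formulas the code implements (my summary of the standard natural cubic spline derivation that the code follows). For knots $x_0 < x_1 < \dots < x_n$ with values $y_i$, let $h_i = x_{i+1} - x_i$ and $a_i = y_i$. The quadratic coefficients $c_i$ solve the tridiagonal system

$$h_{i-1} c_{i-1} + 2 (h_{i-1} + h_i)\, c_i + h_i c_{i+1} = 3 \left( \frac{a_{i+1} - a_i}{h_i} - \frac{a_i - a_{i-1}}{h_{i-1}} \right), \quad i = 1, \dots, n - 1,$$

with natural boundary conditions $c_0 = c_n = 0$ (this is what tri_diag_solve is used for). The remaining coefficients are

$$d_i = \frac{c_{i+1} - c_i}{3 h_i}, \qquad b_i = \frac{a_{i+1} - a_i}{h_i} + \frac{h_i\,(2 c_{i+1} + c_i)}{3},$$

and on $[x_i, x_{i+1}]$ the code evaluates each segment relative to its right knot:

$$S_i(x) = a_{i+1} + b_i\,\Delta x + c_{i+1}\,\Delta x^2 + d_i\,\Delta x^3, \qquad \Delta x = x - x_{i+1}.$$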
Code:
import numpy as np, numba

# Solves a linear system given by a tridiagonal matrix.
# Helper for calculating cubic splines.
@numba.njit(
    [f'f{ii}[:](f{ii}[:], f{ii}[:], f{ii}[:], f{ii}[:])' for ii in (4, 8)],
    cache = True, fastmath = True, inline = 'always')
def tri_diag_solve(A, B, C, F):
    n = B.size
    assert A.ndim == B.ndim == C.ndim == F.ndim == 1 and (
        A.size == B.size == C.size == F.size == n
    ) #, (A.shape, B.shape, C.shape, F.shape)
    Bs, Fs = np.zeros_like(B), np.zeros_like(F)
    Bs[0], Fs[0] = B[0], F[0]
    # Forward sweep of the Thomas algorithm
    for i in range(1, n):
        Bs[i] = B[i] - A[i] / Bs[i - 1] * C[i - 1]
        Fs[i] = F[i] - A[i] / Bs[i - 1] * Fs[i - 1]
    x = np.zeros_like(B)
    x[-1] = Fs[-1] / Bs[-1]
    # Back substitution
    for i in range(n - 2, -1, -1):
        x[i] = (Fs[i] - C[i] * x[i + 1]) / Bs[i]
    return x
# Calculate cubic spline params
@numba.njit(
    #[f'(f{ii}, f{ii}, f{ii}, f{ii})(f{ii}[:], f{ii}[:])' for ii in (4, 8)],
    cache = True, fastmath = True, inline = 'always')
def calc_spline_params(x, y):
    a = y
    h = np.diff(x)
    # Solve the tridiagonal system for the c coefficients,
    # with natural boundary conditions c[0] = c[n] = 0
    c = np.concatenate((np.zeros((1,), dtype = y.dtype),
        np.append(tri_diag_solve(h[:-1], (h[:-1] + h[1:]) * 2, h[1:],
            ((a[2:] - a[1:-1]) / h[1:] - (a[1:-1] - a[:-2]) / h[:-1]) * 3), 0)))
    d = np.diff(c) / (3 * h)
    b = (a[1:] - a[:-1]) / h + (2 * c[1:] + c[:-1]) / 3 * h
    return a[1:], b, c[1:], d
# Spline value calculating function, given params and "x"
@numba.njit(
    [f'f{ii}[:](f{ii}[:], i8[:], f{ii}[:], f{ii}[:], f{ii}[:], f{ii}[:], f{ii}[:])' for ii in (4, 8)],
    cache = True, fastmath = True, inline = 'always')
def func_spline(x, ix, x0, a, b, c, d):
    dx = x - x0[1:][ix]
    return a[ix] + (b[ix] + (c[ix] + d[ix] * dx) * dx) * dx
@numba.njit(
    [f'i8[:](f{ii}[:], f{ii}[:], b1)' for ii in (4, 8)],
    cache = True, fastmath = True, inline = 'always')
def searchsorted_merge(a, b, sort_b):
    # Merge-based equivalent of np.searchsorted(a, b) for sorted "b",
    # running in O(len(a) + len(b))
    ix = np.zeros((len(b),), dtype = np.int64)
    if sort_b:
        ib = np.argsort(b)
    pa, pb = 0, 0
    while pb < len(b):
        if pa < len(a) and a[pa] < (b[ib[pb]] if sort_b else b[pb]):
            pa += 1
        else:
            ix[pb] = pa
            pb += 1
    return ix
# Compute piece-wise spline function for "x" out of sorted "x0" points
@numba.njit([f'f{ii}[:](f{ii}[:], f{ii}[:], f{ii}[:], f{ii}[:], f{ii}[:], f{ii}[:])' for ii in (4, 8)],
    cache = True, fastmath = True, inline = 'always')
def piece_wise_spline(x, x0, a, b, c, d):
    xsh = x.shape
    x = x.ravel()
    #ix = np.searchsorted(x0[1 : -1], x)
    ix = searchsorted_merge(x0[1 : -1], x, False)
    y = func_spline(x, ix, x0, a, b, c, d)
    y = y.reshape(xsh)
    return y
def test():
    import matplotlib.pyplot as plt, scipy.interpolate
    from timerit import Timerit
    Timerit._default_asciimode = True
    np.random.seed(0)
    def f(n):
        x = np.sort(np.random.uniform(0., n / 5 * np.pi, (n,))).astype(np.float64)
        return x, (np.sin(x) * 5 + np.sin(1 + 2.5 * x) * 3 + np.sin(2 + 0.5 * x) * 2).astype(np.float64)
    def spline_numba(x0, y0):
        a, b, c, d = calc_spline_params(x0, y0)
        return lambda x: piece_wise_spline(x, x0, a, b, c, d)
    def spline_scipy(x0, y0):
        f = scipy.interpolate.CubicSpline(x0, y0, bc_type = 'natural')
        return lambda x: f(x)
    def timings():
        x0, y0 = f(10000)
        s, t = {}, []
        gs = [spline_scipy, spline_numba]
        spline_numba(np.copy(x0[::3]), np.copy(y0[::3])) # pre-compile numba
        for g in gs:
            print('calc (', g.__name__, '): ', sep = '', end = '', flush = True)
            tim = Timerit(num = 150, verbose = 1)
            for _ in tim:
                s_ = g(x0, y0)
            s[g.__name__] = s_
            t.append(tim.mean())
            if len(t) >= 2:
                print('speedup:', round(t[-2] / t[-1], 3))
        print()
        x = np.linspace(x0[0], x0[-1], 50000, dtype = np.float64)
        t = []
        s['spline_numba'](np.copy(x[::3])) # pre-compile numba
        for i in range(len(s)):
            print('use (', gs[i].__name__, '): ', sep = '', end = '', flush = True)
            tim = Timerit(num = 100, verbose = 1)
            sg = s[gs[i].__name__]
            for _ in tim:
                sg(x)
            t.append(tim.mean())
            if len(t) >= 2:
                print('speedup:', round(t[-2] / t[-1], 3))
    x0, y0 = f(50)
    timings()
    shift = 3
    x = np.linspace(x0[0], x0[-1], 1000, dtype = np.float64)
    ys = spline_scipy(x0, y0)(x)
    yn = spline_numba(x0, y0)(x)
    assert np.allclose(ys, yn), np.absolute(ys - yn).max()
    plt.plot(x0, y0, label = 'orig')
    plt.plot(x, ys, label = 'spline_scipy')
    plt.plot(x, yn, '-.', label = 'spline_numba')
    plt.legend()
    plt.show()

if __name__ == '__main__':
    test()
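For completeness, a minimal usage sketch (my addition, assuming the functions above are defined):

import numpy as np

# Fit a natural cubic spline to a few points and evaluate it on a dense grid.
x0 = np.linspace(0., 10., 11)
y0 = np.sin(x0)
a, b, c, d = calc_spline_params(x0, y0)

x = np.linspace(0., 10., 101)
y = piece_wise_spline(x, x0, a, b, c, d)
print(y[:5])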

Related

Numpy - vectorize the bivariate poisson pmf equation

I'm trying to write a function to evaluate the probability mass function for the bivariate poisson distribution.
This is easy when all of the parameters (x, y, theta1, theta2, theta0) are scalars, but tricky to scale up without loops to allow these parameters to be vectors. I need it to scale such that, for:
theta0 being a scalar - the "correlation parameter" in the equation
theta1 and theta2 having length l
x, y both having length n
the output array would have shape (l, n, n); a slice [j, :, :] of the output would be the full matrix of pmf values over x and y for the j-th theta pair. (The equation image from the original post is omitted here.)
I think I've figured out the first part (the constant, before the summation):
import numpy as np
from scipy.special import factorial

def constant(theta1, theta2, theta0, x, y):
    exponential_part = np.exp(-(theta1 + theta2 + theta0)).reshape(-1, 1, 1)
    x = np.tile(x, (len(x), 1)).transpose()
    y = np.tile(y, (len(y), 1))
    double_factorial = (np.power(np.array(theta1).reshape(-1, 1, 1), x)/factorial(x)) * \
                       (np.power(np.array(theta2).reshape(-1, 1, 1), y)/factorial(y))
    return exponential_part * double_factorial
But I'm struggling with the summation part. How can I vectorize a summation where the limits depend on variable arrays?
I think I have this figured out, based on the approach that @w-m suggests: calculate every possible summation term which could appear, based on the maximum x or y value present, and use a mask to get rid of the ones you don't want. Assuming your x and y terms go from 0 to N in consecutive order, this calculates up to three times more terms than are actually required, but that is offset by getting to use vectorization. A tiny demonstration of the masking idea is sketched below.
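Here is a toy demonstration of that masking trick (my illustration, not part of the original answer): summing i**2 for i from 0 up to a different limit per column, with every candidate term computed and the unwanted ones zeroed out:

import numpy as np

lim = np.array([1, 3, 2])                    # variable upper limits
i = np.arange(lim.max() + 1).reshape(-1, 1)  # all candidate indices, as a column
terms = i.astype(float) ** 2                 # every term that could appear
terms = terms * (i <= lim)                   # mask out terms beyond each limit
print(terms.sum(axis=0))                     # -> [ 1. 14.  5.]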
Reference implementation
I wrote this by first writing a pure-Python reference implementation, which just implements your problem using loops. With 4 nested loops, it's not exactly fast, but it's handy to have while testing the numpy version.
import numpy as np
from scipy.special import factorial, comb
import operator as op
from functools import reduce

def choose(n, r):
    # https://stackoverflow.com/a/4941932/530160
    r = min(r, n-r)
    numer = reduce(op.mul, range(n, n-r, -1), 1)
    denom = reduce(op.mul, range(1, r+1), 1)
    return numer // denom  # or / in Python 2

def reference_impl_constant(s_theta1, s_theta2, s_theta0, s_x, s_y):
    # Cast to float to prevent overflow
    s_theta1 = float(s_theta1)
    s_theta2 = float(s_theta2)
    s_theta0 = float(s_theta0)
    s_x = float(s_x)
    s_y = float(s_y)
    term1 = np.exp(-(s_theta1 + s_theta2 + s_theta0))
    term2 = (s_theta1 ** s_x / factorial(s_x))
    term3 = (s_theta2 ** s_y / factorial(s_y))
    assert term1 >= 0
    assert term2 >= 0
    assert term3 >= 0
    return term1 * term2 * term3

def reference_impl_constant_loop(theta1, theta2, theta0, x, y):
    theta_len = theta1.shape[0]
    xy_len = x.shape[0]
    constant_array = np.zeros((theta_len, xy_len, xy_len))
    for i in range(theta_len):
        for j in range(xy_len):
            for k in range(xy_len):
                s_theta1 = theta1[i]
                s_theta2 = theta2[i]
                s_theta0 = theta0
                s_x = x[j]
                s_y = y[k]
                constant_term = reference_impl_constant(s_theta1, s_theta2, s_theta0, s_x, s_y)
                assert constant_term >= 0
                constant_array[i, j, k] = constant_term
    return constant_array

def reference_impl_summation(s_theta1, s_theta2, s_theta0, s_x, s_y):
    sum_ = 0
    for i in range(min(s_x, s_y) + 1):
        sum_ += choose(s_x, i) * choose(s_y, i) * factorial(i) * ((s_theta0/s_theta1/s_theta2) ** i)
    assert sum_ >= 0
    return sum_

def reference_impl_summation_loop(theta1, theta2, theta0, x, y):
    theta_len = theta1.shape[0]
    xy_len = x.shape[0]
    summation_array = np.zeros((theta_len, xy_len, xy_len))
    for i in range(theta_len):
        for j in range(xy_len):
            for k in range(xy_len):
                s_theta1 = theta1[i]
                s_theta2 = theta2[i]
                s_theta0 = theta0
                s_x = x[j]
                s_y = y[k]
                summation_term = reference_impl_summation(s_theta1, s_theta2, s_theta0, s_x, s_y)
                assert summation_term >= 0
                summation_array[i, j, k] = summation_term
    return summation_array

def reference_impl(theta1, theta2, theta0, x, y):
    # all array inputs must be 1D
    assert len(theta1.shape) == 1
    assert len(theta2.shape) == 1
    assert len(x.shape) == 1
    assert len(y.shape) == 1
    # theta vectors must have same length
    theta_len = theta1.shape[0]
    assert theta2.shape[0] == theta_len
    # x and y must have same length
    xy_len = x.shape[0]
    assert y.shape[0] == xy_len
    # theta0 is scalar
    assert isinstance(theta0, (int, float))
    constant_array = reference_impl_constant_loop(theta1, theta2, theta0, x, y)
    summation_array = reference_impl_summation_loop(theta1, theta2, theta0, x, y)
    output = constant_array * summation_array
    return output
Numpy implementation
I split the implementation of this across two functions.
The fast_constant() function calculates everything to the left of the summation symbol. The fast_summation() function calculates everything inside the summation symbol.
import numpy as np
from scipy.special import factorial, comb

def fast_summation(theta1, theta2, theta0, x, y):
    x = np.tile(x, (len(x), 1)).transpose()
    y = np.tile(y, (len(y), 1))
    sum_limit = np.minimum(x, y)
    max_sum_limit = np.max(sum_limit)
    i = np.arange(max_sum_limit + 1).reshape(-1, 1, 1)
    summation_mask = (i <= sum_limit)
    theta_ratio = (theta0 / (theta1 * theta2)).reshape(-1, 1, 1, 1)
    theta_to_power = np.power(theta_ratio, i)
    terms = comb(x, i) * comb(y, i) * factorial(i) * theta_to_power
    # mask out terms which aren't part of sum
    terms *= summation_mask
    # axis 0 is theta
    # axis 1 is i
    # axis 2 & 3 are x and y
    # so sum across axis 1
    terms = terms.sum(axis=1)
    return terms

def fast_constant(theta1, theta2, theta0, x, y):
    theta1 = theta1.astype('float64')
    theta2 = theta2.astype('float64')
    exponential_part = np.exp(-(theta1 + theta2 + theta0)).reshape(-1, 1, 1)
    # x and y must be 1D
    assert len(x.shape) == 1
    assert len(y.shape) == 1
    # x and y must have same shape
    assert x.shape == y.shape
    x_len, y_len = x.shape[0], y.shape[0]
    x = x.reshape((x_len, 1))
    y = y.reshape((1, y_len))
    double_factorial = (np.power(np.array(theta1).reshape(-1, 1, 1), x)/factorial(x)) * \
                       (np.power(np.array(theta2).reshape(-1, 1, 1), y)/factorial(y))
    return exponential_part * double_factorial

def fast_impl(theta1, theta2, theta0, x, y):
    return fast_summation(theta1, theta2, theta0, x, y) * fast_constant(theta1, theta2, theta0, x, y)
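A quick consistency check (my addition) comparing the vectorized implementation against the loop-based reference on a small example:

theta1 = np.array([0.8, 1.2])
theta2 = np.array([1.5, 0.7])
theta0 = 0.5
x = np.arange(5)
y = np.arange(5)
assert np.allclose(fast_impl(theta1, theta2, theta0, x, y),
                   reference_impl(theta1, theta2, theta0, x, y))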
Benchmarking
Assuming that X and Y range from 0 to 20, and that theta is centered somewhere inside that range, I get the result that the numpy version is roughly 280 times faster than the pure python reference.
Numerical stability
I'm unsure how numerically stable this is. For example, when I center theta at 100, I get a floating-point overflow. Typically, when computing an expression which has lots of choose and factorial expressions inside it, you'll use some mathematical equivalent which results in smaller intermediate sums. In this case I have so little understanding of the math that I don't know how you'd do that.
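One standard trick, sketched here as my addition (not from the original answer), is to work in log-space with scipy.special.gammaln so the huge factorials and powers never appear as intermediate values:

import numpy as np
from scipy.special import gammaln

def log_constant(theta1, theta2, theta0, x, y):
    # log of exp(-(t1 + t2 + t0)) * t1**x / x! * t2**y / y!
    return (-(theta1 + theta2 + theta0)
            + x * np.log(theta1) - gammaln(x + 1)
            + y * np.log(theta2) - gammaln(y + 1))

# The summation part can likewise be built from log-binomials,
# log C(n, k) = gammaln(n+1) - gammaln(k+1) - gammaln(n-k+1),
# and combined with scipy.special.logsumexp over the i axis.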

Transport equation in 1D (python)

I'm trying to write a Python program to solve the 1D convection equation using the finite-difference method (upwind scheme). The problem is as follows (the problem statement in the original post was an image, omitted here):
Here's what I've attempted
from numpy import *
from numpy.linalg import *
from matplotlib.pyplot import *

def u0(x):
    if (0.4 <= x <= 0.5):
        y = 10*(x - 0.4)
    elif (0.5 <= x <= 0.6):
        y = 10*(0.6 - x)
    else:
        y = 0
    return y

print('Choice of the transport speed c: ')
c = float(input('c = '))

def solex(x, t):
    return u0(x - c*t)

print('Choice of the step h: ')
h = float(input('h = '))
print('Choice of the time step dt and the final time T: ')
dt = float(input('dt = '))
T = float(input('T = '))

# Mesh
N = int((1/h) - 1)
x = linspace(0, 1, N + 2)
M = int((T/dt) - 1)
t = linspace(0, T, M + 2)

# Iteration
U1 = zeros(N)
U2 = zeros(N)
sol = zeros((N, M + 2))
for i in range(1, N + 1):
    U1[i - 1] = u0(x[i])
sol[:, 0] = U1
for j in range(1, size(t)):
    for i in range(1, N-1):
        U2[i] = U1[i] - c*(dt/h)*(U1[i] - U1[i - 1])
    sol[:, j] = U2
    U1 = U2
It doesn't seem to work, and I don't know why.
Though you said you already solved your problem, I would still like to suggest some general improvements:
wildcard imports like from numpy import * are considered bad practice; better to use import numpy as np and refer to the necessary functions as np.linspace etc.
the power of numpy comes from vectorization, so try to replace as many for-loops as possible with vectorized operations.
at least from what you showed us, the variables U1 and U2 are not really necessary.
using input for every single parameter might be overkill.
The following code includes my suggested improvements. Note how I replaced your u0 with a vectorized version using np.piecewise and replaced several for-loops. I also added a visualisation.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

def u0(x):
    y = np.piecewise(
        x,
        [(0.4 <= x) & (x <= 0.5), (0.5 <= x) & (x <= 0.6)],
        [lambda x: 10*(x - 0.4), lambda x: 10*(0.6 - x), 0])
    return y

c = 0.9
h = 0.01
dt = 0.01
T = 2

N = int(np.ceil(1/h))
x = np.linspace(0, 1, N)
M = int(np.ceil(T/dt))
t = np.linspace(0, T, M)

# solve with upwind scheme
sol = np.zeros((N, M))
sol[:, 0] = u0(x)
# you could add boundary values here by setting
# sol[0, :] = <your_boundary_data>
for i in range(1, len(t)):
    sol[1:, i] = sol[1:, i-1] - c*(dt/h)*(sol[1:, i-1] - sol[:-1, i-1])

# Visualization
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.set_xlabel('x')
ax.set_ylabel('t')
T, X = np.meshgrid(t, x)
surf = ax.plot_surface(X, T, sol)
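One practical note (my addition, not from the original answer): the explicit upwind scheme is stable only if the Courant number satisfies the CFL condition c*dt/h <= 1; with the values above, c*dt/h = 0.9. A quick guard:

courant = c * dt / h
assert courant <= 1, f"CFL condition violated: c*dt/h = {courant}"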

Solve a nonlinear ODE system for the Frenet frame

I have checked python non linear ODE with 2 variables, which is not my case. Maybe my case is not called a nonlinear ODE; correct me please.
The question is actually about the Frenet frame, in which there are 3 vectors T(s), N(s) and B(s); the parameter s >= 0. And there are 2 scalar functions with known formula expressions, t(s) and k(s). I have the initial values T(0), N(0) and B(0).
diff(T(s), s) = k(s)*N(s)
diff(N(s), s) = -k(s)*T(s) + t(s)*B(s)
diff(B(s), s) = -t(s)*N(s)
Then how can I get T(s), N(s) and B(s) numerically or symbolically?
I have checked scipy.integrate.ode, but I don't know how to pass k(s)*N(s) as its first parameter at all:
def model(z, tspan):
    T = z[0]
    N = z[1]
    B = z[2]
    dTds = k(s) * N   # how to express function k(s)?
    dNds = -k(s) * T + t(s) * B
    dBds = -t(s) * N
    return [dTds, dNds, dBds]

z = scipy.integrate.ode(model, [T0, N0, B0])
Here is code using the solve_ivp interface from Scipy (instead of odeint) to obtain a numerical solution:
import numpy as np
from scipy.integrate import solve_ivp
from scipy.integrate import cumtrapz
import matplotlib.pylab as plt

# Define the parameters as regular Python functions:
def k(s):
    return 1

def t(s):
    return 0

# The equations: dz/ds = model(s, z):
def model(s, z):
    T = z[:3]  # z is a (9,) shaped array, the concatenation of T, N and B
    N = z[3:6]
    B = z[6:]
    dTds = k(s) * N
    dNds = -k(s) * T + t(s) * B
    dBds = -t(s) * N
    return np.hstack([dTds, dNds, dBds])

T0, N0, B0 = [1, 0, 0], [0, 1, 0], [0, 0, 1]
z0 = np.hstack([T0, N0, B0])

s_span = (0, 6)  # start and final "time"
t_eval = np.linspace(*s_span, 100)  # defines the points wanted in-between;
                                    # not strictly necessary, as the solver
                                    # chooses its own steps automatically, but
                                    # used here to obtain a reasonably accurate
                                    # integration of the coordinates, see the graph

# Solve:
sol = solve_ivp(model, s_span, z0, t_eval=t_eval, method='RK45')
print(sol.message)
# >> The solver successfully reached the end of the integration interval.

# Unpack the solution:
T, N, B = np.split(sol.y, 3)  # another way to unpack the z array
s = sol.t

# Bonus: integration of the tangent vector in order to get the coordinates
# and plot the curve (there is certainly a better way to do this)
coords = cumtrapz(T, x=s)
plt.plot(coords[0, :], coords[1, :])
plt.axis('equal'); plt.xlabel('x'); plt.ylabel('y')
T, N and B are vectors. Therefore, there are 9 equations to solve: z is a (9,) array.
For constant curvature and no torsion, the result is a circle. (The plot from the original answer is omitted here.)
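As a quick analytic check (my addition, to run after the code above): with k = 1 and t = 0, the exact solution is T(s) = (cos s, sin s, 0) and N(s) = (-sin s, cos s, 0), so the numerical solution can be verified directly:

assert np.allclose(T[0], np.cos(s), atol=1e-2)
assert np.allclose(T[1], np.sin(s), atol=1e-2)
assert np.allclose(N[0], -np.sin(s), atol=1e-2)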
Thanks for your example. I thought about it again and found that, since there is a formula for dZ where Z is the matrix (T, N, B), we can calculate Z[i] = Z[i-1] + dZ[i-1]*deltaS according to the definition of the derivative (i.e. the explicit Euler method). I coded this and found that the idea solves the circle example. So:
is Z[i] = Z[i-1] + dZ[i-1]*deltaS suitable for other ODEs? Will it fail in some situations, or do scipy.integrate.solve_ivp/scipy.integrate.ode offer advantages over the direct use of Z[i] = Z[i-1] + dZ[i-1]*deltaS?
in my code, I have to normalize Z[i] because ||Z[i]|| is not always 1. Why does this happen? Is it a floating-point numerical error?
My answer to my own question; at least it works for the circle:
import numpy as np
from scipy.integrate import cumtrapz
import matplotlib.pylab as plt

# Define the parameters as regular Python functions:
def k(s):
    return 1

def t(s):
    return 0

def dZ(s, Z):
    return np.array(
        [k(s) * Z[1], -k(s) * Z[0] + t(s) * Z[2], -t(s) * Z[1]]
    )

T0, N0, B0 = np.array([1, 0, 0]), np.array([0, 1, 0]), np.array([0, 0, 1])
deltaS = 0.1  # step to calculate dZ/ds
num = int(2*np.pi*1/deltaS) + 1  # how many points on the curve we have to calculate
T = np.zeros([num, ], dtype=object)
N = np.zeros([num, ], dtype=object)
B = np.zeros([num, ], dtype=object)
T[0] = T0
N[0] = N0
B[0] = B0
for i in range(num-1):
    temp_dZ = dZ(i*deltaS, np.array([T[i], N[i], B[i]]))
    T[i+1] = T[i] + temp_dZ[0]*deltaS
    T[i+1] = T[i+1]/np.linalg.norm(T[i+1])  # have to do this
    N[i+1] = N[i] + temp_dZ[1]*deltaS
    N[i+1] = N[i+1]/np.linalg.norm(N[i+1])
    B[i+1] = B[i] + temp_dZ[2]*deltaS
    B[i+1] = B[i+1]/np.linalg.norm(B[i+1])

coords = cumtrapz(
    [
        [i[0] for i in T], [i[1] for i in T], [i[2] for i in T]
    ],
    x=np.arange(num)*deltaS
)
plt.figure()
plt.plot(coords[0, :], coords[1, :])
plt.axis('equal'); plt.xlabel('x'); plt.ylabel('y')
plt.show()
I found that the equations I listed in the first post do not work for my curve. So I read Gray A., Abbena E., Salamon S., Modern Differential Geometry of Curves and Surfaces with Mathematica (2006) and found that, for an arbitrary curve, the Frenet equations should be written as
diff(T(s), s) = ||r'|| * k(s)*N(s)
diff(N(s), s) = ||r'|| * (-k(s)*T(s) + t(s)*B(s))
diff(B(s), s) = ||r'|| * (-t(s)*N(s))
where ||r'|| (or ||r'(s)||) is the norm of diff([x(s), y(s), z(s)], s).
Now the problem is somewhat different from the one in the first post, because there is no r'(s) function or discrete data array, so I think this deserves a new reply rather than a comment.
I met 2 questions while trying to solve the new equations:
how can we program with r'(s) if scipy's solve_ivp is used?
I tried to modify my previous solution, but the result is totally wrong.
Thanks again.
import numpy as np
from scipy.integrate import cumtrapz
import matplotlib.pylab as plt

# Define the parameters as regular Python functions:
def k(s):
    return 1

def t(s):
    return 0

def dZ(s, Z, r_norm):
    return np.array([
        r_norm * k(s) * Z[1],
        r_norm * (-k(s) * Z[0] + t(s) * Z[2]),
        r_norm * (-t(s) * Z[1])
    ])

T0, N0, B0 = np.array([1, 0, 0]), np.array([0, 1, 0]), np.array([0, 0, 1])
deltaS = 0.1  # step to calculate dZ/ds
num = int(2*np.pi*1/deltaS) + 1  # how many points on the curve we have to calculate
T = np.zeros([num, ], dtype=object)
N = np.zeros([num, ], dtype=object)
B = np.zeros([num, ], dtype=object)
R0 = N0
T[0] = T0
N[0] = N0
B[0] = B0
for i in range(num-1):
    r_norm = np.linalg.norm(R0)
    temp_dZ = dZ(i*deltaS, np.array([T[i], N[i], B[i]]), r_norm)
    T[i+1] = T[i] + temp_dZ[0]*deltaS
    T[i+1] = T[i+1]/np.linalg.norm(T[i+1])
    N[i+1] = N[i] + temp_dZ[1]*deltaS
    N[i+1] = N[i+1]/np.linalg.norm(N[i+1])
    B[i+1] = B[i] + temp_dZ[2]*deltaS
    B[i+1] = B[i+1]/np.linalg.norm(B[i+1])
    R0 = R0 + T[i]*deltaS

coords = cumtrapz(
    [
        [i[0] for i in T], [i[1] for i in T], [i[2] for i in T]
    ],
    x=np.arange(num)*deltaS
)
plt.figure()
plt.plot(coords[0, :], coords[1, :])
plt.axis('equal'); plt.xlabel('x'); plt.ylabel('y')
plt.show()

Procrustes Analysis with NumPy?

Is there something like Matlab's procrustes function in NumPy/SciPy or related libraries?
For reference: Procrustes analysis aims to align 2 sets of points (in other words, 2 shapes) to minimize the squared distance between them by removing scale, translation and rotation warp components.
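In other words (my summary of the above), Procrustes analysis solves

$$\min_{s,\,R,\,t}\; \lVert X - (s\, Y R + \mathbf{1} t^\top) \rVert_F^2 \quad \text{subject to } R^\top R = I,$$

over a scale $s$, an orthogonal matrix $R$ and a translation $t$, where the rows of $X$ and $Y$ are corresponding points of the two shapes.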
Example in Matlab:
X = [0 1; 2 3; 4 5; 6 7; 8 9]; % first shape
R = [1 2; 2 1]; % rotation matrix
t = [3 5]; % translation vector
Y = X * R + repmat(t, 5, 1); % warped shape, no scale and no distortion
[d Z] = procrustes(X, Y); % Z is Y aligned back to X
Z
Z =
0.0000 1.0000
2.0000 3.0000
4.0000 5.0000
6.0000 7.0000
8.0000 9.0000
Same task in NumPy:
X = arange(10).reshape((5, 2))
R = array([[1, 2], [2, 1]])
t = array([3, 5])
Y = dot(X, R) + t
Z = ???
Note: I'm only interested in aligned shape, since square error (variable d in Matlab code) is easily computed from 2 shapes.
I'm not aware of any pre-existing implementation in Python, but it's easy to take a look at the MATLAB code using edit procrustes.m and port it to Numpy:
import numpy as np

def procrustes(X, Y, scaling=True, reflection='best'):
    """
    A port of MATLAB's `procrustes` function to Numpy.

    Procrustes analysis determines a linear transformation (translation,
    reflection, orthogonal rotation and scaling) of the points in Y to best
    conform them to the points in matrix X, using the sum of squared errors
    as the goodness-of-fit criterion.

        d, Z, [tform] = procrustes(X, Y)

    Inputs:
    ------------
    X, Y
        matrices of target and input coordinates. they must have equal
        numbers of points (rows), but Y may have fewer dimensions
        (columns) than X.

    scaling
        if False, the scaling component of the transformation is forced
        to 1

    reflection
        if 'best' (default), the transformation solution may or may not
        include a reflection component, depending on which fits the data
        best. setting reflection to True or False forces a solution with
        reflection or no reflection respectively.

    Outputs
    ------------
    d
        the residual sum of squared errors, normalized according to a
        measure of the scale of X, ((X - X.mean(0))**2).sum()

    Z
        the matrix of transformed Y-values

    tform
        a dict specifying the rotation, translation and scaling that
        maps Y --> X
    """
    n, m = X.shape
    ny, my = Y.shape

    muX = X.mean(0)
    muY = Y.mean(0)

    X0 = X - muX
    Y0 = Y - muY

    ssX = (X0**2.).sum()
    ssY = (Y0**2.).sum()

    # centred Frobenius norm
    normX = np.sqrt(ssX)
    normY = np.sqrt(ssY)

    # scale to equal (unit) norm
    X0 /= normX
    Y0 /= normY

    if my < m:
        Y0 = np.concatenate((Y0, np.zeros((n, m - my))), 0)

    # optimum rotation matrix of Y
    A = np.dot(X0.T, Y0)
    U, s, Vt = np.linalg.svd(A, full_matrices=False)
    V = Vt.T
    T = np.dot(V, U.T)

    if reflection != 'best':
        # does the current solution use a reflection?
        have_reflection = np.linalg.det(T) < 0
        # if that's not what was specified, force another reflection
        if reflection != have_reflection:
            V[:, -1] *= -1
            s[-1] *= -1
            T = np.dot(V, U.T)

    traceTA = s.sum()

    if scaling:
        # optimum scaling of Y
        b = traceTA * normX / normY
        # standardised distance between X and b*Y*T + c
        d = 1 - traceTA**2
        # transformed coords
        Z = normX*traceTA*np.dot(Y0, T) + muX
    else:
        b = 1
        d = 1 + ssY/ssX - 2 * traceTA * normY / normX
        Z = normY*np.dot(Y0, T) + muX

    # transformation matrix
    if my < m:
        T = T[:my, :]
    c = muX - b*np.dot(muY, T)

    # transformation values
    tform = {'rotation': T, 'scale': b, 'translation': c}

    return d, Z, tform
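A quick usage sketch (my addition), applying the port above to the NumPy version of the question's example; since the example points are collinear, the similarity transform recovers X exactly:

import numpy as np

X = np.arange(10, dtype=float).reshape((5, 2))
R = np.array([[1, 2], [2, 1]])
t = np.array([3, 5])
Y = np.dot(X, R) + t

d, Z, tform = procrustes(X, Y)
assert np.allclose(Z, X)  # Y aligned back onto X
assert abs(d) < 1e-8      # residual is ~0 for this example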
There is a Scipy function for it: scipy.spatial.procrustes. Note that, unlike the MATLAB function, it standardizes both input shapes (zero mean, unit norm) and does not return the transformation parameters. I'm just posting its example here:
>>> import numpy as np
>>> from scipy.spatial import procrustes
>>> a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
>>> b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
>>> mtx1, mtx2, disparity = procrustes(a, b)
>>> round(disparity)
0.0
You can have both Ordinary Procrustes Analysis and Generalized Procrustes Analysis in python with something like this:
import numpy as np

def opa(a, b):
    aT = a.mean(0)
    bT = b.mean(0)
    A = a - aT
    B = b - bT
    aS = np.sum(A * A)**.5
    bS = np.sum(B * B)**.5
    A /= aS
    B /= bS
    U, _, V = np.linalg.svd(np.dot(B.T, A))
    aR = np.dot(U, V)
    if np.linalg.det(aR) < 0:
        V[1] *= -1
        aR = np.dot(U, V)
    aS = aS / bS
    aT -= (bT.dot(aR) * aS)
    aD = (np.sum((A - B.dot(aR))**2) / len(a))**.5
    return aR, aS, aT, aD

def gpa(v, n=-1):
    if n < 0:
        p = avg(v)
    else:
        p = v[n]
    l = len(v)
    r, s, t, d = np.ndarray((4, l), object)
    for i in range(l):
        r[i], s[i], t[i], d[i] = opa(p, v[i])
    return r, s, t, d

def avg(v):
    v_ = np.copy(v)
    l = len(v_)
    R, S, T = [list(np.zeros(l)) for _ in range(3)]
    for i, j in np.ndindex(l, l):
        r, s, t, _ = opa(v_[i], v_[j])
        R[j] += np.arccos(min(1, max(-1, np.trace(r[:1])))) * np.sign(r[1][0])
        S[j] += s
        T[j] += t
    for i in range(l):
        a = R[i] / l
        r = [np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]
        v_[i] = v_[i].dot(r) * (S[i] / l) + (T[i] / l)
    return v_.mean(0)
For testing purposes, the output of each algorithm can be visualized as follows:
import matplotlib.pyplot as p; p.rcParams['toolbar'] = 'None'

def plt(o, e, b):
    p.figure(figsize=(10, 10), dpi=72, facecolor='w').add_axes([0.05, 0.05, 0.9, 0.9], aspect='equal')
    p.plot(0, 0, marker='x', mew=1, ms=10, c='g', zorder=2, clip_on=False)
    p.gcf().canvas.set_window_title('%f' % e)
    x = np.ravel(o[0].T[0])
    y = np.ravel(o[0].T[1])
    p.xlim(min(x), max(x))
    p.ylim(min(y), max(y))
    a = []
    for i, j in np.ndindex(len(o), 2):
        a.append(o[i].T[j])
    O = p.plot(*a, marker='x', mew=1, ms=10, lw=.25, c='b', zorder=0, clip_on=False)
    O[0].set(c='r', zorder=1)
    if not b:
        O[2].set_color('b')
        O[2].set_alpha(0.4)
    p.axis('off')
    p.show()

# Fly wings example (Klingenberg, 2015 | https://en.wikipedia.org/wiki/Procrustes_analysis)
arr1 = np.array([[588.0, 443.0], [178.0, 443.0], [56.0, 436.0], [50.0, 376.0], [129.0, 360.0], [15.0, 342.0], [92.0, 293.0], [79.0, 269.0], [276.0, 295.0], [281.0, 331.0], [785.0, 260.0], [754.0, 174.0], [405.0, 233.0], [386.0, 167.0], [466.0, 59.0]])
arr2 = np.array([[477.0, 557.0], [130.129, 374.307], [52.0, 334.0], [67.662, 306.953], [111.916, 323.0], [55.119, 275.854], [107.935, 277.723], [101.899, 259.73], [175.0, 329.0], [171.0, 345.0], [589.0, 527.0], [591.0, 468.0], [299.0, 363.0], [306.0, 317.0], [406.0, 288.0]])

def opa_out(a):
    r, s, t, d = opa(a[0], a[1])
    a[1] = a[1].dot(r) * s + t
    return a, d, False

plt(*opa_out([arr1, arr2, np.matrix.copy(arr2)]))

def gpa_out(a):
    g = gpa(a, -1)
    D = [avg(a)]
    for i in range(len(a)):
        D.append(a[i].dot(g[0][i]) * g[1][i] + g[2][i])
    return D, sum(g[3])/len(a), True

plt(*gpa_out([arr1, arr2]))
You may also want to try this package, which implements various flavors of Procrustes methods: https://github.com/theochem/procrustes.

Python 3D polynomial surface fit, order dependent

I am currently working with astronomical data, among which I have comet images. I would like to remove the background sky gradient in these images, which arises due to the time of capture (twilight). The first program I developed to do so took user-selected points from Matplotlib's ginput (x, y), pulled the data for each coordinate (z), and then gridded the data in a new array with SciPy's griddata.
Since the background is assumed to vary only slightly, I would like to fit a 3D low-order polynomial to this set of (x, y, z) points. However, griddata does not allow for an input order:
griddata(points, values, (dimension_x, dimension_y), method='nearest/linear/cubic')
Any ideas on another function that may be used, or a method for developing a least-squares fit that will allow me to control the order?
Griddata uses a spline fitting. A 3rd order spline is not the same thing as a 3rd order polynomial (instead, it's a different 3rd order polynomial at every point).
If you just want to fit a 2D, 3rd order polynomial to your data, then do something like the following to estimate the 16 coefficients using all of your data points.
import itertools
import numpy as np
import matplotlib.pyplot as plt

def main():
    # Generate Data...
    numdata = 100
    x = np.random.random(numdata)
    y = np.random.random(numdata)
    z = x**2 + y**2 + 3*x**3 + y + np.random.random(numdata)

    # Fit a 3rd order, 2d polynomial
    m = polyfit2d(x, y, z)

    # Evaluate it on a grid...
    nx, ny = 20, 20
    xx, yy = np.meshgrid(np.linspace(x.min(), x.max(), nx),
                         np.linspace(y.min(), y.max(), ny))
    zz = polyval2d(xx, yy, m)

    # Plot
    plt.imshow(zz, extent=(x.min(), y.max(), x.max(), y.min()))
    plt.scatter(x, y, c=z)
    plt.show()

def polyfit2d(x, y, z, order=3):
    ncols = (order + 1)**2
    G = np.zeros((x.size, ncols))
    ij = itertools.product(range(order+1), range(order+1))
    for k, (i, j) in enumerate(ij):
        G[:, k] = x**i * y**j
    m, _, _, _ = np.linalg.lstsq(G, z)
    return m

def polyval2d(x, y, m):
    order = int(np.sqrt(len(m))) - 1
    ij = itertools.product(range(order+1), range(order+1))
    z = np.zeros_like(x)
    for a, (i, j) in zip(m, ij):
        z += a * x**i * y**j
    return z

main()
The following implementation of polyfit2d uses the available numpy methods numpy.polynomial.polynomial.polyvander2d and numpy.polynomial.polynomial.polyval2d:
#!/usr/bin/env python3

import unittest

def polyfit2d(x, y, f, deg):
    from numpy.polynomial import polynomial
    import numpy as np
    x = np.asarray(x)
    y = np.asarray(y)
    f = np.asarray(f)
    deg = np.asarray(deg)
    vander = polynomial.polyvander2d(x, y, deg)
    vander = vander.reshape((-1, vander.shape[-1]))
    f = f.reshape((vander.shape[0],))
    c = np.linalg.lstsq(vander, f)[0]
    return c.reshape(deg+1)

class MyTest(unittest.TestCase):

    def setUp(self):
        return self

    def test_1(self):
        self._test_fit(
            [-1, 2, 3],
            [ 4, 5, 6],
            [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
            [2, 2])

    def test_2(self):
        self._test_fit(
            [-1, 2],
            [ 4, 5],
            [[1, 2], [4, 5]],
            [1, 1])

    def test_3(self):
        self._test_fit(
            [-1, 2, 3],
            [ 4, 5],
            [[1, 2], [4, 5], [7, 8]],
            [2, 1])

    def test_4(self):
        self._test_fit(
            [-1, 2, 3],
            [ 4, 5],
            [[1, 2], [4, 5], [0, 0]],
            [2, 1])

    def test_5(self):
        self._test_fit(
            [-1, 2, 3],
            [ 4, 5],
            [[1, 2], [4, 5], [0, 0]],
            [1, 1])

    def _test_fit(self, x, y, c, deg):
        from numpy.polynomial import polynomial
        import numpy as np
        X = np.array(np.meshgrid(x, y))
        f = polynomial.polyval2d(X[0], X[1], c)
        c1 = polyfit2d(X[0], X[1], f, deg)
        np.testing.assert_allclose(c1,
                                   np.asarray(c)[:deg[0]+1, :deg[1]+1],
                                   atol=1e-12)

unittest.main()
Following the principle of least squares, and imitating Kington's style, but splitting the single order argument m into two arguments m_1 and m_2:
import numpy as np
import matplotlib.pyplot as plt
import itertools

# w = (Phi^T Phi)^{-1} Phi^T t
# where Phi_{k, j + i (m_2 + 1)} = x_k^i y_k^j,
#       t_k = z_k,
#       i = 0, 1, ..., m_1,
#       j = 0, 1, ..., m_2,
#       k = 0, 1, ..., n - 1
def polyfit2d(x, y, z, m_1, m_2):
    # Generate Phi by setting Phi as x^i y^j
    nrows = x.size
    ncols = (m_1 + 1) * (m_2 + 1)
    Phi = np.zeros((nrows, ncols))
    ij = itertools.product(range(m_1 + 1), range(m_2 + 1))
    for h, (i, j) in enumerate(ij):
        Phi[:, h] = x ** i * y ** j
    # Generate t by setting t as z
    t = z
    # Generate w by solving (Phi^T Phi) w = Phi^T t
    w = np.linalg.solve(Phi.T.dot(Phi), (Phi.T.dot(t)))
    return w

# t' = Phi' w
# where Phi'_{k, j + i (m_2 + 1)} = x'_k^i y'_k^j,
#       t'_k = z'_k,
#       i = 0, 1, ..., m_1,
#       j = 0, 1, ..., m_2,
#       k = 0, 1, ..., n' - 1
def polyval2d(x_, y_, w, m_1, m_2):
    # Generate Phi' by setting Phi' as x'^i y'^j
    nrows = x_.size
    ncols = (m_1 + 1) * (m_2 + 1)
    Phi_ = np.zeros((nrows, ncols))
    ij = itertools.product(range(m_1 + 1), range(m_2 + 1))
    for h, (i, j) in enumerate(ij):
        Phi_[:, h] = x_ ** i * y_ ** j
    # Generate t' by setting t' as Phi' w
    t_ = Phi_.dot(w)
    # Generate z_ by setting z_ as t_
    z_ = t_
    return z_

if __name__ == "__main__":
    # Generate x, y, z
    n = 100
    x = np.random.random(n)
    y = np.random.random(n)
    z = x ** 2 + y ** 2 + 3 * x ** 3 + y + np.random.random(n)
    # Generate w
    w = polyfit2d(x, y, z, m_1=3, m_2=2)
    # Generate x', y', z'
    n_ = 1000
    x_, y_ = np.meshgrid(np.linspace(x.min(), x.max(), n_),
                         np.linspace(y.min(), y.max(), n_))
    z_ = np.zeros((n_, n_))
    for i in range(n_):
        z_[i, :] = polyval2d(x_[i, :], y_[i, :], w, m_1=3, m_2=2)
    # Plot
    plt.imshow(z_, extent=(x_.min(), y_.max(), x_.max(), y_.min()))
    plt.scatter(x, y, c=z)
    plt.show()
If anyone is looking to fit a polynomial of a specific total order (rather than polynomials where each variable's highest power equals order), you can make this adjustment to the accepted answer's polyfit and polyval:
instead of:
ij = itertools.product(range(order+1), range(order+1))
which, for order=2, gives [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)] (i.e. terms up to a 4th degree polynomial), you can use
def xy_powers(order):
    powers = itertools.product(range(order + 1), range(order + 1))
    return [tup for tup in powers if sum(tup) <= order]
This returns [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (2, 0)] for order=2.
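Plugged into the least-squares setup from the accepted answer, the adjusted fit and evaluation might look like this (a sketch under those assumptions; polyfit2d_total and polyval2d_total are my hypothetical names, not from the original):

import itertools
import numpy as np

def polyfit2d_total(x, y, z, order=3):
    # Same design matrix as Kington's answer, restricted to total degree <= order
    powers = xy_powers(order)
    G = np.zeros((x.size, len(powers)))
    for k, (i, j) in enumerate(powers):
        G[:, k] = x**i * y**j
    m, _, _, _ = np.linalg.lstsq(G, z, rcond=None)
    return m, powers

def polyval2d_total(x, y, m, powers):
    # Evaluate using the same (i, j) power list returned by the fit
    z = np.zeros_like(x, dtype=float)
    for a, (i, j) in zip(m, powers):
        z += a * x**i * y**j
    return z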
