Translating from MATLAB: new problems (complex Ginzburg-Landau equation) - Python

Thank you for all of your constructive criticism on my last post. I have made some changes, but alas my code is still not working and I can't figure out why. What happens when I run this version is that I get a runtime warning about an invalid value encountered in matmul.
My code is given as
from __future__ import division
import numpy as np
from scipy.linalg import eig
from scipy.linalg import toeplitz
def poldif(*arg):
"""
Calculate differentiation matrices on arbitrary nodes.
Returns the differentiation matrices D1, D2, .. DM corresponding to the
M-th derivative of the function f at arbitrarily specified nodes. The
differentiation matrices can be computed with unit weights or
with specified weights.
Parameters
----------
x : ndarray
vector of N distinct nodes
M : int
maximum order of the derivative, 0 < M <= N - 1
OR (when computing with specified weights)
x : ndarray
vector of N distinct nodes
alpha : ndarray
vector of weight values alpha(x), evaluated at x = x_j.
B : ndarray
matrix of size M x N, where M is the highest derivative required.
It should contain the quantities B[l,j] = beta_{l,j} =
l-th derivative of log(alpha(x)), evaluated at x = x_j.
Returns
-------
DM : ndarray
M x N x N array of differentiation matrices
Notes
-----
This function returns M differentiation matrices corresponding to the
1st, 2nd, ... M-th derivatives on arbitrary nodes specified in the array
x. The nodes must be distinct but are, otherwise, arbitrary. The
matrices are constructed by differentiating an N-th order Lagrange
interpolating polynomial that passes through the specified points.
The M-th derivative of the grid function f is obtained by the matrix-
vector multiplication
.. math::
f^{(m)}_i = D^{(m)}_{ij}f_j
This function is based on code by Rex Fuzzle
https://github.com/RexFuzzle/Python-Library
References
----------
..[1] B. Fornberg, Generation of Finite Difference Formulas on Arbitrarily
Spaced Grids, Mathematics of Computation 51, no. 184 (1988): 699-706.
..[2] J. A. C. Weidemann and S. C. Reddy, A MATLAB Differentiation Matrix
Suite, ACM Transactions on Mathematical Software, 26, (2000) : 465-519
"""
    if len(arg) > 3:
        raise Exception('number of arguments is either two OR three')

    if len(arg) == 2:
        # unit weight function : arguments are nodes and derivative order
        x, M = arg[0], arg[1]
        N = np.size(x)
        # assert M < N, "Derivative order cannot be larger or equal to number of points"
        if M >= N:
            raise Exception("Derivative order cannot be larger or equal to number of points")
        alpha = np.ones(N)
        B = np.zeros((M, N))

    elif len(arg) == 3:
        # specified weight function : arguments are nodes, weights and B matrix
        x, alpha, B = arg[0], arg[1], arg[2]
        N = np.size(x)
        M = B.shape[0]

    I = np.eye(N)                          # identity matrix
    L = np.logical_or(I, np.zeros(N))      # logical identity matrix
    XX = np.transpose(np.array([x, ] * N))
    DX = XX - np.transpose(XX)             # DX contains entries x(k)-x(j)
    DX[L] = np.ones(N)                     # put 1's on the main diagonal
    c = alpha * np.prod(DX, 1)             # quantities c(j)
    C = np.transpose(np.array([c, ] * N))
    C = C / np.transpose(C)                # matrix with entries c(k)/c(j).
    Z = 1 / DX                             # Z contains entries 1/(x(k)-x(j))
    Z[L] = 0                               # eye(N)*ZZ; # with zeros on the diagonal.
    X = np.transpose(np.copy(Z))           # X is same as Z', but with ...
    Xnew = X
    for i in range(0, N):
        Xnew[i:N - 1, i] = X[i + 1:N, i]
    X = Xnew[0:N - 1, :]                   # ... diagonal entries removed
    Y = np.ones([N - 1, N])                # initialize Y and D matrices.
    D = np.eye(N)                          # Y is matrix of cumulative sums
    DM = np.empty((M, N, N))               # differentiation matrices
    for ell in range(1, M + 1):
        Y = np.cumsum(np.vstack((B[ell - 1, :], ell * (Y[0:N - 1, :]) * X)), 0)  # diags
        D = ell * Z * (C * np.transpose(np.tile(np.diag(D), (N, 1))) - D)        # off-diags
        D[L] = Y[N - 1, :]
        DM[ell - 1, :, :] = D
    return DM
def herdif(N, M, b=1):
"""
Calculate differentiation matrices using Hermite collocation.
Returns the differentiation matrices D1, D2, .. DM corresponding to the
M-th derivative of the function f, at the N Hermite nodes (the zeros of
the N-th degree Hermite polynomial), scaled by the parameter b.
Parameters
----------
N : int
number of grid points
M : int
maximum order of the derivative, 0 < M < N
b : float, optional
scale parameter, real and positive
Returns
-------
x : ndarray
N x 1 array of Hermite nodes which are zeros of the N-th degree
Hermite polynomial, scaled by b
DM : ndarray
M x N x N array of differentiation matrices
Notes
-----
This function returns M differentiation matrices corresponding to the
1st, 2nd, ... M-th derivatives on a Hermite grid of N points. The
matrices are constructed by differentiating N-th order Hermite
interpolants.
The M-th derivative of the grid function f is obtained by the matrix-
vector multiplication
.. math::
f^{(m)}_i = D^{(m)}_{ij}f_j
References
----------
..[1] B. Fornberg, Generation of Finite Difference Formulas on Arbitrarily
Spaced Grids, Mathematics of Computation 51, no. 184 (1988): 699-706.
..[2] J. A. C. Weidemann and S. C. Reddy, A MATLAB Differentiation Matrix
Suite, ACM Transactions on Mathematical Software, 26, (2000) : 465-519
..[3] R. Baltensperger and M. R. Trummer, Spectral Differencing With A
Twist, SIAM Journal on Scientific Computing 24, (2002) : 1465-1487
"""
    if M >= N - 1:
        raise Exception('number of nodes must be greater than M + 1')
    if M <= 0:
        raise Exception('derivative order must be at least 1')

    x = herroots(N)              # compute Hermite nodes
    alpha = np.exp(-x * x / 2)   # compute Hermite weights.

    beta = np.zeros([M + 1, N])
    # construct beta(l,j) = d^l/dx^l (alpha(x)/alpha'(x))|x=x_j recursively
    beta[0, :] = np.ones(N)
    beta[1, :] = -x
    for ell in range(2, M + 1):
        beta[ell, :] = -x * beta[ell - 1, :] - (ell - 1) * beta[ell - 2, :]

    # remove initialising row from beta
    beta = np.delete(beta, 0, 0)

    # compute differentiation matrix (b=1)
    DM = poldif(x, alpha, beta)
    # scale nodes by the factor b
    x = x / b
    # scale the matrix by the factor b
    for ell in range(M):
        DM[ell, :, :] = (b ** (ell + 1)) * DM[ell, :, :]
    return x, DM
def herroots(N):
"""
Compute roots of the Hermite polynomial of degree N
Parameters
----------
N : int
degree of the Hermite polynomial
Returns
-------
x : ndarray
N x 1 array of Hermite roots
"""
    # Jacobi matrix
    d = np.sqrt(np.arange(1, N))
    J = np.diag(d, 1) + np.diag(d, -1)

    # compute eigenvalues
    mu = eig(J)[0]

    # return sorted, normalised eigenvalues
    # real part only since all roots must be real.
    return np.real(np.sort(mu) / np.sqrt(2))
a = 1-1j
b = 2+0.2j
c1 = 0.34
c2 = 0.005
alpha1 = (4*c2/a)**0.25
alpha2 = b/2*a
Nx = 220;
# hermite differentiation matrices
[x,D] = herdif(Nx, 2, np.real(alpha1))
D1 = D[0,:]
D2 = D[1,:]
# integration weights
diff = np.diff(x)
#print(len(diff))
p = np.concatenate([np.zeros(1), diff])
q = np.concatenate([diff, np.zeros(1)])
w = (p + q)/2
Q = np.diag(w)
#Discretised operator
const = c1*np.diag(np.ones(len(x)))-c2*(np.diag(x)*np.diag(x))
#print(const)
A = a*D2 - b*D1 + const
##### Timestepping
tmax = 200
tmin = 0
dt = 1
n = int((tmax - tmin)/dt)
tvec = np.linspace(0,tmax,n, endpoint = True)
#(len(tvec))
q = np.zeros((Nx, len(tvec)),dtype=complex)
f = np.zeros((Nx, len(tvec)),dtype=complex)
q0 = np.ones(Nx)*10**4
q[:,0] = q0
#print(q[:,0])
#print(q0)
# qnew - qold = dt*Aqold + dt*N(qold,qold,qold)
# qnew - qold = dt*Aqnew - dt*N(qold,qold,qold)
# averaging the two: qnew - qold = 0.5*dt*A*qold + 0.5*dt*A*qnew + dt*N(qold,qold,qold)
# rearranging gives qnew*(1 - 0.5*A*dt) = (1 + 0.5*A*dt)*qold + dt*N(qold,qold,qold)
from numpy.linalg import inv
inverted = inv(np.eye(Nx)-0.5*A*dt)
forqold = (np.eye(Nx) + 0.5*A*dt)
firstterm = np.matmul(inverted,forqold)
for t in range(0, len(tvec)-1):
    nl = abs(np.square(q[:,t]))*q[:,t]
    q[:,t+1] = np.matmul(firstterm,q[:,t]) - dt*np.matmul(inverted,nl)
where the Hermite differentiation matrices can be found online and are in a different file. This code blows up after five iterations, which I cannot understand, as I don't see how it differs from the MATLAB code found here: https://www.bagherigroup.com/research/open-source-codes/
I would really appreciate any help.

Error in:
q[:,t+1] = inverted*forgold*np.array(q[:,t]) + inverted*dt*np.array(nl)
q[:, t+1] indexes a 2d array (probably not a np.matrix which is more MATLAB like). This indexing reduces the number of dimensions by 1, hence the (220,) shape in the error message.
The error message says the RHS is (220,220). That shape probably comes from inverted and forgold. np.array(q[:,t]) is 1d. Multiplying a (220,220) by a (220,) is ok, but you can't put that square array into a 1d slot.
Both uses of np.array in the error line are superfluous. Their arguments are already ndarray.
As for the loop, it may be necessary. It looks like q[:,t+1] is a function of q[:,t], a serial rather than parallel operation. Those are harder to render as 'vectorized' (unless you can use cumsum-like operations).
Note that in numpy * is elementwise multiplication, the .* of MATLAB. np.dot and @ are used for matrix multiplication.
q[:,t+1] = inverted @ q[:,t]
would work.
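Putting those points together, a minimal corrected version of the time-stepping block might look like the sketch below. It reuses the variables Nx, dt, A, q and tvec from the question; the names lhs and rhs are mine, and every product is kept as a matrix-vector product via @ so the shapes stay (220,) on both sides:

import numpy as np
from numpy.linalg import inv

# Crank-Nicolson on the linear operator A, explicit treatment of the cubic term
lhs = inv(np.eye(Nx) - 0.5 * dt * A)   # (I - dt/2 A)^(-1)
rhs = np.eye(Nx) + 0.5 * dt * A        # (I + dt/2 A)

for t in range(len(tvec) - 1):
    nl = np.abs(q[:, t])**2 * q[:, t]              # |q|^2 q, a 1-d vector of length Nx
    q[:, t + 1] = lhs @ (rhs @ q[:, t]) - dt * (lhs @ nl)

Whether the solution then stays bounded depends on A, dt and the sign convention chosen for the nonlinear term, but the array shapes are now consistent.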


How to take advantage of vectorization when computing the pdf for a multivariate gaussian?

I've been spending a few hours googling about this problem and it seems I can't find any information.
I tried coding a multivariate gaussian pdf as:
def multivariate_normal(X, M, S):
    # X has shape (D, N) where D is the number of dimensions and N the number of observations
    # M is the mean vector with shape (D, 1)
    # S is the covariance matrix with shape (D, D)
    D = S.shape[0]
    S_inv = np.linalg.inv(S)
    logdet = np.log(np.linalg.det(S))
    log2pi = np.log(2*np.pi)
    devs = X - M
    a = np.array([- D/2 * log2pi - (1/2) * logdet - dev.T @ S_inv @ dev for dev in devs.T])
    return np.exp(a)
I've only been successful in computing the pdf through a for loop, iterating N times. If I don't, I end up with an (N, N) matrix which is unhelpful. I've found another post here, but the post is quite outdated and in MATLAB.
Is there any way to take advantage of numpy's vectorisation?
This is my first post on stackoverflow, let me know if anything is off!
I came across this problem in a similar manner and here's how I solved it:
Variables:
X = numpy.ndarray[numpy.ndarray[float]] - m x n
MU = numpy.ndarray[numpy.ndarray[float]] - k x n
SIGMA = numpy.ndarray[numpy.ndarray[numpy.ndarray[float]]] - k x n x n
k = int
Where X is my feature vector, MU is my means, SIGMA is my covariance matrix.
To vectorize, I rewrote the dot product per the definition of the dot-product:
sigma_det = np.linalg.det(sigma)
sigma_inv = np.linalg.inv(sigma)
const = 1/((2*np.pi)**(n/2)*sigma_det**(1/2))
p = const*np.exp((-1/2)*np.sum((X-mu).dot(sigma_inv)*(X-mu),axis=1))
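As a quick check of that expression (a sketch with made-up data; X has one observation per row, and the test values for n, mu and sigma are mine), it agrees with scipy.stats.multivariate_normal:

import numpy as np
from scipy.stats import multivariate_normal

rng = np.random.default_rng(0)
n = 3                                   # dimensionality
X = rng.normal(size=(500, n))           # one observation per row
mu = np.zeros(n)
sigma = np.eye(n) + 0.1                 # any symmetric positive-definite matrix

sigma_det = np.linalg.det(sigma)
sigma_inv = np.linalg.inv(sigma)
const = 1/((2*np.pi)**(n/2)*sigma_det**(1/2))
p = const*np.exp((-1/2)*np.sum((X - mu).dot(sigma_inv)*(X - mu), axis=1))

print(np.allclose(p, multivariate_normal(mu, sigma).pdf(X)))   # True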
I have been working on this problem for the last few days and finally have come to a solution.
To do so I have added an extra dimension to the x vector, and then used the np.einsum() function for computing the Mahalanobis distance.
Example
For the following example we will use a (100 x 2) input array. That is, 100 samples of two random variables. That gives us a (1 x 2) mean vector and a (2 x 2) covariance matrix.
Generating some data:
# instantiate a random number generator
rng = np.random.default_rng(100)
# define mu and sigma for the dummy sample
mu = np.array([0.5, 0.25])
covmat = np.array([[1, 0.5],
[0.5, 1]])
# generate multivariate normal random sample
x = rng.multivariate_normal(mu, covmat, size=100)
And defining the pdf function:
def pdf(x, mu, covmat):
    """
    Generates the probability of a given x vector based on the
    probability distribution function N(mu, covmat)
    Returns: the probability
    """
    x = x[:, np.newaxis]  # insert a new axis so x has shape (n, 1, k)
    k = mu.shape[0]  # number of dimensions
    diff = x - mu  # deviation of x from the mean
    inv_covmat = np.linalg.inv(covmat)
    term1 = (2 * np.pi) ** (-k / 2) * np.linalg.det(inv_covmat) ** 0.5
    term2 = np.exp(-np.einsum('ijk, kl, ijl->ij', diff, inv_covmat, diff) / 2)
    return term1 * term2
Which returns a (n, 1) array, where n is the number of samples, in this case (100,1).
Explanation
The easiest way to think about solving the problem is just writing down the dimensions, and trying to do the linear algebra.
We need to do some kind of manipulation of three tensors with the following shapes, to get the resulting tensor:
A, B, C -> D
(100 x 1 x 2), (2, 2), (100 x 1 x 2) -> (100 x 1)
Let the first tensor, A, have the indices, ijk:
Then we want to do some operation of A and B to get the shape (100 x 1 x 2).
Hence,
ijk, kl - > ijl
(100 x 1 x 2), (2 x 2) -> (100 x 1 x 2)
This leaves us with AB, C
(100 x 1 x 2), (100 x 1 x 2)
We want D to have the shape (100 x 1)
Hence:
ijl, ijl->ij
(100 x 1 x 2), (100 x 1 x 2) -> (100 x 1)
Putting the two operations together, we get:
ijk, kl, ijl->ij
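To double-check the einsum route, the result can be compared with scipy.stats on the sample generated above (a quick verification I added, assuming the pdf function and the x, mu, covmat variables defined earlier in this answer):

from scipy.stats import multivariate_normal

p_einsum = pdf(x, mu, covmat).ravel()              # (100, 1) -> (100,)
p_scipy = multivariate_normal(mu, covmat).pdf(x)   # (100,)
print(np.allclose(p_einsum, p_scipy))              # True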

How to apply crank-nicolson method in python to a wave equation like schrodinger's

I'm trying to do a particle in a box simulation with no potential field. It took me some time to find out that simple explicit and implicit methods break unitary time evolution, so I resorted to Crank-Nicolson, which is supposed to be unitary. But when I try it I find that it still is not. I'm not sure what I'm missing. The formulation I used is this:
(I - alpha*T) psi^{n+1} = (I + alpha*T) psi^n
where T is the tridiagonal Toeplitz matrix for the second derivative wrt x (1, -2, 1 on its three central diagonals) and alpha = 1j*dt/(2*dx**2).
The system simplifies to A psi^{n+1} = B psi^n. The A and B matrices are:
A = I - alpha*T   (1 + 2*alpha on the diagonal, -alpha on the off-diagonals)
B = I + alpha*T   (1 - 2*alpha on the diagonal, +alpha on the off-diagonals)
I just solve this linear system for psi^{n+1} using the sparse module. The math makes sense and I found the same numeric scheme in some papers, so that led me to believe my code is where the problem is.
Here's my code so far:
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz
from scipy.sparse.linalg import spsolve
from scipy import sparse
# Spatial discretisation
N = 100
x = np.linspace(0, 1, N)
dx = x[1] - x[0]
# Time discretisation
K = 10000
t = np.linspace(0, 10, K)
dt = t[1] - t[0]
alpha = (1j * dt) / (2 * (dx ** 2))
A = sparse.csc_matrix(toeplitz([1 + 2 * alpha, -alpha, *np.zeros(N-4)]), dtype=np.cfloat) # 2 less for both boundaries
B = sparse.csc_matrix(toeplitz([1 - 2 * alpha, alpha, *np.zeros(N-4)]), dtype=np.cfloat)
# Initial and boundary conditions (localized gaussian)
psi = np.exp((1j * 50 * x) - (200 * (x - .5) ** 2))
b = B.dot(psi[1:-1])
psi[0], psi[-1] = 0, 0
for index, step in enumerate(t):
    # Within the domain
    psi[1:-1] = spsolve(A, b)
    # Enforce boundaries
    # psi[0], psi[N - 1] = 0, 0
    b = B.dot(psi[1:-1])
    # Square integration to show if it's unitary
    print(np.trapz(np.abs(psi) ** 2, dx=dx))
You are relying on the Toeplitz constructor to produce a symmetric matrix, so that the entries below the diagonal are the same as those above the diagonal. However, the documentation for scipy.linalg.toeplitz(c, r=None) does not say "transpose", it says:
"If r is not given, r == conjugate(c) is assumed."
so that the resulting matrix is self-adjoint (Hermitian). Since your off-diagonal entries are purely imaginary, this means that the entries above the diagonal have their sign switched.
It makes no sense to first construct a dense matrix and then extract a sparse representation. Construct it as a sparse tridiagonal matrix from the start, using scipy.sparse.diags:
A = sparse.diags([ (N-3)*[-alpha], (N-2)*[1+2*alpha], (N-3)*[-alpha]], [-1,0,1], format="csc");
B = sparse.diags([ (N-3)*[ alpha], (N-2)*[1-2*alpha], (N-3)*[ alpha]], [-1,0,1], format="csc");
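With the off-diagonal signs made explicitly symmetric (and the matrices built directly in sparse form), the printed norm stays at its initial value up to roundoff. A condensed sketch of the corrected setup, reusing the question's variable names but with a shorter run of my own choosing:

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import spsolve

N, K = 100, 1000
x = np.linspace(0, 1, N); dx = x[1] - x[0]
t = np.linspace(0, 10, K); dt = t[1] - t[0]
alpha = 1j * dt / (2 * dx**2)

# interior points only; -alpha on BOTH off-diagonals of A, +alpha on both of B
A = sparse.diags([(N-3)*[-alpha], (N-2)*[1 + 2*alpha], (N-3)*[-alpha]], [-1, 0, 1], format="csc")
B = sparse.diags([(N-3)*[ alpha], (N-2)*[1 - 2*alpha], (N-3)*[ alpha]], [-1, 0, 1], format="csc")

psi = np.exp(1j * 50 * x - 200 * (x - 0.5)**2)
psi[0] = psi[-1] = 0
for _ in t:
    psi[1:-1] = spsolve(A, B.dot(psi[1:-1]))
print(np.trapz(np.abs(psi)**2, x))   # stays at its initial value up to roundoff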

Efficiently compute n-body gravitation in python

I am trying to compute the accelerations due to gravity for an n-body problem in 3-space (I'm using symplectic Euler).
I have position and velocity vectors for each time step, and am using the below (working) code to calculate accelerations and update velocity and position. Note that the accelerations are vectors in 3-space, not just magnitudes.
I would like to know if there's a more efficient way to compute this with numpy to avoid the loops.
def accelerations(positions, masses):
    '''Params:
    - positions: numpy array of size (n,3)
    - masses: numpy array of size (n,)
    Returns:
    - accelerations: numpy array of size (n,3), the acceleration vectors in 3-space
    '''
    n_bodies = len(masses)
    accelerations = numpy.zeros([n_bodies,3]) # n_bodies * (x,y,z)

    # vectors from mass(i) to mass(j)
    D = numpy.zeros([n_bodies,n_bodies,3]) # n_bodies * n_bodies * (x,y,z)
    for i, j in itertools.product(range(n_bodies), range(n_bodies)):
        D[i][j] = positions[j]-positions[i]

    # Acceleration due to gravitational force between each pair of bodies
    A = numpy.zeros((n_bodies, n_bodies,3))
    for i, j in itertools.product(range(n_bodies), range(n_bodies)):
        if numpy.linalg.norm(D[i][j]) > epsilon:
            A[i][j] = gravitational_constant * masses[j] * D[i][j] \
                      / numpy.linalg.norm(D[i][j])**3

    # Calculate net acceleration of each body (vectors in 3-space)
    accelerations = numpy.sum(A, axis=1) # sum of accel vectors for each body of shape (n_bodies,3)
    return accelerations
Here is an optimized version using blas. blas has special routines for linear algebra on symmetric or Hermitian matrices. These use specialized, packed storage, keeping only the upper or lower triangle and leaving out the (redundant) mirrored entries. That way blas saves not only ~half the storage but also ~half the flops.
I've put quite a few comments to make it readable.
import numpy as np
import itertools
from scipy.linalg.blas import zhpr, dspr2, zhpmv

def acc_vect(pos, mas):
    n = mas.size
    d2 = pos @ (-2 * pos.T)
    diag = -0.5 * np.einsum('ii->i', d2)
    d2 += diag + diag[:, None]
    np.einsum('ii->i', d2)[...] = 1
    return np.nansum((pos[:, None, :] - pos) * (mas[:, None] * d2**-1.5)[..., None], axis=0)

def acc_blas(pos, mas):
    n = mas.size
    # trick: use complex Hermitian to get the packed anti-symmetric
    # outer difference in the imaginary part of the zhpr answer
    # don't want to sum over dimensions yet, therefore must do them one-by-one
    trck = np.zeros((3, n * (n + 1) // 2), complex)
    for a, p in zip(trck, pos.T - 1j):
        zhpr(n, -2, p, a, 1, 0, 0, 1)
        # does a -> a + alpha x x^H
        # parameters: n -- matrix dimension
        #             alpha -- real scalar
        #             x -- complex vector
        #             ap -- packed Hermitian n x n matrix a
        #                   i.e. an n(n+1)/2 vector
        #             incx -- x stride
        #             offx -- x offset
        #             lower -- is storage of ap lower or upper
        #             overwrite_ap -- whether to change a inplace
    # as a by-product we get pos pos^T:
    ppT = trck.real.sum(0) + 6
    # now compute matrix of squared distances ...
    # ... using (A-B)^2 = A^2 + B^2 - 2AB
    # ... that and the outer sum X (+) X.T equals X ones^T + ones X^T
    dspr2(n, -0.5, ppT[np.r_[0, 2:n+1].cumsum()], np.ones((n,)), ppT,
          1, 0, 1, 0, 0, 1)
    # does a -> a + alpha x y^T + alpha y x^T in packed symmetric storage
    # scale anti-symmetric differences by distance^-3
    np.divide(trck.imag, ppT*np.sqrt(ppT), where=ppT.astype(bool),
              out=trck.imag)
    # it remains to scale by mass and sum
    # this can be done by matrix multiplication with the vector of masses ...
    # ... unfortunately because we need anti-symmetry we need to work
    # with Hermitian storage, i.e. complex numbers, even though the actual
    # computation is only real:
    out = np.zeros((3, n), complex)
    for a, o in zip(trck, out):
        zhpmv(n, 0.5, a, mas*-1j, 1, 0, 0, o, 1, 0, 0, 1)
        # multiplies packed Hermitian matrix by vector
    return out.real.T
def accelerations(positions, masses, epsilon=1e-6, gravitational_constant=1.0):
    '''Params:
    - positions: numpy array of size (n,3)
    - masses: numpy array of size (n,)
    '''
    n_bodies = len(masses)
    accelerations = np.zeros([n_bodies,3]) # n_bodies * (x,y,z)
    # vectors from mass(i) to mass(j)
    D = np.zeros([n_bodies,n_bodies,3]) # n_bodies * n_bodies * (x,y,z)
    for i, j in itertools.product(range(n_bodies), range(n_bodies)):
        D[i][j] = positions[j]-positions[i]
    # Acceleration due to gravitational force between each pair of bodies
    A = np.zeros((n_bodies, n_bodies,3))
    for i, j in itertools.product(range(n_bodies), range(n_bodies)):
        if np.linalg.norm(D[i][j]) > epsilon:
            A[i][j] = gravitational_constant * masses[j] * D[i][j] \
                      / np.linalg.norm(D[i][j])**3
    # Calculate net acceleration of each body
    accelerations = np.sum(A, axis=1) # sum of accel vectors for each body
    return accelerations
from numpy.linalg import norm

def acc_pm(positions, masses, G=1):
    '''Params:
    - positions: numpy array of size (n,3)
    - masses: numpy array of size (n,)
    '''
    mass_matrix = masses.reshape((1, -1, 1))*masses.reshape((-1, 1, 1))
    disps = positions.reshape((1, -1, 3)) - positions.reshape((-1, 1, 3)) # displacements
    dists = norm(disps, axis=2)
    dists[dists == 0] = 1 # Avoid divide by zero warnings
    forces = G*disps*mass_matrix/np.expand_dims(dists, 2)**3
    return forces.sum(axis=1)/masses.reshape(-1, 1)
n = 500
pos = np.random.random((n, 3))
mas = np.random.random((n,))
from timeit import timeit
print(f"loops: {timeit('accelerations(pos, mas)', globals=globals(), number=1)*1000:10.3f} ms")
print(f"pmende: {timeit('acc_pm(pos, mas)', globals=globals(), number=10)*100:10.3f} ms")
print(f"vectorized: {timeit('acc_vect(pos, mas)', globals=globals(), number=10)*100:10.3f} ms")
print(f"blas: {timeit('acc_blas(pos, mas)', globals=globals(), number=10)*100:10.3f} ms")
A = accelerations(pos, mas)
AV = acc_vect(pos, mas)
AB = acc_blas(pos, mas)
AP = acc_pm(pos, mas)
assert np.allclose(A, AV) and np.allclose(AB, AV) and np.allclose(AP, AV)
Sample run; comparing to OP, my pure numpy vectorization and @P Mende's.
loops: 3213.130 ms
pmende: 41.480 ms
vectorized: 43.860 ms
blas: 7.726 ms
We can see that
1) P Mende is slightly better than I at vectorizing
2) blas is ~5 times as fast; please note that my blas is not very good; I suspect with an optimized blas you may get even better (numpy would be expected to run faster too on a better blas, though)
3) any of the answers is much faster than loops
A follow up to my comments on your original post:
from numpy.linalg import norm

def accelerations(positions, masses):
    '''Params:
    - positions: numpy array of size (n,3)
    - masses: numpy array of size (n,)
    '''
    mass_matrix = masses.reshape((1, -1, 1))*masses.reshape((-1, 1, 1))
    disps = positions.reshape((1, -1, 3)) - positions.reshape((-1, 1, 3)) # displacements
    dists = norm(disps, axis=2)
    dists[dists == 0] = 1 # Avoid divide by zero warnings
    forces = G*disps*mass_matrix/np.expand_dims(dists, 2)**3
    return forces.sum(axis=1)/masses.reshape(-1, 1)
Some things to consider:
You only need half the distances; once you've calculated D[i][j], that's the same as -D[j][i] (a numpy sketch exploiting this follows below).
If you keep those pairwise quantities in a dataframe df, you can do df2 = df.apply(lambda x: gravitational_constant/x**3).
You can generate a dataframe that records, for each pair of bodies, the product of their masses. You only have to do that once, and then you can pass it to accelerations every time you call it.
Then df.product(df2).product(mass_products).sum().div(masses) gives you the accelerations.
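For completeness, here is one way to visit each pair only once in plain numpy (a sketch; the function name, the eps softening and the np.add.at bookkeeping are mine, not the poster's):

import numpy as np

def accelerations_half(positions, masses, G=1.0, eps=1e-12):
    """Net gravitational acceleration on each body, computing each pair once."""
    n = len(masses)
    i, j = np.triu_indices(n, k=1)             # all pairs with i < j
    d = positions[j] - positions[i]            # displacement from body i to body j
    r3 = np.linalg.norm(d, axis=1)**3 + eps    # softened |d|^3
    acc = np.zeros((n, 3))
    np.add.at(acc, i,  G * masses[j][:, None] * d / r3[:, None])   # pull of j on i
    np.add.at(acc, j, -G * masses[i][:, None] * d / r3[:, None])   # equal and opposite
    return acc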

numpy second derivative of a ndimensional array

I have a set of simulation data where I would like to find the lowest slope in n dimensions. The spacing of the data is constant along each dimension, but not all the same (I could change that for the sake of simplicity).
I can live with some numerical inaccuracy, especially towards the edges. I would heavily prefer not to generate a spline and use that derivative; just on the raw values would be sufficient.
It is possible to calculate the first derivative with numpy using the numpy.gradient() function.
import numpy as np
data = np.random.rand(30,50,40,20)
first_derivative = np.gradient(data)
# second_derivative = ??? <--- there be kudos (:
This is a comment regarding Laplace versus the Hessian matrix; it is no longer a question but is meant to help future readers understand the difference.
I use as a testcase a 2D function to determine the 'flattest' area below a threshold. The following pictures show the difference in results between using the minimum of second_derivative_abs = np.abs(laplace(data)) and the minimum of the following:
second_derivative_abs = np.zeros(data.shape)
hess = hessian(data)
# based on the function description; would [-1] be more appropriate?
for i in hess[0]: # calculate a norm
    for j in i[0]:
        second_derivative_abs += j*j
The color scale depicts the functions values, the arrows depict the first derivative (gradient), the red dot the point closest to zero and the red line the threshold.
The generator function for the data was ( 1-np.exp(-10*xi**2 - yi**2) )/100.0 with xi, yi being generated with np.meshgrid.
Laplace:
Hessian:
The second derivatives are given by the Hessian matrix. Here is a Python implementation for ND arrays that consists of applying np.gradient twice and storing the output appropriately:
import numpy as np
def hessian(x):
    """
    Calculate the hessian matrix with finite differences
    Parameters:
       - x : ndarray
    Returns:
       an array of shape (x.ndim, x.ndim) + x.shape
       where the array[i, j, ...] corresponds to the second derivative x_ij
    """
    x_grad = np.gradient(x)
    hessian = np.empty((x.ndim, x.ndim) + x.shape, dtype=x.dtype)
    for k, grad_k in enumerate(x_grad):
        # iterate over dimensions
        # apply gradient again to every component of the first derivative.
        tmp_grad = np.gradient(grad_k)
        for l, grad_kl in enumerate(tmp_grad):
            hessian[k, l, :, :] = grad_kl
    return hessian

x = np.random.randn(100, 100, 100)
hessian(x)
Note that if you are only interested in the magnitude of the second derivatives, you could use the Laplace operator implemented by scipy.ndimage.filters.laplace, which is the trace (sum of diagonal elements) of the Hessian matrix.
Taking the smallest element of the Hessian matrix could be used to estimate the lowest slope in any spatial direction.
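For instance (a quick check I added, using the hessian function above; the grid and test function are mine), both the Laplace filter and the trace of the Hessian recover the analytic Laplacian of x**2 + y**2, which is 4, away from the boundaries:

import numpy as np
from scipy.ndimage import laplace

xi, yi = np.meshgrid(np.arange(50.0), np.arange(50.0), indexing='ij')
f = xi**2 + yi**2                      # analytic Laplacian is 4 everywhere

H = hessian(f)                         # shape (2, 2, 50, 50), from the function above
trace_H = H[0, 0] + H[1, 1]            # d2f/dx2 + d2f/dy2
print(trace_H[5:-5, 5:-5].mean(), laplace(f)[5:-5, 5:-5].mean())   # both ~4.0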
Slopes, Hessians and Laplacians are related, but are 3 different things.
Start with 2d: a function( x, y ) of 2 variables, e.g. a height map of a range of hills,
slopes aka gradients are direction vectors, a direction and length at each point x y.
This can be given by 2 numbers dx dy in cartesian coordinates,
or an angle θ and length sqrt( dx^2 + dy^2 ) in polar coordinates.
Over a whole range of hills, we get a
vector field.
Hessians describe curvature near x y, e.g. a paraboloid or a saddle,
with 4 numbers: dxx dxy dyx dyy.
a Laplacian is 1 number, dxx + dyy, at each point x y.
Over a range of hills, we get a
scalar field.
(Functions or hills with Laplacian = 0
are particularly smooth.)
Slopes are linear fits and Hessians quadratic fits, for tiny steps h near a point xy:
f(xy + h) ~ f(xy)
+ slope . h -- dot product, linear in both slope and h
+ h' H h / 2 -- quadratic in h
Here xy, slope and h are vectors of 2 numbers,
and H is a matrix of 4 numbers dxx dxy dyx dyy.
N-d is similar: slopes are direction vectors of N numbers,
Hessians are matrices of N^2 numbers, and Laplacians 1 number, at each point.
(You might find better answers over on math.stackexchange.)
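A tiny numeric illustration of that expansion (my own example, not part of the answer): for f(x, y) = x**2 + x*y the quadratic model built from the slope and the Hessian reproduces f(xy + h) exactly, because f is itself quadratic:

import numpy as np

def f(p):
    x, y = p
    return x**2 + x*y

xy = np.array([1.0, 2.0])
h = np.array([0.01, -0.02])                     # a tiny step

slope = np.array([2*xy[0] + xy[1], xy[0]])      # (df/dx, df/dy) at xy
H = np.array([[2.0, 1.0], [1.0, 0.0]])          # (dxx dxy; dyx dyy)

model = f(xy) + slope @ h + h @ H @ h / 2
print(f(xy + h), model)                         # both 3.0199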
You can see the Hessian matrix as a gradient of a gradient, where you apply the gradient a second time to each component of the first gradient. The Wikipedia page defining the Hessian matrix makes it clear that it is a gradient of a gradient. Here is a Python implementation defining gradient, then hessian:
import numpy as np

#Gradient Function
def gradient_f(x, f):
    assert (x.shape[0] >= x.shape[1]), "the vector should be a column vector"
    x = x.astype(float)
    N = x.shape[0]
    gradient = []
    for i in range(N):
        eps = abs(x[i]) * np.finfo(np.float32).eps
        xx0 = 1. * x[i]
        f0 = f(x)
        x[i] = x[i] + eps
        f1 = f(x)
        gradient.append(np.asscalar(np.array([f1 - f0])) / eps)
        x[i] = xx0
    return np.array(gradient).reshape(x.shape)

#Hessian Matrix
def hessian(x, the_func):
    N = x.shape[0]
    hessian = np.zeros((N, N))
    gd_0 = gradient_f(x, the_func)
    eps = np.linalg.norm(gd_0) * np.finfo(np.float32).eps
    for i in range(N):
        xx0 = 1. * x[i]
        x[i] = xx0 + eps
        gd_1 = gradient_f(x, the_func)
        hessian[:, i] = ((gd_1 - gd_0) / eps).reshape(x.shape[0])
        x[i] = xx0
    return hessian
As a test, the Hessian matrix of (x^2 + y^2) is 2 * I_2 where I_2 is the identity matrix of dimension 2
hessians = np.asarray(np.gradient(np.gradient(f(X, Y))))
hessians[1:]
Worked for 3-d function f.
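Fleshed out into a runnable check (the grid setup is mine; only the np.gradient one-liner is from the answer): for f = X**2 + Y**2, dropping the derivative taken along the stacking axis (that is the hessians[1:] step) leaves the expected 2 * I_2 in the interior.

import numpy as np

def f(X, Y):
    return X**2 + Y**2

X, Y = np.meshgrid(np.arange(20.0), np.arange(20.0), indexing='ij')
hessians = np.asarray(np.gradient(np.gradient(f(X, Y))))   # shape (3, 2, 20, 20)
H = hessians[1:]        # drop the gradient along the stacking axis, keep the two spatial ones
print(H[:, :, 10, 10])  # [[2. 0.] [0. 2.]] away from the edges, i.e. 2 * I_2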

Simultaneously diagonalize matrices with numpy

I have a m × n × n numpy.ndarray of m simultaneously diagonalizable square matrices and would like to use numpy to obtain their simultaneous eigenvalues.
For example, if I had
from numpy import einsum, diag, array, linalg, random
U = linalg.svd(random.random((3,3)))[2]
M = einsum(
"ij, ajk, lk",
U, [diag([2,2,0]), diag([1,-1,1])], U)
the two matrices in M are simultaneously diagonalizable, and I am looking for a way to obtain the array
array([[2., 1.],
[2., -1.],
[0., 1.]])
(up to permutation of the rows) from M. Is there a built-in or easy way to get this?
There is a fairly simple and very elegant simultaneous diagonalization algorithm based on Givens rotations that was published by Cardoso and Souloumiac in 1996:
Cardoso, J., & Souloumiac, A. (1996). Jacobi Angles for Simultaneous Diagonalization. SIAM Journal on Matrix Analysis and Applications, 17(1), 161–164. doi:10.1137/S0895479893259546
I've attached a numpy implementation of the algorithm at the end of this response. Caveat: It turns out simultaneous diagonalization is a bit of a tricky numerical problem, with no algorithm (to the best of my knowledge) that guarantees global convergence. However, the cases in which it does not work (see the paper) are degenerate and in practice I have never had the Jacobi angles algorithm fail on me.
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
Routines for simultaneous diagonalization
Arun Chaganty <arunchaganty@gmail.com>
"""
import numpy as np
from numpy import zeros, eye, diag
from numpy.linalg import norm
def givens_rotate( A, i, j, c, s ):
"""
Rotate A along axis (i,j) by c and s
"""
Ai, Aj = A[i,:], A[j,:]
A[i,:], A[j,:] = c * Ai + s * Aj, c * Aj - s * Ai
return A
def givens_double_rotate( A, i, j, c, s ):
"""
Rotate A along axis (i,j) by c and s
"""
Ai, Aj = A[i,:], A[j,:]
A[i,:], A[j,:] = c * Ai + s * Aj, c * Aj - s * Ai
A_i, A_j = A[:,i], A[:,j]
A[:,i], A[:,j] = c * A_i + s * A_j, c * A_j - s * A_i
return A
def jacobi_angles( *Ms, **kwargs ):
r"""
Simultaneously diagonalize using Jacobi angles
@article{SC-siam,
HTML = "ftp://sig.enst.fr/pub/jfc/Papers/siam_note.ps.gz",
author = "Jean-Fran\c{c}ois Cardoso and Antoine Souloumiac",
journal = "{SIAM} J. Mat. Anal. Appl.",
title = "Jacobi angles for simultaneous diagonalization",
pages = "161--164",
volume = "17",
number = "1",
month = jan,
year = {1995}}
(a) Compute Givens rotations for every pair of indices (i,j) i < j
- from eigenvectors of G = gg'; g = A_ij - A_ji, A_ij + A_ji
- Compute c, s as \sqrt{x+r/2r}, y/\sqrt{2r(x+r)}
(b) Update matrices by multiplying by the givens rotation R(i,j,c,s)
(c) Repeat (a) until stopping criterion: sin theta < threshold for all ij pairs
"""
    assert len(Ms) > 0
    m, n = Ms[0].shape
    assert m == n

    sweeps = kwargs.get('sweeps', 500)
    threshold = kwargs.get('eps', 1e-8)
    rank = kwargs.get('rank', m)

    R = eye(m)

    for _ in xrange(sweeps):
        done = True
        for i in xrange(rank):
            for j in xrange(i+1, m):
                G = zeros((2,2))
                for M in Ms:
                    g = np.array([ M[i,i] - M[j,j], M[i,j] + M[j,i] ])
                    G += np.outer(g,g) / len(Ms)
                # Compute the eigenvector directly
                t_on, t_off = G[0,0] - G[1,1], G[0,1] + G[1,0]
                theta = 0.5 * np.arctan2( t_off, t_on + np.sqrt( t_on*t_on + t_off * t_off) )
                c, s = np.cos(theta), np.sin(theta)

                if abs(s) > threshold:
                    done = False
                    # Update the matrices and V
                    for M in Ms:
                        givens_double_rotate(M, i, j, c, s)
                        #assert M[i,i] > M[j, j]
                    R = givens_rotate(R, i, j, c, s)
        if done:
            break
    R = R.T

    L = np.zeros((m, len(Ms)))
    err = 0
    for i, M in enumerate(Ms):
        # The off-diagonal elements of M should be 0
        L[:,i] = diag(M)
        err += norm(M - diag(diag(M)))

    return R, L, err
I am not aware of any direct solution. But why not just get the eigenvalues and the eigenvectors of the first matrix, and use the eigenvectors to transform all other matrices to diagonal form? Something like:
eigvals, eigvecs = np.linalg.eig(matrix1)
eigvals2 = np.diagonal(np.dot(np.dot(np.transpose(eigvecs), matrix2), eigvecs))
You can then add the columns to an array via hstack if you like.
UPDATE: As pointed out below, this is only valid if no degenerate eigenvalues occur. Otherwise one would have to check first for the degenerate eigenvalues, then transform the 2nd matrix to a blockdiagonal form, and diagonalize eventual blocks bigger than 1x1 separately.
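A quick illustration on a pair of commuting symmetric matrices whose first member has distinct eigenvalues (my own example; as the update notes, the M from the question has repeated eigenvalues in each matrix, so this shortcut would need the block treatment there):

import numpy as np

U = np.linalg.svd(np.random.random((3, 3)))[2]      # a random orthogonal matrix
A = U.T @ np.diag([3.0, 1.0, 2.0]) @ U              # distinct eigenvalues
B = U.T @ np.diag([5.0, -1.0, 4.0]) @ U             # same eigenvectors as A

eigvals1, eigvecs = np.linalg.eig(A)
eigvals2 = np.diagonal(np.transpose(eigvecs) @ B @ eigvecs)
print(np.column_stack([eigvals1, eigvals2]))        # rows are (3, 5), (1, -1), (2, 4) up to order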
I am sure there is significant room for improvement in my solution, but I have come up with the following set of three functions doing the calculation for me in a semi-robust way.
import numpy
import scipy.linalg

def clusters(array,
             orig_indices=None,
             start=0,
             rtol=numpy.allclose.__defaults__[0],
             atol=numpy.allclose.__defaults__[1]):
    """For an array, return a permutation that sorts the numbers and the sizes of the resulting blocks of identical numbers."""
    array = numpy.asarray(array)
    if not len(array):
        return numpy.array([]), []
    if orig_indices is None:
        orig_indices = numpy.arange(len(array))
    x = array[0]
    close = abs(array - x) <= (atol + rtol * abs(x))
    first = sum(close)
    r_perm, r_sizes = clusters(
        array[~close],
        orig_indices[~close],
        start + first,
        rtol, atol)
    r_sizes.insert(0, first)
    return numpy.concatenate((orig_indices[close], r_perm)), r_sizes

def permutation_matrix(permutation, dtype=float):
    n = len(permutation)
    P = numpy.zeros((n, n), dtype)
    for i, j in enumerate(permutation):
        P[j, i] = 1
    return P

def simultaneously_diagonalize(tensor, atol=numpy.allclose.__defaults__[1]):
    tensor = numpy.asarray(tensor)
    old_shape = tensor.shape
    size = old_shape[-1]
    tensor = tensor.reshape((-1, size, size))
    diag_mask = 1 - numpy.eye(size)
    eigvalues, diagonalizer = numpy.linalg.eig(tensor[0])
    diagonalization = numpy.dot(
        numpy.dot(
            numpy.linalg.inv(diagonalizer),
            tensor).swapaxes(0, -2),
        diagonalizer)
    if numpy.allclose(diag_mask * diagonalization, 0):
        return diagonalization.diagonal(axis1=-2, axis2=-1).reshape(old_shape[:-1])
    else:
        perm, cluster_sizes = clusters(diagonalization[0].diagonal())
        perm_matrix = permutation_matrix(perm)
        diagonalization = numpy.dot(
            numpy.dot(
                perm_matrix.T,
                diagonalization).swapaxes(0, -2),
            perm_matrix)
        mask = 1 - scipy.linalg.block_diag(
            *list(
                numpy.ones((blocksize, blocksize))
                for blocksize in cluster_sizes))
        print(diagonalization)
        assert(numpy.allclose(
            diagonalization * mask,
            0))  # Assert that the matrices are co-diagonalizable
        blocks = numpy.cumsum(cluster_sizes)
        start = 0
        other_part = []
        for block in blocks:
            other_part.append(
                simultaneously_diagonalize(
                    diagonalization[1:, start:block, start:block]))
            start = block
        return numpy.vstack(
            (diagonalization[0].diagonal(axis1=-2, axis2=-1),
             numpy.hstack(other_part)))
If you know something about the size of the eigenvalues of the two matrices in advance, you can diagonalize a linear combination of the two matrices, with coefficients chosen to break the degeneracy. For example, if the eigenvalues of both lie between -10 and 10, you could diagonalize 100*M1 + M2. There's a slight loss of precision, but for many purposes it's good enough--and quick and easy!
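Applied to the M built in the question (a sketch; the weight 100 is arbitrary, it just needs to dominate the spread of the second matrix's eigenvalues):

import numpy as np

w, V = np.linalg.eig(100 * M[0] + M[1])      # the combination has distinct eigenvalues
V_inv = np.linalg.inv(V)
pairs = np.column_stack([np.diagonal(V_inv @ Mi @ V) for Mi in M])
print(pairs)    # rows are (2, 1), (2, -1), (0, 1) up to permutation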
