Implementation of an algorithm for simultaneous diagonalization - python

I am trying to write an implementation of an algorithm for the simultaneous diagonalization of two matrices (which are assumed to be simultaneously diagonalizable). However, the algorithm does not seem to converge. The algorithm is described in SIAM J. Matrix Anal. Appl. 14, 927 (1993).
Here is the first part of my code to set up a test case:
import numpy as np
import numpy.linalg as lin
from scipy.optimize import minimize
N = 3
# Unitary example matrix
X = np.array([
[-0.54717736-0.43779416j, 0.26046313+0.11082439j, 0.56151027-0.33692186j],
[-0.33452046-0.37890784j, -0.40907097-0.70730291j, -0.15344477+0.23100467j],
[-0.31253864-0.39468687j, 0.05342909+0.49940543j, -0.70062586+0.05835082j]
])
# Generate eigenvalues
LA = np.diag(np.arange(0, N))
LB = np.diag(np.arange(N, 2*N))
# Generate simultaneously diagonalizable matrices
A = X @ LA @ np.conj(X).T
B = X @ LB @ np.conj(X).T
This should generate two 3x3 matrices which are simultaneously diagonalizable, since both are constructed from the same unitary X. Since simultaneously diagonalizable matrices necessarily commute, a quick sanity check of the construction is:
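# Sanity check: simultaneously diagonalizable matrices must commute,
# so A @ B - B @ A should vanish up to floating-point error.
assert np.allclose(A @ B, B @ A)
The following code block then defines a few helper functions: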
def off2(A, B):
    """Measures the distance of the matrices A and B
    from their diagonal form.
    """
    C = np.abs(A) ** 2 + np.abs(B) ** 2
    diag_idx = np.diag_indices(N)
    C[diag_idx] = 0
    return np.sum(C)
def Rijcs(i, j, c, s):
    """Function R(i, j, c, s) from the paper, see
    Eq. (1) therein. Used for plane rotations in
    the plane ij.
    """
    res = np.eye(N, dtype=complex)
    res[i, i] = c
    res[i, j] = -np.conj(s)
    res[j, i] = s
    res[j, j] = np.conj(c)
    return res
def cs(theta, phi):
    """Parametrization for c and s."""
    c = np.cos(theta)
    s = np.exp(1j * phi) * np.sin(theta)
    return c, s
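As a sanity check on these helpers, R(i, j, c, s) should be unitary for any theta and phi, since |c|^2 + |s|^2 = 1 by construction:
# Sanity check: the plane rotation R(i, j, c, s) should be unitary
# for any angles theta and phi.
c, s = cs(0.3, 0.7)
R = Rijcs(0, 1, c, s)
assert np.allclose(np.conj(R).T @ R, np.eye(N))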
With these definitions, the algorithm can be implemented:
tol = 1e-10
Q = np.eye(N, dtype=complex)
while True:
    off = off2(A, B)
    # Print statement for debugging purposes
    print(off)
    # Terminate if the result is converged
    if off <= tol * (lin.norm(A, "fro") + lin.norm(B, "fro")):
        break
    for i in range(N):
        for j in range(i + 1, N):
            def fij(c, s):
                aij = A[i, j]
                aji = A[j, i]
                aii = A[i, i]
                ajj = A[j, j]
                bij = B[i, j]
                bji = B[j, i]
                bii = B[i, i]
                bjj = B[j, j]
                x = np.array(
                    [
                        [np.conj(aij), np.conj(aii - ajj), -np.conj(aji)],
                        [aji, (aii - ajj), -aij],
                        [np.conj(bij), np.conj(bii - bjj), -np.conj(bji)],
                        [bji, (bii - bjj), -bij]
                    ]
                )
                y = np.array(
                    [
                        [c ** 2],
                        [c * s],
                        [s ** 2]
                    ]
                )
                return lin.norm(x @ y, 2)
            # 5
            result = minimize(
                lambda x: fij(*cs(x[0], x[1])),
                x0=(0, 0),
                bounds=(
                    (-0.25 * np.pi, 0.25 * np.pi),
                    (-np.pi, np.pi)
                ),
            )
            theta, phi = result['x']
            c, s = cs(theta, phi)
            # 6
            R = Rijcs(i, j, c, s)
            # 7
            Q = Q @ R
            A = np.conj(R).T @ A @ R
            B = np.conj(R).T @ B @ R
As you can observe from the print statement, the "distance" of A and B from diagonal form does not really converge. Instead, the values printed range from 0.5 up to 3 and oscillate up and down. Is there a bug in this code and if so, where exactly is it?

Related

How to reduce run time in my python code?

I am currently working on a project that requires me to run a complete python code base. For research purposes, I need to run the code as fast as possible. Yet I am fairly new to programming and have no idea how to reduce run time, so I hope someone can help me with that. Any advice would be appreciated. Here's part of my code base; it uses a lot of nested for loops, which might significantly increase the run time.
def a_j(r, a, A):  # the Clausius-Mossotti factor, determined by a symmetric (3 × 3) matrix such that (A_i)^T = A_i
    alph = np.array([[0,0,0],[0,0,0],[0,0,0]], complex)
    for i in range(3):
        for j in range(3):
            alph[i,j] = (r * a * A[i,j])
    return alph
def W_ext(x, k, rho, alpha, A):  # particle–particle interaction term
    n = x.shape[0]  # the number of x vectors
    result = np.zeros([3*n, 3*n], complex)
    u = np.zeros((n, 3))  # u = x - x'
    for i in range(n):
        for j in range(n):
            if i != j:
                u[i] = x[i] - x[j]
                block_result = a_j(rho[i], alpha, A) * G((u[i]), k) * a_j(rho[j], alpha, A)
                for m in range(3):
                    for l in range(3):
                        result[3*i + m, 3*j + l] = block_result[m,l]
    return result.imag
def A_ext(rho, a, A):  # single-particle term
    n = rho.shape[0]
    result = np.zeros([3*n, 3*n], complex)
    for i in range(n):
        for j in range(n):
            if i == j:
                block_result = a_j(rho[i], a, A).imag
                for m in range(3):
                    for l in range(3):
                        result[3*i + m, 3*j + l] = block_result[m,l]
    return result  # (3n x 3n) matrix
def P_ext(e, A, W, omega):
    eT = np.matrix.getH(e)
    mm1 = np.matmul(A, e)
    mm2 = np.matmul(W, e)
    extinction = (np.dot(eT, mm1) + np.dot(eT, mm2)) * (omega/2.0)
    return extinction
#ABSORPTION
def W_abs(x, k, rho, alpha, A, chi):  # particle–particle interaction term
    n = x.shape[0]
    result = np.zeros([3*n, 3*n], complex)
    u = np.zeros((n, 3))
    for i in range(n):
        for j in range(n):
            if i != j:
                u[i] = x[i] - x[j]
                block_result = np.matrix.getH(a_j(rho[i], alpha, A)) * (1.0 / np.conjugate(chi)).imag * a_j(rho[i], alpha, A) * G((u[i]), k) * a_j(rho[j], alpha, A)
                for m in range(3):
                    for l in range(3):
                        result[3*i + m, 3*j + l] = block_result[m,l]
    return 2.0 * result.real  # (3n x 3n) matrix
def A_abs(rho, a, A, chi):  # single-particle term
    n = rho.shape[0]
    result = np.zeros([3*n, 3*n], complex)
    for i in range(n):
        for j in range(n):
            if i == j:
                block_result = np.matrix.getH(a_j(rho[i], a, A)) * (1.0 / np.conjugate(chi)).imag * a_j(rho[i], a, A)
                for m in range(3):
                    for l in range(3):
                        result[3*i + m, 3*j + l] = block_result[m,l]
    return result  # (3n x 3n) matrix
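One immediate observation: a_j only scales the matrix A by the scalar r * a, so its loops can be dropped entirely, and each 3x3 block can be written with a single slice assignment instead of the two innermost loops. A sketch of that idea (assuming G is the Green's-function helper from the rest of the code base, passed in here explicitly):
import numpy as np

def a_j_fast(r, a, A):
    # r * a is a scalar, so no loop is needed at all.
    return (r * a) * np.asarray(A, dtype=complex)

def W_ext_fast(x, k, rho, alpha, A, G):
    n = x.shape[0]
    result = np.zeros((3 * n, 3 * n), complex)
    for i in range(n):
        for j in range(n):
            if i != j:
                u = x[i] - x[j]
                block = a_j_fast(rho[i], alpha, A) * G(u, k) * a_j_fast(rho[j], alpha, A)
                # One slice assignment replaces the two innermost loops.
                result[3*i:3*i + 3, 3*j:3*j + 3] = block
    return result.imag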

General minimal residual method with right-preconditioner of SSOR

I am trying to implement the GMRES algorithm with a right preconditioner P for solving the linear system Ax = b. The code runs without error; however, the result is imprecise: the error I get is very large. For the plain GMRES method (without the preconditioning matrix, i.e., with P removed from the algorithm), the error I get is around 1e-12 and it converges for the same matrix.
import numpy as np
from scipy import sparse
import matplotlib.pyplot as plt
from scipy.linalg import norm as norm
import scipy.sparse as sp
from scipy.sparse import diags
"""The program is to split the matrix into D-diagonal; L: strictly lower matrix; U strictly upper matrix
satisfying: A = D - L - U """
def splitMat(A):
n,m = A.shape
if (n == m):
diagval = np.diag(A)
D = diags(diagval,0).toarray()
L = (-1)*np.tril(A,-1)
U = (-1)*np.triu(A,1)
else:
print("A needs to be a square matrix")
return (L,D,U)
"""Preconditioned Matrix for symmetric successive over-relaxation (SSOR): """
def P_SSOR(A,w):
## Split up matrix A:
L,D,U = splitMat(A)
Comp1 = (D - w*U)
Comp2 = (D - w*L)
Comp1inv = np.linalg.inv(Comp1)
Comp2inv = np.linalg.inv(Comp2)
P = w*(2-w)*np.matmul(Comp1inv, np.matmul(D,Comp2inv))
return P
"""GMRES_SSOR using right preconditioning P:
A - matrix of linear system Ax = b
x0 - initial guess
tol - tolerance
maxit - maximum iteration """
def myGMRES_SSOR(A, x0, b, tol, maxit):
    matrixSize = A.shape[0]
    e = np.zeros((maxit+1, 1))
    rr = 1
    rstart = 2
    X = x0
    w = 1.9  ## in ssor
    P = P_SSOR(A, w)  ### preconditioner matrix
    ### Starting the GMRES ####
    for rs in range(0, rstart+1):
        ### first check the residual:
        if rr < tol:
            break
        else:
            r0 = (b - A.dot(x0))
            rho = norm(r0)
            e[0] = rho
            H = np.zeros((maxit+1, maxit))
            Qcol = np.zeros((matrixSize, maxit+1))
            Qcol[:, 0:1] = r0/rho
            for k in range(1, maxit+1):
                ### Arnoldi procedure ##
                Qcol[:, k] = np.matmul(np.matmul(A, P), Qcol[:, k-1])  ### This step applies P here:
                for j in range(0, k):
                    H[j, k-1] = np.dot(np.transpose(Qcol[:, k]), Qcol[:, j])
                    Qcol[:, k] = Qcol[:, k] - (np.dot(H[j, k-1], Qcol[:, j]))
                H[k, k-1] = norm(Qcol[:, k])
                Qcol[:, k] = Qcol[:, k]/H[k, k-1]
                ### QR decomposition step ###
                n = k
                Q = np.zeros((n+1, n))
                R = np.zeros((n, n))
                R[0, 0] = norm(H[0:n+2, 0])
                Q[:, 0] = H[0:n+1, 0] / R[0, 0]
                for j in range(0, n+1):
                    t = H[0:n+1, j-1]
                    for i in range(0, j-1):
                        R[i, j-1] = np.dot(Q[:, i], t)
                        t = t - np.dot(R[i, j-1], Q[:, i])
                    R[j-1, j-1] = norm(t)
                    Q[:, j-1] = t / R[j-1, j-1]
                g = np.dot(np.transpose(Q), e[0:k+1])
                Y = np.dot(np.linalg.inv(R), g)
                Res = e[0:n] - np.dot(H[0:n, 0:n], Y[0:n])
                rr = norm(Res)
                #### second check on the residual ###
                if rr < tol:
                    break
            #### Updating the solution with the preconditioner matrix ####
            X = X + np.matmul(np.matmul(P, Qcol[:, 0:k]), Y)  ### This step applies P here:
    return X
######
A = np.random.rand(100,100)
x = np.random.rand(100,1)
b = np.matmul(A,x)
x0 = np.zeros((100,1))
maxit = 100
tol = 0.00001
x = myGMRES_SSOR(A,x0,b,tol,maxit)
res = b - np.matmul(A,x)
print(norm(res))
print("Solution with gmres\n", np.matmul(A,x))
print("---------------------------------------")
print("b matrix:", b)
I hope someone can help me figure this out!
I'm not sure where you got your "Symmetric_successive_over-relaxation" (SSOR) code from, but it appears to be wrong. You also seem to be assuming that A is a symmetric matrix, but in your random test case it is not.
Following SSOR's Wikipedia entry, I replaced your P_SSOR function with
def P_SSOR(A, w):
    L, D, U = splitMat(A)
    P = 2/(2-w) * (1/w*D + L) @ np.linalg.inv(D) @ (1/w*D + L).T
    return P
and your test matrix with
A = np.random.rand(100,100)
A = A + A.T
and your code works up to a 12 digit residual error.
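As a further sanity check, the SSOR matrix built this way should come out symmetric whenever D is, since it has the form K D^{-1} K^T. A small sketch verifying that (self-contained, with the split written out inline):
import numpy as np

A = np.random.rand(100, 100)
A = A + A.T                 # symmetrize, as suggested above
D = np.diag(np.diag(A))     # diagonal part
L = -np.tril(A, -1)         # strictly lower part, sign convention as in splitMat
w = 1.9

M = 2/(2 - w) * (1/w*D + L) @ np.linalg.inv(D) @ (1/w*D + L).T

# M = c * K @ inv(D) @ K.T is symmetric by construction whenever D is,
# regardless of the sign convention used for L.
assert np.allclose(M, M.T)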

How can I code a matrix within a matrix using a loop?

So I have this 3x3 G matrix (not shown here, it's irrelevant to my problem) that I created using the two variables u (a vector, x - y) and the scalar k. x_j = (x_1(j), x_2(j), x_3(j)) and y_j = (y_1(j), y_2(j), y_3(j)). alpha_j is a 3x3 matrix. The A matrix is a block diagonal matrix of size 3nx3n. I am having trouble with the W matrix. How do I code a matrix of size 3nx3n, where the (i,j)-th block is the 3x3 matrix given by alpha_i * G_ij * alpha_j? I am lost.
My alpha_j matrix also seems to be having some trouble. The loop keeps throwing me the error "only length-1 arrays can be converted to Python scalars". Please help!
def W(x, y, k, alpha, A):
    u = x - y
    n = x.shape[0]
    W = np.zeros((3*n, 3*n))
    for i in range(0, n-1):
        for j in range(0, n-1):
            #u = -np.array([[x[i,0] - x[j,0]], [x[i,1] - x[j,1]], [0]]) ??
            W[i][j] = (alpha_j(alpha, A) * G(u, k) * alpha_j(alpha, A))
        W[i][i] = np.zeros((n, n))
    return W
def alpha_j(a, A):
    alph = np.array([[0,0,0],[0,0,0],[0,0,0]], complex)
    rho = np.random.rand(3, 1)
    for i in range(0, 2):
        for j in range(0, 2):
            alph[i][j] = (rho[i] * a * A[i][j])
    return alph
#-------------------------------------------------------------------
x1 = np.array([[1], [2], [0]])
y1 = np.array([[4], [5], [0]])
# SYSTEM PARAMETERS
# incoming Wave angle
theta = 0 # can range from [0, 2pi)
# susceptibility
chi = 10 + 1j
# wavelength
lam = 0.5 # microns (values between .4-.7)
# frequency
k = (2 * np.pi)/lam # 1/microns
# volume
V_0 = (0.05)**3 # microns^3
# incoming wave vector
K = k * np.array([[0], [np.sin(theta)], [np.cos(theta)]])
# polarization vector
vecinc = np.array([[1], [0], [0]]) # (can choose any vector perpendicular to K)
# for the fixed alpha case
alpha = (V_0 * 3 * chi)/(chi + 3)
# 3 x 3 matrix
A = np.matlib.identity(3) # could be any symmetric matrix,
#-------------------------------------------------------------------
# TEST FUNCTIONS
test = G((x1-y1), k)
print(test)
w = W(x1, y1, k, alpha, A)
print(w)
Sometimes my W loop throws the error "can't set an array element with a sequence". But I need to set each array element in this arbitrary matrix W to the 3x3 matrix created by multiplying alpha by G...
To your question of how to create a new array with a block for each element, the following should do the trick:
G = np.random.random([3,3])
result = np.zeros([9,9])
num_blocks = 3
a = np.random.random([3,3])
b = np.random.random([3,3])
for i in range(G.shape[0]):
    for j in range(G.shape[1]):
        block_result = a*G[i,j]*b
        for k in range(num_blocks):
            for l in range(num_blocks):
                result[3*i + k, 3*j + l] = block_result[k, l]
You should be able to generalize from there. I hope I've understood correctly.
EDIT: It looks like I haven't understood correctly. I'm leaving it in hopes it spurs you to an answer. The general idea is to generate ranges of indices to operate on, and then just operate on them directly. Slicing might be helpful, too.
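To make the slicing idea concrete, here is a minimal sketch: each (i, j) block of the 3n x 3n matrix can be assigned in one slice instead of element by element (the 3x3 factors a and b below are placeholders for alpha_i and alpha_j):
import numpy as np

n = 4                         # number of points, for example
G = np.random.random((n, n))  # stand-in for the scalar G[i, j] values
a = np.random.random((3, 3))  # placeholder for alpha_i
b = np.random.random((3, 3))  # placeholder for alpha_j
result = np.zeros((3 * n, 3 * n))

for i in range(n):
    for j in range(n):
        # Assign the whole 3x3 block in one slice assignment.
        result[3*i:3*i + 3, 3*j:3*j + 3] = a * G[i, j] * b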
Ah, you asked how to create a diagonal filled with blocks. In that case:
num_diagonal_blocks = 3  # for example
for block_dim in range(num_diagonal_blocks):
    # do your block calculation...
    for k in range(G.shape[0]):
        for l in range(G.shape[1]):
            result[3*block_dim + k, 3*block_dim + l] = block_result[k, l]  # assign to element of block
I think that's nearly it.

evaluate many monomials at many points

The following problem concerns evaluating many monomials (x**k * y**l * z**m) at many points.
I would like to compute the "inner power" of two numpy arrays, i.e.,
import numpy
a = numpy.random.rand(10, 3)
b = numpy.random.rand(3, 5)
out = numpy.ones((10, 5))
for i in range(10):
    for j in range(5):
        for k in range(3):
            out[i, j] *= a[i, k]**b[k, j]
print(out.shape)
If the line instead read
out[i, j] += a[i, k]*b[k, j]
this would be a number of inner products, computable with a simple dot or einsum.
Is it possible to perform the above loop in just one numpy line?
What about thinking of it in terms of logarithms:
import numpy
a = numpy.random.rand(10, 3)
b = numpy.random.rand(3, 5)
out = numpy.exp(numpy.matmul(numpy.log(a), b))
Since c_ij = prod(a_ik ** b_kj, k=1..K), we have log(c_ij) = sum(log(a_ik) * b_kj, k=1..K).
Note: Having zeros in a may mess up the result (negatives too, but then the result wouldn't be well defined anyway). I gave it a try and it doesn't seem to actually break; I don't know if that behavior is guaranteed by NumPy, but to be safe you can add something like the following at the end (with eps a small threshold of your choice):
out[numpy.logical_or.reduce(a < eps, axis=1)] = 0
You can use broadcasting after extending those arrays to 3D versions -
(a[:,:,None]**b[None,:,:]).prod(axis=1)
Simply put -
(a[...,None]**b[None]).prod(1)
Basically, we are keeping the last axis and first axis from the two arrays aligned, while performing element-wise powers between the first and last axes from the two inputs. Schematically put using the given sample on shapes -
10 x 3 x 1
1 x 3 x 5
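A quick check that the one-liner matches the triple loop from the question:
import numpy

a = numpy.random.rand(10, 3)
b = numpy.random.rand(3, 5)

# Reference result from the triple loop in the question.
out = numpy.ones((10, 5))
for i in range(10):
    for j in range(5):
        for k in range(3):
            out[i, j] *= a[i, k]**b[k, j]

# Broadcast version: shapes (10, 3, 1) ** (1, 3, 5) -> (10, 3, 5), reduced over axis 1.
assert numpy.allclose(out, (a[:, :, None]**b[None, :, :]).prod(axis=1))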
Two more solutions:
Inlining
numpy.array([
    numpy.prod([a[:, i]**bb[i] for i in range(len(bb))], axis=0)
    for bb in b.T
]).T
and using power.outer:
numpy.prod([numpy.power.outer(a[:, k], b[k]) for k in range(len(b))], axis=0)
Both are a bit slower than the broadcasting solution.
Even with some logic to accommodate zero and negative values, the exp-log solution takes the cake.
Code to reproduce the plot:
import numpy
import perfplot
def loop(data):
    a, b = data
    m = a.shape[0]
    n = b.shape[1]
    out = numpy.ones((m, n))
    for i in range(m):
        for j in range(n):
            for k in range(3):
                out[i, j] *= a[i, k]**b[k, j]
    return out
def broadcasting(data):
    a, b = data
    return (a[..., None]**b[None]).prod(1)
def log_exp(data):
    a, b = data
    neg_a = numpy.zeros(a.shape, dtype=int)
    neg_a[a < 0.0] = 1
    odd_b = numpy.zeros(b.shape, dtype=int)
    odd_b[b % 2 == 1] = 1
    negative_count = numpy.dot(neg_a, odd_b)
    out = (-1)**negative_count * numpy.exp(
        numpy.matmul(
            numpy.log(abs(a), where=abs(a) > 0.0),
            b
        ))
    zero_a = numpy.zeros(a.shape, dtype=int)
    zero_a[a == 0.0] = 1
    pos_b = numpy.zeros(b.shape, dtype=int)
    pos_b[b > 0] = 1
    zero_count = numpy.dot(zero_a, pos_b)
    out[zero_count > 0] = 0.0
    return out
def inline(data):
    a, b = data
    return numpy.array([
        numpy.prod([a[:, i]**bb[i] for i in range(len(bb))], axis=0)
        for bb in b.T
    ]).T
def outer_power(data):
    a, b = data
    return numpy.prod([
        numpy.power.outer(a[:, k], b[k]) for k in range(len(b))
    ], axis=0)
perfplot.show(
    setup=lambda n: (
        numpy.random.rand(n, 3) - 0.5,
        numpy.random.randint(0, 10, (3, n))
    ),
    n_range=[2**k for k in range(11)],
    repeat=10,
    kernels=[
        loop,
        broadcasting,
        inline,
        log_exp,
        outer_power
    ],
    logx=True,
    logy=True,
    xlabel='len(a)',
)
import numpy
a = numpy.random.rand(10, 3)
b = numpy.random.rand(3, 5)
out = [[numpy.prod([a[i, k]**b[k, j] for k in range(3)]) for j in range(5)] for i in range(10)]

Procrustes Analysis with NumPy?

Is there something like Matlab's procrustes function in NumPy/SciPy or related libraries?
For reference: Procrustes analysis aims to align 2 sets of points (in other words, 2 shapes) to minimize the square distance between them by removing scale, translation and rotation warp components.
Example in Matlab:
X = [0 1; 2 3; 4 5; 6 7; 8 9]; % first shape
R = [1 2; 2 1]; % rotation matrix
t = [3 5]; % translation vector
Y = X * R + repmat(t, 5, 1); % warped shape, no scale and no distortion
[d Z] = procrustes(X, Y); % Z is Y aligned back to X
Z
Z =
0.0000 1.0000
2.0000 3.0000
4.0000 5.0000
6.0000 7.0000
8.0000 9.0000
Same task in NumPy:
X = arange(10).reshape((5, 2))
R = array([[1, 2], [2, 1]])
t = array([3, 5])
Y = dot(X, R) + t
Z = ???
Note: I'm only interested in aligned shape, since square error (variable d in Matlab code) is easily computed from 2 shapes.
I'm not aware of any pre-existing implementation in Python, but it's easy to take a look at the MATLAB code using edit procrustes.m and port it to Numpy:
import numpy as np

def procrustes(X, Y, scaling=True, reflection='best'):
    """
    A port of MATLAB's `procrustes` function to Numpy.

    Procrustes analysis determines a linear transformation (translation,
    reflection, orthogonal rotation and scaling) of the points in Y to best
    conform them to the points in matrix X, using the sum of squared errors
    as the goodness of fit criterion.

        d, Z, [tform] = procrustes(X, Y)

    Inputs:
    ------------
    X, Y
        matrices of target and input coordinates. they must have equal
        numbers of points (rows), but Y may have fewer dimensions
        (columns) than X.
    scaling
        if False, the scaling component of the transformation is forced
        to 1
    reflection
        if 'best' (default), the transformation solution may or may not
        include a reflection component, depending on which fits the data
        best. setting reflection to True or False forces a solution with
        reflection or no reflection respectively.

    Outputs
    ------------
    d
        the residual sum of squared errors, normalized according to a
        measure of the scale of X, ((X - X.mean(0))**2).sum()
    Z
        the matrix of transformed Y-values
    tform
        a dict specifying the rotation, translation and scaling that
        maps Y --> X
    """
    n, m = X.shape
    ny, my = Y.shape

    muX = X.mean(0)
    muY = Y.mean(0)

    X0 = X - muX
    Y0 = Y - muY

    ssX = (X0**2.).sum()
    ssY = (Y0**2.).sum()

    # centred Frobenius norm
    normX = np.sqrt(ssX)
    normY = np.sqrt(ssY)

    # scale to equal (unit) norm
    X0 /= normX
    Y0 /= normY

    if my < m:
        Y0 = np.concatenate((Y0, np.zeros((n, m - my))), 0)

    # optimum rotation matrix of Y
    A = np.dot(X0.T, Y0)
    U, s, Vt = np.linalg.svd(A, full_matrices=False)
    V = Vt.T
    T = np.dot(V, U.T)

    if reflection != 'best':
        # does the current solution use a reflection?
        have_reflection = np.linalg.det(T) < 0

        # if that's not what was specified, force another reflection
        if reflection != have_reflection:
            V[:, -1] *= -1
            s[-1] *= -1
            T = np.dot(V, U.T)

    traceTA = s.sum()

    if scaling:
        # optimum scaling of Y
        b = traceTA * normX / normY

        # standardised distance between X and b*Y*T + c
        d = 1 - traceTA**2

        # transformed coords
        Z = normX*traceTA*np.dot(Y0, T) + muX
    else:
        b = 1
        d = 1 + ssY/ssX - 2 * traceTA * normY / normX
        Z = normY*np.dot(Y0, T) + muX

    # transformation matrix
    if my < m:
        T = T[:my, :]
    c = muX - b*np.dot(muY, T)

    # transformation values
    tform = {'rotation': T, 'scale': b, 'translation': c}

    return d, Z, tform
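A quick test with the arrays from the question (the fit here is effectively exact because the points of X are collinear, matching the MATLAB output above):
import numpy as np

X = np.arange(10).reshape((5, 2))
R = np.array([[1, 2], [2, 1]])
t = np.array([3, 5])
Y = np.dot(X, R) + t

d, Z, tform = procrustes(X, Y)
# Z should recover the original shape, as in the MATLAB example.
print(np.allclose(Z, X))  # expected: True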
There is a Scipy function for it: scipy.spatial.procrustes
I'm just posting its example here:
>>> import numpy as np
>>> from scipy.spatial import procrustes
>>> a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
>>> b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
>>> mtx1, mtx2, disparity = procrustes(a, b)
>>> round(disparity)
0.0
You can have both Ordinary Procrustes Analysis and Generalized Procrustes Analysis in python with something like this:
import numpy as np
def opa(a, b):
    aT = a.mean(0)
    bT = b.mean(0)
    A = a - aT
    B = b - bT
    aS = np.sum(A * A)**.5
    bS = np.sum(B * B)**.5
    A /= aS
    B /= bS
    U, _, V = np.linalg.svd(np.dot(B.T, A))
    aR = np.dot(U, V)
    if np.linalg.det(aR) < 0:
        V[1] *= -1
        aR = np.dot(U, V)
    aS = aS / bS
    aT -= (bT.dot(aR) * aS)
    aD = (np.sum((A - B.dot(aR))**2) / len(a))**.5
    return aR, aS, aT, aD
def gpa(v, n=-1):
    if n < 0:
        p = avg(v)
    else:
        p = v[n]
    l = len(v)
    r, s, t, d = np.ndarray((4, l), object)
    for i in range(l):
        r[i], s[i], t[i], d[i] = opa(p, v[i])
    return r, s, t, d
def avg(v):
    v_ = np.copy(v)
    l = len(v_)
    R, S, T = [list(np.zeros(l)) for _ in range(3)]
    for i, j in np.ndindex(l, l):
        r, s, t, _ = opa(v_[i], v_[j])
        R[j] += np.arccos(min(1, max(-1, np.trace(r[:1])))) * np.sign(r[1][0])
        S[j] += s
        T[j] += t
    for i in range(l):
        a = R[i] / l
        r = [np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]
        v_[i] = v_[i].dot(r) * (S[i] / l) + (T[i] / l)
    return v_.mean(0)
For testing purposes, the output of each algorithm can be visualized as follows:
import matplotlib.pyplot as p; p.rcParams['toolbar'] = 'None';
def plt(o, e, b):
    p.figure(figsize=(10, 10), dpi=72, facecolor='w').add_axes([0.05, 0.05, 0.9, 0.9], aspect='equal')
    p.plot(0, 0, marker='x', mew=1, ms=10, c='g', zorder=2, clip_on=False)
    p.gcf().canvas.set_window_title('%f' % e)
    x = np.ravel(o[0].T[0])
    y = np.ravel(o[0].T[1])
    p.xlim(min(x), max(x))
    p.ylim(min(y), max(y))
    a = []
    for i, j in np.ndindex(len(o), 2):
        a.append(o[i].T[j])
    O = p.plot(*a, marker='x', mew=1, ms=10, lw=.25, c='b', zorder=0, clip_on=False)
    O[0].set(c='r', zorder=1)
    if not b:
        O[2].set_color('b')
        O[2].set_alpha(0.4)
    p.axis('off')
    p.show()
# Fly wings example (Klingenberg, 2015 | https://en.wikipedia.org/wiki/Procrustes_analysis)
arr1 = np.array([[588.0, 443.0], [178.0, 443.0], [56.0, 436.0], [50.0, 376.0], [129.0, 360.0], [15.0, 342.0], [92.0, 293.0], [79.0, 269.0], [276.0, 295.0], [281.0, 331.0], [785.0, 260.0], [754.0, 174.0], [405.0, 233.0], [386.0, 167.0], [466.0, 59.0]])
arr2 = np.array([[477.0, 557.0], [130.129, 374.307], [52.0, 334.0], [67.662, 306.953], [111.916, 323.0], [55.119, 275.854], [107.935, 277.723], [101.899, 259.73], [175.0, 329.0], [171.0, 345.0], [589.0, 527.0], [591.0, 468.0], [299.0, 363.0], [306.0, 317.0], [406.0, 288.0]])
def opa_out(a):
    r, s, t, d = opa(a[0], a[1])
    a[1] = a[1].dot(r) * s + t
    return a, d, False
plt(*opa_out([arr1, arr2, np.matrix.copy(arr2)]))
def gpa_out(a):
    g = gpa(a, -1)
    D = [avg(a)]
    for i in range(len(a)):
        D.append(a[i].dot(g[0][i]) * g[1][i] + g[2][i])
    return D, sum(g[3])/len(a), True
plt(*gpa_out([arr1, arr2]))
You probably want to try this package, which offers various flavors of Procrustes methods: https://github.com/theochem/procrustes.
