I am learning GMM to do color segmentation. I found a good resource online with the following GMM code:
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
import numpy as np
from scipy.stats import norm
np.random.seed(0)
X = np.linspace(-5, 5, num=20)
X0 = X * np.random.rand(len(X)) + 15 # Create data cluster 1
X1 = X * np.random.rand(len(X)) - 15 # Create data cluster 2
X2 = X * np.random.rand(len(X)) # Create data cluster 3
X_tot = np.stack((X0, X1, X2)).flatten() # Combine the clusters to get the random datapoints from above
class GM1D:
    def __init__(self, X, iterations):
        self.iterations = iterations
        self.X = X
        self.mu = None
        self.pi = None
        self.var = None

    def run(self):
        self.mu = [-8, 8, 5]
        self.pi = [1 / 3, 1 / 3, 1 / 3]
        self.var = [5, 3, 1]
        for iter in range(self.iterations):
            # E-step: compute the responsibility r[i, c] of gaussian c for point i
            r = np.zeros((len(X_tot), 3))
            for c, g, p in zip(range(3),
                               [norm(loc=self.mu[0], scale=self.var[0]),
                                norm(loc=self.mu[1], scale=self.var[1]),
                                norm(loc=self.mu[2], scale=self.var[2])],
                               self.pi):
                r[:, c] = p * g.pdf(X_tot)  # Write the probability that x belongs to gaussian c in column c.
            for i in range(len(r)):
                r[i] = r[i] / (np.sum(self.pi) * np.sum(r, axis=1)[i])
            # Plot the points coloured by responsibility, plus the three gaussians
            fig = plt.figure(figsize=(10, 10))
            ax0 = fig.add_subplot(111)
            for i in range(len(r)):
                ax0.scatter(self.X[i], 0, c=np.array([r[i][0], r[i][1], r[i][2]]), s=100)
            for g, c in zip([norm(loc=self.mu[0], scale=self.var[0]).pdf(np.linspace(-20, 20, num=60)),
                             norm(loc=self.mu[1], scale=self.var[1]).pdf(np.linspace(-20, 20, num=60)),
                             norm(loc=self.mu[2], scale=self.var[2]).pdf(np.linspace(-20, 20, num=60))],
                            ['r', 'g', 'b']):
                ax0.plot(np.linspace(-20, 20, num=60), g, c=c)
            # M-step: update the weights pi_c, the means mu_c and (in principle) the variances
            m_c = []
            for c in range(len(r[0])):
                m = np.sum(r[:, c])
                m_c.append(m)  # For each cluster c, calculate m_c and add it to the list m_c
            for k in range(len(m_c)):
                self.pi[k] = (m_c[k] / np.sum(m_c))  # For each cluster c, calculate the fraction of points pi_c which belongs to cluster c
            self.mu = np.sum(self.X.reshape(len(self.X), 1) * r, axis=0) / m_c
            var_c = []
            for c in range(len(r[0])):
                var_c.append((1 / m_c[c]) * np.dot(((np.array(r[:, c]).reshape(60, 1)) * (self.X.reshape(len(self.X), 1) - self.mu[c])).T,
                                                   (self.X.reshape(len(self.X), 1) - self.mu[c])))
            # Note: var_c is computed here but never assigned back to self.var
            plt.show()


GM1D = GM1D(X_tot, 10)
GM1D.run()
Now, my understanding is that in the maximization step of EM we must update all of the Gaussian parameters: the covariance matrix, the mean, and the size of each Gaussian (pi_c). In the code given above, I can see the values of pi_c and the mean (mu) being updated, but I don't think the covariance is: var_c is computed in each iteration but never written back to self.var. However, when I run the code it seems to be working (?). Can someone please help me determine whether the code is correct or not? The code is from the following resource
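For reference, if the variance really were updated, the M-step would typically end by writing the new values back to self.var. A minimal sketch of that missing step, assuming the standard 1-D GMM update (this is not part of the original code):

            # Hypothetical fix: store the new variances so the next E-step uses them.
            # Note that norm(loc=mu, scale=...) expects a standard deviation, so the
            # E-step should strictly use np.sqrt(self.var) if self.var holds variances.
            self.var = [np.sum(r[:, c] * (self.X - self.mu[c]) ** 2) / m_c[c]
                        for c in range(3)]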
I have implemented a simple randomized, population-based optimization method: the Grey Wolf Optimizer. I am having some trouble properly capturing the Matplotlib plots at each iteration using the celluloid Camera package.
I am running GWO for the objective function f(x,y) = x^2 + y^2. I can see the candidate solutions converging to the minimum, but the contour plot doesn't show up.
Do you have any suggestions on how I can display the contour plot in the background?
GWO Algorithm implementation
%matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
from celluloid import Camera
import ffmpeg
import PIL  # the Pillow package is imported as PIL, not "pillow"
# X : Position vector of the initial population
# n : Initial population size
def gwo(f, max_iterations, LB, UB):
    fig = plt.figure()
    camera = Camera(fig)

    def random_population_uniform(m, a, b):
        dims = len(a)
        x = [list(a + np.multiply(np.random.rand(dims), b - a)) for i in range(m)]
        return np.array(x)

    def search_agent_fitness(fitness):
        alpha = 0
        if fitness[1] < fitness[alpha]:
            alpha, beta = 1, alpha
        else:
            beta = 1
        if fitness[2] > fitness[alpha] and fitness[2] < fitness[beta]:
            beta, delta = 2, beta
        elif fitness[2] < fitness[alpha]:
            alpha, beta, delta = 2, alpha, beta
        else:
            delta = 2
        for i in range(3, len(fitness)):
            if fitness[i] <= fitness[alpha]:
                alpha, beta, delta = i, alpha, beta
            elif fitness[i] > fitness[alpha] and fitness[i] <= fitness[beta]:
                beta, delta = i, beta
            elif fitness[i] > fitness[beta] and fitness[i] <= fitness[delta]:
                delta = i
        return alpha, beta, delta

    def plot_search_agent_positions(f, X, alpha, beta, delta, a, b):
        # Plot the positions of search agents
        x = X[:, 0]
        y = X[:, 1]
        s = plt.scatter(x, y, c='gray', zorder=1)
        s = plt.scatter(x[alpha], y[alpha], c='red', zorder=1)
        s = plt.scatter(x[beta], y[beta], c='blue', zorder=1)
        s = plt.scatter(x[delta], y[delta], c='green', zorder=1)
        camera.snap()

    # Initialize the position of the search agents
    X = random_population_uniform(50, np.array(LB), np.array(UB))
    n = len(X)
    l = 1
    # Plot the first image on screen
    x = np.linspace(LB[0], LB[1], 1000)
    y = np.linspace(LB[0], UB[1], 1000)
    X1, X2 = np.meshgrid(x, y)
    Z = f(X1, X2)
    cont = plt.contour(X1, X2, Z, 20, linewidths=0.75)
    while (l < max_iterations):
        # Take the x,y coordinates of the initial population
        x = X[:, 0]
        y = X[:, 1]
        # Calculate the objective function for each search agent
        fitness = list(map(f, x, y))
        # Update alpha, beta and delta
        alpha, beta, delta = search_agent_fitness(fitness)
        # Plot search agent positions
        plot_search_agent_positions(f, X, alpha, beta, delta, LB, UB)
        # a decreases linearly from 2 to 0
        a = 2 - l * (2 / max_iterations)
        # Update the position of search agents including the Omegas
        for i in range(n):
            x_prey = X[alpha]
            r1 = np.random.rand(2)  # r1 is a random vector in [0,1] x [0,1]
            r2 = np.random.rand(2)  # r2 is a random vector in [0,1] x [0,1]
            A1 = 2 * a * r1 - a
            C1 = 2 * r2
            D_alpha = np.abs(C1 * x_prey - X[i])
            X_1 = x_prey - A1 * D_alpha

            x_prey = X[beta]
            r1 = np.random.rand(2)
            r2 = np.random.rand(2)
            A2 = 2 * a * r1 - a
            C2 = 2 * r2
            D_beta = np.abs(C2 * x_prey - X[i])
            X_2 = x_prey - A2 * D_beta

            x_prey = X[delta]
            r1 = np.random.rand(2)
            r2 = np.random.rand(2)
            A3 = 2 * a * r1 - a
            C3 = 2 * r2
            D_delta = np.abs(C3 * x_prey - X[i])
            X_3 = x_prey - A3 * D_delta

            X[i] = (X_1 + X_2 + X_3) / 3
        l = l + 1
    return X[alpha], camera
Function call
# define the objective function
def f(x, y):
    return x**2 + y**2

minimizer, camera = gwo(f, 7, [-10, -10], [10, 10])
animation = camera.animate(interval=1000, repeat=True,
                           repeat_delay=500)
Is it possible that the line x = np.linspace(LB[0],LB[1],1000) should be x = np.linspace(LB[0],UB[1],1000) instead? With your current definition, x is an array filled only with the value -10, which means that you are unlikely to find a contour.
Another thing that you might want to do is to move the cont = plt.contour(X1,X2,Z,20,linewidths=0.75) line inside your plot_search_agent_positions function, to ensure that the contour is plotted at each iteration of the animation.
Once you make those changes, the code looks like this:
import matplotlib.pyplot as plt
import numpy as np
from celluloid import Camera
import ffmpeg
import PIL
from matplotlib import animation, rc
from IPython.display import HTML, Image # For GIF
from scipy.interpolate import griddata
rc('animation', html='html5')
# X : Position vector of the initial population
# n : Initial population size
def gwo(f, max_iterations, LB, UB):
    fig = plt.figure()
    fig.add_subplot(aspect='equal')  # fig.gca(aspect='equal') no longer accepts kwargs in recent Matplotlib
    camera = Camera(fig)

    def random_population_uniform(m, a, b):
        dims = len(a)
        x = [list(a + np.multiply(np.random.rand(dims), b - a)) for i in range(m)]
        return np.array(x)

    def search_agent_fitness(fitness):
        alpha = 0
        if fitness[1] < fitness[alpha]:
            alpha, beta = 1, alpha
        else:
            beta = 1
        if fitness[2] > fitness[alpha] and fitness[2] < fitness[beta]:
            beta, delta = 2, beta
        elif fitness[2] < fitness[alpha]:
            alpha, beta, delta = 2, alpha, beta
        else:
            delta = 2
        for i in range(3, len(fitness)):
            if fitness[i] <= fitness[alpha]:
                alpha, beta, delta = i, alpha, beta
            elif fitness[i] > fitness[alpha] and fitness[i] <= fitness[beta]:
                beta, delta = i, beta
            elif fitness[i] > fitness[beta] and fitness[i] <= fitness[delta]:
                delta = i
        return alpha, beta, delta

    def plot_search_agent_positions(f, X, alpha, beta, delta, a, b, X1, X2, Z):
        # Plot the positions of search agents
        x = X[:, 0]
        y = X[:, 1]
        s = plt.scatter(x, y, c='gray', zorder=1)
        s = plt.scatter(x[alpha], y[alpha], c='red', zorder=1)
        s = plt.scatter(x[beta], y[beta], c='blue', zorder=1)
        s = plt.scatter(x[delta], y[delta], c='green', zorder=1)
        # Re-draw the contour on every frame so it shows up in the animation
        Z = f(X1, X2)
        cont = plt.contour(X1, X2, Z, levels=20, colors='k')  # dropped the original norm=True, which is not a valid contour argument
        plt.clabel(cont, cont.levels, inline=True, fontsize=10)
        camera.snap()

    # Initialize the position of the search agents
    X = random_population_uniform(50, np.array(LB), np.array(UB))
    n = len(X)
    l = 1
    # Build the grid for the contour plot
    x = np.linspace(LB[0], UB[1], 1000)
    y = np.linspace(LB[0], UB[1], 1000)
    X1, X2 = np.meshgrid(x, y)
    Z = f(X1, X2)
    while (l < max_iterations):
        # Take the x,y coordinates of the current population
        x = X[:, 0]
        y = X[:, 1]
        # Calculate the objective function for each search agent
        fitness = list(map(f, x, y))
        # Update alpha, beta and delta
        alpha, beta, delta = search_agent_fitness(fitness)
        # Plot search agent positions
        plot_search_agent_positions(f, X, alpha, beta, delta, LB, UB, X1, X2, Z)
        # a decreases linearly from 2 to 0
        a = 2 - l * (2 / max_iterations)
        # Update the position of search agents including the Omegas
        for i in range(n):
            x_prey = X[alpha]
            r1 = np.random.rand(2)  # r1 is a random vector in [0,1] x [0,1]
            r2 = np.random.rand(2)  # r2 is a random vector in [0,1] x [0,1]
            A1 = 2 * a * r1 - a
            C1 = 2 * r2
            D_alpha = np.abs(C1 * x_prey - X[i])
            X_1 = x_prey - A1 * D_alpha

            x_prey = X[beta]
            r1 = np.random.rand(2)
            r2 = np.random.rand(2)
            A2 = 2 * a * r1 - a
            C2 = 2 * r2
            D_beta = np.abs(C2 * x_prey - X[i])
            X_2 = x_prey - A2 * D_beta

            x_prey = X[delta]
            r1 = np.random.rand(2)
            r2 = np.random.rand(2)
            A3 = 2 * a * r1 - a
            C3 = 2 * r2
            D_delta = np.abs(C3 * x_prey - X[i])
            X_3 = x_prey - A3 * D_delta

            X[i] = (X_1 + X_2 + X_3) / 3
        l = l + 1
    return X[alpha], camera
# define the objective function
def f(x, y):
    return x**2 + y**2

minimizer, camera = gwo(f, 7, [-10, -10], [10, 10])
animation = camera.animate(interval=1000, repeat=True, repeat_delay=500)
And the output gives the animation with the contour plot drawn in the background on every frame.
Let us assume that we have a closed-form analytical function y = a * x + b, along with experimental data to which this function should be fitted. Thus, the error can be minimized by fitting the right a and b. The attached Python 3.8 code does this easily.
import time
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize


class Model:
    def __init__(self, x_exp, y_exp, params_init):
        self.x_exp = x_exp
        self.y_exp = y_exp
        self.params_init = params_init
        self.possible_b = [-1, 0, 1, 2, 3, 4]
        self.method = "brute"
        self.tolerance = 1 * 10**(-10)
        self.res = None

    def _fun_linear(self, x, a, b):
        y = a * x + b
        return y

    def _model(self, params):
        a = params[0]
        b = params[1]
        y_fit = self._fun_linear(self.x_exp, a, b)
        err = np.linalg.norm(y_fit - self.y_exp)
        return err

    def fit(self):
        time_beg = time.time()
        self.res = minimize(fun=self._model,
                            x0=self.params_init)
        time_end = time.time()
        print(time_end - time_beg)
        print(self.res.x)
        print(self.res)


def f(x, a, b):
    return a * x + b


x_exp = np.linspace(0, 10, 1000)
y_exp = f(x_exp, a=1, b=2) + np.random.normal(0, 0.5, 1000)

model = Model(x_exp, y_exp, [0, 0])
model.fit()
y_fit = f(x_exp, a=model.res.x[0], b=model.res.x[1])

fig, ax = plt.subplots()
ax.plot(x_exp, y_exp)
ax.plot(x_exp, y_fit)
fig.show()
Now, I would like to choose the b value from my own list, defined in self.possible_b, instead of approximating it with the minimization method. It is some kind of fixed-values constraint on b.
Is it possible? What are such algorithms called, if they exist? Are there any libraries able to do this?
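One common way to handle such a fixed-values constraint is brute force over the discrete parameter: run the continuous minimization once for each candidate b, with b held fixed, and keep the best result. This is usually filed under mixed discrete-continuous (or mixed-integer) optimization. A minimal sketch of the idea, where the helper name fit_with_discrete_b is made up for illustration:

import numpy as np
from scipy.optimize import minimize

def fit_with_discrete_b(x_exp, y_exp, possible_b, a_init=0.0):
    # For each allowed b, minimize the residual norm over a only,
    # then keep the (a, b) pair with the smallest error.
    best = None
    for b in possible_b:
        res = minimize(fun=lambda p, b=b: np.linalg.norm(p[0] * x_exp + b - y_exp),
                       x0=[a_init])
        if best is None or res.fun < best[2]:
            best = (res.x[0], b, res.fun)
    return best  # (a, b, error)

With only six values in self.possible_b, the exhaustive loop is cheap; for larger discrete grids, tools such as scipy.optimize.brute or dedicated mixed-integer solvers are the usual options.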
I don't know how to write the code for the three graphs of a damped harmonic oscillation model:
[x - t (time)], [v (velocity) - t (time)], [a (acceleration) - t (time)]
I can make the [x - t] graph, but I don't know how to make the other graphs.
import numpy as np
from matplotlib import pyplot as plt

# mx'' = - bx' - kx
x_0 = 3
v_0 = 0
y_0 = np.array([x_0, v_0])  # initial state [position, velocity]

def Euler_Method(f, a, b, y0, step):
    t = np.linspace(a, b, step)
    h = t[1] - t[0]
    Y = [y0]
    N = len(t)
    y = y0
    for n in range(0, N - 1):
        y = y + h * f(y, t[n])
        Y.append(y)
    Y = np.array(Y)
    return Y, t

def harmonic(y, t):
    k = 50
    m = 200
    b = 20  # drag coefficient
    a = (-1 * k / m) * y[0] - (b / m) * y[1]  # x'' = a, y[0] : position
    v = y[1]  # v = velocity : y[1]
    f = np.array([v, a])
    return f

a = Euler_Method(harmonic, 0, 100, y_0, 100000)
X = a[0][:, 0]
t = a[1]
plt.plot(t, X)
plt.show()
Why can't you just take the numerical derivative of X to get V and A? (Note that np.diff alone gives differences, not derivatives, so divide by the time step h.)

h = t[1] - t[0]     # time step
V = np.diff(X) / h  # v = dx/dt
A = np.diff(V) / h  # a = dv/dt

fig, (ax1, ax2, ax3) = plt.subplots(3)
fig.suptitle('Vertically stacked subplots')
ax1.plot(t, X)
ax2.plot(t[1:], V)
ax3.plot(t[2:], A)
plt.show()
This gives three vertically stacked plots of x, v and a against t.
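As a side note (an observation, not part of the original answer): the Euler integration above already carries the velocity as the second state component, so v can be read directly, and a can be recomputed from the model instead of being differenced:

# The state vector is [x, v], so column 1 of the solution is the velocity.
V_exact = a[0][:, 1]
# The acceleration follows from the model mx'' = -bx' - kx with k=50, m=200, b=20.
A_exact = (-50 / 200) * a[0][:, 0] - (20 / 200) * a[0][:, 1]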
I need to plot every iteration of a loop of the Kaczmarz algorithm, but when I try, there is an error:
ValueError: x and y can be no greater than 2-D, but have shapes (2, 1, 1) and (2,)
Here is my code (we have a matrix A, where every row gives a plane, and it has to solve Ax = f by the Kaczmarz algorithm and animate it):
import os
import matplotlib.pyplot as plt
import numpy as np
from numpy import linalg as la

x = np.linspace(-100, 15, 100)
A = np.matrix([[2, 3], [1, -1], [6, 1]])
f = np.matrix([4, 2, 15])
resh = la.pinv(A).dot(np.transpose(f))
e1 = (1 - 2 * x) / 3
e2 = (x - 2)
e3 = 15 - 6 * x
plt.plot(x, e1, color="black")
plt.plot(x, e2, color="black")
plt.plot(x, e3, color="black")
plt.axis([-0, 4, -4, 4])
[m, n] = np.shape(A)
precision = 0.01
iteration = 100
lmbd = 0.8
nrm = []
for i in range(0, m):
    nrm.append(la.norm(A[i][:]))
approx = np.zeros((n, 1))
predx = np.zeros((n, 1))
print(np.shape(approx))
for i in range(0, iteration):
    j = (i - 1) % m
    predx = approx
    approx = approx + np.multiply((f[:, j] - A[j, :].dot(approx)), (np.transpose(A[j, :])))
    plt.plot([predx[0], approx[0]], [predx[1], 2], "r-")
    # c = plt.plot([predx[1], 2], [predx[2], 2], "g-")
    # pause(0.01)
    if la.norm(predx - approx) <= precision:
        break
plt.show()
I have no idea how to fix it; in the end, it should look like this: Kaczmarz animation
How can I fix this loop? What am I doing wrong?
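For context, the error happens because predx and approx become np.matrix objects inside the loop, so predx[0] is itself a (1, 1) matrix and plt.plot receives data of shape (2, 1, 1). A sketch of one possible fix (not from the original post) that flattens the iterates to plain floats before plotting:

    # Extract scalars from the (n, 1) matrix iterates before plotting,
    # and connect the consecutive iterates predx -> approx.
    x0, y0 = float(predx[0]), float(predx[1])
    x1, y1 = float(approx[0]), float(approx[1])
    plt.plot([x0, x1], [y0, y1], "r-")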
Is there something like Matlab's procrustes function in NumPy/SciPy or related libraries?
For reference: Procrustes analysis aims to align two sets of points (in other words, two shapes) by minimizing the squared distance between them, removing the scale, translation and rotation warp components.
Example in Matlab:
X = [0 1; 2 3; 4 5; 6 7; 8 9]; % first shape
R = [1 2; 2 1]; % rotation matrix
t = [3 5]; % translation vector
Y = X * R + repmat(t, 5, 1); % warped shape, no scale and no distortion
[d Z] = procrustes(X, Y); % Z is Y aligned back to X
Z
Z =
0.0000 1.0000
2.0000 3.0000
4.0000 5.0000
6.0000 7.0000
8.0000 9.0000
Same task in NumPy:
import numpy as np

X = np.arange(10).reshape((5, 2))
R = np.array([[1, 2], [2, 1]])
t = np.array([3, 5])
Y = np.dot(X, R) + t
Z = ???
Note: I'm only interested in aligned shape, since square error (variable d in Matlab code) is easily computed from 2 shapes.
I'm not aware of any pre-existing implementation in Python, but it's easy to take a look at the MATLAB code using edit procrustes.m and port it to Numpy:
import numpy as np

def procrustes(X, Y, scaling=True, reflection='best'):
    """
    A port of MATLAB's `procrustes` function to Numpy.

    Procrustes analysis determines a linear transformation (translation,
    reflection, orthogonal rotation and scaling) of the points in Y to best
    conform them to the points in matrix X, using the sum of squared errors
    as the goodness-of-fit criterion.

        d, Z, [tform] = procrustes(X, Y)

    Inputs:
    ------------
    X, Y
        matrices of target and input coordinates. they must have equal
        numbers of points (rows), but Y may have fewer dimensions
        (columns) than X.

    scaling
        if False, the scaling component of the transformation is forced
        to 1

    reflection
        if 'best' (default), the transformation solution may or may not
        include a reflection component, depending on which fits the data
        best. setting reflection to True or False forces a solution with
        reflection or no reflection respectively.

    Outputs
    ------------
    d
        the residual sum of squared errors, normalized according to a
        measure of the scale of X, ((X - X.mean(0))**2).sum()

    Z
        the matrix of transformed Y-values

    tform
        a dict specifying the rotation, translation and scaling that
        maps Y --> X
    """
    n, m = X.shape
    ny, my = Y.shape

    muX = X.mean(0)
    muY = Y.mean(0)

    X0 = X - muX
    Y0 = Y - muY

    ssX = (X0**2.).sum()
    ssY = (Y0**2.).sum()

    # centred Frobenius norm
    normX = np.sqrt(ssX)
    normY = np.sqrt(ssY)

    # scale to equal (unit) norm
    X0 /= normX
    Y0 /= normY

    if my < m:
        Y0 = np.concatenate((Y0, np.zeros((n, m - my))), 0)  # shape must be a tuple

    # optimum rotation matrix of Y
    A = np.dot(X0.T, Y0)
    U, s, Vt = np.linalg.svd(A, full_matrices=False)
    V = Vt.T
    T = np.dot(V, U.T)

    if reflection != 'best':
        # does the current solution use a reflection?
        have_reflection = np.linalg.det(T) < 0

        # if that's not what was specified, force another reflection
        if reflection != have_reflection:
            V[:, -1] *= -1
            s[-1] *= -1
            T = np.dot(V, U.T)

    traceTA = s.sum()

    if scaling:
        # optimum scaling of Y
        b = traceTA * normX / normY

        # standardised distance between X and b*Y*T + c
        d = 1 - traceTA**2

        # transformed coords
        Z = normX * traceTA * np.dot(Y0, T) + muX
    else:
        b = 1
        d = 1 + ssY / ssX - 2 * traceTA * normY / normX
        Z = normY * np.dot(Y0, T) + muX

    # transformation matrix
    if my < m:
        T = T[:my, :]
    c = muX - b * np.dot(muY, T)

    # transformation values
    tform = {'rotation': T, 'scale': b, 'translation': c}

    return d, Z, tform
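A quick usage sketch mirroring the MATLAB example from the question (assuming the procrustes function above is in scope):

X = np.arange(10).reshape((5, 2))
R = np.array([[1, 2], [2, 1]])
t = np.array([3, 5])
Y = np.dot(X, R) + t

d, Z, tform = procrustes(X, Y)
print(np.round(Z, 4))  # Z aligns Y back to X, matching the MATLAB output above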
There is a Scipy function for it: scipy.spatial.procrustes
I'm just posting its example here:
>>> import numpy as np
>>> from scipy.spatial import procrustes
>>> a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
>>> b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
>>> mtx1, mtx2, disparity = procrustes(a, b)
>>> round(disparity)
0.0
You can have both Ordinary Procrustes Analysis and Generalized Procrustes Analysis in Python with something like this:
import numpy as np

def opa(a, b):
    aT = a.mean(0)
    bT = b.mean(0)
    A = a - aT
    B = b - bT
    aS = np.sum(A * A)**.5
    bS = np.sum(B * B)**.5
    A /= aS
    B /= bS
    U, _, V = np.linalg.svd(np.dot(B.T, A))
    aR = np.dot(U, V)
    if np.linalg.det(aR) < 0:
        V[1] *= -1
        aR = np.dot(U, V)
    aS = aS / bS
    aT -= (bT.dot(aR) * aS)
    aD = (np.sum((A - B.dot(aR))**2) / len(a))**.5
    return aR, aS, aT, aD

def gpa(v, n=-1):
    if n < 0:
        p = avg(v)
    else:
        p = v[n]
    l = len(v)
    r, s, t, d = np.ndarray((4, l), object)
    for i in range(l):
        r[i], s[i], t[i], d[i] = opa(p, v[i])
    return r, s, t, d

def avg(v):
    v_ = np.copy(v)
    l = len(v_)
    R, S, T = [list(np.zeros(l)) for _ in range(3)]
    for i, j in np.ndindex(l, l):
        r, s, t, _ = opa(v_[i], v_[j])
        R[j] += np.arccos(min(1, max(-1, np.trace(r[:1])))) * np.sign(r[1][0])
        S[j] += s
        T[j] += t
    for i in range(l):
        a = R[i] / l
        r = [np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]
        v_[i] = v_[i].dot(r) * (S[i] / l) + (T[i] / l)
    return v_.mean(0)
For testing purposes, the output of each algorithm can be visualized as follows:
import matplotlib.pyplot as p
p.rcParams['toolbar'] = 'None'

def plt(o, e, b):
    p.figure(figsize=(10, 10), dpi=72, facecolor='w').add_axes([0.05, 0.05, 0.9, 0.9], aspect='equal')
    p.plot(0, 0, marker='x', mew=1, ms=10, c='g', zorder=2, clip_on=False)
    p.gcf().canvas.manager.set_window_title('%f' % e)  # canvas.set_window_title is deprecated in recent Matplotlib
    x = np.ravel(o[0].T[0])
    y = np.ravel(o[0].T[1])
    p.xlim(min(x), max(x))
    p.ylim(min(y), max(y))
    a = []
    for i, j in np.ndindex(len(o), 2):
        a.append(o[i].T[j])
    O = p.plot(*a, marker='x', mew=1, ms=10, lw=.25, c='b', zorder=0, clip_on=False)
    O[0].set(c='r', zorder=1)
    if not b:
        O[2].set_color('b')
        O[2].set_alpha(0.4)
    p.axis('off')
    p.show()
# Fly wings example (Klingenberg, 2015 | https://en.wikipedia.org/wiki/Procrustes_analysis)
arr1 = np.array([[588.0, 443.0], [178.0, 443.0], [56.0, 436.0], [50.0, 376.0], [129.0, 360.0], [15.0, 342.0], [92.0, 293.0], [79.0, 269.0], [276.0, 295.0], [281.0, 331.0], [785.0, 260.0], [754.0, 174.0], [405.0, 233.0], [386.0, 167.0], [466.0, 59.0]])
arr2 = np.array([[477.0, 557.0], [130.129, 374.307], [52.0, 334.0], [67.662, 306.953], [111.916, 323.0], [55.119, 275.854], [107.935, 277.723], [101.899, 259.73], [175.0, 329.0], [171.0, 345.0], [589.0, 527.0], [591.0, 468.0], [299.0, 363.0], [306.0, 317.0], [406.0, 288.0]])

def opa_out(a):
    r, s, t, d = opa(a[0], a[1])
    a[1] = a[1].dot(r) * s + t
    return a, d, False

plt(*opa_out([arr1, arr2, np.matrix.copy(arr2)]))

def gpa_out(a):
    g = gpa(a, -1)
    D = [avg(a)]
    for i in range(len(a)):
        D.append(a[i].dot(g[0][i]) * g[1][i] + g[2][i])
    return D, sum(g[3]) / len(a), True

plt(*gpa_out([arr1, arr2]))
You may also want to try this package, which implements various flavors of Procrustes methods: https://github.com/theochem/procrustes.