I have a recommendation dataset that I have transformed into a matrix of the form:
       item1  item2  item3 ...
user1    NaN    2.3    NaN
user2    1.7    3.4    NaN
user3    NaN    1.1    2.6
...
where NaN are items that the particular user has not reviewed yet. The above is in the form of a pandas dataframe. I want to construct an adjacency matrix from this, based on a predefined distance metric. I have a working function:
def compute_adjacency_matrix(reccomender_matrix):
    # replace nan with 0
    rec_num = reccomender_matrix.fillna(value=0)
    # compute the distances between every two users
    result = np.array([[compute_distance(li[2:], lj[2:]) for lj in rec_num.itertuples()] for li in rec_num.itertuples()])
    adjacency_matrix = (result > 0.0).astype(int)
    return adjacency_matrix
The problem is that, for large matrices, the line that computes result takes very long. What is the most efficient way of doing this so that it scales to larger datasets?
EDIT: Here is the compute distance function:
def compute_distance(vec1, vec2):
    rez = sum(abs(vec1[(vec1 > 0) & (vec2 > 0)] - vec2[(vec1 > 0) & (vec2 > 0)]))
    norm = np.count_nonzero(vec1) if np.count_nonzero(vec1) < np.count_nonzero(vec2) else np.count_nonzero(vec2)
    norm_rez = rez / norm
    return norm_rez
So it looks like you want a mean absolute distance metric, although that's not exactly what you wrote (since you're normalizing not by the size of the intersection but by the size of the smaller vector). If you want mean absolute distance, it's simply:
def compute_distance(vec1, vec2):
    return np.nanmean(np.abs(vec1 - vec2))
You can then use that metric with scipy.spatial.distance.pdist and squareform:
from scipy.spatial.distance import pdist, squareform

def compute_adjacency_matrix(reccomender_matrix):
    result = squareform(pdist(reccomender_matrix.values.T, metric=compute_distance))
    result = np.nan_to_num(result)
    adjacency_matrix = (result > 0.0).astype(int)
    return adjacency_matrix
As noted in my comment, I think you need to rethink your metrics and outputs. That code will make any two users who have rated a common item adjacent, no matter what scores they gave - unless they gave identical scores, in which case they won't be adjacent. Not sure that's what you want.
A slightly better method would be carrying through the nans and using them to make your adjacency matrix.
def compute_adjacency_matrix(reccomender_matrix):
    result = squareform(pdist(reccomender_matrix.values.T, metric=compute_distance))
    adjacency_matrix = np.logical_not(np.isnan(result)).astype(int)
    return adjacency_matrix
If you don't need the distances, you can do it all with binary operations:
def adjacency(x, y):
    return np.any(np.logical_and(x, y))

def compute_adjacency_matrix(reccomender_matrix):
    return squareform(pdist(np.isfinite(reccomender_matrix.values.T),
                            metric=adjacency)).astype(int)
Finally, you can do it all with numba if that's all too slow:
import numba as nb

@nb.njit
def compute_adjacency_matrix(reccomender_matrix):
    # expects a plain NumPy array (e.g. df.values), not a DataFrame
    n, m = reccomender_matrix.shape
    out = np.zeros((m, m))
    count = np.zeros((m, m))
    dists = np.zeros((m, m))
    adj = np.zeros((m, m))
    for i in range(m):
        for j in range(i + 1, m):
            for k in range(n):
                if not (np.isnan(reccomender_matrix[k, i]) or
                        np.isnan(reccomender_matrix[k, j])):
                    out[i, j] += np.abs(reccomender_matrix[k, i] - reccomender_matrix[k, j])
                    count[i, j] += 1
    for i in range(m):
        for j in range(m):
            if i == j:
                dists[i, j] = 0.
            elif i < j:
                if count[i, j] != 0:
                    dists[i, j] = out[i, j] / count[i, j]
                    adj[i, j] = 1
                else:
                    dists[i, j] = 0.
            else:
                dists[i, j] = dists[j, i]
                adj[i, j] = adj[j, i]
    return dists, adj
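For reference, here is a minimal usage sketch (with a hypothetical toy DataFrame df shaped like the question's matrix) for the numba version above, which expects a plain NumPy array and returns both the distance and adjacency matrices:

import numpy as np
import pandas as pd

# hypothetical toy data in the same layout as the question's matrix
df = pd.DataFrame(
    {"item1": [np.nan, 1.7, np.nan],
     "item2": [2.3, 3.4, 1.1],
     "item3": [np.nan, np.nan, 2.6]},
    index=["user1", "user2", "user3"],
)

dists, adj = compute_adjacency_matrix(df.values)  # pass the raw array, not the DataFrame
print(adj)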
I'm currently running into a problem solving this.
The objective of the given exercise is to find a polynomial of a certain degree (the degree is given) that best fits a dataset of points (which can be noisy) using the least squares method.
I don't understand the steps that lead to the system of linear equations to be solved.
What are the steps (or could someone provide a Python program) that lead to the matrix that I pass as an argument to my decomposition routine?
Note: I have Python programs for cubic splines and LU decomposition/Gaussian elimination.
Thanks.
I tried to apply Gaussian/LU decomposition directly to the dataset, but I understand there are more steps to the solution...
I also don't understand how cubic splines add to the mix.
Edit:
Gaussian elimination:
import numpy as np
import math

def swapRows(v, i, j):
    if len(v.shape) == 1:
        v[i], v[j] = v[j], v[i]
    else:
        v[[i, j], :] = v[[j, i], :]

def swapCols(v, i, j):
    v[:, [i, j]] = v[:, [j, i]]

def gaussPivot(a, b, tol=1.0e-12):
    n = len(b)
    # Set up scale factors
    s = np.zeros(n)
    for i in range(n):
        s[i] = max(np.abs(a[i, :]))
    for k in range(0, n - 1):
        # Row interchange, if needed
        p = np.argmax(np.abs(a[k:n, k]) / s[k:n]) + k
        if abs(a[p, k]) < tol:
            raise RuntimeError('Matrix is singular')
        if p != k:
            swapRows(b, k, p)
            swapRows(s, k, p)
            swapRows(a, k, p)
        # Elimination
        for i in range(k + 1, n):
            if a[i, k] != 0.0:
                lam = a[i, k] / a[k, k]
                a[i, k + 1:n] = a[i, k + 1:n] - lam * a[k, k + 1:n]
                b[i] = b[i] - lam * b[k]
    if abs(a[n - 1, n - 1]) < tol:
        raise RuntimeError('Matrix is singular')
    # Back substitution
    b[n - 1] = b[n - 1] / a[n - 1, n - 1]
    for k in range(n - 2, -1, -1):
        b[k] = (b[k] - np.dot(a[k, k + 1:n], b[k + 1:n])) / a[k, k]
    return b
def polyFit(xData, yData, m):
    a = np.zeros((m + 1, m + 1))
    b = np.zeros(m + 1)
    s = np.zeros(2 * m + 1)
    for i in range(len(xData)):
        temp = yData[i]
        for j in range(m + 1):
            b[j] = b[j] + temp
            temp = temp * xData[i]
        temp = 1.0
        for j in range(2 * m + 1):
            s[j] = s[j] + temp
            temp = temp * xData[i]
    for i in range(m + 1):
        for j in range(m + 1):
            a[i, j] = s[i + j]
    return gaussPivot(a, b)

degree = 10  # can be any degree
polyFit(xData, yData, degree)
I was under the impression that the code above takes a dataset of points and a degree. The output should be the coefficients of a polynomial that fits those points, but I have a grader that was provided by my professor, and according to the grading the returned polynomial has a large error.
After that I tried the following LU decomposition instead:
import numpy as np

def swapRows(v, i, j):
    if len(v.shape) == 1:
        v[i], v[j] = v[j], v[i]
    else:
        v[[i, j], :] = v[[j, i], :]

def swapCols(v, i, j):
    v[:, [i, j]] = v[:, [j, i]]

def LUdecomp(a, tol=1.0e-9):
    n = len(a)
    seq = np.array(range(n))
    # Set up scale factors
    s = np.zeros((n))
    for i in range(n):
        s[i] = max(abs(a[i, :]))
    for k in range(0, n - 1):
        # Row interchange, if needed
        p = np.argmax(np.abs(a[k:n, k]) / s[k:n]) + k
        if abs(a[p, k]) < tol:
            raise RuntimeError('Matrix is singular')
        if p != k:
            swapRows(s, k, p)
            swapRows(a, k, p)
            swapRows(seq, k, p)
        # Elimination
        for i in range(k + 1, n):
            if a[i, k] != 0.0:
                lam = a[i, k] / a[k, k]
                a[i, k + 1:n] = a[i, k + 1:n] - lam * a[k, k + 1:n]
                a[i, k] = lam
    return a, seq

def LUsolve(a, b, seq):
    n = len(a)
    # Rearrange constant vector; store it in [x]
    x = b.copy()
    for i in range(n):
        x[i] = b[seq[i]]
    # Solution
    for k in range(1, n):
        x[k] = x[k] - np.dot(a[k, 0:k], x[0:k])
    x[n - 1] = x[n - 1] / a[n - 1, n - 1]
    for k in range(n - 2, -1, -1):
        x[k] = (x[k] - np.dot(a[k, k + 1:n], x[k + 1:n])) / a[k, k]
    return x
The results were a bit better, but nowhere near what they should be.
Edit 2:
I tried the Chebyshev method suggested in the comments and came up with:
import numpy as np

def chebyshev_transform(x, n):
    """
    Transforms x-coordinates to Chebyshev coordinates
    """
    return np.cos(n * np.arccos(x))

def chebyshev_design_matrix(x, n):
    """
    Constructs the Chebyshev design matrix
    """
    x_cheb = chebyshev_transform(x, n)
    T = np.zeros((len(x), n + 1))
    T[:, 0] = 1
    T[:, 1] = x_cheb
    for i in range(2, n + 1):
        T[:, i] = 2 * x_cheb * T[:, i - 1] - T[:, i - 2]
    return T

degree = 10
f = lambda x: np.cos(x)
xdata = np.linspace(-1, 1, num=100)
ydata = np.array([f(i) for i in xdata])
M = chebyshev_design_matrix(xdata, degree)
D_x, D_y = np.linalg.qr(M)
D_x, seq = LUdecomp(D_x)
A = LUsolve(D_x, D_y, seq)
I can't use linalg.qr in my program; it was just for checking how it works. In addition, I didn't get the 'slow way' of the formulas that were in the comments.
The program can't handle an x point that is not between -1 and 1. Is there any way around it, any normalization?
Thanks a lot.
Hints:
You are probably asked for an unsophisticated method. If the degree of the polynomial remains low, you can use the straightforward approach below. For the sake of the explanation, I'll use a cubic model.
Assume that you want to fit your data to this polynomial, by observing that it seems to follow a cubic behavior:
ax³ + bx² + cx + d ~ y
[All x and y should be understood with an index i which is omitted for notational convenience.]
If there are more than four data points, you get an overdetermined system of equations, usually with no solution. The trick is to consider the error on the individual equations, e = ax³ + bx² + cx + d - y, and to minimize the total error. As the error is a signed number, negative errors would make minimization impossible. Instead, we minimize the sum of squared errors. (The sum of absolute errors is another option but it unfortunately leads to a much harder problem.)
Min(a, b, c, d) Σ(ax³ + bx² + cx + d - y)²
As the unknown parameters are unconstrained, it suffices to look for a stationary point, i.e. cancel the gradient of the total error. By differentiation on the unknowns a, b, c and d, we obtain
2Σ(ax³x³ + bx²x³ + cxx³ + dx³ - yx³) = 0
2Σ(ax³x² + bx²x² + cxx² + dx² - yx²) = 0
2Σ(ax³x + bx²x + cxx + dx - yx ) = 0
2Σ(ax³ + bx² + cx + d - y ) = 0
As you can recognize, this is a square linear system of equations.
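As an illustration, here is a minimal sketch of that approach in NumPy for the cubic case, building the 4x4 normal-equation system from the power sums and solving it (cubic_least_squares is a hypothetical helper name, and np.linalg.solve stands in for whatever Gaussian-elimination or LU routine you already have):

import numpy as np

def cubic_least_squares(x, y):
    # Normal equations A p = r for p = [a, b, c, d]:
    # the row for power pi contains sum_i x_i^(pi+pj) for each pj,
    # with right-hand side sum_i y_i * x_i^pi
    powers = np.array([3, 2, 1, 0])
    A = np.array([[np.sum(x**(pi + pj)) for pj in powers] for pi in powers])
    r = np.array([np.sum(y * x**pi) for pi in powers])
    return np.linalg.solve(A, r)  # or your own gaussPivot / LUdecomp + LUsolve

# Toy check on noisy cubic data
rng = np.random.default_rng(0)
x = np.linspace(-2, 2, 50)
y = 1.5 * x**3 - 0.5 * x**2 + 2.0 * x + 1.0 + rng.normal(0, 0.1, x.size)
print(cubic_least_squares(x, y))  # should be close to [1.5, -0.5, 2.0, 1.0]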
I have created a function determinant which outputs the determinant of a 3x3 matrix. I also need to create a function to invert that matrix, but the code doesn't seem to work and I can't figure out why.
M = np.array([
    [4., 3., 9.],
    [2., 1., 8.],
    [10., 7., 5.]
])
def inverse(M):
    '''
    This function finds the inverse of a matrix using the Cramers rule.
    Input: Matrix - M
    Output: The inverse of the Matrix - M.
    '''
    d = determinant(M)  # Simply returns the determinant of the matrix M.
    counter = 1
    array = []
    for line in M:  # This for loop simply creates a co-factor of Matrix M and puts it in a list.
        y = []
        for item in line:
            if counter % 2 == 0:
                x = -item
            else:
                x = item
            counter += 1
            y.append(x)
        array.append(y)
    cf = np.matrix(array)  # Translating the list into a matrix.
    adj = np.matrix.transpose(cf)  # Transposing the matrix.
    inv = (1/d) * adj
    return inv
OUTPUT:
via inverse(M):
[[ 0.0952381 -0.04761905 0.23809524],
[-0.07142857 0.02380952 -0.16666667],
[ 0.21428571 -0.19047619 0.11904762]]
via built-in numpy inverse function:
[[-1.21428571 1.14285714 0.35714286]
[ 1.66666667 -1.66666667 -0.33333333]
[ 0.0952381 0.04761905 -0.04761905]]
As you can see, some of the numbers match, and I'm not sure why the answer isn't exact, since I believe I'm using the formula correctly.
Your co-factor matrix calculation isn't correct.
def inverse(M):
    d = np.linalg.det(M)
    cf_mat = []
    for i in range(M.shape[0]):
        for j in range(M.shape[1]):
            # for each position we need to calculate the det
            # of the submatrix without the current row and column
            # and multiply it by the position coefficient
            coef = (-1) ** (i + j)
            new_mat = []
            for i1 in range(M.shape[0]):
                for j1 in range(M.shape[1]):
                    if i1 != i and j1 != j:
                        new_mat.append(M[i1, j1])
            new_mat = np.array(new_mat).reshape(
                (M.shape[0] - 1, M.shape[1] - 1))
            new_mat_det = np.linalg.det(new_mat)
            cf_mat.append(new_mat_det * coef)
    cf_mat = np.array(cf_mat).reshape(M.shape)
    adj = np.matrix.transpose(cf_mat)
    inv = (1 / d) * adj
    return inv
This code isn't very efficient, but it shows how the calculation should be done. More information and the exact formula can be found on Wikipedia.
Output matrix:
[[-1.21428571 1.14285714 0.35714286]
[ 1.66666667 -1.66666667 -0.33333333]
[ 0.0952381 0.04761905 -0.04761905]]
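A quick sanity check of the corrected function against NumPy's built-in inverse, as a minimal sketch (assuming the inverse function defined just above and the matrix M from the question):

import numpy as np

M = np.array([
    [4., 3., 9.],
    [2., 1., 8.],
    [10., 7., 5.]
])

print(np.allclose(inverse(M), np.linalg.inv(M)))  # expected: True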
So I have two matrices, A and B, and I want to compute the min-plus product as given here: Min-plus matrix multiplication. For that I've implemented the following:
def min_plus_product(A, B):
    B = np.transpose(B)
    Y = np.zeros((len(B), len(A)))
    for i in range(len(B)):
        Y[i] = (A + B[i]).min(1)
    return np.transpose(Y)
This works fine but is slow for big matrices. Is there a way to make it faster? I've heard that implementing it in C or using the GPU might be good options.
Here is an algorithm that saves a bit of work if the middle dimension is large enough and the entries are uniformly distributed. It exploits the fact that the smallest sum will typically come from two small terms.
import numpy as np

def min_plus_product(A, B):
    B = np.transpose(B)
    Y = np.zeros((len(B), len(A)))
    for i in range(len(B)):
        Y[i] = (A + B[i]).min(1)
    return np.transpose(Y)

def min_plus_product_opt(A, B, chop=None):
    if chop is None:
        # not sure this is optimal
        chop = int(np.ceil(np.sqrt(A.shape[1])))
    B = np.transpose(B)
    Amin = A.min(1)
    Y = np.zeros((len(B), len(A)))
    for i in range(len(B)):
        o = np.argsort(B[i])
        Y[i] = (A[:, o[:chop]] + B[i, o[:chop]]).min(1)
        if chop < len(o):
            idx = np.where(Amin + B[i, o[chop]] < Y[i])[0]
            for j in range(chop, len(o), chop):
                if len(idx) == 0:
                    break
                x, y = np.ix_(idx, o[j:j + chop])
                slmin = (A[x, y] + B[i, o[j:j + chop]]).min(1)
                slmin = np.minimum(Y[i, idx], slmin)
                Y[i, idx] = slmin
                nidx = np.where(Amin[idx] + B[i, o[j + chop]] < Y[i, idx])[0]
                idx = idx[nidx]
    return np.transpose(Y)

A = np.random.random(size=(1000, 1000))
B = np.random.random(size=(1000, 2000))
print(np.allclose(min_plus_product(A, B), min_plus_product_opt(A, B)))

import time
t = time.time(); min_plus_product(A, B); print('naive {}sec'.format(time.time() - t))
t = time.time(); min_plus_product_opt(A, B); print('opt {}sec'.format(time.time() - t))
Sample output:
True
naive 7.794037580490112sec
opt 1.65810227394104sec
A possible simple route is to use numba.
from numba import autojit
import numpy as np

@autojit(nopython=True)  # note: newer numba releases have removed autojit; use numba.njit instead
def min_plus_product(A, B):
    n = A.shape[0]
    C = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            minimum = A[i, 0] + B[0, j]
            for k in range(1, n):
                minimum = min(A[i, k] + B[k, j], minimum)
            C[i, j] = minimum
    return C
Timings on 1000x1000 A,B matrices are:
1 loops, best of 3: 4.28 s per loop for the original code
1 loops, best of 3: 2.32 s per loop for the numba code
Here is a succinct and fully numpy solution, without any python-based loops:
(np.expand_dims(a, 0) + np.expand_dims(b.T, 1)).min(axis=2).T
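For completeness, a minimal sketch that wraps the one-liner in a function and checks it against the naive loop version from the question; note that it materialises a full three-dimensional intermediate array, so it trades memory for speed:

import numpy as np

def min_plus_product_broadcast(a, b):
    # a: (n, k), b: (k, m) -> result: (n, m), with result[i, j] = min_k (a[i, k] + b[k, j])
    return (np.expand_dims(a, 0) + np.expand_dims(b.T, 1)).min(axis=2).T

def min_plus_product_naive(A, B):
    B = np.transpose(B)
    Y = np.zeros((len(B), len(A)))
    for i in range(len(B)):
        Y[i] = (A + B[i]).min(1)
    return np.transpose(Y)

A = np.random.random((50, 40))
B = np.random.random((40, 60))
print(np.allclose(min_plus_product_broadcast(A, B), min_plus_product_naive(A, B)))  # expected: True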
I'm standardizing each cell in my train/test matrices across all users (1st dimension) using the following code. This is of course highly inefficient, but I wanted to make sure the idea worked. How do I do it using NumPy's optimized methods?
X_dims = X.shape
channels = 14  # not all columns, as binary variables should stay untouched
mu_cell = np.zeros(shape=(channels, X_dims[2], X_dims[3]))
sigma_cell = np.zeros(shape=(channels, X_dims[2], X_dims[3]))

for j in range(channels):
    for k in range(X_dims[2]):
        for l in range(X_dims[3]):
            mu_cell[j,k,l] = np.mean(X_train[:,j,k,l])
            sigma_cell[j,k,l] = np.std(X_train[:,j,k,l])

def standardizeCellWise(matrix):
    for i in range(matrix.shape[0]):
        for j in range(channels):
            for k in range(matrix.shape[2]):
                for l in range(matrix.shape[3]):
                    matrix[i, j, k, l] -= mu_cell[j,k,l]
                    matrix[i, j, k, l] = matrix[i, j, k, l] / sigma_cell[j,k,l] if sigma_cell[j,k,l] != 0 else 0
    return matrix

X_train = standardizeCellWise(X_train)
X_test = standardizeCellWise(X_test)
The mu and sigma arrays can be calculated in a numpythonic way as shown here -
import numpy as np
mu_cell = X_train[:,0:channels,:,:].mean(0)
sigma_cell = X_train[:,0:channels,:,:].std(0)
Next up, if you know that you don't have any infinite numbers or NaNs in the input matrix, you can use this vectorized approach to standardize cells -
def standardizeCellWise(matrix, mu_cell, sigma_cell):
    matrix_cut = matrix[:,0:channels,:,:]
    matrix_cut = (matrix_cut - mu_cell[None,:])/sigma_cell[None,:]
    mask = ~np.isfinite(matrix_cut)
    matrix_cut[mask] = 0
    matrix[:,0:channels,:,:] = matrix_cut
    return matrix
For a general input matrix case, you just need to change the calculation of the mask like so -
mask = np.tile(sigma_cell[None,:]==0,[matrix.shape[0],1,1,1])
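Put together, a minimal end-to-end sketch on toy data (assuming the same 4-D layout of users x channels x height x width as in the question, and using the tile-based mask for the general case):

import numpy as np

channels = 3
X_train = np.random.randn(100, 5, 4, 4)  # toy stand-in for the real training set
X_test = np.random.randn(20, 5, 4, 4)

mu_cell = X_train[:, 0:channels, :, :].mean(0)
sigma_cell = X_train[:, 0:channels, :, :].std(0)

def standardizeCellWise(matrix, mu_cell, sigma_cell):
    matrix_cut = matrix[:, 0:channels, :, :]
    matrix_cut = (matrix_cut - mu_cell[None, :]) / sigma_cell[None, :]
    # zero out cells whose training sigma is 0 (general case)
    mask = np.tile(sigma_cell[None, :] == 0, [matrix.shape[0], 1, 1, 1])
    matrix_cut[mask] = 0
    matrix[:, 0:channels, :, :] = matrix_cut
    return matrix

X_train = standardizeCellWise(X_train, mu_cell, sigma_cell)
X_test = standardizeCellWise(X_test, mu_cell, sigma_cell)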
I have a m × n × n numpy.ndarray of m simultaneously diagonalizable square matrices and would like to use numpy to obtain their simultaneous eigenvalues.
For example, if I had
from numpy import einsum, diag, array, linalg, random

U = linalg.svd(random.random((3,3)))[2]
M = einsum(
    "ij, ajk, lk",
    U, [diag([2,2,0]), diag([1,-1,1])], U)
the two matrices in M are simultaneously diagonalizable, and I am looking for a way to obtain the array
array([[2.,  1.],
       [2., -1.],
       [0.,  1.]])
(up to permutation of the lines) from M. Is there a built-in or easy way to get this?
There is a fairly simple and very elegant simultaneous diagonalization algorithm based on Givens rotations that was published by Cardoso and Souloumiac in 1996:
Cardoso, J., & Souloumiac, A. (1996). Jacobi Angles for Simultaneous Diagonalization. SIAM Journal on Matrix Analysis and Applications, 17(1), 161–164. doi:10.1137/S0895479893259546
I've attached a numpy implementation of the algorithm at the end of this response. Caveat: It turns out simultaneous diagonalization is a bit of a tricky numerical problem, with no algorithm (to the best of my knowledge) that guarantees global convergence. However, the cases in which it does not work (see the paper) are degenerate and in practice I have never had the Jacobi angles algorithm fail on me.
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
Routines for simultaneous diagonalization
Arun Chaganty <arunchaganty@gmail.com>
"""

import numpy as np
from numpy import zeros, eye, diag
from numpy.linalg import norm

def givens_rotate(A, i, j, c, s):
    """
    Rotate A along axis (i,j) by c and s
    """
    Ai, Aj = A[i,:], A[j,:]
    A[i,:], A[j,:] = c * Ai + s * Aj, c * Aj - s * Ai
    return A

def givens_double_rotate(A, i, j, c, s):
    """
    Rotate A along axis (i,j) by c and s
    """
    Ai, Aj = A[i,:], A[j,:]
    A[i,:], A[j,:] = c * Ai + s * Aj, c * Aj - s * Ai
    A_i, A_j = A[:,i], A[:,j]
    A[:,i], A[:,j] = c * A_i + s * A_j, c * A_j - s * A_i
    return A

def jacobi_angles(*Ms, **kwargs):
    r"""
    Simultaneously diagonalize using Jacobi angles
    @article{SC-siam,
       HTML = "ftp://sig.enst.fr/pub/jfc/Papers/siam_note.ps.gz",
       author = "Jean-Fran\c{c}ois Cardoso and Antoine Souloumiac",
       journal = "{SIAM} J. Mat. Anal. Appl.",
       title = "Jacobi angles for simultaneous diagonalization",
       pages = "161--164",
       volume = "17",
       number = "1",
       month = jan,
       year = {1995}}

    (a) Compute Givens rotations for every pair of indices (i,j) i < j
        - from eigenvectors of G = gg'; g = A_ij - A_ji, A_ij + A_ji
        - Compute c, s as \sqrt{x+r/2r}, y/\sqrt{2r(x+r)}
    (b) Update matrices by multiplying by the givens rotation R(i,j,c,s)
    (c) Repeat (a) until stopping criterion: sin theta < threshold for all ij pairs
    """
    assert len(Ms) > 0
    m, n = Ms[0].shape
    assert m == n

    sweeps = kwargs.get('sweeps', 500)
    threshold = kwargs.get('eps', 1e-8)
    rank = kwargs.get('rank', m)

    R = eye(m)

    for _ in xrange(sweeps):
        done = True
        for i in xrange(rank):
            for j in xrange(i+1, m):
                G = zeros((2,2))
                for M in Ms:
                    g = np.array([ M[i,i] - M[j,j], M[i,j] + M[j,i] ])
                    G += np.outer(g, g) / len(Ms)
                # Compute the eigenvector directly
                t_on, t_off = G[0,0] - G[1,1], G[0,1] + G[1,0]
                theta = 0.5 * np.arctan2(t_off, t_on + np.sqrt(t_on*t_on + t_off*t_off))
                c, s = np.cos(theta), np.sin(theta)

                if abs(s) > threshold:
                    done = False
                    # Update the matrices and V
                    for M in Ms:
                        givens_double_rotate(M, i, j, c, s)
                        #assert M[i,i] > M[j, j]
                    R = givens_rotate(R, i, j, c, s)
        if done:
            break
    R = R.T

    L = np.zeros((m, len(Ms)))
    err = 0
    for i, M in enumerate(Ms):
        # The off-diagonal elements of M should be 0
        L[:,i] = diag(M)
        err += norm(M - diag(diag(M)))

    return R, L, err
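A minimal usage sketch on the M from the question (the script above is written for Python 2.7; under Python 3, replace xrange with range). Note that jacobi_angles modifies its inputs in place, so copies are passed; L holds the simultaneous eigenvalues, one column per input matrix, up to row permutation:

import numpy as np

R, L, err = jacobi_angles(*[Mi.copy() for Mi in M])
print(L)    # rows are a permutation of [[2, 1], [2, -1], [0, 1]]
print(err)  # residual off-diagonal norm, should be close to 0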
I am not aware of any direct solution. But why not just get the eigenvalues and eigenvectors of the first matrix, and use its eigenvectors to transform all the other matrices to diagonal form? Something like:
eigvals, eigvecs = np.linalg.eig(matrix1)
eigvals2 = np.diagonal(np.dot(np.dot(np.transpose(eigvecs), matrix2), eigvecs))
You can then add the columns to an array via hstack if you like.
UPDATE: As pointed out below, this is only valid if no degenerate eigenvalues occur. Otherwise one would have to check first for the degenerate eigenvalues, then transform the 2nd matrix to block-diagonal form, and diagonalize any blocks bigger than 1x1 separately.
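To make that concrete, here is a minimal sketch (simultaneous_eigenvalues_via_first is a hypothetical helper name) that stacks the resulting columns, assuming symmetric matrices with no degenerate eigenvalues in the first one:

import numpy as np

def simultaneous_eigenvalues_via_first(Ms):
    # Eigendecompose the first matrix, then reuse its eigenvectors
    # to read off the eigenvalues of the remaining matrices.
    eigvals, eigvecs = np.linalg.eig(Ms[0])
    columns = [eigvals]
    for Mk in Ms[1:]:
        columns.append(np.diagonal(np.dot(np.dot(np.transpose(eigvecs), Mk), eigvecs)))
    return np.column_stack(columns)

# Toy example: symmetric matrices sharing an eigenbasis, no degeneracy in the first one
Q, _ = np.linalg.qr(np.random.random((3, 3)))
Ms = [np.dot(Q * d, Q.T) for d in ([3., 1., 2.], [1., -1., 1.])]
print(simultaneous_eigenvalues_via_first(Ms))  # each row pairs an eigenvalue of the first matrix with the second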
I am sure there is significant room for improvement in my solution, but I have come up with the following set of three functions doing the calculation for me in a semi-robust way.
import numpy
import scipy.linalg

def clusters(array,
             orig_indices=None,
             start=0,
             rtol=numpy.allclose.__defaults__[0],
             atol=numpy.allclose.__defaults__[1]):
    """For an array, return a permutation that sorts the numbers and the sizes of the resulting blocks of identical numbers."""
    array = numpy.asarray(array)
    if not len(array):
        return numpy.array([]), []
    if orig_indices is None:
        orig_indices = numpy.arange(len(array))
    x = array[0]
    close = abs(array - x) <= (atol + rtol*abs(x))
    first = sum(close)
    r_perm, r_sizes = clusters(
        array[~close],
        orig_indices[~close],
        start + first,
        rtol, atol)
    r_sizes.insert(0, first)
    return numpy.concatenate((orig_indices[close], r_perm)), r_sizes

def permutation_matrix(permutation, dtype=int):
    n = len(permutation)
    P = numpy.zeros((n, n), dtype)
    for i, j in enumerate(permutation):
        P[j, i] = 1
    return P
def simultaneously_diagonalize(tensor, atol=numpy.allclose.__defaults__[1]):
    tensor = numpy.asarray(tensor)
    old_shape = tensor.shape
    size = old_shape[-1]
    tensor = tensor.reshape((-1, size, size))
    diag_mask = 1 - numpy.eye(size)
    eigvalues, diagonalizer = numpy.linalg.eig(tensor[0])
    diagonalization = numpy.dot(
        numpy.dot(
            numpy.linalg.inv(diagonalizer),
            tensor).swapaxes(0, -2),
        diagonalizer)
    if numpy.allclose(diag_mask*diagonalization, 0):
        return diagonalization.diagonal(axis1=-2, axis2=-1).reshape(old_shape[:-1])
    else:
        perm, cluster_sizes = clusters(diagonalization[0].diagonal())
        perm_matrix = permutation_matrix(perm)
        diagonalization = numpy.dot(
            numpy.dot(
                perm_matrix.T,
                diagonalization).swapaxes(0, -2),
            perm_matrix)
        mask = 1 - scipy.linalg.block_diag(
            *list(
                numpy.ones((blocksize, blocksize))
                for blocksize in cluster_sizes))
        print(diagonalization)
        assert(numpy.allclose(
            diagonalization*mask,
            0))  # Assert that the matrices are co-diagonalizable
        blocks = numpy.cumsum(cluster_sizes)
        start = 0
        other_part = []
        for block in blocks:
            other_part.append(
                simultaneously_diagonalize(
                    diagonalization[1:, start:block, start:block]))
            start = block
        return numpy.vstack(
            (diagonalization[0].diagonal(axis1=-2, axis2=-1),
             numpy.hstack(other_part)))
If you know something about the size of the eigenvalues of the two matrices in advance, you can diagonalize a linear combination of the two matrices, with coefficients chosen to break the degeneracy. For example, if the eigenvalues of both lie between -10 and 10, you could diagonalize 100*M1 + M2. There's a slight loss of precision, but for many purposes it's good enough--and quick and easy!
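For instance, a minimal sketch of that trick on the M from the question, where 100*M1 + M2 has the distinct eigenvalues 201, 199 and 1, so its eigenvectors break the degeneracy and diagonalize both matrices:

import numpy as np

combo = 100 * M[0] + M[1]
_, vecs = np.linalg.eigh(combo)  # the matrices in the example are symmetric, so eigh applies
simultaneous = np.column_stack([
    np.diag(vecs.T @ M[0] @ vecs),
    np.diag(vecs.T @ M[1] @ vecs),
])
print(simultaneous)  # rows are a permutation of [[2, 1], [2, -1], [0, 1]]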