Calculate special correlation distance matrix faster - python

I would like to build a distance matrix using Pearson correlation distance.
I first tried scipy.spatial.distance.pdist(df, 'correlation'), which is very fast for my 5000-row, 20-feature dataset.
Since I want to build a recommender, I want to change the distance slightly, considering only the features that are not NaN for both users. Indeed, scipy.spatial.distance.pdist(df, 'correlation') outputs NaN as soon as one of the features it compares is float('nan').
Here is my code, df being my 5000 x 20 pandas DataFrame:
import math
import scipy.spatial.distance

dist_mat = []
d = df.shape[1]
for i, row_i in enumerate(df.itertuples()):
    for j, row_j in enumerate(df.itertuples()):
        if i < j:
            print(i, j)
            # keep only the features that are not NaN in both rows
            # (row[0] is the index, so the features sit at offsets 1..d)
            ind = [not (math.isnan(row_i[t + 1]) or math.isnan(row_j[t + 1])) for t in range(d)]
            dist_mat.append(scipy.spatial.distance.correlation(
                [row_i[t + 1] for t in range(d) if ind[t]],
                [row_j[t + 1] for t in range(d) if ind[t]]))
This code works but it is astonishingly slow compared to the scipy.spatial.distance.pdist(df, 'correlation') call. My question is: how can I improve my code so it runs a lot faster? Or where can I find a library that computes the correlation between two vectors while considering only the features that are present (non-NaN) in both of them?
Thank you for your answers.
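(A side note I would add here, not part of the original question: pandas itself already computes pairwise-complete correlations, dropping for every pair of rows exactly the positions where either value is NaN. Transposing the frame so the users become columns gives the same masked correlation matrix directly, and the distance is just one minus it:)
import pandas as pd  # df is the 5000 x 20 DataFrame from the question

corr = df.T.corr()   # DataFrame.corr() ignores positions where either value is NaN
dist = 1.0 - corr    # same convention as scipy's 'correlation' distance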

I think you need to do this with Cython; here is an example:
#cython: boundscheck=False, wraparound=False, cdivision=True
import numpy as np

cdef extern from "math.h":
    bint isnan(double x)
    double sqrt(double x)

def pair_correlation(double[:, ::1] x):
    cdef double[:, ::1] res = np.empty((x.shape[0], x.shape[0]))
    cdef double u, v
    cdef int i, j, k, count
    cdef double du, dv, d, n, r
    cdef double um, vm
    cdef double sum_u, sum_v, sum_u2, sum_v2, sum_uv
    for i in range(x.shape[0]):
        for j in range(i, x.shape[0]):
            sum_u = sum_v = sum_u2 = sum_v2 = sum_uv = 0.0
            count = 0
            for k in range(x.shape[1]):
                u = x[i, k]
                v = x[j, k]
                if u == u and v == v:   # NaN != NaN, so this skips missing values
                    sum_u += u
                    sum_v += v
                    sum_u2 += u*u
                    sum_v2 += v*v
                    sum_uv += u*v
                    count += 1
            if count == 0:
                res[i, j] = res[j, i] = -9999   # sentinel: no overlapping features
                continue
            um = sum_u / count
            vm = sum_v / count
            n = sum_uv - sum_u * vm - sum_v * um + um * vm * count
            du = sqrt(sum_u2 - 2 * sum_u * um + um * um * count)
            dv = sqrt(sum_v2 - 2 * sum_v * vm + vm * vm * count)
            r = 1 - n / (du * dv)
            res[i, j] = res[j, i] = r
    return res.base
To check the output without NaN:
import numpy as np
from scipy.spatial.distance import pdist, squareform, correlation
x = np.random.rand(2000, 20)
np.allclose(pair_correlation(x), squareform(pdist(x, "correlation")))
To check the output with NaN:
x = np.random.rand(2000, 20)
x[x < 0.3] = np.nan
r = pair_correlation(x)
i, j = 200, 60 # change this
mask = ~(np.isnan(x[i]) | np.isnan(x[j]))
u = x[i, mask]
v = x[j, mask]
assert abs(correlation(u, v) - r[i, j]) < 1e-12
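For completeness (this is an assumption about the build setup, not part of the original answer): the pair_correlation function has to be compiled before it can be imported. A minimal setup.py, assuming the code above is saved as pair_correlation.pyx (the file name is hypothetical), could look like this:
# setup.py -- minimal build script; run with: python setup.py build_ext --inplace
from setuptools import setup
from Cython.Build import cythonize

setup(ext_modules=cythonize("pair_correlation.pyx"))
Alternatively, in a Jupyter notebook the %%cython cell magic (used in the reaction-diffusion answer further down this page) compiles the cell on the fly.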

Related

Implementation of an algorithm for simultaneous diagonalization

I am trying to write an implementation of an algorithm for the simultaneous diagonalization of two matrices (which are assumed to be simultaneously diagonalizable). However, the algorithm does not seem to converge. The algorithm is described in SIAM J. Matrix Anal. Appl. 14, 927 (1993).
Here is the first part of my code to set up a test case:
import numpy as np
import numpy.linalg as lin
from scipy.optimize import minimize
N = 3
# Unitary example matrix
X = np.array([
[-0.54717736-0.43779416j, 0.26046313+0.11082439j, 0.56151027-0.33692186j],
[-0.33452046-0.37890784j, -0.40907097-0.70730291j, -0.15344477+0.23100467j],
[-0.31253864-0.39468687j, 0.05342909+0.49940543j, -0.70062586+0.05835082j]
])
# Generate eigenvalues
LA = np.diag(np.arange(0, N))
LB = np.diag(np.arange(N, 2*N))
# Generate simultaneously diagonalizable matrices
A = X @ LA @ np.conj(X).T
B = X @ LB @ np.conj(X).T
This should generate two 3x3 matrices which are simultaneously diagonalizable, since they are constructed this way via X. The following code block then defines a few helper functions:
def off2(A, B):
    """Defines the distance of the matrices from
    their diagonal form.
    """
    C = np.abs(A) ** 2 + np.abs(B) ** 2
    diag_idx = np.diag_indices(N)
    C[diag_idx] = 0
    return np.sum(C)

def Rijcs(i, j, c, s):
    """Function R(i, j, c, s) from the paper, see
    Eq. (1) therein. Used for plane rotations in
    the plane ij.
    """
    res = np.eye(N, dtype=complex)
    res[i, i] = c
    res[i, j] = -np.conj(s)
    res[j, i] = s
    res[j, j] = np.conj(c)
    return res

def cs(theta, phi):
    """Parametrization for c and s."""
    c = np.cos(theta)
    s = np.exp(1j * phi) * np.sin(theta)
    return c, s
With these definitions, the algorithm can be implemented:
tol = 1e-10
Q = np.eye(N, dtype=complex)
while True:
    off = off2(A, B)
    # Print statement for debugging purposes
    print(off)
    # Terminate if the result is converged
    if off <= tol * (lin.norm(A, "fro") + lin.norm(B, "fro")):
        break
    for i in range(N):
        for j in range(i + 1, N):
            def fij(c, s):
                aij = A[i, j]
                aji = A[j, i]
                aii = A[i, i]
                ajj = A[j, j]
                bij = B[i, j]
                bji = B[j, i]
                bii = B[i, i]
                bjj = B[j, j]
                x = np.array(
                    [
                        [np.conj(aij), np.conj(aii - ajj), -np.conj(aji)],
                        [aji,          (aii - ajj),        -aij],
                        [np.conj(bij), np.conj(bii - bjj), -np.conj(bji)],
                        [bji,          (bii - bjj),        -bij]
                    ]
                )
                y = np.array(
                    [
                        [c ** 2],
                        [c * s],
                        [s ** 2]
                    ]
                )
                return lin.norm(x @ y, 2)

            # 5
            result = minimize(
                lambda x: fij(*cs(x[0], x[1])),
                x0=(0, 0),
                bounds=(
                    (-0.25 * np.pi, 0.25 * np.pi),
                    (-np.pi, np.pi)
                ),
            )
            theta, phi = result['x']
            c, s = cs(theta, phi)
            # 6
            R = Rijcs(i, j, c, s)
            # 7
            Q = Q @ R
            A = np.conj(R).T @ A @ R
            B = np.conj(R).T @ B @ R
As you can observe from the print statement, the "distance" of A and B from diagonal form does not really converge. Instead, the values printed range from 0.5 up to 3 and oscillate up and down. Is there a bug in this code and if so, where exactly is it?
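One quick diagnostic worth running on the setup above (my own sanity check, not part of the original question): simultaneously diagonalizable matrices must commute, so the commutator of A and B should be numerically zero before the iteration starts.
# run right after constructing A and B, before the while loop modifies them
print(np.max(np.abs(A @ B - B @ A)))   # should be on the order of machine precision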

How to reduce run time in my python code?

I am currently working on a project that requires me to run a complete Python code base. For research purposes, I need to run the code as fast as possible. Yet I am fairly new to programming and have no idea how to reduce run time, so I hope someone can help me with that. Any advice would be appreciated. Here is part of my code base, which uses a lot of nested for loops, so it probably increases run time significantly.
def a_j(r, a, A):  # the Clausius-Mossotti factor, determined by a symmetric (3 x 3) matrix such that (A_i)^T = A_i
    alph = np.array([[0,0,0],[0,0,0],[0,0,0]], complex)
    for i in range(3):
        for j in range(3):
            alph[i,j] = (r * a * A[i,j])
    return alph

def W_ext(x, k, rho, alpha, A):  # particle–particle interaction term
    n = x.shape[0]  # the number of x vectors
    result = np.zeros([3*n, 3*n], complex)
    u = np.zeros((n, 3))  # u = x - x'
    for i in range(n):
        for j in range(n):
            if i != j:
                u[i] = x[i] - x[j]
                # G is defined elsewhere in the code base
                block_result = a_j(rho[i], alpha, A) * G((u[i]), k) * a_j(rho[j], alpha, A)
                for m in range(3):
                    for l in range(3):
                        result[3*i + m, 3*j + l] = block_result[m,l]
    return result.imag

def A_ext(rho, a, A):  # single-particle term
    n = rho.shape[0]
    result = np.zeros([3*n, 3*n], complex)
    for i in range(n):
        for j in range(n):
            if i == j:
                block_result = a_j(rho[i], a, A).imag
                for m in range(3):
                    for l in range(3):
                        result[3*i + m, 3*j + l] = block_result[m,l]
    return result  # (3 x 3) matrix

def P_ext(e, A, W, omega):
    eT = np.matrix.getH(e)
    mm1 = np.matmul(A, e)
    mm2 = np.matmul(W, e)
    extinction = (np.dot(eT, mm1) + np.dot(eT, mm2)) * (omega/2.0)
    return extinction

# ABSORPTION
def W_abs(x, k, rho, alpha, A, chi):  # particle–particle interaction term
    n = x.shape[0]
    result = np.zeros([3*n, 3*n], complex)
    u = np.zeros((n, 3))
    for i in range(n):
        for j in range(n):
            if i != j:
                u[i] = x[i] - x[j]
                block_result = np.matrix.getH(a_j(rho[i], alpha, A)) * (1.0 / np.conjugate(chi)).imag * a_j(rho[i], alpha, A) * G((u[i]), k) * a_j(rho[j], alpha, A)
                for m in range(3):
                    for l in range(3):
                        result[3*i + m, 3*j + l] = block_result[m,l]
    return 2.0 * result.real  # (3 x 3) matrix

def A_abs(rho, a, A, chi):  # single-particle term
    n = rho.shape[0]
    result = np.zeros([3*n, 3*n], complex)
    for i in range(n):
        for j in range(n):
            if i == j:
                block_result = np.matrix.getH(a_j(rho[i], a, A)) * (1.0 / np.conjugate(chi)).imag * a_j(rho[i], a, A)
                for m in range(3):
                    for l in range(3):
                        result[3*i + m, 3*j + l] = block_result[m,l]
    return result  # (3 x 3) matrix
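No answer is quoted here, but since the question is about run time, a minimal vectorization sketch (my own suggestion, assuming A is a (3, 3) NumPy array): the double loop in a_j is just a scalar times A, and the element-by-element copy of each 3 x 3 block can become a single slice assignment.
import numpy as np

def a_j(r, a, A):
    # scalar * matrix; no explicit loops needed
    return (r * a) * np.asarray(A, dtype=complex)

# inside W_ext / W_abs / A_ext / A_abs the block copy
#     for m in range(3):
#         for l in range(3):
#             result[3*i + m, 3*j + l] = block_result[m, l]
# collapses to a single slice assignment:
#     result[3*i:3*i + 3, 3*j:3*j + 3] = block_result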

General minimal residual method with right-preconditioner of SSOR

I am trying to implement the GMRES algorithm with a right preconditioner P for solving the linear system Ax = b. The code runs without error; however, it gives an imprecise result, because the residual error I get is very large. For plain GMRES (without the preconditioning matrix, i.e. removing P from the algorithm), the error I get is around 1e-12 and it converges with the same matrix.
import numpy as np
from scipy import sparse
import matplotlib.pyplot as plt
from scipy.linalg import norm as norm
import scipy.sparse as sp
from scipy.sparse import diags
"""The program is to split the matrix into D-diagonal; L: strictly lower matrix; U strictly upper matrix
satisfying: A = D - L - U """
def splitMat(A):
n,m = A.shape
if (n == m):
diagval = np.diag(A)
D = diags(diagval,0).toarray()
L = (-1)*np.tril(A,-1)
U = (-1)*np.triu(A,1)
else:
print("A needs to be a square matrix")
return (L,D,U)
"""Preconditioned Matrix for symmetric successive over-relaxation (SSOR): """
def P_SSOR(A,w):
## Split up matrix A:
L,D,U = splitMat(A)
Comp1 = (D - w*U)
Comp2 = (D - w*L)
Comp1inv = np.linalg.inv(Comp1)
Comp2inv = np.linalg.inv(Comp2)
P = w*(2-w)*np.matmul(Comp1inv, np.matmul(D,Comp2inv))
return P
"""GMRES_SSOR using right preconditioning P:
A - matrix of linear system Ax = b
x0 - initial guess
tol - tolerance
maxit - maximum iteration """
def myGMRES_SSOR(A,x0, b, tol, maxit):
matrixSize = A.shape[0]
e = np.zeros((maxit+1,1))
rr = 1
rstart = 2
X = x0
w = 1.9 ## in ssor
P = P_SSOR(A,w) ### preconditioned matrix
### Starting the GMRES ####
for rs in range(0,rstart+1):
### first check the residual:
if rr<tol:
break
else:
r0 = (b-A.dot(x0))
rho = norm(r0)
e[0] = rho
H = np.zeros((maxit+1,maxit))
Qcol = np.zeros((matrixSize, maxit+1))
Qcol[:,0:1] = r0/rho
for k in range(1, maxit+1):
### Arnodi procedure ##
Qcol[:,k] =np.matmul(np.matmul(A,P), Qcol[:,k-1]) ### This step applies P here:
for j in range(0,k):
H[j,k-1] = np.dot(np.transpose(Qcol[:,k]),Qcol[:,j])
Qcol[:,k] = Qcol[:,k] - (np.dot(H[j,k-1], Qcol[:,j]))
H[k,k-1] =norm(Qcol[:,k])
Qcol[:,k] = Qcol[:,k]/H[k,k-1]
### QR decomposition step ###
n = k
Q = np.zeros((n+1, n))
R = np.zeros((n, n))
R[0, 0] = norm(H[0:n+2, 0])
Q[:, 0] = H[0:n+1, 0] / R[0,0]
for j in range (0, n+1):
t = H[0:n+1, j-1]
for i in range (0, j-1):
R[i, j-1] = np.dot(Q[:, i], t)
t = t - np.dot(R[i, j-1], Q[:, i])
R[j-1, j-1] = norm(t)
Q[:, j-1] = t / R[j-1, j-1]
g = np.dot(np.transpose(Q), e[0:k+1])
Y = np.dot(np.linalg.inv(R), g)
Res= e[0:n] - np.dot(H[0:n, 0:n], Y[0:n])
rr = norm(Res)
#### second check on the residual ###
if rr < tol:
break
#### Updating the solution with the preconditioned matrix ####
X = X + np.matmul(np.matmul(P,Qcol[:, 0:k]), Y) ### This steps applies P here:
return X
######
A = np.random.rand(100,100)
x = np.random.rand(100,1)
b = np.matmul(A,x)
x0 = np.zeros((100,1))
maxit = 100
tol = 0.00001
x = myGMRES_SSOR(A,x0,b,tol,maxit)
res = b - np.matmul(A,x)
print(norm(res))
print("Solution with gmres\n", np.matmul(A,x))
print("---------------------------------------")
print("b matrix:", b)
I hope someone can help me figure this out!
I'm not sure where you got your "symmetric successive over-relaxation" (SSOR) code from, but it appears to be wrong. You also seem to be assuming that A is a symmetric matrix, but in your random test case it is not.
Following SSOR's Wikipedia entry, I replaced your P_SSOR function with
def P_SSOR(A, w):
    L, D, U = splitMat(A)
    # matrix products, not element-wise products
    P = 2/(2-w) * ((1/w*D + L) @ np.linalg.inv(D) @ (1/w*D + L).T)
    return P
and your test matrix with
A = np.random.rand(100,100)
A = A + A.T
and your code works up to a 12 digit residual error.
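A rough way to see whether the preconditioner is actually helping (my own check, not part of the original answer) is to compare the condition numbers of A and A @ P; an effective right preconditioner should bring the second one down noticeably.
import numpy as np

np.random.seed(0)
A = np.random.rand(100, 100)
A = A + A.T                    # symmetrize, as suggested above
P = P_SSOR(A, 1.9)
print(np.linalg.cond(A), np.linalg.cond(A @ P))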

Using numba for cosine similarity between a vector and rows in a matrix

Found this gist using numba for fast computation of cosine similarity.
import numpy as np
import numba

@numba.jit(target='cpu', nopython=True)
def fast_cosine(u, v):
    m = u.shape[0]
    udotv = 0
    u_norm = 0
    v_norm = 0
    for i in range(m):
        if (np.isnan(u[i])) or (np.isnan(v[i])):
            continue
        udotv += u[i] * v[i]
        u_norm += u[i] * u[i]
        v_norm += v[i] * v[i]
    u_norm = np.sqrt(u_norm)
    v_norm = np.sqrt(v_norm)
    if (u_norm == 0) or (v_norm == 0):
        ratio = 1.0
    else:
        ratio = udotv / (u_norm * v_norm)
    return ratio
Results look promising (500 ns vs. 200 µs without the jit decorator on my machine).
I would like to use numba to parallelize this computation between a vector u and a candidate matrix M -- i.e. cosine across each row.
Example:
def fast_cosine_matrix(u, M):
    """
    Return array of cosine similarity between u and rows in M

    >>> import numpy as np
    >>> u = np.random.rand(100)
    >>> M = np.random.rand(10, 100)
    >>> fast_cosine_matrix(u, M)
    """
One way is to just rewrite it with a matrix as the second input. But I get a NotImplementedError if I try to iterate over the rows of a matrix. I am going to try just using slices.
I thought about using vectorize but I can't get it to work.
Solution rewriting it a bit:
import numpy as np
import numba

@numba.jit(target='cpu', nopython=True, parallel=True)
def fast_cosine_matrix(u, M):
    scores = np.zeros(M.shape[0])
    for i in numba.prange(M.shape[0]):
        v = M[i]
        m = u.shape[0]
        udotv = 0
        u_norm = 0
        v_norm = 0
        for j in range(m):
            if (np.isnan(u[j])) or (np.isnan(v[j])):
                continue
            udotv += u[j] * v[j]
            u_norm += u[j] * u[j]
            v_norm += v[j] * v[j]
        u_norm = np.sqrt(u_norm)
        v_norm = np.sqrt(v_norm)
        if (u_norm == 0) or (v_norm == 0):
            ratio = 1.0
        else:
            ratio = udotv / (u_norm * v_norm)
        scores[i] = ratio
    return scores

u = np.random.rand(100)
M = np.random.rand(100000, 100)
fast_cosine_matrix(u, M)
Alternative: make a Generalized UFunc with numba
@numba.guvectorize(["void(float64[:], float64[:], float64[:])"], "(n),(n)->()", target='parallel')
def fast_cosine_gufunc(u, v, result):
    m = u.shape[0]
    udotv = 0
    u_norm = 0
    v_norm = 0
    for i in range(m):
        if (np.isnan(u[i])) or (np.isnan(v[i])):
            continue
        udotv += u[i] * v[i]
        u_norm += u[i] * u[i]
        v_norm += v[i] * v[i]
    u_norm = np.sqrt(u_norm)
    v_norm = np.sqrt(v_norm)
    if (u_norm == 0) or (v_norm == 0):
        ratio = 1.0
    else:
        ratio = udotv / (u_norm * v_norm)
    result[:] = ratio

u = np.random.rand(100)
M = np.random.rand(100000, 100)
fast_cosine_gufunc(u, M[0, :])
fast_cosine_gufunc(u, M)
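A small sanity check I would add (not part of the original post): broadcasting the (n),(n)->() gufunc over the rows of M should reproduce the explicit prange implementation.
u = np.random.rand(100)
M = np.random.rand(1000, 100)
# one score per row of M from each implementation
assert np.allclose(fast_cosine_gufunc(u, M), fast_cosine_matrix(u, M))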

Numba or Cython acceleration in reaction-diffusion algorithm

I'd like to accelerate code written in Python and NumPy. I use the Gray-Scott algorithm (http://mrob.com/pub/comp/xmorphia/index.html) for a reaction-diffusion model, but with Numba and Cython it's even slower! Is it possible to speed it up? Thanks in advance!
Python+NumPy
import numpy as np

def GrayScott(counts, Du, Dv, F, k):
    n = 300
    U = np.zeros((n+2, n+2), dtype=np.float_)
    V = np.zeros((n+2, n+2), dtype=np.float_)
    u, v = U[1:-1, 1:-1], V[1:-1, 1:-1]

    r = 20
    u[:] = 1.0
    U[n//2-r:n//2+r, n//2-r:n//2+r] = 0.50
    V[n//2-r:n//2+r, n//2-r:n//2+r] = 0.25
    u += 0.15*np.random.random((n, n))
    v += 0.15*np.random.random((n, n))

    for i in range(counts):
        Lu = ( U[0:-2, 1:-1] +
               U[1:-1, 0:-2] - 4*U[1:-1, 1:-1] + U[1:-1, 2:] +
               U[2:  , 1:-1] )
        Lv = ( V[0:-2, 1:-1] +
               V[1:-1, 0:-2] - 4*V[1:-1, 1:-1] + V[1:-1, 2:] +
               V[2:  , 1:-1] )
        uvv = u*v*v
        u += Du*Lu - uvv + F*(1 - u)
        v += Dv*Lv + uvv - (F + k)*v
    return V
Numba
import numpy as np
from numba import jit, autojit

@autojit
def numbaGrayScott(counts, Du, Dv, F, k):
    n = 300
    U = np.zeros((n+2, n+2), dtype=np.float_)
    V = np.zeros((n+2, n+2), dtype=np.float_)
    u, v = U[1:-1, 1:-1], V[1:-1, 1:-1]

    r = 20
    u[:] = 1.0
    U[n//2-r:n//2+r, n//2-r:n//2+r] = 0.50
    V[n//2-r:n//2+r, n//2-r:n//2+r] = 0.25
    u += 0.15*np.random.random((n, n))
    v += 0.15*np.random.random((n, n))

    Lu = np.zeros_like(u)
    Lv = np.zeros_like(v)
    for i in range(counts):
        for row in range(n):
            for col in range(n):
                Lu[row, col] = U[row+1, col+2] + U[row+1, col] + U[row+2, col+1] + U[row, col+1] - 4*U[row+1, col+1]
                Lv[row, col] = V[row+1, col+2] + V[row+1, col] + V[row+2, col+1] + V[row, col+1] - 4*V[row+1, col+1]
        uvv = u*v*v
        u += Du*Lu - uvv + F*(1 - u)
        v += Dv*Lv + uvv - (F + k)*v
    return V
Cython
%%cython
cimport cython
import numpy as np
cimport numpy as np

cpdef cythonGrayScott(int counts, double Du, double Dv, double F, double k):
    cdef int n = 300
    cdef np.ndarray U = np.zeros((n+2, n+2), dtype=np.float_)
    cdef np.ndarray V = np.zeros((n+2, n+2), dtype=np.float_)
    cdef np.ndarray u = U[1:-1, 1:-1]
    cdef np.ndarray v = V[1:-1, 1:-1]

    cdef int r = 20
    u[:] = 1.0
    U[n//2-r:n//2+r, n//2-r:n//2+r] = 0.50
    V[n//2-r:n//2+r, n//2-r:n//2+r] = 0.25
    u += 0.15*np.random.random((n, n))
    v += 0.15*np.random.random((n, n))

    cdef np.ndarray Lu = np.zeros_like(u)
    cdef np.ndarray Lv = np.zeros_like(v)
    cdef int i, row, col
    cdef np.ndarray uvv
    for i in range(counts):
        for row in range(n):
            for col in range(n):
                Lu[row, col] = U[row+1, col+2] + U[row+1, col] + U[row+2, col+1] + U[row, col+1] - 4*U[row+1, col+1]
                Lv[row, col] = V[row+1, col+2] + V[row+1, col] + V[row+2, col+1] + V[row, col+1] - 4*V[row+1, col+1]
        uvv = u*v*v
        u += Du*Lu - uvv + F*(1 - u)
        v += Dv*Lv + uvv - (F + k)*v
    return V
Usage example:
GrayScott(4000, 0.16, 0.08, 0.04, 0.06)
Here are the steps to speed up the Cython version:
cdef np.ndarray doesn't make element access faster; you need to use memoryviews in Cython: cdef double[:, ::1] bU = U.
Turn off boundscheck and wraparound.
Do all the calculations in the for-loops.
Here is the modified cython code:
%%cython
#cython: boundscheck=False
#cython: wraparound=False
cimport cython
import numpy as np
cimport numpy as np

cpdef cythonGrayScott(int counts, double Du, double Dv, double F, double k):
    cdef int n = 300
    cdef np.ndarray U = np.zeros((n+2, n+2), dtype=np.float_)
    cdef np.ndarray V = np.zeros((n+2, n+2), dtype=np.float_)
    cdef np.ndarray u = U[1:-1, 1:-1]
    cdef np.ndarray v = V[1:-1, 1:-1]

    cdef int r = 20
    u[:] = 1.0
    U[n//2-r:n//2+r, n//2-r:n//2+r] = 0.50
    V[n//2-r:n//2+r, n//2-r:n//2+r] = 0.25
    u += 0.15*np.random.random((n, n))
    v += 0.15*np.random.random((n, n))

    cdef np.ndarray Lu = np.zeros_like(u)
    cdef np.ndarray Lv = np.zeros_like(v)
    cdef int i, c, r1, c1, r2, c2
    cdef double uvv

    cdef double[:, ::1] bU = U
    cdef double[:, ::1] bV = V
    cdef double[:, ::1] bLu = Lu
    cdef double[:, ::1] bLv = Lv

    for i in range(counts):
        for r in range(n):
            r1 = r + 1
            r2 = r + 2
            for c in range(n):
                c1 = c + 1
                c2 = c + 2
                bLu[r, c] = bU[r1, c2] + bU[r1, c] + bU[r2, c1] + bU[r, c1] - 4*bU[r1, c1]
                bLv[r, c] = bV[r1, c2] + bV[r1, c] + bV[r2, c1] + bV[r, c1] - 4*bV[r1, c1]

        for r in range(n):
            r1 = r + 1
            for c in range(n):
                c1 = c + 1
                uvv = bU[r1, c1]*bV[r1, c1]*bV[r1, c1]
                bU[r1, c1] += Du*bLu[r, c] - uvv + F*(1 - bU[r1, c1])
                bV[r1, c1] += Dv*bLv[r, c] + uvv - (F + k)*bV[r1, c1]
    return V
It's about 11x faster than the numpy version.
Aside from the looping and the sheer volume of operations involved, what is most likely killing performance in your case is array allocation. I don't know why your Numba and Cython versions are not living up to your expectations, but you can make your NumPy code 2x faster (at the cost of some readability) by doing all operations in-place, i.e. replacing your current loop with:
Lu, Lv, uvv = np.empty_like(u), np.empty_like(v), np.empty_like(u)
for i in range(counts):
    Lu[:] = u
    Lu *= -4
    Lu += U[:-2, 1:-1]
    Lu += U[1:-1, :-2]
    Lu += U[1:-1, 2:]
    Lu += U[2:, 1:-1]
    Lu *= Du

    Lv[:] = v
    Lv *= -4
    Lv += V[:-2, 1:-1]
    Lv += V[1:-1, :-2]
    Lv += V[1:-1, 2:]
    Lv += V[2:, 1:-1]
    Lv *= Dv

    uvv[:] = u
    uvv *= v
    uvv *= v
    Lu -= uvv
    Lv += uvv

    u *= 1 - F
    u += F
    u += Lu

    v *= 1 - F - k
    v += Lv
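For completeness, a rough way to put the three versions side by side (my own timing sketch, using the call from the usage example above; the exact ratios depend on the machine):
import time

for name, fn in [("numpy", GrayScott), ("numba", numbaGrayScott), ("cython", cythonGrayScott)]:
    start = time.time()
    fn(4000, 0.16, 0.08, 0.04, 0.06)
    print(name, time.time() - start, "s")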
