I have a large sparse binary matrix (scipy.sparse.csr_matrix). For each row, I need to compute the Jaccard distance to every row in the same matrix. What's the most efficient way to do this? Even for a 10,000 x 10,000 matrix, the computation takes minutes to finish.
Current solution:
def jaccard(a, b):
    intersection = float(len(set(a) & set(b)))
    union = float(len(set(a) | set(b)))
    return 1.0 - (intersection / union)

def regions(csr, p, epsilon):
    neighbors = []
    for index in range(len(csr.indptr) - 1):
        if jaccard(p, csr.indices[csr.indptr[index]:csr.indptr[index + 1]]) <= epsilon:
            neighbors.append(index)
    return neighbors

csr = scipy.sparse.csr_matrix("file")
regions(csr, p, 0.51)  # this is called for every row, with p the row's column indices
Vectorization is relatively easy if you use matrix multiplication to calculate the set intersections and then the rule |union(a, b)| == |a| + |b| - |intersection(a, b)| to determine the unions:
# Not actually necessary for sparse matrices, but it is for
# dense matrices and ndarrays, if X.dtype is integer.
from __future__ import division

def pairwise_jaccard(X):
    """Computes the Jaccard distance between the rows of `X`."""
    X = X.astype(bool).astype(int)
    intrsct = X.dot(X.T)
    row_sums = intrsct.diagonal()
    unions = row_sums[:, None] + row_sums - intrsct
    dist = 1.0 - intrsct / unions
    return dist
Note the cast to bool and then int: the dtype of X must be large enough to accumulate twice the maximum row sum, and the entries of X must be either zero or one. The downside of this code is that it's heavy on RAM, because unions and dist are dense matrices.
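As a quick check (my addition, not part of the original answer), the result can be compared against SciPy's dense Jaccard distance on a small random binary matrix:

import numpy as np
from scipy.spatial.distance import pdist, squareform

X = (np.random.rand(100, 50) < 0.5).astype(int)
dense = squareform(pdist(X.astype(bool), 'jaccard'))
assert np.allclose(pairwise_jaccard(X), dense)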
If you're only interested in distances smaller than some cut-off epsilon, the code can be tuned for sparse matrices:
import numpy as np
from scipy.sparse import csr_matrix

def pairwise_jaccard_sparse(csr, epsilon):
    """Computes the Jaccard distance between the rows of `csr`,
    smaller than the cut-off distance `epsilon`.
    """
    assert 0 < epsilon < 1
    csr = csr_matrix(csr).astype(bool).astype(int)

    csr_rownnz = csr.getnnz(axis=1)
    intrsct = csr.dot(csr.T)

    nnz_i = np.repeat(csr_rownnz, intrsct.getnnz(axis=1))
    unions = nnz_i + csr_rownnz[intrsct.indices] - intrsct.data
    dists = 1.0 - intrsct.data / unions

    mask = (dists > 0) & (dists <= epsilon)
    data = dists[mask]
    indices = intrsct.indices[mask]

    rownnz = np.add.reduceat(mask, intrsct.indptr[:-1])
    indptr = np.r_[0, np.cumsum(rownnz)]

    out = csr_matrix((data, indices, indptr), intrsct.shape)
    return out
If this still takes too much RAM, you could try to vectorize over one dimension and Python-loop over the other.
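For reference, here is a minimal sketch of that chunked variant (my addition, not part of the original answer): it Python-loops over blocks of rows and applies the vectorized kernel above within each block, so only one block of intersections is materialized at a time. The chunk_size and the function name are illustrative.

import numpy as np
from scipy.sparse import csr_matrix, vstack

def pairwise_jaccard_sparse_chunked(csr, epsilon, chunk_size=1000):
    # Like the original, this assumes every row has at least one nonzero.
    csr = csr_matrix(csr).astype(bool).astype(int)
    rownnz = csr.getnnz(axis=1)
    blocks = []
    for start in range(0, csr.shape[0], chunk_size):
        chunk = csr[start:start + chunk_size]
        intrsct = chunk.dot(csr.T).tocsr()
        nnz_i = np.repeat(rownnz[start:start + chunk.shape[0]], intrsct.getnnz(axis=1))
        unions = nnz_i + rownnz[intrsct.indices] - intrsct.data
        dists = 1.0 - intrsct.data / unions
        mask = (dists > 0) & (dists <= epsilon)
        chunk_rownnz = np.add.reduceat(mask, intrsct.indptr[:-1])
        indptr = np.r_[0, np.cumsum(chunk_rownnz)]
        blocks.append(csr_matrix((dists[mask], intrsct.indices[mask], indptr),
                                 shape=(chunk.shape[0], csr.shape[0])))
    return vstack(blocks).tocsr()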
To add to the accepted answer: I needed a weighted version of the above method, which can be implemented as:
import numpy
import scipy.sparse

def pairwise_jaccard_sparse_weighted(csr, epsilon, weight):
    csr = scipy.sparse.csr_matrix(csr).astype(bool).astype(int)
    csr_w = csr * scipy.sparse.diags(weight)
    csr_rowsum = numpy.array(csr_w.sum(axis=1)).flatten()
    intrsct = csr.dot(csr_w.T)

    rowsum_i = numpy.repeat(csr_rowsum, intrsct.getnnz(axis=1))
    unions = rowsum_i + csr_rowsum[intrsct.indices] - intrsct.data
    dists = 1.0 - 1.0 * intrsct.data / unions

    mask = (dists > 0) & (dists <= epsilon)
    data = dists[mask]
    indices = intrsct.indices[mask]

    rownnz = numpy.add.reduceat(mask, intrsct.indptr[:-1])
    indptr = numpy.r_[0, numpy.cumsum(rownnz)]

    out = scipy.sparse.csr_matrix((data, indices, indptr), intrsct.shape)
    return out
I doubt this is the most efficient implementation, but it's a damn sight quicker than the dense implementation in scipy.spatial.distance.jaccard.
Here is a solution with a scikit-learn-like API.
import numpy as np

def pairwise_sparse_jaccard_distance(X, Y=None):
    """
    Computes the Jaccard distance between two sparse matrices or between all pairs in
    one sparse matrix.

    Args:
        X (scipy.sparse.csr_matrix): A sparse matrix.
        Y (scipy.sparse.csr_matrix, optional): A sparse matrix.

    Returns:
        numpy.ndarray: A distance matrix.
    """
    if Y is None:
        Y = X
    assert X.shape[1] == Y.shape[1]

    X = X.astype(bool).astype(int)
    Y = Y.astype(bool).astype(int)
    intersect = X.dot(Y.T)
    x_sum = X.sum(axis=1).A1
    y_sum = Y.sum(axis=1).A1
    xx, yy = np.meshgrid(x_sum, y_sum)
    union = ((xx + yy).T - intersect)
    return (1 - intersect / union).A
Here is some testing and benchmarking:
>>> import timeit
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> from sklearn.metrics import pairwise_distances
>>> X = csr_matrix(np.random.choice(a=[False, True], size=(10000, 1000), p=[0.9, 0.1]))
>>> Y = csr_matrix(np.random.choice(a=[False, True], size=(1000, 1000), p=[0.9, 0.1]))
Asserting that all results are approximately equivalent
>>> custom_jaccard_distance = pairwise_sparse_jaccard_distance(X, Y)
>>> sklearn_jaccard_distance = pairwise_distances(X.todense(), Y.todense(), "jaccard")
>>> np.allclose(custom_jaccard_distance, sklearn_jaccard_distance)
True
Benchmarking runtime (from a Jupyter Notebook)
>>> %timeit pairwise_sparse_jaccard_distance(X, Y)
795 ms ± 58.3 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
>>> %timeit 1 - pairwise_distances(X.todense(), Y.todense(), "jaccard")
14.7 s ± 694 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
Related
I am translating this code from MATLAB to Python. The code functions fine, but it is painfully slow in Python. In MATLAB it runs in well under a minute; in Python it took 30 minutes! Could someone with more experience in Python help me?
import math

# P({ai})
somai = 0
for i in range(1, n):  # note: range(1, n) skips index 0 (MATLAB 1-based indexing carried over)
    somaj = 0
    for j in range(1, n):
        exponencial = math.exp(-((a[i] - a[j]) * (a[i] - a[j])) / dev_a2 - ((b[i] - b[j]) * (b[i] - b[j])) / dev_b2)
        somaj = somaj + exponencial
    somai = somai + somaj
As with MATLAB, I'd recommend you vectorize your code. Iterating with Python for-loops can be much slower than the lower-level implementations in MATLAB and numpy.
Your operations (a[i] - a[j])*(a[i] - a[j]) are pairwise squared Euclidean distances for all N data points. You can calculate the pairwise distance matrix using SciPy's pdist and squareform functions.
Then you calculate the difference between pairwise distance matrices A and B, and sum the exponential decay. So you could get a vectorized code like:
import numpy as np
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
# Example data
N = 1000
a = np.random.rand(N,1)
b = np.random.rand(N,1)
dev_a2 = np.random.rand()
dev_b2 = np.random.rand()
# `a` is an [N,1] matrix (i.e. column vector)
A = pdist(a, 'sqeuclidean')
# Change to pairwise distance matrix
A = squareform(A)
# Divide all elements by same divisor
A = A / dev_a2
# Then do the same for `b`'s
# `b` is an [N,1] matrix (i.e. column vector)
B = pdist(b, 'sqeuclidean')
B = squareform(B)
B = B / dev_b2
# Calculate exponential decay
expo = np.exp(-(A-B))
# Sum all elements
total = np.sum(expo)
Here's a quick timing comparison between the iterative method and this vectorized code.
N: 1000 | Iter Output: 2729989.851117 | Vect Output: 2732194.924364
Iter time: 6.759 secs | Vect time: 0.031 secs
N: 5000 | Iter Output: 24855530.997400 | Vect Output: 24864471.007726
Iter time: 171.795 secs | Vect time: 0.784 secs
Note that the final results are not exactly the same. This is most likely because the iterative version's loops start at index 1 (carried over from MATLAB's 1-based indexing) and therefore skip the first element of each array, while the vectorized version sums over all pairs.
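To test that hypothesis (my addition, not verified in the original post), rerun the iterative sum with both loops starting at 0 and compare against the vectorized total from above:

import math
import numpy as np

# `a`, `b`, `dev_a2`, `dev_b2`, `N` and `total` as defined in the block above
av, bv = a.ravel(), b.ravel()
somai = 0.0
for i in range(N):  # start at 0, unlike the original range(1, n)
    for j in range(N):
        somai += math.exp(-(av[i] - av[j])**2 / dev_a2 - (bv[i] - bv[j])**2 / dev_b2)
print(np.isclose(somai, total))  # True if the off-by-one was the only difference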
TLDR
Use numpy
Why Numpy?
Python, by default, is slow. One of the strengths of Python is that it plays nicely with C and has tons of libraries. The one that will help you here is numpy. Numpy is mostly implemented in C and, when used properly, is blazing fast. The trick is to phrase the code in such a way that you keep the execution inside numpy and outside of pure Python.
Code and Results
import math
import numpy as np

n = 1000

np_a = np.random.rand(n)
a = list(np_a)

np_b = np.random.rand(n)
b = list(np_b)

dev_a2, dev_b2 = (1, 1)

def old():
    somai = 0.0
    for i in range(0, n):
        somaj = 0.0
        for j in range(0, n):
            tmp_1 = -((a[i] - a[j]) * (a[i] - a[j])) / dev_a2
            tmp_2 = -((b[i] - b[j]) * (b[i] - b[j])) / dev_b2
            exponencial = math.exp(tmp_1 + tmp_2)
            somaj += exponencial
        somai += somaj
    return somai

def new():
    tmp_1 = -np.square(np.subtract.outer(np_a, np_a)) / dev_a2
    tmp_2 = -np.square(np.subtract.outer(np_b, np_b)) / dev_b2  # fixed: original had dev_a2 here
    exponential = np.exp(tmp_1 + tmp_2)
    somai = np.sum(exponential)
    return somai
old = 1.76 s ± 48.3 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
new = 24.6 ms ± 66.1 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
This is about a 70x improvement
old yields 740919.6020840995
new yields 740919.602084099
Explanation
You'll notice I broke up your code with the tmp_1 and tmp_2 a bit for clarity.
np.random.rand(n): creates an array of length n of random floats from 0 to 1 (excluding 1).
np.subtract.outer(a, b): numpy ufuncs exist for all the operators and let you do various things with them. Let's say you had np_a = [1, 2, 3]; np.subtract.outer(np_a, np_a) would yield
array([[ 0, -1, -2],
       [ 1,  0, -1],
       [ 2,  1,  0]])
(The word "outer" comes from "outer product", as in linear algebra.)
np.square: simply squares every element in the matrix.
/: arithmetic operators between scalars and matrices are applied elementwise, to every element in the matrix.
np.exp: elementwise, like np.square.
np.sum: sums every element together and returns a scalar.
My problem is the following. I have two arrays X and Y of shape n, p where p >> n (e.g. n = 50, p = 10000).
I also have a mask mask (1-d array of booleans of size p) with respect to p, of small density (e.g. np.mean(mask) is 0.05).
I try to compute, as fast as possible, the inner product of X and Y with respect to mask: the output inner is an array of shape n, n, and is such that inner[i, j] = np.sum(X[i, np.logical_not(mask)] * Y[j, np.logical_not(mask)]).
I have tried using the numpy.ma library, but it is quite slow for my use:
import numpy as np
import numpy.ma as ma
n, p = 50, 10000
density = 0.05
mask = np.array(np.random.binomial(1, density, size=p), dtype=np.bool_)
mask_big = np.ones(n)[:, None] * mask[None, :]
X = np.random.randn(n, p)
Y = np.random.randn(n, p)
X_ma = ma.array(X, mask=mask_big)
Y_ma = ma.array(Y, mask=mask_big)
But then, on my machine, X_ma.dot(Y_ma.T) is about 5 times slower than X.dot(Y.T)...
To begin with, I think it is a problem that .dot does not know that the mask is only with respect to p, but I don't know if it's possible to use this information.
I'm looking for a way to perform the computation without being much slower than the naive dot.
Thanks a lot!
We can use matrix multiplication on the full arrays and on the masked columns, since subtracting the masked product from the full product yields the desired output:
inner = X.dot(Y.T)-X[:,mask].dot(Y[:,mask].T)
Or simply use the inverted mask, though this would be slower for a sparse mask:
inner = X[:,~mask].dot(Y[:,~mask].T)
Timings -
In [34]: np.random.seed(0)
...: p,n = 10000,50
...: X = np.random.rand(n,p)
...: Y = np.random.rand(n,p)
...: mask = np.random.rand(p)>0.95
In [35]: mask.mean()
Out[35]: 0.0507
In [36]: %timeit X.dot(Y.T)-X[:,mask].dot(Y[:,mask].T)
100 loops, best of 3: 2.54 ms per loop
In [37]: %timeit X[:,~mask].dot(Y[:,~mask].T)
100 loops, best of 3: 4.1 ms per loop
In [39]: %%timeit
...: inner = np.empty((n,n))
...: for i in range(X.shape[0]):
...: for j in range(X.shape[0]):
...: inner[i, j] = np.sum(X[i, ~mask] * Y[j, ~mask])
1 loop, best of 3: 302 ms per loop
I have a 2-D dataset with two columns, x and y. I would like to get the linear regression coefficients and intercept dynamically as new data feeds in. Using scikit-learn, I can calculate on all currently available data like this:
from sklearn.linear_model import LinearRegression
import numpy as np

regr = LinearRegression()
x = np.arange(100)
y = np.arange(100) + 10 * np.random.random_sample((100,))
regr.fit(x.reshape(-1, 1), y)  # sklearn expects a 2-D feature array
print(regr.coef_)
print(regr.intercept_)
However, I have quite a big dataset (more than 10k rows in total) and I want to calculate the coefficients and intercept as fast as possible whenever new rows come in. Currently, calculating 10k rows takes about 600 microseconds, and I want to accelerate this process.
Scikit-learn does not appear to have an online update function for its linear regression module. Is there any better way to do this?
I found a solution in this paper: updating simple linear regression. The implementation is as follows:
import numpy as np

def lr(x_avg, y_avg, Sxy, Sx, n, new_x, new_y):
    """
    x_avg: average of previous x, if no previous sample, set to 0
    y_avg: average of previous y, if no previous sample, set to 0
    Sxy: covariance of previous x and y, if no previous sample, set to 0
    Sx: variance of previous x, if no previous sample, set to 0
    n: number of previous samples
    new_x: new incoming 1-D numpy array x
    new_y: new incoming 1-D numpy array y
    """
    new_n = n + len(new_x)

    new_x_avg = (x_avg * n + np.sum(new_x)) / new_n
    new_y_avg = (y_avg * n + np.sum(new_y)) / new_n

    if n > 0:
        x_star = (x_avg * np.sqrt(n) + new_x_avg * np.sqrt(new_n)) / (np.sqrt(n) + np.sqrt(new_n))
        y_star = (y_avg * np.sqrt(n) + new_y_avg * np.sqrt(new_n)) / (np.sqrt(n) + np.sqrt(new_n))
    elif n == 0:
        x_star = new_x_avg
        y_star = new_y_avg
    else:
        raise ValueError

    new_Sx = Sx + np.sum((new_x - x_star)**2)
    new_Sxy = Sxy + np.sum((new_x - x_star).reshape(-1) * (new_y - y_star).reshape(-1))

    beta = new_Sxy / new_Sx
    alpha = new_y_avg - beta * new_x_avg
    return new_Sxy, new_Sx, new_n, alpha, beta, new_x_avg, new_y_avg
Performance comparison:
Scikit-learn version that calculates all 10k samples at once:
from sklearn.linear_model import LinearRegression
x = np.arange(10000).reshape(-1,1)
y = np.arange(10000)+100*np.random.random_sample((10000,))
regr = LinearRegression()
%timeit regr.fit(x,y)
# 419 µs ± 14.6 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
My version, assuming 9k samples have already been calculated:
Sxy, Sx, n, alpha, beta, new_x_avg, new_y_avg = lr(0, 0, 0, 0, 0, x.reshape(-1,1)[:9000], y[:9000])
new_x, new_y = x.reshape(-1,1)[9000:], y[9000:]
%timeit lr(new_x_avg, new_y_avg, Sxy,Sx,n,new_x, new_y)
# 38.7 µs ± 1.31 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
10 times faster, which is expected.
Nice! Thanks for sharing your findings :) Here is an equivalent implementation of this solution written with dot products:
import numpy as np

class SimpleLinearRegressor(object):
    def __init__(self):
        self.dots = np.zeros(5)
        self.intercept = None
        self.slope = None

    def update(self, x: np.ndarray, y: np.ndarray):
        self.dots += np.array(
            [
                x.shape[0],
                x.sum(),
                y.sum(),
                np.dot(x, x),
                np.dot(x, y),
            ]
        )
        size, sum_x, sum_y, sum_xx, sum_xy = self.dots
        det = size * sum_xx - sum_x ** 2
        if det > 1e-10:  # determinant may be zero initially
            self.intercept = (sum_xx * sum_y - sum_xy * sum_x) / det
            self.slope = (sum_xy * size - sum_x * sum_y) / det
When working with time series data, we can extend this idea to do sliding window regression with a soft (EMA-like) window.
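As a minimal sketch of such a soft window (my own variant, not taken from the paper): multiply the accumulated sums by a decay factor before each update, so that older samples are gradually forgotten. The decay parameter is illustrative.

class EWLinearRegressor(object):
    def __init__(self, decay=0.99):
        self.decay = decay  # per-sample forgetting factor in (0, 1]
        self.dots = np.zeros(5)
        self.intercept = None
        self.slope = None

    def update(self, x: np.ndarray, y: np.ndarray):
        # Down-weight all previous sums, then accumulate the new batch.
        self.dots *= self.decay ** x.shape[0]
        self.dots += np.array(
            [x.shape[0], x.sum(), y.sum(), np.dot(x, x), np.dot(x, y)]
        )
        size, sum_x, sum_y, sum_xx, sum_xy = self.dots
        det = size * sum_xx - sum_x ** 2
        if det > 1e-10:
            self.intercept = (sum_xx * sum_y - sum_xy * sum_x) / det
            self.slope = (sum_xy * size - sum_x * sum_y) / det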
You can use accelerated libraries that implement faster algorithms, particularly
https://github.com/intel/scikit-learn-intelex
For linear regression you would get much better performance.
First install the package:
pip install scikit-learn-intelex
And then add to your Python script:
from sklearnex import patch_sklearn
patch_sklearn()
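For example, a small end-to-end snippet (the data here is made up; the patched estimator keeps the ordinary scikit-learn API):

from sklearnex import patch_sklearn
patch_sklearn()  # must run before the sklearn imports below

from sklearn.linear_model import LinearRegression
import numpy as np

x = np.arange(10000, dtype=float).reshape(-1, 1)
y = 2 * x.ravel() + 100 * np.random.random_sample(10000)
regr = LinearRegression().fit(x, y)
print(regr.coef_, regr.intercept_)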
I have two arrays that have the shapes N X T and M X T. I'd like to compute the correlation coefficient across T between every possible pair of rows n and m (from N and M, respectively).
What's the fastest, most pythonic way to do this? (Looping over N and M would seem to me to be neither fast nor pythonic.) I'm expecting the answer to involve numpy and/or scipy. Right now my arrays are numpy arrays, but I'm open to converting them to a different type.
I'm expecting my output to be an array with the shape N X M.
N.B. When I say "correlation coefficient," I mean the Pearson product-moment correlation coefficient.
Here are some things to note:
The numpy function correlate requires input arrays to be one-dimensional.
The numpy function corrcoef accepts two-dimensional arrays, but they must have the same shape.
The scipy.stats function pearsonr requires input arrays to be one-dimensional.
Correlation (default 'valid' case) between two 2D arrays:
You can simply use matrix multiplication, np.dot, like so:
out = np.dot(arr_one,arr_two.T)
With the default "valid" case, the correlation between each pairwise row combination (row1, row2) of the two input arrays corresponds to the multiplication result at each (row1, row2) position.
Row-wise Correlation Coefficient calculation for two 2D arrays:
def corr2_coeff(A, B):
    # Rowwise mean of input arrays & subtract from input arrays themselves
    A_mA = A - A.mean(1)[:, None]
    B_mB = B - B.mean(1)[:, None]

    # Sum of squares across rows
    ssA = (A_mA**2).sum(1)
    ssB = (B_mB**2).sum(1)

    # Finally get corr coeff
    return np.dot(A_mA, B_mB.T) / np.sqrt(np.dot(ssA[:, None], ssB[None]))
This is based upon this solution to How to apply corr2 functions in Multidimentional arrays in MATLAB
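As a quick sanity check (my addition, not part of the original answer), corr2_coeff can be compared against scipy's pearsonr on small inputs:

import numpy as np
from scipy.stats import pearsonr

A = np.random.rand(4, 50)
B = np.random.rand(3, 50)
expected = np.array([[pearsonr(a, b)[0] for b in B] for a in A])
assert np.allclose(corr2_coeff(A, B), expected)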
Benchmarking
This section compares the runtime of the proposed approach against the generate_correlation_map and loopy pearsonr based approaches listed in the other answer (taken from the function test_generate_correlation_map(), without the value-correctness verification code at its end). Note that the timings for the proposed approach also include an initial check for an equal number of columns in the two input arrays, as is also done in that other answer. The runtimes are listed next.
Case #1:
In [106]: A = np.random.rand(1000, 100)
In [107]: B = np.random.rand(1000, 100)
In [108]: %timeit corr2_coeff(A, B)
100 loops, best of 3: 15 ms per loop
In [109]: %timeit generate_correlation_map(A, B)
100 loops, best of 3: 19.6 ms per loop
Case #2:
In [110]: A = np.random.rand(5000, 100)
In [111]: B = np.random.rand(5000, 100)
In [112]: %timeit corr2_coeff(A, B)
1 loops, best of 3: 368 ms per loop
In [113]: %timeit generate_correlation_map(A, B)
1 loops, best of 3: 493 ms per loop
Case #3:
In [114]: A = np.random.rand(10000, 10)
In [115]: B = np.random.rand(10000, 10)
In [116]: %timeit corr2_coeff(A, B)
1 loops, best of 3: 1.29 s per loop
In [117]: %timeit generate_correlation_map(A, B)
1 loops, best of 3: 1.83 s per loop
The other, loopy pearsonr based approach seemed too slow, but here are the runtimes for one small data size:
In [118]: A = np.random.rand(1000, 100)
In [119]: B = np.random.rand(1000, 100)
In [120]: %timeit corr2_coeff(A, B)
100 loops, best of 3: 15.3 ms per loop
In [121]: %timeit generate_correlation_map(A, B)
100 loops, best of 3: 19.7 ms per loop
In [122]: %timeit pearsonr_based(A, B)
1 loops, best of 3: 33 s per loop
@Divakar provides a great option for computing the unscaled correlation, which is what I originally asked for.
In order to calculate the correlation coefficient, a bit more is required:
import numpy as np

def generate_correlation_map(x, y):
    """Correlate each n with each m.

    Parameters
    ----------
    x : np.array
      Shape N X T.
    y : np.array
      Shape M X T.

    Returns
    -------
    np.array
      N X M array in which each element is a correlation coefficient.
    """
    mu_x = x.mean(1)
    mu_y = y.mean(1)
    n = x.shape[1]
    if n != y.shape[1]:
        raise ValueError('x and y must have the same number of timepoints.')
    # With ddof=n-1 the denominator becomes 1, so std returns the
    # square root of the raw sum of squared deviations.
    s_x = x.std(1, ddof=n - 1)
    s_y = y.std(1, ddof=n - 1)
    cov = np.dot(x, y.T) - n * np.dot(mu_x[:, np.newaxis], mu_y[np.newaxis, :])
    return cov / np.dot(s_x[:, np.newaxis], s_y[np.newaxis, :])
Here's a test of this function, which passes:
from scipy.stats import pearsonr

def test_generate_correlation_map():
    x = np.random.rand(10, 10)
    y = np.random.rand(20, 10)
    desired = np.empty((10, 20))
    for n in range(x.shape[0]):
        for m in range(y.shape[0]):
            desired[n, m] = pearsonr(x[n, :], y[m, :])[0]
    actual = generate_correlation_map(x, y)
    np.testing.assert_array_almost_equal(actual, desired)
For those interested in computing the Pearson correlation coefficient between a 1D and 2D array, I wrote the following function, where x is a 1D array and y a 2D array.
def pearsonr_2D(x, y):
    """Computes the Pearson correlation coefficient,
    where x is a 1D and y a 2D array."""
    upper = np.sum((x - np.mean(x)) * (y - np.mean(y, axis=1)[:, None]), axis=1)
    lower = np.sqrt(np.sum(np.power(x - np.mean(x), 2)) *
                    np.sum(np.power(y - np.mean(y, axis=1)[:, None], 2), axis=1))
    rho = upper / lower
    return rho
Example run:
>>> x
Out[1]: array([1, 2, 3])
>>> y
Out[2]: array([[ 1, 2, 3],
[ 6, 7, 12],
[ 9, 3, 1]])
>>> pearsonr_2D(x, y)
Out[3]: array([ 1. , 0.93325653, -0.96076892])
I'm trying to implement ZCA whitening and found some articles on it, but they are a bit confusing. Can someone shed some light for me?
Any tip or help is appreciated!
Here are the articles I read:
http://courses.media.mit.edu/2010fall/mas622j/whiten.pdf
http://bbabenko.tumblr.com/post/86756017649/learning-low-level-vision-feautres-in-10-lines-of
I tried several things, but I didn't understand most of them and got stuck at some step.
Right now I have this as a base to start again:
import numpy as np

dtype = np.float32
data = np.loadtxt("../inputData/train.csv", dtype=dtype, delimiter=',', skiprows=1)
img = ((data[1, 1:]).reshape((28, 28)).astype('uint8') * 255)
Here is a python function for generating the ZCA whitening matrix:
import numpy as np

def zca_whitening_matrix(X):
    """
    Function to compute the ZCA whitening matrix (aka Mahalanobis whitening).
    INPUT:  X: [M x N] matrix.
        Rows: Variables
        Columns: Observations
    OUTPUT: ZCAMatrix: [M x M] matrix
    """
    # Covariance matrix [row-wise variables]: Sigma = (X - mu) * (X - mu)' / N
    sigma = np.cov(X, rowvar=True)  # [M x M]
    # Singular Value Decomposition. sigma = U * np.diag(S) * V
    U, S, V = np.linalg.svd(sigma)
        # U: [M x M] eigenvectors of sigma.
        # S: [M x 1] eigenvalues of sigma.
        # V: [M x M] transpose of U
    # Whitening constant: prevents division by zero
    epsilon = 1e-5
    # ZCA Whitening matrix: U * Lambda * U'
    ZCAMatrix = np.dot(U, np.dot(np.diag(1.0 / np.sqrt(S + epsilon)), U.T))  # [M x M]
    return ZCAMatrix
And an example of the usage:
X = np.array([[0, 2, 2], [1, 1, 0], [2, 0, 1], [1, 3, 5], [10, 10, 10] ]) # Input: X [5 x 3] matrix
ZCAMatrix = zca_whitening_matrix(X) # get ZCAMatrix
ZCAMatrix # [5 x 5] matrix
xZCAMatrix = np.dot(ZCAMatrix, X) # project X onto the ZCAMatrix
xZCAMatrix # [5 x 3] matrix
Hope it helps!
Details on why Edgar Andrés Margffoy Tuay's answer is not correct: as pointed out in R.M's comment, Edgar Andrés Margffoy Tuay's ZCA whitening function contains a small but crucial mistake: the np.diag(S) should be removed. Numpy returns S as an m x 1 vector and not an m x m matrix (as is common in other svd implementations, e.g. Matlab). Hence the ZCAMatrix variable becomes an m x 1 vector and not an m x m matrix as it should be (when the input is m x n). (Also, the covariance matrix in Andfoy's answer is only valid if X is pre-centered, i.e. has mean 0.)
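For reference, here is a corrected version of that function (it appears further down this thread) with both fixes applied: X is centered first, and S is used directly instead of np.diag(S).

import numpy as np

def zca_whitening_fixed(inputs):
    inputs = inputs - inputs.mean(axis=1, keepdims=True)  # pre-center, per the note above
    sigma = np.dot(inputs, inputs.T) / inputs.shape[1]  # now a true covariance matrix
    U, S, V = np.linalg.svd(sigma)
    epsilon = 0.1
    # S is already a 1-D vector, so no inner np.diag(S)
    ZCAMatrix = np.dot(np.dot(U, np.diag(1.0 / np.sqrt(S + epsilon))), U.T)
    return np.dot(ZCAMatrix, inputs)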
Other references for ZCA: You can see the full answer, in Python, to the Stanford UFLDL ZCA Whitening exercise here.
Is your data stored in an m x n matrix, where m is the dimension of the data and n is the total number of cases? If that's not the case, you should resize your data. For instance, if your images are of size 28x28 and you have only one image, you should have a 1x784 vector. You could use this function:
import numpy as np

def flatten_matrix(matrix):
    vector = matrix.flatten(order='F')  # column-major flattening, as in MATLAB
    vector = vector.reshape(1, len(vector))
    return vector
Then you apply ZCA Whitening to your training set using:
def zca_whitening(inputs):
    sigma = np.dot(inputs, inputs.T) / inputs.shape[1]  # Correlation matrix
    U, S, V = np.linalg.svd(sigma)  # Singular Value Decomposition
    epsilon = 0.1  # Whitening constant, prevents division by zero
    # NOTE: see the correction above; S is already a 1-D vector, so the
    # inner np.diag(S) should be just S.
    ZCAMatrix = np.dot(np.dot(U, np.diag(1.0 / np.sqrt(np.diag(S) + epsilon))), U.T)  # ZCA whitening matrix
    return np.dot(ZCAMatrix, inputs)  # Data whitening
It is important to save the ZCAMatrix: you should multiply your test cases by it if you want to predict after training the neural net.
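A short sketch of that workflow (shapes and names are illustrative, and the corrected S handling from earlier in the thread is used): compute the whitening matrix on the training set once, save it, and apply the same matrix to test cases.

import numpy as np

# Illustrative data: m features x n cases, matching the layout above.
train_inputs = np.random.rand(784, 100)
test_inputs = np.random.rand(784, 20)

sigma = np.dot(train_inputs, train_inputs.T) / train_inputs.shape[1]
U, S, V = np.linalg.svd(sigma)
ZCAMatrix = np.dot(np.dot(U, np.diag(1.0 / np.sqrt(S + 0.1))), U.T)  # save this

train_white = np.dot(ZCAMatrix, train_inputs)
test_white = np.dot(ZCAMatrix, test_inputs)  # reuse the saved matrix at test time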
Finally, I invite you to take the Stanford UFLDL tutorials at http://ufldl.stanford.edu/wiki/index.php/UFLDL_Tutorial or http://ufldl.stanford.edu/tutorial/. They have pretty good explanations and also some programming exercises in MATLAB; however, almost all the functions found in MATLAB exist in Numpy under the same name. I hope this gives some insight.
I may be a little late to the discussion, but I found this thread recently as I struggled to implement ZCA in TensorFlow, because my poor PC processor was too slow to process a large volume of data.
If anyone is interested, I have made a gist of my implementation of the ZCA in TensorFlow:
import tensorflow as tf
from keras.datasets import mnist
import numpy as np

tf.enable_eager_execution()
assert tf.executing_eagerly()


class ZCA(object):
    """
    Simple ZCA aka Mahalanobis transformation class made in TensorFlow.
    The code was largely ported from Keras ImageDataGenerator
    """

    def __init__(self, epsilon=1e-5, dtype='float64'):
        """epsilon is the normalization constant, dtype refers to the data type used in the computation.
        WARNING: the default precision is set to float64, as I have found that when computing the mean,
        TensorFlow and numpy results can differ by a substantial amount.
        Usage: the fit method computes the principal components and should be called first,
        the compute method returns the actual transformed tensor
        NOTE: The input to both methods must be a 4D tensor.
        """
        assert dtype in ('float32', 'float64'), "precision must be float32 or float64"
        self.epsilon = epsilon
        self.dtype = dtype
        self.princ_comp = None
        self.mean = None

    def _featurewise_center(self, images_tensor):
        if self.mean is None:
            self.mean, _ = tf.nn.moments(images_tensor, axes=(0, 1, 2))
            broadcast_shape = [1, 1, 1]
            broadcast_shape[2] = images_tensor.shape[3]
            self.mean = tf.reshape(self.mean, broadcast_shape)
        norm_images = tf.subtract(images_tensor, self.mean)
        return norm_images

    def fit(self, images_tensor):
        assert len(images_tensor.shape) == 4, "The input should be a 4D tensor"
        if images_tensor.dtype is not self.dtype:  # numerical error for float32
            images_tensor = tf.cast(images_tensor, self.dtype)
        images_tensor = self._featurewise_center(images_tensor)
        flat = tf.reshape(images_tensor, (-1, np.prod(images_tensor.shape[1:].as_list())))
        sigma = tf.div(tf.matmul(tf.transpose(flat), flat), tf.cast(flat.shape[0], self.dtype))
        s, u, _ = tf.svd(sigma)
        s_inv = tf.div(tf.cast(1, self.dtype), (tf.sqrt(tf.add(s[tf.newaxis], self.epsilon))))
        self.princ_comp = tf.matmul(tf.multiply(u, s_inv), tf.transpose(u))

    def compute(self, images_tensor):
        assert len(images_tensor.shape) == 4, "The input should be a 4D tensor"
        assert self.princ_comp is not None, "Fit method should be called first"
        if images_tensor.dtype is not self.dtype:
            images_tensor = tf.cast(images_tensor, self.dtype)
        images_tensors = self._featurewise_center(images_tensor)
        flatx = tf.cast(tf.reshape(images_tensors, (-1, np.prod(images_tensors.shape[1:]))), self.dtype)
        whitex = tf.matmul(flatx, self.princ_comp)
        x = tf.reshape(whitex, images_tensors.shape)
        return x


def main():
    import matplotlib.pyplot as plt

    train_set, test_set = mnist.load_data()
    x_train, y_train = train_set

    zca1 = ZCA(epsilon=1e-5, dtype='float64')
    # input should be a 4D tensor
    x_train = x_train.reshape(*x_train.shape, 1)
    zca1.fit(x_train)
    x_train_transf = zca1.compute(x_train)
    # reshaping to 28*28 and casting to uint8 for plotting
    x_train_transf = tf.reshape(x_train_transf, x_train_transf.shape[0:3])

    fig, axes = plt.subplots(3, 3)
    for i, ax in enumerate(axes.flat):
        # Plot image.
        ax.imshow(x_train_transf[i], cmap='binary')
        xlabel = "True: %d" % y_train[i]
        ax.set_xlabel(xlabel)
        ax.set_xticks([])
        ax.set_yticks([])
    plt.show()


if __name__ == '__main__':
    main()
I know this isn't a proper answer to the original question, but still it may be useful to anyone who is looking for a GPU implementation of ZCA but couldn't find one.
Although both answers refer to the UFLDL tutorial, neither of them seems to use the steps described in it.
Therefore, I thought it might not be a bad idea to just provide an answer that simply implements PCA/ZCA whitening according to the tutorial:
import numpy as np
# generate some random, 2D data
x = np.random.randn(1000, 2)
# and center it
x_c = x - np.mean(x, 0)
# compute the 2x2 covariance matrix
# (remember that covariance matrix is symmetric)
sigma = np.cov(x, rowvar=False)
# and extract eigenvalues and eigenvectors
# using the algorithm for symmetric matrices
l,u = np.linalg.eigh(sigma)
# NOTE that for symmetric matrices,
# eigenvalues and singular values are the same.
# u, l, _ = np.linalg.svd(sigma) should thus give equivalent results
# rotate the (centered) data to decorrelate it
x_rot = np.dot(x_c, u)
# check that the covariance is diagonal (indicating decorrelation)
np.allclose(np.cov(x_rot.T), np.diag(np.diag(np.cov(x_rot.T))))
# scale the data by eigenvalues to get unit variance
x_white = x_rot / np.sqrt(l)
# have the whitened data be closer to the original data
x_zca = np.dot(x_white, u.T)
I assume you can wrap this in a function by yourself...
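For instance, a minimal wrapper of the steps above (my sketch; it assumes rows are observations and columns are features, as in the example):

import numpy as np

def zca_whiten(x, epsilon=1e-8):
    x_c = x - np.mean(x, 0)  # center
    sigma = np.cov(x_c, rowvar=False)  # feature covariance
    l, u = np.linalg.eigh(sigma)  # eigendecomposition of the symmetric matrix
    x_rot = np.dot(x_c, u)  # decorrelate
    x_white = x_rot / np.sqrt(l + epsilon)  # scale to unit variance
    return np.dot(x_white, u.T)  # rotate back (ZCA)

x_zca = zca_whiten(np.random.randn(1000, 2))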
For completeness, different implementation flavours and their runtime (evaluated on a centred version of CIFAR10):
import numpy as np

x = np.random.randn(10_000, 3, 32, 32)
x_ = np.reshape(x, (len(x), -1))
x_c = x_ - np.mean(x_, axis=0)

def zca1(x):
    s, u = np.linalg.eigh(x.T @ x)
    scale = np.sqrt(len(x) / s)
    return (u * scale) @ u.T

def zca2(x):
    u, s, _ = np.linalg.svd(x.T @ x, hermitian=True)
    scale = np.sqrt(len(x) / s)
    return (u * scale) @ u.T

def zca3(x):
    _, s, v = np.linalg.svd(x, full_matrices=False)
    scale = np.sqrt(len(x)) / s
    return (v.T * scale) @ v

%timeit zca1(x_c)
# 4.57 s ± 14.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
%timeit zca2(x_c)
# 4.62 s ± 22.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
%timeit zca3(x_c)
# 20.2 s ± 1.2 s per loop (mean ± std. dev. of 7 runs, 1 loop each)
For the mathematics behind this, I refer to this excellent answer from cross validated.
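As a sanity check (my addition), the three flavours should agree on small, well-conditioned inputs:

x_small = np.random.randn(500, 20)
x_small -= np.mean(x_small, axis=0)  # all three expect centred data
W1, W2, W3 = zca1(x_small), zca2(x_small), zca3(x_small)
assert np.allclose(W1, W2) and np.allclose(W1, W3)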
This works with an array of 48x48:
import numpy as np

def flatten_matrix(matrix):
    vector = matrix.flatten(order='F')
    vector = vector.reshape(1, len(vector))
    return vector

def zca_whitening(inputs):
    sigma = np.dot(inputs, inputs.T) / inputs.shape[1]  # Correlation matrix
    U, S, V = np.linalg.svd(sigma)  # Singular Value Decomposition
    epsilon = 0.1  # Whitening constant, prevents division by zero
    # NOTE: as discussed earlier in this thread, S is already a 1-D vector,
    # so the inner np.diag(S) should be just S.
    ZCAMatrix = np.dot(np.dot(U, np.diag(1.0 / np.sqrt(np.diag(S) + epsilon))), U.T)  # ZCA whitening matrix
    return np.dot(ZCAMatrix, inputs)  # Data whitening

def global_contrast_normalize(X, scale=1., subtract_mean=True, use_std=True,
                              sqrt_bias=10, min_divisor=1e-8):
    """
    __author__ = "David Warde-Farley"
    __copyright__ = "Copyright 2012, Universite de Montreal"
    __credits__ = ["David Warde-Farley"]
    __license__ = "3-clause BSD"
    __email__ = "wardefar#iro"
    __maintainer__ = "David Warde-Farley"

    .. [1] A. Coates, H. Lee and A. Ng. "An Analysis of Single-Layer
       Networks in Unsupervised Feature Learning". AISTATS 14, 2011.
       http://www.stanford.edu/~acoates/papers/coatesleeng_aistats_2011.pdf
    """
    assert X.ndim == 2, "X.ndim must be 2"
    scale = float(scale)
    assert scale >= min_divisor

    mean = X.mean(axis=1)
    if subtract_mean:
        X = X - mean[:, np.newaxis]
    else:
        X = X.copy()
    if use_std:
        ddof = 1
        if X.shape[1] == 1:
            ddof = 0
        normalizers = np.sqrt(sqrt_bias + X.var(axis=1, ddof=ddof)) / scale
    else:
        normalizers = np.sqrt(sqrt_bias + (X ** 2).sum(axis=1)) / scale
    normalizers[normalizers < min_divisor] = 1.
    X /= normalizers[:, np.newaxis]  # Does not make a copy.
    return X

def ZeroCenter(data):
    data = data - np.mean(data, axis=0)
    return data

def Zerocenter_ZCA_whitening_Global_Contrast_Normalize(data):
    numpy_data = np.array(data).reshape(48, 48)
    data2 = ZeroCenter(numpy_data)
    data3 = zca_whitening(flatten_matrix(data2)).reshape(48, 48)
    data4 = global_contrast_normalize(data3)
    data5 = np.rot90(data4, 3)
    return data5
For example, applied to a sample input image it returns a whitened version (images omitted here).
Here is the full code: https://gist.github.com/m-alcu/45f4a083cb5e388d2ed26ace4392ed66. You need to put the fer2013.csv file in the same directory (https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge/data).
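A hedged usage sketch (the "pixels" column name follows the Kaggle CSV; adjust if your copy differs): load one 48x48 face from fer2013.csv and run the pipeline above.

import numpy as np
import pandas as pd

df = pd.read_csv("fer2013.csv")
# each row's "pixels" field is a space-separated string of 48*48 values
pixels = np.array(df["pixels"][0].split(), dtype=float)
whitened = Zerocenter_ZCA_whitening_Global_Contrast_Normalize(pixels)
print(whitened.shape)  # (48, 48)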