def GaussianMatrix(X, sigma):
    row, col = X.shape
    GassMatrix = np.zeros(shape=(row, row))
    X = np.asarray(X)
    i = 0
    for v_i in X:
        j = 0
        for v_j in X:
            GassMatrix[i, j] = Gaussian(v_i.T, v_j.T, sigma)
            j += 1
        i += 1
    return GassMatrix

def Gaussian(x, z, sigma):
    return np.exp((-(np.linalg.norm(x - z)**2)) / (2 * sigma**2))
This is my current way. Is there any way I can use matrix operations to do this? X is the data points.
I myself used the accepted answer for my image processing, but I find it (and the other answers) too dependent on other modules. Therefore, here is my compact solution:
import numpy as np
def gkern(l=5, sig=1.):
    """
    Creates a Gaussian kernel with side length `l` and a sigma of `sig`.
    """
    ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)
    gauss = np.exp(-0.5 * np.square(ax) / np.square(sig))
    kernel = np.outer(gauss, gauss)
    return kernel / np.sum(kernel)
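As a quick sanity check (my own addition), the kernel should sum to 1:

k = gkern(5, 1.)
print(k.shape)  # (5, 5)
print(k.sum())  # 1.0, up to floating point error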
Edit: Changed arange to linspace to handle even side lengths
Edit: Use separability for faster computation, thank you Yves Daoust.
Do you want to use the Gaussian kernel for e.g. image smoothing? If so, there's a function gaussian_filter() in scipy:
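A minimal usage sketch (the image here is just a random placeholder):

import numpy as np
from scipy.ndimage import gaussian_filter

img = np.random.rand(100, 100)            # stand-in for a real image
smoothed = gaussian_filter(img, sigma=2)  # Gaussian smoothing with sigma = 2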
Updated answer
This should work - while it's still not 100% accurate, it attempts to account for the probability mass within each cell of the grid. I think that using the probability density at the midpoint of each cell is slightly less accurate, especially for small kernels. See https://homepages.inf.ed.ac.uk/rbf/HIPR2/gsmooth.htm for an example.
import numpy as np
import scipy.stats as st
def gkern(kernlen=21, nsig=3):
    """Returns a 2D Gaussian kernel."""
    x = np.linspace(-nsig, nsig, kernlen+1)
    kern1d = np.diff(st.norm.cdf(x))
    kern2d = np.outer(kern1d, kern1d)
    return kern2d/kern2d.sum()
Testing it on the example in Figure 3 from the link:
gkern(5, 2.5)*273
gives
array([[ 1.0278445 , 4.10018648, 6.49510362, 4.10018648, 1.0278445 ],
[ 4.10018648, 16.35610171, 25.90969361, 16.35610171, 4.10018648],
[ 6.49510362, 25.90969361, 41.0435344 , 25.90969361, 6.49510362],
[ 4.10018648, 16.35610171, 25.90969361, 16.35610171, 4.10018648],
[ 1.0278445 , 4.10018648, 6.49510362, 4.10018648, 1.0278445 ]])
The original (accepted) answer below is wrong
The square root is unnecessary, and the definition of the interval is incorrect.
import numpy as np
import scipy.stats as st
def gkern(kernlen=21, nsig=3):
    """Returns a 2D Gaussian kernel array."""
    interval = (2*nsig+1.)/(kernlen)
    x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1)
    kern1d = np.diff(st.norm.cdf(x))
    kernel_raw = np.sqrt(np.outer(kern1d, kern1d))
    kernel = kernel_raw/kernel_raw.sum()
    return kernel
I'm trying to improve on FuzzyDuck's answer here. I think this approach is shorter and easier to understand. Here I'm using scipy.signal.gaussian to get the 2D Gaussian kernel.
import numpy as np
from scipy import signal
def gkern(kernlen=21, std=3):
    """Returns a 2D Gaussian kernel array."""
    gkern1d = signal.gaussian(kernlen, std=std).reshape(kernlen, 1)
    gkern2d = np.outer(gkern1d, gkern1d)
    return gkern2d
Plotting it using matplotlib.pyplot:
import matplotlib.pyplot as plt
plt.imshow(gkern(21), interpolation='none')
You may simply gaussian-filter a simple 2D Dirac function; the result is then the filter function that was being used:
import numpy as np
import scipy.ndimage.filters as fi
def gkern2(kernlen=21, nsig=3):
    """Returns a 2D Gaussian kernel array."""
    # create nxn zeros
    inp = np.zeros((kernlen, kernlen))
    # set element at the middle to one, a dirac delta
    inp[kernlen//2, kernlen//2] = 1
    # gaussian-smooth the dirac, resulting in a gaussian filter mask
    return fi.gaussian_filter(inp, nsig)
I tried using numpy only. Here is the code
def get_gauss_kernel(size=3, sigma=1):
    center = size // 2
    kernel = np.zeros((size, size))
    for i in range(size):
        for j in range(size):
            diff = np.sqrt((i - center)**2 + (j - center)**2)
            kernel[i, j] = np.exp(-(diff**2) / (2 * sigma**2))
    return kernel / np.sum(kernel)
You can visualise the result using:
plt.imshow(get_gauss_kernel(5,1))
A 2D gaussian kernel matrix can be computed with numpy broadcasting:
def gaussian_kernel(size=21, sigma=3):
    """Returns a 2D Gaussian kernel.

    Parameters
    ----------
    size : int, the kernel size (will be square)
    sigma : float, the sigma Gaussian parameter

    Returns
    -------
    out : array, shape = (size, size)
        an array with the centered gaussian kernel
    """
    x = np.linspace(-(size // 2), size // 2, size)
    x /= np.sqrt(2) * sigma
    x2 = x**2
    kernel = np.exp(-x2[:, None] - x2[None, :])
    return kernel / kernel.sum()
For small kernel sizes this should be reasonably fast.
Note: this makes changing the sigma parameter easier with respect to the accepted answer.
If you are a computer vision engineer and you need a heatmap for a particular point as a Gaussian distribution (especially for keypoint detection on an image):
def gaussian_heatmap(center=(2, 2), image_size=(10, 10), sig=1):
    """
    Produces a single gaussian at the expected center.
    :param center: the mean position (X, Y) - where the high value is expected
    :param image_size: the total image size (width, height)
    :param sig: the sigma value
    :return: the heatmap array
    """
    x_axis = np.linspace(0, image_size[0]-1, image_size[0]) - center[0]
    y_axis = np.linspace(0, image_size[1]-1, image_size[1]) - center[1]
    xx, yy = np.meshgrid(x_axis, y_axis)
    kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))
    return kernel
Usage and output:
kernel = gaussian_heatmap(center = (2, 2), image_size = (10, 10), sig = 1)
plt.imshow(kernel)
print("max at :", np.unravel_index(kernel.argmax(), kernel.shape))
print("kernel shape", kernel.shape)
max at : (2, 2)
kernel shape (10, 10)
kernel = gaussian_heatmap(center = (25, 40), image_size = (100, 50), sig = 5)
plt.imshow(kernel)
print("max at :", np.unravel_index(kernel.argmax(), kernel.shape))
print("kernel shape", kernel.shape)
max at : (40, 25)
kernel shape (50, 100)
linalg.norm takes an axis parameter. With a little experimentation I found I could calculate the norm for all combinations of rows with
np.linalg.norm(x[None,:,:]-x[:,None,:],axis=2)
It expands x into a 3d array of all differences, and takes the norm on the last dimension.
So I can apply this to your code by adding the axis parameter to your Gaussian:
def Gaussian(x, z, sigma, axis=None):
    return np.exp((-(np.linalg.norm(x-z, axis=axis)**2))/(2*sigma**2))
x=np.arange(12).reshape(3,4)
GaussianMatrix(x,1)
produces
array([[ 1.00000000e+00, 1.26641655e-14, 2.57220937e-56],
[ 1.26641655e-14, 1.00000000e+00, 1.26641655e-14],
[ 2.57220937e-56, 1.26641655e-14, 1.00000000e+00]])
Matching:
Gaussian(x[None,:,:],x[:,None,:],1,axis=2)
array([[ 1.00000000e+00, 1.26641655e-14, 2.57220937e-56],
[ 1.26641655e-14, 1.00000000e+00, 1.26641655e-14],
[ 2.57220937e-56, 1.26641655e-14, 1.00000000e+00]])
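As an aside (my own addition, not part of the original answer): if scipy is available, scipy.spatial.distance.cdist computes the same pairwise squared distances without materializing the 3D difference array:

import numpy as np
from scipy.spatial.distance import cdist

def gaussian_matrix(X, sigma):
    # cdist with 'sqeuclidean' returns ||x_i - x_j||^2 for all row pairs
    return np.exp(-cdist(X, X, 'sqeuclidean') / (2 * sigma**2))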
Building on Teddy Hartanto's answer: you can just calculate your own one-dimensional Gaussian functions and then use np.outer to calculate the two-dimensional one. A very fast and efficient way.
With the code below you can also use different Sigmas for every dimension
import numpy as np
def generate_gaussian_mask(shape, sigma, sigma_y=None):
    if sigma_y is None:
        sigma_y = sigma
    rows, cols = shape

    def get_gaussian_fct(size, sigma):
        fct_gaus_x = np.linspace(0, size, size)
        fct_gaus_x = fct_gaus_x - size / 2
        fct_gaus_x = fct_gaus_x**2
        fct_gaus_x = fct_gaus_x / (2 * sigma**2)
        fct_gaus_x = np.exp(-fct_gaus_x)
        return fct_gaus_x

    mask = np.outer(get_gaussian_fct(rows, sigma), get_gaussian_fct(cols, sigma_y))
    return mask
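Usage might look like this (the shape and sigma values are arbitrary examples):

import matplotlib.pyplot as plt

mask = generate_gaussian_mask((100, 200), sigma=20, sigma_y=40)
plt.imshow(mask)
plt.show()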
A good way to do that is to use the gaussian_filter function to recover the kernel.
For instance:
import numpy as np
from scipy.ndimage import gaussian_filter

indicatrice = np.zeros((5, 5))
indicatrice[2, 2] = 1
gaussian_kernel = gaussian_filter(indicatrice, sigma=1)
gaussian_kernel /= gaussian_kernel[2, 2]
This gives
array([[0.02144593, 0.08887207, 0.14644428, 0.08887207, 0.02144593],
       [0.08887207, 0.36828649, 0.60686612, 0.36828649, 0.08887207],
       [0.14644428, 0.60686612, 1.        , 0.60686612, 0.14644428],
       [0.08887207, 0.36828649, 0.60686612, 0.36828649, 0.08887207],
       [0.02144593, 0.08887207, 0.14644428, 0.08887207, 0.02144593]])
Adapting the accepted answer by FuzzyDuck to match the results of this website: http://dev.theomader.com/gaussian-kernel-calculator/ I now present this definition to you:
import numpy as np
import scipy.stats as st
def gkern(kernlen=21, sig=3):
    """Returns a 2D Gaussian kernel."""
    x = np.linspace(-(kernlen/2)/sig, (kernlen/2)/sig, kernlen+1)
    kern1d = np.diff(st.norm.cdf(x))
    kern2d = np.outer(kern1d, kern1d)
    return kern2d/kern2d.sum()
print(gkern(kernlen=5, sig=1))
output:
[[0.003765 0.015019 0.02379159 0.015019 0.003765 ]
[0.015019 0.05991246 0.0949073 0.05991246 0.015019 ]
[0.02379159 0.0949073 0.15034262 0.0949073 0.02379159]
[0.015019 0.05991246 0.0949073 0.05991246 0.015019 ]
[0.003765 0.015019 0.02379159 0.015019 0.003765 ]]
As I didn't find what I was looking for, I coded my own one-liner. You can modify it accordingly (according to the dimensions and the standard deviation).
Here is the one-liner function for a 3x5 patch for example.
from scipy import signal
def gaussian2D(patchHeight, patchWidth, stdHeight=1, stdWidth=1):
    # outer product of the two 1D Gaussian windows
    gaussianWindow = signal.gaussian(patchHeight, stdHeight).reshape(-1, 1) @ signal.gaussian(patchWidth, stdWidth).reshape(1, -1)
    return gaussianWindow
print(gaussian2D(3, 5))
You get an output like this:
[[0.082085 0.36787944 0.60653066 0.36787944 0.082085 ]
[0.13533528 0.60653066 1. 0.60653066 0.13533528]
[0.082085 0.36787944 0.60653066 0.36787944 0.082085 ]]
You can read more about scipy's Gaussian here.
Yet another implementation.
This is normalized so that for sigma > 1 and sufficiently large win_size, the total sum of the kernel elements equals 1.
def gaussian_kernel(win_size, sigma):
    t = np.arange(win_size)
    x, y = np.meshgrid(t, t)
    o = (win_size - 1) / 2
    r = np.sqrt((x - o)**2 + (y - o)**2)
    scale = 1 / (sigma**2 * 2 * np.pi)
    return scale * np.exp(-0.5 * (r / sigma)**2)
To generate a 5x5 kernel:
gaussian_kernel(win_size=5, sigma=1)
I took a similar approach to Nils Werner's answer -- since convolution of any kernel with a Kronecker delta results in the kernel itself centered around that Kronecker delta -- but I made it slightly more general to deal with both odd and even dimensions. In three lines:
import numpy as np
import scipy.ndimage as scim

def gaussian_kernel(dimension: int, sigma: float):
    dirac = np.zeros((dimension, dimension))
    dirac[(dimension-1)//2:dimension//2+1, (dimension-1)//2:dimension//2+1] = 1.0 / (1 + 3 * ((dimension + 1) % 2))
    return scim.gaussian_filter(dirac, sigma=sigma)
The second line creates either a single 1.0 in the middle of the matrix (if the dimension is odd), or a square of four 0.25 elements (if the dimension is even). The division could be moved to the third line too; the result is normalised either way.
For those who like to have the kernel the matrix with one (odd) or four (even) 1.0 element(s) in the middle instead of normalisation, this works:
import numpy as np
import scipy.ndimage as scim

def gaussian_kernel(dimension: int, sigma: float, ones_in_the_middle=False):
    dirac = np.zeros((dimension, dimension))
    dirac[(dimension-1)//2:dimension//2+1, (dimension-1)//2:dimension//2+1] = 1.0
    kernel = scim.gaussian_filter(dirac, sigma=sigma)
    divisor = kernel[(dimension-1)//2, (dimension-1)//2] if ones_in_the_middle else 1 + 3 * ((dimension + 1) % 2)
    return kernel/divisor
I am trying to perform a 2d convolution in python using numpy
I have a 2d array as follows with kernel H_r for the rows and H_c for the columns
data = np.zeros((nr, nc), dtype=np.float32)
# fill array with some data here then convolve
for r in range(nr):
    data[r,:] = np.convolve(data[r,:], H_r, 'same')
for c in range(nc):
    data[:,c] = np.convolve(data[:,c], H_c, 'same')
data = data.astype(np.uint8)
It does not produce the output that I was expecting. Does this code look OK? I think the problem is with the casting from float32 to 8-bit. What's the best way to do this?
Thanks
Maybe it is not the most optimized solution, but this is an implementation I used before with numpy library for Python:
def convolution2d(image, kernel, bias):
    m, n = kernel.shape
    if (m == n):
        y, x = image.shape
        y = y - m + 1
        x = x - m + 1
        new_image = np.zeros((y, x))
        for i in range(y):
            for j in range(x):
                new_image[i][j] = np.sum(image[i:i+m, j:j+m]*kernel) + bias
    return new_image
I hope this code helps others with the same question.
Regards.
Edit [Jan 2019]
@Tashus' comment below is correct, and @dudemeister's answer is thus probably more on the mark. The function he suggested is also more efficient, by avoiding a direct 2D convolution and the number of operations that would entail.
Possible Problem
I believe you are doing two 1d convolutions, the first along the rows and the second along the columns, and replacing the results from the first with the results of the second.
Notice that numpy.convolve with the 'same' argument returns an array of equal shape to the largest one provided, so when you make the first convolution you already populated the entire data array.
One good way to visualize your arrays during these steps is to use Hinton diagrams, so you can check which elements already have a value.
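As a lightweight stand-in for a full Hinton diagram, matplotlib's spy plot shows which entries are nonzero (my own suggestion):

import matplotlib.pyplot as plt

plt.spy(data)  # dark markers indicate nonzero entries
plt.show()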
Possible Solution
You can try to add the results of the two convolutions (use data[:,c] += .. instead of data[:,c] = on the second for loop), if your convolution matrix is the result of combining the one dimensional H_r and H_c matrices.
Another way to do that would be to use scipy.signal.convolve2d with a 2d convolution array, which is probably what you wanted to do in the first place.
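A sketch of that variant (my addition; it assumes H_r and H_c are the 1D filters from the question, whose outer product forms the separable 2D kernel):

import numpy as np
from scipy.signal import convolve2d

kernel = np.outer(H_c, H_r)                  # 2D kernel from the 1D parts
data = convolve2d(data, kernel, mode='same')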
Since you already have your kernel separated you should simply use the sepfir2d function from scipy:
from scipy.signal import sepfir2d
convolved = sepfir2d(data, H_r, H_c)
On the other hand, the code you have there looks all right ...
I checked out many implementations and found none for my purpose, which should be really simple. So here is a dead-simple implementation with a for loop:
def convolution2d(image, kernel, stride, padding):
    image = np.pad(image, [(padding, padding), (padding, padding)], mode='constant', constant_values=0)
    kernel_height, kernel_width = kernel.shape
    padded_height, padded_width = image.shape
    output_height = (padded_height - kernel_height) // stride + 1
    output_width = (padded_width - kernel_width) // stride + 1
    new_image = np.zeros((output_height, output_width)).astype(np.float32)
    for y in range(0, output_height):
        for x in range(0, output_width):
            new_image[y][x] = np.sum(image[y * stride:y * stride + kernel_height,
                                           x * stride:x * stride + kernel_width] * kernel).astype(np.float32)
    return new_image
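For example (my addition), the output size follows (padded - kernel) // stride + 1:

import numpy as np

image = np.arange(49, dtype=np.float32).reshape(7, 7)
kernel = np.ones((3, 3), dtype=np.float32)
out = convolution2d(image, kernel, stride=2, padding=1)
print(out.shape)  # (4, 4): (7 + 2*1 - 3) // 2 + 1 = 4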
It might not be the most optimized solution either, but it is approximately ten times faster than the one proposed by @omotto, and it only uses basic numpy functions (reshape, expand_dims, tile, ...) and no 'for' loops:
def gen_idx_conv1d(in_size, ker_size):
    """
    Generates a list of indices. These indices correspond to the indices
    of a 1D input tensor on which we would like to apply a 1D convolution.

    For instance, with a 1D input array of size 5 and a kernel of size 3, the
    1D convolution product will successively look at elements of indices [0,1,2],
    [1,2,3] and [2,3,4] in the input array. In this case, the function gen_idx_conv1d(5,3)
    outputs the following array: array([0,1,2,1,2,3,2,3,4]).

    args:
        in_size: (type: int) size of the input 1d array.
        ker_size: (type: int) kernel size.

    return:
        idx_list: (type: np.array) list of the successive indices of the 1D input array
        accessed by the 1D convolution algorithm.

    example:
        >>> gen_idx_conv1d(in_size=5, ker_size=3)
        array([0, 1, 2, 1, 2, 3, 2, 3, 4])
    """
    f = lambda dim1, dim2, axis: np.reshape(np.tile(np.expand_dims(np.arange(dim1), axis), dim2), -1)
    out_size = in_size - ker_size + 1
    return f(ker_size, out_size, 0) + f(out_size, ker_size, 1)
def repeat_idx_2d(idx_list, nbof_rep, axis):
    """
    Repeats an array of indices (idx_list) a number of times (nbof_rep) "along" an axis
    (axis). This function helps to browse through a 2d array of size
    (len(idx_list), nbof_rep).

    args:
        idx_list: (type: np.array or list) a 1D array of indices.
        nbof_rep: (type: int) number of repetitions.
        axis: (type: int) axis "along" which the repetition will be applied.

    return:
        idx_list: (type: np.array) a 1D array of indices of size len(idx_list)*nbof_rep.

    example:
        >>> a = np.array([0, 1, 2])
        >>> repeat_idx_2d(a, 3, 0) # repeats array 'a' 3 times along 'axis' 0
        array([0, 0, 0, 1, 1, 1, 2, 2, 2])
        >>> repeat_idx_2d(a, 3, 1) # repeats array 'a' 3 times along 'axis' 1
        array([0, 1, 2, 0, 1, 2, 0, 1, 2])
        >>> b = np.reshape(np.arange(3*4), (3,4))
        >>> b[repeat_idx_2d(np.arange(3), 4, 0), repeat_idx_2d(np.arange(4), 3, 1)]
        array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11])
    """
    assert axis in [0, 1], "Axis should be equal to 0 or 1."
    tile_axis = (nbof_rep, 1) if axis else (1, nbof_rep)
    return np.reshape(np.tile(np.expand_dims(idx_list, 1), tile_axis), -1)
def conv2d(im, ker):
    """
    Performs a 'valid' 2D convolution on an image. The input image may be
    a 2D or a 3D array.

    The output image's first two dimensions will be reduced depending on the
    convolution size.

    The kernel may be a 2D or 3D array. If 2D, it will be applied on every
    channel of the input image. If 3D, its last dimension must match the
    image one.

    args:
        im: (type: np.array) image (2D or 3D).
        ker: (type: np.array) convolution kernel (2D or 3D).

    returns:
        im: (type: np.array) convolved image.

    example:
        >>> im = np.reshape(np.arange(10*10*3),(10,10,3))/(10*10*3) # 3D image
        >>> ker = np.array([[0,1,0],[-1,0,1],[0,-1,0]]) # 2D kernel
        >>> conv2d(im, ker) # 3D array of shape (8,8,3)
    """
    if len(im.shape) == 2:  # if the image is a 2D array, it is reshaped by expanding the last dimension
        im = np.expand_dims(im, -1)
    im_x, im_y, im_w = im.shape

    if len(ker.shape) == 2:  # if the kernel is a 2D array, it is reshaped so it will be applied to all of the image channels
        ker = np.tile(np.expand_dims(ker, -1), [1, 1, im_w])  # the same kernel will be applied to all of the channels
    assert ker.shape[-1] == im.shape[-1], "Kernel and image last dimension must match."
    ker_x = ker.shape[0]
    ker_y = ker.shape[1]

    # shape of the output image
    out_x = im_x - ker_x + 1
    out_y = im_y - ker_y + 1

    # reshapes the image to (out_x, ker_x, out_y, ker_y, im_w)
    idx_list_x = gen_idx_conv1d(im_x, ker_x)  # computes the indices of a 1D conv (cf. gen_idx_conv1d doc)
    idx_list_y = gen_idx_conv1d(im_y, ker_y)
    idx_reshaped_x = repeat_idx_2d(idx_list_x, len(idx_list_y), 0)  # repeats the previous indices to be used in 2D (cf. repeat_idx_2d doc)
    idx_reshaped_y = repeat_idx_2d(idx_list_y, len(idx_list_x), 1)
    im_reshaped = np.reshape(im[idx_reshaped_x, idx_reshaped_y, :], [out_x, ker_x, out_y, ker_y, im_w])

    # reshapes the 2D kernel
    ker = np.reshape(ker, [1, ker_x, 1, ker_y, im_w])

    # applies the kernel to the image and reduces the dimension back to the one of original input image
    return np.squeeze(np.sum(im_reshaped * ker, axis=(1, 3)))
I tried to add a lot of comments to explain the method, but the global idea is to reshape the 3D input image to a 5D one of shape (output_image_height, kernel_height, output_image_width, kernel_width, output_image_channel) and then to apply the kernel directly using basic array multiplication. Of course, this method uses more memory (during execution the size of the image is multiplied by kernel_height*kernel_width), but it is faster.
To do this reshape step, I 'over-used' the indexing capabilities of numpy arrays, especially the possibility of giving a numpy array as indices into a numpy array.
This method could also be used to re-code the 2D convolution product in PyTorch or TensorFlow using the basic math functions, but I have no doubt in saying that it would be slower than the existing nn.conv2d operator...
I really enjoyed coding this method using only basic numpy tools.
One of the most obvious approaches is to hard-code the kernel.
img = img.convert('L')
a = np.array(img)
out = np.zeros([a.shape[0]-2, a.shape[1]-2], dtype='float')
out += a[:-2, :-2]
out += a[1:-1, :-2]
out += a[2:, :-2]
out += a[:-2, 1:-1]
out += a[1:-1,1:-1]
out += a[2:, 1:-1]
out += a[:-2, 2:]
out += a[1:-1, 2:]
out += a[2:, 2:]
out /= 9.0
out = out.astype('uint8')
img = Image.fromarray(out)
This example does a 3x3 box blur, completely unrolled. You can multiply the values where you have a different value and divide them by a different amount. But, if you honestly want the quickest and dirtiest method, this is it. I think it beats Guillaume Mougeot's method by a factor of about 5, and his method beats the others by a factor of 10.
It may need a few extra steps if you're doing something like a Gaussian blur, where you need to multiply by different weights.
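For instance (my addition), a 3x3 Gaussian approximation with the classic 1-2-1 weights fits the same unrolled pattern; each shifted slice just gets its own multiplier:

# 3x3 Gaussian approximation: weights 1-2-1 / 2-4-2 / 1-2-1, normalized by 16
out  = 1.0*a[:-2, :-2] + 2.0*a[:-2, 1:-1] + 1.0*a[:-2, 2:]
out += 2.0*a[1:-1, :-2] + 4.0*a[1:-1, 1:-1] + 2.0*a[1:-1, 2:]
out += 1.0*a[2:, :-2] + 2.0*a[2:, 1:-1] + 1.0*a[2:, 2:]
out /= 16.0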
Try to first round and then cast to uint8:
data = data.round().astype(np.uint8)
I wrote this convolve_stride, which uses numpy.lib.stride_tricks.as_strided. It supports both strides and dilation, and it is also compatible with tensors of order > 2.
import numpy as np
from numpy.lib.stride_tricks import as_strided

def conv_view(X, F_s, dr, std):
    X_s = np.array(X.shape)
    F_s = np.array(F_s)
    dr = np.array(dr)
    Fd_s = (F_s - 1) * dr + 1
    if np.any(Fd_s > X_s):
        raise ValueError('(Dilated) filter size must be smaller than X')
    std = np.array(std)
    X_ss = np.array(X.strides)
    Xn_s = (X_s - Fd_s) // std + 1
    Xv_s = np.append(Xn_s, F_s)
    Xv_ss = np.tile(X_ss, 2) * np.append(std, dr)
    return as_strided(X, Xv_s, Xv_ss, writeable=False)

def convolve_stride(X, F, dr=None, std=None):
    if dr is None:
        dr = np.ones(X.ndim, dtype=int)
    if std is None:
        std = np.ones(X.ndim, dtype=int)
    if not (X.ndim == F.ndim == len(dr) == len(std)):
        raise ValueError('X.ndim, F.ndim, len(dr), len(std) must be the same')
    Xv = conv_view(X, F.shape, dr, std)
    return np.tensordot(Xv, F, axes=X.ndim)
%timeit -n 100 -r 10 convolve_stride(A, F)
#31.2 ms ± 1.31 ms per loop (mean ± std. dev. of 10 runs, 100 loops each)
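A and F aren't defined in the snippet above; a plausible setup for reproducing the benchmark (my assumption) would be:

A = np.random.rand(1000, 1000)  # input array
F = np.random.rand(9, 9)        # filter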
Super simple and fast convolution using only basic numpy:
import numpy as np
def conv2d(image, kernel):
    # apply kernel to image, return image of the same shape
    # assume both image and kernel are 2D arrays
    # kernel = np.flipud(np.fliplr(kernel))  # optionally flip the kernel
    k = kernel.shape[0]
    width = k // 2
    # place the image inside a frame to compensate for the kernel overlap
    a = framed(image, width)
    b = np.zeros(image.shape)  # fill the output array with zeros; do not use np.empty()
    # shift the image around each pixel, multiply by the corresponding kernel value and accumulate the results
    for p, dp, r, dr in [(i, i + image.shape[0], j, j + image.shape[1]) for i in range(k) for j in range(k)]:
        b += a[p:dp, r:dr] * kernel[p, r]
    # or just write two nested for loops if you prefer
    # np.clip(b, 0, 255, out=b)  # optionally clip values exceeding the limits
    return b

def framed(image, width):
    a = np.zeros((image.shape[0] + 2*width, image.shape[1] + 2*width))
    a[width:-width, width:-width] = image
    # alternatively fill the frame with ones or copy border pixels
    return a
Run it:
Image.fromarray(conv2d(image, kernel).astype('uint8'))
Instead of sliding the kernel along the image and computing the transformation pixel by pixel, create a series of shifted versions of the image corresponding to each element in the kernel and apply the corresponding kernel value to each of the shifted image versions.
This is probably the fastest you can get using just basic numpy; the speed is already comparable to the C implementation of scipy's convolve2d and better than fftconvolve. The idea is similar to @Tatarize's. This example works only for one color component; for RGB just repeat for each (or modify the algorithm accordingly).
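A rough harness to check the speed claim yourself (my addition; sizes are arbitrary):

import time
import numpy as np
from scipy.signal import convolve2d, fftconvolve

image = np.random.rand(512, 512)
kernel = np.random.rand(5, 5)

# timing comparison only; conv2d above does correlation (no kernel flip)
for name, f in [('numpy conv2d', lambda: conv2d(image, kernel)),
                ('scipy convolve2d', lambda: convolve2d(image, kernel, mode='same')),
                ('scipy fftconvolve', lambda: fftconvolve(image, kernel, mode='same'))]:
    t0 = time.perf_counter()
    f()
    print(name, time.perf_counter() - t0)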
Typically, 'Convolution 2D' is a misnomer. Ideally, under the hood, what's being done is a correlation of 2 matrices.
pad == 'same' returns the output with the same dimensions as the input.
It can also take asymmetric images. In order to perform correlation (convolution in deep learning lingo) on a batch of 2d matrices, one can iterate over all the channels and calculate the correlation for each channel slice with the respective filter slice.
For example: if the image is (28,28,3) and the filter size is (5,5,3), then take each of the 3 slices from the image channels, perform the cross correlation using the custom function below, and stack the resulting matrices in the respective dimension of the output (a sketch of this follows after the function).
def get_cross_corr_2d(W, X, pad='valid'):
    if pad == 'same':
        pr = int((W.shape[0] - 1)/2)
        pc = int((W.shape[1] - 1)/2)
        conv_2d = np.zeros((X.shape[0], X.shape[1]))
        X_pad = np.zeros((X.shape[0] + 2*pr, X.shape[1] + 2*pc))
        X_pad[pr:pr+X.shape[0], pc:pc+X.shape[1]] = X
        for r in range(conv_2d.shape[0]):
            for c in range(conv_2d.shape[1]):
                # element-wise product of the filter and the window, then sum
                conv_2d[r, c] = np.sum(np.multiply(W, X_pad[r:r+W.shape[0], c:c+W.shape[1]]))
        return conv_2d
    else:
        pr = W.shape[0] - 1
        pc = W.shape[1] - 1
        conv_2d = np.zeros((X.shape[0] - W.shape[0] + 2*pr + 1,
                            X.shape[1] - W.shape[1] + 2*pc + 1))
        X_pad = np.zeros((X.shape[0] + 2*pr, X.shape[1] + 2*pc))
        X_pad[pr:pr+X.shape[0], pc:pc+X.shape[1]] = X
        for r in range(conv_2d.shape[0]):
            for c in range(conv_2d.shape[1]):
                conv_2d[r, c] = np.sum(np.multiply(W, X_pad[r:r+W.shape[0], c:c+W.shape[1]]))
        return conv_2d
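A sketch of the per-channel stacking described above (my addition; it assumes W and X are (H, W, C) arrays):

import numpy as np

def cross_corr_channels(W, X, pad='valid'):
    # run the 2D routine on each channel slice and stack along the last axis
    return np.stack([get_cross_corr_2d(W[:, :, k], X[:, :, k], pad)
                     for k in range(X.shape[-1])], axis=-1)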
This code is incorrect:

for r in range(nr):
    data[r,:] = np.convolve(data[r,:], H_r, 'same')
for c in range(nc):
    data[:,c] = np.convolve(data[:,c], H_c, 'same')
See the Nussbaumer transformation from multidimensional convolution to one-dimensional.
I need to calculate the covariance matrix for RGB values across an image dataset, and then apply Cholesky decomposition to the final result.
The covariance matrix for RGB values is a 3x3 matrix M, where M_(i, i) is the variance of channel i and M_(i, j) is the covariance between channels i and j.
The end result should be something like this:
([[0.26, 0.09, 0.02],
[0.27, 0.00, -0.05],
[0.27, -0.09, 0.03]])
I'd prefer to stick to PyTorch functions even though Numpy has a Cov function.
I attempted to recreate the numpy Cov function in PyTorch here based on other cov implementations and clones:
def pytorch_cov(tensor, tensor2=None, rowvar=True):
    if tensor2 is not None:
        tensor = torch.cat((tensor, tensor2), dim=0)
    tensor = tensor.view(1, -1) if tensor.dim() < 2 else tensor
    tensor = tensor.t() if not rowvar and tensor.size(0) != 1 else tensor
    tensor = tensor - torch.mean(tensor, dim=1, keepdim=True)
    return 1 / (tensor.size(1) - 1) * tensor.mm(tensor.t())

def cov_vec(x):
    c = x.size(0)
    m1 = x - torch.sum(x, dim=[1], keepdims=True) / c
    out = torch.einsum('ijk,ilk->ijl', m1, m1) / (c - 1)
    return out
The dataset loading would be like this:
dataset = torchvision.datasets.ImageFolder(data_path)
loader = torch.utils.data.DataLoader(dataset)

for images, _ in loader:
    batch_size = images.size(0)
    ...
For the moment I'm just experimenting with images created with torch.randn(batch_size, 3, height, width).
Edit:
I'm attempting to replicate the matrix from Tensorflow's Lucid here, and somewhat explained on distill.pub here.
Second Edit:
In order to make the output resemble the example one, you have to do this instead of using Cholesky:
rgb_cov_tensor = rgb_cov_tensor / len(loader.dataset)
U, S, V = torch.svd(rgb_cov_tensor)
epsilon = 1e-10
svd_sqrt = U @ torch.diag(torch.sqrt(S + epsilon))
The resulting matrix can then be used to perform color decorrelation, which is useful for visualizing features (DeepDream). I've implemented it in my project here.
Here is a function for computing the (unbiased) sample covariance matrix on a 3 channel image, named rgb_cov. Cholesky decomposition is straightforward with torch.cholesky:
import torch
def rgb_cov(im):
    '''
    Assuming im is a torch.Tensor of shape (H, W, 3):
    '''
    im_re = im.reshape(-1, 3)
    im_re -= im_re.mean(0, keepdim=True)
    return 1/(im_re.shape[0]-1) * im_re.T @ im_re
#Test:
im = torch.randn(50,50,3)
cov = rgb_cov(im)
L_cholesky = torch.cholesky(cov)
I implemented FFT-based convolution in Pytorch and compared the result with spatial convolution via conv2d() function. The convolution filter used is an average filter. The conv2d() function produced smoothened output due to average filtering as expected but the fft-based convolution returned a more blurry output.
I have attached the code and outputs here -
spatial convolution -
from PIL import Image, ImageOps
import torch
from matplotlib import pyplot as plt
from torchvision.transforms import ToTensor
import torch.nn.functional as F
import numpy as np
im = Image.open("/kaggle/input/tiger.jpg")
im = im.resize((256,256))
gray_im = im.convert('L')
gray_im = ToTensor()(gray_im)
gray_im = gray_im.squeeze()
fil = torch.tensor([[1/9,1/9,1/9],[1/9,1/9,1/9],[1/9,1/9,1/9]])
conv_gray_im = gray_im.unsqueeze(0).unsqueeze(0)
conv_fil = fil.unsqueeze(0).unsqueeze(0)
conv_op = F.conv2d(conv_gray_im,conv_fil)
conv_op = conv_op.squeeze()
plt.figure()
plt.imshow(conv_op, cmap='gray')
FFT-based convolution -
def fftshift(image):
    sh = image.shape
    x = np.arange(0, sh[2], 1)
    y = np.arange(0, sh[3], 1)
    xm, ym = np.meshgrid(x, y)
    shifter = (-1)**(xm + ym)
    shifter = torch.from_numpy(shifter)
    return image * shifter
shift_im = fftshift(conv_gray_im)
padded_fil = F.pad(conv_fil, (0, gray_im.shape[0]-fil.shape[0], 0, gray_im.shape[1]-fil.shape[1]))
shift_fil = fftshift(padded_fil)
fft_shift_im = torch.rfft(shift_im, 2, onesided=False)
fft_shift_fil = torch.rfft(shift_fil, 2, onesided=False)
shift_prod = fft_shift_im*fft_shift_fil
shift_fft_conv = fftshift(torch.irfft(shift_prod, 2, onesided=False))
fft_op = shift_fft_conv.squeeze()
plt.figure('shifted fft')
plt.imshow(fft_op, cmap='gray')
original image -
spatial convolution output -
fft-based convolution output -
Could someone kindly explain the issue?
The main problem with your code is that Torch doesn't do complex numbers, the output of its FFT is a 3D array, with the 3rd dimension having two values, one for the real component and one for the imaginary. Consequently, the multiplication does not do a complex multiplication.
There currently is no complex multiplication defined in Torch (see this issue), we'll have to define our own.
A minor issue, but also important if you want to compare the two convolution operations, is the following:
The FFT takes the origin of its input in the first element (top-left pixel for an image). To avoid a shifted output, you need to generate a padded kernel where the origin of the kernel is the top-left pixel. This is quite tricky, actually...
Your current code:
fil = torch.tensor([[1/9,1/9,1/9],[1/9,1/9,1/9],[1/9,1/9,1/9]])
conv_fil = fil.unsqueeze(0).unsqueeze(0)
padded_fil = F.pad(conv_fil, (0, gray_im.shape[0]-fil.shape[0], 0, gray_im.shape[1]-fil.shape[1]))
generates a padded kernel where the origin is in pixel (1,1), rather than (0,0). It needs to be shifted by one pixel in each direction. NumPy has a function roll that is useful for this; I don't know the Torch equivalent (I'm not at all familiar with Torch). This should work:
fil = torch.tensor([[1/9,1/9,1/9],[1/9,1/9,1/9],[1/9,1/9,1/9]])
# pad the 2D kernel directly, keeping it 2D so the later rfft calls match
padded_fil = np.pad(fil.numpy(), ((0, gray_im.shape[0]-fil.shape[0]), (0, gray_im.shape[1]-fil.shape[1])))
padded_fil = np.roll(padded_fil, -1, axis=(0, 1))
padded_fil = torch.from_numpy(padded_fil)
Finally, your fftshift function, applied to the spatial-domain image, causes the frequency-domain image (the result of the FFT applied to the image) to be shifted such that the origin is in the middle of the image, rather than the top-left. This shift is useful when looking at the output of the FFT, but is pointless when computing the convolution.
Putting these things together, the convolution is now:
def complex_multiplication(t1, t2):
    real1, imag1 = t1[:,:,0], t1[:,:,1]
    real2, imag2 = t2[:,:,0], t2[:,:,1]
    return torch.stack([real1 * real2 - imag1 * imag2, real1 * imag2 + imag1 * real2], dim=-1)
fft_im = torch.rfft(gray_im, 2, onesided=False)
fft_fil = torch.rfft(padded_fil, 2, onesided=False)
fft_conv = torch.irfft(complex_multiplication(fft_im, fft_fil), 2, onesided=False)
Note that you can do one-sided FFTs to save a bit of computation time:
fft_im = torch.rfft(gray_im, 2, onesided=True)
fft_fil = torch.rfft(padded_fil, 2, onesided=True)
fft_conv = torch.irfft(complex_multiplication(fft_im, fft_fil), 2, onesided=True, signal_sizes=gray_im.shape)
Here the frequency domain is about half the size of the full FFT, but only redundant parts are left out. The result of the convolution is unchanged.
I'm trying to understand scipy.signal.deconvolve.
From the mathematical point of view a convolution is just the multiplication in fourier space so I would expect
that for two functions f and g:
Deconvolve(Convolve(f,g) , g) == f
In numpy/scipy this is either not the case or I'm missing an important point.
Although there are some questions related to deconvolve on SO already (like here and here) they do not address this point, others remain unclear (this) or unanswered (here). There are also two questions on SignalProcessing SE (this and this) the answers to which are not helpful in understanding how scipy's deconvolve function works.
The question would be:
How do you reconstruct the original signal f from a convolved signal, assuming you know the convolving function g?
Or in other words: How does this pseudocode Deconvolve(Convolve(f,g) , g) == f translate into numpy / scipy?
Edit: Note that this question is not targeted at preventing numerical inaccuracies (although this is also an open question) but at understanding how convolve/deconvolve work together in scipy.
The following code tries to do that with a Heaviside function and a Gaussian filter. As can be seen in the image, the result of deconvolving the convolution is not at all the original Heaviside function. I would be glad if someone could shed some light on this issue.
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt

# define Heaviside function
H = lambda x: 0.5 * (np.sign(x) + 1.)
# define Gaussian
gauss = lambda x, sig: np.exp(-(x/float(sig))**2)

X = np.linspace(-5, 30, num=3501)
X2 = np.linspace(-5, 5, num=1001)

# convolve a Heaviside with a Gaussian
H_c = np.convolve(H(X), gauss(X2, 1), mode="same")
# deconvolve the result
H_dc, er = scipy.signal.deconvolve(H_c, gauss(X2, 1))

#### Plot ####
fig, ax = plt.subplots(nrows=4, figsize=(6, 7))
ax[0].plot(H(X), color="#907700", label="Heaviside", lw=3)
ax[1].plot(gauss(X2, 1), color="#907700", label="Gauss filter", lw=3)
ax[2].plot(H_c/H_c.max(), color="#325cab", label="convolved", lw=3)
ax[3].plot(H_dc, color="#ab4232", label="deconvolved", lw=3)
for i in range(len(ax)):
    ax[i].set_xlim([0, len(X)])
    ax[i].set_ylim([-0.07, 1.2])
    ax[i].legend(loc=4)
plt.show()
Edit: Note that there is a matlab example, showing how to convolve/deconvolve a rectangular signal using
yc=conv(y,c,'full')./sum(c);
ydc=deconv(yc,c).*sum(c);
In the spirit of this question it would also help if someone was able to translate this example into python.
After some trial and error I found out how to interpret the results of scipy.signal.deconvolve(), and I post my findings as an answer.
Let's start with a working example code
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt

# let the signal be box-like
signal = np.repeat([0., 1., 0.], 100)

# and use a gaussian filter
# the filter should be shorter than the signal
# the filter should be such that it's much bigger than zero everywhere
gauss = np.exp(-((np.linspace(0, 50) - 25.)/float(12))**2)
print(gauss.min())  # = 0.013 >> 0

# calculate the convolution (np.convolve and scipy.signal.convolve identical)
# the keyword argument mode="same" ensures that the convolution spans the same
# shape as the input array.
#filtered = scipy.signal.convolve(signal, gauss, mode='same')
filtered = np.convolve(signal, gauss, mode='same')

deconv, _ = scipy.signal.deconvolve(filtered, gauss)
# the deconvolution has n = len(signal) - len(gauss) + 1 points
n = len(signal) - len(gauss) + 1
# so we need to expand it by
s = (len(signal) - n) // 2
# on both sides.
deconv_res = np.zeros(len(signal))
deconv_res[s:len(signal)-s-1] = deconv
deconv = deconv_res
# now deconv contains the deconvolution
# expanded to the original shape (filled with zeros)

#### Plot ####
fig, ax = plt.subplots(nrows=4, figsize=(6, 7))
ax[0].plot(signal, color="#907700", label="original", lw=3)
ax[1].plot(gauss, color="#68934e", label="gauss filter", lw=3)
# we need to divide by the sum of the filter window to get the convolution normalized to 1
ax[2].plot(filtered/np.sum(gauss), color="#325cab", label="convolved", lw=3)
ax[3].plot(deconv, color="#ab4232", label="deconvolved", lw=3)
for i in range(len(ax)):
    ax[i].set_xlim([0, len(signal)])
    ax[i].set_ylim([-0.07, 1.2])
    ax[i].legend(loc=1, fontsize=11)
    if i != len(ax)-1:
        ax[i].set_xticklabels([])
plt.savefig(__file__ + ".png")
plt.show()
This code produces the following image, showing exactly what we want (Deconvolve(Convolve(signal,gauss) , gauss) == signal)
Some important findings are:
The filter should be shorter than the signal
The filter should be much bigger than zero everywhere (here > 0.013 is good enough)
Using the keyword argument mode = 'same' to the convolution ensures that it lives on the same array shape as the signal.
The deconvolution has n = len(signal) - len(gauss) + 1 points.
So in order to let it also reside on the same original array shape we need to expand it by s = (len(signal)-n)//2 on both sides.
Of course, further findings, comments and suggestion to this question are still welcome.
As written in the comments, I cannot help with the example you posted originally. As @Stelios has pointed out, the deconvolution might not work out due to numerical issues.
I can, however, reproduce the example you posted in your Edit:
This is the code, a direct translation of the Matlab source:
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
x = np.arange(0., 20.01, 0.01)
y = np.zeros(len(x))
y[900:1100] = 1.
y += 0.01 * np.random.randn(len(y))
c = np.exp(-(np.arange(len(y))) / 30.)
yc = scipy.signal.convolve(y, c, mode='full') / c.sum()
ydc, remainder = scipy.signal.deconvolve(yc, c)
ydc *= c.sum()
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(4, 4))
ax[0][0].plot(x, y, label="original y", lw=3)
ax[0][1].plot(x, c, label="c", lw=3)
ax[1][0].plot(x[0:2000], yc[0:2000], label="yc", lw=3)
ax[1][1].plot(x, ydc, label="recovered y", lw=3)
plt.show()