Convolving Sobel operator in x direction in frequency domain - Python

I implemented the code given by Cris Luengo for convolution in the frequency domain; however, I'm not getting the intended gradient image in the x direction.
Image without flipping the kernel in x and y direction:
Image after flipping the kernel:
If you notice, the second image is the same as the one given by the ImageFilter.Kernel filter from the Pillow library. Also, one thing to notice is that I don't have to flip the kernel if I apply the Sobel kernel in the y direction; then I get exactly the intended image.
This is my code:
import numpy as np
from scipy import misc
from scipy import fftpack
import matplotlib.pyplot as plt
from PIL import Image,ImageDraw,ImageOps,ImageFilter
from pylab import figure, title, imshow, hist, grid,show
im1=Image.open("astronaut.png").convert('L')
# im1=ImageOps.grayscale(im1)
img=np.array(im1)
# kernel = np.ones((3,3)) / 9
# kernel=np.array([[0,-1,0],[-1,4,-1],[0,-1,0]])
kernel=np.array([[-1,0,1],[-2,0,2],[-1,0,1]])
kernel=np.rot90(kernel,2)
print(kernel)
sz = (img.shape[0] - kernel.shape[0], img.shape[1] - kernel.shape[1])  # total amount of padding
kernel = np.pad(kernel, (((sz[0]+1)//2, sz[0]//2), ((sz[1]+1)//2, sz[1]//2)), 'constant')
kernel = fftpack.ifftshift(kernel)
filtered = np.real(fftpack.ifft2(fftpack.fft2(img) * fftpack.fft2(kernel))) + np.imag(fftpack.ifft2(fftpack.fft2(img) * fftpack.fft2(kernel)))
filtered=np.maximum(0,np.minimum(filtered,255))
im2=Image.open("astronaut.png").convert('L')
u = im2.filter(ImageFilter.Kernel((3,3), [-1,0,1,-2,0,2,-1,0,1], scale=1, offset=0))
fig2=figure()
ax1 = fig2.add_subplot(221)
ax2 = fig2.add_subplot(222)
ax3 = fig2.add_subplot(223)
ax1.title.set_text('Original Image')
ax2.title.set_text('After convolving in freq domain')
ax3.title.set_text('imagefilter conv')
ax1.imshow(img,cmap='gray')
ax2.imshow(filtered,cmap='gray')
ax3.imshow(np.array(u),cmap='gray')
show()

We can use the np.fft module's FFT implementation too; here is how we can obtain the convolution with the horizontal Sobel kernel in the frequency domain (by the convolution theorem):
h, w = im.shape
kernel = np.array([[-1, 0, 1],
                   [-2, 0, 2],
                   [-1, 0, 1]])  # sobel_filter_x
k = len(kernel) // 2 # assuming odd-length square kernel, here it's 3x3
kernel_padded = np.pad(kernel, [(h//2-k-1, h//2-k), (w//2-k-1, w//2-k)])
im_freq = np.fft.fft2(im) # input image frequency
kernel_freq = np.fft.fft2(kernel_padded) # kernel frequency
out_freq = im_freq * kernel_freq # frequency domain convolution output
out = np.fft.ifftshift(np.fft.ifft2(out_freq)).real # spatial domain output
The figure below shows the input, kernel, and output images in the spatial and frequency domains:
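Such a figure could be produced along the following lines from the arrays computed above. This is only a plotting sketch (assuming matplotlib is available as plt); np.fft.fftshift is used purely for display, and the spectra are shown as log-magnitudes:
import matplotlib.pyplot as plt
fig, axs = plt.subplots(2, 3, figsize=(12, 7))
panels = [('input', im, im_freq), ('kernel', kernel_padded, kernel_freq), ('output', out, out_freq)]
for col, (name, spatial, freq) in enumerate(panels):
    axs[0, col].imshow(spatial, cmap='gray')
    axs[0, col].set_title(name + ' (spatial)')
    # shift the zero frequency to the centre purely for display
    axs[1, col].imshow(np.log1p(np.abs(np.fft.fftshift(freq))), cmap='gray')
    axs[1, col].set_title(name + ' (log-magnitude spectrum)')
for ax in axs.ravel():
    ax.axis('off')
plt.tight_layout()
plt.show()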

Related

TensorFlow custom filter layer definition like GLCM or Gabor

I want to apply various filters like GLCM or a Gabor filter bank as a custom layer in TensorFlow, but I could not find enough custom layer examples. How can I apply these types of filters as a layer?
The process of generating GLCM is defined in the scikit-image library as follows:
from skimage.feature import greycomatrix, greycoprops
from skimage import data
#load image
img = data.brick()
#result glcm
glcm = greycomatrix(img, distances=[5], angles=[0], levels=256, symmetric=True, normed=True)
The use of Gabor filter bank is as follows:
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage as ndi
from skimage import data
from skimage.util import img_as_float
from skimage.filters import gabor_kernel
shrink = (slice(0, None, 3), slice(0, None, 3))
brick = img_as_float(data.brick())[shrink]
grass = img_as_float(data.grass())[shrink]
gravel = img_as_float(data.gravel())[shrink]
image_names = ('brick', 'grass', 'gravel')
images = (brick, grass, gravel)
def power(image, kernel):
    # Normalize images for better comparison.
    image = (image - image.mean()) / image.std()
    return np.sqrt(ndi.convolve(image, np.real(kernel), mode='wrap')**2 +
                   ndi.convolve(image, np.imag(kernel), mode='wrap')**2)
# Plot a selection of the filter bank kernels and their responses.
results = []
kernel_params = []
for theta in (0, 1):
    theta = theta / 4. * np.pi
    for sigmax in (1, 3):
        for sigmay in (1, 3):
            for frequency in (0.1, 0.4):
                kernel = gabor_kernel(frequency, theta=theta, sigma_x=sigmax, sigma_y=sigmay)
                params = 'theta=%d,f=%.2f\nsx=%.2f sy=%.2f' % (theta * 180 / np.pi, frequency, sigmax, sigmay)
                kernel_params.append(params)
                # Save kernel and the power image for each image
                results.append((kernel, [power(img, kernel) for img in images]))
fig, axes = plt.subplots(nrows=6, ncols=4, figsize=(5, 6))
plt.gray()
fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)
axes[0][0].axis('off')
# Plot original images
for label, img, ax in zip(image_names, images, axes[0][1:]):
    ax.imshow(img)
    ax.set_title(label, fontsize=9)
    ax.axis('off')
for label, (kernel, powers), ax_row in zip(kernel_params, results, axes[1:]):
    # Plot Gabor kernel
    ax = ax_row[0]
    ax.imshow(np.real(kernel))
    ax.set_ylabel(label, fontsize=7)
    ax.set_xticks([])
    ax.set_yticks([])
    # Plot Gabor responses with the contrast normalized for each filter
    vmin = np.min(powers)
    vmax = np.max(powers)
    for patch, ax in zip(powers, ax_row[1:]):
        ax.imshow(patch, vmin=vmin, vmax=vmax)
        ax.axis('off')
plt.show()
How do I define these and similar filters in TensorFlow?
I tried the above code, but it didn't give the same results as: https://scikit-image.org/docs/dev/auto_examples/features_detection/plot_gabor.html
This is what I got:
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.keras.backend as K
from tensorflow.keras import Input, layers
from tensorflow.keras.models import Model
from scipy import ndimage as ndi
from skimage import data
from skimage.util import img_as_float
from skimage.filters import gabor_kernel
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def gfb_filter(shape, size=3, tlist=[1,2,3], slist=[2,5], flist=[0.01,0.25], dtype=None):
    print(shape)
    fsize = np.ones([size, size])
    kernels = []
    for theta in tlist:
        theta = theta / 4. * np.pi
        for sigma in slist:
            for frequency in flist:
                kernel = np.real(gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma))
                kernels.append(kernel)
    gfblist = []
    for k, kernel in enumerate(kernels):
        ck = ndi.convolve(fsize, kernel, mode='wrap')
        gfblist.append(ck)
    gfblist = np.asarray(gfblist).reshape(size, size, 1, len(gfblist))
    print(gfblist.shape)
    return K.variable(gfblist, dtype='float32')
dimg=img_as_float(data.brick())
input_mat = dimg.reshape((1, 512, 512, 1))
def build_model():
    input_tensor = Input(shape=(512, 512, 1))
    x = layers.Conv2D(filters=12,
                      kernel_size=3,
                      kernel_initializer=gfb_filter,
                      strides=1,
                      padding='valid')(input_tensor)
    model = Model(inputs=input_tensor, outputs=x)
    return model
model = build_model()
out = model.predict(input_mat)
print(out)
o1=out.reshape(12,510,510)
plt.subplot(2,2,1)
plt.imshow(dimg)
plt.subplot(2,2,2)
plt.imshow(o1[0,:,:])
plt.subplot(2,2,3)
plt.imshow(o1[6,:,:])
plt.subplot(2,2,4)
plt.imshow(o1[10,:,:])
You can read the documentation about writing a custom layer, and about Making new Layers and Models via subclassing
Here is a simple implementation of the Gabor filter bank based on your code:
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from skimage.filters import gabor_kernel
class GaborFilterBank(layers.Layer):
    def __init__(self):
        super().__init__()
    def build(self, input_shape):
        # assumption: shape is NHWC
        self.n_channel = input_shape[-1]
        self.kernels = []
        for theta in range(4):
            theta = theta / 4.0 * np.pi
            for sigma in (1, 3):
                for frequency in (0.05, 0.25):
                    kernel = np.real(
                        gabor_kernel(
                            frequency, theta=theta, sigma_x=sigma, sigma_y=sigma
                        )
                    ).astype(np.float32)
                    # tf.nn.conv2d does cross-correlation, not convolution, so flipping
                    # the kernel is needed
                    kernel = np.flip(kernel)
                    # we stack the kernel on itself to match the number of channels of
                    # the input
                    kernel = np.stack((kernel,) * self.n_channel, axis=-1)
                    # adding the number of out channels, here 1
                    kernel = kernel[:, :, :, np.newaxis]
                    # because the kernel shapes are different, we can't do the conv op
                    # in one go, so we stack the kernels in a list
                    self.kernels.append(tf.Variable(kernel, trainable=False))
    def call(self, x):
        out_list = []
        for kernel in self.kernels:
            out_list.append(tf.nn.conv2d(x, kernel, strides=1, padding="SAME"))
        # output is [batch_size, H, W, 16] where 16 is the number of filters
        # 16 = n_theta * n_sigma * n_freq = 4 * 2 * 2
        return tf.concat(out_list, axis=-1)
There are some differences though:
TensorFlow does not have a "wrap" mode for convolution. I used "SAME", which is akin to "constant" with a padding value of 0 in scipy. It is possible to provide your own padding, so the "wrap" mode can definitely be mimicked; a rough sketch of one way to do it is given after this list.
tf.nn.conv2d expects a 4D input, so I add a batch dimension and a channel dimension to the img as an input.
the filters for tf.nn.conv2d must follow the shape [filter_height, filter_width, in_channels, out_channels]. In that case, I use the number of channels of the input as in_channels. out_channels could be equal to the number of filters in the filter bank, but because their shapes are not constant, it is easier to concatenate them afterwards, so I set it to 1. It means that the output of the layer is [N,H,W,C] where C is the number of filters in the bank (in your example, 16).
tf.nn.conv2d is not a real convolution, but a cross-correlation (see the doc), so flipping the filters beforehand is needed to get an actual convolution.
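As a rough illustration of the first point, periodic padding can be emulated by concatenating the opposite edges of the input before a "VALID" convolution. This is only a sketch (wrap_pad_2d is a hypothetical helper name, not a TensorFlow API), assuming an NHWC input:
import tensorflow as tf
def wrap_pad_2d(x, pad_h, pad_w):
    # emulate scipy's "wrap" mode: stitch the opposite edges onto the tensor
    x = tf.concat([x[:, -pad_h:, :, :], x, x[:, :pad_h, :, :]], axis=1)
    x = tf.concat([x[:, :, -pad_w:, :], x, x[:, :, :pad_w, :]], axis=2)
    return x
# e.g. for a k x k kernel: tf.nn.conv2d(wrap_pad_2d(x, k // 2, k // 2), kernel, strides=1, padding="VALID")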
I'm adding a quick example on how to use it:
# defining the model
inp = tf.keras.Input(shape=(512,512,1))
conv = tf.keras.layers.Conv2D(4, (3,3), padding="SAME")(inp)
g = GaborFilterBank()(conv)
model = tf.keras.Model(inputs=inp, outputs=g)
# calling the model with an example Image
img = img_as_float(data.brick())
img_nhwc = img[np.newaxis, :, :, np.newaxis]
out = model(img_nhwc)
# out shape is [1,512,512,16]

How do I create a shear matrix for PyTorch's F.affine_grid & F.grid_sample?

I need to create a shear matrix that is autograd compatible, works on B,C,H,W tensors, and takes input values (possibly generated randomly) for the shear values. How can I generate the shear matrix for this?
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from PIL import Image
# Load image
def preprocess_simple(image_name, image_size):
    Loader = transforms.Compose([transforms.Resize(image_size), transforms.ToTensor()])
    image = Image.open(image_name).convert('RGB')
    return Loader(image).unsqueeze(0)
# Save image
def deprocess_simple(output_tensor, output_name):
    output_tensor.clamp_(0, 1)
    Image2PIL = transforms.ToPILImage()
    image = Image2PIL(output_tensor.squeeze(0))
    image.save(output_name)
def get_shear_mat(theta):
    ...
    return shear_mat
def shear_img(x, theta, dtype):
    shear_mat = get_shear_mat(theta)
    grid = F.affine_grid(shear_mat, x.size()).type(dtype)
    x = F.grid_sample(x, grid)
    return x
# Shear tensor
test_input = # Test image
shear_values = (3,4) # Example values
sheared_tensor = shear_img(test_input, shear_values)
Say m is the shear factor, then theta = atan(1/m) is the shear angle.
You can now pick either horizontal shear or vertical shear. Here's how you implement get_shear_mat such that you can pick horizontal shear by setting ax=0 and vertical shear by setting ax=1:
def get_shear_mat(theta, ax=0):
    assert ax in [0, 1]
    m = 1 / torch.tan(torch.tensor(theta))
    if ax == 0:  # Horizontal shear
        shear_mat = torch.tensor([[1, m, 0],
                                  [0, 1, 0]])
    else:  # Vertical shear
        shear_mat = torch.tensor([[1, 0, 0],
                                  [m, 1, 0]])
    return shear_mat
Notice that a shear mapping is just a mapping of point (x,y) in the original image to the point (x+my,y) for horizontal shear, and (x,y+mx) for vertical shear. This is exactly what we do here by defining the shear_mat as above.
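As a tiny worked check (a sketch that assumes the get_shear_mat above and numpy imported as np), multiplying the horizontal shear matrix with a homogeneous point (x, y, 1) gives (x + m*y, y):
point = torch.tensor([2.0, 3.0, 1.0])     # homogeneous point (x, y, 1)
shear = get_shear_mat(np.pi / 4, ax=0)    # shear angle pi/4, so m is about 1
print(shear @ point)                      # approximately tensor([5., 3.])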
Here is an optional modification to shear_img to support the operation for a batched input along the first dimension. We also add an argument, ax, to shear_img to define whether we want a horizontal (ax=0) or vertical (ax=1) shear:
def shear_img(x, ax, theta, dtype):
    shear_mat = get_shear_mat(theta, ax)[None, ...].type(dtype).repeat(x.shape[0], 1, 1)
    grid = F.affine_grid(shear_mat, x.size()).type(dtype)
    x = F.grid_sample(x.type(dtype), grid)
    return x
Let's test this implementation on an image:
# Let im be a 4D tensor of shape BxCxHxW (an image or a batch of images):
dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor # Set type of data
sheared_im = shear_img(im, 0, np.pi/4, dtype) #Horizontal shear by shear angle of pi/4
plt.imshow(sheared_im.squeeze(0).permute(1,2,0)/255)
plt.show()
If im is our dancing cat with a skirt:
Then our plot will be:
If we want a vertical shear:
sheared_im = shear_img(im, 1, np.pi/4, dtype) # Vertical shear by shear angle of pi/4
plt.imshow(sheared_im.squeeze(0).permute(1, 2, 0)/255)
plt.show()
We obtain:
Hooray!

Extract N number of patches from an image

I have an image of dimension 155 x 240. Like the following:
I want to extract patches of a certain shape (25 x 25).
I don't want to take patches from the whole image.
I want to extract N patches from the non-zero (not background) area of the image. How can I do that? Any idea, suggestion, or implementation will be appreciated. You can try it with either Matlab or Python.
Note:
I have generated a random image so that you can process it for patching. The image_process variable is that image in this code.
import numpy as np
from scipy.ndimage.filters import convolve
import matplotlib.pyplot as plt
background = np.ones((155,240))
background[78,120] = 2
n_d = 50
y,x = np.ogrid[-n_d: n_d+1, -n_d: n_d+1]
mask = x**2+y**2 <= n_d**2
mask = 254*mask.astype(float)
image_process = convolve(background, mask)-sum(sum(mask))+1
image_process[image_process==1] = 0
image_process[image_process==255] = 1
plt.imshow(image_process)
Let's assume that the pixel values you want to omit are 0.
In this case what you could do is first find the indices of the non-zero values, then slice the image at the min/max positions to get only the desired area, and then simply apply extract_patches_2d with the desired window size and number of patches.
For example, given the dummy image you supplied:
import numpy as np
from scipy.ndimage.filters import convolve
import matplotlib.pyplot as plt
background = np.ones((155,240))
background[78,120] = 2
n_d = 50
y,x = np.ogrid[-n_d: n_d+1, -n_d: n_d+1]
mask = x**2+y**2 <= n_d**2
mask = 254*mask.astype(float)
image_process = convolve(background, mask)-sum(sum(mask))+1
image_process[image_process==1] = 0
image_process[image_process==255] = 1
plt.figure()
plt.imshow(image_process)
plt.show()
from sklearn.feature_extraction.image import extract_patches_2d
x, y = np.nonzero(image_process)
xl,xr = x.min(),x.max()
yl,yr = y.min(),y.max()
only_desired_area = image_process[xl:xr+1, yl:yr+1]
window_shape = (25, 25)
B = extract_patches_2d(only_desired_area, window_shape, max_patches=100) # B shape will be (100, 25, 25)
If you plot the only_desired_area you will get the following image:
This is the main logic; if you wish an even tighter bound, you should adjust the slicing accordingly.
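For instance, if you want every patch to be guaranteed to sit on the object itself (not just inside its bounding box), you could sample the patch centres directly from the non-zero pixels. This is only a sketch under the same assumption that 0 marks background; sample_patches is an illustrative helper, not a library function:
def sample_patches(img, patch_size=25, n_patches=100, seed=0):
    rng = np.random.default_rng(seed)
    half = patch_size // 2
    ys, xs = np.nonzero(img)
    # keep only centres far enough from the border for a full patch to fit
    keep = ((ys >= half) & (ys < img.shape[0] - half) &
            (xs >= half) & (xs < img.shape[1] - half))
    ys, xs = ys[keep], xs[keep]
    idx = rng.choice(len(ys), size=n_patches, replace=False)
    return np.stack([img[y - half:y + half + 1, x - half:x + half + 1]
                     for y, x in zip(ys[idx], xs[idx])])
patches = sample_patches(image_process)  # shape (100, 25, 25), every patch centred on the object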

How do I rotate a PyTorch image tensor around its center in a way that supports autograd?

I'd like to randomly rotate an image tensor (B, C, H, W) around its center (2D rotation, I think?). I would like to avoid using NumPy and Kornia, so that I basically only need to import from the torch module. I'm also not using torchvision.transforms, because I need it to be autograd compatible. Essentially I'm trying to create an autograd compatible version of torchvision.transforms.RandomRotation() for visualization techniques like DeepDream (so I need to avoid artifacts as much as possible).
import torch
import math
import random
import torchvision.transforms as transforms
from PIL import Image
# Load image
def preprocess_simple(image_name, image_size):
    Loader = transforms.Compose([transforms.Resize(image_size), transforms.ToTensor()])
    image = Image.open(image_name).convert('RGB')
    return Loader(image).unsqueeze(0)
# Save image
def deprocess_simple(output_tensor, output_name):
    output_tensor.clamp_(0, 1)
    Image2PIL = transforms.ToPILImage()
    image = Image2PIL(output_tensor.squeeze(0))
    image.save(output_name)
# Somehow rotate tensor around its center
def rotate_tensor(tensor, radians):
    ...
    return rotated_tensor
# Get a random angle within a specified range
r_degrees = 5
angle_range = list(range(-r_degrees, r_degrees))
n = random.randint(angle_range[0], angle_range[len(angle_range)-1])
# Convert angle from degrees to radians
ang_rad = n * math.pi / 180
# test_tensor = preprocess_simple('path/to/file', (512,512))
test_tensor = torch.randn(1,3,512,512)
# Rotate input tensor somehow
output_tensor = rotate_tensor(test_tensor, ang_rad)
# Optionally use this to check rotated image
# deprocess_simple(output_tensor, 'rotated_image.jpg')
Some example outputs of what I'm trying to accomplish:
So the grid generator and the sampler are sub-modules of the Spatial Transformer (JADERBERG, Max, et al.). These sub-modules are not trainable, they let you apply a learnable, as well as non-learnable, spatial transformation.
Here I take these two submodules and use them to rotate an image by theta using PyTorch's functions torch.nn.functional.affine_grid and torch.nn.functional.grid_sample (these functions are implementations of the generator and the sampler, respectively):
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
def get_rot_mat(theta):
    theta = torch.tensor(theta)
    return torch.tensor([[torch.cos(theta), -torch.sin(theta), 0],
                         [torch.sin(theta), torch.cos(theta), 0]])
def rot_img(x, theta, dtype):
    rot_mat = get_rot_mat(theta)[None, ...].type(dtype).repeat(x.shape[0], 1, 1)
    grid = F.affine_grid(rot_mat, x.size()).type(dtype)
    x = F.grid_sample(x, grid)
    return x
#Test:
dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
#im should be a 4D tensor of shape B x C x H x W with type dtype, range [0,255]:
plt.imshow(im.squeeze(0).permute(1,2,0)/255) #To plot it im should be 1 x C x H x W
plt.figure()
#Rotation by np.pi/2 with autograd support:
rotated_im = rot_img(im, np.pi/2, dtype) # Rotate image by 90 degrees.
plt.imshow(rotated_im.squeeze(0).permute(1,2,0)/255)
In the example above, assume we take our image, im, to be a dancing cat in a skirt:
rotated_im will be a 90-degrees CCW rotated dancing cat in a skirt:
And this is what we get if we call rot_img with theta equal to np.pi/4:
And the best part is that it's differentiable w.r.t. the input and has autograd support! Hooray!
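As a quick sanity check that gradients really do flow back to the input, here is a minimal sketch (run on the CPU, with a random tensor standing in for im):
im_check = torch.randn(1, 3, 64, 64, requires_grad=True)
rotated = rot_img(im_check, np.pi / 4, torch.FloatTensor)
rotated.sum().backward()
print(im_check.grad.shape)  # torch.Size([1, 3, 64, 64])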
With torchvision it should be simple:
import torchvision.transforms.functional as TF
angle = 30
x = torch.randn(1,3,512,512)
out = TF.rotate(x, angle)
For example if x is:
out with a 30 degree rotation is (NOTE: counterclockwise):
There is a pytorch function for that:
x = torch.tensor([[0, 1],
[2, 3]])
x = torch.rot90(x, 1, [0, 1])
>> tensor([[1, 3],
[0, 2]])
Here are the docs: https://pytorch.org/docs/stable/generated/torch.rot90.html
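Note that torch.rot90 only rotates in multiples of 90 degrees, but it works on tensors of any rank; for a B x C x H x W image you would rotate in the two spatial dims, for example (a small sketch):
img = torch.randn(1, 3, 512, 512)
rotated = torch.rot90(img, 1, [2, 3])  # one 90-degree turn in the H/W plane
print(rotated.shape)                   # torch.Size([1, 3, 512, 512])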

torch.rfft - fft-based convolution creating different output than spatial convolution

I implemented FFT-based convolution in PyTorch and compared the result with spatial convolution via the conv2d() function. The convolution filter used is an average filter. The conv2d() function produced a smoothed output due to the average filtering, as expected, but the FFT-based convolution returned a more blurry output.
I have attached the code and outputs here -
spatial convolution -
from PIL import Image, ImageOps
import torch
from matplotlib import pyplot as plt
from torchvision.transforms import ToTensor
import torch.nn.functional as F
import numpy as np
im = Image.open("/kaggle/input/tiger.jpg")
im = im.resize((256,256))
gray_im = im.convert('L')
gray_im = ToTensor()(gray_im)
gray_im = gray_im.squeeze()
fil = torch.tensor([[1/9,1/9,1/9],[1/9,1/9,1/9],[1/9,1/9,1/9]])
conv_gray_im = gray_im.unsqueeze(0).unsqueeze(0)
conv_fil = fil.unsqueeze(0).unsqueeze(0)
conv_op = F.conv2d(conv_gray_im,conv_fil)
conv_op = conv_op.squeeze()
plt.figure()
plt.imshow(conv_op, cmap='gray')
FFT-based convolution -
def fftshift(image):
    sh = image.shape
    x = np.arange(0, sh[2], 1)
    y = np.arange(0, sh[3], 1)
    xm, ym = np.meshgrid(x, y)
    shifter = (-1)**(xm + ym)
    shifter = torch.from_numpy(shifter)
    return image * shifter
shift_im = fftshift(conv_gray_im)
padded_fil = F.pad(conv_fil, (0, gray_im.shape[0]-fil.shape[0], 0, gray_im.shape[1]-fil.shape[1]))
shift_fil = fftshift(padded_fil)
fft_shift_im = torch.rfft(shift_im, 2, onesided=False)
fft_shift_fil = torch.rfft(shift_fil, 2, onesided=False)
shift_prod = fft_shift_im*fft_shift_fil
shift_fft_conv = fftshift(torch.irfft(shift_prod, 2, onesided=False))
fft_op = shift_fft_conv.squeeze()
plt.figure('shifted fft')
plt.imshow(fft_op, cmap='gray')
original image -
spatial convolution output -
fft-based convolution output -
Could someone kindly explain the issue?
The main problem with your code is that Torch doesn't do complex numbers: the output of its FFT is a 3D array, with the 3rd dimension holding two values, one for the real component and one for the imaginary. Consequently, the multiplication does not do a complex multiplication.
There currently is no complex multiplication defined in Torch (see this issue), so we'll have to define our own.
A minor issue, but also important if you want to compare the two convolution operations, is the following:
The FFT takes the origin of its input in the first element (top-left pixel for an image). To avoid a shifted output, you need to generate a padded kernel where the origin of the kernel is the top-left pixel. This is quite tricky, actually...
Your current code:
fil = torch.tensor([[1/9,1/9,1/9],[1/9,1/9,1/9],[1/9,1/9,1/9]])
conv_fil = fil.unsqueeze(0).unsqueeze(0)
padded_fil = F.pad(conv_fil, (0, gray_im.shape[0]-fil.shape[0], 0, gray_im.shape[1]-fil.shape[1]))
generates a padded kernel where the origin is in pixel (1,1), rather than (0,0). It needs to be shifted by one pixel in each direction. NumPy has a function roll that is useful for this; I don't know the Torch equivalent (I'm not at all familiar with Torch). This should work:
fil = torch.tensor([[1/9,1/9,1/9],[1/9,1/9,1/9],[1/9,1/9,1/9]])
padded_fil = fil.numpy()
padded_fil = np.pad(padded_fil, ((0, gray_im.shape[0]-fil.shape[0]), (0, gray_im.shape[1]-fil.shape[1])))
padded_fil = np.roll(padded_fil, -1, axis=(0, 1))
padded_fil = torch.from_numpy(padded_fil)
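(As it turns out, Torch does have an equivalent, torch.roll, so the same padded kernel can also be built without the NumPy round trip. A small sketch, assuming F is torch.nn.functional as in your code and a Torch version that provides torch.roll:)
fil = torch.tensor([[1/9,1/9,1/9],[1/9,1/9,1/9],[1/9,1/9,1/9]])
padded_fil = F.pad(fil, (0, gray_im.shape[1]-fil.shape[1], 0, gray_im.shape[0]-fil.shape[0]))
padded_fil = torch.roll(padded_fil, shifts=(-1, -1), dims=(0, 1))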
Finally, your fftshift function, applied to the spatial-domain image, causes the frequency-domain image (the result of the FFT applied to the image) to be shifted such that the origin is in the middle of the image, rather than the top-left. This shift is useful when looking at the output of the FFT, but is pointless when computing the convolution.
Putting these things together, the convolution is now:
def complex_multiplication(t1, t2):
    real1, imag1 = t1[:, :, 0], t1[:, :, 1]
    real2, imag2 = t2[:, :, 0], t2[:, :, 1]
    return torch.stack([real1 * real2 - imag1 * imag2, real1 * imag2 + imag1 * real2], dim=-1)
fft_im = torch.rfft(gray_im, 2, onesided=False)
fft_fil = torch.rfft(padded_fil, 2, onesided=False)
fft_conv = torch.irfft(complex_multiplication(fft_im, fft_fil), 2, onesided=False)
Note that you can do one-sided FFTs to save a bit of computation time:
fft_im = torch.rfft(gray_im, 2, onesided=True)
fft_fil = torch.rfft(padded_fil, 2, onesided=True)
fft_conv = torch.irfft(complex_multiplication(fft_im, fft_fil), 2, onesided=True, signal_sizes=gray_im.shape)
Here the frequency domain is about half the size of the full FFT's, but only the redundant parts are left out. The result of the convolution is unchanged.
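As a quick consistency check (a sketch that reuses gray_im, fil, and fft_conv from above): away from the image border, the circular FFT-based result should match the "valid" output of F.conv2d once the one-pixel kernel border is sliced off:
spatial = F.conv2d(gray_im[None, None], fil[None, None]).squeeze()  # 254 x 254 "valid" output
print(torch.allclose(fft_conv[1:-1, 1:-1], spatial, atol=1e-4))     # expected: True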
