Python numpy matrix of matrix

I have this code, and it works. It just seems like there may be a better way to do this. Does anyone know a cleaner solution?
def Matrix2toMatrix(Matrix2):
    scaleSize = len(Matrix2[0, 0])
    FinalMatrix = np.empty([len(Matrix2)*scaleSize, len(Matrix2[0])*scaleSize])
    for x in range(0, len(Matrix2)):
        for y in range(0, len(Matrix2[0])):
            for xFinal in range(0, scaleSize):
                for yFinal in range(0, scaleSize):
                    FinalMatrix[(x*scaleSize)+xFinal, (y*scaleSize)+yFinal] = Matrix2[x, y][xFinal, yFinal]
    return FinalMatrix
Here Matrix2 is a 4x4 matrix, with each cell containing a 2x2 matrix.
Full code in case anyone was wondering:
import matplotlib.pyplot as plt
import numpy as np
def Matrix2toMatrix(Matrix2):
    scaleSize = len(Matrix2[0, 0])
    FinalMatrix = np.empty([len(Matrix2)*scaleSize, len(Matrix2[0])*scaleSize])
    for x in range(0, len(Matrix2)):
        for y in range(0, len(Matrix2[0])):
            for xFinal in range(0, scaleSize):
                for yFinal in range(0, scaleSize):
                    FinalMatrix[(x*scaleSize)+xFinal, (y*scaleSize)+yFinal] = Matrix2[x, y][xFinal, yFinal]
    return FinalMatrix

XSize = 4
Xtest = np.array([[255, 255, 255, 255]
                 ,[255, 255, 255, 255]
                 ,[127, 127, 127, 127]
                 ,[0, 0, 0, 0]
                 ])
scaleFactor = 2

XMarixOfMatrix = np.empty([XSize, XSize], dtype=object)
Xexpanded = np.empty([XSize*scaleFactor, XSize*scaleFactor], dtype=int)  # careful, will contain garbage data

for xOrg in range(0, XSize):
    for yOrg in range(0, XSize):
        newMatrix = np.empty([scaleFactor, scaleFactor], dtype=int)  # careful, will contain garbage data
        # grab the original point's value
        pointValue = Xtest[xOrg, yOrg]
        newMatrix.fill(pointValue)
        # now write the data
        XMarixOfMatrix[xOrg, yOrg] = newMatrix

# need to concatenate all matrices together to form one larger matrix
Xexpanded = Matrix2toMatrix(XMarixOfMatrix)

img = plt.imshow(Xexpanded)
img.set_cmap('gray')
plt.axis('off')
plt.show()

Permute axes and reshape -
m,n = Matrix2.shape[0], Matrix2.shape[2]
out = Matrix2.swapaxes(1,2).reshape(m*n,-1)
For permuting axes, we could also use np.transpose or np.rollaxis, as functionally all are the same.
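For instance, the same permutation with np.transpose reads (a one-line equivalent, using the m, n defined above):

out = Matrix2.transpose(0, 2, 1, 3).reshape(m*n, -1)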
Verify with sample run -
In [17]: Matrix2 = np.random.rand(3,3,3,3)
# With given solution
In [18]: out1 = Matrix2toMatrix(Matrix2)
In [19]: m,n = Matrix2.shape[0], Matrix2.shape[2]
...: out2 = Matrix2.swapaxes(1,2).reshape(m*n,-1)
In [20]: np.allclose(out1, out2)
Out[20]: True
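As an aside, for the specific upscaling task in the full code (replicating each pixel into a scaleFactor x scaleFactor block), the matrix-of-matrices detour could arguably be skipped entirely with np.kron. A minimal sketch using the question's own Xtest:

import numpy as np

Xtest = np.array([[255, 255, 255, 255],
                  [255, 255, 255, 255],
                  [127, 127, 127, 127],
                  [0, 0, 0, 0]])
scaleFactor = 2

# Kronecker product with a block of ones replicates every pixel
# into a scaleFactor x scaleFactor block.
Xexpanded = np.kron(Xtest, np.ones((scaleFactor, scaleFactor), dtype=int))
print(Xexpanded.shape)  # (8, 8)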


Slicing sections around every index of an array

I need to slice sections out of a NumPy array in a specific way. Say I have a (200,200, 4) shape NumPy array. Then for every index in (200, 200), I want to select the 5x5x4 surrounding indexes, flatten it, and then put it into another array. So finally, the shape of the final array would be (200, 200, 100). Additionally, I want to delete all values at the location (:, :, 12). So finally, we'd get shape (200, 200, 99).
I've thought of two ways to go about this but they give different results and I'm not sure what I'm doing wrong.
Method 1:
import numpy as np

arr_lst = [np.random.normal(size=(200, 200)) for _ in range(4)]
slice_arr = np.zeros([200, 200, 99])

start = 0
for i, arr in enumerate(arr_lst):
    for idx, _ in np.ndenumerate(arr):
        # Getting surrounding 25 pixels
        pos_arr = arr[idx[0]-2:idx[0]+3, idx[1]-2:idx[1]+3]
        # Reshaping into size 25
        pos_arr = pos_arr.reshape(-1)
        # Near the boundaries slicing does not result in size 25
        if pos_arr.shape[0] != 25:
            pos_arr = np.full(25, np.nan)
        if i == 0:
            pos_arr = np.delete(pos_arr, 12)
            end = start + 25 - 1
        else:
            end = start + 25
        slice_arr[idx[0], idx[1], start:end] = pos_arr
    start = end

print(slice_arr[10, 100])
Method 2:
import numpy as np

arr_lst = [np.random.normal(size=(200, 200)) for _ in range(4)]
stacked_arr = np.stack(arr_lst, axis=2)

slice_arr = np.zeros([200, 200, 100])
for i in range(200):
    for j in range(200):
        x = stacked_arr[i-2:i+3, j-2:j+3, 0:4]
        if x.shape != (5, 5, 4):
            x = np.array([np.nan for _ in range(100)])
        else:
            x = x.reshape(100)
        slice_arr[i, j] = x

slice_arr = np.delete(slice_arr, 12, 2)
print(slice_arr[10, 100])
The first method gives me the array that I want in the correct order, but the second method feels more natural and faster. Another question I have is whether I can optimize this at all. Is there a fast way to slice around every index at the same time while keeping each slice the same shape, and then delete the entries we don't want afterwards?
Using @hpaulj's helpful comments, I designed a solution that I think works for my purposes. It's similar to what was suggested here: Rolling windows for ndarrays, but it has the additional border of np.nan values. In case anyone else finds this useful, I've posted it here; for debugging purposes, I've set the values in the padded array to coordinate tuples:
import numpy as np
from skimage.util.shape import view_as_windows

arr_lst = [np.empty(shape=(200, 200), dtype=tuple) for _ in range(4)]
arr_lst = [np.pad(x, pad_width=2, mode='constant', constant_values=np.nan) for x in arr_lst]
padded_arr = np.stack(arr_lst, axis=2)

for idx, _ in np.ndenumerate(padded_arr):
    padded_arr[idx[0], idx[1], idx[2]] = idx

w = view_as_windows(padded_arr, (5, 5, 4)).reshape(200, 200, 100)
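On NumPy 1.20+, a similar windowing is available without scikit-image via np.lib.stride_tricks.sliding_window_view. A minimal sketch of the equivalent padded-window extraction (the array names here are illustrative):

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

# Stack four 200x200 arrays and pad the spatial edges with NaN,
# so border windows exist but are marked invalid.
arrs = np.stack([np.random.normal(size=(200, 200)) for _ in range(4)], axis=2)
padded = np.pad(arrs, ((2, 2), (2, 2), (0, 0)), mode='constant', constant_values=np.nan)

# One 5x5x4 window centred on every original pixel, flattened to 100 values.
windows = sliding_window_view(padded, (5, 5, 4)).reshape(200, 200, 100)

# Drop the unwanted slot, e.g. index 12, afterwards.
windows = np.delete(windows, 12, axis=2)
print(windows.shape)  # (200, 200, 99)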

How to create a 2D array with N lots of random numbers?

I am trying to obtain a variance for a value I obtained by processing a 2x150 array into a discrete correlation function. In order to do this I need to randomly sample 80% of the original data N times, which will allow me to calculate a variance over these values.
I have so far been able to create one randomly sampled set of data using this:
rand_indices = []
running_var = (len(find_length)*0.8)
x = 0
while x < running_var:
    rand_inx = randint(0, (len(find_length)-1))
    rand_indices.append(rand_inx)
    x = x + 1
which creates an array 80% of the length of my original with randomly selected indices to be picked out and processed.
My problem is that I am not sure how to iterate this to get N sets of these random numbers, ideally in an N x 120 sized array. My whole code so far is:
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from random import randint

useless, just_to, find_length = np.loadtxt("w2_mjy_final.dat").T

w2_dat = np.loadtxt("w2_mjy_final.dat")
w2_rel = np.delete(w2_dat, 2, axis=1)
w2_array = np.asarray(w2_rel)

w1_dat = np.loadtxt("w1_mjy_final.dat")
w1_rel = np.delete(w1_dat, 2, axis=1)
w1_array = np.asarray(w1_rel)

peaks = []
y = 1
N = 0
x = 0
z = 0
rand_indices = []
rand_indices2d = []
running_var = (len(find_length)*0.8)

while z < N:
    while x < running_var:
        rand_inx = randint(0, (len(find_length)-1))
        rand_indices.append(rand_inx)
        x = x + 1
    rand_indices2d.append(rand_indices)
    z = z + 1

while y < N:
    w1_sampled = w1_array[rand_indices, :]
    w2_sampled = w2_array[rand_indices, :]
    w1s_t, w1s_dat = zip(*w1_sampled)
    w2s_t, w2s_dat = zip(*w2_sampled)
    w2s_mean = np.mean(w2s_dat)
    w2s_stdev = np.std(w2s_dat)
    w1s_mean = np.mean(w1s_dat)
    w1s_stdev = np.std(w1s_dat)
    taus = []
    dcfs = []
    bins = 40
    for i in w2s_t:
        for j in w1s_t:
            tau_datpoint = i - j
            taus.append(tau_datpoint)
    for k in w2s_dat:
        for l in w1s_dat:
            dcf_datpoint = ((k - w2s_mean)*(l - w1s_mean))/((w2s_stdev*w1s_stdev))
            dcfs.append(dcf_datpoint)
    plotdat = np.vstack((taus, dcfs)).T
    sort_plotdat = sorted(plotdat, key=lambda x: x[0])
    np.savetxt("w1sw2sarray.txt", sort_plotdat)
    taus_sort, dcfs_sort = np.loadtxt("w1sw2sarray.txt").T
    dcfs_means, taubins_edges, taubins_number = stats.binned_statistic(taus_sort, dcfs_sort, statistic='mean', bins=bins)
    taubin_edge = np.delete(taubins_edges, 0)
    import operator
    indexs, values = max(enumerate(dcfs_means), key=operator.itemgetter(1))
    percents = values*0.8
    dcf_lists = dcfs_means.tolist()
    centarr_negs, centarr_poss = np.split(dcfs_means, [indexs])
    centind_negs = np.argmin(np.abs(centarr_negs - percents))
    centind_poss = np.argmin(np.abs(centarr_poss - percents))
    lagcent_negs = taubins_edges[centind_negs]
    lagcent_poss = taubins_edges[int((bins/2)+centind_poss)]
    sampled_peak = (np.abs(lagcent_poss - lagcent_negs)/2) + lagcent_negs
    peaks.append(sampled_peak)
    y = y + 1

print(peaks)
Seeing as you're using NumPy already, why not use np.random.randint?
In your case:
np.random.randint(len(find_length)-1, size=(N, running_var))
This would give you an N x running_var sized matrix, with random integer entries from 0 to len(find_length)-2 inclusive (np.random.randint excludes the high endpoint).
Example Usage:
>>> N = 4
>>> running_var = 6
>>> find_length = [1, 2, 3]
>>> np.random.randint(len(find_length)-1, size=(N, running_var))
array([[1, 0, 1, 0, 0, 1],
       [1, 0, 1, 1, 0, 0],
       [1, 1, 0, 0, 1, 0],
       [1, 1, 0, 1, 0, 1]])
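To close the loop on the original goal (a variance over N resampled estimates), here is a minimal hedged sketch of how such an index matrix could drive the resampling. The data_array and the mean statistic are placeholders for the asker's actual data and per-sample peak computation; note that size arguments must be integers, so running_var needs an int() cast.

import numpy as np

rng = np.random.default_rng(0)
data_array = rng.normal(size=(150, 2))    # placeholder for the w1/w2-style data
N = 100                                   # number of resamples
sample_size = int(len(data_array) * 0.8)  # size arguments must be integers

# One row of indices per resample (sampling with replacement).
indices = rng.integers(0, len(data_array), size=(N, sample_size))

# Apply a per-sample statistic to each resample, then take the variance.
peaks = np.array([data_array[idx].mean() for idx in indices])
print(peaks.var())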

How to get the indices of a list's items in another list?

Consider I have these lists:
l = [5,6,7,8,9,10,5,15,20]
m = [10,5]
I want to get the index of m in l. I used list comprehension to do that:
[(i,i+1) for i,j in enumerate(l) if m[0] == l[i] and m[1] == l[i+1]]
Output : [(5,6)]
But if I have more numbers in m, I feel it's not the right way. So is there any easy approach in Python or with NumPy?
Another example:
l = [5,6,7,8,9,10,5,15,20,50,16,18]
m = [10,5,15,20]
The output should be:
[(5,6,7,8)]
The easiest way (using pure Python) would be to iterate over the items and check only whether the first item matches before comparing the whole sublist. This avoids doing sublist comparisons when not needed. Depending on the contents of your l, this could outperform even NumPy broadcasting solutions:
def func(haystack, needle):  # obviously needs a better name ...
    if not needle:
        return
    # just optimization
    lengthneedle = len(needle)
    firstneedle = needle[0]
    for idx, item in enumerate(haystack):
        if item == firstneedle:
            if haystack[idx:idx+lengthneedle] == needle:
                yield tuple(range(idx, idx+lengthneedle))
>>> list(func(l, m))
[(5, 6, 7, 8)]
In case you're interested in speed, I checked the performance of the approaches (borrowing from my setup here):
import random
import numpy as np

# strided_app is from https://stackoverflow.com/a/40085052/
def strided_app(a, L, S):  # Window len = L, Stride len/stepsize = S
    nrows = ((a.size-L)//S)+1
    n = a.strides[0]
    return np.lib.stride_tricks.as_strided(a, shape=(nrows, L), strides=(S*n, n))

def pattern_index_broadcasting(all_data, search_data):
    n = len(search_data)
    all_data = np.asarray(all_data)
    all_data_2D = strided_app(np.asarray(all_data), n, S=1)
    return np.flatnonzero((all_data_2D == search_data).all(1))

# view1D is from https://stackoverflow.com/a/45313353/
def view1D(a, b):  # a, b are arrays
    a = np.ascontiguousarray(a)
    void_dt = np.dtype((np.void, a.dtype.itemsize * a.shape[1]))
    return a.view(void_dt).ravel(), b.view(void_dt).ravel()

def pattern_index_view1D(all_data, search_data):
    a = strided_app(np.asarray(all_data), L=len(search_data), S=1)
    a0v, b0v = view1D(np.asarray(a), np.asarray(search_data))
    return np.flatnonzero(np.in1d(a0v, b0v))

def find_sublist_indices(haystack, needle):
    if not needle:
        return
    # just optimization
    lengthneedle = len(needle)
    firstneedle = needle[0]
    restneedle = needle[1:]
    for idx, item in enumerate(haystack):
        if item == firstneedle:
            if haystack[idx+1:idx+lengthneedle] == restneedle:
                yield tuple(range(idx, idx+lengthneedle))

def Divakar1(l, m):
    return np.squeeze(pattern_index_broadcasting(l, m)[:,None] + np.arange(len(m)))

def Divakar2(l, m):
    return np.squeeze(pattern_index_view1D(l, m)[:,None] + np.arange(len(m)))

def MSeifert(l, m):
    return list(find_sublist_indices(l, m))

# Timing setup
timings = {Divakar1: [], Divakar2: [], MSeifert: []}
sizes = [2**i for i in range(5, 20, 2)]

# Timing
for size in sizes:
    l = [random.randint(0, 50) for _ in range(size)]
    m = [random.randint(0, 50) for _ in range(10)]
    larr = np.asarray(l)
    marr = np.asarray(m)
    for func in timings:
        # first timings:
        # res = %timeit -o func(l, m)
        # second timings:
        if func is MSeifert:
            res = %timeit -o func(l, m)
        else:
            res = %timeit -o func(larr, marr)
        timings[func].append(res)

%matplotlib notebook

import matplotlib.pyplot as plt
import numpy as np

fig = plt.figure(1)
ax = plt.subplot(111)

for func in timings:
    ax.plot(sizes,
            [time.best for time in timings[func]],
            label=str(func.__name__))

ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('size')
ax.set_ylabel('time [seconds]')
ax.grid(which='both')
ax.legend()
plt.tight_layout()
In case your l and m are lists, my function outperforms the NumPy solutions for all sizes. But if you have them as NumPy arrays, you'll get faster results for large arrays (size > 1000 elements) with Divakar's NumPy solutions.
You are basically looking for the starting indices of a list in another list.
Approach #1 : One approach would be to create sliding windows of the elements of the list in which we are searching, giving us a 2D array, and then use NumPy broadcasting to compare the search list against each row of that 2D sliding-window array. Thus, one method would be -
# strided_app is from https://stackoverflow.com/a/40085052/
def strided_app(a, L, S):  # Window len = L, Stride len/stepsize = S
    nrows = ((a.size-L)//S)+1
    n = a.strides[0]
    return np.lib.stride_tricks.as_strided(a, shape=(nrows, L), strides=(S*n, n))

def pattern_index_broadcasting(all_data, search_data):
    n = len(search_data)
    all_data = np.asarray(all_data)
    all_data_2D = strided_app(np.asarray(all_data), n, S=1)
    return np.flatnonzero((all_data_2D == search_data).all(1))

out = np.squeeze(pattern_index_broadcasting(l, m)[:,None] + np.arange(len(m)))
Sample runs -
In [340]: l = [5,6,7,8,9,10,5,15,20,50,16,18]
     ...: m = [10,5,15,20]

In [341]: np.squeeze(pattern_index_broadcasting(l, m)[:,None] + np.arange(len(m)))
Out[341]: array([5, 6, 7, 8])

In [342]: l = [5,6,7,8,9,10,5,15,20,50,16,18,10,5,15,20]
     ...: m = [10,5,15,20]

In [343]: np.squeeze(pattern_index_broadcasting(l, m)[:,None] + np.arange(len(m)))
Out[343]:
array([[ 5,  6,  7,  8],
       [12, 13, 14, 15]])
Approach #2 : Another method would be to get the sliding windows and then take a row-wise scalar view into both the data to be searched in and the data to be searched for, giving us 1D data to work with, like so -
# view1D is from https://stackoverflow.com/a/45313353/
def view1D(a, b):  # a, b are arrays
    a = np.ascontiguousarray(a)
    void_dt = np.dtype((np.void, a.dtype.itemsize * a.shape[1]))
    return a.view(void_dt).ravel(), b.view(void_dt).ravel()

def pattern_index_view1D(all_data, search_data):
    a = strided_app(np.asarray(all_data), L=len(search_data), S=1)
    a0v, b0v = view1D(np.asarray(a), np.asarray(search_data))
    return np.flatnonzero(np.in1d(a0v, b0v))

out = np.squeeze(pattern_index_view1D(l, m)[:,None] + np.arange(len(m)))
2020 Versions
In search of more easy/compact approaches, we could look into scikit-image's view_as_windows for getting sliding windows with a built-in. I am assuming arrays as inputs for less messy code. For lists as input, we have to use np.asarray() as shown earlier.
Approach #3 : Basically a derivative of pattern_index_broadcasting with view_as_windows for a one-liner, with a as the larger data and b as the array to be searched for -
from skimage.util import view_as_windows
np.flatnonzero((view_as_windows(a,len(b))==b).all(1))[:,None]+np.arange(len(b))
Approach #4 : For a small number of matches of b in a, we could optimize by first looking for matches of b's first element, to reduce the dataset size for the subsequent window comparisons -
mask = a[:-len(b)+1]==b[0]
mask[mask] = (view_as_windows(a,len(b))[mask] == b).all(1)
out = np.flatnonzero(mask)[:,None]+np.arange(len(b))
Approach #5 : For a small-sized b, we could simply run a loop over each of the elements in b and perform a bitwise AND-reduction -
mask = np.bitwise_and.reduce([a[i:len(a)-len(b)+1+i]==b[i] for i in range(len(b))])
out = np.flatnonzero(mask)[:,None]+np.arange(len(b))
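As a quick sanity check, Approach #3 on the question's sample data (assuming array inputs, per the note above):

import numpy as np
from skimage.util import view_as_windows

a = np.array([5, 6, 7, 8, 9, 10, 5, 15, 20, 50, 16, 18])
b = np.array([10, 5, 15, 20])

# Compare every length-4 window of a against b; matching window starts
# are expanded into full index ranges.
out = np.flatnonzero((view_as_windows(a, len(b)) == b).all(1))[:, None] + np.arange(len(b))
print(out)  # [[5 6 7 8]]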
Just making the point that @MSeifert's approach can, of course, also be implemented in NumPy:
def pp(h, n):
    nn = len(n)
    NN = len(h)
    c = (h[:NN-nn+1]==n[0]).nonzero()[0]
    if c.size==0: return
    for i, l in enumerate(n[1:].tolist(), 1):
        c = c[h[i:][c]==l]
        if c.size==0: return
    return np.arange(c[0], c[0]+nn)
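For completeness, a small demo of pp on the question's data (note that it assumes NumPy array inputs and returns only the first match):

import numpy as np

h = np.array([5, 6, 7, 8, 9, 10, 5, 15, 20, 50, 16, 18])
n = np.array([10, 5, 15, 20])
print(pp(h, n))  # [5 6 7 8]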
from collections import defaultdict

def get_data(l1, l2):
    d = defaultdict(list)
    for index, item in enumerate(l1):
        d[item].append(index)
    print(d)

Using defaultdict to store the indices of the elements from the other list.
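A hedged sketch of how those stored indices could then be used to check candidate starting positions for m (my own completion, not part of the original answer):

from collections import defaultdict

l = [5, 6, 7, 8, 9, 10, 5, 15, 20, 50, 16, 18]
m = [10, 5, 15, 20]

d = defaultdict(list)
for index, item in enumerate(l):
    d[item].append(index)

# Only positions where m[0] occurs are candidate starts.
matches = [tuple(range(i, i + len(m)))
           for i in d[m[0]]
           if l[i:i + len(m)] == m]
print(matches)  # [(5, 6, 7, 8)]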

How to map pixels (R, G, B) in a collection of images to a distinct pixel-color-value indices?

Let's say one has 600 annotated semantic segmentation mask images, which contain 10 different colors, each representing one entity. These images are in a numpy array of shape (600, 3, 72, 96), where n = 600, 3 = RGB channels, 72 = height, 96 = width.
How to map each RGB-pixel in the numpy array to a color-index-value? For example, a color list would be [(128, 128, 0), (240, 128, 0), ...n], and all (240, 128, 0) pixels in the numpy array would be converted to index value in unique mapping (= 1).
How to do this efficiently and with less code? Here's one solution I came up with, but it's quite slow.
# Input imgs.shape = (N, 3, H, W), where (N = count, W = width, H = height)
def unique_map_pixels(imgs):
    original_shape = imgs.shape

    # imgs.shape = (N, H, W, 3)
    imgs = imgs.transpose(0, 2, 3, 1)

    # tupleview contains tuples [(R, G, B), (R, G, B), ...]
    tupleview = imgs.reshape(-1, 3).view(imgs.dtype.descr * imgs.shape[3])

    # get unique pixel values in images, [(R, G, B), ...]
    uniques = list(np.unique(tupleview))

    # map uniques into hashed dict ({"RXGXB": 0, "RXGXB": 1}, ...)
    uniqmap = {}
    idx = 0
    for x in uniques:
        uniqmap["%sX%sX%s" % (x[0], x[1], x[2])] = idx
        idx = idx + 1
        if idx >= np.iinfo(np.uint16).max:
            raise Exception("Can handle only %s distinct colors" % np.iinfo(np.uint16).max)

    # imgs1d contains RGB tuples, one per pixel
    imgs1d = tupleview.reshape(np.prod(tupleview.shape))

    # imgsmapped contains uniques-index values
    imgsmapped = np.empty((len(imgs1d))).astype(np.uint16)

    # map each pixel into its unique-pixel-ID
    idx = 0
    for x in imgs1d:
        key = ("%sX%sX%s" % (x[0], x[1], x[2]))
        imgsmapped[idx] = uniqmap[key]
        idx = idx + 1

    imgsmapped.shape = (original_shape[0], original_shape[2], original_shape[3])  # (N, H, W)
    return (imgsmapped, uniques)
Testing it:
import numpy as np

n = 30
pixelvalues = (np.random.rand(10)*255).astype(np.uint8)
images = np.random.choice(pixelvalues, (n, 3, 72, 96))

(mapped, pixelmap) = unique_map_pixels(images)
assert len(pixelmap) == mapped.max()+1
assert mapped.shape == (len(images), images.shape[2], images.shape[3])
assert pixelmap[mapped[int(n*0.5)][60][81]][0] == images[int(n*0.5)][0][60][81]
print("Done: %s" % list(mapped.shape))
Here's a compact vectorized approach without those error checks -
def unique_map_pixels_vectorized(imgs):
    N, H, W = len(imgs), imgs.shape[2], imgs.shape[3]
    img2D = imgs.transpose(0, 2, 3, 1).reshape(-1, 3)
    ID = np.ravel_multi_index(img2D.T, img2D.max(0)+1)
    _, firstidx, tags = np.unique(ID, return_index=True, return_inverse=True)
    return tags.reshape(N, H, W), img2D[firstidx]
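The key step is np.ravel_multi_index, which treats each (R, G, B) triple as coordinates in a grid of extent img2D.max(0)+1 per channel, collapsing every color to a single scalar ID so np.unique can operate on 1D data. A tiny illustration:

import numpy as np

# Three pixels as rows; identical colors collapse to identical IDs.
colors = np.array([[128, 64, 128],
                   [230, 50, 140],
                   [128, 64, 128]])
ids = np.ravel_multi_index(colors.T, colors.max(0) + 1)
print(ids)                                      # two distinct IDs, first == last
print(np.unique(ids, return_inverse=True)[1])   # [0 1 0]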
Runtime test and verification -
In [24]: # Setup inputs (3x smaller than original ones)
...: N,H,W = 200,24,32
...: imgs = np.random.randint(0,10,(N,3,H,W))
...:
In [25]: %timeit unique_map_pixels(imgs)
1 loop, best of 3: 2.21 s per loop
In [26]: %timeit unique_map_pixels_vectorized(imgs)
10 loops, best of 3: 37 ms per loop ## 60x speedup!
In [27]: map1,unq1 = unique_map_pixels(imgs)
...: map2,unq2 = unique_map_pixels_vectorized(imgs)
...:
In [28]: np.allclose(map1,map2)
Out[28]: True
In [29]: np.allclose(np.array(map(list,unq1)),unq2)
Out[29]: True
I have an image with 3 channels, and for each class I have the 3 pixel values such that if a pixel has those 3 values in its 3 channels, it belongs to class 'A'.
Basically I want to generate an array with as many channels as there are classes, with each class separated into a particular channel. This can be done:
num_classes = 2
seg_channel = np.zeros((image.shape[0], image.shape[1], num_classes))
pixel_class_dict = {'0': [128, 64, 128], '1': [230, 50, 140]}

for channel in range(num_classes):
    pixel_value = pixel_class_dict[str(channel)]
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            if list(image[i][j]) == pixel_value:
                seg_channel[i, j, channel] = 1
There is also another way to do this efficiently:
import numpy as np
import cv2

for class_id in pixel_class_dict:
    class_color = np.array(pixel_class_dict[class_id])
    seg_channel[:, :, int(class_id)] = cv2.inRange(mask, class_color, class_color).astype('bool').astype('float32')
This is what I do:
def rgb2mask(img):
    if img.shape[0] == 3:
        img = np.rollaxis(img, 0, 3)

    # collapse each (R, G, B) triple into a single integer ID
    W = np.power(256, [[0], [1], [2]])
    img_id = img.dot(W).squeeze(-1)

    values = np.unique(img_id)
    mask = np.zeros(img_id.shape)
    cmap = {}
    for i, c in enumerate(values):
        idx = img_id == c
        mask[idx] = i
        cmap[tuple(img[idx][0])] = i
    return mask, cmap
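A short usage sketch of rgb2mask on a tiny, made-up two-color image:

import numpy as np

# 2x2 channel-last (H, W, 3) image with two colors
img = np.array([[[128, 64, 128], [230, 50, 140]],
                [[128, 64, 128], [128, 64, 128]]], dtype=np.uint8)

mask, cmap = rgb2mask(img)
print(mask)  # [[0. 1.]
             #  [0. 0.]]
print(cmap)  # {(128, 64, 128): 0, (230, 50, 140): 1}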
If you want to map values according to an already existing dictionary, check out my answer on this thread: Convert RGB image to index image

Python scipy.numpy.convolve and scipy.signal.fftconvolve different results

I have two arrays (G and G_). They have the same shape and size, and I want to convolve them. I found numpy.convolve and fftconvolve.
My code looks like this:
foldedX = getFoldGradientsFFT(G, G_)
foldedY = getFoldGradientsNumpy(G, G_)

def getFoldGradientsFFT(G, G_):
    # convolve via scipy fast fourier transform
    X = signal.fftconvolve(G, G_, "same")
    X *= 255.0/numpy.max(X)
    return X

def getFoldGradientsNumpy(G, G_):
    # convolve via numpy.convolve
    Y = ndimage.convolve(G, G_)
    Y *= 255.0/numpy.max(Y)
    return Y
But the results aren't the same.
The results look like this:
numpy.convolve():
[ 11.60287582 3.28262652 18.80395211 52.75829556 99.61675945
147.74124258 187.66178244 215.06160439 234.1907606 229.04221552]
scipy.signal.fftconvolve:
[ -4.88130620e-15 6.74371119e-02 4.91875539e+00 1.94250997e+01
3.88227012e+01 6.70322921e+01 9.78460423e+01 1.08486302e+02
1.17267015e+02 1.15691562e+02]
I thought the results were supposed to be the same, even if the two functions convolve with a different procedure?!
I forgot to mention that I want to convolve two 2-dimensional arrays :S
The arrays:
G = array([[1,2],[3,4]])
G_ = array([[5,6],[7,8]])
The code:
def getFoldGradientsFFT(G, G_):
    X = signal.fftconvolve(G, G_, "same")
    X = X.astype("int")
    X *= 255.0/np.max(X)
    return X

def getFoldGradientsNumpy(G, G_):
    # convolve via numpy.convolve on the flattened arrays
    old_shape = G.shape
    G = np.reshape(G, G.size)
    G_ = np.reshape(G_, G.size)
    Y = np.convolve(G, G_, "same")
    Y = np.reshape(Y, old_shape)
    Y = Y.astype("int")
    Y *= 255.0/np.max(Y)
    return Y

def getFoldGradientsNDImage(G, G_):
    Y = ndimage.convolve(G, G_)
    Y = Y.astype("int")
    Y *= 255.0/np.max(Y)
    return Y
The results:
getFoldGradientsFFT
[[ 21  68]
 [ 93 255]]
getFoldGradientsNumpy
[[ 66 142]
 [250 255]]
getFoldGradientsNDImage
[[147 181]
 [220 255]]
numpy.convolve is for one-dimensional data.
The following code compares the results of signal.convolve, signal.fftconvolve, and ndimage.convolve.
For ndimage.convolve, we need to set the mode argument to "constant", and the origin argument to -1 when N is even and 0 when N is odd.
from scipy import signal
from scipy import ndimage
import numpy as np

np.random.seed(1)

for N in range(2, 20):
    a = np.random.randint(0, 10, size=(N, N))
    b = np.random.randint(0, 10, size=(N, N))
    r1 = signal.convolve(a, b, mode="same")
    r2 = signal.fftconvolve(a, b, mode="same")
    r3 = ndimage.convolve(a, b, mode="constant", origin=-1 if N % 2 == 0 else 0)
    print("N =", N)
    print(np.allclose(r1, r2))
    print(np.allclose(r2, r3))
getFoldGradientsNumpy is using scipy.ndimage.convolve. That does multi-dimensional convolution and is not the same as scipy.convolve.
For me, when convolving two one-dimensional arrays, scipy.convolve, scipy.signal.convolve, and scipy.signal.fftconvolve all return the same answer.
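A minimal check of that last point, comparing the three one-dimensional routines (np.convolve standing in for the old scipy.convolve alias):

import numpy as np
from scipy import signal

a = np.array([1.0, 2.0, 3.0, 4.0])
b = np.array([0.5, 1.0, 0.5])

r1 = np.convolve(a, b)         # plain NumPy, 1-D only
r2 = signal.convolve(a, b)     # scipy.signal, n-D capable
r3 = signal.fftconvolve(a, b)  # FFT-based

# All three agree for one-dimensional inputs.
print(np.allclose(r1, r2) and np.allclose(r1, r3))  # True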
