How (in Python 3) can I extract several n-by-m blocks from an M-by-N array, centered on each element?
For example, in a 9-by-9 matrix, using a 3-by-3 block, I need to extract the block at each position of the matrix. The challenge (for me) is that the position of the (i, j) element inside its respective block changes near the borders.
Here is an image where I show the 9 blocks (for 9 positions) of a 9-by-9 matrix (of course there are 81 blocks to extract).
The code below works correctly (ONLY) for the corners. Here the size of the block (Wsize-by-Wsize) is an odd number so that the index (Eindex[0], Eindex[1]) sits in the middle.
def windowing(array, Wsize, Eindex):
    '''
    Extract a sub-array of `array` for the element located at index 'Eindex'.
    Wsize-by-Wsize is the shape of the window.
    '''
    block = np.zeros(tuple(Wsize))
    k0 = int((Wsize[0]-1)/2)
    k1 = int((Wsize[1]-1)/2)
    s0 = Wsize[0]
    s1 = Wsize[1]
    I = array.shape[0]-1  # last index in i-direction
    J = array.shape[1]-1  # last index in j-direction
    if (Eindex[0]==0) and (Eindex[1]==0):
        block = array[0:Eindex[0]+s0, 0:Eindex[1]+s1]
        return block
    elif (Eindex[0]==I) and (Eindex[1]==0):
        block = array[-s0:, 0:Eindex[1]+s1]
        return block
    elif (Eindex[0]==0) and (Eindex[1]==J):
        block = array[0:Eindex[0]+s0, -s1:]
        return block
    elif (Eindex[0]==I) and (Eindex[1]==J):
        block = array[-s0:, -s1:]
        return block
For example, check:
x = np.arange(81).reshape(9,9)
print(windowing(x,[3,3],[0,0]))
print(windowing(x,[3,3],[8,8]))
print(windowing(x,[3,3],[8,0]))
print(windowing(x,[3,3],[0,8]))
Here is an approach that takes arbitrary arrays, coordinates and window sizes.
def compute_indices(c, ws, length):
    # default setting: % operations to accommodate odd/even window sizes
    low, high = c - (ws//2), c + (ws//2) + ws%2
    # correction for overlap with borders of array
    if low < 0:
        low, high = 0, ws
    elif high > length:
        low, high = -ws, None
    return low, high
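For instance, probing compute_indices by hand at the borders of a length-9 axis (a quick illustrative check, not from the original answer):
print(compute_indices(0, 3, 9))   # (0, 3)      -- window clipped to the left edge
print(compute_indices(4, 3, 9))   # (3, 6)      -- interior point, window centered
print(compute_indices(8, 3, 9))   # (-3, None)  -- window clipped to the right edge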
def extract_block(arr, coords, window_size=(3,3)):
    # extract array shapes and window sizes into single variables
    len_r, len_c = arr.shape
    wsr, wsc = window_size
    # extract coords and correct for 0-indexing
    r, c = coords
    r0, c0 = r-1, c-1
    row_low, row_high = compute_indices(r0, wsr, len_r)
    col_low, col_high = compute_indices(c0, wsc, len_c)
    return arr[row_low:row_high, col_low:col_high]
Test cases:
a = np.arange(81).reshape(9,9)
extract_block(a, [1,1], (3,3))
array([[ 0,  1,  2],
       [ 9, 10, 11],
       [18, 19, 20]])
extract_block(a, [9,9], (3,3))
array([[60, 61, 62],
       [69, 70, 71],
       [78, 79, 80]])
extract_block(a, [5,2], (3,6))
array([[27, 28, 29, 30, 31, 32],
       [36, 37, 38, 39, 40, 41],
       [45, 46, 47, 48, 49, 50]])
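To gather all 81 blocks of the 9-by-9 example, one could simply loop over every position with the function above; a minimal sketch (note that extract_block expects 1-based coordinates):
a = np.arange(81).reshape(9, 9)
# collect one 3x3 block per element, looping over 1-based coordinates
blocks = [extract_block(a, (r, c), (3, 3))
          for r in range(1, a.shape[0] + 1)
          for c in range(1, a.shape[1] + 1)]
print(len(blocks))   # 81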
You can use numpy like this:
import numpy as np

# Array 9 by 9
x = np.arange(81).reshape((9, 9))

# -1 is important for the indexing
desired_position = np.array([[1,1], [1,5], [1,9], [5,1], [5,5], [5,9], [9,1], [9,5], [9,9]]) - 1
#print(desired_position)

for dp in desired_position:
    pos = []
    p1, p2 = dp[0] - 1, dp[0] + 2
    if p1 <= 0:
        p1, p2 = 0, 3
    elif p2 >= x.shape[0]:
        p2, p1 = x.shape[0], x.shape[0] - 3
    pos.append([p1, p2])
    p1, p2 = dp[1] - 1, dp[1] + 2
    if p1 <= 0:
        p1, p2 = 0, 3
    elif p2 >= x.shape[1]:
        p2, p1 = x.shape[1], x.shape[1] - 3
    pos.append([p1, p2])
    print(x[pos[0][0]:pos[0][1], pos[1][0]:pos[1][1]])
Please read the NumPy docs for further information.
I edited the code, so now it works.
Related
I have a list of crops. I need to divide the list into sublists of 48 items and then plot them in a mosaic; so far I have been doing it manually. How can I do it automatically?
This is the code I use:
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
import cv2  # needed for cv2.cvtColor below
p1 = listf[:48]
p2 = listf[48:96]
p3 = listf[96:144]
p4 = listf[144:192]
p5 = listf[192:240]
p6 = listf[240:288]
p7 = listf[288:336]
p8 = listf[336:384]
p9 = listf[384:432]
p10 = listf[432:480]
p11 = listf[480:528]
p12 = listf[528:576]
p13 = listf[576:624]
p14 = listf[624:642]
final = [p1,p2,p3,p4,p5,p6,p7,p8,p9,p10,p11,p12,p13,p14]
for idx, part in enumerate(final):
    nc = 8
    #fig = plt.figure(figsize=(8, (len(part)/6) * 8), dpi=600)
    fig = plt.figure(figsize=(9, 6), dpi=300)
    grid = ImageGrid(fig, 111,  # similar to subplot(111)
                     #nrows_ncols=(int((len(part))/2), nc),  # creates 12x2 grid of axes
                     nrows_ncols=(6, nc),
                     axes_pad=0.2,  # pad between axes in inch.
                     )
    for ax, im in zip(grid, part):
        # Iterating over the grid returns the Axes.
        ax.tick_params(labelbottom=False, labeltop=False, labelleft=False, labelright=False)
        ax.imshow(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))
    fig.suptitle('Predicted vs Real', fontsize=15)
my_lists = list(zip(*[iter(my_big_list)]*48))
is a common(ish) pattern to do this without numpy or pandas, I think (note that it silently drops a final, incomplete chunk).
A more readable version:
split_size = 48
my_lists = [my_big_list[i:i+split_size] for i in range(0,len(my_big_list),split_size)]
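As a quick illustration of the difference between the two approaches, here is a minimal sketch using a made-up 10-element list and chunks of 4 (the zip/iter pattern drops the incomplete tail, the slicing comprehension keeps it):
data = list(range(10))   # hypothetical stand-in for listf
chunk = 4

# zip/iter pattern: zip stops at the shortest iterator, so the last 2 items are dropped
print(list(zip(*[iter(data)] * chunk)))                          # [(0, 1, 2, 3), (4, 5, 6, 7)]

# slicing comprehension: keeps the short final chunk
print([data[i:i + chunk] for i in range(0, len(data), chunk)])   # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]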
If you need each group to have 48, and are sure that the total is divisible by 48:
final = np.array_split(listf, len(listf)//48)
If you need 14 groups:
final = np.array_split(listf, 14)
If you aren't sure that it's divisible by 48:
listf = [1,3] * 40 * 10
len_list = len(listf)
if len_list % 48:
    x = 48 * (len_list // 48)
    temp_list = listf[x:]
    listf = listf[:x]
final = np.array_split(listf, len(listf)//48)
final.append(temp_list)
print([len(x) for x in final])
Output, all 48 except the last set:
[48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 32]
I need to perform the subtractions inside the red frames, i.e. [20-10, 60-40, 100-70],
which results in [10, 20, 30].
My current code makes subtractions, but I don't know how to define the red frames:
seq = [10, 20, 40, 60, 70, 100]
window_size = 2
for i in range(len(seq) - window_size + 1):
    x = seq[i: i + window_size]
    y = x[1] - x[0]
    print(y)
You can build a quick solution using the fact that seq[0::2] will give you every other element of seq starting at zero. So you can compute seq[1::2] - seq[0::2] to get this result.
Without using any packages you could do:
seq = [10, 20, 40, 60, 70, 100]
sub_seq = [0] * (len(seq)//2)
for i in range(len(sub_seq)):
    sub_seq[i] = seq[1::2][i] - seq[0::2][i]
print(sub_seq)
Instead, you could use NumPy. With the numpy array object you can subtract the arrays directly rather than looping explicitly:
import numpy as np
seq = np.array([10, 20, 40, 60, 70, 100])
sub_seq = seq[1::2] - seq[0::2]
print(sub_seq)
Here's a solution using numpy which might be useful if you have to process large amounts of data in a short time. We select values based on whether their index is even (index % 2 == 0) or odd (index % 2 != 0).
import numpy as np
seq = [10, 20, 40, 60, 70, 100]
seq = np.array(seq)
index = np.arange(len(seq))
seq[index % 2 != 0] - seq[index % 2 == 0]
I have this code, and it works. It just seems like there may be a better way to do this. Does anyone know a cleaner solution?
def Matrix2toMatrix(Matrix2):
    scaleSize = len(Matrix2[0, 0])
    FinalMatrix = np.empty([len(Matrix2)*scaleSize, len(Matrix2[0])*scaleSize])
    for x in range(0, len(Matrix2)):
        for y in range(0, len(Matrix2[0])):
            for xFinal in range(0, scaleSize):
                for yFinal in range(0, scaleSize):
                    FinalMatrix[(x*scaleSize)+xFinal, (y*scaleSize)+yFinal] = Matrix2[x, y][xFinal, yFinal]
    return FinalMatrix
Here, Matrix2 is a 4x4 matrix, with each cell containing a 2x2 matrix.
Full code in case anyone was wondering:
import matplotlib.pyplot as plt
import numpy as np
def Matrix2toMatrix(Matrix2):
    scaleSize = len(Matrix2[0, 0])
    FinalMatrix = np.empty([len(Matrix2)*scaleSize, len(Matrix2[0])*scaleSize])
    for x in range(0, len(Matrix2)):
        for y in range(0, len(Matrix2[0])):
            for xFinal in range(0, scaleSize):
                for yFinal in range(0, scaleSize):
                    FinalMatrix[(x*scaleSize)+xFinal, (y*scaleSize)+yFinal] = Matrix2[x, y][xFinal, yFinal]
    return FinalMatrix
XSize = 4
Xtest = np.array([[255, 255, 255, 255]
                 ,[255, 255, 255, 255]
                 ,[127, 127, 127, 127]
                 ,[0, 0, 0, 0]
                 ])
scaleFactor = 2
XMarixOfMatrix = np.empty([XSize, XSize], dtype=object)
Xexpanded = np.empty([XSize*scaleFactor, XSize*scaleFactor], dtype=int) # careful, will contain garbage data
for xOrg in range(0, XSize):
    for yOrg in range(0, XSize):
        newMatrix = np.empty([scaleFactor, scaleFactor], dtype=int)  # careful, will contain garbage data
        # grab org point equivalent
        pointValue = Xtest[xOrg, yOrg]
        newMatrix.fill(pointValue)
        # now write the data
        XMarixOfMatrix[xOrg, yOrg] = newMatrix
# need to concatenate all the matrices together to form one larger matrix
Xexpanded = Matrix2toMatrix(XMarixOfMatrix)
img = plt.imshow(Xexpanded)
img.set_cmap('gray')
plt.axis('off')
plt.show()
Permute axes and reshape -
m,n = Matrix2.shape[0], Matrix2.shape[2]
out = Matrix2.swapaxes(1,2).reshape(m*n,-1)
For permuting axes, we could also use np.transpose or np.rollaxis, as functionally all are the same.
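For instance, a sketch of the same operation written with np.transpose instead of swapaxes (out_t is just an illustrative name; Matrix2, m and n are as above):
out_t = Matrix2.transpose(0, 2, 1, 3).reshape(m*n, -1)   # identical to swapaxes(1, 2) followed by reshape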
Verify with sample run -
In [17]: Matrix2 = np.random.rand(3,3,3,3)
# With given solution
In [18]: out1 = Matrix2toMatrix(Matrix2)
In [19]: m,n = Matrix2.shape[0], Matrix2.shape[2]
...: out2 = Matrix2.swapaxes(1,2).reshape(m*n,-1)
In [20]: np.allclose(out1, out2)
Out[20]: True
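Applied to the question's upscaling example, a minimal sketch might look like the following, assuming the 2x2 blocks are stacked into a single 4-D numeric array (the blocks variable and its construction are illustrative, not from the original post):
import numpy as np

Xtest = np.array([[255, 255, 255, 255],
                  [255, 255, 255, 255],
                  [127, 127, 127, 127],
                  [0, 0, 0, 0]])
scaleFactor = 2

# build a (4, 4, 2, 2) array of constant 2x2 blocks instead of an object array
blocks = np.array([[np.full((scaleFactor, scaleFactor), Xtest[i, j])
                    for j in range(Xtest.shape[1])]
                   for i in range(Xtest.shape[0])])

m, n = blocks.shape[0], blocks.shape[2]
Xexpanded = blocks.swapaxes(1, 2).reshape(m*n, -1)   # shape (8, 8)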
Here's the code:
x = range(-6,7)
tmp1 = []
for i in range(len(x)):
    tmp1.append(math.exp(-(i*i)/(2*self.sigma*self.sigma)))
max_tmp1 = max(tmp1)
mod_tmp1 = []
for i in range(len(tmp1)):
    mod_tmp1.append(max_tmp1 - i)
ht1 = np.kron(np.ones((9,1)), tmp1)
sht1 = sum(ht1.flatten(1))
mean = sht1/(13*9)
ht1 = ht1 - mean
ht1 = ht1/sht1
print ht1.shape
h = np.zeros((16,16))
for i in range(0, 9):
    for j in range(0, 13):
        h[i+3, j+1] = ht1[i, j]
for i in range(0, 10):
    ag = 15*i
    np.append(h, scipy.misc.imrotate(h, ag, 'bicubic'))
R = []
print h.shape
print self.img.shape
for i in range(0, 11):
    print 'here'
    R[i] = scipy.signal.convolve2d(self.img, h[i], mode='same')
rt = np.zeros(self.img.shape)
x, y = self.img.shape
The error I get states:
ValueError: object of too small depth for desired array
It looks to me as if the problem is that you're setting h up wrongly. I assume you want h[i] to be a 16x16 array suitable for convolving with, but that's not what you've actually made it, for a couple of different reasons.
I suggest you change the loop with the imrotate calls to this:
h = [scipy.misc.imrotate(h, 15*i, 'bicubic') for i in range(10)]
(What your existing code does is: first set up h as a single 16x16 array; then, repeatedly: compute a rotated version, "flatten" both h and that to make 256-element vectors, compute the result of appending them to make a 512-element vector, and throw the result away. numpy.append doesn't operate in place, and defaults to flattening its arguments before it appends. Neither of those is what you want!)
The list comprehension above will give you a 10-element Python list containing rotated versions of your convolution kernel.
... Oh, I see that your loop computing R actually wants 11 kernels, not 10. Make it range(11), then. (Your original code generated rotations of 0, 0, 15, 30, ..., 135 degrees, but I'm guessing 0, 15, 30, ..., 150 degrees is more likely to be what you want.)
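Putting those pieces together, a minimal sketch of the corrected kernel/response loop might look like this (note that R also has to be built as a list, since assigning R[i] = ... on an empty list raises an IndexError; h, self.img and the scipy.misc.imrotate call are taken from the original code):
# build 11 rotated copies of the base kernel h (0, 15, ..., 150 degrees)
kernels = [scipy.misc.imrotate(h, 15*i, 'bicubic') for i in range(11)]

# convolve the image with each kernel and collect the responses in a list
R = [scipy.signal.convolve2d(self.img, k, mode='same') for k in kernels]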
I have 2 arrays (G and G_). They have the same shape and size, and I want to convolve them. I found numpy.convolve and fftconvolve.
My code looks like this:
foldedX = getFoldGradientsFFT(G, G_)
foldedY = getFoldGradientsNumpy(G, G_)

def getFoldGradientsFFT(G, G_):
    # convolve via scipy fast fourier transform
    X = signal.fftconvolve(G, G_, "same")
    X *= 255.0/numpy.max(X)
    return X

def getFoldGradientsNumpy(G, G_):
    # convolve via numpy.convolve
    Y = ndimage.convolve(G, G_)
    Y *= 255.0/numpy.max(Y)
    return Y
But the results aren't the same. The results look like this:
numpy.convolve():
[ 11.60287582 3.28262652 18.80395211 52.75829556 99.61675945
147.74124258 187.66178244 215.06160439 234.1907606 229.04221552]
scipy.signal.fftconvolve:
[ -4.88130620e-15 6.74371119e-02 4.91875539e+00 1.94250997e+01
3.88227012e+01 6.70322921e+01 9.78460423e+01 1.08486302e+02
1.17267015e+02 1.15691562e+02]
I thought the results were supposed to be the same, even if the two functions convolve using different procedures?!
I forgot to mention that I want to convolve two 2-dimensional arrays :S
The arrays:
G = array([[1,2],[3,4]])
G_ = array([[5,6],[7,8]])
The code:
def getFoldGradientsFFT(G, G_):
    X = signal.fftconvolve(G, G_, "same")
    X = X.astype("int")
    X *= 255.0/np.max(X)
    return X

def getFoldGradientsNumpy(G, G_):
    # convolve via convolve
    old_shape = G.shape
    G = np.reshape(G, G.size)
    G_ = np.reshape(G_, G.size)
    Y = np.convolve(G, G_, "same")
    Y = np.reshape(Y, old_shape)
    Y = Y.astype("int")
    Y *= 255.0/np.max(Y)
    return Y

def getFoldGradientsNDImage(G, G_):
    Y = ndimage.convolve(G, G_)
    Y = Y.astype("int")
    Y *= 255.0/np.max(Y)
    return Y
The results:
getFoldGradientsFFT
[[ 21 68]
[ 93 255]]
getFoldGradientsNumpy
[[ 66 142]
[250 255]]
getFoldGradientsNDImage
[[147 181]
[220 255]]
numpy.convolve is for one-dimensional data.
The following code compares the results of signal.convolve, signal.fftconvolve, and ndimage.convolve.
For ndimage.convolve, we need to set the mode argument to "constant", and the origin argument to -1 when N is even and 0 when N is odd.
from scipy import signal
from scipy import ndimage
import numpy as np

np.random.seed(1)
for N in xrange(2, 20):
    a = np.random.randint(0, 10, size=(N, N))
    b = np.random.randint(0, 10, size=(N, N))
    r1 = signal.convolve(a, b, mode="same")
    r2 = signal.fftconvolve(a, b, mode="same")
    r3 = ndimage.convolve(a, b, mode="constant", origin=-1 if N%2==0 else 0)
    print "N=", N
    print np.allclose(r1, r2)
    print np.allclose(r2, r3)
getFoldGradientsNumpy is using scipy.ndimage.convolve. That does multi-dimensional convolution and is not the same as scipy.convolve.
For me, when convolving two one-dimensional arrays, scipy.convolve, scipy.signal.convolve, and scipy.signal.fftconvolve all return the same answer.
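A minimal sketch of that 1-D check (using numpy.convolve in place of the old scipy.convolve alias, which is my substitution):
import numpy as np
from scipy import signal

a = np.array([1.0, 2.0, 3.0, 4.0])
b = np.array([0.5, 1.0, 0.5])

r1 = np.convolve(a, b, mode="same")
r2 = signal.convolve(a, b, mode="same")
r3 = signal.fftconvolve(a, b, mode="same")

print(np.allclose(r1, r2), np.allclose(r1, r3))   # both True for 1-D inputs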