I'd like to add two numpy arrays of different shapes, but without broadcasting; rather, the "missing" values are treated as zeros. It's probably easiest to show with an example:
[1, 2, 3] + [2] -> [3, 2, 3]
or
[1, 2, 3] + [[2], [1]] -> [[3, 2, 3], [1, 0, 0]]
I do not know the shapes in advance.
I'm messing around with the output of np.shape for each, trying to find the smallest shape which holds both of them, embedding each in a zero-ed array of that shape and then adding them. But it seems rather a lot of work, is there an easier way?
Thanks in advance!
edit: by "a lot of work" I meant "a lot of work for me" rather than for the machine; I seek elegance rather than efficiency. My effort at getting the smallest shape holding them both is:
def pad(a, b):
    sa, sb = map(np.shape, [a, b])
    N = np.max([len(sa), len(sb)])
    sap, sbp = map(lambda x: x + (1,)*(N - len(x)), [sa, sb])
    sp = np.amax(np.array([tuple(sap), tuple(sbp)]), 0)   # element-wise max of the two padded shapes
not pretty :-/
I'm messing around with the output of np.shape for each, trying to find the smallest shape which holds both of them, embedding each in a zero-ed array of that shape and then adding them. But it seems rather a lot of work, is there an easier way?
Getting the np.shape is trivial, finding the smallest shape that holds both is very easy, and of course adding is trivial, so the only "a lot of work" part is the "embedding each in a zero-ed array of that shape".
And yes, you can eliminate that, by just calling the resize method. (Note that the np.resize function is not equivalent here: it returns a copy, but it fills the extra space by repeating the data rather than with zeros.) As the docs explain:
Enlarging an array: … missing entries are filled with zeros
For example, if you know the dimensionality statically:
>>> a1 = np.array([[1, 2, 3], [4, 5, 6]])
>>> a2 = np.array([[2], [2]])
>>> shape = [max(a.shape[axis] for a in (a1, a2)) for axis in range(2)]
>>> a1.resize(shape)
>>> a2.resize(shape)
>>> a1 + a2
array([[3, 4, 3],
       [4, 5, 6]])
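One thing to be aware of (based on my reading of the ndarray.resize docs): resize pads the flat, C-ordered data buffer with zeros and then reinterprets it with the new shape, so a multi-dimensional array gets reflowed rather than kept in its original corner. If you want the corner-embedding behaviour from the question, a zero-filled target plus a slice assignment is a possible alternative, e.g.:

import numpy as np

a2 = np.array([[2], [2]])

# ndarray.resize: the flat buffer [2, 2] is padded with zeros and read back as (2, 3)
r = a2.copy()
r.resize((2, 3))
print(r)    # [[2 2 0]
            #  [0 0 0]]

# corner embedding: values keep their original row/column positions
e = np.zeros((2, 3), dtype=a2.dtype)
e[:a2.shape[0], :a2.shape[1]] = a2
print(e)    # [[2 0 0]
            #  [2 0 0]]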
This is the best I could come up with:
import numpy as np
def magic_add(*args):
    n = max(a.ndim for a in args)
    args = [a.reshape((n - a.ndim)*(1,) + a.shape) for a in args]
    shape = np.max([a.shape for a in args], 0)
    result = np.zeros(shape)
    for a in args:
        idx = tuple(slice(i) for i in a.shape)
        result[idx] += a
    return result
You can clean up the for loop a little if you know how many dimensions you expect on result, something like:
for a in args:
    i, j = a.shape
    result[:i, :j] += a
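A quick check with the examples from the question (using magic_add as defined above; output shown as printed by recent NumPy versions):

>>> magic_add(np.array([1, 2, 3]), np.array([2]))
array([3., 2., 3.])
>>> magic_add(np.array([1, 2, 3]), np.array([[2], [1]]))
array([[3., 2., 3.],
       [1., 0., 0.]])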
You may try my solution: for 1-dimensional arrays you have to expand them to 2 dimensions (as shown in the example below) before passing them to the function.
import numpy as np
import timeit

matrix1 = np.array([[0,10],
                    [1,20],
                    [2,30]])
matrix2 = np.array([[0,10],
                    [1,20],
                    [2,30],
                    [3,40]])
matrix3 = np.arange(0,0,dtype=int)   # empty numpy array
matrix3.shape = (0,2)                # reshape to 0 rows
matrix4 = np.array([[0,10,100,1000],
                    [1,20,200,2000]])
matrix5 = np.arange(0,4000,1)
matrix5 = np.reshape(matrix5,(4,1000))
matrix6 = np.arange(0.0,4000,0.5)
matrix6 = np.reshape(matrix6,(20,400))

matrix1 = np.array([1,2,3])
matrix1 = np.expand_dims(matrix1, axis=0)
matrix2 = np.array([2,1])
matrix2 = np.expand_dims(matrix2, axis=0)
def add_2d_matrices(m1, m2, pos=(0,0), filler=None):
    """
    Add two 2d matrices of different sizes or shapes,
    offset by xy coordinates, where x is "from left to right" (=axis:1)
    and y is "from top to bottom" (=axis:0)
    Parameters:
    - m1: first matrix
    - m2: second matrix
    - pos: tuple (x,y) containing coordinates for m2 offset
    - filler: gaps are filled with the value of filler (or zeros)
    Returns:
    - 2d array (float):
      containing filler-values, m1-values, m2-values
      or the sum of m1,m2 (at overlapping areas)
    Author:
    Reinhard Daemon, Austria
    """
    # determine shape of final array:
    _m1 = np.copy(m1)
    _m2 = np.copy(m2)
    x,y = pos
    y1,x1 = _m1.shape
    y2,x2 = _m2.shape
    xmax = max(x1, x2+x)
    ymax = max(y1, y2+y)

    # fill up the _m1 array with zeros:
    y1,x1 = _m1.shape
    diff = xmax - x1
    _z = np.zeros((y1,diff))
    _m1 = np.hstack((_m1,_z))
    y1,x1 = _m1.shape
    diff = ymax - y1
    _z = np.zeros((diff,x1))
    _m1 = np.vstack((_m1,_z))

    # shift the _m2 array by 'pos' and fill up with zeros:
    y2,x2 = _m2.shape
    _z = np.zeros((y2,x))
    _m2 = np.hstack((_z,_m2))
    y2,x2 = _m2.shape
    diff = xmax - x2
    _z = np.zeros((y2,diff))
    _m2 = np.hstack((_m2,_z))
    y2,x2 = _m2.shape
    _z = np.zeros((y,x2))
    _m2 = np.vstack((_z,_m2))
    y2,x2 = _m2.shape
    diff = ymax - y2
    _z = np.zeros((diff,x2))
    _m2 = np.vstack((_m2,_z))

    # add the 2 arrays:
    _m3 = _m1 + _m2

    # find and fill the "unused" positions within the summed array:
    if filler not in (None,0,0.0):
        y1,x1 = m1.shape
        y2,x2 = m2.shape
        x1min = 0
        x1max = x1-1
        y1min = 0
        y1max = y1-1
        x2min = x
        x2max = x + x2-1
        y2min = y
        y2max = y + y2-1
        for xx in range(xmax):
            for yy in range(ymax):
                if x1min <= xx <= x1max and y1min <= yy <= y1max:
                    continue
                if x2min <= xx <= x2max and y2min <= yy <= y2max:
                    continue
                _m3[yy,xx] = filler
    return _m3
t1 = timeit.Timer("add_2d_matrices(matrix5, matrix6, pos=(1,1), filler=111.111)",
                  "from __main__ import add_2d_matrices,matrix5,matrix6")
print("ran:", t1.timeit(number=10), "seconds")
print("\n\n")
my_res = add_2d_matrices(matrix1, matrix2, pos=(1,1), filler=99.99)
print(my_res)
I have a 3D array of shape (4, 3, 3) which I would like to iteratively multiply with a 1D array (the t variable) and sum, ending up with an array A that is the sum of the four 3x3 arrays.
I'm unsure how I should be assigning indices, or whether and how I should be using np.ndenumerate.
Thanks
import numpy as np
import math

# Enter material constants for calculation of stiffness matrix
E1 = 20
E2 = 1.2
G12 = 0.8
v12 = 0.25
v21 = (v12/E1)*E2

theta = np.array([30,-30,-30,30])
deg = (math.pi*theta/180)
k = len(theta)   # number of layers
t = np.array([0.005,0.005,0.005,0.005])

# Calculation of Q values
Q11 = 1
Q12 = 2
Q21 = 3
Q22 = 4
Q66 = 5

Qbar = np.zeros((len(theta),3,3),order='F')

# CALCULATING THE VALUES OF THE QBAR MATRIX
for i, x in np.ndenumerate(deg):
    m = np.cos(x)   # cos of rotated lamina
    n = np.sin(x)   # sin of rotated lamina
    Qbar11 = Q11*3
    Qbar12 = Q22*4
    Qbar16 = Q16*4
    Qbar21 = Qbar12
    Qbar22 = Q22*1
    Qbar26 = Q66*2
    Qbar66 = Q12*3
    Qbar[i] = np.array([[Qbar11, Qbar12, Qbar16], [Qbar21, Qbar22, Qbar26], [Qbar16, Qbar26, Qbar66]], order='F')

print(Qbar)

A = np.zeros((3,3))
for i in np.nditer(t):
    A[i] = Qbar[i]*t[i]
    A = sum(A[i])
If I understand correctly, you want to multiply Qbar and t over the first axis, and then sum the result over the first axis (which gives an array of shape (3, 3)).
I created random arrays to make the code minimal:
import numpy as np
Qbar = np.random.randint(2, size=(4, 3, 3))
t = np.arange(4)
A = (Qbar * t[:, None, None]).sum(axis=0)
t[:, None, None] will create two new dimensions so that the shape becomes (4, 1, 1), which can be multiplied with Qbar element-wise. Then we just have to sum over the first axis.
NB: A = np.tensordot(t, Qbar, axes=([0],[0])) also works and can be faster for larger dimensions, but for the dimensions you provided I prefer the first solution.
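For reference, a small self-contained check (reusing the random shapes from above) that the broadcasting version, np.tensordot and np.einsum all give the same result:

import numpy as np

Qbar = np.random.randint(2, size=(4, 3, 3))
t = np.arange(4)

A1 = (Qbar * t[:, None, None]).sum(axis=0)    # broadcasting, then sum over the first axis
A2 = np.tensordot(t, Qbar, axes=([0], [0]))   # contraction over the first axes
A3 = np.einsum('i,ijk->jk', t, Qbar)          # einsum spelling of the same contraction
assert np.allclose(A1, A2) and np.allclose(A1, A3)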
Suppose p has shape (4, 3, 2). I want to iterate 12 times over arrays of size (2,)
q = np.empty_like(p)
op_axes = [list(range(len(p.shape) - 1)) + [-1]] * 2
it = np.nditer([p, q],
               op_axes=op_axes,
               op_flags=[['readonly'], ['writeonly', 'allocate']])
with it:
    for this_p, this_q in it:
        print(this_p.shape)   # I want this to have shape (2,)
        this_q[...] = some_function_of(this_p)
What am I doing wrong?
Best I can do:
q = np.empty_like(p)
for i in np.ndindex(p.shape[:-1]):
    this_p = p[i]
    ...
    q[i] = solution.x
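Filling in the elided parts with a hypothetical per-vector function (some_function_of is just a stand-in, not a real API), the ndindex pattern looks like this:

import numpy as np

def some_function_of(v):
    # hypothetical stand-in for the real computation on a length-2 vector
    return v * 2.0

p = np.random.rand(4, 3, 2)
q = np.empty_like(p)
for i in np.ndindex(p.shape[:-1]):   # 4 * 3 = 12 iterations over the leading axes
    q[i] = some_function_of(p[i])    # p[i] has shape (2,)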
Consider the following arrays:
a = np.array([0,1])[:,None]
b = np.array([1,2,3])
print(a)
array([[0],
       [1]])
print(b)
array([1, 2, 3])
Is there a simple way to concatenate these two arrays in a way that the latter is broadcast, in order to obtain the following?
array([[0, 1, 2, 3],
       [1, 1, 2, 3]])
I've seen there is this closed issue with a related question. An alternative is proposed involving np.broadcast_arrays, however I cannot manage to adapt it to my example. Is there some way to do this, excluding the np.tile/np.concatenate solution?
You can do it in the following way
import numpy as np
a = np.array([0,1])[:,None]
b = np.array([1,2,3])
b_new = np.broadcast_to(b,(a.shape[0],b.shape[0]))
c = np.concatenate((a,b_new),axis=1)
print(c)
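which should print:
[[0 1 2 3]
 [1 1 2 3]]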
Here is a more general solution:
def concatenate_broadcast(arrays, axis=-1):
    def broadcast(x, shape):
        shape = [*shape]   # weak copy
        shape[axis] = x.shape[axis]
        return np.broadcast_to(x, shape)

    shapes = [list(a.shape) for a in arrays]
    for s in shapes:
        s[axis] = 1
    broadcast_shape = np.broadcast(*[
        np.broadcast_to(0, s)
        for s in shapes
    ]).shape
    arrays = [broadcast(a, broadcast_shape) for a in arrays]
    return np.concatenate(arrays, axis=axis)
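A quick check with the a and b from the question (using the default axis=-1) should give the desired result:

>>> concatenate_broadcast([a, b])
array([[0, 1, 2, 3],
       [1, 1, 2, 3]])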
I had a similar problem where I had two matrices x and y of size (X, c1) and (Y, c2) and wanted the result to be the matrix of size (X * Y, c1 + c2) where the rows of the result were all the concatenations of rows from x and rows from y.
I, like the original poster, was disappointed to discover that concatenate() would not do broadcasting for me. I thought of using the solution above, except X and Y could potentially be large, and that solution would use a large temporary array.
I finally came up with the following:
result = np.empty((x.shape[0], y.shape[0], x.shape[1] + y.shape[1]), dtype=x.dtype)
result[..., :x.shape[1]] = x[:, None, :]
result[..., x.shape[1]:] = y[None, :, :]
result = result.reshape((-1, x.shape[1] + y.shape[1]))
I create a result array of size (X, Y, c1 + c2), I broadcast in the contents of x and y, and then reshape the results to the right size.
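A minimal sketch of this approach with small made-up shapes (x of shape (3, 2), y of shape (4, 3)), checked against an explicit double loop:

import numpy as np

x = np.arange(6).reshape(3, 2)         # (X, c1)
y = np.arange(10, 22).reshape(4, 3)    # (Y, c2)

result = np.empty((x.shape[0], y.shape[0], x.shape[1] + y.shape[1]), dtype=x.dtype)
result[..., :x.shape[1]] = x[:, None, :]
result[..., x.shape[1]:] = y[None, :, :]
result = result.reshape((-1, x.shape[1] + y.shape[1]))

# every row of x paired with every row of y, rows of x varying slowest
expected = np.array([np.concatenate([xr, yr]) for xr in x for yr in y])
assert np.array_equal(result, expected)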
I have an array whose rows each hold 5 values: 4 data values and one index. I sort and split the array along the index, which leaves me with splits (sub-matrices) of different lengths. For every split I then want to calculate the mean and variance of the fourth value and the covariance of the first 3 values. My current approach works with a for loop, which I would like to replace by matrix operations, but I am struggling with the different sizes of my matrices.
import numpy as np
A = np.random.rand(10,5)
A[:,-1] = np.random.randint(4, size=10)
sorted_A = A[np.argsort(A[:,4])]
splits = np.split(sorted_A, np.where(np.diff(sorted_A[:,4]))[0]+1)
My current for loop looks like this:
result = np.zeros((len(splits), 5))
for idx, values in enumerate(splits):
    if len(values) > 0:
        result[idx, 0] = np.mean(values[:,3])
        result[idx, 1] = np.var(values[:,3])
        result[idx, 2:5] = np.cov(values[:,0:3].transpose(), ddof=0).diagonal()
    else:
        result[idx, 0] = values[:,3]
I tried to work with masked arrays without success, since I couldn't load the matrices into the masked arrays in a proper form. Maybe someone knows how to do this or has a different suggestion.
You can use np.add.reduceat as follows:
>>> idx = np.concatenate([[0], np.where(np.diff(sorted_A[:,4]))[0]+1, [A.shape[0]]])
>>> result2 = np.empty((idx.size-1, 5))
>>> result2[:, 0] = np.add.reduceat(sorted_A[:, 3], idx[:-1]) / np.diff(idx)
>>> result2[:, 1] = np.add.reduceat(sorted_A[:, 3]**2, idx[:-1]) / np.diff(idx) - result2[:, 0]**2
>>> result2[:, 2:5] = np.add.reduceat(sorted_A[:, :3]**2, idx[:-1], axis=0) / np.diff(idx)[:, None]
>>> result2[:, 2:5] -= (np.add.reduceat(sorted_A[:, :3], idx[:-1], axis=0) / np.diff(idx)[:, None])**2
>>>
>>> np.allclose(result, result2)
True
Note that the diagonal of the covariance matrix contains just the variances, which simplifies this vectorization quite a bit.
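For anyone unfamiliar with np.add.reduceat: it sums the segments between consecutive indices (with the last segment running to the end of the array), which is what makes the per-group sums above work. A tiny standalone example:

>>> x = np.array([1., 2., 3., 4., 5.])
>>> np.add.reduceat(x, [0, 2])   # sums of x[0:2] and x[2:]
array([ 3., 12.])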
I have an array input_data of shape (A, B, C) and an array ind of shape (B,). I want to loop through the B axis and take the sum of elements C[B[i]] and C[B[i]+1] (i.e. input_data[:, i, ind[i]] + input_data[:, i, ind[i]+1]). The desired output is of shape (A, B). I have the following code, which works, but I feel it is inefficient due to index-based looping through the B axis. Is there a more efficient method?
import numpy as np
input_data = np.random.rand(2, 6, 10)
ind = [ 2, 3, 5, 6, 5, 4 ]
out = np.zeros( ( input_data.shape[0], input_data.shape[1] ) )
for i in range( len(ind) ):
    d = input_data[:, i, ind[i]:ind[i]+2]
    out[:, i] = np.sum(d, axis = 1)
Edited based on Divakar's answer:
import timeit
import numpy as np

N = 1000
input_data = np.random.rand(10, N, 5000)
ind = (4999 * np.random.rand(N)).astype(int)

def test_1():   # Old loop-based method
    out = np.zeros((input_data.shape[0], input_data.shape[1]))
    for i in range(len(ind)):
        d = input_data[:, i, ind[i]:ind[i]+2]
        out[:, i] = np.sum(d, axis=1)
    return out

def test_2():
    extent = 2   # Comes from 2 in "ind[i]:ind[i]+2"
    m, n, r = input_data.shape
    idx = (np.arange(n)*r + ind)[:, None] + np.arange(extent)
    out1 = input_data.reshape(m, -1)[:, idx].reshape(m, n, -1).sum(2)
    return out1

print(timeit.timeit(stmt=test_1, number=1000))
print(timeit.timeit(stmt=test_2, number=1000))
print(np.all(test_1() == test_2(), keepdims=True))
>> 7.70429363482
>> 0.392034666757
>> [[ True]]
Here's a vectorized approach using linear indexing with some help from broadcasting. We merge the last two axes of the input array, calculate the linear indices corresponding to the last two axes, perform slicing and reshape back to a 3D shape. Finally, we do summation along the last axis to get the desired output. The implementation would look something like this -
extent = 2 # Comes from 2 in "ind[i]:ind[i]+2"
m,n,r = input_data.shape
idx = (np.arange(n)*r + ind)[:,None] + np.arange(extent)
out1 = input_data.reshape(m,-1)[:,idx].reshape(m,n,-1).sum(2)
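To make the linear indexing step concrete, here is what idx contains for the example shapes from the question; each row holds the flattened positions (within the merged last two axes) of the two elements that get summed:

n, r = 6, 10
ind = np.array([2, 3, 5, 6, 5, 4])
idx = (np.arange(n)*r + ind)[:, None] + np.arange(2)
# idx:
# array([[ 2,  3],
#        [13, 14],
#        [25, 26],
#        [36, 37],
#        [45, 46],
#        [54, 55]])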
If the extent is always going to be 2 as stated in the question - "... sum of elements C[B[i]] and C[B[i]+1]", then you could simply do -
m,n,r = input_data.shape
ind_arr = np.array(ind)
axis1_r = np.arange(n)
out2 = input_data[:,axis1_r,ind_arr] + input_data[:,axis1_r,ind_arr+1]
You could also use integer array indexing combined with basic slicing:
import numpy as np
m,n,r = 2, 6, 10
input_data = np.arange(2*6*10).reshape(m, n, r)
ind = np.array([ 2, 3, 5, 6, 5, 4 ])
out = np.zeros( ( input_data.shape[0], input_data.shape[1] ) )
for i in range( len(ind) ):
    d = input_data[:, i, ind[i]:ind[i]+2]
    out[:, i] = np.sum(d, axis = 1)
out2 = input_data[:, np.arange(n)[:,None], np.add.outer(ind,range(2))].sum(axis=-1)
print(out2)
# array([[  5,  27,  51,  73,  91, 109],
#        [125, 147, 171, 193, 211, 229]])
assert np.allclose(out, out2)