Splitting a multi-relational graph stored as lil_matrix - python

I am storing a graph with two types of relationships using the sparse lil_matrix format. This is how I am doing it:
import random
import numpy as np
from scipy.sparse import lil_matrix

e = 15
k = 2
X = [lil_matrix((e,e)) for i in range(k)]
#storing type 0 relation#
X[0][0,14] =1
X[0][0,8] =1
X[0][0,9] =1
X[0][0,10] =1
X[0][1,14] =1
X[0][1,6] =1
X[0][1,7] =1
X[0][2,8] =1
X[0][2,9] =1
X[0][2,10] =1
X[0][2,12] =1
X[0][3,6] =1
X[0][3,12] =1
X[0][3,11] =1
X[0][3,13] =1
X[0][4,11] =1
X[0][4,13] =1
X[0][5,13] =1
X[0][5,11] =1
X[0][5,10] =1
X[0][5,12] =1
#storing type 1 relation#
X[1][14,7] =1
X[1][14,6] =1
X[1][6,7] =1
X[1][6,8] =1
X[1][6,9] =1
X[1][10,9] =1
X[1][10,8] =1
X[1][10,11] =1
X[1][12,8] =1
X[1][12,10] =1
X[1][12,11] =1
X[1][12,13] =1
X[1][14,12] =1
X[1][11,9] =1
X[1][8,7] =1
X[1][8,9] =1
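As an aside, the per-entry assignments above can be collapsed into one construction per relation by building each matrix from an edge list with coo_matrix and converting to lil. A sketch; the edge lists simply restate the pairs assigned above:
import numpy as np
from scipy import sparse

e, k = 15, 2
edges = [
    # type 0 relation
    [(0,14),(0,8),(0,9),(0,10),(1,14),(1,6),(1,7),(2,8),(2,9),(2,10),(2,12),
     (3,6),(3,12),(3,11),(3,13),(4,11),(4,13),(5,13),(5,11),(5,10),(5,12)],
    # type 1 relation
    [(14,7),(14,6),(6,7),(6,8),(6,9),(10,9),(10,8),(10,11),(12,8),(12,10),
     (12,11),(12,13),(14,12),(11,9),(8,7),(8,9)],
]
X = []
for pairs in edges:
    rows, cols = zip(*pairs)
    m = sparse.coo_matrix((np.ones(len(pairs)), (rows, cols)), shape=(e, e))
    X.append(m.tolil())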
I would like to prune the network so that it contains only 50% of the nodes. The way I am approaching this is:
nodes_list = range(e)
total_nodes = len(nodes_list)
get_percentage_of_prune_nodes = np.int(total_nodes * 0.5)
new_nodes = sorted(random.sample(nodes_list, get_percentage_of_prune_nodes))

e_new = get_percentage_of_prune_nodes
k_new = 2
#Y is the pruned matrix#
Y = [lil_matrix((e_new,e_new)) for i in range(k_new)]
for i in xrange(e):
    for j in xrange(e):
        for rel in xrange(k_new):
            if i in new_nodes and j in new_nodes:
                if X[rel][i,j] == 1:
                    Y[rel][new_nodes.index(i), new_nodes.index(j)] = 1
This is not a very efficient approach when the original matrices (X) are huge. Is there a faster or smarter way to prune this?

Focusing on just one matrix:
In [318]: X=X[0].astype(int)
In [327]: X.A
Out[327]:
array([[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
In [331]: new_nodes=sorted(random.sample(np.arange(e).tolist(),7))
In [332]: new_nodes
Out[332]: [0, 1, 2, 5, 8, 12, 13]
In [333]: Y=sparse.lil_matrix((7,7),dtype=int)
In [334]: for i in range(15):
     ...:     for j in range(e):
     ...:         if i in new_nodes and j in new_nodes:
     ...:             if X[i,j]:
     ...:                 Y[new_nodes.index(i),new_nodes.index(j)]=1
     ...:
In [335]: Y
Out[335]:
<7x7 sparse matrix of type '<class 'numpy.int32'>'
with 5 stored elements in LInked List format>
In [336]: Y.A
Out[336]:
array([[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
This is the same as selecting rows and columns with new_nodes:
In [337]: X[np.ix_(new_nodes,new_nodes)]
Out[337]:
<7x7 sparse matrix of type '<class 'numpy.int32'>'
with 5 stored elements in LInked List format>
In [338]: _.A
Out[338]:
array([[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
This indexing is faster with dense arrays:
In [341]: timeit X[np.ix_(new_nodes,new_nodes)]
188 µs ± 1.3 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
In [342]: timeit X[np.ix_(new_nodes,new_nodes)].A
222 µs ± 6.77 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
In [343]: timeit X.A[np.ix_(new_nodes,new_nodes)]
62 µs ± 654 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
The dense array approach may run into memory errors, but sparse indexing can also have memory problems; see: Sparse matrix slicing memory error.
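Putting this back into the multi-relational setting of the question, the whole triple loop collapses to one np.ix_ slice per relation. A sketch, assuming X, e and k as defined in the question:
import random
import numpy as np

new_nodes = sorted(random.sample(range(e), e // 2))   # keep 50% of the nodes
# one fancy-indexing call per relation replaces the i/j/rel triple loop
Y = [X[rel][np.ix_(new_nodes, new_nodes)] for rel in range(k)]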

Related

Looping and counting python 2d arrays

I have an array like this:
[
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0]
]
I want to sum the values of every 3 rows, so the result I expect is:
[
[3, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 3, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 1, 0, 0, 0, 0, 0]
]
I have no idea how to loop it.
UPDATE:
This problem was solved. Thank you. I tried Shijith's code:
if len(arr)%3==0:
    print([[sum(y) for y in zip(arr[x],arr[x+1],arr[x+2])] for x in range(0, len(arr), 3)])
Try:
result = []
for i in range(int(len(a)/3)):
    result.append(np.sum(a[i*3:i*3+3], axis=0))
[array([3, 0, 0, 0, 0, 0, 0, 0, 0]),
array([0, 3, 0, 0, 0, 0, 0, 0, 0]),
array([0, 0, 2, 1, 0, 0, 0, 0, 0])]
You can use numpy.sum() along axis=0, for every three rows in your array.
import numpy as np
if len(arr)%3==0:
    print(np.array([np.sum(arr[x:x+3], axis=0) for x in range(0, len(arr), 3)]))
[[3 0 0 0 0 0 0 0 0]
[0 3 0 0 0 0 0 0 0]
[0 0 2 1 0 0 0 0 0]]
or use a simple list comprehension:
if len(arr)%3==0:
    print([[sum(y) for y in zip(arr[x],arr[x+1],arr[x+2])] for x in range(0, len(arr), 3)])
[[3, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 3, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 1, 0, 0, 0, 0, 0]]
You can do this in pandas like this (it splits the rows into sets of 3, then takes the sum of each set of rows):
import pandas as pd
import numpy as np
df=pd.DataFrame([
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0]
])
df.groupby(np.arange(len(df))//3).sum().to_numpy().tolist()
output:
[[3, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 3, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 1, 0, 0, 0, 0, 0]]
For a pure-Python way with no imports, assuming arr is a list of lists:
combine = []
for x in range(3):
    combine.append([sum(col) for col in zip(*arr[x*3:x*3+3])])
print(combine)
output:
[[3, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 3, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 1, 0, 0, 0, 0, 0]]
result = [[sum(three) for three in
           zip(arr[first], arr[first + 1], arr[first + 2])]
          for first in range(0, len(arr) - len(arr) % 3, 3)]
print(result)
Output
[[3, 0, 0, 0, 0, 0, 0, 0, 0], [0, 3, 0, 0, 0, 0, 0, 0, 0], [0, 0, 2, 1, 0, 0, 0, 0, 0]]
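If arr is a NumPy array and its row count is a multiple of 3, the same grouping can also be done with a single reshape and no Python-level loop. A minimal sketch with the input from the question:
import numpy as np

arr = np.array([
    [1, 0, 0, 0, 0, 0, 0, 0, 0],
    [1, 0, 0, 0, 0, 0, 0, 0, 0],
    [1, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 1, 0, 0, 0, 0, 0],
])

# view the rows as blocks of 3 and sum within each block
result = arr.reshape(-1, 3, arr.shape[1]).sum(axis=1)
print(result)
# [[3 0 0 0 0 0 0 0 0]
#  [0 3 0 0 0 0 0 0 0]
#  [0 0 2 1 0 0 0 0 0]]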

How to set a probability of a value becoming a zero for an np.array?

I've got a 219 by 219 np.array with mostly 0s and 2% nonzeros, and I now want to create new arrays where each of the nonzero values has a 90% chance of becoming a zero.
I know how to change the n-th nonzero value to 0, but how do I work with probabilities?
Probably this can be modified:
index = 0
for x in range(0, 219):
    for y in range(0, 219):
        if (index+1) % 10 == 0:
            B[x][y] = 0
        index += 1
print(B)
You could use np.random.random to create an array of random numbers to compare with 0.9, and then use np.where to select either the original value or 0. Since each draw is independent, it doesn't matter if we replace a 0 with a 0, so we don't need to treat zero and nonzero values differently. For example:
In [184]: A = np.random.randint(0, 2, (8,8))
In [185]: A
Out[185]:
array([[1, 1, 1, 0, 0, 0, 0, 1],
[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 0, 1],
[0, 1, 0, 1, 1, 1, 1, 0],
[1, 1, 0, 1, 1, 0, 0, 0],
[1, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 1, 0, 1]])
In [186]: np.where(np.random.random(A.shape) < 0.9, 0, A)
Out[186]:
array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0]])
# first method
prob = 0.3
print(np.random.choice([2,5], (5,), p=[prob, 1-prob]))
# second method (I prefer this)
import random
import numpy as np
def randomZerosOnes(a, b, N, prob):
    if prob > 1-prob:
        n1 = int((1-prob)*N)
        n0 = N - n1
    else:
        n0 = int(prob*N)
        n1 = N - n0
    zo = np.concatenate(([a for _ in range(n0)], [b for _ in range(n1)]), axis=0)
    random.shuffle(zo)
    return zo
zo = randomZerosOnes(2, 5, N=5, prob=0.3)
print(zo)
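If you want exactly 90% of the nonzero entries zeroed, rather than each nonzero independently having a 90% chance, you can sample from the nonzero positions directly. A sketch; the 2%-dense B built below is a stand-in for the array from the question:
import numpy as np

# stand-in for the question's 219x219 array with ~2% nonzeros
B = np.zeros((219, 219), dtype=int)
B[np.random.random(B.shape) < 0.02] = 2

nz = np.flatnonzero(B)                    # flat indices of the nonzero entries
drop = np.random.choice(nz, size=int(0.9 * nz.size), replace=False)
B.flat[drop] = 0                          # zero out exactly 90% of them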

Replacing part of Numpy 2D Array using list in both rows and columns

Let's import numpy first,
import numpy as np
For example, I have a matrix A as,
A = np.identity(10)
I have two other matrices as,
B = np.random.sample((4, 4))
C = np.random.sample((6, 6))
In addition, I have two lists of indices as,
idx_1 = [1, 2, 4, 7]
idx_2 = [0, 3, 5, 6, 8, 9]
Now I want to replace idx_1 rows and columns of A by B and idx_2 rows and columns of A by C. The final A matrix will be a block diagonal matrix.
What is the efficient way to achieve this?
I tried the following, but it did not change A. I don't know why, and I did not get any error either.
A[idx_1][:,idx_1] = B
In [99]: A = np.identity(10).astype(int)
In [100]: A
Out[100]:
array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
In [101]: idx_1 = [1, 2, 4, 7]
I can select a set of diagonal values with:
In [102]: A[idx_1, idx_1]
Out[102]: array([1, 1, 1, 1])
In [103]: A[idx_1, idx_1] = [10,20,30,40]
In [104]: A
Out[104]:
array([[ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 10, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 20, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 30, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 40, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
But it looks like you want to replace a (4,4) block of values. We have to construct a pair of indices that together broadcast to that shape, that is, a (4,1) array with a (1,4) array.
In [105]: np.ix_(idx_1, idx_1)
Out[105]:
(array([[1],
[2],
[4],
[7]]), array([[1, 2, 4, 7]]))
In [106]: A[np.ix_(idx_1, idx_1)]
Out[106]:
array([[10, 0, 0, 0],
[ 0, 20, 0, 0],
[ 0, 0, 30, 0],
[ 0, 0, 0, 40]])
In [107]: A[np.ix_(idx_1, idx_1)] += 1
In [108]: A
Out[108]:
array([[ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 11, 1, 0, 1, 0, 0, 1, 0, 0],
[ 0, 1, 21, 0, 1, 0, 0, 1, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[ 0, 1, 1, 0, 31, 0, 0, 1, 0, 0],
[ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[ 0, 1, 1, 0, 1, 0, 0, 41, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
The equivalent indexing with nested lists is:
In [109]: A[[[1],[2],[4],[7]],[[1,2,4,7]]]
Out[109]:
array([[11, 1, 1, 1],
[ 1, 21, 1, 1],
[ 1, 1, 31, 1],
[ 1, 1, 1, 41]])
Regarding your indexing attempt:
In [110]: A[idx_1] # A[idx_1,:]
Out[110]:
array([[ 0, 11, 1, 0, 1, 0, 0, 1, 0, 0],
[ 0, 1, 21, 0, 1, 0, 0, 1, 0, 0],
[ 0, 1, 1, 0, 31, 0, 0, 1, 0, 0],
[ 0, 1, 1, 0, 1, 0, 0, 41, 0, 0]])
In [111]: A[idx_1][:,idx_1]
Out[111]:
array([[11, 1, 1, 1],
[ 1, 21, 1, 1],
[ 1, 1, 31, 1],
[ 1, 1, 1, 41]])
In [111] is evaluated in 2 steps: first the rows are selected, then the columns.
In
A[idx_1][:,idx_1] = B
the values of B replace columns of Out[110]. But Out[110] is a copy of values from A, not a view, so A itself is unchanged. A good grasp of the difference between view and copy, and between basic and advanced indexing, is important when working with numpy.
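So the fix for the question is to do each assignment through a single advanced-indexing expression, which writes into A itself rather than into a copy. A short sketch with the arrays from the question:
import numpy as np

A = np.identity(10)
B = np.random.sample((4, 4))
C = np.random.sample((6, 6))
idx_1 = [1, 2, 4, 7]
idx_2 = [0, 3, 5, 6, 8, 9]

# one indexing operation each, so the assignment reaches A directly
A[np.ix_(idx_1, idx_1)] = B
A[np.ix_(idx_2, idx_2)] = C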

Build numpy array with multiple custom index ranges without explicit loop

In Numpy, is there a pythonic way to create array3 with custom ranges from array1 and array2 without a loop? The straightforward solution of iterating over the ranges works but since my arrays run into millions of items, I am looking for a more efficient solution (maybe syntactic sugar too).
For ex.,
array1 = np.array([10, 65, 200])
array2 = np.array([14, 70, 204])
array3 = np.concatenate([np.arange(array1[i], array2[i]) for i in
                         np.arange(0, len(array1))])
print array3
result: [10,11,12,13,65,66,67,68,69,200,201,202,203].
Assuming the ranges do not overlap, you could build a mask which is nonzero where the index is between the ranges specified by array1 and array2 and then use np.flatnonzero to obtain an array of indices -- the desired array3:
import numpy as np
array1 = np.array([10, 65, 200])
array2 = np.array([14, 70, 204])
first, last = array1.min(), array2.max()
array3 = np.zeros(last-first+1, dtype='i1')
array3[array1-first] = 1
array3[array2-first] = -1
array3 = np.flatnonzero(array3.cumsum())+first
print(array3)
yields
[ 10 11 12 13 65 66 67 68 69 200 201 202 203]
For large len(array1), using_flatnonzero can be significantly faster than using_loop:
def using_flatnonzero(array1, array2):
    first, last = array1.min(), array2.max()
    array3 = np.zeros(last-first+1, dtype='i1')
    array3[array1-first] = 1
    array3[array2-first] = -1
    return np.flatnonzero(array3.cumsum()) + first

def using_loop(array1, array2):
    return np.concatenate([np.arange(array1[i], array2[i]) for i in
                           np.arange(0, len(array1))])

array1, array2 = (np.random.choice(range(1, 11), size=10**4, replace=True)
                  .cumsum().reshape(2, -1, order='F'))

assert np.allclose(using_flatnonzero(array1, array2), using_loop(array1, array2))
In [260]: %timeit using_loop(array1, array2)
100 loops, best of 3: 9.36 ms per loop
In [261]: %timeit using_flatnonzero(array1, array2)
1000 loops, best of 3: 564 µs per loop
If the ranges overlap, then using_loop will return an array3 which contains duplicates. using_flatnonzero returns an array with no duplicates.
Explanation: Let's look at a small example with
array1 = np.array([10, 65, 200])
array2 = np.array([14, 70, 204])
The objective is to build an array which looks like goal, below. The 1's are located at index values [ 10, 11, 12, 13, 65, 66, 67, 68, 69, 200, 201, 202, 203] (i.e. array3):
In [306]: goal
Out[306]:
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], dtype=int8)
Once we have the goal array, array3 can be obtained with a call to np.flatnonzero:
In [307]: np.flatnonzero(goal)
Out[307]: array([ 10, 11, 12, 13, 65, 66, 67, 68, 69, 200, 201, 202, 203])
goal has the same length as array2.max():
In [308]: array2.max()
Out[308]: 204
In [309]: goal.shape
Out[309]: (204,)
So we can begin by allocating
goal = np.zeros(array2.max()+1, dtype='i1')
and then filling in 1's at the index locations given by array1 and -1's at the indices given by array2:
In [311]: goal[array1] = 1
In [312]: goal[array2] = -1
In [313]: goal
Out[313]:
array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, -1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
-1], dtype=int8)
Now applying cumsum (the cumulative sum) produces the desired goal array:
In [314]: goal = goal.cumsum(); goal
Out[314]:
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0])
In [315]: np.flatnonzero(goal)
Out[315]: array([ 10, 11, 12, 13, 65, 66, 67, 68, 69, 200, 201, 202, 203])
That's the main idea behind using_flatnonzero. The subtraction of first was simply to save a bit of memory.
Prospective Approach
I will explain how to approach this problem by working backwards.
Take the sample listed in the question. We have -
array1 = np.array([10, 65, 200])
array2 = np.array([14, 70, 204])
Now, look at the desired result -
result: [10,11,12,13,65,66,67,68,69,200,201,202,203]
Let's calculate the group lengths, as we would be needing those to explain the solution approach next.
In [58]: lens = array2 - array1
In [59]: lens
Out[59]: array([4, 5, 4])
The idea is to use a 1's-initialized array, which when cumulatively summed across the entire length would give us the desired result.
This cumulative summation would be the last step of our solution.
Why 1's-initialized? Because we have an array that increases in steps of 1, except at specific places where we have shifts corresponding to new groups coming in.
Now, since cumsum would be the last step, the step before it should give us something like -
array([ 10, 1, 1, 1, 52, 1, 1, 1, 1, 131, 1, 1, 1])
As discussed before, it's 1's filled with [10,52,131] at specific places. That 10 comes from the first element in array1, but what about the rest?
The second one, 52, came in as 65-13 (looking at the result): 13 is where the group that started at 10 ended, since the first group has length 4. So, if we do 65 - 10 - 4, we get 51, and then add 1 to accommodate the boundary stop, giving 52, which is the desired shift value. Similarly, we get 131.
Thus, those shifting-values could be computed, like so -
In [62]: np.diff(array1) - lens[:-1]+1
Out[62]: array([ 52, 131])
Next up, to get those shifting-places where such shifts occur, we can simply do cumulative summation on the group lengths -
In [65]: lens[:-1].cumsum()
Out[65]: array([4, 9])
For completeness, we need to prepend 0 to the array of shift places, and array1[0] to the shift values.
So, we are set to present our approach in a step-by-step format!
Putting back the pieces
1] Get lengths of each group :
lens = array2 - array1
2] Get indices at which shifts occur and values to be put in 1's initialized array :
shift_idx = np.hstack((0,lens[:-1].cumsum()))
shift_vals = np.hstack((array1[0],np.diff(array1) - lens[:-1]+1))
3] Setup 1's initialized ID array for inserting those values at those indices listed in the step before :
id_arr = np.ones(lens.sum(),dtype=array1.dtype)
id_arr[shift_idx] = shift_vals
4] Finally do cumulative summation on the ID array :
output = id_arr.cumsum()
Listed in a function format, we would have -
def using_ones_cumsum(array1, array2):
    lens = array2 - array1
    shift_idx = np.hstack((0, lens[:-1].cumsum()))
    shift_vals = np.hstack((array1[0], np.diff(array1) - lens[:-1] + 1))
    id_arr = np.ones(lens.sum(), dtype=array1.dtype)
    id_arr[shift_idx] = shift_vals
    return id_arr.cumsum()
And it works on overlapping ranges too!
In [67]: array1 = np.array([10, 11, 200])
...: array2 = np.array([14, 18, 204])
...:
In [68]: using_ones_cumsum(array1, array2)
Out[68]:
array([ 10, 11, 12, 13, 11, 12, 13, 14, 15, 16, 17, 200, 201,
202, 203])
Runtime test
Let's time the proposed approach against the other vectorized approach, @unutbu's flatnonzero-based solution, which already proved to be much better than the loopy approach -
In [38]: array1, array2 = (np.random.choice(range(1, 11), size=10**4, replace=True)
...: .cumsum().reshape(2, -1, order='F'))
In [39]: %timeit using_flatnonzero(array1, array2)
1000 loops, best of 3: 889 µs per loop
In [40]: %timeit using_ones_cumsum(array1, array2)
1000 loops, best of 3: 235 µs per loop
Improvement!
Now, codewise NumPy doesn't like appending. So, those np.hstack calls could be avoided for a slightly improved version as listed below -
def get_ranges_arr(starts, ends):
    counts = ends - starts
    counts_csum = counts.cumsum()
    id_arr = np.ones(counts_csum[-1], dtype=int)
    id_arr[0] = starts[0]
    id_arr[counts_csum[:-1]] = starts[1:] - ends[:-1] + 1
    return id_arr.cumsum()
Let's time it against our original approach -
In [151]: array1,array2 = (np.random.choice(range(1, 11),size=10**4, replace=True)\
...: .cumsum().reshape(2, -1, order='F'))
In [152]: %timeit using_ones_cumsum(array1, array2)
1000 loops, best of 3: 276 µs per loop
In [153]: %timeit get_ranges_arr(array1, array2)
10000 loops, best of 3: 193 µs per loop
So, we have a 30% performance boost there!
This is my approach combining vectorize and concatenate:
Implementation:
import numpy as np
array1, array2 = np.array([10, 65, 200]), np.array([14, 70, 204])
ranges = np.vectorize(lambda a, b: np.arange(a, b), otypes=[np.ndarray])
result = np.concatenate(ranges(array1, array2), axis=0)
print result
# [ 10 11 12 13 65 66 67 68 69 200 201 202 203]
Performance:
%timeit np.concatenate(ranges(array1, array2), axis=0)
100000 loops, best of 3: 13.9 µs per loop
Do you mean this?
In [440]: np.r_[10:14,65:70,200:204]
Out[440]: array([ 10, 11, 12, 13, 65, 66, 67, 68, 69, 200, 201, 202, 203])
or generalizing:
In [454]: np.r_[tuple([slice(i,j) for i,j in zip(array1,array2)])]
Out[454]: array([ 10, 11, 12, 13, 65, 66, 67, 68, 69, 200, 201, 202, 203])
Though this does involve a double loop: the explicit one to generate the slices, and one inside r_ to convert the slices to arange calls.
for k in range(len(key)):
    scalar = False
    if isinstance(key[k], slice):
        step = key[k].step
        start = key[k].start
        ...
        newobj = _nx.arange(start, stop, step)
I mention this because it shows that numpy developers consider your kind of iteration normal.
I expect that @unutbu's clever, if somewhat obtuse (I haven't figured out what it is doing yet), solution is your best chance of speed. cumsum is a good tool when you need to work with ranges that can vary in length. It probably gains most when working with many small ranges. I don't think it works with overlapping ranges.
================
np.vectorize uses np.frompyfunc. So this iteration can also be expressed with:
In [467]: f=np.frompyfunc(lambda x,y: np.arange(x,y), 2,1)
In [468]: f(array1,array2)
Out[468]:
array([array([10, 11, 12, 13]), array([65, 66, 67, 68, 69]),
array([200, 201, 202, 203])], dtype=object)
In [469]: timeit np.concatenate(f(array1,array2))
100000 loops, best of 3: 17 µs per loop
In [470]: timeit np.r_[tuple([slice(i,j) for i,j in zip(array1,array2)])]
10000 loops, best of 3: 65.7 µs per loop
With @Darius's vectorize solution:
In [474]: timeit result = np.concatenate(ranges(array1, array2), axis=0)
10000 loops, best of 3: 52 µs per loop
vectorize must be doing some extra work to allow more powerful use of broadcasting. Relative speeds may shift if array1 is much larger.
@unutbu's solution isn't special with this small array1.
In [478]: timeit using_flatnonzero(array1,array2)
10000 loops, best of 3: 57.3 µs per loop
The OP's solution, iterative without my r_ middleman, is good:
In [483]: timeit array3 = np.concatenate([np.arange(array1[i], array2[i]) for i in np.arange(0,len(array1))])
10000 loops, best of 3: 24.8 µs per loop
It's often the case that with a small number of loops, a list comprehension is faster than fancier numpy operations.
For @unutbu's larger test case, my timings are consistent with his - with a 17x speed up.
===================
For the small sample arrays, @Divakar's solution is slower, but for the large ones it is 3x faster than @unutbu's. So it has more of a setup cost, but its cost grows more slowly with problem size.
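For reference, a minimal harness to rerun these comparisons on one machine, assuming the four functions defined above are in scope (absolute times will differ):
import timeit
import numpy as np

array1, array2 = (np.random.choice(range(1, 11), size=10**4, replace=True)
                  .cumsum().reshape(2, -1, order='F'))

for fn in (using_loop, using_flatnonzero, using_ones_cumsum, get_ranges_arr):
    t = timeit.timeit(lambda: fn(array1, array2), number=100) / 100
    print('%-20s %8.1f us' % (fn.__name__, t * 1e6))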

In python solve for a matrix with restrictions

Here's the problem: I have two vectors A (1 x n) and B (1 x m), where n > m. I'm looking for a matrix T (n x m) such that AT = B. T has the following properties: all elements of T are either 1's or 0's, and the elements in each row of T sum to 1. If there is no perfect solution, I would like the program to return the best solution, one where as many elements of AT-B as possible are 0.
Here's an example:
import numpy as np
A = np.array([-1.051, 1.069, 0.132, -0.003, -0.001, 0.066, -0.28,
-0.121, 0.075, 0.006, 0.229, -0.018, -0.213, -0.11])
B = np.array([-1.051, 1.201, -0.003, -0.001, 0.066, -0.121, 0.075,
-0.045,-0.231, -0.11])
T = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
# This should equal a vector of 0's
print A.dot(T)-B
I've come up with something, but I don't think it's totally satisfactory, and I'd prefer not to go this route as it's clunky. I first try a 1-to-1 mapping, because that's a common solution to many of the mappings. Anything left over I handle by iterating over all possibilities. This gets a bit messy quickly. You'll also notice I'm fairly new to numpy. I'd appreciate any feedback on how to improve this. Thanks.
def solver(B, A):
    m = B.size
    n = A.size
    start = np.ones((1,n))
    start = np.concatenate((start, np.zeros((m-1,n))), axis=0).astype(np.int)
    for i in xrange(0, m):
        T = np.roll(start, i, axis=0)
        test = B.dot(T) - A
        if i == 0:
            matches = np.absolute(test) < .0001
        else:
            matches = np.vstack((matches, np.absolute(test) < .0001))
    rA = (A - B.dot(matches))[np.absolute(A - B.dot(matches)) > .0001]
    Amissing = A - B.dot(matches)
    rB = (B - B*np.sum(matches, axis=1))[np.absolute(B - B*np.sum(matches, axis=1)) > .0001]
    Bmissing = B - B*np.sum(matches, axis=1)
    rm = rB.size
    rn = rA.size
    rmxrn = np.arange(rm*rn).reshape(rm, rn)
    dif = np.absolute(rA)
    best = np.zeros(shape=(rm, rn))
    for i in xrange(0, 2**(rm*rn)):
        arr = (i >> rmxrn) % 2
        if np.amax(np.sum(arr, axis=1)) > 1 or np.sum(arr) > rm:
            continue
        else:
            diftemp = rB.dot(arr) - rA
            besttemp = arr
            if np.sum(np.absolute(diftemp)) < np.sum(np.absolute(dif)):
                dif = diftemp
                best = besttemp
            if np.sum(np.absolute(dif) < .0001) == rn:
                break
    best = best.astype(np.bool)
    matchesT = matches.T
    bestT = best.T
    newbestT = np.zeros(shape=(m, rn)).astype(np.bool).T
    for x in xrange(0, rn):
        counter = 0
        for i, value in enumerate(Bmissing):
            if abs(Bmissing[i]) > .0001:
                newbestT[x, i] = bestT[x, counter]
                counter = counter + 1
    for x in xrange(0, rn):
        counter = 0
        for i, value in enumerate(Amissing):
            if abs(Amissing[i]) > .0001:
                matchesT[i] = newbestT[counter]
                counter = counter + 1
    return matchesT.T
A=np.array([-1.051,1.201,-0.003,-0.001,0.066,-0.121,0.075,-0.045,-0.231,-0.11])
B=np.array([-1.051,1.069,0.132,-0.003,-0.001,0.066,-0.28,-0.121,0.075,0.006,0.229,-0.018,-0.213,-0.11])
print solver(B,A)
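As an alternative framing: since each row of T contains exactly one 1, T encodes an assignment of each element of A to one of B's m slots, and A.dot(T) is the per-slot sum. The exhaustive version of that search is easy to write; a sketch, illustrative only, since the search space is m**n and therefore hopeless at n=14:
import itertools
import numpy as np

def brute_force_T(A, B):
    # try every assignment of A's n elements to B's m slots, keep the best T
    n, m = A.size, B.size
    best_err, best_T = np.inf, None
    for assign in itertools.product(range(m), repeat=n):
        T = np.zeros((n, m), dtype=int)
        T[np.arange(n), np.array(assign)] = 1   # exactly one 1 per row
        err = np.abs(A.dot(T) - B).sum()
        if err < best_err:
            best_err, best_T = err, T
    return best_T

# tiny example: assign [1,2,3,4] to two slots that should sum to [4,6]
A = np.array([1., 2., 3., 4.])
B = np.array([4., 6.])
print(brute_force_T(A, B))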
