I am trying to extract all slices of length 4 along the 0th axis of a 2-dim tensor. So far I can do it by mixing pure Python with TensorFlow.
import numpy as np
import tensorflow as tf

r = test.shape[0]  # test should be a tensor
n = 4
a_list = list(range(r))
the_list = np.array([a_list[slice(i, i + n)] for i in range(r - n + 1)])
test_stacked = tf.stack(tf.gather(test, the_list))
What would be an efficient way of doing that without using pure Python? Note that the "test" array is actually supposed to be a tensor, thus its shape isn't known before I execute the first part of the graph.
A full vanilla example:
import numpy as np

array = np.array([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]])
array.shape # (6,2)
r = array.shape[0]
n = 4
a_list = list(range(r))
the_list = np.array([a_list[slice(i, i+n)] for i in range(r - n+1)])
result = array[the_list] # all possible slices of length 4 of the array along 0th axis
result.shape # (3, 4, 2)
result:
[[[0 1]
  [1 2]
  [2 3]
  [3 4]]

 [[1 2]
  [2 3]
  [3 4]
  [4 5]]

 [[2 3]
  [3 4]
  [4 5]
  [5 6]]]
You may want to try the more general tf.extract_image_patches.
import tensorflow as tf
a = tf.constant([[0, 1],[1, 2],[2, 3],[3, 4],[4, 5],[5, 6]])
# tf.extract_image_patches requires a [batch, in_rows, in_cols, depth] tensor
a = a[None, :, :, None]
b = tf.extract_image_patches(a,
                             ksizes=[1, 4, 2, 1],
                             strides=[1, 1, 1, 1],
                             rates=[1, 1, 1, 1],
                             padding='VALID')
b = tf.reshape(tf.squeeze(b), [-1, 4, 2])
sess = tf.InteractiveSession()
print(b.eval())
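If you are on TF 2.x, the op lives under tf.image (as tf.image.extract_patches, with sizes instead of ksizes, as far as I know); a minimal sketch of the same computation in eager mode, assuming that API:

import tensorflow as tf

a = tf.constant([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]])
a = a[None, :, :, None]  # [batch, in_rows, in_cols, depth]
b = tf.image.extract_patches(a,
                             sizes=[1, 4, 2, 1],
                             strides=[1, 1, 1, 1],
                             rates=[1, 1, 1, 1],
                             padding='VALID')
b = tf.reshape(tf.squeeze(b), [-1, 4, 2])
print(b.numpy())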
I believe gather_nd is what you are looking for.
import tensorflow as tf

a = tf.constant([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]])  # a tensor of size (6, 2)

def get_indices(l, d):
    # Row indices of every window of d consecutive rows along axis 0
    return [[[j] for j in range(i, i + d)] for i in range(l - d + 1)]

b = tf.gather_nd(a, get_indices(6, 4))
# b is a tensor of shape (3, 4, 2)
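Since each inner index here holds just a single row number, the same result can also be obtained with plain tf.gather; a small sketch reusing the a above (the_rows is just an illustrative name):

the_rows = tf.constant([[i + j for j in range(4)] for i in range(3)])
b2 = tf.gather(a, the_rows)
# b2 also has shape (3, 4, 2)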
Related
I need to construct a matrix z that would contain combinations of pairs of rows of a matrix x.
x = tf.constant([[1, 3],
                 [2, 4],
                 [0, 2],
                 [0, 1]], dtype=tf.int32)
z = [[[1, 2],
      [1, 0],
      [1, 0],
      [2, 0],
      [2, 0],
      [0, 0]],
     [[3, 4],
      [3, 2],
      [3, 1],
      [4, 2],
      [4, 1],
      [2, 1]]]
It pairs each value with the values from the other rows of the same column, one pair per combination of rows.
I could not find any function or come up with a good idea to do that.
Update 1
So I need the final shape to be 2*6*2, like the z above.
Unfortunately, this is a bit more involved than one would like using TensorFlow operators only. I would go with creating the indices for all combinations with a while_loop and then using tf.gather to collect the values:
import tensorflow as tf

x = tf.constant([[1, 3],
                 [2, 4],
                 [3, 2],
                 [0, 1]], dtype=tf.int32)
m = tf.constant([], shape=(0, 2), dtype=tf.int32)
_, idxs = tf.while_loop(
    lambda i, m: i < tf.shape(x)[0] - 1,
    lambda i, m: (i + 1, tf.concat(
        [m, tf.stack([tf.tile([i], (tf.shape(x)[0] - 1 - i,)),
                      tf.range(i + 1, tf.shape(x)[0])], axis=1)],
        axis=0)),
    loop_vars=(0, m),
    shape_invariants=(tf.TensorShape([]), tf.TensorShape([None, 2])))
z = tf.reshape(tf.transpose(tf.gather(x, idxs), (2, 0, 1)), (-1, 2))
# <tf.Tensor: shape=(12, 2), dtype=int32, numpy=
# array([[1, 2],
#        [1, 3],
#        [1, 0],
#        [2, 3],
#        [2, 0],
#        [3, 0],
#        [3, 4],
#        [3, 2],
#        [3, 1],
#        [4, 2],
#        [4, 1],
#        [2, 1]])>
This should work in both TF1 and TF2.
If the length of x is known in advance, you don't need the while_loop and could simply precompute the indices in python then place them in a constant.
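For instance, a minimal sketch of that precomputation with itertools.combinations, assuming x is known to have 4 rows:

import itertools
idxs = tf.constant(list(itertools.combinations(range(4), 2)), dtype=tf.int32)
z = tf.reshape(tf.transpose(tf.gather(x, idxs), (2, 0, 1)), (-1, 2))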
Here is a way to do that without a loop:
import tensorflow as tf
x = tf.constant([[1, 3],
                 [2, 4],
                 [0, 2],
                 [0, 1]], dtype=tf.int32)
# Number of rows
n = tf.shape(x)[0]
# Grid of indices
ri = tf.range(0, n - 1)
rj = ri + 1
ii, jj = tf.meshgrid(ri, rj, indexing='ij')
# Stack together
grid = tf.stack([ii, jj], axis=-1)
# Get upper triangular part
m = ii < jj
idx = tf.boolean_mask(grid, m)
# Get values
g = tf.gather(x, idx, axis=0)
# Rearrange result
result = tf.transpose(g, [2, 0, 1])
print(result.numpy())
# [[[1 2]
#   [1 0]
#   [1 0]
#   [2 0]
#   [2 0]
#   [0 0]]
#
#  [[3 4]
#   [3 2]
#   [3 1]
#   [4 2]
#   [4 1]
#   [2 1]]]
I would like to compute all the combinations of two or more tensors. For example, for two tensors containing resp. the values [1, 2] and [3, 4, 5], I would like to get the 6x2 tensor
[[1, 3],
[1, 4],
[1, 5],
[2, 3],
[2, 4],
[2, 5]]
To do this, I came up with the following hack
import tensorflow as tf

def combine(x, y):
    x, y = x[:, None], y[:, None]
    x1 = tf.concat([x, tf.ones_like(x)], axis=-1)
    y1 = tf.concat([tf.ones_like(y), y], axis=-1)
    return tf.reshape(x1[:, None] * y1[None], (-1, 2))

x = tf.constant([1, 2])
y = tf.constant([3, 4, 5])
print(combine(x, y))
# tf.Tensor(
# [[1 3]
#  [1 4]
#  [1 5]
#  [2 3]
#  [2 4]
#  [2 5]], shape=(6, 2), dtype=int32)
However I am not satisfied with this solution:
It uses multiplication to combine the elements, which is clearly overkill
It does not easily generalize to combinations of an arbitrary number of tensors
Is there a more efficient and/or general way of doing this?
You can do that easily with tf.meshgrid:
import tensorflow as tf

def combine(x, y):
    xx, yy = tf.meshgrid(x, y, indexing='ij')
    return tf.stack([tf.reshape(xx, [-1]), tf.reshape(yy, [-1])], axis=1)

x = tf.constant([1, 2])
y = tf.constant([3, 4, 5])
print(combine(x, y).numpy())
# [[1 3]
#  [1 4]
#  [1 5]
#  [2 3]
#  [2 4]
#  [2 5]]
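The same idea extends to any number of 1-D tensors by passing them all to tf.meshgrid; a sketch (combine_all is just a name I made up here):

def combine_all(*tensors):
    # One grid per input; flatten each grid into one column of the result
    grids = tf.meshgrid(*tensors, indexing='ij')
    return tf.stack([tf.reshape(g, [-1]) for g in grids], axis=1)

print(combine_all(tf.constant([1, 2]), tf.constant([3, 4, 5]), tf.constant([6, 7])).numpy())
# shape (12, 3): every (x, y, z) combination, one per row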
Now I have a tensor random_row, and I want to create a new 2-D tensor of known shape whose rows listed in random_row are all zeros and whose other rows are all ones, e.g.
random_row = [1, 3] # random_row is a tensor itself
# new_tensor needs to be another tensor whose rows listed in random_row are all zeros
# we already know new_tensor's shape to be (4, 2)
new_tensor = [[1, 1], [0, 0], [1, 1], [0, 0]]
How can I achieve that? Really appreciate it if someone can help!
You can do that with a function like this:
import tensorflow as tf

def zero_rows(x, idx):
    # Turn the row indices into a boolean mask over the rows of x
    n = tf.shape(x)[0]
    m = tf.scatter_nd(tf.expand_dims(idx, 1), tf.ones_like(idx, dtype=tf.bool), [n])
    # Select zeros where the mask is set and the original data elsewhere
    return tf.where(m, tf.zeros_like(x), x)

# Test
with tf.Graph().as_default(), tf.Session() as sess:
    x = tf.placeholder(tf.int32, [None, None])
    idx = tf.placeholder(tf.int32, [None])
    y = zero_rows(x, idx)
    print(sess.run(y, feed_dict={x: [[1, 2], [3, 4], [5, 6], [7, 8]], idx: [1, 3]}))
    # [[1 2]
    #  [0 0]
    #  [5 6]
    #  [0 0]]
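If you would rather avoid scatter_nd, the same boolean row mask could (I believe) also be built from tf.one_hot, e.g. by replacing the mask line with:

m = tf.cast(tf.reduce_max(tf.one_hot(idx, n, dtype=tf.int32), axis=0), tf.bool)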
I have a 2-D NxN matrix with elements from a set of real numbers. I need to identify the top n DxD sub-matrices with the largest sums and return the top-left index of each. I need to do it in TensorFlow.
For example I have following 4x4 matrix:
[1 1 4 4]
[1 1 4 4]
[3 3 2 2]
[3 3 2 2]
I need to identify the 2 sub-matrices with the largest sums and return their top-left indices. In the above case, the 2 sub-matrices with the largest and second-largest sums are:
[[4 4]     [[3 3]
 [4 4]]  &  [3 3]]
I need to return [[0,2],[2,0]], the top left indices to both the matrices. Thanks.
You can get that with the following snippet. The idea is to build a tensor holding the row and column indices of each element of each submatrix, then sum the submatrices and find the largest sums.
import tensorflow as tf
# Input data
input = tf.placeholder(tf.int32, [None, None])
# Submatrix dimension
dims = tf.placeholder(tf.int32, [2])
# Number of top submatrices to find
k = tf.placeholder(tf.int32, [])
# Sizes
input_shape = tf.shape(input)
rows, cols = input_shape[0], input_shape[1]
d_rows, d_cols = dims[0], dims[1]
subm_rows, subm_cols = rows - d_rows + 1, cols - d_cols + 1
# Index grids
ii, jj = tf.meshgrid(tf.range(subm_rows), tf.range(subm_cols), indexing='ij')
d_ii, d_jj = tf.meshgrid(tf.range(d_rows), tf.range(d_cols), indexing='ij')
# Add indices
subm_ii = ii[:, :, tf.newaxis, tf.newaxis] + d_ii
subm_jj = jj[:, :, tf.newaxis, tf.newaxis] + d_jj
# Make submatrices tensor
subm = tf.gather_nd(input, tf.stack([subm_ii, subm_jj], axis=-1))
# Add submatrices
subm_sum = tf.reduce_sum(subm, axis=(2, 3))
# Use TopK to find top submatrices
_, top_idx = tf.nn.top_k(tf.reshape(subm_sum, [-1]), tf.minimum(k, tf.size(subm_sum)))
# Get row and column
top_row = top_idx // subm_cols
top_col = top_idx % subm_cols
result = tf.stack([top_row, top_col], axis=-1)
# Test
with tf.Session() as sess:
    mat = [
        [1, 1, 4, 4],
        [1, 1, 4, 4],
        [3, 3, 2, 2],
        [3, 3, 2, 2],
    ]
    print(sess.run(result, feed_dict={input: mat, dims: [2, 2], k: 2}))
Output:
[[0 2]
 [1 2]]
Note that the output in this case is [0, 2] and [1, 2], but not [2, 0]. That is because the sub-matrix starting at [1, 2] has the same sum as the one at [2, 0] and comes first when the matrix is traversed by rows. If you pass k: 3 in the test, you get [2, 0] in the result as well.
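For instance, continuing the session above with k: 3 (the extra row assumes the tie-breaking just described):

print(sess.run(result, feed_dict={input: mat, dims: [2, 2], k: 3}))
# [[0 2]
#  [1 2]
#  [2 0]]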
Is there any easy way to do a cartesian product in TensorFlow, like itertools.product? I want to get combinations of the elements of two tensors (a and b); in Python this is possible via itertools as list(product(a, b)). I am looking for an alternative in TensorFlow.
I'm going to assume here that both a and b are 1-D tensors.
To get the cartesian product of the two, I would use a combination of tf.expand_dims and tf.tile:
a = tf.constant([1,2,3])
b = tf.constant([4,5,6,7])
tile_a = tf.tile(tf.expand_dims(a, 1), [1, tf.shape(b)[0]])
tile_a = tf.expand_dims(tile_a, 2)
tile_b = tf.tile(tf.expand_dims(b, 0), [tf.shape(a)[0], 1])
tile_b = tf.expand_dims(tile_b, 2)
cartesian_product = tf.concat([tile_a, tile_b], axis=2)
cart = tf.Session().run(cartesian_product)
print(cart.shape)
print(cart)
You end up with a len(a) * len(b) * 2 tensor where each combination of the elements of a and b is represented in the last dimension.
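If you want a flat list of pairs like list(product(a, b)) would give, you can (as far as I can tell) simply reshape that result:

pairs = tf.reshape(cartesian_product, (-1, 2))  # shape (len(a) * len(b), 2)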
A shorter solution to the same, using tf.add() for broadcasting (tested):
import tensorflow as tf
a = tf.constant([1,2,3])
b = tf.constant([4,5,6,7])
a, b = a[ None, :, None ], b[ :, None, None ]
cartesian_product = tf.concat( [ a + tf.zeros_like( b ),
tf.zeros_like( a ) + b ], axis = 2 )
with tf.Session() as sess:
    print( sess.run( cartesian_product ) )
will output:
[[[1 4]
  [2 4]
  [3 4]]

 [[1 5]
  [2 5]
  [3 5]]

 [[1 6]
  [2 6]
  [3 6]]

 [[1 7]
  [2 7]
  [3 7]]]
import tensorflow as tf
a = tf.constant([0, 1, 2])
b = tf.constant([2, 3])
c = tf.stack(tf.meshgrid(a, b, indexing='ij'), axis=-1)
c = tf.reshape(c, (-1, 2))
with tf.Session() as sess:
    print(sess.run(c))
Output:
[[0 2]
 [0 3]
 [1 2]
 [1 3]
 [2 2]
 [2 3]]
credit to jdehesa: link
A more succinct version of Sunreef's answer uses tf.stack instead of tf.concat:
a = tf.constant([1,2,3])
b = tf.constant([4,5,6,7])
tile_a = tf.tile(tf.expand_dims(a, 1), [1, tf.shape(b)[0]])
tile_b = tf.tile(tf.expand_dims(b, 0), [tf.shape(a)[0], 1])
ans = tf.stack([tile_a, tile_b], -1)
I'm inspired by Jaba's answer. If you want to get the cartesian product of two 2-D tensors, you can do it as follows:
Input a: [N, L] and b: [M, L]; the result is an [N*M, 2*L] tensor where each row concatenates a row of a with a row of b.
import tensorflow as tf
# Example inputs: a is [N, L] and b is [M, L]
N, M, L = 2, 3, 2
a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[5, 6], [7, 8], [9, 10]])
tile_a = tf.tile(tf.expand_dims(a, 1), [1, M, 1])        # [N, M, L]
tile_b = tf.tile(tf.expand_dims(b, 0), [N, 1, 1])        # [N, M, L]
cartesian_product = tf.concat([tile_a, tile_b], axis=2)  # [N, M, 2*L]
cartesian = tf.reshape(cartesian_product, [N * M, -1])   # [N*M, 2*L]
cart = tf.Session().run(cartesian)
print(cart.shape)
print(cart)