I have a tensor random_row, and I want to create a new 2-D tensor of known shape in which the rows listed in random_row are all zeros and every other row is all ones, e.g.
random_row = [1, 3] # random_row is a tensor itself
# new_tensor should be another tensor whose rows listed in random_row are all zeros
# we already know new_tensor's shape to be (4, 2)
new_tensor = [[1, 1], [0, 0], [1, 1], [0, 0]]
How can I achieve that? Really appreciate it if someone can help!
You can do that with a function like this:
import tensorflow as tf

def zero_rows(x, idx):
    # Turn row indices into a boolean mask
    n = tf.shape(x)[0]
    m = tf.scatter_nd(tf.expand_dims(idx, 1), tf.ones_like(idx, dtype=tf.bool), [n])
    # Select zeros where the indices are or the data elsewhere
    return tf.where(m, tf.zeros_like(x), x)

# Test
with tf.Graph().as_default(), tf.Session() as sess:
    # Placeholders for the data and the row indices
    x = tf.placeholder(tf.int32, [None, None])
    idx = tf.placeholder(tf.int32, [None])
    y = zero_rows(x, idx)
    print(sess.run(y, feed_dict={x: [[1, 2], [3, 4], [5, 6], [7, 8]], idx: [1, 3]}))
    # [[1 2]
    #  [0 0]
    #  [5 6]
    #  [0 0]]
I have arrays of different sizes in which each element's value is its index in the flattened array. Is there a way to print out every element of each square going clockwise? I thought about slicing the arrays, but that doesn't go clockwise and only prints one square, not all of them.
arr1 = np.array([[0, 1],[2, 3]])
arr2 = np.array([[0, 1, 2],[3, 4, 5]])
arr3 = np.array([[0, 1],[2, 3],[4, 5]])
print(arr1[0:2,0:2])
print()
print(arr2[0:2,0:2])
print()
print(arr3[0:2,0:2])
output:
[[0 1]
 [2 3]]

[[0 1]
 [3 4]]

[[0 1]
 [2 3]]
Maybe this helps
import numpy as np
a = np.random.randint(0, 10, size=(7, 9))
print(a)
for i in range(a.shape[0]):
    for j in range(a.shape[1]):
        x = a[i:i+2, j:j+2]
        if x.flatten().size == 4:
            print(x)  # every 2 by 2 array of 4 elements
            m = x.copy()  # copy x so the original is not changed!
            m[1] = m[1][::-1]  # reverse row 1 elements
            print(m.flatten())  # 1d array clockwise
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

def zay_117(arr):
    output = []
    for row in sliding_window_view(arr, window_shape=(2, 2)):
        for sq in row:
            output.append(np.hstack((sq[0, 0:2], sq[1, 0:2][::-1])).tolist())
    return output
# zay_117(arr1)
# [[0, 1, 3, 2]]
# zay_117(arr2)
# [[0, 1, 4, 3], [1, 2, 5, 4]]
# zay_117(arr3)
# [[0, 1, 3, 2], [2, 3, 5, 4]]
I need to construct a matrix z that would contain combinations of pairs of rows of a matrix x.
x = tf.constant([[1, 3],
                 [2, 4],
                 [0, 2],
                 [0, 1]], dtype=tf.int32)
z = [[[1, 2],
      [1, 0],
      [1, 0],
      [2, 0],
      [2, 0],
      [0, 0]],
     [[3, 4],
      [3, 2],
      [3, 1],
      [4, 2],
      [4, 1],
      [2, 1]]]
For every pair of rows, the values are paired up column by column.
I could not find any function or come up with a good idea to do that.
Update 1
So I need the final shape to be 2*6*2, like the z above.
Unfortunately, it's a bit more involved than one would like using TensorFlow operators only. I would go with creating the indices for all combinations with a while_loop and then use tf.gather to collect the values:
import tensorflow as tf

x = tf.constant([[1, 3],
                 [2, 4],
                 [3, 2],
                 [0, 1]], dtype=tf.int32)
m = tf.constant([], shape=(0, 2), dtype=tf.int32)
_, idxs = tf.while_loop(
    lambda i, m: i < tf.shape(x)[0] - 1,
    lambda i, m: (i + 1,
                  tf.concat([m, tf.stack([tf.tile([i], (tf.shape(x)[0] - 1 - i,)),
                                          tf.range(i + 1, tf.shape(x)[0])], axis=1)],
                            axis=0)),
    loop_vars=(0, m),
    shape_invariants=(tf.TensorShape([]), tf.TensorShape([None, 2])))
z = tf.reshape(tf.transpose(tf.gather(x, idxs), (2, 0, 1)), (-1, 2))
# <tf.Tensor: shape=(12, 2), dtype=int32, numpy=
# array([[1, 2],
# [1, 3],
# [1, 0],
# [2, 3],
# [2, 0],
# [3, 0],
# [3, 4],
# [3, 2],
# [3, 1],
# [4, 2],
# [4, 1],
# [2, 1]])>
This should work in both TF1 and TF2.
If the length of x is known in advance, you don't need the while_loop and could simply precompute the indices in python then place them in a constant.
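For example, a minimal sketch of that variant (my addition, assuming the number of rows is a fixed Python int):
import itertools
import tensorflow as tf

x = tf.constant([[1, 3],
                 [2, 4],
                 [3, 2],
                 [0, 1]], dtype=tf.int32)
n_rows = 4  # assumed known in advance
# All index pairs (i, j) with i < j, precomputed in Python
idxs = tf.constant(list(itertools.combinations(range(n_rows), 2)), dtype=tf.int32)
z = tf.reshape(tf.transpose(tf.gather(x, idxs), (2, 0, 1)), (-1, 2))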
Here is a way to do that without a loop:
import tensorflow as tf
x = tf.constant([[1, 3],
                 [2, 4],
                 [0, 2],
                 [0, 1]], dtype=tf.int32)
# Number of rows
n = tf.shape(x)[0]
# Grid of indices
ri = tf.range(0, n - 1)
rj = ri + 1
ii, jj = tf.meshgrid(ri, rj, indexing='ij')
# Stack together
grid = tf.stack([ii, jj], axis=-1)
# Get upper triangular part
m = ii < jj
idx = tf.boolean_mask(grid, m)
# Get values
g = tf.gather(x, idx, axis=0)
# Rearrange result
result = tf.transpose(g, [2, 0, 1])
print(result.numpy())
# [[[1 2]
# [1 0]
# [1 0]
# [2 0]
# [2 0]
# [0 0]]
#
# [[3 4]
# [3 2]
# [3 1]
# [4 2]
# [4 1]
# [2 1]]]
I have a 2D NxN matrix with elements from a set of real numbers. I need to identify the top n DxD sub-matrices with the largest sums and return the top-left index of each sub-matrix. I need to do it in TensorFlow.
For example I have following 4x4 matrix:
[1 1 4 4]
[1 1 4 4]
[3 3 2 2]
[3 3 2 2]
I need to identify the 2 sub-matrices that have the largest sums and return their top-left indices. In the above case, the sub-matrices with the largest and second largest sums are:
[[4 4]      [[3 3]
 [4 4]]  &   [3 3]]
I need to return [[0,2],[2,0]], the top left indices to both the matrices. Thanks.
You can get that with the following snippet. The idea is to build a tensor holding the row and column indices of each element of each submatrix, then sum the submatrices and find the largest sums.
import tensorflow as tf
# Input data
input = tf.placeholder(tf.int32, [None, None])
# Submatrix dimension
dims = tf.placeholder(tf.int32, [2])
# Number of top submatrices to find
k = tf.placeholder(tf.int32, [])
# Sizes
input_shape = tf.shape(input)
rows, cols = input_shape[0], input_shape[1]
d_rows, d_cols = dims[0], dims[1]
subm_rows, subm_cols = rows - d_rows + 1, cols - d_cols + 1
# Index grids
ii, jj = tf.meshgrid(tf.range(subm_rows), tf.range(subm_cols), indexing='ij')
d_ii, d_jj = tf.meshgrid(tf.range(d_rows), tf.range(d_cols), indexing='ij')
# Add indices
subm_ii = ii[:, :, tf.newaxis, tf.newaxis] + d_ii
subm_jj = jj[:, :, tf.newaxis, tf.newaxis] + d_jj
# Make submatrices tensor
subm = tf.gather_nd(input, tf.stack([subm_ii, subm_jj], axis=-1))
# Add submatrices
subm_sum = tf.reduce_sum(subm, axis=(2, 3))
# Use TopK to find top submatrices
_, top_idx = tf.nn.top_k(tf.reshape(subm_sum, [-1]), tf.minimum(k, tf.size(subm_sum)))
# Get row and column
top_row = top_idx // subm_cols
top_col = top_idx % subm_cols
result = tf.stack([top_row, top_col], axis=-1)
# Test
with tf.Session() as sess:
    mat = [
        [1, 1, 4, 4],
        [1, 1, 4, 4],
        [3, 3, 2, 2],
        [3, 3, 2, 2],
    ]
    print(sess.run(result, feed_dict={input: mat, dims: [2, 2], k: 2}))
Output:
[[0 2]
 [1 2]]
Note that the output in this case is [0, 2] and [1, 2], but not [2, 0]. That is because the submatrix starting at [1, 2] has the same sum as the one starting at [2, 0] (4+4+2+2 = 12 and 3+3+3+3 = 12), and it comes first when the matrix is traversed by rows. If you pass k: 3 in the test, you would get [2, 0] in the result too.
I am trying to extract all slices of length 4 along the 0th axis of a 2-dim tensor. So far I can do it by mixing pure Python with TensorFlow.
r = test.shape[0] # test should be a tensor
n = 4
a_list = list(range(r))
the_list = np.array([a_list[slice(i, i+n)] for i in range(r - n+1)])
test_stacked = tf.stack(tf.gather(test, the_list))
What would be an efficient way of doing that without using pure Python? Note that the "test" array is actually supposed to be a tensor, thus its shape isn't known before I execute the first part of the graph.
A full vanilla example:
array = np.array([[0, 1],[1, 2],[2, 3],[3, 4],[4, 5],[5, 6]])
array.shape # (6,2)
r = array.shape[0]
n = 4
a_list = list(range(r))
the_list = np.array([a_list[slice(i, i+n)] for i in range(r - n+1)])
result = array[the_list] # all possible slices of length 4 of the array along 0th axis
result.shape # (3, 4, 2)
result:
[[[0 1]
  [1 2]
  [2 3]
  [3 4]]

 [[1 2]
  [2 3]
  [3 4]
  [4 5]]

 [[2 3]
  [3 4]
  [4 5]
  [5 6]]]
You may want to try the more general tf.extract_image_patches.
import tensorflow as tf
a = tf.constant([[0, 1],[1, 2],[2, 3],[3, 4],[4, 5],[5, 6]])
# tf.extract_image_patches requires a [batch, in_rows, in_cols, depth] tensor
a = a[None, :, :, None]
b = tf.extract_image_patches(a,
                             ksizes=[1, 4, 2, 1],
                             strides=[1, 1, 1, 1],
                             rates=[1, 1, 1, 1],
                             padding='VALID')
b = tf.reshape(tf.squeeze(b), [-1, 4, 2])
sess = tf.InteractiveSession()
print(b.eval())
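As a side note (my addition, not part of the original answer): in TF 2.x the same op is exposed as tf.image.extract_patches, which takes sizes= instead of ksizes=. A rough eager-mode sketch:
import tensorflow as tf

a = tf.constant([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]])
a = a[None, :, :, None]  # [batch, in_rows, in_cols, depth]
b = tf.image.extract_patches(a, sizes=[1, 4, 2, 1], strides=[1, 1, 1, 1],
                             rates=[1, 1, 1, 1], padding='VALID')
print(tf.reshape(b, [-1, 4, 2]).numpy())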
I believe gather_nd is what you are looking for.
# a is a tensor of size (6, 2)
def get_indices(l, d):
    return [[[j] for j in range(i, i + d)] for i in range(l - d + 1)]

b = tf.gather_nd(a, get_indices(6, 4))
# b is a tensor of shape (3, 4, 2)
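If the number of rows is only known at run time, here is a rough sketch of the same idea built with TF ops instead of a Python list comprehension (my addition, not part of the original answer):
import tensorflow as tf

a = tf.constant([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]])
n = 4
r = tf.shape(a)[0]  # works even when the number of rows is dynamic
# Row i of idx is [i, i+1, ..., i+n-1]
idx = tf.range(r - n + 1)[:, None] + tf.range(n)[None, :]
b = tf.gather(a, idx)  # shape (r - n + 1, n, 2), i.e. (3, 4, 2) here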
I have two tensors, and I have to iterate over the first one to keep only the elements that are also inside the other tensor. There is only one element of t2 that is also inside t1. Here is an example:
t1 = tf.where(values > 0) # I get some indices example [6, 0], [3, 0]
t2 = tf.where(values2 > 0) # I get [4, 0], [3, 0]
t3 = .... # [3, 0]
I've tried to evaluate and iterate over them using .eval() and checked whether an element of t2 is in t1 using the in operator, but it doesn't work. Is there a TensorFlow function that can do that?
edit
for index in xrange(max_indices):
    indices = tf.where(tf.equal(values, (index + 1))).eval()  # indices: [[1 0]\n [4 0]\n [9 0]]
    cent_indices = tf.where(centers > 0).eval()  # cent_indices: [[6 0]\n [9 0]]
    indices_list.append(indices)
    for cent in cent_indices:
        if cent in indices:
            centers_list.append(cent)
            break
On the first iteration cent has the value [6 0], and yet it enters the if condition.
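I believe the reason (my reading of NumPy's behaviour, added for context) is that the in operator on a 2-D array only checks whether any single element matches, not whole rows:
import numpy as np

indices = np.array([[1, 0], [4, 0], [9, 0]])
cent = np.array([6, 0])
# `cent in indices` is effectively (cent == indices).any(),
# so the 0 in cent is already enough to make it True
print(cent in indices)                                    # True
print(any(np.array_equal(cent, row) for row in indices))  # False (row-wise check)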
answer
for index in xrange(max_indices):
    indices = tf.where(tf.equal(values, (index + 1))).eval()
    cent_indices = tf.where(centers > 0).eval()
    indices_list.append(indices)
    for cent in cent_indices:
        # batch_item is an iterator from an outer loop
        if values[batch_item, cent[0]].eval() == (index + 1):
            centers_list.append(tf.constant(cent))
            break
This solution is specific to my task, but if you are looking for a solution for 1-D tensors, I suggest having a look at tf.sets.set_intersection.
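A rough sketch of that suggestion (my addition; the values are hypothetical, and the 1-D lists are wrapped in a batch dimension because the set ops work along the last axis):
import tensorflow as tf

a = tf.constant([[6, 3]], dtype=tf.int64)  # e.g. flattened indices from t1
b = tf.constant([[4, 3]], dtype=tf.int64)  # e.g. flattened indices from t2
common = tf.sets.set_intersection(a, b)    # returns a SparseTensor
with tf.Session() as sess:
    print(sess.run(tf.sparse_tensor_to_dense(common)))  # [[3]]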
Is this what you wanted? I used just these two test cases.
import numpy as np
import tensorflow as tf

x = tf.constant([[1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 1]])
y = tf.constant([[1, 2, 3, 4, 3, 6], [1, 2, 3, 4, 5, 1]])
# x = tf.constant([[1, 2], [4, 5], [7, 7]])
# y = tf.constant([[7, 7], [3, 5]])

def match(xiterations, yiterations, yvalues, xvalues):
    # Print every index pair from yvalues that also occurs in xvalues
    for i in range(xiterations):
        for j in range(yiterations):
            if np.array_equal(yvalues[j], xvalues[i]):
                print(yvalues[j])

with tf.Session() as sess:
    xindex = tf.where(x > 4)
    yindex = tf.where(y > 4)
    xvalues = xindex.eval()
    yvalues = yindex.eval()
    xiterations = tf.shape(xvalues)[0].eval()
    yiterations = tf.shape(yvalues)[0].eval()
    print(tf.shape(xvalues)[0].eval())
    print(tf.shape(yvalues)[0].eval())
    if tf.shape(xvalues)[0].eval() >= tf.shape(yvalues)[0].eval():
        match(xiterations, yiterations, yvalues, xvalues)
    else:
        match(yiterations, xiterations, xvalues, yvalues)