I want to independently shift the columns or rows of a 2-D tensor like:
import tensorflow as tf

a = tf.constant([[1,2,3], [4,5,6]])
shift = tf.constant([2, -1])
b = shift_fn(a, shift)
which gives me:
b = [[0, 0, 1], [5, 6, 0]]
I find that tf.roll() does something similar, but it wraps the elements around instead of padding with zeros. How can I get zero padding instead?
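For reference, this is the wrapping I mean (a quick check):
tf.roll(tf.constant([1, 2, 3]), shift=2, axis=0)  # [2, 3, 1] -- the elements wrap around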
One not-so-nice solution is to first pad the tensor using tf.pad, then use tf.roll inside tf.map_fn to shift each row (or column) of the padded tensor independently, and finally take the proper slice of the result. For example:
a = tf.constant([[1,2,3], [4,5,6]])
shift = tf.constant([2, -1])
cols = a.shape[1]
paddings = tf.constant([[0, 0], [cols, cols]])
padded_a = tf.pad(a, paddings)
tf.map_fn(
    lambda x: tf.roll(x[0], x[1], axis=-1),
    elems=(padded_a, shift),
    # fn_output_signature is required here because the output structure
    # (a single tensor) differs from that of elems (a tuple)
    fn_output_signature=a.dtype
)[:, cols:cols*2]
"""
<tf.Tensor: shape=(2, 3), dtype=int32, numpy=
array([[0, 0, 1],
[5, 6, 0]], dtype=int32)>
"""
Alternatively, to help save some memory, both padding and slicing could be done inside the fn function passed to tf.map_fn.
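For example, a minimal sketch of that idea (shift_fn is a hypothetical name; this assumes a 2-D tensor shifted along its last axis):

def shift_fn(a, shift):
    cols = a.shape[-1]
    def fn(x):
        row, s = x
        padded = tf.pad(row, [[cols, cols]])  # pad each row on both sides
        rolled = tf.roll(padded, s, axis=-1)  # any wrap-around stays in the padding
        return rolled[cols:2 * cols]          # slice back to the original width
    return tf.map_fn(fn, (a, shift), fn_output_signature=a.dtype)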
@tf.function
def shift(inputs, shift, axes, pad_val=0):
    assert len(shift) == len(axes)
    axis2shift = dict(zip(axes, shift))
    old_shape = inputs.shape
    for axis in axis2shift:
        pad_shape = list(inputs.shape)
        pad_shape[axis] = abs(axis2shift[axis])
        input_pad = tf.fill(pad_shape, pad_val)
        inputs = tf.concat((inputs, input_pad), axis=axis)
    input_roll = tf.roll(inputs, shift, axes)
    ret = tf.slice(input_roll, [0 for _ in range(len(old_shape))], old_shape)
    return ret
The above function implements shift with a tf.roll-like interface (shift and axes are expected to be Python lists rather than tensors). As @today described in a previous answer, it performs the pad-roll-slice operation.
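For example (a quick check of the zero-padding behavior; the expected output here is worked out by hand):

a = tf.constant([[1, 2, 3], [4, 5, 6]])
shift(a, [2], [1])  # rolls every row right by 2 with zero fill:
                    # [[0, 0, 1], [0, 0, 4]]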
Hope this helps
Suppose I have a boolean tensor that tells me whether the value at the given coordinate is "of interest":
import numpy as np
import tensorflow as tf

is_value_of_interest = np.array(
    [[0, 0, 0],
     [0, 1, 1],
     [1, 0, 0],
     [0, 0, 0],
     [0, 1, 1]])
is_value_of_interest_tf = tf.constant(is_value_of_interest)
Now I have another array/tensor containing the actual values themselves:
values = np.random.rand(5, 3)
values_tf = tf.constant(values)
What I want to do, is build a tensor that will return the next value of interest along the 0th axis. So in numpy/pandas this would be:
values_of_interest = np.where(is_value_of_interest, values, np.nan)
df = pandas.DataFrame(values_of_interest).bfill()
And the result is df, where each NaN has been replaced by the next value of interest below it in the same column (positions with no later value of interest stay NaN).
How can I build a tensor operation to achieve the same result, such that (df.values == my_tensor.numpy()).all()?
This was nontrivial due to the lack of any equivalent of pandas' ffill/bfill methods, so I began by implementing an ffill function for TensorFlow for the specific case of a 2-D input along the 0th dimension.
I'm not happy with the result as it's long and messy, so if anyone can improve it, please feel free to suggest a better answer:
def tf_ffill(data: tf.Tensor) -> tf.Tensor:
    """
    2d forward-fill along 0th dimension
    """
    is_value_of_interest = ~tf.math.is_nan(data)
    grid = tf.meshgrid(tf.range(data.shape[0]), tf.range(data.shape[1]), indexing="ij")
    nan_sentinel = tf.maximum(data.shape[0], data.shape[1])
    next_value_of_interest_row_index = (
        tf.cumsum(tf.cast(is_value_of_interest, tf.int32), axis=0) - 1
    )
    defined_values_mask = tf.where(next_value_of_interest_row_index == -1, False, True)
    next_value_of_interest_row_index = tf.where(
        defined_values_mask, next_value_of_interest_row_index, nan_sentinel
    )
    next_value_of_interest_col_index = tf.where(
        defined_values_mask, tf.cast(grid[1], tf.int32), nan_sentinel
    )
    # This is a rank N+1 tensor with the last two dimensions representing the x/y
    # coordinates of the index of the next point of interest. The only quirk
    # here is that the column is the second-to-last dimension and the row is the
    # last. This is to align it with the representation produced by the
    # boolean_mask op.
    values_of_interest_index_lookup = tf.concat(
        [
            tf.expand_dims(next_value_of_interest_col_index, axis=2),
            tf.expand_dims(next_value_of_interest_row_index, axis=2),
        ],
        2,
    )
    values_of_interest_indices = tf.ragged.boolean_mask(
        tf.transpose(grid[0]), tf.transpose(is_value_of_interest)
    )
    mapped_indices = tf.gather_nd(
        values_of_interest_indices,
        tf.where(
            values_of_interest_index_lookup == nan_sentinel,
            0,
            values_of_interest_index_lookup,
        ),
    )
    mapped_indices = tf.where(defined_values_mask, mapped_indices, nan_sentinel)
    res = tf.experimental.numpy.take_along_axis(
        data, tf.where(mapped_indices == nan_sentinel, 0, mapped_indices), axis=0
    )
    return tf.where(defined_values_mask, res, np.nan)

def tf_bfill(data: tf.Tensor) -> tf.Tensor:
    return tf.reverse(tf_ffill(tf.reverse(data, axis=[0])), axis=[0])
Once you have tf_bfill, getting to the answer is trivial:
data = tf.where(is_value_of_interest, values_tf, np.nan)
tf_bfill(data)
This whole question could be rephrased as "how do you do a forward fill in tensorflow?"
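For what it's worth, here is a more compact ffill sketch along axis 0 (my own attempt, only lightly checked; tf_ffill_v2 is a hypothetical name) that tracks a running maximum of valid row indices:

def tf_ffill_v2(data: tf.Tensor) -> tf.Tensor:
    mask = ~tf.math.is_nan(data)
    rows = tf.broadcast_to(tf.range(tf.shape(data)[0])[:, None], tf.shape(data))
    # running per-column maximum of the row index of the last valid value seen so far
    last_valid = tf.scan(tf.maximum, tf.where(mask, rows, -1))
    filled = tf.experimental.numpy.take_along_axis(data, tf.maximum(last_valid, 0), axis=0)
    return tf.where(last_valid >= 0, filled, np.nan)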
I already know that NumPy "double-slice" with fancy indexing creates copies instead of views, and the solution seems to be to convert them to one single slice (e.g. this question). However, I am facing a particular problem where I need to deal with integer indexing followed by boolean indexing, and I am at a loss what to do. The problem (simplified) is as follows:
a = np.random.randn(2, 3, 4, 4)
idx_x = np.array([[1, 2], [1, 2], [1, 2]])
idx_y = np.array([[0, 0], [1, 1], [2, 2]])
print(a[..., idx_y, idx_x].shape) # (2, 3, 3, 2)
mask = (np.random.randn(2, 3, 3, 2) > 0)
a[..., idx_y, idx_x][mask] = 1 # assignment doesn't work
How can I make the assignment work?
I'm not sure, but one idea is to do the broadcasting manually and apply the mask accordingly, just like Tim suggests. idx_x and idx_y both have shape (3, 2), so their flattened forms can be broadcast to shape (6, 6), the Cartesian product of the 6 indices with themselves:
x = np.broadcast_to(idx_x.ravel(), (6,6))
y = np.broadcast_to(idx_y.ravel(), (6,6))
# this should be the same as
x,y = np.meshgrid(idx_x, idx_y)
Now reshape the mask to match the broadcasted indices and use it to select:
mask = mask.reshape(6,6)
a[..., x[mask], y[mask]] = 1
The assignment now works, but I am not sure if this is the exact assignment you wanted.
OK, apparently I was overcomplicating things. There is no need to combine the indexing: fancy indexing on the right-hand side creates a copy, but the same fancy index on the left-hand side of an assignment writes back into a. The following code solves the problem elegantly:
b = a[..., idx_y, idx_x]
b[mask] = 1
a[..., idx_y, idx_x] = b
print(a[..., idx_y, idx_x][mask]) # all 1s
EDIT: Use @Kevin's solution, which actually gets the dimensions correct!
I haven't tried it specifically on your sample code, but I had a similar issue before. I think I solved it by applying the mask to the indices instead, something like:
a[..., idx_y[mask], idx_x[mask]] = 1
That way, numpy can assign the values to the a array correctly.
EDIT2: Posting some test code here, since comments remove formatting.
a = np.arange(27).reshape([3, 3, 3])
ind_x = np.array([[0, 0], [1, 2]])
ind_y = np.array([[1, 2], [1, 1]])
x = np.broadcast_to(ind_x.ravel(), (4, 4))
y = np.broadcast_to(ind_y.ravel(), (4, 4)).T
# x1, y2 = np.meshgrid(ind_x, ind_y) # above should be the same as this
mask = a[:, ind_y, ind_x] % 2 == 0 # what should this reshape to?
# a[..., x[mask], y[mask]] = 1 # Then you can mask away (may also need to reshape a or the masked x or y)
I would like to do something like this piece of Numpy code, just in TensorFlow:
a = np.zeros([5, 2])
idx = np.random.randint(0, 2, (5,))
row_idx = np.arange(5)
a[row_idx, idx] = row_idx
meaning: index all rows of a 2-D tensor with another tensor, and then assign a tensor to the result. I am absolutely clueless about how to achieve this.
What I can do so far in TensorFlow is the following:
a = tf.Variable(tf.zeros((5, 2)))
idx = tf.constant([0, 1, 1, 0, 1])
row_idx = tf.range(5)
indices = tf.transpose([row_idx, idx])
r = tf.gather_nd(a, indices)
tf.assign(r, row_idx) # This line does not work
When I try to execute this, I get the following error in the last line:
AttributeError: 'Tensor' object has no attribute 'assign'
Is there a way around this? There must be some nice way to do this, I don't want to iterate with for loops over the data and manually assign this on a per-element basis. I know that right now array-indexing is not as advanced as Numpy's functionality, but this should still be possible somehow.
What you are trying to do is frequently done with tf.scatter_nd_update. However, that is often not the right way to go: you should not need a variable, just another tensor produced from the original tensor with some replaced values. Unfortunately, there is no straightforward way to do this in general. If your original tensor is really all zeros, though, you can simply use tf.scatter_nd:
import tensorflow as tf
idx = tf.constant([0, 1, 1, 0, 1])
row_idx = tf.range(5)
indices = tf.stack([row_idx, idx], axis=1)
a = tf.scatter_nd(indices, row_idx, (5, 2))
with tf.Session() as sess:
    print(sess.run(a))
    # [[0 0]
    #  [0 1]
    #  [0 2]
    #  [3 0]
    #  [0 4]]
However, if the initial tensor is not all zeros, it is more complicated. One way to handle that is to do the same as above, then build a mask of the updated positions, and select between the original and the update according to that mask:
import tensorflow as tf
a = tf.ones((5, 2), dtype=tf.int32)
idx = tf.constant([0, 1, 1, 0, 1])
row_idx = tf.range(5)
indices = tf.stack([row_idx, idx], axis=1)
a_update = tf.scatter_nd(indices, row_idx, (5, 2))
update_mask = tf.scatter_nd(indices, tf.ones_like(row_idx, dtype=tf.bool), (5, 2))
a = tf.where(update_mask, a_update, a)
with tf.Session() as sess:
    print(sess.run(a))
    # [[0 1]
    #  [1 1]
    #  [1 2]
    #  [3 1]
    #  [1 4]]
I don't know about previous versions, but in TensorFlow 2.1 you can use tf.tensor_scatter_nd_update to do what you want in a single line. With your code example:
a = tf.zeros((5, 2), dtype=tf.int32)
idx = tf.constant([0, 1, 1, 0, 1])
row_idx = tf.range(5)
indices = tf.transpose([row_idx, idx])
a = tf.tensor_scatter_nd_update(a, indices, row_idx)
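This produces the same result as the tf.scatter_nd example above:
# [[0 0]
#  [0 1]
#  [0 2]
#  [3 0]
#  [0 4]]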
An example
Suppose I have a tensor values with shape (2,2,2):
values = [[[0, 1],[2, 3]],[[4, 5],[6, 7]]]
and a tensor indicies with shape (2,2) that describes which values to select in the innermost dimension:
indicies = [[1,0],[0,0]]
Then the result will be a (2,2) matrix with these values:
result = [[1,2],[4,6]]
What is this operation called in TensorFlow, and how do I do it?
General
Note that the shape (2,2,2) above is only an example; the tensors can have any dimensions. Some conditions for this operation:
ndim(values) - 1 == ndim(indicies)
values.shape[:-1] == indicies.shape == result.shape
indicies.max() <= values.shape[-1] - 1
I think you can emulate this with tf.gather_nd. You will just have to convert "your" indices to a representation suitable for tf.gather_nd. The example below is tied to your specific case, i.e. input tensors of shape (2, 2, 2), but it should give you an idea of how to write the conversion for input tensors of arbitrary shape, although I am not sure how easy that would be to implement (I haven't thought about it for long). Also, I'm not claiming that this is the easiest possible solution.
import tensorflow as tf
import numpy as np
values = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
values_tf = tf.constant(values)
indices = np.array([[1, 0], [0, 0]])
converted_idx = []
for k in range(values.shape[0]):
    outer = []
    for l in range(values.shape[1]):
        inds = [k, l, indices[k][l]]
        outer.append(inds)
        print(inds)
    converted_idx.append(outer)

with tf.Session() as sess:
    result = tf.gather_nd(values_tf, converted_idx)
    print(sess.run(result))
This prints
[[1 2]
[4 6]]
Edit: To handle arbitrary shapes, here is a recursive solution that should work (only tested on your example):
def convert_idx(last_dim_vals, ori_indices, access_to_ori, depth):
    if depth == len(last_dim_vals.shape) - 1:
        inds = access_to_ori + [ori_indices[tuple(access_to_ori)]]
        return inds
    outer = []
    for k in range(ori_indices.shape[depth]):
        inds = convert_idx(last_dim_vals, ori_indices, access_to_ori + [k], depth + 1)
        outer.append(inds)
    return outer
You can use this together with the original code I posted like so:
...
converted_idx = convert_idx(values, indices, [], 0)
with tf.Session() as sess:
    result = tf.gather_nd(values_tf, converted_idx)
    print(sess.run(result))
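As a side note, in more recent TensorFlow versions (1.14+, if I remember correctly) the same selection can be written directly with tf.gather's batch_dims argument:

values = tf.constant([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
indicies = tf.constant([[1, 0], [0, 0]])
# treat all leading axes as batch axes and pick one element along the last axis
result = tf.gather(values, indicies, batch_dims=2)  # [[1, 2], [4, 6]]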
If A is a TensorFlow variable like so
A = tf.Variable([[1, 2], [3, 4]])
and index is another variable
index = tf.Variable([0, 1])
I want to use this index to select columns in each row. In this case, item 0 from the first row and item 1 from the second row.
If A were a NumPy array, then to get the columns of the corresponding rows mentioned in index we could do
x = A[np.arange(A.shape[0]), index]
and the result would be
[1, 4]
What is the TensorFlow equivalent operation (or operations) for this? I know TensorFlow doesn't support many indexing operations. What would be the workaround if it cannot be done directly?
You can extend your column indices with row indices and then use gather_nd:
import tensorflow as tf
A = tf.constant([[1, 2], [3, 4]])
indices = tf.constant([1, 0])
# prepare row indices
row_indices = tf.range(tf.shape(indices)[0])
# zip row indices with column indices
full_indices = tf.stack([row_indices, indices], axis=1)
# retrieve values by indices
S = tf.gather_nd(A, full_indices)
session = tf.InteractiveSession()
session.run(S)
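This evaluates to [2 3]: element 1 of the first row and element 0 of the second row.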
You can use tf.one_hot to create a one-hot array and use it as a boolean mask to select the values you'd like:
A = tf.Variable([[1, 2], [3, 4]])
index = tf.Variable([0, 1])
one_hot_mask = tf.one_hot(index, A.shape[1], on_value = True, off_value = False, dtype = tf.bool)
output = tf.boolean_mask(A, one_hot_mask)
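Here output evaluates to [1 4], since the mask selects column 0 of the first row and column 1 of the second.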
After dabbling around for quite a while, I found two functions that could be useful.
One is tf.gather_nd(), which might be useful if you can produce a tensor of the form [[0, 0], [1, 1]]; with it you could do
index = tf.constant([[0, 0], [1, 1]])
tf.gather_nd(A, index)
If for some reason you are unable to produce a vector of the form [[0, 0], [1, 1]] (I couldn't, as the number of rows in my case depended on a placeholder), then the workaround I found is to use tf.py_func(). Here is example code showing how this can be done:
import tensorflow as tf
import numpy as np
def index_along_every_row(array, index):
    N, _ = array.shape
    return array[np.arange(N), index]
a = tf.Variable([[1, 2], [3, 4]], dtype=tf.int32)
index = tf.Variable([0, 1], dtype=tf.int32)
a_slice_op = tf.py_func(index_along_every_row, [a, index], [tf.int32])[0]
session = tf.InteractiveSession()
a.initializer.run()
index.initializer.run()
a_slice = a_slice_op.eval()
a_slice will be the NumPy array [1, 4]. (Note that in TF 2.x, tf.py_func has been replaced by tf.py_function.)
We can do the same using this combination of map_fn and gather_nd.
def get_element(a, indices):
    """
    Outputs (ith element of indices) from (ith row of a)
    """
    return tf.map_fn(lambda x: tf.gather_nd(x[0], x[1]),
                     (a, indices),
                     dtype=tf.float32)
Here's an example usage.
A = tf.constant(np.array([[1, 2, 3],
                          [4, 5, 6],
                          [7, 8, 9]], dtype=np.float32))
idx = tf.constant(np.array([[2], [1], [0]]))
elems = get_element(A, idx)

with tf.Session() as sess:
    e = sess.run(elems)
    print(e)
I don't know if this will be much slower than the other answers. It has the advantage that you don't need to specify the number of rows of A in advance, as long as a and indices have the same number of rows at runtime.
Note that the output of the above will be rank 1. If you'd prefer it to have rank 2, replace gather_nd with gather.
I couldn't get the accepted answer to work in TensorFlow 2 when I incorporated it into a loss function; something about GradientTape didn't like it. My solution is an altered version of the accepted answer:
import tensorflow as tf

arr = tf.constant([[1., 2.], [3., 4.]])  # example tensor (assumed for illustration)
ind = tf.constant([0, 1])                # example per-row column indices (assumed)

def get_rows(arr):
    N, _ = arr.shape
    return N

num_rows = tf.py_function(get_rows, [arr], [tf.int32])[0]
rng = tf.range(0, num_rows)
ind = tf.stack([rng, ind], axis=1)
result = tf.gather_nd(arr, ind)  # [1., 4.]
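A possibly simpler variant (my own assumption, not verified inside a loss function) is to get the row count with tf.shape instead of a py_function:

col_idx = tf.constant([0, 1])  # same example column indices (assumed)
rng = tf.range(tf.shape(arr)[0])
result = tf.gather_nd(arr, tf.stack([rng, col_idx], axis=1))  # [1., 4.]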