Argmin on possibly empty tensor - python

I want to take the argmin of a possibly empty tensor in TensorFlow and get back some sentinel value, like an empty list or -1.
By default, argmin on an empty tensor yields this:
InvalidArgumentError: Reduction axis 0 is empty in shape [0]
Perhaps using tf.cond could help, but I'm not sure how to check whether a vector is empty or not:
tf.equal(tf.size(vec), tf.constant(0)).eval()
ValueError: Operation 'Equal_5' has been marked as not fetchable.
There's also the issue of laziness, i.e. whether tf.cond evaluates only the branch that is actually taken.
tf.cond(isEmpty(vec), lambda: tf.constant(0, dtype="int64"), lambda: tf.argmin(vec))
Provided the first issue is resolved, i.e. it's possible to construct some isEmpty function, would the above cond work?

It was just the interactive session acting up. The following does work:
import tensorflow as tf
sess = tf.Session()
a = tf.ones((2, 2, 4))
b = tf.zeros((2, 2, 4))
zero = tf.constant(0, dtype="float32")
idx = tf.constant([1, 1])
region_a = tf.gather_nd(a, idx)
region_b = tf.gather_nd(b, idx)
where_a = tf.not_equal(region_a, zero)
where_b = tf.not_equal(region_b, zero)
inuse_a = tf.where(where_a)
inuse_b = tf.where(where_b)
inuse_a_flat = tf.reshape(inuse_a, [-1])
inuse_b_flat = tf.reshape(inuse_b, [-1])
qvals = tf.ones(4, dtype="float32")
qvals_flat = tf.reshape(qvals, [-1])
inuse_a_q = tf.gather(qvals_flat, inuse_a_flat)
inuse_b_q = tf.gather(qvals_flat, inuse_b_flat)
size_iuq_a = tf.size(inuse_a_q)
size_iuq_b = tf.size(inuse_b_q)
eq_a = tf.equal(tf.size(inuse_a_q), tf.constant(0))
eq_b = tf.equal(tf.size(inuse_b_q), tf.constant(0))
argmin_a = tf.cond(
    eq_a,
    lambda: tf.constant(-1, dtype="int64"),
    lambda: tf.argmin(inuse_a_flat))
argmin_b = tf.cond(
    eq_b,
    lambda: tf.constant(-1, dtype="int64"),
    lambda: tf.argmin(inuse_b_flat))
with sess.as_default():
    print(inuse_a_q.eval())
    print(inuse_b_q.eval())
    print("\n")
    print(size_iuq_a.eval())
    print(size_iuq_b.eval())
    print("\n")
    print(eq_a.eval())
    print(eq_b.eval())
    print("\n")
    print(argmin_a.eval())
    # 0
    print(argmin_b.eval())
    # -1
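The same guard can be wrapped into a reusable helper; a minimal sketch (safe_argmin is just an illustrative name, not from the original post):
def safe_argmin(vec):
    # Returns the argmin of a 1-D tensor, or -1 if it is empty.
    is_empty = tf.equal(tf.size(vec), 0)
    return tf.cond(is_empty,
                   lambda: tf.constant(-1, dtype="int64"),
                   lambda: tf.argmin(vec))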

Related

How do I assign a 2D subset of my tensor with another 2D tensor in Keras?

I have two 3-D tensors, img and gen. How do I assign a 2D subset of img with a 2D subset of gen? The following doesn't work, as TensorFlow doesn't allow direct assignment to tensors.
img[96:160 , 144:240 , :] = gen[96:160 , 144:240 , :]
EDIT:
This is the surrounding code. So I'm using a custom keras layer. This layer must receive the input image img and the generated image x. It must replace a portion of img with x and must return the modified img.
def patcher(tensors):
    img = tensor[1]
    gen = tensor[0]
    # This is where the slicing must happen
    img[96:160, 144:240, :] = gen[96:160, 144:240, :]
    return [img]
img = Input( .. )
x = Conv( .. )(img)
out = Lambda(patcher,lambda a : [a[1]] )([x , img])
model = Model(img, out)
Currently, you cannot replace slices of a tensor in a straightforward manner. I actually opened an issue about it because it is something that people keep asking for. With the current API, you have to sort of figure out the best way to build the tensor you want. In this case, assuming img and gen have both the same shape, this is one way you could do that:
import tensorflow as tf
import numpy as np
# Input
img = tf.placeholder(tf.float32, [None, None, None])
gen = tf.placeholder(tf.float32, [None, None, None])
row_start = tf.placeholder(tf.int32, [])
row_end = tf.placeholder(tf.int32, [])
col_start = tf.placeholder(tf.int32, [])
col_end = tf.placeholder(tf.int32, [])
# Masks rows and columns to be replaced
shape = tf.shape(img)
rows = shape[0]
cols = shape[1]
channels = shape[2]
i = tf.range(rows)
row_mask = (row_start <= i) & (i < row_end)
j = tf.range(cols)
col_mask = (col_start <= j) & (j < col_end)
# Full mask of replaced elements
mask = row_mask[:, tf.newaxis] & col_mask
# Select elements from flattened arrays
img_flat = tf.reshape(img, [-1, channels])
gen_flat = tf.reshape(gen, [-1, channels])
mask_flat = tf.reshape(mask, [-1])
result_flat = tf.where(mask_flat, gen_flat, img_flat)
# Reshape back
result = tf.reshape(result_flat, shape)
Here is a small test:
with tf.Session() as sess:
    # img is positive and gen is negative
    img_val = np.arange(60).reshape((4, 5, 3))
    gen_val = -img_val
    # Do img[2:4, 0:3, :] = gen[2:4, 0:3, :]
    result_val = sess.run(result, feed_dict={
        img: img_val,
        gen: gen_val,
        row_start: 2,
        row_end: 4,
        col_start: 0,
        col_end: 3,
    })
    # Print one channel only for clarity
    print(result_val[:, :, 0])
Output:
[[ 0. 3. 6. 9. 12.]
[ 15. 18. 21. 24. 27.]
[-30. -33. -36. 39. 42.]
[-45. -48. -51. 54. 57.]]
EDIT:
Here is a possible implementation for the code that you have posted. I am using a slightly different method here based on multiplication, which I think is better when you have many images.
import tensorflow as tf
def replace_slices(img, gen, row_start, row_end, col_start, col_end):
    # Masks rows and columns to be replaced
    shape = tf.shape(img)
    rows = shape[1]
    cols = shape[2]
    i = tf.range(rows)
    row_mask = (row_start <= i) & (i < row_end)
    j = tf.range(cols)
    col_mask = (col_start <= j) & (j < col_end)
    # Full mask of replaced elements
    mask = row_mask[:, tf.newaxis] & col_mask
    # Add channel dimension to mask and cast
    mask = tf.cast(mask[:, :, tf.newaxis], img.dtype)
    # Compute result
    result = img * (1 - mask) + gen * mask
    return result
def patcher(tensors):
    img = tensors[1]
    gen = tensors[0]
    img = replace_slices(img, gen, 96, 160, 144, 240)
    return [img]
img = Input( .. )
x = Conv( .. )(img)
out = Lambda(patcher, lambda a: [a[1]])([x, img])
model = Model(img, out)
I modified my initial solution, which only worked when the batch size was set, based on @jdehesa's solution. This should work in Keras across all the backends (TensorFlow, Theano and CNTK):
from keras import backend as K
import numpy as np
def replace_slices(ts, row_start, row_end, col_start, col_end):
    shape = K.int_shape(ts[0])[1:-1]
    np_mask = np.zeros(shape + (1,))
    np_mask[row_start:row_end, col_start:col_end] = 1.
    mask = K.variable(np_mask, dtype=K.dtype(ts[0]))
    # ts[0] is the img and ts[1] is the x tensor
    return ts[0] * (1 - mask) + ts[1] * mask
args = {'row_start': 96, 'row_end': 160, 'col_start': 144, 'col_end': 240}
img = Input(shape=(256,384,3))
x = Conv2D(3, (3,3), padding='same')(img) # this must have 3 filters since img has 3 channels
out = Lambda(replace_slices, arguments=args)([img, x])
model = Model(img, out)
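As a quick sanity check (a hypothetical test, not part of the original answer), everything outside the patched region must come back unchanged regardless of the Conv2D weights:
import numpy as np
img_val = np.random.rand(1, 256, 384, 3).astype('float32')
out_val = model.predict(img_val)
# Rows 0..95 lie entirely outside the patch, so they must equal the input.
assert np.allclose(out_val[:, :96, :, :], img_val[:, :96, :, :])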

Update a variable with tf.while_loop in Tensorflow

I want to update a variable in Tensorflow and for that reason I use the tf.while_loop like:
a = tf.Variable([0, 0, 0, 0, 0, 0] , dtype = np.int16)
i = tf.constant(0)
size = tf.size(a)
def condition(i, size, a):
    return tf.less(i, size)
def body(i, size, a):
    a = tf.scatter_update(a, i, i)
    return [tf.add(i, 1), size, a]
r = tf.while_loop(condition, body, [i, size, a])
This is an example for what I am trying to do. The error that occurs is AttributeError: 'Tensor' object has no attribute '_lazy_read'. What is the appropriate way to update a variable in Tensorflow?
This isn't obvious until you actually write and run it. The pattern looks like this:
import tensorflow as tf
def cond(size, i):
    return tf.less(i, size)
def body(size, i):
    a = tf.get_variable("a", [6], dtype=tf.int32, initializer=tf.constant_initializer(0))
    a = tf.scatter_update(a, i, i)
    tf.get_variable_scope().reuse_variables()  # Reuse variables
    with tf.control_dependencies([a]):
        return (size, i + 1)
with tf.Session() as sess:
    i = tf.constant(0)
    size = tf.constant(6)
    _, i = tf.while_loop(cond,
                         body,
                         [size, i])
    a = tf.get_variable("a", [6], dtype=tf.int32)
    init = tf.initialize_all_variables()
    sess.run(init)
    print(sess.run([a, i]))
Output is
[array([0, 1, 2, 3, 4, 5]), 6]
tf.get_variable gets an existing variable with these parameters or creates a new one.
tf.control_dependencies establishes a happens-before relationship. In this case it ensures the scatter_update runs before the while loop increments i and returns; without it, the variable doesn't get updated.
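To see the happens-before behavior in isolation, here is a minimal sketch (not from the original answer) where a variable read is forced to wait for an assignment:
import tensorflow as tf
v = tf.Variable(0)
assign_op = tf.assign(v, 5)
with tf.control_dependencies([assign_op]):
    # This read only runs after assign_op has executed.
    v_after = tf.identity(v)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(v_after))  # 5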
Note : I didn't really understand the meaning or cause of the error. I get that too.

Problems manually implementing a FIFO queue in TensorFlow

I am trying to come up with a method to implement a FIFO queue in TensorFlow. On every iteration, the purpose is to assign a placeholder a certain number, then store it in a Variable named buffer. After each assignment, I increment an index. The buffer size is [5], so the index should range from 0 to 4. Finally, once the buffer is full, I would set buffer[0:4] to buffer[1:5] and then put the new value into buffer[4]. So here is my code:
import tensorflow as tf
import numpy as np
import random
dim = 30
lst = []
for i in range(dim):
    lst.append(random.randint(1, 10))
data = np.reshape(lst, [dim, 1])
print(lst)
# create a buffer:
buffer_input = tf.placeholder(tf.int32, shape=[1])
buffer = tf.Variable(tf.zeros([5], tf.int32))
index = tf.Variable(tf.constant(0))
def fillBufferBeforeFilled():
    update_op1 = tf.scatter_update(buffer, indices=[index], updates=buffer_input)
    index_assign_add = tf.assign_add(index, 1)
    return update_op1, index_assign_add
def fillBufferAfterFilled():
    tmp = tf.slice(buffer, begin=[0], size=[4])
    update_op2 = tf.scatter_update(buffer, indices=[0, 1, 2, 3], updates=tmp)
    update_op3 = tf.scatter_update(buffer, indices=[index], updates=buffer_input)
    return update_op2, update_op3
cond = tf.cond(tf.equal(index, 4), lambda: fillBufferBeforeFilled(), lambda: fillBufferAfterFilled())
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(dim):
        cond_ = sess.run(cond, feed_dict={buffer_input: data[i]})
        buf = sess.run(buffer, feed_dict={buffer_input: data[i]})
        print('buf: ', buf)
Problem: The index Variable is not incremented after each call, while the first element of the buffer is being assigned to the value passed to the placeholder.
I would like to know why I'm getting this behavior and what is the solution to this problem.
Any help is much appreciated!
You've mixed up the order of the conditions in tf.cond; it should be
cond = tf.cond(tf.equal(index, 4), lambda: fillBufferAfterFilled(), lambda: fillBufferBeforeFilled())
I can get your code running and it mostly works, but the updates aren't quite right; I suspect you'll need to add some tf.control_dependencies calls to force things to happen in the right order.
Here is the solution:
import tensorflow as tf
import numpy as np
import random
dim = 30
lst = []
for i in range(dim):
    lst.append(random.randint(1, 10))
data = np.reshape(lst, [dim, 1])
print(lst)
# create a buffer:
buffer_input = tf.placeholder(tf.int32, shape=[1])
buffer = tf.Variable(tf.zeros([5], tf.int32))
index = tf.Variable(-1, dtype=tf.int32)
def fillBufferBeforeFilled():
    index_assign_add = tf.assign_add(index, 1)
    with tf.control_dependencies([index_assign_add]):
        update_op1 = tf.scatter_update(buffer, indices=[index], updates=buffer_input)
    return update_op1, index_assign_add
def fillBufferAfterFilled():
    tmp = tf.slice(buffer, begin=[1], size=[4])
    update_op2 = tf.scatter_update(buffer, indices=[0, 1, 2, 3], updates=tmp)
    with tf.control_dependencies([update_op2]):
        update_op3 = tf.scatter_update(buffer, indices=[index], updates=buffer_input)
    return update_op2, update_op3
cond = tf.cond(tf.equal(index, 4), lambda: fillBufferAfterFilled(), lambda: fillBufferBeforeFilled())
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(dim):
        cond_ = sess.run(cond, feed_dict={buffer_input: data[i]})
        buf = sess.run(buffer, feed_dict={buffer_input: data[i]})
        print('buf: ', buf)
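As a side note (an alternative not used in the answer above), the shift-and-append for a full buffer could also be written as a single assignment, which sidesteps ordering the two scatter_updates by hand; a sketch assuming the same buffer and buffer_input tensors:
# Shift the window left by one slot and append the new value in one assign.
shifted = tf.concat([buffer[1:], buffer_input], axis=0)
shift_and_append = tf.assign(buffer, shifted)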

How to loop through the elements of a tensor in TensorFlow?

I want to create a function batch_rot90(batch_of_images) using TensorFlow's tf.image.rot90(); the latter only takes one image at a time, while the former should take a batch of n images at once (shape = [n, x, y, f]).
So naturally, one should just iterate through all images in the batch and rotate them one by one. In numpy this would look like:
def batch_rot90(batch):
    for i in range(batch.shape[0]):
        batch[i] = np.rot90(batch[i, :, :, :])
    return batch
How is this done in TensorFlow?
Using tf.while_loop I got this far:
batch = tf.placeholder(tf.float32, shape=[2, 256, 256, 4])
def batch_rot90(batch, k, name=''):
    i = tf.constant(0)
    def cond(batch, i):
        return tf.less(i, tf.shape(batch)[0])
    def body(im, i):
        batch[i] = tf.image.rot90(batch[i], k)
        i = tf.add(i, 1)
        return batch, i
    r = tf.while_loop(cond, body, [batch, i])
    return r
But the assignment to im[i] is not allowed, and I'm confused about what is returned with r.
I realize there might be a workaround for this particular case using tf.batch_to_space() but I believe it should be possible with a loop of some kind too.
Updated Answer:
x = tf.placeholder(tf.float32, shape=[2, 3])
def cond(batch, output, i):
    return tf.less(i, tf.shape(batch)[0])
def body(batch, output, i):
    output = output.write(i, tf.add(batch[i], 10))
    return batch, output, i + 1
# TensorArray is a data structure that supports dynamic writing
output_ta = tf.TensorArray(dtype=tf.float32,
                           size=0,
                           dynamic_size=True,
                           element_shape=(x.get_shape()[1],))
_, output_op, _ = tf.while_loop(cond, body, [x, output_ta, 0])
output_op = output_op.stack()
with tf.Session() as sess:
    print(sess.run(output_op, feed_dict={x: [[1, 2, 3], [0, 0, 0]]}))
I think you should consider using tf.scatter_update to update one image in the batch instead of using batch[i] = .... Refer to this link for details. In your case, I suggest changing the first line of body to:
tf.scatter_update(batch, i, tf.image.rot90(batch[i], k))
There is a map function in tf that will work:
def batch_rot90(batch, k, name=''):
    fun = lambda x: tf.image.rot90(x, k=k)
    return tf.map_fn(fun, batch)
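For example, applied to the placeholder from the question (a sketch using the shapes given there):
batch = tf.placeholder(tf.float32, shape=[2, 256, 256, 4])
rotated = batch_rot90(batch, k=1)  # [2, 256, 256, 4], each image rotated by 90 degrees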

How TensorArray and while_loop work together in tensorflow?

I am trying to produce a very simple example of combining TensorArray and while_loop:
# 1000 sequence in the length of 100
matrix = tf.placeholder(tf.int32, shape=(100, 1000), name="input_matrix")
matrix_rows = tf.shape(matrix)[0]
ta = tf.TensorArray(tf.float32, size=matrix_rows)
ta = ta.unstack(matrix)
init_state = (0, ta)
condition = lambda i, _: i < n
body = lambda i, ta: (i + 1, ta.write(i,ta.read(i)*2))
# run the graph
with tf.Session() as sess:
    (n, ta_final) = sess.run(tf.while_loop(condition, body, init_state), feed_dict={matrix: tf.ones(tf.float32, shape=(100, 1000))})
    print(ta_final.stack())
But I am getting the following error:
ValueError: Tensor("while/LoopCond:0", shape=(), dtype=bool) must be from the same graph as Tensor("Merge:0", shape=(), dtype=float32).
Does anyone have an idea what the problem is?
There are several things in your code to point out. First, you don't need to unstack the matrix into the TensorArray to use it inside the loop, you can safely reference the matrix Tensor inside the body and index it using matrix[i] notation. Another issue is the different data type between your matrix (tf.int32) and the TensorArray (tf.float32), based on your code you're multiplying the matrix ints by 2 and writing the result into the array so it should be int32 as well. Finally, when you wish to read the final result of the loop, the correct operation is TensorArray.stack() which is what you need to run in your session.run call.
Here's a working example:
import numpy as np
import tensorflow as tf
# 1000 sequence in the length of 100
matrix = tf.placeholder(tf.int32, shape=(100, 1000), name="input_matrix")
matrix_rows = tf.shape(matrix)[0]
ta = tf.TensorArray(dtype=tf.int32, size=matrix_rows)
init_state = (0, ta)
condition = lambda i, _: i < matrix_rows
body = lambda i, ta: (i + 1, ta.write(i, matrix[i] * 2))
n, ta_final = tf.while_loop(condition, body, init_state)
# get the final result
ta_final_result = ta_final.stack()
# run the graph
with tf.Session() as sess:
    # print the output of ta_final_result
    print(sess.run(ta_final_result, feed_dict={matrix: np.ones(shape=(100, 1000), dtype=np.int32)}))
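This prints a 100x1000 array in which every entry is 2, since each row of the all-ones input matrix is multiplied by 2.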
