I am trying to implement a FIFO queue in TensorFlow. On every iteration, the goal is to assign a placeholder a certain number, then store it in a Variable named buffer. After each assignment, I increment an index. The buffer size is [5], so the index should range from 0 to 4. Once the buffer is full, I set buffer[0:4] to buffer[1:5] and write the new value to buffer[4]. Here is my code:
import tensorflow as tf
import numpy as np
import random

dim = 30
lst = []
for i in range(dim):
    lst.append(random.randint(1, 10))
data = np.reshape(lst, [dim, 1])
print(lst)

# create a buffer:
buffer_input = tf.placeholder(tf.int32, shape=[1])
buffer = tf.Variable(tf.zeros([5], tf.int32))
index = tf.Variable(tf.constant(0))

def fillBufferBeforeFilled():
    update_op1 = tf.scatter_update(buffer, indices=[index], updates=buffer_input)
    index_assign_add = tf.assign_add(index, 1)
    return update_op1, index_assign_add

def fillBufferAfterFilled():
    tmp = tf.slice(buffer, begin=[0], size=[4])
    update_op2 = tf.scatter_update(buffer, indices=[0, 1, 2, 3], updates=tmp)
    update_op3 = tf.scatter_update(buffer, indices=[index], updates=buffer_input)
    return update_op2, update_op3

cond = tf.cond(tf.equal(index, 4), lambda: fillBufferBeforeFilled(), lambda: fillBufferAfterFilled())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(dim):
        cond_ = sess.run(cond, feed_dict={buffer_input: data[i]})
        buf = sess.run(buffer, feed_dict={buffer_input: data[i]})
        print('buf: ', buf)
Problem: the index Variable is not incremented after each call, while the first element of the buffer keeps being assigned the value passed to the placeholder.
I would like to know why I'm getting this behavior and how to fix it.
Any help is much appreciated!
You've mixed up the order of the branches in tf.cond; it should be
cond = tf.cond(tf.equal(index, 4), lambda: fillBufferAfterFilled(), lambda: fillBufferBeforeFilled())
I can get your code running and it mostly works, but the updates aren't quite right; I suspect you'll need to add some tf.control_dependencies calls to force things to happen in the right order.
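To illustrate that idea in isolation, here is a minimal sketch (a toy variable, not the buffer code) of how tf.control_dependencies forces one op to run before another:

import tensorflow as tf

v = tf.Variable(0)
inc = tf.assign_add(v, 1)
# Without the dependency, the read below could run before or after inc;
# with it, inc is guaranteed to have run first.
with tf.control_dependencies([inc]):
    read = tf.identity(v)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(read))  # 1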
Here is the solution:
import tensorflow as tf
import numpy as np
import random

dim = 30
lst = []
for i in range(dim):
    lst.append(random.randint(1, 10))
data = np.reshape(lst, [dim, 1])
print(lst)

# create a buffer:
buffer_input = tf.placeholder(tf.int32, shape=[1])
buffer = tf.Variable(tf.zeros([5], tf.int32))
index = tf.Variable(-1, dtype=tf.int32)

def fillBufferBeforeFilled():
    # Increment the index first, then write the input into the new slot.
    index_assign_add = tf.assign_add(index, 1)
    with tf.control_dependencies([index_assign_add]):
        update_op1 = tf.scatter_update(buffer, indices=[index], updates=buffer_input)
    return update_op1, index_assign_add

def fillBufferAfterFilled():
    # Shift buffer[1:5] into buffer[0:4] first, then write the new value at the end.
    tmp = tf.slice(buffer, begin=[1], size=[4])
    update_op2 = tf.scatter_update(buffer, indices=[0, 1, 2, 3], updates=tmp)
    with tf.control_dependencies([update_op2]):
        update_op3 = tf.scatter_update(buffer, indices=[index], updates=buffer_input)
    return update_op2, update_op3

cond = tf.cond(tf.equal(index, 4), lambda: fillBufferAfterFilled(), lambda: fillBufferBeforeFilled())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(dim):
        cond_ = sess.run(cond, feed_dict={buffer_input: data[i]})
        buf = sess.run(buffer, feed_dict={buffer_input: data[i]})
        print('buf: ', buf)
Related
I am using TensorFlow v1.15. I have a very basic use of TensorArray, given in the following example:
import tensorflow as tf

an_array = tf.TensorArray(dtype=tf.float32, size=5, dynamic_size=True,
                          clear_after_read=False, element_shape=(16, 7, 2))
for i in range(5):
    val = tf.random.normal(shape=(16, 7, 2))
    an_array.write(i, val)
    print(tf.Session().run(val))
tensors = [an_array.read(j) for j in range(5)]
print(tf.Session().run(tensors))
The print in the for loop does not print all zeros, while the last print statement does. Why is this happening? Thanks.
tf.TensorArray.write returns a new tf.TensorArray where the writing operation has taken place. In general, the output of this function should replace the previous reference to the array:
import tensorflow as tf

an_array = tf.TensorArray(dtype=tf.float32, size=5, dynamic_size=True,
                          clear_after_read=False, element_shape=(16, 7, 2))
for i in range(5):
    val = tf.random.normal(shape=(16, 7, 2))
    # Replace the tensor array reference with the one returned by write
    an_array = an_array.write(i, val)
    print(tf.Session().run(val))
tensors = [an_array.read(j) for j in range(5)]
print(tf.Session().run(tensors))
I want to update a variable in TensorFlow, and for that reason I use tf.while_loop, like:
import numpy as np
import tensorflow as tf

a = tf.Variable([0, 0, 0, 0, 0, 0], dtype=np.int16)
i = tf.constant(0)
size = tf.size(a)

def condition(i, size, a):
    return tf.less(i, size)

def body(i, size, a):
    a = tf.scatter_update(a, i, i)
    return [tf.add(i, 1), size, a]

r = tf.while_loop(condition, body, [i, size, a])
This is an example of what I am trying to do. The error that occurs is AttributeError: 'Tensor' object has no attribute '_lazy_read'. What is the appropriate way to update a variable in TensorFlow?
This isn't obvious until one codes it up and executes it. The pattern looks like this:
import tensorflow as tf

def cond(size, i):
    return tf.less(i, size)

def body(size, i):
    a = tf.get_variable("a", [6], dtype=tf.int32, initializer=tf.constant_initializer(0))
    a = tf.scatter_update(a, i, i)
    tf.get_variable_scope().reuse_variables()  # Reuse variables
    with tf.control_dependencies([a]):
        return (size, i + 1)

with tf.Session() as sess:
    i = tf.constant(0)
    size = tf.constant(6)
    _, i = tf.while_loop(cond, body, [size, i])
    a = tf.get_variable("a", [6], dtype=tf.int32)
    init = tf.initialize_all_variables()
    sess.run(init)
    print(sess.run([a, i]))
Output is
[array([0, 1, 2, 3, 4, 5]), 6]
tf.get_variable gets an existing variable with these parameters or creates a new one.
tf.control_dependencies establishes a happens-before relationship. In this case, it ensures the scatter_update happens before the while loop increments and returns; without it, the variable doesn't get updated.
Note: I didn't really understand the meaning or cause of the error; I get it too.
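As an aside, one way to sidestep the error entirely is to avoid variables and carry the tensor itself through the loop, rebuilding it each iteration. A minimal sketch of that alternative (my own tf.concat-based variation, not the pattern above):

import tensorflow as tf

i0 = tf.constant(0)
a0 = tf.zeros([6], dtype=tf.int32)

def cond(i, a):
    return tf.less(i, tf.size(a))

def body(i, a):
    # Rebuild the tensor with position i set to i.
    updated = tf.concat([a[:i], [i], a[i + 1:]], axis=0)
    updated.set_shape(a.get_shape())  # keep the static shape for the loop invariant
    return i + 1, updated

_, result = tf.while_loop(cond, body, [i0, a0])
with tf.Session() as sess:
    print(sess.run(result))  # [0 1 2 3 4 5]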
I want to run argmin on a possibly empty tensor in TensorFlow and get back some sentinel value like the empty list or -1.
By default, argmin on an empty tensor yields this:
InvalidArgumentError: Reduction axis 0 is empty in shape [0]
Perhaps using tf.cond could help, but I'm not sure how to check whether a vector is empty or not:
tf.equal(tf.size(vec), tf.constant(0)).eval()
ValueError: Operation 'Equal_5' has been marked as not fetchable.
There's also the question of laziness: is the argmin branch only evaluated when the vector is non-empty?
tf.cond(isEmpty(vec), lambda: tf.constant(0, dtype="int64"), lambda: tf.argmin(vec))
Provided the first issue is resolved, i.e. it's possible to construct some isEmpty function, would the above cond work?
It was just the interactive session acting up. The following does work:
import tensorflow as tf

sess = tf.Session()
a = tf.ones((2, 2, 4))
b = tf.zeros((2, 2, 4))
zero = tf.constant(0, dtype="float32")
idx = tf.constant([1, 1])
region_a = tf.gather_nd(a, idx)
region_b = tf.gather_nd(b, idx)
where_a = tf.not_equal(region_a, zero)
where_b = tf.not_equal(region_b, zero)
inuse_a = tf.where(where_a)
inuse_b = tf.where(where_b)
inuse_a_flat = tf.reshape(inuse_a, [-1])
inuse_b_flat = tf.reshape(inuse_b, [-1])
qvals = tf.ones(4, dtype="float32")
qvals_flat = tf.reshape(qvals, [-1])
inuse_a_q = tf.gather(qvals_flat, inuse_a_flat)
inuse_b_q = tf.gather(qvals_flat, inuse_b_flat)
size_iuq_a = tf.size(inuse_a_q)
size_iuq_b = tf.size(inuse_b_q)
eq_a = tf.equal(tf.size(inuse_a_q), tf.constant(0))
eq_b = tf.equal(tf.size(inuse_b_q), tf.constant(0))
argmin_a = tf.cond(eq_a,
                   lambda: tf.constant(-1, dtype="int64"),
                   lambda: tf.argmin(inuse_a_flat))
argmin_b = tf.cond(eq_b,
                   lambda: tf.constant(-1, dtype="int64"),
                   lambda: tf.argmin(inuse_b_flat))

with sess.as_default():
    print(inuse_a_q.eval())
    print(inuse_b_q.eval())
    print("\n")
    print(size_iuq_a.eval())
    print(size_iuq_b.eval())
    print("\n")
    print(eq_a.eval())
    print(eq_b.eval())
    print("\n")
    print(argmin_a.eval())  # 0
    print(argmin_b.eval())  # -1
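Distilled to its core, the guard is just tf.size plus tf.cond; a minimal sketch (assuming a 1-D float placeholder named vec):

import tensorflow as tf

vec = tf.placeholder(tf.float32, shape=[None])
is_empty = tf.equal(tf.size(vec), 0)
# Only the taken branch of tf.cond is executed, so argmin never
# sees an empty tensor.
safe_argmin = tf.cond(is_empty,
                      lambda: tf.constant(-1, dtype=tf.int64),
                      lambda: tf.argmin(vec))

with tf.Session() as sess:
    print(sess.run(safe_argmin, feed_dict={vec: []}))        # -1
    print(sess.run(safe_argmin, feed_dict={vec: [3., 1.]}))  # 1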
I want to create a function batch_rot90(batch_of_images) using TensorFlow's tf.image.rot90(); the latter only takes one image at a time, while the former should take a batch of n images at once (shape = [n, x, y, f]).
So naturally, one should just iterate through all images in the batch and rotate them one by one. In numpy this would look like:
def batch_rot90(batch):
    for i in range(batch.shape[0]):
        batch[i] = np.rot90(batch[i, :, :, :])
    return batch
How is this done in TensorFlow?
Using tf.while_loop, I got this far:
batch = tf.placeholder(tf.float32, shape=[2, 256, 256, 4])

def batch_rot90(batch, k, name=''):
    i = tf.constant(0)
    def cond(batch, i):
        return tf.less(i, tf.shape(batch)[0])
    def body(im, i):
        im[i] = tf.image.rot90(im[i], k)
        i = tf.add(i, 1)
        return im, i
    r = tf.while_loop(cond, body, [batch, i])
    return r

But the assignment to im[i] is not allowed, and I'm confused about what is returned in r.
I realize there might be a workaround for this particular case using tf.batch_to_space(), but I believe it should be possible with a loop of some kind too.
Updated Answer:
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[2, 3])

def cond(batch, output, i):
    return tf.less(i, tf.shape(batch)[0])

def body(batch, output, i):
    output = output.write(i, tf.add(batch[i], 10))
    return batch, output, i + 1

# TensorArray is a data structure that supports dynamic writing
output_ta = tf.TensorArray(dtype=tf.float32,
                           size=0,
                           dynamic_size=True,
                           element_shape=(x.get_shape()[1],))
_, output_op, _ = tf.while_loop(cond, body, [x, output_ta, 0])
output_op = output_op.stack()

with tf.Session() as sess:
    print(sess.run(output_op, feed_dict={x: [[1, 2, 3], [0, 0, 0]]}))
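Adapting the same TensorArray pattern back to the rot90 question might look like this (a sketch; square images assumed so the rotated element shape stays the same):

import numpy as np
import tensorflow as tf

k = 1
batch = tf.placeholder(tf.float32, shape=[2, 256, 256, 4])

def cond(batch, output, i):
    return tf.less(i, tf.shape(batch)[0])

def body(batch, output, i):
    # Rotate one image and write it into slot i of the array.
    output = output.write(i, tf.image.rot90(batch[i], k))
    return batch, output, i + 1

output_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True,
                           element_shape=(256, 256, 4))
_, output_op, _ = tf.while_loop(cond, body, [batch, output_ta, 0])
rotated = output_op.stack()

with tf.Session() as sess:
    out = sess.run(rotated, feed_dict={batch: np.zeros((2, 256, 256, 4), np.float32)})
    print(out.shape)  # (2, 256, 256, 4)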
I think you should consider using tf.scatter_update to update one image in the batch, instead of using batch[i] = .... Refer to this link for details. In your case, I suggest changing the first line of body to:
tf.scatter_update(batch, i, tf.image.rot90(batch[i], k))
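One caveat worth noting: tf.scatter_update expects a tf.Variable as its first argument, so the batch would first have to live in a variable rather than a placeholder. A rough sketch of that idea (the var_batch variable is hypothetical, for illustration):

import numpy as np
import tensorflow as tf

k = 1
# Hypothetical variable holding the batch; a placeholder would not work here.
var_batch = tf.Variable(np.zeros((2, 256, 256, 4), np.float32))
i = tf.constant(0)
rotate_i = tf.scatter_update(var_batch, i, tf.image.rot90(var_batch[i], k))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(rotate_i)  # rotates image i in place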
There is a map function in tf that will work:
def batch_rot90(batch, k, name=''):
    fun = lambda x: tf.image.rot90(x, k=k)
    return tf.map_fn(fun, batch)
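For completeness, a quick usage sketch of the map_fn version (shapes assumed from the question):

import numpy as np
import tensorflow as tf

# definition from the answer above
def batch_rot90(batch, k, name=''):
    fun = lambda x: tf.image.rot90(x, k=k)
    return tf.map_fn(fun, batch)

batch = tf.placeholder(tf.float32, shape=[2, 256, 256, 4])
rotated = batch_rot90(batch, 1)

with tf.Session() as sess:
    out = sess.run(rotated, feed_dict={batch: np.zeros((2, 256, 256, 4), np.float32)})
    print(out.shape)  # (2, 256, 256, 4)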
I am trying to produce a very simple example combining TensorArray and while_loop:
# 1000 sequences of length 100
matrix = tf.placeholder(tf.int32, shape=(100, 1000), name="input_matrix")
matrix_rows = tf.shape(matrix)[0]
ta = tf.TensorArray(tf.float32, size=matrix_rows)
ta = ta.unstack(matrix)
init_state = (0, ta)
condition = lambda i, _: i < n
body = lambda i, ta: (i + 1, ta.write(i, ta.read(i) * 2))

# run the graph
with tf.Session() as sess:
    (n, ta_final) = sess.run(tf.while_loop(condition, body, init_state),
                             feed_dict={matrix: tf.ones(tf.float32, shape=(100, 1000))})
    print(ta_final.stack())
But I am getting the following error:
ValueError: Tensor("while/LoopCond:0", shape=(), dtype=bool) must be from the same graph as Tensor("Merge:0", shape=(), dtype=float32).
Does anyone have an idea what the problem is?
There are several things in your code to point out. First, you don't need to unstack the matrix into the TensorArray to use it inside the loop; you can safely reference the matrix Tensor inside the body and index it using matrix[i] notation.
Second, your matrix (tf.int32) and the TensorArray (tf.float32) have different data types; since you're multiplying the matrix ints by 2 and writing the result into the array, the array should be int32 as well.
Finally, when you wish to read the final result of the loop, the correct operation is TensorArray.stack(), which is what you need to run in your session.run call.
Here's a working example:
import numpy as np
import tensorflow as tf

# 1000 sequences of length 100
matrix = tf.placeholder(tf.int32, shape=(100, 1000), name="input_matrix")
matrix_rows = tf.shape(matrix)[0]
ta = tf.TensorArray(dtype=tf.int32, size=matrix_rows)
init_state = (0, ta)
condition = lambda i, _: i < matrix_rows
body = lambda i, ta: (i + 1, ta.write(i, matrix[i] * 2))
n, ta_final = tf.while_loop(condition, body, init_state)
# get the final result
ta_final_result = ta_final.stack()

# run the graph
with tf.Session() as sess:
    # print the output of ta_final_result
    print(sess.run(ta_final_result, feed_dict={matrix: np.ones(shape=(100, 1000), dtype=np.int32)}))