InvalidArgumentError when using summary in Tensorflow v1.2.1 - python

I have written some simple code to try out the TensorFlow summary feature. The code is below.
import tensorflow as tf
import numpy as np

graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, [1, 2], name='x')
    W = tf.ones([2, 1], tf.float32, name='W')
    b = tf.constant([1.5], dtype=tf.float32, shape=(1, 1), name='bias')
    y_ = tf.add(tf.matmul(x, W, name='mul'), b, name='add')
    tf.summary.scalar('y', y_)

with tf.Session(graph=graph) as session:
    merged = tf.summary.merge_all()
    fw = tf.summary.FileWriter("/tmp/tensorflow/logs", graph=graph)
    tf.global_variables_initializer().run()
    x_var = np.array([1., 1.], np.float32).reshape([1, 2])
    print(x_var)
    summary, y = session.run([merged, y_], feed_dict={x: x_var})
    fw.add_summary(summary, 0)
    print(y)
    fw.close()
Basically, it tries to implement y = Wx + b.
The code works if I remove all the summary-related code, but when I add it I get the error below:
InvalidArgumentError (see above for traceback): tags and values not the same shape: [] != [1,1] (tag 'y')
[[Node: y = ScalarSummary[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"](y/tags, add)]]
I tried this in both plain Python and IPython.

Tags and values do not have the same shape. You are passing y_, which has shape [1, 1], while tf.summary.scalar expects a scalar value. You can simply use tf.reduce_mean to solve this problem:
with graph.as_default():
    x = tf.placeholder(tf.float32, [None, 2], name='x')
    W = tf.ones([2, 1], tf.float32, name='W')
    b = tf.constant([1.5], dtype=tf.float32, shape=(1, 1), name='bias')
    y_ = tf.add(tf.matmul(x, W, name='mul'), b, name='add')
    tf.summary.scalar('y', tf.reduce_mean(y_))
This will create a scalar value.
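Alternatively, if you want to record the full [1, 1] tensor rather than its mean, tf.summary.histogram accepts tensors of any shape; a minimal sketch against the same graph as above:

with graph.as_default():
    # Histogram summaries accept tensors of any shape, so no reduction is needed.
    tf.summary.histogram('y_hist', y_)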

Related

How to initialize an identity matrix in tensorflow with no specified parameters?

I am trying to implement part of the code for Graph Convolutional Networks given in this article. I noticed that the author uses tf.eye() with no shape parameter. When I tried to rerun the same code under TensorFlow 1, it gave me the expected error TypeError: eye() missing 1 required positional argument: 'num_rows'.
Can someone explain how the tf.eye() in the article works, and/or whether there is another way to initialize an identity matrix with unspecified shape?
Here is the code (compatible with TensorFlow 1, because apparently TensorFlow 2 doesn't have tf.placeholder()):
import numpy as np
import networkx as nx
import tensorflow as tf

features = tf.placeholder(tf.float32, shape=[None, 2])
adjacency = tf.placeholder(tf.float32, shape=[None, 2])
degree = tf.placeholder(tf.float32, shape=[None, 2])
labels = tf.placeholder(tf.float32, shape=[None, 2])
weights = tf.Variable(tf.random.normal([], 0, 1, tf.float32, seed=1))

def layer(features, adjacency, degree, weights):
    with tf.name_scope('gcn_layer'):
        d_ = tf.pow(degree + tf.eye(), -0.5)
        y = tf.matmul(d_, tf.matmul(adjacency, d_))
        kernel = tf.matmul(features, weights)
        return tf.nn.relu(tf.matmul(y, kernel))

model = layer(features, adjacency, degree, weights)

with tf.name_scope('loss'):
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(
            logits=model, labels=labels))
train_op = tf.train.AdamOptimizer(0.001, 0.9).minimize(loss)

with tf.Session() as sess:
    sess.run(train_op, feed_dict={
        features: features, adjacency: adjacency, degree: degree, labels: labels})
tf.eye() is used to create an identity matrix. The correct usage of tf.eye() is:
Code:
tf.eye(
    num_rows, num_columns=None, batch_shape=None, dtype=tf.dtypes.float32, name=None
)
num_rows is the number of rows for your identity matrix. So if you want to create an identity matrix of shape (2, 2), you have to specify num_rows=2.
Example Usage:
tf.eye(2)
==> [[1., 0.],
[0., 1.]]
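As for the unspecified-shape part of the question: if the size is only known at run time, one possible approach (a sketch, assuming TF 1.x graph mode; degree here is a hypothetical square placeholder) is to pass a scalar Tensor as num_rows, which tf.eye accepts:

import tensorflow as tf

degree = tf.placeholder(tf.float32, shape=[None, None])
# tf.eye accepts a scalar Tensor for num_rows, so the identity matrix
# can match the runtime row count of `degree`.
identity = tf.eye(tf.shape(degree)[0])
d_ = tf.pow(degree + identity, -0.5)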

How to make a custom activation function in tensorflow

I need to make an activation function which does not exist in TensorFlow. How should I do this? I once saw this link:
How to make a custom activation function with only Python in Tensorflow?
but I still don't know how to implement the new type of activation function shown in the picture.
relu, leaky_relu and a new type of relu
I think this could work for you. I have only used native TensorFlow functions, so TensorFlow itself handles the backpropagation.
If you were to use plain Python functions, you would have to program both the forward and the backward pass yourself. The problem then is that you have to save the piecewise function's masks in a "cache" for the backward pass (personally I do not know how that is done, and it would be interesting to know).
import numpy as np
import tensorflow as tf

def new_relu(x, k=0.2):
    part_1 = tf.to_float(tf.math.less_equal(0.0, x))
    part_2 = tf.to_float(tf.math.logical_and(tf.math.less_equal(-1.0, x), tf.math.less(x, 0.0)))
    part_3 = tf.to_float(tf.math.less(x, -1.0))
    return part_1*x + part_2*x*k #+ part_3*0

def new_relu_test():
    # create data
    x = tf.random_normal([10])*10000
    y = new_relu(x)
    with tf.Session():
        diff = tf.test.compute_gradient_error(x, [10], y, [10])
        print(diff)
    # use in dense
    x = tf.placeholder(shape=[None, 3], dtype=tf.float32)
    nn = tf.layers.dense(x, 3, activation=new_relu)
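For completeness, here is a minimal sketch of the "program the backward yourself" route mentioned above, assuming tf.custom_gradient is acceptable (available from TF 1.7); new_relu_custom is a hypothetical name, and the forward-pass masks captured by the closure play the role of the "cache":

import tensorflow as tf

@tf.custom_gradient
def new_relu_custom(x):
    k = 0.2
    part_1 = tf.cast(x >= 0.0, tf.float32)
    part_2 = tf.cast((x >= -1.0) & (x < 0.0), tf.float32)
    y = part_1 * x + part_2 * k * x

    def grad(dy):
        # Reuse the forward masks: slope 1 for x >= 0, slope k on [-1, 0),
        # slope 0 below -1.
        return dy * (part_1 + part_2 * k)

    return y, grad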
EDIT:
If you want the second parameter to be a tensor too, it must be the same size as the input.
import numpy as np
import tensorflow as tf

def new_relu(x, k=0.2):
    part_1 = tf.to_float(tf.math.less_equal(0.0, x))
    part_2 = tf.to_float(tf.math.logical_and(tf.math.less_equal(-1.0, x), tf.math.less(x, 0.0)))
    part_3 = tf.to_float(tf.math.less(x, -1.0))
    return part_1*x + part_2*x*k #+ part_3*0

def new_relu_test():
    # create data
    x = tf.random_normal([10])*10000
    y = new_relu(x)
    with tf.Session():
        diff = tf.test.compute_gradient_error(x, [10], y, [10])
        print(diff)
    # use in dense
    x = tf.placeholder(shape=[None, 3], dtype=tf.float32)
    x_b = tf.placeholder(shape=[None], dtype=tf.float32)
    nn_1 = tf.layers.dense(x, 3)
    nn_2 = tf.layers.dense(x, 3)
    nn = tf.layers.dense(nn_2, 1, activation=None)
    new_r = new_relu(x, tf.tile(tf.expand_dims(x_b, -1), [1, 3]))
    with tf.Session() as sess:
        sess.run(tf.initializers.global_variables())
        sess.run(new_r, feed_dict={x: np.random.rand(100, 3), x_b: np.random.rand(100)})

new_relu_test()
EDIT 2:
Using conv2d
import numpy as np
import tensorflow as tf

def new_relu(x, k=0.2):
    part_1 = tf.to_float(tf.math.less_equal(0.0, x))
    part_2 = tf.to_float(tf.math.logical_and(tf.math.less_equal(-1.0, x), tf.math.less(x, 0.0)))
    part_3 = tf.to_float(tf.math.less(x, -1.0))
    return part_1*x + part_2*x*k #+ part_3*0

def new_relu_test():
    # create data
    x = tf.random_normal([10])*10000
    y = new_relu(x)
    with tf.Session():
        diff = tf.test.compute_gradient_error(x, [10], y, [10])
        print(diff)
    # use in conv2d
    x = tf.placeholder(shape=[None, 28, 28, 3], dtype=tf.float32)
    conv1_weights = tf.get_variable("weight", [3, 3, 3, 32], initializer=tf.truncated_normal_initializer(stddev=0.1))
    conv1_biases = tf.get_variable("bias", [32], initializer=tf.constant_initializer(0.0))
    conv1 = tf.nn.conv2d(x, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
    relu1 = new_relu(tf.nn.bias_add(conv1, conv1_biases))
    with tf.Session() as sess:
        sess.run(tf.initializers.global_variables())
        sess.run(relu1, feed_dict={x: np.random.rand(100, 28, 28, 3)})

new_relu_test()

Using scatter_update with feeded data in tensorflow

I am trying to use scatter_update to update a slice of a tensor. My first code snippet to get familiar with the function works out perfectly fine.
import tensorflow as tf
import numpy as np

with tf.Session() as sess:
    init_val = tf.Variable(tf.zeros((3, 2)))
    indices = tf.constant([0, 1])
    update = tf.scatter_update(init_val, indices, tf.ones((2, 2)))
    init = tf.global_variables_initializer()
    sess.run(init)
    print(sess.run(update))
But when I try to feed the initial value into the graph like
with tf.Session() as sess:
    x = tf.placeholder(tf.float32, shape=(3, 2))
    init_val = x
    indices = tf.constant([0, 1])
    update = tf.scatter_update(init_val, indices, tf.ones((2, 2)))
    init = tf.global_variables_initializer()
    sess.run(init)
    print(sess.run(update, feed_dict={x: np.zeros((3, 2))}))
I get the strange error
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_1' with dtype float and shape [3,2]
[[{{node Placeholder_1}} = Placeholder[dtype=DT_FLOAT, shape=[3,2], _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
Dropping the tf.Variable around x when assigning it to init_val does not help either, since I then get the error
AttributeError: 'Tensor' object has no attribute '_lazy_read'
(see this entry on Github). Has anyone an idea? Thanks in advance!
I am using Tensorflow 1.12 on CPU.
tf.scatter_update only works on variables, so it cannot be applied to a fed placeholder. Instead, you can replace values in a tensor through scattering by building an update tensor and a mask tensor:
import tensorflow as tf
import numpy as np

with tf.Session() as sess:
    x = tf.placeholder(tf.float32, shape=(3, 2))
    indices = tf.constant([0, 1])
    x_shape = tf.shape(x)
    indices = tf.expand_dims(indices, 1)
    replacement = tf.ones((2, 2))
    update = tf.scatter_nd(indices, replacement, x_shape)
    mask = tf.scatter_nd(indices, tf.ones_like(replacement, dtype=tf.bool), x_shape)
    result = tf.where(mask, update, x)
    print(sess.run(result, feed_dict={x: np.arange(6).reshape((3, 2))}))
Output:
[[1. 1.]
[1. 1.]
[4. 5.]]
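For completeness, the direct tf.scatter_update route also works if you first load the fed value into a genuine tf.Variable; a sketch, assuming the same shapes as above:

import tensorflow as tf
import numpy as np

with tf.Session() as sess:
    x = tf.placeholder(tf.float32, shape=(3, 2))
    var = tf.Variable(tf.zeros((3, 2)))   # tf.scatter_update needs a Variable
    load = tf.assign(var, x)              # copy the fed value into the Variable
    update = tf.scatter_update(var, tf.constant([0, 1]), tf.ones((2, 2)))
    sess.run(tf.global_variables_initializer())
    sess.run(load, feed_dict={x: np.zeros((3, 2))})
    print(sess.run(update))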

tf.gradients - dimensions of output

Here is my code:
import numpy as np
import tensorflow as tf

tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, 3], name='x')
W_1 = tf.get_variable('W_1', [3, 3], dtype=tf.float32, initializer=tf.constant_initializer(1.0))
layer_out = tf.matmul(x, W_1, name='layer_out')
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run([tf.gradients(layer_out, [x])], feed_dict={x: np.array([[1, 7, 5]])})
it returns:
[[array([[3., 3., 3.]], dtype=float32)]]
I am expecting to get a 3-by-3 matrix (the Jacobian) or, as per the tf.gradients docs, a list of length 3 with 3 elements in each entry.
What am I missing?
UPDATE:
I see in the tf.gradients docs:
A list of sum(dy/dx) for each x in xs
but why the sum, and how do I get all entries of the Jacobian?
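tf.gradients always differentiates the sum of the ys, which is why a single [1, 3] row comes back (here every entry is 3 because W_1 is all ones). A minimal sketch of one way to recover the full Jacobian, calling tf.gradients once per output component (assuming the same graph as above):

import numpy as np
import tensorflow as tf

tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, 3], name='x')
W_1 = tf.get_variable('W_1', [3, 3], dtype=tf.float32, initializer=tf.constant_initializer(1.0))
layer_out = tf.matmul(x, W_1, name='layer_out')

# Row i of the Jacobian is d(layer_out[0, i]) / dx, one tf.gradients call each.
jacobian_rows = [tf.gradients(layer_out[0, i], x)[0] for i in range(3)]
jacobian = tf.concat(jacobian_rows, axis=0)  # (3, 3) for a single input row

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(jacobian, feed_dict={x: np.array([[1., 7., 5.]])}))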

Using Datasets to consume Numpy arrays

I'm trying to use Numpy arrays within a graph, feeding in the data using a Dataset.
I've read through this, but can't quite make sense of how I should feed placeholder arrays within a Dataset.
If we take a simple example, I start with:
A = np.arange(4)
B = np.arange(10, 14)

a = tf.placeholder(tf.float32, [None])
b = tf.placeholder(tf.float32, [None])
c = tf.add(a, b)

with tf.Session() as sess:
    for i in range(10):
        x = sess.run(c, feed_dict={a: A, b: B})
        print(i, x)
Then I attempt to modify it to use a Dataset as follows:
A = np.arange(4)
B = np.arange(10, 14)

a = tf.placeholder(tf.int32, A.shape)
b = tf.placeholder(tf.int32, B.shape)
c = tf.add(a, b)

dataset = tf.data.Dataset.from_tensors((a, b))
iterator = dataset.make_initializable_iterator()

with tf.Session() as sess3:
    sess3.run(tf.global_variables_initializer())
    sess3.run(iterator.initializer, feed_dict={a: A, b: B})
    for i in range(10):
        x = sess3.run(c)
        print(i, x)
If I run this I get 'InvalidArgumentError: You must feed a value for placeholder tensor ...'
The code until the for loop mimics the example here, but I don't get how I can then employ the placeholders a & b without supplying a feed_dict to every call to sess3.run(c) [which would be expensive]. I suspect I have to somehow use the iterator, but I don't understand how.
Update
It appears I oversimplified too much when picking the example. What I am really trying to do is use Datasets when training a neural network, or similar.
For a more sensible question, how would I go about using Datasets to feed placeholders in the below (though imagine X and Y_true are much longer...). The documentation takes me to the point where the loop starts and then I'm not sure.
X = np.arange(8.).reshape(4, 2)
Y_true = np.array([0, 0, 1, 1])

x = tf.placeholder(tf.float32, [None, 2], name='x')
y_true = tf.placeholder(tf.float32, [None], name='y_true')

w = tf.Variable(np.random.randn(2, 1), name='w', dtype=tf.float32)
y = tf.squeeze(tf.matmul(x, w), name='y')

loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=y_true, logits=y),
    name='x_entropy')

# set optimiser
optimiser = tf.train.AdamOptimizer().minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(100):
        _, loss_out = sess.run([optimiser, loss], feed_dict={x: X, y_true: Y_true})
        print(i, loss_out)
Trying the following only gets me an InvalidArgumentError:
X = np.arange(8.).reshape(4, 2)
Y_true = np.array([0, 0, 1, 1])

x = tf.placeholder(tf.float32, [None, 2], name='x')
y_true = tf.placeholder(tf.float32, [None], name='y_true')

dataset = tf.data.Dataset.from_tensor_slices((x, y_true))
iterator = dataset.make_initializable_iterator()

w = tf.Variable(np.random.randn(2, 1), name='w', dtype=tf.float32)
y = tf.squeeze(tf.matmul(x, w), name='y')

loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=y_true, logits=y),
    name='x_entropy')

# set optimiser
optimiser = tf.train.AdamOptimizer().minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(iterator.initializer, feed_dict={x: X, y_true: Y_true})
    for i in range(100):
        _, loss_out = sess.run([optimiser, loss])
        print(i, loss_out)
Use iterator.get_next() to get elements from the Dataset, like:
next_element = iterator.get_next()
then initialize the iterator:
sess.run(iterator.initializer, feed_dict={a: A, b: B})
and finally get the values from the Dataset:
value = sess.run(next_element)
EDIT:
The code above just returns the elements from the Dataset. The Dataset API is intended to serve features and labels to an input_fn, so all additional preprocessing computations should be performed within the Dataset API. If you want to add elements, you should define a function that is applied to the elements, like:
def add_fn(exp1, exp2):
    return tf.add(exp1, exp2)
and then you can map this function onto your Dataset:
dataset = dataset.map(add_fn)
Complete code example:
import numpy as np
import tensorflow as tf

A = np.arange(4)
B = np.arange(10, 14)

a = tf.placeholder(tf.int32, A.shape)
b = tf.placeholder(tf.int32, B.shape)
#c = tf.add(a, b)

def add_fn(exp1, exp2):
    return tf.add(exp1, exp2)

dataset = tf.data.Dataset.from_tensors((a, b))
dataset = dataset.map(add_fn)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()

with tf.Session() as sess:
    sess.run(iterator.initializer, feed_dict={a: A, b: B})
    # the dataset holds just one element
    x = sess.run(next_element)
    print(x)
The problem in your more complicated example is that you use the same tf.placeholder() nodes as the input to Dataset.from_tensor_slices() (which is correct) and to the network itself (which causes the InvalidArgumentError). Instead, as J.E.K points out in their answer, you should use iterator.get_next() as the input to your network, as follows (note that there are a couple of other fixes I added to make the code run as-is):
X = np.arange(8.).reshape(4, 2)
Y_true = np.array([0, 0, 1, 1])

x = tf.placeholder(tf.float32, [None, 2], name='x')
y_true = tf.placeholder(tf.float32, [None], name='y_true')

dataset = tf.data.Dataset.from_tensor_slices((x, y_true))

# You will need to repeat the input (which has 4 elements) to be able to take
# 100 steps.
dataset = dataset.repeat()

iterator = dataset.make_initializable_iterator()

# Use `iterator.get_next()` to create tensors that will consume values from the
# dataset.
x_next, y_true_next = iterator.get_next()

w = tf.Variable(np.random.randn(2, 1), name='w', dtype=tf.float32)

# The `x_next` tensor is a vector (i.e. a row of `X`), so you will need to
# convert it to a matrix or apply batching in the dataset to make it work with
# `tf.matmul()`.
x_next = tf.expand_dims(x_next, 0)

y = tf.squeeze(tf.matmul(x_next, w), name='y')  # Use `x_next` here.

loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=y_true_next, logits=y),  # Use `y_true_next` here.
    name='x_entropy')

# set optimiser
optimiser = tf.train.AdamOptimizer().minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(iterator.initializer, feed_dict={x: X, y_true: Y_true})
    for i in range(100):
        _, loss_out = sess.run([optimiser, loss])
        print(i, loss_out)
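As the comments in the code suggest, batching inside the dataset is an alternative to tf.expand_dims; a sketch of just the changed lines (assuming the same X, Y_true, x, y_true, and w as above):

# Batch all 4 examples together and repeat, so each iterator element is
# already a (4, 2) feature matrix and a (4,) label vector.
dataset = tf.data.Dataset.from_tensor_slices((x, y_true))
dataset = dataset.batch(4).repeat()
iterator = dataset.make_initializable_iterator()
x_next, y_true_next = iterator.get_next()

# No tf.expand_dims needed: x_next is already rank 2.
y = tf.squeeze(tf.matmul(x_next, w), name='y')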
