Tensorflow CNN test data splitting and array sizing problems - python

I've tried to figure things out myself and not fall back on actually creating an account here, but as a self-taught beginner I've reached a wall with this code.
I'm having two major issues, besides optimizing the net architecture once everything is working:
Every time I've tried to create a new dataset for a test batch I've run into an 'xTensor is not a Tensor' error and couldn't run a session through it, unlike with the iterator, which works just fine. I'm loading custom data with directory names as labels, with no manually created train and test directories. I'm probably missing a proper method for tf.
I can't work around the first error I currently get:
'ValueError: Cannot feed value of shape (100,) for Tensor 'Placeholder_1:0', which has shape '(?, 1)', raised while feeding feed_dict={y: batch_y}. I've tried some of the solutions posted on SO but couldn't get any of them to work.
I'm pasting the whole thing; the ########## markers flag the problem-triggering zones at the very bottom, in the session.
import tensorflow as tf
import numpy as np
import os
# load custom imageset directory
data_path = r"..\datasets\images\flowers"
# setup hypervariables for labels and images format
n_classes = 5
img_width = 64
img_length = 64
channels = 3
# setup hypervariables for network
learning_rate = 0.0001
epochs = 2
batch_size = 100
drop_rate = 0.6
imagepaths = list()
labels = list()
label = 0
classes = sorted(os.walk(data_path).__next__()[1])
# List each sub-directory (the classes)
for c in classes:
    c_dir = os.path.join(data_path, c)
    walk = os.walk(c_dir).__next__()
    # Add each image to the training set
    for sample in walk[2]:
        imagepaths.append(os.path.join(c_dir, sample))
        labels.append(label)
    label += 1
total_input = len(labels)
# Convert to Tensor
imagepaths = tf.convert_to_tensor(imagepaths, dtype=tf.string)
labels = tf.convert_to_tensor(labels, dtype=tf.int32)
# Build a TF Queue, shuffle data
dataset = tf.data.Dataset.from_tensor_slices((imagepaths, labels))
# read, decode, resize and normalize images on RGB range
def parse(imagepath, label):
    image = tf.read_file(imagepath)
    image = tf.image.decode_jpeg(image, channels=channels)
    image = tf.image.resize_images(image, [img_length, img_width])
    image = image * 1.0/255
    return image, label
dataset = dataset.map(parse)
dataset = dataset.shuffle(buffer_size=batch_size*10)
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
next_batch = iterator.get_next()
# hypervariables for layers' output size
K = 16
L = 32
M = 200
x = tf.placeholder(tf.float32, [None, 4326])
x_shaped = tf.reshape(x, [-1, img_length, img_width, 3])
y = tf.placeholder(tf.float32, [None, 1])
# weight, bias with stride size and activation method after convolution for layer 1
W1 = tf.Variable(tf.truncated_normal([5, 5, 3, K], stddev=0.03))
b1 = tf.Variable(tf.truncated_normal([K], stddev=0.01))
stride = 1
y1 = tf.nn.relu(tf.nn.conv2d(x_shaped, W1, strides=[1, stride, stride, 1], padding='SAME') + b1)
# weight, bias with stride size and activation method after convolution for layer 2
W2 = tf.Variable(tf.truncated_normal([5, 5, K, L], stddev=0.03))
b2 = tf.Variable(tf.truncated_normal([L], stddev=0.01))
stride = 2 # output is 14x14
y2 = tf.nn.relu(tf.nn.conv2d(y1, W2, strides=[1, stride, stride, 1], padding='SAME') + b2)
yflat = tf.reshape(y2, [-1, 7 * 7 * L])
W3 = tf.Variable(tf.truncated_normal([7 * 7 * L, M], stddev=0.1))
b3 = tf.Variable(tf.truncated_normal([M], stddev=0.01))
y3 = tf.nn.relu(tf.matmul(yflat, W3) + b3)
W4 = tf.Variable(tf.truncated_normal([M, 10], stddev=0.1))
b4 = tf.Variable(tf.truncated_normal([10], stddev=0.01))
ylogits = tf.matmul(y3, W4) + b4
y_ = tf.nn.softmax(ylogits)
# add cross entropy for back prop
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=ylogits, labels=y_))
# add an optimiser for back prop
optimiser = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
# define an accuracy assessment operation
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    ########## temporary solution for test_x, test_y
    test_x, test_y = sess.run(next_batch)
    total_batch = int(total_input / batch_size)
    # define the iterator for the network
    for epoch in range(epochs):
        avg_cost = 0
        for i in range(total_batch):
            batch_x, batch_y = sess.run(next_batch)
            ########## ValueError: Cannot feed value of shape (100,) for Tensor 'Placeholder_1:0' -> y: batch_y
            _, c = sess.run([optimiser, cross_entropy], feed_dict={x_shaped: batch_x, y: batch_y})
            avg_cost += c / total_batch
        test_acc = sess.run(accuracy, feed_dict={x: test_x, y: test_y})
        print("Epoch:", (epoch + 1), "cost =", "{:.3f}".format(avg_cost), " test accuracy: {:.3f}".format(test_acc))
        summary = sess.run(merged, feed_dict={x: test_x, y: test_y})
    print("\nTraining complete!")
    print(sess.run(accuracy, feed_dict={x: test_x, y: test_y}))

Are you sure that this part:
_, c = sess.run([optimiser, cross_entropy], feed_dict={x_shaped: batch_x, y: batch_y})
doesn't have to be:
_, c = sess.run([optimiser, cross_entropy], feed_dict={x: batch_x, y: batch_y})
Furthermore, you have a batch size of 100; the data in the array is right, but its shape is not complete.
What you have (dummy example):
np.zeros((100,)).shape
>>> (100,)
Here 100 matches the '?' of the required shape '(?, 1)'. The trailing 1 can easily be added, and it often happens that NumPy does not do this by itself. See the following code:
np.expand_dims(np.zeros((100,)), axis=-1).shape
>>> (100, 1)
axis=-1 stands for the last axis; you basically tell NumPy to add a dimension at the end. This does not affect the data itself, only the shape of the array. So your code should be:
_, c = sess.run([optimiser, cross_entropy], feed_dict={x_shaped: batch_x, y:np.expand_dims(batch_y, axis=-1)})
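As for the first problem in the question (no separate train and test directories): one common pattern, sketched here as an assumption rather than something from the original post, is to split the single dataset with take and skip before batching, with a fixed shuffle so the two splits stay disjoint:

# sketch: replaces the shuffle/batch/iterator lines in the question
dataset = dataset.map(parse)
dataset = dataset.shuffle(buffer_size=total_input, seed=42,
                          reshuffle_each_iteration=False)  # fixed order so the split is stable
test_size = 500  # assumed hold-out size
test_dataset = dataset.take(test_size).batch(test_size)
train_dataset = dataset.skip(test_size).batch(batch_size)
next_batch = train_dataset.make_one_shot_iterator().get_next()
test_batch = test_dataset.make_one_shot_iterator().get_next()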

Related

ValueError: Cannot feed value of shape (2,) for Tensor 'Placeholder:0', which has shape '(1, 2)'

I'm new to tensorflow and trying to create a linear regression model. My code is
import numpy as np
import tensorflow as tf
bias = np.ones((50, 1))
trainX = np.arange(0, 10, 0.2).reshape(50, 1)
trainY = (3 * trainX + np.random.rand(trainX.shape[0]) * 20 - 10) + 10
trainX = np.append(bias, trainX, axis=1)
X = tf.placeholder("float", shape=(1, 2))
Y = tf.placeholder("float")
w = tf.Variable([[0.0, 0.0]], name="weights")
model = tf.matmul(X, tf.transpose(w))
cost = tf.pow((Y - model), 2)
init = tf.global_variables_initializer()
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
with tf.Session() as sess:
    sess.run(init)
    for i in range(50):
        for (x, y) in zip(trainX, trainY):
            sess.run(train_op, feed_dict={X: x, Y: y})
    print(sess.run(w))
I don't know what I'm doing wrong. I think the problem is in initializing the weights. The idea is simply to predict the two weight constants that best fit the line to the data.
I'm getting this error in a Colaboratory notebook.
Several things are conspiring here. I assume that you want the shape of trainY to be (50,), but since you add the noise only after reshaping, broadcasting causes trainX + np.random.rand(trainX.shape[0]) to have shape (50, 50). If you change the initial part of the code to
bias = np.ones(50)
trainX = np.arange(0, 10, 0.2)
trainY = (3 * trainX + np.random.rand(trainX.shape[0]) * 20 - 10) + 10
trainX = np.vstack([bias, trainX]).T
and ensure that the shapes are set up properly through
sess.run(train_op, feed_dict={X: x.reshape((1, 2)), Y: y})
then your code will run.
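To see the broadcasting problem in isolation, here is a standalone sketch (not from the original post):

import numpy as np
a = np.arange(0, 10, 0.2).reshape(50, 1)  # shape (50, 1)
noise = np.random.rand(50)                # shape (50,)
print((a + noise).shape)                  # (50, 50): broadcast across both axes, not element-wise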
However, since you are only dealing with the inner product of 2-dimensional vectors, you can avoid the reshaping altogether by instead simply using tf.tensordot:
X = tf.placeholder("float", shape=(2,))
Y = tf.placeholder("float")
w = tf.Variable([0.0, 0.0], name="weights")
model = tf.tensordot(X, w, 1)
Finally, note that while the approach of splitting up the sample before passing it to the optimizer (in what is typically referred to as batching) works well for large datasets, in your case you could just as well pass the entire sample at once; that is, something that would amount to
X = tf.placeholder("float", shape=(50, 2))
Y = tf.placeholder("float", shape=(50, 1))
w = tf.Variable(tf.zeros([2, 1], "float"), name="weights")
model = tf.matmul(X, w)
cost = tf.reduce_sum(tf.pow((Y - model), 2))
init = tf.global_variables_initializer()
train_op = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
with tf.Session() as sess:
    sess.run(init)
    for i in range(10000):
        sess.run(train_op, feed_dict={X: trainX, Y: trainY.reshape((50, 1))})
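For completeness, a sketch of how the tf.tensordot variant above plugs into the original training loop, assuming the same trainX and trainY as fixed earlier:

cost = tf.pow(Y - model, 2)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(50):
        for (x_row, y_val) in zip(trainX, trainY):
            # x_row has shape (2,), matching the (2,) placeholder
            sess.run(train_op, feed_dict={X: x_row, Y: y_val})
    print(sess.run(w))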

Tensorflow Print fixing shape issues?

I'm running across a very weird issue and was hoping to find someone who might be a bit more familiar with it. I'm attempting a basic LSTM to do some binary classification with the following code:
class FakeData(object):
    def __init__(self, n):
        self.x = np.random.randint(4, size=(n, 90, 4))
        blah = np.random.randint(2, size=(n))
        self.y = np.zeros((n, 2))
        self.y[:, 0] = blah
        self.y[:, 1] = 1 - blah
        self.mask = np.arange(n)
        self.cnt = 0
        self.n = n

    def getdata(self, n):
        if self.cnt + n > self.n:
            np.random.shuffle(self.mask)
            self.cnt = 0
        mask = self.mask[self.cnt : self.cnt + n]
        return self.x[mask], self.y[mask]
n_data = 10000
batch_size = 10
fd = FakeData(n_data)
n_units = 200
n_classes = 2
x = tf.placeholder(tf.float32, shape=[None, 90, 4])
y_ = tf.placeholder(tf.float32, shape=[None, n_classes])
dropout = tf.placeholder(tf.float32)
w_out = tf.Variable(tf.truncated_normal([n_units, n_classes]))
b_out = tf.Variable(tf.truncated_normal([n_classes]))
lstm = tf.contrib.rnn.LSTMCell(n_units)
cell = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=1.0 - dropout)
new_x = tf.unstack(x, 90, 1)
new_x = tf.Print(new_x, [tf.shape(new_x)], message='newx is: ')
output, state = tf.nn.dynamic_rnn(cell, new_x, dtype=tf.float32)
output = tf.Print(output, [tf.shape(output)], message='output is: ')
logits = tf.matmul(output[-1], w_out) + b_out
logits = tf.Print(logits, [tf.shape(logits)], message='logits is: ')
preds = tf.nn.softmax(logits)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                              labels=y_))
training = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
correct = tf.equal(tf.argmax(preds, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
#
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(10):
        batch_x, batch_y = fd.getdata(batch_size)
        sess.run([training], feed_dict={x: batch_x, y_: batch_y, dropout: 0})
        if i % 100 == 0:
            print "Accuracy {}".format(accuracy.eval(feed_dict={x: batch_x,
                                                                y_: batch_y, dropout: 0}))
The specific question I have is: for some reason, when I run the code without the tf.Print lines, I get a weird shape transformation error
ValueError: Dimension must be 2 but is 3 for 'transpose' (op: 'Transpose') with shapes: [?,4], [3].
on line
output, state = tf.nn.dynamic_rnn(cell, new_x, dtype=tf.float32)
However, when I include the tf.Print lines, it correctly logs the shapes and is able to run the whole session. Am I missing something?
For clarity, the shapes should be:
input: n x 90 x 4
new_x: 90 x n x 4
output: 90 x n x 200
logits: n x 2
Adding the answer here in case anyone else runs across this problem in the future.
It turns out that a lot of old RNN examples floating around use unstack. However, that turns the input into a list of tensors, which dynamic_rnn cannot take as input. The tf.Print call was converting the list of 2D tensors back into a single 3D tensor, which is why the graph then built and ran correctly. The solution is to transpose the data instead:
new_x = tf.transpose(x, perm=(1, 0, 2)) (thanks rvinas)
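Alternatively, if you do keep tf.unstack, the list it produces is what tf.nn.static_rnn expects; a sketch under that assumption (note also that dynamic_rnn would need time_major=True if you feed it time-major input):

# sketch: static_rnn consumes the per-timestep list that unstack produces
new_x = tf.unstack(x, 90, 1)                    # list of 90 tensors, each [n, 4]
outputs, state = tf.nn.static_rnn(cell, new_x, dtype=tf.float32)
logits = tf.matmul(outputs[-1], w_out) + b_out  # last timestep output -> [n, 2]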

Error training Cifar-10 Model Tensorflow - Accuracy is 0 and will not optimize and losses not reported

I am currently trying to train my model to categorize the cifar-10 dataset. I read the data like this:
def convert_images(raw):
    raw_float = np.array(raw, dtype=float)
    images = raw_float.reshape([-1, 3, 32, 32])
    images = images.transpose([0, 2, 3, 1])
    return images

def load_data(filename):
    data = unpickle(filename)
    raw_images = data[b'data']
    labels = np.array(data[b'labels'])
    images = convert_images(raw_images)
    return images, labels

def load_training_data():
    images = np.zeros(shape=[50000, 32, 32, 3], dtype=float)
    labels = np.zeros(shape=[50000], dtype=int)
    begin = 0
    for i in range(5):
        filename = "data_batch_" + str(i + 1)
        images_batch, labels_batch = load_data(filename)
        num_images = len(images_batch)
        end = begin + num_images
        images[begin:end, :] = images_batch
        labels[begin:end] = labels_batch
        begin = end
    return images, labels, OneHotEncoder(categorical_features=labels, n_values=10)
What this does is reshape the data so that it is a 4D array with 32x32x3 values for the pixels and RGB colors. I define my model like this (I first reshape X to be a row vector because the 4D array creates errors):
X = tf.placeholder(tf.float32, [None,32,32,3])
Y_labeled = tf.placeholder(tf.int32, [None])
data = load_training_data()
with tf.name_scope('dnn'):
    XX = tf.reshape(X, [-1, 3072])
    hidden1 = tf.layers.dense(XX, 300, name='hidden1', activation=tf.nn.relu)
    hidden2 = tf.layers.dense(hidden1, 200, name='hidden2', activation=tf.nn.relu)
    hidden3 = tf.layers.dense(hidden2, 200, name='hidden3', activation=tf.nn.relu)
    hidden4 = tf.layers.dense(hidden3, 100, name='hidden4', activation=tf.nn.relu)
    logits = tf.layers.dense(hidden4, 10, name='outputs')
with tf.name_scope('loss'):
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y_labeled, logits=logits)
    loss = tf.reduce_mean(cross_entropy, name='loss')
learning_rate = 0.01
with tf.name_scope('train'):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
with tf.name_scope('eval'):
    correct = tf.nn.in_top_k(logits, Y_labeled, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
batch_size = 100
n_epochs = 50
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for iteration in range(50000 // batch_size):
            X_batch = data[0][iteration * batch_size:(iteration + 1) * batch_size]
            y_batch = data[1][iteration * batch_size:(iteration + 1) * batch_size]
            # X_batch, y_batch = data.train.next_batch(batch_size)
            sess.run(training_op, feed_dict={X: X_batch, Y_labeled: y_batch})
        acc_train = accuracy.eval(feed_dict={X: X_batch, Y_labeled: y_batch})
        print(epoch, "train accuracy:", acc_train, "loss", loss)
I want to define a simple model that has 4 hidden layers. When I run this it compiles with no errors and starts "training", but the accuracy is 0.0 and it does not print any losses. I am not sure if the error is in my calculation of accuracy and loss or in my definition of the model.
There seems to be a problem with the way you feed your labels. When you create the placeholder as Y_labeled = tf.placeholder(tf.int32, [None, 10]) it expects a vector of dimension 10 per sample, but later, when you create the label numpy tensor as labels = np.zeros(shape=[50000], dtype=int), each label is a scalar.
This is why you get this error: the placeholder needs to be fed with a tensor of dimension (batch_size, 10), but you feed it with (batch_size,).
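A minimal sketch of the one-hot conversion that a (batch_size, 10) placeholder would then expect (plain NumPy; an assumption about the intended fix, not from the original answer):

import numpy as np
y_batch = np.array([3, 0, 9])            # integer class ids, shape (3,)
y_batch_onehot = np.eye(10)[y_batch]     # shape (3, 10), one one-hot row per label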

TypeError: The value of a feed cannot be a tf.Tensor object

I am a newbie with TensorFlow, learning from the very beginning. I tried to read a csv file and decode it using tf.decode_csv, and I am trying to understand batching and the graph through examples.
My example csv has 8 feature columns and 1 logistic (0/1) label column, separated by commas.
These are the first four rows of my file:
-0.294118,0.487437,0.180328,-0.292929,0,0.00149028,-0.53117,-0.0333333,0
-0.882353,-0.145729,0.0819672,-0.414141,0,-0.207153,-0.766866,-0.666667,1
-0.0588235,0.839196,0.0491803,0,0,-0.305514,-0.492741,-0.633333,0
-0.882353,-0.105528,0.0819672,-0.535354,-0.777778,-0.162444,-0.923997,0,1
My code is,
import tensorflow as tf
filename_queue = tf.train.string_input_producer(['data-03-diabetes.csv'],
                                                shuffle=False,
                                                name='filename_queue')
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
record_defaults = [[0.],[0.],[0.],[0.],[0.],[0.],[0.],[0.],[0.]]
xy = tf.decode_csv(value, record_defaults=record_defaults, field_delim=',')
train_x_batch, train_y_batch = tf.train.batch([xy[0:-1], xy[-1:]],
                                              batch_size=10)
X = tf.placeholder(tf.float32, shape=[None, 8])
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([8,1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
hypothesis = tf.sigmoid(tf.matmul(X, W) + b)
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))
train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
predicted = tf.cast(hypothesis > 0.5, dtype = tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    sess.run(tf.global_variables_initializer())
    feed = {X: train_x_batch, Y: train_y_batch}
    for step in range(10001):
        x_batch, y_batch = sess.run([train_x_batch, train_y_batch])
        if step % 200 == 0:
            print(step, sess.run(cost, feed_dict=feed))
    h, c, a = sess.run([hypothesis, predicted, accuracy], feed_dict=feed)
    print("\nHypothesis", h, "\nCorrect (Y):", c, "\nAccuracy", a)
and then I got this error:
TypeError: The value of a feed cannot be a tf.Tensor object. Acceptable feed values include Python scalars, strings, lists, numpy ndarrays, or TensorHandles.
Does it mean that I declared the placeholders and Variables wrongly?
Thanks in advance!
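The error message itself points at the cause: feed = {X: train_x_batch, Y: train_y_batch} puts tf.Tensor objects into feed_dict, which only accepts NumPy data. A sketch of one likely fix, offered as an assumption: evaluate the batch tensors first and feed the resulting arrays.

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    sess.run(tf.global_variables_initializer())
    for step in range(10001):
        # evaluate the queue-backed tensors into NumPy arrays, then feed those
        x_batch, y_batch = sess.run([train_x_batch, train_y_batch])
        _, cost_val = sess.run([train, cost], feed_dict={X: x_batch, Y: y_batch})
        if step % 200 == 0:
            print(step, cost_val)
    coord.request_stop()
    coord.join(threads)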

How can I log individual scalar values from a TensorFlow Variable?

How can I log (with SummaryWriter, e.g. for TensorBoard) individual scalar elements of a tensor Variable? For example, how can I log individual weights of a given layer or node in a network?
In my example code, I've pressed a general feed-forward neural network into service to do simple linear regression, and want (in that case) to log the weights of the lone node in the lone hidden layer as learning progresses.
I can get these values explicitly during a session with, for example
sess.run(layer_weights)[0][i][0]
for the i-th weight, where layer_weights is a list of the weight Variables; but I can't figure out how to log the corresponding scalar values. If I try
w1 = tf.slice(layer_weights[0], [0], [1])[0]
tf.scalar_summary('w1', w1)
or
w1 = layer_weights[0][1][0]
tf.scalar_summary('w1', w1)
I get
ValueError: Shape (5, 1) must have rank 1
from __future__ import (absolute_import, print_function, division, unicode_literals)
import numpy as np
import tensorflow as tf
# Basic model parameters as external flags
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('network_nodes', [5, 1], 'The number of nodes in each layer, including input and output.')
flags.DEFINE_float('epochs', 250, 'Epochs to run')
flags.DEFINE_float('learning_rate', 0.15, 'Initial learning rate.')
flags.DEFINE_string('data_dir', './data', 'Directory to hold training and test data.')
flags.DEFINE_string('train_dir', './_tmp/train', 'Directory to log training (and the network def).')
flags.DEFINE_string('test_dir', './_tmp/test', 'Directory to log testing.')
def variable_summaries(var, name):
    with tf.name_scope("summaries"):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean/' + name, mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
        tf.scalar_summary('sttdev/' + name, stddev)
        tf.scalar_summary('max/' + name, tf.reduce_max(var))
        tf.scalar_summary('min/' + name, tf.reduce_min(var))
        tf.histogram_summary(name, var)
def add_layer(input_tensor, input_dim, output_dim, neuron_fn, layer_name):
    with tf.name_scope(layer_name):
        with tf.name_scope("weights"):
            weights = tf.Variable(tf.truncated_normal([input_dim, output_dim], stddev=0.1))
        with tf.name_scope("biases"):
            biases = tf.Variable(tf.constant(0.1, shape=[output_dim]))
        with tf.name_scope('activations'):
            with tf.name_scope('weighted_inputs'):
                weighted_inputs = tf.matmul(input_tensor, weights) + biases
                tf.histogram_summary(layer_name + '/weighted_inputs', weighted_inputs)
            output = neuron_fn(weighted_inputs)
    return output, weights, biases
def make_ff_network(nodes, input_activation, hidden_activation_fn=tf.nn.sigmoid, output_activation_fn=tf.nn.softmax):
    layer_activations = [input_activation]
    layer_weights = []
    layer_biases = []
    n_layers = len(nodes)
    for l in range(1, n_layers):
        a, w, b = add_layer(layer_activations[l - 1], nodes[l - 1], nodes[l],
                            output_activation_fn if l == n_layers - 1 else hidden_activation_fn,
                            'output_layer' if l == n_layers - 1 else 'hidden_layer' + (
                                '_{}'.format(l) if n_layers > 3 else ''))
        layer_activations += [a]
        layer_weights += [w]
        layer_biases += [b]
    with tf.name_scope('output'):
        net_activation = tf.identity(layer_activations[-1], name='network_activation')
    return net_activation, layer_weights, layer_biases
# Inputs and outputs
with tf.name_scope('data'):
    x = tf.placeholder(tf.float32, shape=[None, FLAGS.network_nodes[0]], name='inputs')
    y_ = tf.placeholder(tf.float32, shape=[None, FLAGS.network_nodes[-1]], name='correct_outputs')
# Network structure
y, layer_weights, layer_biases = make_ff_network(FLAGS.network_nodes, x, output_activation_fn=tf.identity)
# Metrics and operations
with tf.name_scope('accuracy'):
    with tf.name_scope('loss'):
        loss = tf.reduce_mean(tf.square(y - y_))
# NONE OF THESE WORK:
#w1 = tf.slice(layer_weights[0], [0], [1])[0]
#tf.scalar_summary('w1', w1)
#w1 = layer_weights[0][1][0]
#tf.scalar_summary('w1', w1)
tf.scalar_summary('loss', loss)
train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(loss)
# Logging
train_writer = tf.train.SummaryWriter(FLAGS.train_dir, tf.get_default_graph())
test_writer = tf.train.SummaryWriter(FLAGS.test_dir)
merged = tf.merge_all_summaries()
W = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
train_x = np.random.rand(100000, FLAGS.network_nodes[0])
train_y = np.array([np.dot(W, train_x.T)+ 6.0]).T
test_x = np.random.rand(1000, FLAGS.network_nodes[0])
test_y = np.array([np.dot(W, test_x.T)+ 6.0]).T
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for ep in range(FLAGS.epochs):
        sess.run(train_step, feed_dict={x: train_x, y_: train_y})
        summary = sess.run(merged, feed_dict={x: test_x, y_: test_y})
        test_writer.add_summary(summary, ep + 1)
    # THESE WORK
    print('w1 = {}'.format(sess.run(layer_weights)[0][0][0]))
    print('w2 = {}'.format(sess.run(layer_weights)[0][1][0]))
    print('w3 = {}'.format(sess.run(layer_weights)[0][2][0]))
    print('w4 = {}'.format(sess.run(layer_weights)[0][3][0]))
    print('w5 = {}'.format(sess.run(layer_weights)[0][4][0]))
    print(' b = {}'.format(sess.run(layer_biases)[0][0]))
There are different errors in the code.
The main problem is that you are passing a Python list of tensors to scalar_summary.
The error saying that you are passing a tensor that does not have rank 1 is related to the slice operation.
You want to log the weights layer by layer. One way to do that is to log each weight of each layer:
for weight in layer_weights:
    tf.scalar_summary([['%s_w%d%d' % (weight.name, i, j) for i in xrange(len(layer_weights))] for j in xrange(5)], weight)
Running tensorboard --logdir=./_tmp/test will then show these nice per-weight graphs in TensorBoard.
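For the slice error specifically, begin and size need one entry per dimension of the (5, 1) weight matrix; a sketch assuming the same TF 0.x summary API as in the question:

# sketch: slice out one element, then reshape it to a scalar for scalar_summary
w1 = tf.reshape(tf.slice(layer_weights[0], [0, 0], [1, 1]), [])
tf.scalar_summary('w1', w1)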
