How can I log individual scalar values from a TensorFlow Variable? - python

How can I log (with SummaryWriter, e.g. for TensorBoard) individual scalar elements of a tensor Variable? For example, how can I log individual weights of a given layer or node in a network?
In my example code, I've pressed a general feed-forward neural network into service to do simple linear regression, and want (in that case) to log the weights of the lone node in the lone hidden layer as learning progresses.
I can get these values explicitly during a session with, for example
sess.run(layer_weights)[0][i][0]
for the i-th weight, where layer_weights is a list of the weight Variables; but I can't figure out how to log the corresponding scalar values. If I try
w1 = tf.slice(layer_weights[0], [0], [1])[0]
tf.scalar_summary('w1', w1)
or
w1 = layer_weights[0][1][0]
tf.scalar_summary('w1', w1)
I get
ValueError: Shape (5, 1) must have rank 1
How can I log individual scalar values from a TensorFlow Variable?
from __future__ import (absolute_import, print_function, division, unicode_literals)
import numpy as np
import tensorflow as tf
# Basic model parameters as external flags
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('network_nodes', [5, 1], 'The number of nodes in each layer, including input and output.')
flags.DEFINE_float('epochs', 250, 'Epochs to run')
flags.DEFINE_float('learning_rate', 0.15, 'Initial learning rate.')
flags.DEFINE_string('data_dir', './data', 'Directory to hold training and test data.')
flags.DEFINE_string('train_dir', './_tmp/train', 'Directory to log training (and the network def).')
flags.DEFINE_string('test_dir', './_tmp/test', 'Directory to log testing.')
def variable_summaries(var, name):
    with tf.name_scope("summaries"):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean/' + name, mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
        tf.scalar_summary('sttdev/' + name, stddev)
        tf.scalar_summary('max/' + name, tf.reduce_max(var))
        tf.scalar_summary('min/' + name, tf.reduce_min(var))
        tf.histogram_summary(name, var)
def add_layer(input_tensor, input_dim, output_dim, neuron_fn, layer_name):
    with tf.name_scope(layer_name):
        with tf.name_scope("weights"):
            weights = tf.Variable(tf.truncated_normal([input_dim, output_dim], stddev=0.1))
        with tf.name_scope("biases"):
            biases = tf.Variable(tf.constant(0.1, shape=[output_dim]))
        with tf.name_scope('activations'):
            with tf.name_scope('weighted_inputs'):
                weighted_inputs = tf.matmul(input_tensor, weights) + biases
                tf.histogram_summary(layer_name + '/weighted_inputs', weighted_inputs)
            output = neuron_fn(weighted_inputs)
    return output, weights, biases
def make_ff_network(nodes, input_activation, hidden_activation_fn=tf.nn.sigmoid, output_activation_fn=tf.nn.softmax):
    layer_activations = [input_activation]
    layer_weights = []
    layer_biases = []
    n_layers = len(nodes)
    for l in range(1, n_layers):
        a, w, b = add_layer(layer_activations[l - 1], nodes[l - 1], nodes[l],
                            output_activation_fn if l == n_layers - 1 else hidden_activation_fn,
                            'output_layer' if l == n_layers - 1 else 'hidden_layer' + (
                                '_{}'.format(l) if n_layers > 3 else ''))
        layer_activations += [a]
        layer_weights += [w]
        layer_biases += [b]
    with tf.name_scope('output'):
        net_activation = tf.identity(layer_activations[-1], name='network_activation')
    return net_activation, layer_weights, layer_biases
# Inputs and outputs
with tf.name_scope('data'):
    x = tf.placeholder(tf.float32, shape=[None, FLAGS.network_nodes[0]], name='inputs')
    y_ = tf.placeholder(tf.float32, shape=[None, FLAGS.network_nodes[-1]], name='correct_outputs')
# Network structure
y, layer_weights, layer_biases = make_ff_network(FLAGS.network_nodes, x, output_activation_fn=tf.identity)
# Metrics and operations
with tf.name_scope('accuracy'):
    with tf.name_scope('loss'):
        loss = tf.reduce_mean(tf.square(y - y_))
    # NONE OF THESE WORK:
    #w1 = tf.slice(layer_weights[0], [0], [1])[0]
    #tf.scalar_summary('w1', w1)
    #w1 = layer_weights[0][1][0]
    #tf.scalar_summary('w1', w1)
    tf.scalar_summary('loss', loss)
train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(loss)
# Logging
train_writer = tf.train.SummaryWriter(FLAGS.train_dir, tf.get_default_graph())
test_writer = tf.train.SummaryWriter(FLAGS.test_dir)
merged = tf.merge_all_summaries()
W = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
train_x = np.random.rand(100000, FLAGS.network_nodes[0])
train_y = np.array([np.dot(W, train_x.T)+ 6.0]).T
test_x = np.random.rand(1000, FLAGS.network_nodes[0])
test_y = np.array([np.dot(W, test_x.T)+ 6.0]).T
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for ep in range(FLAGS.epochs):
        sess.run(train_step, feed_dict={x: train_x, y_: train_y})
        summary = sess.run(merged, feed_dict={x: test_x, y_: test_y})
        test_writer.add_summary(summary, ep+1)
    # THESE WORK
    print('w1 = {}'.format(sess.run(layer_weights)[0][0][0]))
    print('w2 = {}'.format(sess.run(layer_weights)[0][1][0]))
    print('w3 = {}'.format(sess.run(layer_weights)[0][2][0]))
    print('w4 = {}'.format(sess.run(layer_weights)[0][3][0]))
    print('w5 = {}'.format(sess.run(layer_weights)[0][4][0]))
    print(' b = {}'.format(sess.run(layer_biases)[0][0]))

There are a few different errors in the code.
The main problem is that you are passing a Python list of tensors to scalar_summary. The error saying that you are passing a tensor that does not have rank 1 comes from the slice operation.
You want to pass the weights and log them layer by layer. One way to do that is to log each individual weight of each layer, building a tag for every element of the weight matrix (the tags must have the same shape as the values):
for weight in layer_weights:
    shape = weight.get_shape().as_list()
    tags = [['%s_w%d%d' % (weight.name, i, j) for j in xrange(shape[1])]
            for i in xrange(shape[0])]
    tf.scalar_summary(tags, weight)
This will produce, when you run tensorboard --logdir=./_tmp/test, these nice graphs
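If you only want a handful of named scalars (w1 ... w5 as in the question), here is a minimal sketch using the same 0.x summary API. The key points are that tf.slice needs a begin and size entry for every dimension of the (5, 1) weight matrix (which is what caused the rank error), and that the sliced (1, 1) result has to be reshaped to a scalar before it is passed to scalar_summary:
with tf.name_scope('individual_weights'):
    first_layer_w = layer_weights[0]                  # shape (5, 1) in this example
    n_rows = first_layer_w.get_shape().as_list()[0]   # 5 here
    for i in range(n_rows):
        # one begin/size entry per dimension, then drop both dimensions
        w_i = tf.reshape(tf.slice(first_layer_w, [i, 0], [1, 1]), [])
        tf.scalar_summary('w{}'.format(i + 1), w_i)
Note that these summaries have to be created before the call to tf.merge_all_summaries() so that they end up in merged.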

Related

What do I need to save and restore for LSTM model in Tensorflow?

I am new to TensorFlow and I am working on training an LSTM-RNN in TensorFlow.
I need to save the model so that I can restore it and run it on test data again.
I am not sure what to save.
Do I need to save sess, or do I need to save pred?
When I save sess, then restore it and run on the test data as
one_hot_predictions, accuracy, final_loss = sess.run(
    [pred, accuracy, cost],
    feed_dict={
        x: X_test,
        y: one_hot(y_test)
    }
)
Then I get an error saying that pred is unknown.
Since I am new to TensorFlow, I am not sure what to save and what to restore in order to test with new data.
X_train = load_X(X_train_path)
X_test = load_X(X_test_path)
y_train = load_y(y_train_path)
y_test = load_y(y_test_path)
# proof that it actually works for the skeptical: replace labelled classes with random classes to train on
#for i in range(len(y_train)):
# y_train[i] = randint(0, 5)
# Input Data
training_data_count = len(X_train) # 4519 training series (with 50% overlap between each serie)
test_data_count = len(X_test) # 1197 test series
n_input = len(X_train[0][0]) # num input parameters per timestep
n_hidden = 34 # Hidden layer num of features
n_classes = 6
#updated for learning-rate decay
# calculated as: decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
decaying_learning_rate = True
learning_rate = 0.0025 #used if decaying_learning_rate set to False
init_learning_rate = 0.005
decay_rate = 0.96 #the base of the exponential in the decay
decay_steps = 100000 #used in decay every 60000 steps with a base of 0.96
global_step = tf.Variable(0, trainable=False)
lambda_loss_amount = 0.0015
training_iters = training_data_count *300 # Loop 300 times on the dataset, ie 300 epochs
batch_size = 512
display_iter = batch_size*8 # To show test set accuracy during training
#Utility functions for training:
def LSTM_RNN(_X, _weights, _biases):
# model architecture based on "guillaume-chevalier" and "aymericdamien" under the MIT license.
_X = tf.transpose(_X, [1, 0, 2]) # permute n_steps and batch_size
_X = tf.reshape(_X, [-1, n_input])
# Rectified Linear Unit activation function used
_X = tf.nn.relu(tf.matmul(_X, _weights['hidden']) + _biases['hidden'])
# Split data because rnn cell needs a list of inputs for the RNN inner loop
_X = tf.split(_X, n_steps, 0)
# Define two stacked LSTM cells (two recurrent layers deep) with tensorflow
lstm_cell_1 = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
lstm_cell_2 = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2], state_is_tuple=True)
outputs, states = tf.contrib.rnn.static_rnn(lstm_cells, _X, dtype=tf.float32)
# A single output is produced, in style of "many to one" classifier, refer to http://karpathy.github.io/2015/05/21/rnn-effectiveness/ for details
lstm_last_output = outputs[-1]
# Linear activation
return tf.matmul(lstm_last_output, _weights['out']) + _biases['out']
def extract_batch_size(_train, _labels, _unsampled, batch_size):
# Fetch a "batch_size" amount of data and labels from "(X|y)_train" data.
# Elements of each batch are chosen randomly, without replacement, from X_train with corresponding label from Y_train
# unsampled_indices keeps track of sampled data ensuring non-replacement. Resets when remaining datapoints < batch_size
shape = list(_train.shape)
shape[0] = batch_size
batch_s = np.empty(shape)
batch_labels = np.empty((batch_size,1))
for i in range(batch_size):
# Loop index
# index = random sample from _unsampled (indices)
index = random.choice(_unsampled)
batch_s[i] = _train[index]
batch_labels[i] = _labels[index]
_unsampled.remove(index)
return batch_s, batch_labels, _unsampled
def one_hot(y_):
# One hot encoding of the network outputs
# e.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]
y_ = y_.reshape(len(y_))
n_values = int(np.max(y_)) + 1
return np.eye(n_values)[np.array(y_, dtype=np.int32)] # Returns FLOATS
# Graph input/output
x = tf.placeholder(tf.float32, [None, n_steps, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
# Graph weights
weights = {
'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])), # Hidden layer weights
'out': tf.Variable(tf.random_normal([n_hidden, n_classes], mean=1.0))
}
biases = {
'hidden': tf.Variable(tf.random_normal([n_hidden])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
pred = LSTM_RNN(x, weights, biases)
# Loss, optimizer and evaluation
l2 = lambda_loss_amount * sum(
tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
) # L2 loss prevents this overkill neural network to overfit the data
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)) + l2 # Softmax loss
if decaying_learning_rate:
learning_rate = tf.train.exponential_decay(init_learning_rate, global_step*batch_size, decay_steps, decay_rate, staircase=True)
#decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps) #exponentially decayed learning rate
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost,global_step=global_step) # Adam Optimizer
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
#Train the network:
test_losses = []
test_accuracies = []
train_losses = []
train_accuracies = []
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
init = tf.global_variables_initializer()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
sess.run(init)
# Perform Training steps with "batch_size" amount of data at each loop.
# Elements of each batch are chosen randomly, without replacement, from X_train,
# restarting when remaining datapoints < batch_size
step = 1
time_start = time.time()
unsampled_indices = range(0,len(X_train))
while step * batch_size <= training_iters:
#print (sess.run(learning_rate)) #decaying learning rate
#print (sess.run(global_step)) # global number of iterations
if len(unsampled_indices) < batch_size:
unsampled_indices = range(0,len(X_train))
batch_xs, raw_labels, unsampled_indicies = extract_batch_size(X_train, y_train, unsampled_indices, batch_size)
batch_ys = one_hot(raw_labels)
# check that encoded output is same length as num_classes, if not, pad it
if len(batch_ys[0]) < n_classes:
temp_ys = np.zeros((batch_size, n_classes))
temp_ys[:batch_ys.shape[0],:batch_ys.shape[1]] = batch_ys
batch_ys = temp_ys
# Fit training using batch data
_, loss, acc = sess.run(
[optimizer, cost, accuracy],
feed_dict={
x: batch_xs,
y: batch_ys
}
)
train_losses.append(loss)
train_accuracies.append(acc)
# Evaluate network only at some steps for faster training:
if (step*batch_size % display_iter == 0) or (step == 1) or (step * batch_size > training_iters):
# To not spam console, show training accuracy/loss in this "if"
print("Iter #" + str(step*batch_size) + \
": Learning rate = " + "{:.6f}".format(sess.run(learning_rate)) + \
": Batch Loss = " + "{:.6f}".format(loss) + \
", Accuracy = {}".format(acc))
# Evaluation on the test set (no learning made here - just evaluation for diagnosis)
loss, acc = sess.run(
[cost, accuracy],
feed_dict={
x: X_test,
y: one_hot(y_test)
}
)
test_losses.append(loss)
test_accuracies.append(acc)
print("PERFORMANCE ON TEST SET: " + \
"Batch Loss = {}".format(loss) + \
", Accuracy = {}".format(acc))
step += 1
print("Optimization Finished!")
EDIT:
I can save the model as
print("Optimization Finished!")
save_path = saver.save(sess, "/home/test/venv/TFCodes/HumanActivityRecognition/model.ckpt")
Then I tried to restore and ok, I can restore. But I don't know how to test with test data.
My restore code is
X_test = load_X(X_test_path)
with tf.Session() as sess:
saver = tf.train.import_meta_graph('/home/nyan/venv/TFCodes/HumanActivityRecognition/model.ckpt.meta')
saver.restore(sess, tf.train.latest_checkpoint('./'))
print("Model restored.")
all_vars = tf.trainable_variables()
for i in range(len(all_vars)):
name = all_vars[i].name
values = sess.run(name)
print('name', name)
#print('value', values)
print('shape',values.shape)
result = sess.run(prediction, feed_dict={X: X_test})
print("loss:", l, "prediction:", result, "true Y:", y_data)
# print char using dic
result_str = [idx2char[c] for c in np.squeeze(res
ult)]
print("\tPrediction str:", ''.join(result_str))
The output is
Model restored.
('name', u'Variable_1:0')
('shape', (36, 34))
('name', u'Variable_2:0')
('shape', (34, 6))
('name', u'Variable_3:0')
('shape', (34,))
('name', u'Variable_4:0')
('shape', (6,))
('name', u'rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0')
('shape', (68, 136))
('name', u'rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0')
('shape', (136,))
('name', u'rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0')
('shape', (68, 136))
('name', u'rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0')
('shape', (136,))
Traceback (most recent call last):
File "restore.py", line 74, in <module>
result = sess.run(prediction, feed_dict={X: X_test})
NameError: name 'prediction' is not defined
How to test the model restored?
What I find easiest is the tf.saved_model.simple_save() function. It saves the computation graph you use, together with its inputs, outputs and weights, as a .pb model plus the variable files.
You can later restore this model, or even put it on ML Engine or serve it with TF Serving.
An example code snippet with a Keras model, applied to YOLO:
inputs = {"image_bytes": model.input,
"shape": image_shape}
outputs = {"boxes": boxes,
"scores": scores,
"classes": classes}
tf.saved_model.simple_save(sess, "saved_model/", inputs, outputs)
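To address the "how do I test the restored model" part of the question: below is a minimal sketch of loading such a SavedModel again and running the prediction op, assuming the input/output keys from the snippet above ("image_bytes", "shape", "boxes") and placeholder arrays (some_image_batch, some_image_shape) that you supply yourself:
import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    # load the graph and variables written by simple_save under the "serve" tag
    meta_graph = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], "saved_model/")
    signature = meta_graph.signature_def[
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    # look up the concrete tensor names from the signature and run them
    feed = {signature.inputs["image_bytes"].name: some_image_batch,
            signature.inputs["shape"].name: some_image_shape}
    boxes_out = sess.run(signature.outputs["boxes"].name, feed_dict=feed)
You can also inspect the saved signature and its tensor names with the saved_model_cli show command-line tool.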

Tensorflow model outputs probability values larger than 1

I'm working on a classification program where I'm training my model to predict whether the object is a nut or a screw. I created my own dataset since I did not find an existing one. I trained my model, but I'm not getting correct predictions. Probability values go beyond 1; basically I get garbage values.
I get this predicted value: [[9.990779e-01 9.220659e-04]]
#Training code
import dataset
import tensorflow as tf
import time
from datetime import timedelta
import math
import random
import numpy as np
import os
# Adding Seed so that random initialization is consistent
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
batch_size = 20
# Prepare input data
classes = os.listdir('training_set')
num_classes = len(classes)
# 20% of the data will automatically be used for validation
validation_size = 0.2
img_size = 128
num_channels = 3
train_path = 'training_set'
# We shall load all the training and validation images and labels into memory using openCV and use that during training
data = dataset.read_train_sets(train_path, img_size, classes,
validation_size=validation_size)
print("Complete reading input data. Will Now print a snippet of it")
print("Number of files in Training-
set:\t\t{}".format(len(data.train.labels)))
print("Number of files in Validation-
set:\t{}".format(len(data.valid.labels)))
session = tf.Session()
x = tf.placeholder(tf.float32, shape=[None, img_size, img_size,
num_channels], name='x')
## labels
y_true = tf.placeholder(tf.float32, shape=[None, num_classes],
name='y_true')
y_true_cls = tf.argmax(y_true, dimension=1)
##Network graph params
filter_size_conv1 = 3
num_filters_conv1 = 32
filter_size_conv2 = 3
num_filters_conv2 = 32
filter_size_conv3 = 3
num_filters_conv3 = 32
fc_layer_size = 128
def create_weights(shape):
return tf.Variable(tf.truncated_normal(shape, stddev=0.05))
def create_biases(size):
return tf.Variable(tf.constant(0.05, shape=[size]))
def create_convolutional_layer(input,
num_input_channels,
conv_filter_size,
num_filters):
## We shall define the weights that will be trained using create_weights function.
weights = create_weights(shape=[conv_filter_size, conv_filter_size, num_input_channels, num_filters])
## We create biases using the create_biases function. These are also trained.
biases = create_biases(num_filters)
## Creating the convolutional layer
layer = tf.nn.conv2d(input=input,
filter=weights,
strides=[1, 1, 1, 1],
padding='SAME')
layer += biases
## We shall be using max-pooling.
layer = tf.nn.max_pool(value=layer,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
## Output of pooling is fed to Relu which is the activation function for us.
layer = tf.nn.relu(layer)
return layer
def create_flatten_layer(layer):
# We know that the shape of the layer will be [batch_size img_size img_size num_channels]
# But let's get it from the previous layer.
layer_shape = layer.get_shape()
## Number of features will be img_height * img_width* num_channels. But we shall calculate it in place of hard-coding it.
num_features = layer_shape[1:4].num_elements()
## Now, we Flatten the layer so we shall have to reshape to num_features
layer = tf.reshape(layer, [-1, num_features])
return layer
def create_fc_layer(input,
num_inputs,
num_outputs,
use_relu=True):
# Let's define trainable weights and biases.
weights = create_weights(shape=[num_inputs, num_outputs])
biases = create_biases(num_outputs)
# Fully connected layer takes input x and produces wx+b.Since, these are matrices, we use matmul function in Tensorflow
layer = tf.matmul(input, weights) + biases
if use_relu:
layer = tf.nn.relu(layer)
return layer
layer_conv1 = create_convolutional_layer(input=x,
num_input_channels=num_channels,
conv_filter_size=filter_size_conv1,
num_filters=num_filters_conv1)
layer_conv2 = create_convolutional_layer(input=layer_conv1,
num_input_channels=num_filters_conv1,
conv_filter_size=filter_size_conv2,
num_filters=num_filters_conv2)
layer_conv3 = create_convolutional_layer(input=layer_conv2,
num_input_channels=num_filters_conv2,
conv_filter_size=filter_size_conv3,
num_filters=num_filters_conv3)
layer_flat = create_flatten_layer(layer_conv3)
layer_fc1 = create_fc_layer(input=layer_flat,
num_inputs=layer_flat.get_shape()
[1:4].num_elements(),
num_outputs=fc_layer_size,
use_relu=True)
layer_fc2 = create_fc_layer(input=layer_fc1,
num_inputs=fc_layer_size,
num_outputs=num_classes,
use_relu=False)
y_pred = tf.nn.softmax(layer_fc2, name='y_pred')
y_pred_cls = tf.argmax(y_pred, dimension=1)
session.run(tf.global_variables_initializer())
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,
labels=y_true)
cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
session.run(tf.global_variables_initializer())
def show_progress(epoch, feed_dict_train, feed_dict_validate, val_loss):
acc = session.run(accuracy, feed_dict=feed_dict_train)
val_acc = session.run(accuracy, feed_dict=feed_dict_validate)
msg = "Training Epoch {0} --- Training Accuracy: {1:>6.1%}, Validation
Accuracy: {2:>6.1%}, Validation Loss: {3:.3f}"
print(msg.format(epoch + 1, acc, val_acc, val_loss))
total_iterations = 0
saver = tf.train.Saver()
def train(num_iteration):
global total_iterations
for i in range(total_iterations,
total_iterations + num_iteration):
x_batch, y_true_batch, _, cls_batch = data.train.next_batch(batch_size)
x_valid_batch, y_valid_batch, _, valid_cls_batch = data.valid.next_batch(batch_size)
feed_dict_tr = {x: x_batch,
y_true: y_true_batch}
feed_dict_val = {x: x_valid_batch,
y_true: y_valid_batch}
session.run(optimizer, feed_dict=feed_dict_tr)
if i % int(data.train.num_examples / batch_size) == 0:
val_loss = session.run(cost, feed_dict=feed_dict_val)
epoch = int(i / int(data.train.num_examples / batch_size))
show_progress(epoch, feed_dict_tr, feed_dict_val, val_loss)
saver.save(session, 'C:\\Nutsbolts\\nuts-screws-model')
total_iterations += num_iteration
train(num_iteration=3000)
#Prediction code
import tensorflow as tf
import numpy as np
import os,glob,cv2
import sys,argparse
# First, pass the path of the image
dir_path = 'C:\\nutsbolts\\testing_set\\nuts'
image_path= 'nuts11.jpg'
filename = dir_path +'/' +image_path
image_size=128
num_channels=3
images = []
# Reading the image using OpenCV
image = cv2.imread(filename)
# Resizing the image to our desired size and preprocessing will be done exactly as done during training
image = cv2.resize(image, (image_size, image_size),0,0, cv2.INTER_LINEAR)
images.append(image)
images = np.array(images, dtype=np.uint8)
images = images.astype('float32')
images = np.multiply(images, 1.0/255.0)
# The input to the network is of shape [None, image_size, image_size, num_channels]. Hence we reshape.
x_batch = images.reshape(1, image_size,image_size,num_channels)
## Let us restore the saved model
sess = tf.Session()
# Step-1: Recreate the network graph. At this step only graph is created.
saver = tf.train.import_meta_graph('nuts-screws-model.meta')
# Step-2: Now let's load the weights saved using the restore method.
saver.restore(sess, tf.train.latest_checkpoint('./'))
# Accessing the default graph which we have restored
graph = tf.get_default_graph()
# Now, let's get hold of the op that we can be processed to get the output.
# In the original network y_pred is the tensor that is the prediction of the network
y_pred = graph.get_tensor_by_name("y_pred:0")
## Let's feed the images to the input placeholders
x= graph.get_tensor_by_name("x:0")
y_true = graph.get_tensor_by_name("y_true:0")
y_test_images = np.zeros((1, len(os.listdir('testing_set'))))
### Creating the feed_dict that is required to be fed to calculate y_pred
feed_dict_testing = {x: x_batch, y_true: y_test_images}
result=sess.run(y_pred, feed_dict=feed_dict_testing)
# result is of this format [probabiliy_of_nuts probability_of_screws]
print(result)
9.990779e-01 is actually below 1. It is scientific notation: 9.990779 × 10^-1, i.e. 0.9990779, so the two values in your output are valid probabilities that sum to roughly 1.
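For illustration, plugging the question's two numbers into plain Python shows they behave exactly like a probability distribution:
p_nut, p_screw = 9.990779e-01, 9.220659e-04
print(p_nut)             # 0.9990779  -- well below 1
print(p_nut + p_screw)   # ~1.0, as expected for softmax outputs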

How to Properly Implement a Dynamic Convolutional Neural Network using TensorFlow

I know that this is a very broad question, but I have asked many other questions and I have still been unable to properly implement a simple dynamic k-max pooling convolutional neural network as described in this paper. Currently, I am trying to modify the code from this tutorial. I believe I have successfully implemented the dynamic-k part. However, my main problem is that, because the k value is different for each input, the tensors that are produced have different shapes. I have tried countless things to fix this (which is why you may see some funny reshaping), but I can't figure it out. I think you'd need to pad each tensor to get them all to the size of the biggest one, but I can't seem to get that to work. Here is my code (I am sorry, it is generally rather sloppy).
# train.py
import datetime
import time
import numpy as np
import os
import tensorflow as tf
from env.src.sentiment_analysis.dcnn.text_dcnn import TextDCNN
from env.src.sentiment_analysis.cnn import data_helpers as data_helpers
from tensorflow.contrib import learn
# Model Hyperparameters
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularizaion lambda (default: 0.0)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 256, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
tf.flags.DEFINE_string("positive_file", "../rotten_tomatoes/rt-polarity.pos", "Location of the rt-polarity.pos file")
tf.flags.DEFINE_string("negative_file", "../rotten_tomatoes/rt-polarity.neg", "Location of the rt-polarity.neg file")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{} = {}".format(attr.upper(), value))
print("")
# Data Preparatopn
# Load data
print("Loading data...")
x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_file, FLAGS.negative_file)
# Build vocabulary
max_document_length = max([len(x.split(" ")) for x in x_text])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))
x_arr = np.array(x_text)
seq_lens = []
for s in x_arr:
seq_lens.append(len(s.split(" ")))
# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]
# Split train/test set
x_train, x_dev = x_shuffled[:-1000], x_shuffled[-1000:]
y_train, y_dev = y_shuffled[:-1000], y_shuffled[-1000:]
print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
# Training
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement
)
sess = tf.Session(config=session_conf)
with sess.as_default():
print("HERE")
print(x_train.shape)
dcnn = TextDCNN(
sequence_lengths=seq_lens,
sequence_length=x_train.shape[1],
num_classes=y_train.shape[1],
vocab_size=len(vocab_processor.vocabulary_),
embedding_size=FLAGS.embedding_dim,
filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
num_filters=FLAGS.num_filters,
)
# The training procedure
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(1e-4)
grads_and_vars = optimizer.compute_gradients(dcnn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
# Output directory for models and summaries
timestamp = str(int(time.time()))
out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
print("Writing to {}\n".format(out_dir))
# Summaries for loss and accuracy
loss_summary = tf.scalar_summary("loss", dcnn.loss)
acc_summary = tf.scalar_summary("accuracy", dcnn.accuracy)
# Summaries for training
train_summary_op = tf.merge_summary([loss_summary, acc_summary])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.train.SummaryWriter(train_summary_dir, sess.graph)
# Summaries for devs
dev_summary_op = tf.merge_summary([loss_summary, acc_summary])
dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
dev_summary_writer = tf.train.SummaryWriter(dev_summary_dir, sess.graph)
# Checkpointing
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
# TensorFlow assumes this directory already exsists so we need to create it
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.all_variables())
# Write vocabulary
vocab_processor.save(os.path.join(out_dir, "vocab"))
# Initialize all variables
sess.run(tf.initialize_all_variables())
def train_step(x_batch, y_batch):
"""
A single training step.
Args:
x_batch: A batch of X training values.
y_batch: A batch of Y training values
Returns: void
"""
feed_dict = {
dcnn.input_x: x_batch,
dcnn.input_y: y_batch,
dcnn.dropout_keep_prob: FLAGS.dropout_keep_prob
}
# Execute train_op
_, step, summaries, loss, accuracy = sess.run(
[train_op, global_step, train_summary_op, dcnn.loss, dcnn.accuracy],
feed_dict
)
# Print and save to disk loss and accuracy of the current training batch
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
train_summary_writer.add_summary(summaries, step)
def dev_step(x_batch, y_batch, writer=None):
"""
Evaluates a model on a dev set.
Args:
x_batch: A batch of X training values.
y_batch: A batch of Y training values.
writer: The writer to use to record the loss and accuracy
Returns: void
"""
feed_dict = {
dcnn.input_x: x_batch,
dcnn.input_y: y_batch,
dcnn.dropout_keep_prob : 1.0
}
step, summaries, loss, accuracy = sess.run(
[global_step, dev_summary_op, dcnn.loss, dcnn.accuracy],
feed_dict
)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
if writer:
writer.add_summary(summaries, step)
# Generate batches
batches = data_helpers.batch_iter(list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
# Training loop. For each batch...
for batch in batches:
x_batch, y_batch = zip(*batch)
train_step(x_batch, y_batch)
current_step = tf.train.global_step(sess, global_step)
if current_step % FLAGS.evaluate_every == 0:
print("\nEvaluation:")
dev_step(x_dev, y_dev, writer=dev_summary_writer)
print("")
if current_step % FLAGS.checkpoint_every == 0:
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
print("Saved model checkpoint to {}\n".format(path))
And here is the actual DCNN class:
import tensorflow as tf
class TextDCNN(object):
"""
A CNN for NLP tasks. Architecture is as follows:
Embedding layer, conv layer, max-pooling and softmax layer
"""
def __init__(self, sequence_lengths, sequence_length, num_classes, vocab_size, embedding_size, filter_sizes, num_filters):
"""
Makes a new CNNClassifier
Args:
sequence_length: The length of each sentence
num_classes: Number of classes in the output layer (positive and negative would be 2 classes)
vocab_size: The size of the vocabulary, needed to define the size of the embedding layer
embedding_size: Dimensionality of the embeddings
filter_sizes: Number of words the convolutional filters will cover, there will be num_filters for each size
specified.
num_filters: The number of filters per filter size.
Returns: A new CNNClassifier with the given parameters.
"""
# Define the inputs and the dropout
print("SEQL")
print(sequence_length)
self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
# Runs the operations on the CPU and organizes them into an embedding scope
with tf.device("/cpu:0"), tf.name_scope("embedding"):
W = tf.Variable( # Make a 4D tensor to store batch, width, height, and channel
tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
name="W"
)
self.embedded_chars = tf.nn.embedding_lookup(W, self.input_x)
self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Conv layer
filter_shape = [filter_size, embedding_size, 1, num_filters]
# W is the filter matrix
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
conv = tf.nn.conv2d(
self.embedded_chars_expanded,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv"
)
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
# Max-pooling layer over the outputs
print(sequence_lengths[i] - filter_size + 1)
print(h)
pooled = tf.nn.max_pool(
h,
ksize=[1, sequence_lengths[i] - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding="VALID",
name="pool"
)
pooled = tf.reshape(pooled, [-1, 1, 1, num_filters])
print(pooled)
pooled_outputs.append(pooled)
# Combine all of the pooled features
num_filters_total = num_filters * len(filter_sizes)
max_shape = tf.reduce_max(pooled_outputs, 1)
print("shapes")
print([p.get_shape() for p in pooled_outputs])
# pooled_outputs = [tf.pad(p, [[0, int(max_shape.get_shape()[0]) - int(p.get_shape()[0])], [0, 0], [0, 0], [0, 0]]) for p in pooled_outputs]
# pooled_outputs = [tf.reshape(p, [-1, 1, 1, num_filters]) for p in pooled_outputs]
# pooled_outputs = [tf.reshape(out, [-1, 1, 1, self.max_length]) for out in pooled_outputs]
self.h_pool = tf.concat(3, pooled_outputs)
self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
print("here")
print(self.h_pool_flat)
self.h_pool_flat = tf.reshape(self.h_pool, [max(sequence_lengths), num_filters_total])
# Add dropout
with tf.name_scope("dropout"):
# casted = tf.cast(self.dropout_keep_prob, tf.int32)
self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
self.h_drop = tf.reshape(self.h_drop, [-1, num_filters_total])
# Do raw predictions (no softmax)
with tf.name_scope("output"):
W = tf.Variable(tf.truncated_normal([num_filters_total, num_classes], stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
# xw_plus_b(...) is just Wx + b matmul alias
self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
# Calculate mean cross-entropy loss
with tf.name_scope("loss"):
# softmax_cross_entropy_with_logits(...) calculates cross-entropy loss
losses = tf.nn.softmax_cross_entropy_with_logits(self.scores, self.input_y)
'''print("here")
print(losses.get_shape())
print(self.scores.get_shape())
print(self.input_y.get_shape())'''
self.loss = tf.reduce_mean(losses)
# Calculate accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
I am using the Rotten Tomatoes sentiment labeled data set. The current error I am getting is this:
InvalidArgumentError (see above for traceback): input[1,0] mismatch: 5888 vs. 4864
[[Node: gradients/concat_grad/ConcatOffset = ConcatOffset[N=3, _device="/job:localhost/replica:0/task:0/cpu:0"](concat/concat_dim, gradients/concat_grad/ShapeN, gradients/concat_grad/ShapeN:1, gradients/concat_grad/ShapeN:2)]]
How can I fix this code so that all of the tensors are normalized to the same size after pooling (while keeping pooling dynamic) and so that the code runs to completion?
Sorry about all of the random commented out lines and prints and stuff, but I have tried extensively to make this work.
Although TensorFlow doesn't provide k-max pooling directly, I think tf.nn.top_k might help you build that op.
There are three things to note here.
Max-pooling and k-max pooling are two different operations: max-pooling retrieves the maximum-valued activation from the pooling window, while k-max pooling retrieves the k maximum values from the pooling window.
TensorFlow doesn't provide an API for k-max pooling as of now. What you are using here is the max-pooling operation, not k-max pooling.
As far as I know, TensorFlow also does not provide functionality to handle pooling that results in matrices of different sizes. So you may want to use bucketing to create batches of sentences of similar length and then use k-max pooling, as sketched below.
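For reference, a minimal sketch of a k-max pooling op built from tf.nn.top_k, assuming a conv output h shaped [batch, conv_len, 1, num_filters] as in the question's code (note that top_k returns the k values sorted by magnitude, whereas the paper's k-max pooling keeps them in their original sequence order, which would additionally require the returned indices plus a gather):
def k_max_pool(h, k):
    h = tf.squeeze(h, squeeze_dims=[2])           # [batch, conv_len, num_filters]
    h = tf.transpose(h, [0, 2, 1])                # [batch, num_filters, conv_len]
    values, _ = tf.nn.top_k(h, k=k, sorted=True)  # [batch, num_filters, k]
    values = tf.transpose(values, [0, 2, 1])      # [batch, k, num_filters]
    return tf.expand_dims(values, 2)              # [batch, k, 1, num_filters]
Because k is fixed per call, all pooled tensors produced within one bucket have the same static shape, which is what the later concat and reshape need.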

TensorFlow neural net outputs linear function

I implemented a basic MLP and I want it to predict a user-generated set of data, but the prediction comes out as essentially a straight line (prediction plot omitted here).
I am not sure why... I have nonlinearities in the hidden layers, and I tried multiple activations (ReLU, tanh, sigmoid), tried different optimisers, different learning rates, various architectures (more layers, fewer layers, dropout), but I never got this right.
Please note that I do believe it may be because of how I compute the predictions at the end (pred = sess.run(out, feed_dict={inputs:X.reshape(n_input, 1)})) as it may be incorrect, but I wouldn't know why. I also tried other methods like extracting the weights with w = sess.run(weights) and then feeding them to the model() function along with the input, but nothing worked.
Also, when monitoring the error, the error decreases between epochs.
Any ideas?
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Architecture
input_size = 1
output_size = 1
h1_size = 20
h2_size = 50
# 2 hidden layers network
def model(inputs, weights):
    out1 = tf.nn.relu(tf.matmul(inputs, weights['h1']))
    out2 = tf.nn.relu(tf.matmul(out1, weights['h2']))
    return tf.matmul(out2, weights['h3'])
# Inputs/label placeholders
inputs = tf.placeholder('float', shape=(None, input_size))
labels = tf.placeholder('float', shape=(None, output_size))
# Learnable weights
weights = {
    'h1': tf.Variable(tf.random_normal(shape=(input_size, h1_size))),
    'h2': tf.Variable(tf.random_normal(shape=(h1_size, h2_size))),
    'h3': tf.Variable(tf.random_normal(shape=(h2_size, output_size))),
}
# Stores the result from the net
out = model(inputs, weights)
# Cost and optimisation
cost = tf.reduce_mean(tf.square(out - labels))
opt = tf.train.AdadeltaOptimizer()
opt_operation = opt.minimize(cost)
# Generate some data
n_input = 1000
X = np.linspace(0, 1, n_input).astype('f')
y = X + 5 * np.sin(X * 10)
y /= max(y)
# Train
epochs = 2000
lr = 0.0000001
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(epochs):
        _, c = sess.run([opt_operation, cost], feed_dict={
            inputs: X.reshape(n_input, 1),
            labels: y.reshape(n_input, 1),
        })
        if not epoch % int(epochs/20):
            print(c)
    pred = sess.run(out, feed_dict={inputs: X.reshape(n_input, 1)})
    plt.scatter(X, pred, color='red', label='prediction')
    plt.scatter(X, y, label='data')
    plt.legend()
    plt.show()
Forgot the bias terms (new graph).
It works now, but I'm not sure whether this is what fixed it?
New code uses:
weights = {
    'h1': tf.Variable(tf.random_normal(shape=(input_size, h1_size))),
    'h2': tf.Variable(tf.random_normal(shape=(h1_size, h2_size))),
    'h3': tf.Variable(tf.random_normal(shape=(h2_size, output_size))),
    'b1': tf.Variable(tf.zeros(shape=[1])),
    'b2': tf.Variable(tf.zeros(shape=[1])),
    'b3': tf.Variable(tf.zeros(shape=[1])),
}
and
def model(inputs, weights):
    out1 = tf.nn.relu(tf.matmul(inputs, weights['h1']) + weights['b1'])
    out2 = tf.nn.relu(tf.matmul(out1, weights['h2']) + weights['b2'])
    return tf.matmul(out2, weights['h3']) + weights['b3']

Predicting time-series values with MLP and Tensorflow

I'm having a hard time trying to set up a multilayer perceptron neural network to predict the next value of a time-series using Tensorflow.
I read the time-series from a file, split it into three arrays and use those arrays to train, test and validate the network. Unfortunately, my network answers 0.9999 to every input I give to it.
The image below shows the values I expect my network to output; note that they range from 2.8 to 4.2.
Now, these are the values my network predicts. Though they all look the same, they're actually 0.9999... (with some difference in the 9th decimal place).
import csv
import numpy as np
from statsmodels.tsa.tsatools import lagmat
import tensorflow as tf
# Data split (values represent percentage)
perc_train = 0.5
perc_test = 0.4
perc_eval = 0.1
# Parameters
learning_rate = 10 ** -3
min_step_size_train = 10 ** -5
training_epochs = 250
display_step = 1
# Network Parameters
n_input = 15
n_classes = 1
n_hidden = (n_input + n_classes) / 2
def get_nn_sets(pmX, pmY):
'''
Splits data into three subsets
'''
trainningIndex = int(len(pmX) * perc_train)
validationIndex = int(len(pmX) * perc_test) + trainningIndex
pmXFit = pmX[:trainningIndex, :]
pmYFit = pmY[:trainningIndex]
pmXTest = pmX[trainningIndex:validationIndex, :]
pmYTest = pmY[trainningIndex:validationIndex]
pmxEvaluate = pmX[validationIndex:, :]
pmYEvaluate = pmY[validationIndex:]
return pmXFit, pmYFit, pmXTest, pmYTest, pmxEvaluate, pmYEvaluate
def read_dollar_file(clip_first = 4000):
'''
Reads the CSV file containing the dollar value for Brazilian real during the years
-----
RETURNS:
A matrix with the file contents
'''
str_vals = []
with open('dolar.csv', 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
for row in spamreader:
# retrieving the first column of the file (the dollar value)
str_vals.append(row[1])
# removing title
str_vals = str_vals[1:]
# removing the empty strings (sunday and holidays have no values)
y = filter(None, str_vals)
# converting from string to float values
y = np.array(y).astype(np.float)
# checking if initial elements should be discarded
if (clip_first > 0):
y = y[clip_first:]
return y
# Create model
def get_multilayer_perceptron(x):
    # Store layers weight & bias
    weights = {
        'h1': tf.Variable(tf.random_normal([n_input, n_hidden], dtype=tf.float64)),
        'out': tf.Variable(tf.random_normal([n_hidden, n_classes], dtype=tf.float64))
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_hidden], dtype=tf.float64)),
        'out': tf.Variable(tf.random_normal([n_classes], dtype=tf.float64))
    }
    # Hidden layer with relu activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Output layer with tanh activation
    out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
    out_layer = tf.nn.tanh(out_layer)
    return out_layer
def run_mlp(inp, outp):
pmXFit, pmYFit, pmXTest, pmYTest, pmXEvaluate, pmYEvaluate = get_nn_sets(inp, outp)
# tf Graph input
x = tf.placeholder("float64", [None, n_input])
y = tf.placeholder("float64", [None, n_classes])
# Construct model
pred = get_multilayer_perceptron(x)
# Define loss and optimizer
cost = tf.nn.l2_loss(tf.sub(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
last_cost = min_step_size_train + 1
for epoch in range(training_epochs):
# Trainning data
for i in range(len(pmXFit)):
batch_x = np.reshape(pmXFit[i,:], (1, n_input))
batch_y = np.reshape(pmYFit[i], (1, n_classes))
# Run optimization
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
# Calculating data error
c = 0.0
for i in range(len(pmXTest)):
batch_x = np.reshape(pmXTest[i,:], (1, n_input))
batch_y = np.reshape(pmYTest[i], (1, n_classes))
# Run Cost function
c += sess.run(cost, feed_dict={x: batch_x, y: batch_y})
c /= len(pmXTest)
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", \
"{:.30f}".format(c))
if abs(c - last_cost) < min_step_size_train:
break
last_cost = c
nn_predictions = np.array([])
for i in range(len(pmXEvaluate)):
batch_x = np.reshape(pmXEvaluate[i,:], (1, n_input))
nn_predictions = np.append(nn_predictions, sess.run(pred, feed_dict={x: batch_x})[0])
print("Optimization Finished!")
nn_predictions.flatten()
return [pmYEvaluate, nn_predictions]
inp = lagmat(read_dollar_file(), n_input, trim='both')
outp = inp[1:, 0]
inp = inp[:-1]
real_value, predicted_value = run_mlp(inp, outp)
I also tried different cost functions and it didn't work. I know I may be missing something really stupid, so I really appreciate your help.
Thanks.
From your code:
out_layer = tf.nn.tanh(out_layer)
tanh can only output values in (-1.0, 1.0), while your targets range from roughly 2.8 to 4.2, so the output saturates near 1. Removing this line will make it do better.
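For reference, a minimal sketch of the model function from the question with the tanh removed (everything else unchanged), so the output layer stays linear and can reach the 2.8-4.2 range:
def get_multilayer_perceptron(x):
    weights = {
        'h1': tf.Variable(tf.random_normal([n_input, n_hidden], dtype=tf.float64)),
        'out': tf.Variable(tf.random_normal([n_hidden, n_classes], dtype=tf.float64))
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_hidden], dtype=tf.float64)),
        'out': tf.Variable(tf.random_normal([n_classes], dtype=tf.float64))
    }
    # Hidden layer with relu activation
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
    # Linear output layer: no tanh, so predictions are not squashed into (-1, 1)
    return tf.matmul(layer_1, weights['out']) + biases['out']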
