I am trying to recreate/rerun a previous neural network. The idea is that I save the weights and biases with which the network is initialised in run (1), and reuse those exact weights and biases for initialisation in run (2), so that the outcome of run (2) will be exactly that of run (1). The relevant code is below.
However, I can't manage to do this. If I rerun the network, setting use_stored_weights to True (which prompts neural_network_model to restore the previously saved weights and biases, and, I hope, initialise the network with them), I do indeed always get the same results - but these are not the results of the run I am trying to emulate (in fact, they are much worse). It is also strange that I ALWAYS get the same results - they thus seem to be independent of the stored weights and biases.
The first thing I checked is whether I am correctly restoring my weights and biases (in neural_network_model), and this is the case.
I'm not really sure what is going on here. I have two suspicions, neither of which I know how to check:
Although the correct weights and biases are restored in neural_network_model, the network is in fact not initialised with them. Is it the command sess.run(init) in train_neural_network that somehow does this?
I am reinitialising the network with the stored weights and biases not once, but every time - this would explain the poor results.
... but it might just as well be something completely different. Any help is much appreciated!
UPDATE: I figured out what is happening: when use_stored_weights is set to True, the weights are always initialised as all zeros. As far as I can see, this is due to the sess.run(init) in train_neural_network. However, if I leave out that line, I get an error:
FailedPreconditionError: Attempting to use uninitialized value b2
So I guess my question is now: how can I make the restored weights and biases accessible in train_neural_network?
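A minimal sketch of what I suspect is happening (separate from my actual network, and assuming standard TF1 behaviour): variable values live inside a session, and running the initializer in a fresh session overwrites whatever was assigned or restored in another one:

import tensorflow as tf

v = tf.get_variable('v', shape=[2], initializer=tf.zeros_initializer())

with tf.Session() as sess_a:
    sess_a.run(v.assign([1.0, 2.0]))    # stands in for saver.restore(...)
    print(sess_a.run(v))                # [1. 2.]

with tf.Session() as sess_b:
    sess_b.run(tf.global_variables_initializer())  # re-runs the zeros initializer
    print(sess_b.run(v))                # [0. 0.] -- the values from sess_a are gone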
Imports:
import tensorflow as tf
import numpy as np
from numpy import genfromtxt
This function builds the neural network:
def neural_network_model(data, layer_sizes, use_stored_weights):
    num_layers = len(layer_sizes) - 1  # hidden and output layers
    weights = {}
    biases = {}

    # initialise the weights
    # (a) create new weights and biases
    if not use_stored_weights:
        for i in range(num_layers):
            w_name = 'W' + str(i+1)
            b_name = 'b' + str(i+1)
            weights[w_name] = tf.get_variable(w_name, [layer_sizes[i], layer_sizes[i+1]],
                                              initializer=tf.contrib.layers.xavier_initializer(), dtype=tf.float32)
            biases[b_name] = tf.get_variable(b_name, [layer_sizes[i+1]],
                                             initializer=tf.zeros_initializer(), dtype=tf.float32)
        # save weights and biases
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            save_path = saver.save(sess, fold_path + 'weights/' + 'weights.ckpt')
    # (b) restore weights and biases
    else:
        for i in range(num_layers):
            # prepare variable
            w_name = 'W' + str(i+1)
            b_name = 'b' + str(i+1)
            weights[w_name] = tf.get_variable(w_name, [layer_sizes[i], layer_sizes[i+1]],
                                              initializer=tf.zeros_initializer(), dtype=tf.float32)
            biases[b_name] = tf.get_variable(b_name, [layer_sizes[i+1]],
                                             initializer=tf.zeros_initializer(), dtype=tf.float32)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, fold_path + 'weights/' + 'weights.ckpt')

    # calculate linear and relu outputs for hidden layers
    a_prev = data
    for i in range(len(weights)-1):
        z = tf.add(tf.matmul(a_prev, weights['W' + str(i+1)]), biases['b' + str(i+1)])
        a = tf.nn.relu(z)
        a_r = tf.nn.dropout(a, keep_prob)
        a_prev = a_r

    # calculate linear output for output layer
    z_o = tf.add(tf.matmul(a_prev, weights['W' + str(len(weights))]), biases['b' + str(len(weights))])

    return z_o
This function trains and evaluates the network:
def train_neural_network(x, layer_sizes, use_stored_weights):
    prediction = neural_network_model(x, layer_sizes, use_stored_weights)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=lrn_rate).minimize(cost)

    softm = tf.nn.softmax(prediction)
    pred_class = tf.argmax(softm)

    costs = []

    correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, 'float'))

    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)

        for epoch in range(epochs):
            epoch_loss = 0
            for _ in range(int(len(x_train)/batch_size)):
                epoch_x, epoch_y = x_train, y_train
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y, keep_prob: kp})
                epoch_loss += c

        softmaxes_tst = sess.run([softm, pred_class], feed_dict={x: x_test, keep_prob: 1.0})[0]

        incorr = 0
        for i in range(len(softmaxes_tst)):
            curr_sm = softmaxes_tst[i]
            curr_lbl = y_test[i]
            if np.argmax(curr_sm) != np.argmax(curr_lbl):
                incorr += 1
        print('incorr: ', incorr)

        num_ex = len(x_test)
        print('acc = ' + str(num_ex-incorr) + '/' + str(num_ex) + ' = ' + str((num_ex-incorr)/num_ex))

        print('accuracy train:', accuracy.eval({x: x_train, y: y_train, keep_prob: kp}))
        print('accuracy test :', accuracy.eval({x: x_test, y: y_test, keep_prob: 1.0}))
And this, finally, is the code that calls the above functions:
lrn_rate = 0.01
kp = 0.85
epochs = 400
path = '/my/path/to/data/'
fold_path = ''
num_folds = 19
for i in range(num_folds):
    tf.reset_default_graph()

    fold_path = path + 'fold_' + str(i+1) + '/'

    x_train = genfromtxt(fold_path + 'fv_train.csv', delimiter=',')
    y_train = genfromtxt(fold_path + 'lbl_train.csv', delimiter=',')
    x_test = genfromtxt(fold_path + 'fv_test.csv', delimiter=',')
    y_test = genfromtxt(fold_path + 'lbl_test.csv', delimiter=',')

    num_classes = len(y_train[0])
    num_features = len(x_train[0])
    batch_size = len(x_train)

    num_nodes_hl1 = num_features
    num_nodes_hl2 = num_features
    layer_sizes = [num_features, num_nodes_hl1, num_nodes_hl2, num_classes]

    x = tf.placeholder('float', [None, num_features])
    y = tf.placeholder('float')
    keep_prob = tf.placeholder('float')

    use_stored_weights = True

    train_neural_network(x, layer_sizes, use_stored_weights)
Related
I am new to TensorFlow and I am working on training an LSTM-RNN in TensorFlow.
I need to save the model so that I can restore it and run it on the test data again.
I am not sure what to save: do I need to save sess, or do I need to save pred?
When I save sess, then restore it and run on the test data as
one_hot_predictions, accuracy, final_loss = sess.run(
    [pred, accuracy, cost],
    feed_dict={
        x: X_test,
        y: one_hot(y_test)
    }
)
Then I get an error saying pred is unknown.
Since I am new to TensorFlow, I am not sure what to save and what to restore in order to test with new data.
X_train = load_X(X_train_path)
X_test = load_X(X_test_path)
y_train = load_y(y_train_path)
y_test = load_y(y_test_path)
# proof that it actually works for the skeptical: replace labelled classes with random classes to train on
#for i in range(len(y_train)):
# y_train[i] = randint(0, 5)
# Input Data
training_data_count = len(X_train) # 4519 training series (with 50% overlap between each serie)
test_data_count = len(X_test) # 1197 test series
n_input = len(X_train[0][0]) # num input parameters per timestep
n_hidden = 34 # Hidden layer num of features
n_classes = 6
#updated for learning-rate decay
# calculated as: decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
decaying_learning_rate = True
learning_rate = 0.0025 #used if decaying_learning_rate set to False
init_learning_rate = 0.005
decay_rate = 0.96 #the base of the exponential in the decay
decay_steps = 100000 #used in decay every 60000 steps with a base of 0.96
global_step = tf.Variable(0, trainable=False)
lambda_loss_amount = 0.0015
training_iters = training_data_count *300 # Loop 300 times on the dataset, ie 300 epochs
batch_size = 512
display_iter = batch_size*8 # To show test set accuracy during training
#Utility functions for training:
def LSTM_RNN(_X, _weights, _biases):
    # model architecture based on "guillaume-chevalier" and "aymericdamien" under the MIT license.

    _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size
    _X = tf.reshape(_X, [-1, n_input])
    # Rectified Linear Unit activation function used
    _X = tf.nn.relu(tf.matmul(_X, _weights['hidden']) + _biases['hidden'])
    # Split data because rnn cell needs a list of inputs for the RNN inner loop
    _X = tf.split(_X, n_steps, 0)

    # Define two stacked LSTM cells (two recurrent layers deep) with tensorflow
    lstm_cell_1 = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
    lstm_cell_2 = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
    lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2], state_is_tuple=True)
    outputs, states = tf.contrib.rnn.static_rnn(lstm_cells, _X, dtype=tf.float32)

    # A single output is produced, in style of "many to one" classifier, refer to http://karpathy.github.io/2015/05/21/rnn-effectiveness/ for details
    lstm_last_output = outputs[-1]

    # Linear activation
    return tf.matmul(lstm_last_output, _weights['out']) + _biases['out']
def extract_batch_size(_train, _labels, _unsampled, batch_size):
    # Fetch a "batch_size" amount of data and labels from "(X|y)_train" data.
    # Elements of each batch are chosen randomly, without replacement, from X_train with corresponding label from Y_train
    # unsampled_indices keeps track of sampled data ensuring non-replacement. Resets when remaining datapoints < batch_size

    shape = list(_train.shape)
    shape[0] = batch_size
    batch_s = np.empty(shape)
    batch_labels = np.empty((batch_size, 1))

    for i in range(batch_size):
        # Loop index
        # index = random sample from _unsampled (indices)
        index = random.choice(_unsampled)
        batch_s[i] = _train[index]
        batch_labels[i] = _labels[index]
        _unsampled.remove(index)

    return batch_s, batch_labels, _unsampled
def one_hot(y_):
    # One hot encoding of the network outputs
    # e.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]
    y_ = y_.reshape(len(y_))
    n_values = int(np.max(y_)) + 1
    return np.eye(n_values)[np.array(y_, dtype=np.int32)]  # Returns FLOATS
# Graph input/output
x = tf.placeholder(tf.float32, [None, n_steps, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
# Graph weights
weights = {
    'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])), # Hidden layer weights
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes], mean=1.0))
}
biases = {
    'hidden': tf.Variable(tf.random_normal([n_hidden])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
pred = LSTM_RNN(x, weights, biases)
# Loss, optimizer and evaluation
l2 = lambda_loss_amount * sum(
    tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
)  # L2 loss prevents this overkill neural network to overfit the data
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)) + l2  # Softmax loss
if decaying_learning_rate:
    learning_rate = tf.train.exponential_decay(init_learning_rate, global_step*batch_size, decay_steps, decay_rate, staircase=True)
#decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps) #exponentially decayed learning rate
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost,global_step=global_step) # Adam Optimizer
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
#Train the network:
test_losses = []
test_accuracies = []
train_losses = []
train_accuracies = []
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
init = tf.global_variables_initializer()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
sess.run(init)
# Perform Training steps with "batch_size" amount of data at each loop.
# Elements of each batch are chosen randomly, without replacement, from X_train,
# restarting when remaining datapoints < batch_size
step = 1
time_start = time.time()
unsampled_indices = range(0,len(X_train))
while step * batch_size <= training_iters:
    #print (sess.run(learning_rate)) #decaying learning rate
    #print (sess.run(global_step)) # global number of iterations
    if len(unsampled_indices) < batch_size:
        unsampled_indices = range(0, len(X_train))
    batch_xs, raw_labels, unsampled_indicies = extract_batch_size(X_train, y_train, unsampled_indices, batch_size)
    batch_ys = one_hot(raw_labels)
    # check that encoded output is same length as num_classes, if not, pad it
    if len(batch_ys[0]) < n_classes:
        temp_ys = np.zeros((batch_size, n_classes))
        temp_ys[:batch_ys.shape[0], :batch_ys.shape[1]] = batch_ys
        batch_ys = temp_ys

    # Fit training using batch data
    _, loss, acc = sess.run(
        [optimizer, cost, accuracy],
        feed_dict={
            x: batch_xs,
            y: batch_ys
        }
    )
    train_losses.append(loss)
    train_accuracies.append(acc)

    # Evaluate network only at some steps for faster training:
    if (step*batch_size % display_iter == 0) or (step == 1) or (step * batch_size > training_iters):
        # To not spam console, show training accuracy/loss in this "if"
        print("Iter #" + str(step*batch_size) + \
              ": Learning rate = " + "{:.6f}".format(sess.run(learning_rate)) + \
              ": Batch Loss = " + "{:.6f}".format(loss) + \
              ", Accuracy = {}".format(acc))
        # Evaluation on the test set (no learning made here - just evaluation for diagnosis)
        loss, acc = sess.run(
            [cost, accuracy],
            feed_dict={
                x: X_test,
                y: one_hot(y_test)
            }
        )
        test_losses.append(loss)
        test_accuracies.append(acc)
        print("PERFORMANCE ON TEST SET: " + \
              "Batch Loss = {}".format(loss) + \
              ", Accuracy = {}".format(acc))

    step += 1

print("Optimization Finished!")
EDIT:
I can save the model as
print("Optimization Finished!")
save_path = saver.save(sess, "/home/test/venv/TFCodes/HumanActivityRecognition/model.ckpt")
Then I tried to restore, and I can restore it fine, but I don't know how to test it on the test data.
My restore code is
X_test = load_X(X_test_path)

with tf.Session() as sess:
    saver = tf.train.import_meta_graph('/home/nyan/venv/TFCodes/HumanActivityRecognition/model.ckpt.meta')
    saver.restore(sess, tf.train.latest_checkpoint('./'))
    print("Model restored.")
    all_vars = tf.trainable_variables()
    for i in range(len(all_vars)):
        name = all_vars[i].name
        values = sess.run(name)
        print('name', name)
        #print('value', values)
        print('shape', values.shape)
    result = sess.run(prediction, feed_dict={X: X_test})
    print("loss:", l, "prediction:", result, "true Y:", y_data)

    # print char using dic
    result_str = [idx2char[c] for c in np.squeeze(result)]
    print("\tPrediction str:", ''.join(result_str))
The output is
Model restored.
('name', u'Variable_1:0')
('shape', (36, 34))
('name', u'Variable_2:0')
('shape', (34, 6))
('name', u'Variable_3:0')
('shape', (34,))
('name', u'Variable_4:0')
('shape', (6,))
('name', u'rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel:0')
('shape', (68, 136))
('name', u'rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias:0')
('shape', (136,))
('name', u'rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel:0')
('shape', (68, 136))
('name', u'rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias:0')
('shape', (136,))
Traceback (most recent call last):
  File "restore.py", line 74, in <module>
    result = sess.run(prediction, feed_dict={X: X_test})
NameError: name 'prediction' is not defined
How can I test the restored model?
What I find easiest is the tf.saved_model.simple_save() function. It saves the computation graph you use, the inputs and outputs, and the weights: a .pb model plus the variable values.
You can later restore this model (a loading sketch follows the snippet below), or even put it on ml-engine or serve it with TF Serving.
An example code snippet with a Keras model, applied to YOLO:
inputs = {"image_bytes": model.input,
          "shape": image_shape}
outputs = {"boxes": boxes,
           "scores": scores,
           "classes": classes}
tf.saved_model.simple_save(sess, "saved_model/", inputs, outputs)
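To load such a model back later, one option is tf.saved_model.loader.load. This is a minimal sketch: the tensor names here are purely hypothetical, so inspect your own with saved_model_cli show --dir saved_model/ --all before using it.

import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    # simple_save exports under the SERVING tag by default
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], "saved_model/")
    graph = tf.get_default_graph()
    image_in = graph.get_tensor_by_name("input_1:0")   # hypothetical input tensor name
    boxes_out = graph.get_tensor_by_name("boxes:0")    # hypothetical output tensor name
    # my_image_batch: your preprocessed input batch
    print(sess.run(boxes_out, feed_dict={image_in: my_image_batch}))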
For a school project I have analysed the code below, but I want to add a feature to it: when it is done training, I want to give the neural network an image of a handwritten digit from MNIST (let's say an 8), so that it can try to classify it as the number 8. Because I'm totally new to coding and machine learning, although I really like it and want to learn more, I could not figure out myself what such code should look like. Can someone help me?
The code is written in Python:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
learning_rate = 0.0001
batch_size = 100
update_step = 10
layer_1_nodes = 500
layer_2_nodes = 500
layer_3_nodes = 500
output_nodes = 10
network_input = tf.placeholder(tf.float32, [None, 784])
target_output = tf.placeholder(tf.float32, [None, output_nodes])
layer_1 = tf.Variable(tf.random_normal([784, layer_1_nodes]))
layer_1_bias = tf.Variable(tf.random_normal([layer_1_nodes]))
layer_2 = tf.Variable(tf.random_normal([layer_1_nodes, layer_2_nodes]))
layer_2_bias = tf.Variable(tf.random_normal([layer_2_nodes]))
layer_3 = tf.Variable(tf.random_normal([layer_2_nodes, layer_3_nodes]))
layer_3_bias = tf.Variable(tf.random_normal([layer_3_nodes]))
out_layer = tf.Variable(tf.random_normal([layer_3_nodes, output_nodes]))
out_layer_bias = tf.Variable(tf.random_normal([output_nodes]))
l1_output = tf.nn.relu(tf.matmul(network_input, layer_1) + layer_1_bias)
l2_output = tf.nn.relu(tf.matmul(l1_output, layer_2) + layer_2_bias)
l3_output = tf.nn.relu(tf.matmul(l2_output, layer_3) + layer_3_bias)
ntwk_output_1 = tf.matmul(l3_output, out_layer) + out_layer_bias
ntwk_output_2 = tf.nn.softmax(ntwk_output_1)
cf = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=ntwk_output_1,
                                                            labels=target_output))
ts = tf.train.GradientDescentOptimizer(learning_rate).minimize(cf)
cp = tf.equal(tf.argmax(ntwk_output_2, 1), tf.argmax(target_output, 1))
acc = tf.reduce_mean(tf.cast(cp, tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_epochs = 10
    for epoch in range(num_epochs):
        total_cost = 0
        for _ in range(int(mnist.train.num_examples / batch_size)):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            t, c = sess.run([ts, cf], feed_dict={network_input: batch_x, target_output: batch_y})
            total_cost += c
        print('Epoch', epoch, 'completed out of', num_epochs, 'loss:', total_cost)
    print('Accuracy:', acc.eval({network_input: mnist.test.images, target_output: mnist.test.labels}))
with tf.Session() as sess:
    number_prediction = tf.argmax(ntwk_output_2, 1)
    number_prediction = sess.run(number_prediction, feed_dict={network_input: yourImageNdArray})
    print("your prediction : ", number_prediction)
What you need to know:
ntwk_output_2 is the output of the neural net, which gives you 10 probabilities - you take the greatest one with tf.argmax (tf.argmax does not return the max value but its position)
sess.run is responsible for running your TensorFlow graph and evaluating the tensor given as its first parameter
you also need to feed the network the image you want to predict via feed_dict; a small end-to-end sketch follows below
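Putting that together, a minimal sketch of how you could build yourImageNdArray from the MNIST test set and run the prediction. It only gives a meaningful answer if it runs in the same session that trained the weights (or after restoring them, as the next answer shows); the index 7 is an arbitrary example.

import numpy as np

# inside the training `with tf.Session() as sess:` block, after the epochs finish:
img = mnist.test.images[7].reshape(1, 784)            # one test image as a batch of one
probs = sess.run(ntwk_output_2, feed_dict={network_input: img})
print("predicted digit:", np.argmax(probs),
      "true digit:", np.argmax(mnist.test.labels[7]))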
Hope that helps!
The problem is that you are not saving your model at any point during the training process.
You can do this during training:
ckpt_path = path_to_save_model
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_epochs = 10
    for epoch in range(num_epochs):
        total_cost = 0
        for _ in range(int(mnist.train.num_examples / batch_size)):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            t, c = sess.run([ts, cf], feed_dict={network_input: batch_x, target_output: batch_y})
            total_cost += c
        print('Epoch', epoch, 'completed out of', num_epochs, 'loss:', total_cost)
        if (epoch+1) % 10 == 0:
            saver.save(sess, ckpt_path)
    print('Accuracy:', acc.eval({network_input: mnist.test.images, target_output: mnist.test.labels}))
For running the trained model you can do the following:
with tf.Session() as sess:
    meta_graph = [i for i in os.listdir(ckpt_path) if i.endswith('.meta')]
    tf.train.import_meta_graph(os.path.join(ckpt_path, meta_graph[0]))
    saver = tf.train.Saver()
    saver.restore(sess, ckpt_path)
    # img = read your image here
    pred = sess.run(ntwk_output_2, feed_dict={network_input: img})
    output = np.argmax(pred)
For more reference you can follow this link
I want to use the dropout function of TensorFlow to check whether I can improve the results (TPR, FPR) of my recurrent neural network.
However, I implemented it by following a guide, so I am not sure whether I made any mistakes. If I train my model for e.g. 10 epochs, I get nearly the same validation results either way. That is why I am not sure whether I am using the dropout function correctly. Is the implementation in the following code right, or did I do something wrong? And if I did everything right, why do I get nearly the same results?
hm_epochs = 10
n_classes = 2
batch_size = 128
chunk_size = 341
n_chunks = 5
rnn_size = 32
dropout_prop = 0.5 # Dropout, probability to drop a unit
batch_size_validation = 65536
x = tf.placeholder('float', [None, n_chunks, chunk_size])
y = tf.placeholder('float')
def recurrent_neural_network(x):
    layer = {'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
             'biases': tf.Variable(tf.random_normal([n_classes]))}

    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, chunk_size])
    x = tf.split(x, n_chunks, 0)

    lstm_cell = rnn.BasicLSTMCell(rnn_size)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    output = tf.matmul(outputs[-1], layer['weights']) + layer['biases']

    #DROPOUT Implementation -> is this code really working?
    #The result is nearly the same after 20 epochs...
    output_layer = tf.layers.dropout(output, rate=dropout_prop)

    return output
def train_neural_network(x):
    prediction = recurrent_neural_network(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for epoch in range(1, hm_epochs+1):
            epoch_loss = 0
            for i in range(0, training_data.shape[0], batch_size):
                epoch_x = np.array(training_data[i:i+batch_size, :, :], dtype='float')
                epoch_y = np.array(training_labels[i:i+batch_size, :], dtype='float')
                if len(epoch_x) != batch_size:
                    epoch_x = epoch_x.reshape((len(epoch_x), n_chunks, chunk_size))
                else:
                    epoch_x = epoch_x.reshape((batch_size, n_chunks, chunk_size))
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
train_neural_network(x)
print("rnn - finished!")
In its most basic form, dropout should happen inside the cell and be applied to the weights. You only applied it afterwards. This article explains it pretty well, with some good visualizations and a few variations.
To use it in your code, you can either
Implement your own RNN cell where the keep probability is a parameter that initializes the cell, or a parameter that is passed in every time it is called.
Use an RNN dropout wrapper here; a minimal sketch of this option follows below.
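A minimal sketch of the wrapper option, adapted to the question's code (the keep_prob placeholder is something you would add and feed yourself, e.g. 0.5 while training and 1.0 while validating):

keep_prob = tf.placeholder(tf.float32)

lstm_cell = rnn.BasicLSTMCell(rnn_size)
lstm_cell = rnn.DropoutWrapper(lstm_cell,
                               input_keep_prob=keep_prob,
                               output_keep_prob=keep_prob)  # dropout around the cell's inputs/outputs
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)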
In the example below I train on the data a number of times, based on the value of iterations. At first I saved a checkpoint at every iteration, but when I place the save just below the loop over the iterations, training obviously becomes much faster. I'm not really sure whether it makes a difference: is one save per session enough, and will each iteration use the values (weights) set in the previous iteration?
def train():
    """Trains the neural network
    :returns: 0 for now. Why? 42
    """
    unique_labels, labels = get_labels(training_data_file, savelabels=True)  # get the labels from the training data
    randomize_output(unique_labels)  # create the correct amount of output neurons
    prediction = neural_network_model(p_inputdata)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=p_known_labels))  # calculates the difference between the known labels and the results
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())  # initialize all variables **in** the session
        with open(training_data_file, "r") as file:
            inputdata = [re.split(csvseperatorRE, line.rstrip("\n"))[:-1] for line in file]
        if path.isfile(temp_dir + "\\" + label + ".ckpt.index"):
            saver.restore(sess, temp_dir + "\\" + label + ".ckpt")
        for i in range(iterations):
            _, cost_of_iteration = sess.run([optimizer, cost], feed_dict={
                p_inputdata: np.array(inputdata),
                p_known_labels: np.array(labels)
            })
        saver.save(sess, temp_dir + "\\" + label + ".ckpt")

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(tf.argmax(p_known_labels, 1)))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))

        with open(verify_data_file, "r") as file:
            verifydata = [re.split(csvseperatorRE, line.rstrip("\n"))[:-1] for line in file]
        npverifydata = np.array(verifydata)
        nplabels = np.array(get_labels(verify_data_file)[1])

        print("Accuracy: ", accuracy.eval({p_inputdata: npverifydata, p_known_labels: nplabels}))

    return 0
I'm having a hard time trying to set up a multilayer perceptron neural network to predict the next value of a time-series using Tensorflow.
I read the time-series from a file, split it into three arrays and use those arrays to train, test and validate the network. Unfortunately, my network answers 0.9999 to every input I give to it.
The image below shows the values I expect my network to output; note that they range from 2.8 to 4.2.
Now, these are the values my network predicts. Though they all seem the same, they are actually 0.9999... (with some difference around the 9th decimal place).
import csv
import numpy as np
from statsmodels.tsa.tsatools import lagmat
import tensorflow as tf
# Data split (values represent percentage)
perc_train = 0.5
perc_test = 0.4
perc_eval = 0.1
# Parameters
learning_rate = 10 ** -3
min_step_size_train = 10 ** -5
training_epochs = 250
display_step = 1
# Network Parameters
n_input = 15
n_classes = 1
n_hidden = (n_input + n_classes) / 2
def get_nn_sets(pmX, pmY):
    '''
    Splits data into three subsets
    '''
    trainningIndex = int(len(pmX) * perc_train)
    validationIndex = int(len(pmX) * perc_test) + trainningIndex

    pmXFit = pmX[:trainningIndex, :]
    pmYFit = pmY[:trainningIndex]

    pmXTest = pmX[trainningIndex:validationIndex, :]
    pmYTest = pmY[trainningIndex:validationIndex]

    pmxEvaluate = pmX[validationIndex:, :]
    pmYEvaluate = pmY[validationIndex:]

    return pmXFit, pmYFit, pmXTest, pmYTest, pmxEvaluate, pmYEvaluate
def read_dollar_file(clip_first=4000):
    '''
    Reads the CSV file containing the dollar value for Brazilian real during the years
    -----
    RETURNS:
    A matrix with the file contents
    '''
    str_vals = []
    with open('dolar.csv', 'rb') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',')
        for row in spamreader:
            # retrieving the first column of the file (the dollar value)
            str_vals.append(row[1])
    # removing title
    str_vals = str_vals[1:]
    # removing the empty strings (sunday and holidays have no values)
    y = filter(None, str_vals)
    # converting from string to float values
    y = np.array(y).astype(np.float)
    # checking if initial elements should be discarded
    if (clip_first > 0):
        y = y[clip_first:]
    return y
# Create model
def get_multilayer_perceptron(x):
    # Store layers weight & bias
    weights = {
        'h1': tf.Variable(tf.random_normal([n_input, n_hidden], dtype=tf.float64)),
        'out': tf.Variable(tf.random_normal([n_hidden, n_classes], dtype=tf.float64))
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_hidden], dtype=tf.float64)),
        'out': tf.Variable(tf.random_normal([n_classes], dtype=tf.float64))
    }
    # Hidden layer with relu activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Output layer with tanh activation
    out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
    out_layer = tf.nn.tanh(out_layer)
    return out_layer
def run_mlp(inp, outp):
    pmXFit, pmYFit, pmXTest, pmYTest, pmXEvaluate, pmYEvaluate = get_nn_sets(inp, outp)

    # tf Graph input
    x = tf.placeholder("float64", [None, n_input])
    y = tf.placeholder("float64", [None, n_classes])

    # Construct model
    pred = get_multilayer_perceptron(x)

    # Define loss and optimizer
    cost = tf.nn.l2_loss(tf.sub(pred, y))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    # Initializing the variables
    init = tf.initialize_all_variables()

    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)

        # Training cycle
        last_cost = min_step_size_train + 1
        for epoch in range(training_epochs):
            # Trainning data
            for i in range(len(pmXFit)):
                batch_x = np.reshape(pmXFit[i, :], (1, n_input))
                batch_y = np.reshape(pmYFit[i], (1, n_classes))
                # Run optimization
                sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})

            # Calculating data error
            c = 0.0
            for i in range(len(pmXTest)):
                batch_x = np.reshape(pmXTest[i, :], (1, n_input))
                batch_y = np.reshape(pmYTest[i], (1, n_classes))
                # Run Cost function
                c += sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            c /= len(pmXTest)

            # Display logs per epoch step
            if epoch % display_step == 0:
                print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.30f}".format(c))

            if abs(c - last_cost) < min_step_size_train:
                break
            last_cost = c

        nn_predictions = np.array([])
        for i in range(len(pmXEvaluate)):
            batch_x = np.reshape(pmXEvaluate[i, :], (1, n_input))
            nn_predictions = np.append(nn_predictions, sess.run(pred, feed_dict={x: batch_x})[0])

        print("Optimization Finished!")

    nn_predictions.flatten()
    return [pmYEvaluate, nn_predictions]

inp = lagmat(read_dollar_file(), n_input, trim='both')
outp = inp[1:, 0]
inp = inp[:-1]

real_value, predicted_value = run_mlp(inp, outp)
I also tried different cost functions and it didn't work. I know I may be missing something really stupid, so I really appreciate your help.
Thanks.
From your code:
out_layer = tf.nn.tanh(out_layer)
tanh can only output values in the range (-1.0, 1.0); removing this line will let the network reach the actual target range and make it do better. A minimal sketch of the change follows below.
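Here is a minimal sketch of the suggested change in get_multilayer_perceptron: keep the output layer linear, so predictions are not squashed into (-1, 1) and can reach the roughly 2.8 to 4.2 range of the target series.

    # Output layer: linear activation (no tanh)
    out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
    return out_layer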