I want to use TensorFlow's dropout function to check whether I can improve the results (TPR, FPR) of my recurrent neural network.
However, I implemented it by following a guide, so I am not sure whether I made any mistakes. If I train my model with e.g. 10 epochs, I get nearly the same results after validation, which is why I am not sure whether I am using the dropout function correctly. Is the implementation in the following code right, or did I do something wrong? And if I did everything right, why do I get nearly the same results?
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn

hm_epochs = 10
n_classes = 2
batch_size = 128
chunk_size = 341
n_chunks = 5
rnn_size = 32
dropout_prop = 0.5 # Dropout, probability to drop a unit
batch_size_validation = 65536
x = tf.placeholder('float', [None, n_chunks, chunk_size])
y = tf.placeholder('float')
def recurrent_neural_network(x):
    layer = {'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
             'biases': tf.Variable(tf.random_normal([n_classes]))}

    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, chunk_size])
    x = tf.split(x, n_chunks, 0)

    lstm_cell = rnn.BasicLSTMCell(rnn_size)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    output = tf.matmul(outputs[-1], layer['weights']) + layer['biases']

    # DROPOUT implementation -> is this code really working?
    # The result is nearly the same after 20 epochs...
    output_layer = tf.layers.dropout(output, rate=dropout_prop)

    return output
def train_neural_network(x):
    prediction = recurrent_neural_network(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for epoch in range(1, hm_epochs + 1):
            epoch_loss = 0
            for i in range(0, training_data.shape[0], batch_size):
                epoch_x = np.array(training_data[i:i+batch_size, :, :], dtype='float')
                epoch_y = np.array(training_labels[i:i+batch_size, :], dtype='float')
                if len(epoch_x) != batch_size:
                    epoch_x = epoch_x.reshape((len(epoch_x), n_chunks, chunk_size))
                else:
                    epoch_x = epoch_x.reshape((batch_size, n_chunks, chunk_size))

                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
train_neural_network(x)
print("rnn - finished!")
In its most basic form, dropout should happen inside the recurrent cell and be applied to the weights; you only applied it afterwards, to the final output. This article explains it pretty well, with some good visualizations and a few variations.
To use it in your code, you can either
implement your own RNN cell, where the keep probability is a parameter that initializes the cell or that is passed in on every call, or
use an RNN dropout wrapper here (a sketch follows below).
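For the wrapper option, here is a minimal sketch of how it could slot into your recurrent_neural_network function (TF 1.x API; the keep_prob placeholder is my addition so dropout can be switched off at validation time):

keep_prob = tf.placeholder(tf.float32)  # feed 1 - dropout_prop while training, 1.0 while validating

def recurrent_neural_network(x, keep_prob):
    layer = {'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
             'biases': tf.Variable(tf.random_normal([n_classes]))}

    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, chunk_size])
    x = tf.split(x, n_chunks, 0)

    # Dropout is applied to the cell's inputs and outputs at every time step
    # (add state_keep_prob if you also want it on the recurrent state).
    lstm_cell = rnn.BasicLSTMCell(rnn_size)
    lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell,
                                              input_keep_prob=keep_prob,
                                              output_keep_prob=keep_prob)

    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    return tf.matmul(outputs[-1], layer['weights']) + layer['biases']

Remember to add keep_prob to every feed_dict: 1 - dropout_prop for the training batches and 1.0 for the validation batches, otherwise dropout stays active at evaluation time.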
I'm working on a simple linear regression model to predict the next step in a series. I'm giving it x/y coordinate data, and I want the regressor to predict where the next point on the plot will lie.
I'm using dense layers with AdamOptimizer and have my loss function set to:
tf.reduce_mean(tf.square(layer_out - y))
I'm trying to create linear regression models from scratch (I don't want to utilize the TF Estimator package here). I've seen ways to do it by manually specifying weights and biases, but nothing that goes into deep regression.
X = tf.placeholder(tf.float32, [None, self.data_class.batch_size, self.inputs])
y = tf.placeholder(tf.float32, [None, self.data_class.batch_size, self.outputs])
layer_input = tf.layers.dense(inputs=X, units=10, activation=tf.nn.relu)
layer_hidden = tf.layers.dense(inputs=layer_input, units=10, activation=tf.nn.relu)
layer_out = tf.layers.dense(inputs=layer_hidden, units=1, activation=tf.nn.relu)
cost = tf.reduce_mean(tf.square(layer_out - y))
optmizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
training_op = optmizer.minimize(cost)
init = tf.initialize_all_variables()
iterations = 10000
with tf.Session() as sess:
    init.run()
    for iteration in range(iterations):
        X_batch, y_batch = self.data_class.get_data_batch()
        sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        if iteration % 100 == 0:
            mse = cost.eval(feed_dict={X: X_batch, y: y_batch})
            print(mse)

    array = []
    for i in range(len(self.data_class.dates), (len(self.data_class.dates) + self.data_class.batch_size)):
        array.append(i)
    x_pred = np.array(array).reshape(1, self.data_class.batch_size, 1)
    y_pred = sess.run(layer_out, feed_dict={X: x_pred})
    print(y_pred)

predicted = np.array(y_pred).reshape(self.data_class.batch_size)
predicted = np.insert(predicted, 0, self.data_class.prices[0], axis=0)

plt.plot(self.data_class.dates, self.data_class.prices)

array = [self.data_class.dates[0]]
for i in range(len(self.data_class.dates), (len(self.data_class.dates) + self.data_class.batch_size)):
    array.append(i)

plt.plot(array, predicted)
plt.show()
When I run training, I get the same loss value over and over again; it is not being reduced as it should. Why?
The issue is that I'm applying an activation to the output layer, which forces the output into that activation's range (here the final ReLU clamps everything negative to zero).
By specifying activation=None in the last layer, the deep regression works as intended.
Here is the updated architecture:
layer_input = tf.layers.dense(inputs=X, units=150, activation=tf.nn.relu)
layer_hidden = tf.layers.dense(inputs=layer_input, units=100, activation=tf.nn.relu)
layer_out = tf.layers.dense(inputs=layer_hidden, units=1, activation=None)
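As a quick illustration of why a ReLU on the output layer is a problem for regression (a toy sketch, not the model above): any target below zero is unreachable through a final ReLU.

import tensorflow as tf

pre_activation = tf.constant([-3.0, 0.5, 2.0])
with tf.Session() as sess:
    print(sess.run(tf.nn.relu(pre_activation)))  # [0.  0.5 2. ] -- negative values are clamped to 0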
After playing with TensorFlow, I finally got my code to run, but the loss seems to go negative, even after only a few epochs. Why is this happening? I have made simple neural nets with NumPy only and never had this problem; can someone explain what I am doing wrong?
Here is the input file if anyone wants to test it: input file
Additional information: the labels aren't one-hot arrays. Is my loss function the correct one, given that the labels aren't one-hot? I followed a tutorial that used one-hot labels. (See the sketch after the code for the two label formats.)
import tensorflow as tf
import numpy as np
data = np.load("test_data.npz")
trng_input = np.array(data['Input'], dtype=np.float64)
trng_output = np.array(data['Output'], dtype=np.float64)
nhl1 = 12
nhl2 = 8
n_classes = 4
x = tf.placeholder(dtype=tf.float64, shape=[len(trng_input),24])
y = tf.placeholder(dtype=tf.float64, shape=[len(trng_output),n_classes])
def NN(data):
    hl1 = {"weights": tf.Variable(tf.random_normal([24, nhl1], dtype=tf.float64)),
           "biases": tf.Variable(tf.random_normal([nhl1], dtype=tf.float64))}
    hl2 = {"weights": tf.Variable(tf.random_normal([nhl1, nhl2], dtype=tf.float64)),
           "biases": tf.Variable(tf.random_normal([nhl2], dtype=tf.float64))}
    output_layer = {"weights": tf.Variable(tf.random_normal([nhl2, n_classes], dtype=tf.float64)),
                    "biases": tf.Variable(tf.random_normal([n_classes], dtype=tf.float64))}

    l1 = tf.add(tf.matmul(data, hl1["weights"]), hl1["biases"])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hl2["weights"]), hl2["biases"])
    l2 = tf.nn.relu(l2)
    output = tf.add(tf.matmul(l2, output_layer["weights"]), output_layer["biases"])
    output = tf.nn.relu(output)
    return output

def train(data, epochs):
    prediction = NN(data)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(epochs):
            _, c = sess.run([optimizer, cost], feed_dict={x: trng_input, y: trng_output})
            if not epoch % 20:
                print(F"Epoch {epoch} completed out of {epochs}. Loss:{c}")

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, "float"))
        Eval = accuracy.eval({x: trng_input, y: trng_output})
        print(F"Accuracy:{Eval}")
train(trng_input, 200)
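On the label-format question: here is a minimal standalone sketch of the two APIs with hypothetical logits and labels (not the data from the file). tf.nn.softmax_cross_entropy_with_logits_v2 expects each row of labels to be a probability distribution over the classes (e.g. one-hot), while tf.nn.sparse_softmax_cross_entropy_with_logits takes integer class indices directly.

import tensorflow as tf

logits = tf.constant([[2.0, 0.1, -1.0, 0.3],
                      [0.2, 1.5, 0.0, -0.7]])          # (batch, n_classes)

# One-hot labels: every row sums to 1.
onehot_labels = tf.constant([[1.0, 0.0, 0.0, 0.0],
                             [0.0, 1.0, 0.0, 0.0]])
loss_dense = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=onehot_labels)

# Integer class indices: one class id per example.
int_labels = tf.constant([0, 1])
loss_sparse = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=int_labels)

with tf.Session() as sess:
    print(sess.run([loss_dense, loss_sparse]))  # identical values for matching labels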
I have been trying to use an LSTM for regression in TensorFlow, but it doesn't fit the data. I have successfully fit the same data in Keras (with the same size network). My code for trying to overfit a sine wave is below:
import tensorflow as tf
import numpy as np
yt = np.cos(np.linspace(0, 2*np.pi, 256))
xt = np.array([yt[i-50:i] for i in range(50, len(yt))])[...,None]
yt = yt[-xt.shape[0]:]
g = tf.Graph()
with g.as_default():
    x = tf.constant(xt, dtype=tf.float32)
    y = tf.constant(yt, dtype=tf.float32)

    lstm = tf.nn.rnn_cell.BasicLSTMCell(32)
    outputs, state = tf.nn.dynamic_rnn(lstm, x, dtype=tf.float32)

    pred = tf.layers.dense(outputs[:, -1], 1)
    loss = tf.reduce_mean(tf.square(pred - y))
    train_op = tf.train.AdamOptimizer().minimize(loss)
    init = tf.global_variables_initializer()

sess = tf.InteractiveSession(graph=g)
sess.run(init)

for i in range(200):
    _, l = sess.run([train_op, loss])
    print(l)
This results in an MSE of 0.436067 (while Keras got to 0.0022 after 50 epochs), and the predictions range from -0.1860 to -0.1798. What am I doing wrong here?
Edit:
When I change my loss function to the following, the model fits properly:
def pinball(y_true, y_pred):
    tau = np.arange(1, 100).reshape(1, -1) / 100
    pin = tf.reduce_mean(tf.maximum(y_true[:, None] - y_pred, 0) * tau +
                         tf.maximum(y_pred - y_true[:, None], 0) * (1 - tau))
    return pin
I also changed the assignments of pred and loss to:
pred = tf.layers.dense(outputs[:,-1], 99)
loss = pinball(y, pred)
This results in a decrease of loss from 0.3 to 0.003 as it trains, and seems to properly fit the data.
Looks like a shape/broadcasting issue. Here's a working version:
import tensorflow as tf
import numpy as np
yt = np.cos(np.linspace(0, 2*np.pi, 256))
xt = np.array([yt[i-50:i] for i in range(50, len(yt))])
yt = yt[-xt.shape[0]:]
g = tf.Graph()
with g.as_default():
    x = tf.constant(xt, dtype=tf.float32)
    y = tf.constant(yt, dtype=tf.float32)

    lstm = tf.nn.rnn_cell.BasicLSTMCell(32)
    outputs, state = tf.nn.dynamic_rnn(lstm, x[None, ...], dtype=tf.float32)

    pred = tf.squeeze(tf.layers.dense(outputs, 1), axis=[0, 2])
    loss = tf.reduce_mean(tf.square(pred - y))
    train_op = tf.train.AdamOptimizer().minimize(loss)
    init = tf.global_variables_initializer()

sess = tf.InteractiveSession(graph=g)
sess.run(init)

for i in range(200):
    _, l = sess.run([train_op, loss])
    print(l)
x gets a batch dimension of 1 before going into dynamic_rnn, since with time_major=False the first dimension is expected to be a batch dimension. It's important that the last dimension of the output of tf.layers.dense gets squeezed off so that it doesn't broadcast with y (TensorShape([256, 1]) and TensorShape([256]) broadcast together to TensorShape([256, 256])). With those fixes it converges:
5.78507e-05
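To see the silent broadcasting that made the original loss meaningless, here is a small standalone sketch with the same toy shapes:

import tensorflow as tf

pred = tf.zeros([256, 1])   # dense-layer output before squeezing
y = tf.zeros([256])         # targets

print((pred - y).shape)                        # (256, 256) -- every prediction paired with every target
print((tf.squeeze(pred, axis=1) - y).shape)    # (256,) -- the intended elementwise difference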
You are not passing on the state from one call of dynamic_rnn to the next. That's the problem for sure.
Also, why take only the last item of the output through the dense layer and onward?
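For reference, a minimal sketch of what passing the state on would look like (assumed cell size, placeholder names, and dummy data; not the asker's setup, where the whole series is a single constant batch): the final state returned by one sess.run is fed back as the initial state of the next.

import numpy as np
import tensorflow as tf

cell = tf.nn.rnn_cell.BasicLSTMCell(32)
inputs = tf.placeholder(tf.float32, [1, None, 1])   # (batch, time, features)
c_in = tf.placeholder(tf.float32, [1, 32])
h_in = tf.placeholder(tf.float32, [1, 32])
init_state = tf.nn.rnn_cell.LSTMStateTuple(c_in, h_in)

outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=init_state)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    state = (np.zeros((1, 32)), np.zeros((1, 32)))
    # carry the LSTM state across consecutive chunks of a longer sequence
    for chunk in np.split(np.random.randn(1, 100, 1).astype(np.float32), 4, axis=1):
        state = sess.run(final_state, {inputs: chunk, c_in: state[0], h_in: state[1]})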
I implemented a basic MLP and I want it to predict a user-generated set of data, but the prediction looks as follows:
[plot: the model's prediction plotted against the data]
I am not sure why... I have nonlinearities in the hidden layers, and I tried multiple activations (ReLU, tanh, sigmoid), tried different optimisers, different learning rates, various architectures (more layers, fewer layers, dropout), but I never got this right.
Please note that I do believe it may be because of how I compute the predictions at the end (pred = sess.run(out, feed_dict={inputs:X.reshape(n_input, 1)})) as it may be incorrect, but I wouldn't know why. I also tried other methods like extracting the weights with w = sess.run(weights) and then feeding them to the model() function along with the input, but nothing worked.
Also, when monitoring the error, the error decreases between epochs.
Any ideas?
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Architecture
input_size = 1
output_size = 1
h1_size = 20
h2_size = 50
# 2 hidden layers network
def model(inputs, weights):
    out1 = tf.nn.relu(tf.matmul(inputs, weights['h1']))
    out2 = tf.nn.relu(tf.matmul(out1, weights['h2']))
    return tf.matmul(out2, weights['h3'])
# Inputs/label placeholders
inputs = tf.placeholder('float', shape=(None, input_size))
labels = tf.placeholder('float', shape=(None, output_size))
# Learnable weights
weights = {
    'h1': tf.Variable(tf.random_normal(shape=(input_size, h1_size))),
    'h2': tf.Variable(tf.random_normal(shape=(h1_size, h2_size))),
    'h3': tf.Variable(tf.random_normal(shape=(h2_size, output_size))),
}
# Stores the result from the net
out = model(inputs, weights)
# Cost and optimisation
cost = tf.reduce_mean(tf.square(out - labels))
opt = tf.train.AdadeltaOptimizer()
opt_operation = opt.minimize(cost)
# Generate some data
n_input = 1000
X = np.linspace(0, 1, n_input).astype('f')
y = X + 5 * np.sin(X * 10)
y /= max(y)
# Train
epochs = 2000
lr = 0.0000001
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for epoch in range(epochs):
        _, c = sess.run([opt_operation, cost], feed_dict={
            inputs: X.reshape(n_input, 1),
            labels: y.reshape(n_input, 1),
        })
        if not epoch % int(epochs/20):
            print(c)

    pred = sess.run(out, feed_dict={inputs: X.reshape(n_input, 1)})

plt.scatter(X, pred, color='red', label='prediction')
plt.scatter(X, y, label='data')
plt.legend()
plt.show()
Forgot the bias terms: new graph
It works now, but I'm not sure if this is what fixed it?
The new code uses:
weights = {
    'h1': tf.Variable(tf.random_normal(shape=(input_size, h1_size))),
    'h2': tf.Variable(tf.random_normal(shape=(h1_size, h2_size))),
    'h3': tf.Variable(tf.random_normal(shape=(h2_size, output_size))),
    'b1': tf.Variable(tf.zeros(shape=[1])),
    'b2': tf.Variable(tf.zeros(shape=[1])),
    'b3': tf.Variable(tf.zeros(shape=[1])),
}
and
def model(inputs, weights):
    out1 = tf.nn.relu(tf.matmul(inputs, weights['h1']) + weights['b1'])
    out2 = tf.nn.relu(tf.matmul(out1, weights['h2']) + weights['b2'])
    return tf.matmul(out2, weights['h3']) + weights['b3']
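A quick check of why the missing biases mattered here (a toy NumPy sketch with the same layer shapes, not the trained model): with a single non-negative input feature and no biases, relu(w*x) is just x times a clipped weight, so every layer stays proportional to x and the whole network collapses to a straight line through the origin, which cannot follow x + 5*sin(10x).

import numpy as np

x = np.linspace(0, 1, 5)[:, None]   # (5, 1) non-negative inputs, as in the question
w1 = np.random.randn(1, 20)
w2 = np.random.randn(20, 50)
w3 = np.random.randn(50, 1)

h1 = np.maximum(x @ w1, 0)          # ReLU layers without bias terms
h2 = np.maximum(h1 @ w2, 0)
out = h2 @ w3

a = out[1] / x[1]                   # slope implied by one sample
print(np.allclose(out, a * x))      # True: the whole "network" is just out = a * x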
Does anyone have an example of code that uses tf.train.AdadeltaOptimizer with good results?
I have a TF graph that was originally set up with tf.train.AdamOptimizer and is working well. When I replace it with AdadeltaOptimizer using the default params, it gives lousy results.
I used CUDA 7.5.
Below is example code that works with the AdadeltaOptimizer; it also works with Adam. The main difference between them is that Adam is fairly insensitive to the learning rate, while Adadelta is sensitive to it.
I advise you to read more about optimization algorithms (like here).
In your own example, just try making the learning rate smaller or bigger (this is called hyperparameter optimization).
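For instance, a minimal sketch (a toy one-variable loss stands in for your graph's cost; 1.0 is only an illustrative value): tf.train.AdadeltaOptimizer defaults to learning_rate=0.001, so with the default params the updates are tiny.

import tensorflow as tf

w = tf.Variable(0.0)
cost = tf.square(w - 3.0)   # toy loss standing in for your graph's cost

# Default learning_rate is 0.001, which is often far too small for Adadelta:
train_op_default = tf.train.AdadeltaOptimizer().minimize(cost)

# A much larger learning rate (some implementations default Adadelta to 1.0):
train_op_tuned = tf.train.AdadeltaOptimizer(learning_rate=1.0).minimize(cost)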
Note:
From my experience, Adam is a very good optimizer for RNNs, better than AdaDelta (using the example code, Adam achieves a better score much faster). On the other hand, for CNNs, SGD+Momentum works best.
Code that learns MNIST classification using a Bi-LSTM:
# Mnist classification using Bi-LSTM
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
learning_rate = 0.01
training_epochs = 100
batch_size = 64
seq_length = 28
heigh_image = 28
hidden_size = 128
class_numer = 10
input = tf.placeholder(tf.float32, [None, None, heigh_image])
target = tf.placeholder(tf.float32, [None, class_numer])
seq_len = tf.placeholder(tf.int32, [None])
def fulconn_layer(input_data, output_dim, activation_func=None):
    input_dim = int(input_data.get_shape()[1])
    W = tf.Variable(tf.random_normal([input_dim, output_dim]))
    b = tf.Variable(tf.random_normal([output_dim]))
    if activation_func:
        return activation_func(tf.matmul(input_data, W) + b)
    else:
        return tf.matmul(input_data, W) + b
with tf.name_scope("BiLSTM"):
with tf.variable_scope('forward'):
lstm_fw_cell = tf.nn.rnn_cell.LSTMCell(hidden_size, forget_bias=1.0, state_is_tuple=True)
with tf.variable_scope('backward'):
lstm_bw_cell = tf.nn.rnn_cell.LSTMCell(hidden_size, forget_bias=1.0, state_is_tuple=True)
outputs, states = tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fw_cell, cell_bw=lstm_bw_cell, inputs=input,sequence_length=seq_len, dtype=tf.float32, scope="BiLSTM")
# As we have Bi-LSTM, we have two output, which are not connected. So merge them
outputs = tf.concat(2, outputs)
# As we want do classification, we only need the last output from LSTM.
last_output = outputs[:,0,:]
# Create the final classification layer
yhat = fulconn_layer(last_output, class_numer)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(yhat, target))
optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate).minimize(cost) # AdamOptimizer
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(target, 1), tf.argmax(yhat, 1)), tf.float32))
gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_opts)) as session:
    session.run(tf.initialize_all_variables())
    print("Start Learning")

    for epoch in range(training_epochs):
        for i in range(int(mnist.train.num_examples / batch_size)):
            x_batch, y_batch = mnist.train.next_batch(batch_size)
            x_batch = x_batch.reshape([batch_size, seq_length, heigh_image])
            train_seq_len = np.ones(batch_size) * seq_length
            session.run([optimizer], feed_dict={input: x_batch, target: y_batch, seq_len: train_seq_len})

        train_accuracy = session.run(accuracy, feed_dict={input: x_batch, target: y_batch, seq_len: train_seq_len})

        x_test = mnist.test.images.reshape([-1, seq_length, heigh_image])
        y_test = mnist.test.labels
        test_seq_len = np.ones(x_test.shape[0]) * seq_length
        test_accuracy = session.run(accuracy, feed_dict={input: x_test, target: y_test, seq_len: test_seq_len})

        print("epoch: %d, train_accuracy: %3f, test_accuracy: %3f" % (epoch, train_accuracy, test_accuracy))