Thanks for looking into this question.
I am trying to train a 3-layer NN to predict the stock price over the next 10 days based on the stock price over the previous 15 days. While using the GradientDescentOptimizer, the weights of the variables do not change, so I would like to seek some assistance. I have tried the following:
Checked that there's a tf.placeholder and that I have fed in a tensor with the correct dimensions.
Changed the learning rate to see if the loss improves.
Changed the loss function from reduce_sum to reduce_mean of the squared differences between the actual data and the prediction.
Randomised my tf.Variables.
The code that I'm running is as follows. Some symbols are not defined here to keep the code clear. I appreciate your kind advice on this matter!
#Setting value placeholder
x = tf.placeholder(tf.float64,shape=(19,15,1), name = 'Input')
y_ = tf.placeholder(tf.float64,shape=(19,10,1), name = 'Output')
#Setting DNN key architectural values
n_layers = 3
n_nodes_l1 = 20
n_nodes_l2 = 30
n_nodes_l3 = 10
W01 = tf.Variable(tf.random_uniform([n_nodes_l1, 15],0,1,dtype=tf.float64,name="W01"))
W02 = tf.Variable(tf.random_uniform([n_nodes_l2, n_nodes_l1],0,1,dtype=tf.float64),name='W02')
W03 = tf.Variable(tf.random_uniform([n_nodes_l3, n_nodes_l2],0,1,dtype=tf.float64),name='W03')
b01 = tf.Variable(tf.random_uniform([n_nodes_l1,1],0,1,dtype=tf.float64),name='b01')
b02 = tf.Variable(tf.random_uniform([n_nodes_l2,1],0,1,dtype=tf.float64),name='b02')
b03 = tf.Variable(tf.random_uniform([n_nodes_l3,1],0,1,dtype=tf.float64),name='b03')
#Building the architecture
def neural(X):
    a01 = tf.matmul(W01, X) + b01
    X2 = tf.sigmoid(a01)
    a02 = tf.matmul(W02, X2) + b02
    X3 = tf.sigmoid(a02)
    a03 = tf.matmul(W03, X3) + b03
    y_prediction = tf.sigmoid(a03)
    return y_prediction
#Loss and Optimizer
loss = []
final_loss= []
y_pred_col = []
for n_batch in range(0,len(x_data)):
    y_pred = neural(x[n_batch])
    y_pred_col.append(y_pred)
loss = tf.reduce_mean(tf.square(y_ - y_pred_col))
optimizer = tf.train.GradientDescentOptimizer(0.0005).minimize(loss)
#Setting up Tensor Session
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
n_steps = 30
for iter in range(n_steps):
    _, l, W01_train = sess.run([optimizer,loss,W01], feed_dict = {x: x_data, y_: y_data})
    print(l)
I would do things a bit differently. There is something that doesn't make sense in your code:
for n_batch in range(0,len(x_data)):
    y_pred = neural(x[n_batch])
    y_pred_col.append(y_pred)
Here, each call of neural creates a new neural network, so you end up with len(x_data) networks. I presume that you want a single network. In that case, you should be calling neural only once:
y_pred = neural(x)
This will require you to define the tf.matmul operations from neural in a different way (as now you need to take the first dimension of X into account). The loss function will then be defined as:
loss = tf.reduce_mean(tf.square(y_ - y_pred))
Putting it all together:
#Setting value placeholder
x = tf.placeholder(tf.float64,shape=(None,15), name = 'Input')
y_ = tf.placeholder(tf.float64,shape=(None,10), name = 'Output')
#Setting DNN key architectural values
n_layers = 3
n_nodes_l1 = 20
n_nodes_l2 = 30
n_nodes_l3 = 10
W01 = tf.Variable(tf.random_uniform([15, n_nodes_l1],0,1,dtype=tf.float64,name="W01"))
W02 = tf.Variable(tf.random_uniform([n_nodes_l1, n_nodes_l2],0,1,dtype=tf.float64),name='W02')
W03 = tf.Variable(tf.random_uniform([n_nodes_l2, n_nodes_l3],0,1,dtype=tf.float64),name='W03')
b01 = tf.Variable(tf.random_uniform([n_nodes_l1],0,1,dtype=tf.float64),name='b01')
b02 = tf.Variable(tf.random_uniform([n_nodes_l2],0,1,dtype=tf.float64),name='b02')
b03 = tf.Variable(tf.random_uniform([n_nodes_l3],0,1,dtype=tf.float64),name='b03')
#Building the architecture
def neural(X):
    a01 = tf.matmul(X, W01) + b01
    X2 = tf.sigmoid(a01)
    a02 = tf.matmul(X2, W02) + b02
    X3 = tf.sigmoid(a02)
    a03 = tf.matmul(X3, W03) + b03
    y_prediction = tf.sigmoid(a03)
    return y_prediction
#Loss and Optimizer
y_pred = neural(x)
loss = tf.reduce_mean(tf.square(y_ - y_pred))
optimizer = tf.train.GradientDescentOptimizer(0.0005).minimize(loss)
#Setting up Tensor Session
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
n_steps = 30
for iter in range(n_steps):
    _, l, W01_train = sess.run([optimizer,loss,W01], feed_dict = {x: x_data, y_: y_data})
    print(l)
Note that I changed the definition of the placeholders and weights for convenience. The code above will run provided that the shapes of x_data and y_data are (batch_size=19, 15) and (batch_size=19, 10), respectively. If the problem still remains after these modifications, then it is probably due to other reasons (e.g. your data or hyperparameters).
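If your existing x_data and y_data still carry the trailing singleton dimension required by the original placeholders, a quick way to adapt them is to drop that last axis (a minimal sketch, assuming they are NumPy arrays):
import numpy as np
# assumed original shapes: (19, 15, 1) and (19, 10, 1)
x_data = x_data.reshape(19, 15)   # equivalently np.squeeze(x_data, axis=-1)
y_data = y_data.reshape(19, 10)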
I am learning TensorFlow by implementing a simple logistic regression classifier that outputs whether a digit is a 7 or not when fed an MNIST image. I am using stochastic gradient descent. The crux of the TensorFlow code is:
# Maximum number of epochs
MaxEpochs = 1
# Learning rate
eta = 1e-2
ops.reset_default_graph()
n_x = 784
n_y = 1
x_tf = tf.placeholder(tf.float32, shape = [n_x, 1], name = 'x_tf')
y_tf = tf.placeholder(tf.float32, shape = [n_y, 1], name = 'y_tf')
w_tf = tf.get_variable(name = "w_tf", shape = [n_x, 1], initializer = tf.initializers.random_uniform());
b_tf = tf.get_variable(name = "b_tf", shape = [n_y, 1], initializer = tf.initializers.random_uniform());
z_tf = tf.add(tf.matmul(w_tf, x_tf, transpose_a = True), b_tf, name = 'z_tf')
yPred_tf = tf.sigmoid(z_tf, name = 'yPred_tf')
Loss_tf = tf.nn.sigmoid_cross_entropy_with_logits(logits = yPred_tf, labels = y_tf, name = 'Loss_tf')
with tf.name_scope('Training'):
    optimizer_tf = tf.train.GradientDescentOptimizer(learning_rate = eta)
    train_step = optimizer_tf.minimize(Loss_tf)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for Epoch in range(MaxEpochs):
        for Sample in range(len(XTrain)):
            x = XTrain[Sample]
            y = YTrain[Sample].reshape([-1,1])
            Train_sample = {x_tf: x, y_tf: y}
            sess.run(train_step, feed_dict = Train_sample)
toc = time.time()
print('\nElapsed time is: ', toc-tic,'s');
It builds the following graph (TensorBoard-related code has been removed for convenience):
The problem is that even though the weights and biases are initialised randomly (non-zero), the neuron isn't being trained. The weight histogram is as follows.
I didn't want to post something so trivial, but I am at my wit's end. Sorry for the long post. Thank you very much in advance for any guidance. A little side note: it takes 93.35 s to run, while it only took 10 or so seconds when I did this with numpy (same stochastic implementation). Why would this be so?
EDIT: The bias plot over the course of the training is as follows.
EDIT: The entire code, in case the issue is cropping up in something outside what I previously thought.
import tensorflow as tf
import numpy as np
import h5py
from tensorflow.python.framework import ops
import time
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
def Flatten(Im):
    FlatImArray = Im.reshape([Im.shape[0],-1,1])
    return FlatImArray
DigitTested = 7
# Separating the images with 7s from the rest
TrainIdxs = [];
for i in range(len(y_train)):
    if(y_train[i] == DigitTested):
        TrainIdxs.append(i)
TestIdxs = [];
for i in range(len(y_test)):
    if(y_test[i] == DigitTested):
        TestIdxs.append(i)
# Preparing the Datasets for training and testing
XTrain = Flatten(x_train);
YTrain = np.zeros([len(x_train),1]);
YTrain[TrainIdxs] = 1;
XTest = Flatten(x_test);
YTest = np.zeros([len(x_test),1]);
YTest[TestIdxs] = 1;
tic = time.time()
# Maximum number of epochs
MaxEpochs = 1
# Learning rate
eta = 1e-2
# Number of Epochs after which the neuron is validated
ValidationInterval = 1
ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables
n_x = 784
n_y = 1
x_tf = tf.placeholder(tf.float32, shape = [n_x, 1], name = 'x_tf')
y_tf = tf.placeholder(tf.float32, shape = [n_y, 1], name = 'y_tf')
w_tf = tf.get_variable(name = "w_tf", shape = [n_x, 1], initializer = tf.initializers.random_uniform());
b_tf = tf.get_variable(name = "b_tf", shape = [n_y, 1], initializer = tf.initializers.random_uniform());
z_tf = tf.add(tf.matmul(w_tf, x_tf, transpose_a = True), b_tf, name = 'z_tf')
yPred_tf = tf.sigmoid(z_tf, name = 'yPred_tf')
Loss_tf = tf.nn.sigmoid_cross_entropy_with_logits(logits = yPred_tf, labels = y_tf, name = 'Loss_tf')
with tf.name_scope('Training'):
    optimizer_tf = tf.train.GradientDescentOptimizer(learning_rate = eta)
    train_step = optimizer_tf.minimize(Loss_tf)
writer = tf.summary.FileWriter(r"C:\Users\braja\Documents\TBSummaries\MNIST1NTF\2")
tf.summary.histogram('Weights', w_tf)
tf.summary.scalar('Loss', tf.reshape(Loss_tf, []))
tf.summary.scalar('Bias', tf.reshape(b_tf, []))
merged_summary = tf.summary.merge_all()
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for Epoch in range(MaxEpochs):
        for Sample in range(len(XTrain)):
            x = XTrain[Sample]
            y = YTrain[Sample].reshape([-1,1])
            Train_sample = {x_tf: x, y_tf: y}
            MergedSumm, _ = sess.run([merged_summary, train_step], feed_dict = Train_sample)
            writer.add_summary(summary = MergedSumm, global_step = Sample)
        if((Epoch+1) % ValidationInterval == 0):
            ValidationError = 0
            for Sample in range(len(XTest)):
                x = XTest[Sample]
                y = YTest[Sample].reshape([-1,1])
                Test_sample = {x_tf: x, y_tf: y}
                yPred = sess.run(yPred_tf, feed_dict = Test_sample)
                ValidationError += abs(yPred - YTest[Sample])
            print('Validation Error at', Epoch+1,'Epoch:', ValidationError);
    writer.add_graph(tf.Session().graph)
    writer.close()
toc = time.time()
print('\nElapsed time is: ', toc-tic,'s');
Looking at the bias value, it looks like you are seeing saturation of the sigmoid function.
This happens when you push the sigmoid input (z_tf) to the extreme ends of the sigmoid function. When that happens, the gradient returned is so small that training stagnates. The probable cause is that you have doubled up on sigmoid functions: sigmoid_cross_entropy_with_logits applies a sigmoid to its input, but you have already applied one yourself. Try removing one of them.
In addition, by default tf.initializers.random_uniform() produces random values between 0 and 1. You probably want to initialise your weights and biases symmetrically about 0 and with really small values to start with. This can be done by passing the minval and maxval arguments to tf.initializers.random_uniform().
They will grow on their own during training, and starting them small again helps prevent sigmoid saturation.
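A minimal sketch of those two changes, assuming the rest of the posted graph stays the same (the +/-0.01 bounds are only an illustrative choice):
w_tf = tf.get_variable(name = "w_tf", shape = [n_x, 1],
                       initializer = tf.initializers.random_uniform(minval=-0.01, maxval=0.01))
b_tf = tf.get_variable(name = "b_tf", shape = [n_y, 1],
                       initializer = tf.initializers.random_uniform(minval=-0.01, maxval=0.01))
z_tf = tf.add(tf.matmul(w_tf, x_tf, transpose_a = True), b_tf, name = 'z_tf')
yPred_tf = tf.sigmoid(z_tf, name = 'yPred_tf')   # kept only for making predictions
# feed the raw logits to the loss; it applies the sigmoid internally
Loss_tf = tf.nn.sigmoid_cross_entropy_with_logits(logits = z_tf, labels = y_tf, name = 'Loss_tf')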
I have a dataset of 178 elements, each containing 13 features and 1 label.
The label is stored as a one-hot array. My training dataset is made up of 158 elements.
Here is what my model looks like:
x = tf.placeholder(tf.float32, [None,training_data.shape[1]])
y_ = tf.placeholder(tf.float32, [None,training_data_labels.shape[1]])
node_1 = 300
node_2 = 300
node_3 = 300
out_n = 3
#1
W1 = tf.Variable(tf.random_normal([training_data.shape[1], node_1]))
B1 = tf.Variable(tf.random_normal([node_1]))
y1 = tf.add(tf.matmul(x,W1),B1)
y1 = tf.nn.relu(y1)
#2
W2 = tf.Variable(tf.random_normal([node_1, node_2]))
B2 = tf.Variable(tf.random_normal([node_2]))
y2 = tf.add(tf.matmul(y1,W2),B2)
y2 = tf.nn.relu(y2)
#3
W3 = tf.Variable(tf.random_normal([node_2, node_3]))
B3 = tf.Variable(tf.random_normal([node_3]))
y3 = tf.add(tf.matmul(y2,W3),B3)
y3 = tf.nn.relu(y3)
#output
W4 = tf.Variable(tf.random_normal([node_3, out_n]))
B4 = tf.Variable(tf.random_normal([out_n]))
y4 = tf.add(tf.matmul(y3,W4),B4)
y = tf.nn.softmax(y4)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(200):
        sess.run(optimizer,feed_dict={x:training_data, y_:training_data_labels})
    correct = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
    print('Accuracy:',accuracy.eval({x:eval_data, y_:eval_data_labels}))
But the accuracy is very low. I tried increasing the range from 200 to a higher number, but it still remains low.
What could I do to improve the results?
The problem is that you're taking the softmax of y4 and then passing that to tf.nn.softmax_cross_entropy_with_logits. This error is common enough that there's actually a note about it in the documentation for softmax_cross_entropy_with_logits:
WARNING: This op expects unscaled logits, since it performs a softmax on logits internally
for efficiency. Do not call this op with the output of softmax, as it will produce
incorrect results.
The rest of your code looks fine, so just rename y4 to y (i.e. feed the raw logits to the loss) and get rid of y = tf.nn.softmax(y4).
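A minimal sketch of the corrected output layer, assuming everything else in the posted code stays the same:
#output
W4 = tf.Variable(tf.random_normal([node_3, out_n]))
B4 = tf.Variable(tf.random_normal([out_n]))
y = tf.add(tf.matmul(y3,W4),B4)   # raw logits, no softmax here
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
The accuracy computation can stay as it is: tf.argmax(y, 1) picks the same class for the logits as for their softmax, since softmax is monotonic.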
I am trying to factor Ax^2+Bx+C into (ax+b)(cx+d), where A, B and C are known, and to solve for the values of a, b, c and d.
Here is the code:
import tensorflow as tf
a = tf.Variable([.5])
b = tf.Variable([.5])
c = tf.Variable([.5])
d = tf.Variable([.5])
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
fn1 = 2*x**2+3*x+4 #A=2,B=3,C=4
fn2 = (a*x+b)*(c*x+d)
x_train = [1,2,3,4]
y_train = [9,18,31,48]
loss = tf.reduce_sum(tf.square(fn2-y))
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    sess.run(train, {x:x_train, y:y_train})
print(sess.run([a,b,c,d]))
The result shows nan for all of a, b, c and d.
How do I fix that? Did I miss something? Thanks for the help.
Your cost function is failing to converge at the learning rate of 0.01. Set the learning rate to 0.0001 (or lower) and the cost function begins to converge.
optimizer = tf.train.GradientDescentOptimizer(0.0001)
Also, if you modify your fn2 to a * x ** 2 + b * x + c, you will get a solution closer to the Ax^2+Bx+C form you started from. But if you use (ax+b)(cx+d), you might get a different solution that still satisfies the small training dataset with x = [1,2,3,4].
Another small tip is not to initialize all the variables with the same value (0.5 in your case). Randomly initialize them between -1.0 and 1.0.
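Putting both suggestions into the posted script could look like this (a minimal sketch; the initialisation range is only illustrative):
# initialise each variable with a different random value in [-1, 1]
a = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
c = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
d = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
# lower learning rate so the loss converges instead of blowing up to nan
optimizer = tf.train.GradientDescentOptimizer(0.0001)
train = optimizer.minimize(loss)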
Python 3.5.2, TensorFlow 1.0.0.
I am somewhat new to programming autoencoders. I am trying to implement a simple network to get familiar with them, based on the example from here. I have used the same input data, which a CNN is able to classify perfectly with 98% accuracy. My data has 2000 rows, and each row is a signal. I am trying a stack of 3 autoencoder layers with 512, 256 and 64 nodes.
class dimensions:
    input_width, input_height = 1,1024
    BATCH_SIZE = 50
    layer = [input_width*input_height, 512, 256, 64]
    learningrate = 0.001
def myencoder(x,corrupt_prob,dimensions):
    current_input = corrupt(x) * corrupt_prob + x * (1 - corrupt_prob)
    encoder = []
    for layer_i, n_output in enumerate(dimensions.layer[1:]):
        n_input = int(current_input.get_shape()[1])
        W = tf.Variable(
            tf.random_uniform([n_input, n_output],
                              -1.0 / math.sqrt(n_input),
                              1.0 / math.sqrt(n_input)))
        b = tf.Variable(tf.zeros([n_output]))
        encoder.append(W)
        output = tf.nn.tanh(tf.matmul(current_input, W) + b)
        current_input = output
    z = current_input
    encoder.reverse()
    # Build the decoder using the same weights
    for layer_i, n_output in enumerate(model.layer[:-1][::-1]):
        W = tf.transpose(encoder[layer_i])
        b = tf.Variable(tf.zeros([n_output]))
        output = tf.nn.tanh(tf.matmul(current_input, W) + b)
        current_input = output
    # now have the reconstruction through the network
    y = current_input
    # cost function measures pixel-wise difference
    cost = tf.sqrt(tf.reduce_mean(tf.square(y - x)))
    return z,y,cost
sess = tf.Session()
model = dimensions()
data_train,data_test,label_train,label_test = load_data(Datainfo,folder)
x = tf.placeholder(tf.float32,[model.BATCH_SIZE,model.input_height*model.input_width])
corrupt_prob = tf.placeholder(tf.float32,[1])
z,y,cost = myencoder(x,corrupt_prob,dimensions)
train_step = tf.train.AdamOptimizer(model.learningrate).minimize(cost)
lossfun = np.zeros(STEPS)
sess.run(tf.global_variables_initializer())
for i in range(STEPS):
    train_data = batchdata(data_train, model.BATCH_SIZE)
    epoch_loss = 0
    for j in range(model.BATCH_SIZE):
        sess.run(train_step,feed_dict={x:train_data,corrupt_prob:[1.0]})
        c = sess.run(cost, feed_dict={x: train_data, corrupt_prob: [1.0]})
        epoch_loss += c
    lossfun[i] = epoch_loss
    print('Epoch', i, 'completed out of', STEPS, 'loss:', epoch_loss)
My loss function looks like this (x axis: number of iterations, y axis: loss): the loss doesn't decrease and the network doesn't learn anything.
Any help appreciated!
In the function myencoder, the weight variables W and b are created inside the function, so they get re-initialized on every training step and the network never retains what it has learned.
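If the encoder-building code does end up running more than once, one way to guarantee that each layer's weights are created a single time and then reused is to request them through tf.get_variable inside a named variable scope. A minimal sketch with hypothetical names, assuming a TF 1.x version that supports tf.AUTO_REUSE (1.4+):
def encoder_layer(inputs, n_output, layer_i):
    # variables are created the first time this scope is entered and reused afterwards
    with tf.variable_scope('encoder_layer_%d' % layer_i, reuse=tf.AUTO_REUSE):
        n_input = int(inputs.get_shape()[1])
        W = tf.get_variable('W', [n_input, n_output],
                            initializer=tf.random_uniform_initializer(-0.05, 0.05))
        b = tf.get_variable('b', [n_output], initializer=tf.zeros_initializer())
        return tf.nn.tanh(tf.matmul(inputs, W) + b), W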
My bio dataset has 20K rows and 170 features. I'm doing DNN regression to predict bioactivity (a one-unit output layer with a linear activation and two hidden layers). It ran very slowly on my CPU and produced a really bad r-square (negative). Then I ran it with skflow using the same network architecture. It was way faster (more than 100x) and I got a much better r2 than in the previous run (r2 = 0.3), although still not a great result. Does anyone know why? Is anything wrong with my code? What is the difference between my code and the underlying skflow code? Is my loss function defined correctly?
Help is very much appreciated.
Below is the code:
# with scikit flow
dnn_reg = skflow.TensorFlowDNNRegressor(hidden_units=[200,500], steps=3000, learning_rate=0.5)
dnn_reg.fit(x_train, y_train)
pred_train = dnn_reg.predict (x_train)
pred_valid = dnn_reg.predict (x_valid)
print ('r-square for training set', r2_score(y_train, pred_train))
print ('r-square for validation set',r2_score(y_valid, pred_valid))
# tensorflow code
n_samples = 15000
n_features = 171
batch_size = 1000
num_batch = n_samples/batch_size
hidden1 = 200
hidden2 = 100
learning_rate=0.01
n_epoch=3000
graph = tf.Graph()
with graph.as_default():
    #constants and placeholders
    tf_train_data = tf.placeholder(tf.float32, shape=(batch_size, n_features))
    tf_train_act = tf.placeholder(tf.float32, shape=(batch_size))
    tf_valid_data = tf.constant(x_valid.astype(np.float32))
    # variables
    w1 = tf.Variable(tf.truncated_normal([n_features, hidden1]), name='weight1')
    b1 = tf.Variable(tf.zeros([hidden1]), name='bias1')
    w2 = tf.Variable(tf.truncated_normal([hidden1, hidden2]), name='weight2')
    b2 = tf.Variable(tf.zeros([hidden2]), name='bias2')
    w3 = tf.Variable(tf.truncated_normal([hidden2, 1]), name='weight3')
    b3 = tf.Variable(tf.zeros([1]), name='bias3')
    #parameter histograms
    w1_hist = tf.histogram_summary('weight_input', w1)
    w2_hist = tf.histogram_summary('weight2', w2)
    w3_hist = tf.histogram_summary('weight3', w3)
    b1_hist = tf.histogram_summary('bias1', b1)
    b2_hist = tf.histogram_summary('bias2', b2)
    b3_hist = tf.histogram_summary('bias3', b3)
    #y_hist = tf.histogram_summary('y', y_train)
    #training computation
    def forward_prop(input):
        with tf.name_scope('hidden_1') as scope:
            h1 = tf.nn.relu(tf.matmul(input, w1)+b1)
        with tf.name_scope('hidden_2') as scope:
            h2 = tf.nn.relu(tf.matmul(h1, w2)+b2)
        with tf.name_scope('output') as scope:
            output = tf.matmul(h2, w3)+b3
        return output
    #forward propagation
    output = forward_prop(tf_train_data)
    with tf.name_scope('cost') as scope:
        loss = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(tf_train_act, output))))
        cost_summary = tf.scalar_summary('cost', loss)
    #optimizer
    with tf.name_scope('train') as scope:
        optimizer = tf.train.AdagradOptimizer(learning_rate).minimize(loss)
    #predictions
    train_prediction = output
    valid_prediction = forward_prop(tf_valid_data)
with tf.Session(graph=graph) as session:
    session.run(tf.initialize_all_variables())
    print ('initialized')
    merged = tf.merge_all_summaries()
    writer = tf.train.SummaryWriter('./logs/log1', session.graph)
    for epoch in range(n_epoch):
        mini = np.array_split(range(y_train.shape[0]), num_batch)
        for idx in mini[:-1]:
            batch_x = x_train[idx]
            batch_y = y_train[idx]
            feed_dict = {tf_train_data:batch_x, tf_train_act:batch_y}
            _, l, pred_train = session.run([optimizer, loss, output], feed_dict=feed_dict)
        if epoch % 100 == 0:
            print ('minibatch loss at step %d: %f' % (epoch, l))
            print ('minibatch r2: %0.1f' % r2_score(batch_y, pred_train))
            print ('validation r2: %0.1f' % r2_score(y_valid, valid_prediction.eval()))
There are a lot of parameters that differ between your TensorFlowDNNRegressor and the vanilla TensorFlow model, including:
hidden2 = 100 (the skflow run uses hidden_units=[200, 500])
learning_rate = 0.01 (the skflow run uses 0.5)
batch_size = 1000 (the default batch_size for TensorFlowDNNRegressor is 32). I think that's the main reason why TensorFlowDNNRegressor runs much faster.
Also, TensorFlowDNNRegressor uses SGD as its default optimizer, whereas your model uses Adagrad.
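A minimal sketch of what aligning the hand-written graph with the TensorFlowDNNRegressor settings could look like (the values are taken from the skflow call and its defaults; the rest of the posted graph is assumed unchanged):
hidden1 = 200
hidden2 = 500          # matches hidden_units=[200, 500]
learning_rate = 0.5    # matches the skflow learning_rate
batch_size = 32        # TensorFlowDNNRegressor's default batch size
# plain SGD, the regressor's default optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)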