python 3.5.2, tensorflow 1.0.0
I am somewhat new to programming autoencoders. I am trying to implement a simple network to get familiar with them, based on the example here. I have used the same input data, which a CNN is able to classify with 98% accuracy. My data set has 2000 rows, and each row is a signal. I am trying a stack of 3 autoencoder layers with 512, 256 and 64 nodes.
class dimensions:
    input_width, input_height = 1, 1024
    BATCH_SIZE = 50
    layer = [input_width * input_height, 512, 256, 64]
    learningrate = 0.001

def myencoder(x, corrupt_prob, dimensions):
    current_input = corrupt(x) * corrupt_prob + x * (1 - corrupt_prob)
    encoder = []
    for layer_i, n_output in enumerate(dimensions.layer[1:]):
        n_input = int(current_input.get_shape()[1])
        W = tf.Variable(
            tf.random_uniform([n_input, n_output],
                              -1.0 / math.sqrt(n_input),
                              1.0 / math.sqrt(n_input)))
        b = tf.Variable(tf.zeros([n_output]))
        encoder.append(W)
        output = tf.nn.tanh(tf.matmul(current_input, W) + b)
        current_input = output
    z = current_input
    encoder.reverse()

    # Build the decoder using the same weights
    for layer_i, n_output in enumerate(model.layer[:-1][::-1]):
        W = tf.transpose(encoder[layer_i])
        b = tf.Variable(tf.zeros([n_output]))
        output = tf.nn.tanh(tf.matmul(current_input, W) + b)
        current_input = output

    # now have the reconstruction through the network
    y = current_input
    # cost function measures pixel-wise difference
    cost = tf.sqrt(tf.reduce_mean(tf.square(y - x)))
    return z, y, cost
sess = tf.Session()
model = dimensions()
data_train, data_test, label_train, label_test = load_data(Datainfo, folder)

x = tf.placeholder(tf.float32, [model.BATCH_SIZE, model.input_height * model.input_width])
corrupt_prob = tf.placeholder(tf.float32, [1])
z, y, cost = myencoder(x, corrupt_prob, dimensions)
train_step = tf.train.AdamOptimizer(model.learningrate).minimize(cost)
lossfun = np.zeros(STEPS)
sess.run(tf.global_variables_initializer())

for i in range(STEPS):
    train_data = batchdata(data_train, model.BATCH_SIZE)
    epoch_loss = 0
    for j in range(model.BATCH_SIZE):
        sess.run(train_step, feed_dict={x: train_data, corrupt_prob: [1.0]})
        c = sess.run(cost, feed_dict={x: train_data, corrupt_prob: [1.0]})
        epoch_loss += c
    lossfun[i] = epoch_loss
    print('Epoch', i, 'completed out of', STEPS, 'loss:', epoch_loss)
My loss curve looks like this (x axis: number of iterations, y axis: loss):
The loss doesn't decrease and the network doesn't learn anything.
Any help is appreciated!
In the function myencoder, the weight variables W and b are initialized in every training step.
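If the encoder builder does end up being called more than once (or you want to guard against that), one common TF 1.x pattern is to create the weights with tf.get_variable inside a tf.variable_scope, so repeated calls reuse the same variables instead of creating new ones. This is only a minimal sketch of that pattern, not the poster's exact code (note that reuse=tf.AUTO_REUSE needs a TF version newer than the 1.0.0 mentioned above, roughly 1.4+):
import math
import tensorflow as tf

def tanh_layer(inputs, n_output, name):
    # Variables "<name>/W" and "<name>/b" are created on the first call and
    # reused on every later call, so no fresh weights appear per training step.
    n_input = int(inputs.get_shape()[1])
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        W = tf.get_variable(
            "W", [n_input, n_output],
            initializer=tf.random_uniform_initializer(-1.0 / math.sqrt(n_input),
                                                      1.0 / math.sqrt(n_input)))
        b = tf.get_variable("b", [n_output], initializer=tf.zeros_initializer())
    return tf.nn.tanh(tf.matmul(inputs, W) + b)

# Build the graph once, before the training loop, and run the initializer once;
# inside the loop only sess.run(train_step, ...) should be executed.
x = tf.placeholder(tf.float32, [None, 1024])
h = x
for n_output, name in zip([512, 256, 64], ["enc1", "enc2", "enc3"]):
    h = tanh_layer(h, n_output, name)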
Related
I'm doing a TensorFlow tutorial and I have the following task:
I need to create 10 logits with 2 hidden layers and calculate their loss using the functions
tf.nn.softmax()
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
My code looks like this:
import tensorflow as tf, numpy as np

trainx = np.array(trainx) / 300.0
trainy = np.array(trainy)
testx = np.array(testx) / 300.0
testy = np.array(testy)

print('train x shape is {}'.format(trainx.shape))
print('train y shape is {}'.format(trainy.shape))
print('test x shape is {}'.format(testx.shape))
print('test y shape is {}'.format(testy.shape))

learning_rate = 0.001

def get_train_batch():
    indices = np.random.randint(low=0, high=60000, size=[64])
    return trainx[indices], trainy[indices]

g = tf.Graph()
with g.as_default():
    x = tf.placeholder(float32, shape=[None, 784])
    y = tf.placeholder(float32, shape=[None, 784])

    def fc(tensor, outdim, name):
        w = tf.get_variable(name + "w", shape=[1, outdim], dtype=float32)
        b = tf.get_variable(name + "b", shape=[], dtype=float32, initializer=tf.constant_initializer(0.0))
        return tf.add(tf.linalg.matmul(tensor, w), b)

    first = fc(trainx, trainx.shape, "first")
    first = tf.nn.relu(first)
    second = fc(first, trainx.shape, "second")
    second = tf.nn.relu(second)

    logits = []
    names = [some names]
    logits.append(tf.log_sigmoid(first))
    logits.append(tf.log_sigmoid(second))
    for i in range(10):
        logits.append(fc(trainx, trainx.shape, names[0]))

    tf.nn.softmax()  # how do I use this to get the loss?
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    loss = ?  # how do I calculate the loss here?

    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    minimize_op = optimizer.minimize(loss, var_list=tf.trainable_variables())

sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(50000):
    randomX, randomY = get_train_batch()
    sess.run(minimize_op, feed_dict={x: randomX, y: randomY})
    if epoch % 50 == 0:
        trainloss = sess.run(loss, feed_dict={x: testx, y: testy})
        print("Current loss: ", trainloss)
My questions are:
How do I use the mentioned functions correctly to calculate the loss?
Where do I get the labels from?
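For reference, the usual pattern is that tf.nn.sparse_softmax_cross_entropy_with_logits takes the raw logits of shape [batch, num_classes] together with integer class ids of shape [batch], and tf.nn.softmax is only needed afterwards if you want probabilities; it is not part of the loss. A minimal sketch under those assumptions (the layer sizes and the MNIST-style shapes here are illustrative, not taken from the question):
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 784])   # input features
labels = tf.placeholder(tf.int64, shape=[None])      # integer class ids 0..9

hidden1 = tf.layers.dense(x, 128, activation=tf.nn.relu)
hidden2 = tf.layers.dense(hidden1, 64, activation=tf.nn.relu)
logits = tf.layers.dense(hidden2, 10)                 # raw scores, no activation here

# per-example cross entropy, averaged into one scalar loss
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
loss = tf.reduce_mean(cross_entropy)

# softmax only reads out class probabilities; it does not feed the loss
probabilities = tf.nn.softmax(logits)

train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
The labels come from the training data itself: feed the integer targets (randomY in the question's loop) into the labels placeholder alongside the inputs.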
I want to use the dropout function of TensorFlow to check whether I can improve the results (TPR, FPR) of my recurrent neural network.
However, I implemented it by following a guide, so I am not sure whether I made any mistakes. If I train my model for e.g. 10 epochs, I get nearly the same results after validation. That's why I am not sure whether I am using the dropout function correctly. Is the implementation in the following code right, or did I do something wrong? And if I did everything right, why do I get nearly the same results?
hm_epochs = 10
n_classes = 2
batch_size = 128
chunk_size = 341
n_chunks = 5
rnn_size = 32
dropout_prop = 0.5  # Dropout, probability to drop a unit
batch_size_validation = 65536

x = tf.placeholder('float', [None, n_chunks, chunk_size])
y = tf.placeholder('float')

def recurrent_neural_network(x):
    layer = {'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
             'biases': tf.Variable(tf.random_normal([n_classes]))}

    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, chunk_size])
    x = tf.split(x, n_chunks, 0)

    lstm_cell = rnn.BasicLSTMCell(rnn_size)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    output = tf.matmul(outputs[-1], layer['weights']) + layer['biases']

    # DROPOUT Implementation -> is this code really working?
    # The result is nearly the same after 20 epochs...
    output_layer = tf.layers.dropout(output, rate=dropout_prop)

    return output

def train_neural_network(x):
    prediction = recurrent_neural_network(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for epoch in range(1, hm_epochs + 1):
            epoch_loss = 0
            for i in range(0, training_data.shape[0], batch_size):
                epoch_x = np.array(training_data[i:i+batch_size, :, :], dtype='float')
                epoch_y = np.array(training_labels[i:i+batch_size, :], dtype='float')
                if len(epoch_x) != batch_size:
                    epoch_x = epoch_x.reshape((len(epoch_x), n_chunks, chunk_size))
                else:
                    epoch_x = epoch_x.reshape((batch_size, n_chunks, chunk_size))
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c

train_neural_network(x)
print("rnn - finished!")
In its most basic form, dropout should happen inside the cell and be applied to the weights; you only applied it afterwards. This article explains it pretty well, with some good visualizations and a few variations.
To use it in your code, you can either:
Implement your own RNN cell where the keep probability is a parameter that initializes the cell, or a parameter that is passed in on every call, or
Use an RNN dropout wrapper here (a minimal sketch follows).
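A minimal sketch of the wrapper approach, adapted to the cell construction in the question (the keep-probability placeholder and its values are illustrative, not taken from the original code):
# assumes: from tensorflow.contrib import rnn, as in the question's code
keep_prob = tf.placeholder_with_default(1.0, shape=())   # feed e.g. 0.5 while training

lstm_cell = rnn.BasicLSTMCell(rnn_size)
lstm_cell = tf.nn.rnn_cell.DropoutWrapper(
    lstm_cell,
    input_keep_prob=keep_prob,    # dropout on the cell inputs
    output_keep_prob=keep_prob)   # dropout on the cell outputs

outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
Feed keep_prob: 1 - dropout_prop in the training feed_dict and leave the default of 1.0 during validation, so dropout is only active while training.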
I have developed a deep sparse autoencoder cost function with TensorFlow, and I have downloaded the autoencoder structure from the following link:
https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/autoencoder.py
The simple autoencoder has the following cost function:
loss = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
I have added sparsity to the autoencoder by using the following mathematical functions:
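(The formulas were attached as an image in the original post; reconstructed from the code below, the objective is the standard sparse autoencoder cost:)

J = \frac{1}{2 N_v} \sum \left(y_{\text{true}} - y_{\text{pred}}\right)^2 + \frac{\lambda}{2}\left(\lVert W_{\text{enc}}\rVert^2 + \lVert W_{\text{dec}}\rVert^2\right) + \beta \sum_{j} \mathrm{KL}\left(\rho \,\middle\|\, \hat{\rho}_j\right)

\mathrm{KL}\left(\rho \,\middle\|\, \hat{\rho}_j\right) = \rho \log\frac{\rho}{\hat{\rho}_j} + (1-\rho)\log\frac{1-\rho}{1-\hat{\rho}_j}

where \hat{\rho}_j is the mean activation of hidden unit j over the batch and \rho is the target sparsity (sparsity_param).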
I have implemented these mathematical functions with the following code:
learning_rate = 0.01
training_epochs = 1000
batch_size = 256
display_step = 1
examples_to_show = 10
lambda_ = 3e-3
beta = 3
Nv = batch_size

def KL_divergence(x1, y1):
    return x1 * tf.log(x1 / y1) + (1 - x1) * tf.log((1 - x1) / (1 - y1))

# Weights
W1 = sum(tf.reduce_sum(tf.abs(var)**2) for var in tf.trainable_variables() if
         'encoder_' in var.name)
W2 = sum(tf.reduce_sum(tf.abs(var)**2) for var in tf.trainable_variables() if
         'decoder_' in var.name)

## Sparsity
rho_hat = (1 + tf.reduce_mean(encoder(X), axis=0)) / 2
rho = np.tile(sparsity_param, n_output)

cost = (tf.reduce_sum(tf.pow(y_true - y_pred, 2)) / (2 * Nv) + (lambda_ / 2) * (W1 + W2)
        + beta * tf.reduce_sum(KL_divergence(rho, rho_hat)))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
The name of the paper from which I took the mathematical functions is:
"Visualization of Driving Behavior Based on Hidden Feature Extraction by Using Deep Learning"
Thanks
Hi, I have developed the final version of the deep sparse autoencoder with the following Python code.
It is working and ready to use:
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def next_batch(num, data, labels):
    '''
    Return a total of `num` random samples and labels.
    '''
    idx = np.arange(0, len(data))
    np.random.shuffle(idx)
    idx = idx[:num]
    data_shuffle = [data[i] for i in idx]
    labels_shuffle = [labels[i] for i in idx]   # index into labels, not data
    return np.asarray(data_shuffle), np.asarray(labels_shuffle)
# Parameters
learning_rate = 0.01
training_epochs = 1000
batch_size = 256
display_step = 1
examples_to_show = 10
lambda_ = 3e-3
beta = 3
# Network Parameters
n_input = 60       # number of input features
n_hidden_1 = 30    # 1st layer num features
n_hidden_2 = 10    # 2nd layer num features
n_output = 3       # output layer num features
sparsity_param = 0.05

# tf Graph input
X = tf.placeholder("float", [None, n_input])
weights = {
    'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'encoder_h3': tf.Variable(tf.random_normal([n_hidden_2, n_output])),
    'decoder_h1': tf.Variable(tf.random_normal([n_output, n_hidden_2])),
    'decoder_h2': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
    'decoder_h3': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'encoder_b3': tf.Variable(tf.random_normal([n_output])),
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b2': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b3': tf.Variable(tf.random_normal([n_input])),
}
# Building the encoder
def encoder(x):
    # Encoder hidden layer with sigmoid activation #1
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
                                   biases['encoder_b1']))
    # Encoder hidden layer with sigmoid activation #2
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
                                   biases['encoder_b2']))
    # Encoder hidden layer with sigmoid activation #3
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_h3']),
                                   biases['encoder_b3']))
    return layer_3

# Building the decoder
def decoder(x):
    # Decoder hidden layer with sigmoid activation #1
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
                                   biases['decoder_b1']))
    # Decoder hidden layer with sigmoid activation #2
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
                                   biases['decoder_b2']))
    # Decoder hidden layer with sigmoid activation #3
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_h3']),
                                   biases['decoder_b3']))
    return layer_3
def KL_divergence(x1, y1):
    return x1 * tf.log(x1 / y1) + (1 - x1) * tf.log((1 - x1) / (1 - y1))
# Construct model
Nv = batch_size
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
#Weights
W1 = sum(tf.reduce_sum(tf.abs(var)**2) for var in tf.trainable_variables() if 'encoder_' in var.name)
W2 = sum(tf.reduce_sum(tf.abs(var)**2) for var in tf.trainable_variables() if 'decoder_' in var.name)
# Prediction
y_pred = decoder_op
# Targets (Labels) are the input data.
y_true = X
## Sparsity
rho_hat = tf.reduce_mean(encoder(X),axis=0)
#rho_hat = (1+tf.reduce_mean(encoder(X),axis=0))/2
rho = np.tile(sparsity_param, n_output)
# Define loss and optimizer, minimize the squared error
size = tf.shape(tf.pow(y_true - y_pred, 2))
cost = tf.reduce_sum(tf.pow(y_true - y_pred, 2))/(2*Nv) + (lambda_/2)*(W1+W2) + beta * tf.reduce_sum(KL_divergence(rho,rho_hat))
#(lambda_/2)*(tf.reduce_sum(W1**2) + tf.reduce_sum(W1**2))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    total_batch = int(len(data) / batch_size)
    # Training cycle
    for epoch in range(training_epochs):
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = next_batch(batch_size, data[:, 0:60], data[:, 60:])
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1),
                  "cost=", "{:.9f}".format(c))
    print("Optimization Finished!")

    tr, label = next_batch(200000, data[:, 0:60], data[:, 60:])
    encode_decode = sess.run(
        encoder_op, feed_dict={X: tr})
Here is the code for a 3 layer sparse autoencoder, implemented in Tensorflow 2.1.
The input and the output, in this case, are 1D arrays (496).
I would like to give credit to Dr. Zhiwei Lin at Ulster University for providing the initial implementation on github
https://github.com/zhiweiuu/sparse-autoencoder-tensorflow/blob/master/SparseAutoEncoder.py
I have wrapped it in a class, where each layer is now an instance variable. This makes it easier to get different outputs for each layer.
You will notice that I have used only the first layer output for the sparsity constraint.
This architecture is similar to the one used in this article: https://pubmed.ncbi.nlm.nih.gov/29302382/
My implementation is simple, and the training loop can be improved :)
To train the model: create it with model = my_model(), then loop for i in range(1000): model.network_learn(X, Y). A full sketch of this loop is given after the class.
class my_model:
    def __init__(self):
        xavier = tf.keras.initializers.GlorotUniform()
        self.l1 = tf.keras.layers.Dense(496, kernel_initializer=xavier, activation=tf.nn.sigmoid, input_shape=(496,))
        self.l2 = tf.keras.layers.Dense(496, kernel_initializer=xavier, activation=tf.nn.sigmoid)
        self.l3 = tf.keras.layers.Dense(496, kernel_initializer=xavier, activation=tf.nn.sigmoid)
        self.train_op = tf.keras.optimizers.SGD(learning_rate=0.01)
        self.rho = 0.05
        self.alpha = 0.001
        self.beta = 4

    def kl_divergence(self, rho, rho_hat):
        return rho * tf.math.log(rho) - rho * tf.math.log(rho_hat) + (1 - rho) * tf.math.log(1 - rho) - (1 - rho) * tf.math.log(1 - rho_hat)

    def run(self, X):
        out1 = self.l1(X)
        out2 = self.l2(out1)
        out3 = self.l3(out2)
        return out3

    def get_loss(self, X, Y):
        rho_hat = tf.reduce_mean(self.l1(X), axis=0)
        kl = self.kl_divergence(self.rho, rho_hat)
        out1 = self.l1(X)
        out2 = self.l2(out1)
        X_prime = self.l3(out2)
        diff = X - X_prime

        W1 = self.l1.variables[0]
        W2 = self.l2.variables[0]
        W3 = self.l3.variables[0]

        cost = 0.5 * tf.reduce_mean(tf.reduce_sum(diff**2, axis=1)) \
               + 0.5 * self.alpha * (tf.nn.l2_loss(W1) + tf.nn.l2_loss(W2) + tf.nn.l2_loss(W3)) \
               + self.beta * tf.reduce_sum(kl)
        return cost

    def get_grad(self, X, Y):
        with tf.GradientTape() as tape:
            tape.watch(self.l1.variables)
            tape.watch(self.l2.variables)
            tape.watch(self.l3.variables)
            L = self.get_loss(X, Y)
            g = tape.gradient(L, [self.l1.variables[0], self.l1.variables[1],
                                  self.l2.variables[0], self.l2.variables[1],
                                  self.l3.variables[0], self.l3.variables[1]])
        return g

    def network_learn(self, X, Y):
        g = self.get_grad(X, Y)
        self.train_op.apply_gradients(zip(g, [self.l1.variables[0], self.l1.variables[1],
                                              self.l2.variables[0], self.l2.variables[1],
                                              self.l3.variables[0], self.l3.variables[1]]))
Here is how you would train a network like this
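A minimal, self-contained sketch of that training loop (the random 496-dimensional data here is only a placeholder so the snippet runs on its own; for an autoencoder the reconstruction target is the input itself):
import numpy as np
import tensorflow as tf

X = tf.constant(np.random.rand(100, 496), dtype=tf.float32)   # placeholder data, 100 samples
Y = X                                                          # reconstruction target = input

model = my_model()
for i in range(1000):
    model.network_learn(X, Y)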
I am trying to reproduce a deep learning regression result in TensorFlow. If I train a neural network with the MLPRegressor class from sklearn, I get a very nice validation result of 98%.
The MLPRegressor:
http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html#sklearn.neural_network.MLPRegressor
I am trying to reproduce the model in TensorFlow by copying the default values of the MLPRegressor class into a TensorFlow model. However, I cannot get the same result; I only get about 75% most of the time.
My TF model:
tf.reset_default_graph()
graph = tf.Graph()

n_input = 3          # n variables
n_hidden_1 = 100
n_hidden_2 = 1
n_output = 1
beta = 0.001
learning_rate = 0.001

with graph.as_default():
    tf_train_feat = tf.placeholder(tf.float32, shape=(None, n_input))
    tf_train_label = tf.placeholder(tf.float32, shape=(None))
    tf_test_feat = tf.constant(test_feat, tf.float32)

    """
    Weights and biases. The weight matrix's columns will be the output vector.
    * ndarray([rows, columns])
    * ndarray([in, out])
    tf.placeholder(None) and tf.placeholder([None, 3]) mean that the number of rows is not fixed. In the second
    placeholder the number of columns is fixed at 3.
    """
    W = {
        "layer_1": tf.Variable(tf.truncated_normal([n_input, n_hidden_1])),
        "layer_2": tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2])),
        "layer_3": tf.Variable(tf.truncated_normal([n_hidden_2, n_output])),
    }
    b = {
        "layer_1": tf.Variable(tf.zeros([n_hidden_1])),
        "layer_2": tf.Variable(tf.zeros([n_hidden_2])),
    }

    def computation(X):
        layer_1 = tf.nn.relu(tf.matmul(X, W["layer_1"]) + b["layer_1"])
        layer_2 = tf.nn.relu(tf.matmul(layer_1, W["layer_2"]) + b["layer_2"])
        return layer_2

    tf_prediction = computation(tf_train_feat)
    tf_test_prediction = computation(tf_test_feat)

    tf_loss = tf.reduce_mean(tf.pow(tf_train_label - tf_prediction, 2))
    tf_loss = tf.reduce_mean(tf_loss + beta * tf.nn.l2_loss(W["layer_2"]))
    tf_optimizer = tf.train.AdamOptimizer(learning_rate).minimize(tf_loss)
    #tf_optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(tf_loss)

    init = tf.global_variables_initializer()
My TF session:
def accuracy(y_pred, y):
    a = 0
    for i in range(y.shape[0]):
        a += abs(1 - y_pred[i][0] / y[i])
    return round((1 - a / y.shape[0]) * 100, 3)

def accuracy_tensor(y_pred, y):
    a = 0
    for i in range(y.shape[0]):
        a += abs(1 - y_pred[i][0] / y[i])
    return round((1 - a / y.shape[0]) * 100, 3)

# Shuffles two arrays.
def shuffle_in_unison(a, b):
    assert len(a) == len(b)
    shuffled_a = np.empty(a.shape, dtype=a.dtype)
    shuffled_b = np.empty(b.shape, dtype=b.dtype)
    permutation = np.random.permutation(len(a))
    for old_index, new_index in enumerate(permutation):
        shuffled_a[new_index] = a[old_index]
        shuffled_b[new_index] = b[old_index]
    return shuffled_a, shuffled_b

train_epoch = int(5e4)
batch = int(200)
n_batch = int(X.shape[0] // batch)
prev_acc = 0
stable_count = 0

session = tf.InteractiveSession(graph=graph)
session.run(init)
print("Initialized.\n No. of epochs: %d.\n No. of batches: %d." % (train_epoch, n_batch))

for epoch in range(train_epoch):
    offset = (epoch * n_batch) % (Y.shape[0] - n_batch)
    for i in range(n_batch):
        x = X[offset:(offset + n_batch)]
        y = Y[offset:(offset + n_batch)]
        x, y = shuffle_in_unison(x, y)
        feed_dict = {tf_train_feat: x, tf_train_label: y}
        _, l, pred, pred_label = session.run([tf_optimizer, tf_loss, tf_prediction, tf_train_label],
                                             feed_dict=feed_dict)

    if epoch % 1 == 0:
        print("Epoch: %d. Batch loss: %f" % (epoch, l))
        test_pred = tf_test_prediction.eval(session=session)
        acc_test = accuracy(test_pred, test_label)
        acc_train = accuracy_tensor(pred, pred_label)
        print("Accuracy train set %s%%" % acc_train)
        print("Accuracy test set: %s%%" % acc_test)
Am I missing something in the Tensorflow code? Thanks!
Unless you have a very good reason not to use them, regression should have linear output units. I ran into a similar problem a while back and ended up using linear outputs and linear hidden units, which seemed to mirror the MLPRegressor in my case.
There is a great section on activation functions in Goodfellow's Deep Learning book, chapter 6, starting at page 181.
At the very least, try this for your output layer:
layer_2 = tf.matmul(layer_1, W["layer_2"]) + b["layer_2"]
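Applied to the computation function from the question, that change would look roughly like this (just a sketch; the rest of the graph stays as posted):
def computation(X):
    layer_1 = tf.nn.relu(tf.matmul(X, W["layer_1"]) + b["layer_1"])
    # linear output unit: no ReLU (or any other squashing) on the regression output
    layer_2 = tf.matmul(layer_1, W["layer_2"]) + b["layer_2"]
    return layer_2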
I'm having a hard time trying to set up a multilayer perceptron neural network to predict the next value of a time-series using Tensorflow.
I read the time-series from a file, split it into three arrays and use those arrays to train, test and validate the network. Unfortunately, my network answers 0.9999 to every input I give to it.
The image below shows the values I expect my network to output; note that they range from 2.8 to 4.2.
Now, these are the values my network predicts. Though they all seem the same, they're actually 0.9999... (with some differences in the 9th decimal place).
import csv
import numpy as np
from statsmodels.tsa.tsatools import lagmat
import tensorflow as tf
# Data split (values represent percentage)
perc_train = 0.5
perc_test = 0.4
perc_eval = 0.1
# Parameters
learning_rate = 10 ** -3
min_step_size_train = 10 ** -5
training_epochs = 250
display_step = 1
# Network Parameters
n_input = 15
n_classes = 1
n_hidden = (n_input + n_classes) / 2
def get_nn_sets(pmX, pmY):
    '''
    Splits data into three subsets
    '''
    trainningIndex = int(len(pmX) * perc_train)
    validationIndex = int(len(pmX) * perc_test) + trainningIndex
    pmXFit = pmX[:trainningIndex, :]
    pmYFit = pmY[:trainningIndex]
    pmXTest = pmX[trainningIndex:validationIndex, :]
    pmYTest = pmY[trainningIndex:validationIndex]
    pmxEvaluate = pmX[validationIndex:, :]
    pmYEvaluate = pmY[validationIndex:]
    return pmXFit, pmYFit, pmXTest, pmYTest, pmxEvaluate, pmYEvaluate
def read_dollar_file(clip_first=4000):
    '''
    Reads the CSV file containing the dollar value for Brazilian real during the years
    -----
    RETURNS:
    A matrix with the file contents
    '''
    str_vals = []
    with open('dolar.csv', 'rb') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',')
        for row in spamreader:
            # retrieving the first column of the file (the dollar value)
            str_vals.append(row[1])
    # removing title
    str_vals = str_vals[1:]
    # removing the empty strings (sunday and holidays have no values)
    y = filter(None, str_vals)
    # converting from string to float values
    y = np.array(y).astype(np.float)
    # checking if initial elements should be discarded
    if (clip_first > 0):
        y = y[clip_first:]
    return y
# Create model
def get_multilayer_perceptron(x):
    # Store layers weight & bias
    weights = {
        'h1': tf.Variable(tf.random_normal([n_input, n_hidden], dtype=tf.float64)),
        'out': tf.Variable(tf.random_normal([n_hidden, n_classes], dtype=tf.float64))
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_hidden], dtype=tf.float64)),
        'out': tf.Variable(tf.random_normal([n_classes], dtype=tf.float64))
    }
    # Hidden layer with relu activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Output layer with tanh activation
    out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
    out_layer = tf.nn.tanh(out_layer)
    return out_layer
def run_mlp(inp, outp):
    pmXFit, pmYFit, pmXTest, pmYTest, pmXEvaluate, pmYEvaluate = get_nn_sets(inp, outp)

    # tf Graph input
    x = tf.placeholder("float64", [None, n_input])
    y = tf.placeholder("float64", [None, n_classes])

    # Construct model
    pred = get_multilayer_perceptron(x)

    # Define loss and optimizer
    cost = tf.nn.l2_loss(tf.sub(pred, y))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    # Initializing the variables
    init = tf.initialize_all_variables()

    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)

        # Training cycle
        last_cost = min_step_size_train + 1
        for epoch in range(training_epochs):
            # Training data
            for i in range(len(pmXFit)):
                batch_x = np.reshape(pmXFit[i, :], (1, n_input))
                batch_y = np.reshape(pmYFit[i], (1, n_classes))
                # Run optimization
                sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})

            # Calculating data error
            c = 0.0
            for i in range(len(pmXTest)):
                batch_x = np.reshape(pmXTest[i, :], (1, n_input))
                batch_y = np.reshape(pmYTest[i], (1, n_classes))
                # Run cost function
                c += sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            c /= len(pmXTest)

            # Display logs per epoch step
            if epoch % display_step == 0:
                print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.30f}".format(c))

            if abs(c - last_cost) < min_step_size_train:
                break
            last_cost = c

        nn_predictions = np.array([])
        for i in range(len(pmXEvaluate)):
            batch_x = np.reshape(pmXEvaluate[i, :], (1, n_input))
            nn_predictions = np.append(nn_predictions, sess.run(pred, feed_dict={x: batch_x})[0])

        print("Optimization Finished!")

    nn_predictions.flatten()
    return [pmYEvaluate, nn_predictions]
inp = lagmat(read_dollar_file(), n_input, trim='both')
outp = inp[1:, 0]
inp = inp[:-1]
real_value, predicted_value = run_mlp(inp, outp)
I also tried different cost functions and it didn't work. I know I may be missing something really stupid, so I really appreciate your help.
Thanks.
From your code:
out_layer = tf.nn.tanh(out_layer)
tanh can only output values in (-1.0, 1.0); since your expected values range from 2.8 to 4.2, the network can never reach them. Removing this line will make it do better.
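Concretely, the output layer in get_multilayer_perceptron would become linear; a sketch of the one-line change (everything else stays as posted):
# Output layer without the tanh squashing, so predictions can reach the 2.8 - 4.2 range
out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
return out_layer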