Weird behaviour of dropout layer in tensorflow - python

I made a CNN model in TensorFlow that implements dropout layers.
I pass an is_training argument to the network function so that dropout is disabled during the testing phase, and I noticed that the errors are significantly higher when I disable it.
If I test the model with dropout still enabled (which makes no sense), I get an average error of 0.01, whereas if I test it with is_training set to False (while still training with dropout), I get an error of 0.8.
I can't understand where my error is.
Here is the model function:
def conv_net(x, arch, is_training=False):
    # MNIST data input is a 1-D vector of 784 features (28*28 pixels)
    # Reshape to match picture format [Height x Width x Channel]
    # Tensor input becomes 4-D: [Batch Size, Height, Width, Channel]
    x = tf.reshape(x, shape=[-1, 28, 28, 1])

    ### YOUR CODE STARTS HERE ###
    # Convolution Layer with F1 filters, a kernel size of K1 and ReLU activations
    pad = 'same'
    conv1 = tf.layers.conv2d(x, arch['conv1'][0], arch['conv1'][1], activation=tf.nn.relu)
    conv2 = tf.layers.conv2d(conv1, arch['conv2'][0], arch['conv2'][1], activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(conv2, arch['pool1'][0], arch['pool1'][0])
    drop1 = tf.layers.dropout(pool1, arch['dropout1'], training=is_training)

    conv3 = tf.layers.conv2d(drop1, arch['conv3'][0], arch['conv3'][1], activation=tf.nn.relu)  # TODO: add padding
    drop1_2 = tf.layers.dropout(conv3, arch['dropout1'], training=is_training)
    conv4 = tf.layers.conv2d(drop1_2, arch['conv4'][0], arch['conv4'][1], activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(conv4, arch['pool2'][0], arch['pool2'][0])
    drop2 = tf.layers.dropout(pool2, arch['dropout2'], training=is_training)

    flat = tf.contrib.layers.flatten(drop2)
    fc1 = tf.layers.dense(flat, arch['N'])
    out = tf.layers.dense(fc1, n_classes)
    ### YOUR CODE ENDS HERE ###
    return out
And the training function:
def train_test_model(hypers, save_final_model=False):
    # Running the training session
    print("Starting training session...")
    with tf.Session() as sess:
        # Run the initializer
        sess.run(init)
        total_batch = int(mnist.train.num_examples / hypers.batch_size)
        # Training cycle
        try:
            for epoch in range(hypers.n_epochs):
                avg_cost = 0.
                # Loop over all batches
                for i in range(total_batch):
                    batch_x, batch_y = mnist.train.next_batch(hypers.batch_size)
                    # Run optimization op (backprop) and cost op (to get loss value)
                    _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                                  y: batch_y})
                    # Compute average loss
                    avg_cost += c / total_batch
                # Display logs per epoch step
                if epoch % display_step == 0:
                    # Test model
                    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
                    # Calculate accuracy
                    # ORIGINAL:
                    # accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
                    # train_err = 1 - accuracy.eval({x: mnist.train.images, y: mnist.train.labels})
                    # valid_err = 1 - accuracy.eval({x: mnist.validation.images, y: mnist.validation.labels})
                    # WITH BATCHES FOR LESS MEM ALLOC
                    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
                    train_acc = 0
                    for i in range(total_batch):
                        batch_x, batch_y = mnist.train.next_batch(hypers.batch_size)
                        train_acc += accuracy.eval(feed_dict={x: batch_x,
                                                              y: batch_y})
                    train_acc /= total_batch
                    train_err = 1 - train_acc
                    valid_err = 1 - accuracy.eval({x: mnist.validation.images, y: mnist.validation.labels})
                    # Display accuracy
                    print("Epoch:", '%05d' % (epoch + 1), ", cost=",
                          "{:.9f}".format(avg_cost), ", train_err=", "{:.4f}".format(train_err), ", valid_err=",
                          "{:.4f}".format(valid_err))
                if epoch % 5 == 0:
                    v = input('Do you want to stop the model? [Y/n]')
                    if 'y' in v.lower():
                        raise KeyboardInterrupt
        except KeyboardInterrupt:
            hypers.n_epochs = epoch
            print("SIGINT Received, interrupting the training")

        print("\nOptimization Finished!\n")

        # Test model
        correct_prediction = tf.equal(tf.argmax(test_pred, 1), tf.argmax(y, 1))
        # Calculate accuracy
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        # modified to batches
        train_acc = 0
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(hypers.batch_size)
            train_acc += accuracy.eval(feed_dict={x: batch_x,
                                                  y: batch_y})
        train_acc /= total_batch
        train_err = 1 - train_acc
        #
        valid_err = 1 - accuracy.eval({x: mnist.validation.images, y: mnist.validation.labels})
        print("Optimized for ", '%05d' % (epoch + 1), "epochs, to obtain training error", "{:.4f}".format(train_err),
              ", and validation error", "{:.4f}".format(valid_err))
        confusion = tf.confusion_matrix(tf.argmax(pred, 1), tf.argmax(y, 1))
        print("\nValidation Confusion matrix:\n",
              confusion.eval({x: mnist.validation.images, y: mnist.validation.labels}))
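One thing worth double-checking about the API (a side note, not necessarily the cause of the error above): in tf.layers.dropout the second argument is the rate of units to drop, not a keep probability as in tf.nn.dropout, and the training flag controls whether anything is dropped at all. A common pattern is to make that flag a boolean placeholder so the same graph can be run in training and testing mode without being rebuilt; the snippet below is only a minimal sketch of that pattern with made-up names, not code from the question:
import tensorflow as tf

# Hypothetical boolean flag that can be fed at run time; defaults to False (test mode).
is_training = tf.placeholder_with_default(False, shape=(), name='is_training')

inputs = tf.placeholder(tf.float32, [None, 128])
# rate=0.25 drops 25% of activations when is_training is True;
# with the default False the layer is an identity pass-through.
dropped = tf.layers.dropout(inputs, rate=0.25, training=is_training)

# Training step:   sess.run(train_op, feed_dict={..., is_training: True})
# Evaluation step: sess.run(accuracy, feed_dict={...})  # uses the default False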

Related

Cost function with Nan values in Tensorflow

I'm implementing a neural network to map one vector of 194 values to another vector of 5 values. The values are floats in the interval [-10, 10].
The problem with my code below is that the avg_cost printed at the end is always NaN. I've read somewhere that this can happen when the activation function of the output layer is not set correctly, but I've left mine as the default (linear activation). I'm a newbie in this area and to TensorFlow, so I'm not sure what I'm doing wrong here. Is the linear activation function a correct decision? Or is something else wrong in my code?
import numpy as np
import pandas as pd
import tensorflow as tf
from math import floor

def neural_net(x):
    layer_1 = tf.layers.dense(inputs=x, units=97, activation=tf.nn.relu)
    out_layer = tf.layers.dense(inputs=layer_1, units=5)
    return out_layer

train_x = pd.read_csv("x_data.csv", sep=" ")
train_y = pd.read_csv("y_data.csv", sep=" ")
train_size = 0.9
train_cnt = int(floor(train_x.shape[0] * train_size))
x_train = train_x.iloc[0:train_cnt].values
y_train = train_y.iloc[0:train_cnt].values
x_test = train_x.iloc[train_cnt:].values
y_test = train_y.iloc[train_cnt:].values

x = tf.placeholder("float", [None, 194])
y = tf.placeholder("float", [None, 5])
nn_output = neural_net(x)

cost = tf.reduce_mean(tf.losses.mean_squared_error(labels=y, predictions=nn_output))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0001).minimize(cost)

training_epochs = 5000
display_step = 1000
batch_size = 30
keep_prob = tf.placeholder("float")
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(training_epochs):
        avg_cost = 0.0
        total_batch = int(len(x_train) / batch_size)
        x_batches = np.array_split(x_train, total_batch)
        y_batches = np.array_split(y_train, total_batch)
        for i in range(total_batch):
            batch_x, batch_y = x_batches[i], y_batches[i]
            _, c = sess.run([optimizer, cost],
                            feed_dict={
                                x: batch_x,
                                y: batch_y,
                                keep_prob: 0.8
                            })
            avg_cost += c / total_batch
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=",
                  "{:.9f}".format(avg_cost))

I have built a CNN for detecting human faces. From the very first epoch I am getting high accuracy. What might be the reason for it?

The training data set consists of face images taken from the YouTube Faces database, labelled as one, and non-face images taken from the 256 Object Categories dataset. 25k images are chosen for each of the positive and negative classes, so 50k in total for training, and another 10k images, not repeated, are taken from YouTube Faces and the 256 Object Categories.
The problem is that I am getting 99% accuracy after just 12k iterations, within the first epoch itself. I am also printing the cost value, and it starts from a very high value, around 596014.000. When the model is tested against other face images it performs very badly.
cost vs epoch graph
import tensorflow as tf
import read_data
from sklearn import metrics
import numpy as np
import os
import graph_plotter as gp

# Parameters
learning_rate = 0.001
epochs = 30
batch_size = 100
display_step = 5

# tf Graph input
input_data = tf.placeholder(tf.float32, [None, 27, 31, 3])
output_data = tf.placeholder(tf.float32, [None, 1])
keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)

# Getting train and test data
train_data, train_label, test_data, test_label = read_data.getData()

def conv2d(x, w, bias, k=1):
    x = tf.nn.conv2d(x, w, strides=[1, k, k, 1], padding='SAME')
    x = tf.nn.bias_add(x, bias)
    return tf.nn.relu(x)

# Performs max pooling on the convolution layer output
def maxpool2d(x, k=2):
    return tf.nn.max_pool(x,
                          ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME')

# Weights generated randomly according to layer
weights = {
    # Conv 4*4 , 3 inputs , 32 outputs
    'wc1': tf.Variable(tf.random_normal([4, 4, 3, 32])),
    # Conv 3*3 , 32 inputs , 64 outputs
    'wc2': tf.Variable(tf.random_normal([3, 3, 32, 64])),
    # Conv 5*6 , 64 inputs , 128 outputs
    'wc3': tf.Variable(tf.random_normal([5, 6, 64, 128])),
    # Conv 1*1 , 128 inputs , 256 outputs
    'wc4': tf.Variable(tf.random_normal([1, 1, 128, 256])),
    # Conv 1*1 , 256 inputs , 512 outputs
    'wc5': tf.Variable(tf.random_normal([1, 1, 256, 512])),
    # Output layer: 7*8*512 inputs and 1 output (face or non-face)
    'out': tf.Variable(tf.random_normal([7*8*512, 1]))
}
biases = {
    'bc1': tf.Variable(tf.random_normal([32])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bc3': tf.Variable(tf.random_normal([128])),
    'bc4': tf.Variable(tf.random_normal([256])),
    'bc5': tf.Variable(tf.random_normal([512])),
    'out': tf.Variable(tf.random_normal([1]))
}

def model(x, weight, bias, dropout):
    # Layer 1
    conv1 = conv2d(x, weight['wc1'], bias['bc1'])
    conv1 = maxpool2d(conv1, k=2)
    # Layer 2
    conv2 = conv2d(conv1, weight['wc2'], bias['bc2'])
    conv2 = maxpool2d(conv2, k=2)
    # Layer 3
    conv3 = conv2d(conv2, weight['wc3'], bias['bc3'])
    # Layer 4
    conv4 = conv2d(conv3, weight['wc4'], bias['bc4'])
    # Layer 5
    conv5 = conv2d(conv4, weight['wc5'], bias['bc5'])
    # conv5 = tf.nn.dropout(conv5, dropout)
    # Flattening data
    intermediate = tf.reshape(conv5, shape=[-1, 7*8*512])
    # Output layer
    output = tf.add(tf.matmul(intermediate, weight['out']), bias['out'])
    return output

pred = model(input_data, weights, biases, keep_prob)

l2_loss = 0.001*(
    tf.nn.l2_loss(weights.get('wc4')) +
    tf.nn.l2_loss(weights.get('wc5')) +
    tf.nn.l2_loss(weights.get('out')))

cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    pred, output_data)) + l2_loss
tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

correct_pred = tf.equal(
    tf.greater(sigmoid_output, 0.5), tf.greater(output_data, 0.5))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
y_p = tf.cast(tf.greater(sigmoid_output, 0.5), tf.int32)

saver = tf.train.Saver()
tf.add_to_collection('y_p', y_p)
tf.add_to_collection('pred', pred)
tf.add_to_collection('x', input_data)
tf.add_to_collection('y', output_data)
init = tf.global_variables_initializer()

with tf.device("/gpu:0"):
    with tf.Session() as sess:
        sess.run(init)
        train_data_minibatches = [train_data[k:k + batch_size]
                                  for k in range(0, len(train_data), batch_size)]
        train_label_minibatches = [train_label[k:k + batch_size]
                                   for k in range(0, len(train_label), batch_size)]
        step = 0
        batch_count = 0
        avg_cost_list = []
        avg_accuracy_list = []
        for epoch in range(epochs):
            print('Epoch ' + epoch.__str__())
            cost_list = []
            accuracy_list = []
            for batch_x, batch_y in zip(
                    train_data_minibatches, train_label_minibatches):
                batch_count += 1
                sess.run(optimizer, feed_dict={
                    input_data: batch_x, output_data: batch_y,
                    keep_prob: 0.75})
                # if epoch % display_step == 0:
                loss, acc, output = sess.run([cost, accuracy, sig],
                                             feed_dict={input_data: batch_x, output_data: batch_y, keep_prob: 0.75})
                cost_list.append(loss)
                accuracy_list.append(acc)
                print("Iter " + str(step * batch_size) + " Loss " + "{:.5f}".format(loss) + ", Training Accuracy= " +
                      "{:.5f}".format(acc))
                step += 1
            average_cost = sum(cost_list) / len(cost_list)
            average_acc = sum(accuracy_list) / len(accuracy_list)
            avg_cost_list.append(average_cost)
            avg_accuracy_list.append(average_acc)
            if epoch % display_step == 0:
                test_acc, y_pred = sess.run([accuracy, y_p], feed_dict={input_data: test_data,
                                                                        output_data: test_label,
                                                                        keep_prob: 0.75})
                print(metrics.confusion_matrix(test_label, y_pred))
                print("Testing Accuracy : " + "{:.5f}".format(test_acc))
        print("Optimization finished !!")
        # Saving cost vs epoch and accuracy vs epoch graphs.
        gp.cost_vs_epoch(avg_cost_list)
        gp.accuracy_vs_epoch(avg_accuracy_list)
        save_path = saver.save(sess=sess, save_path=save_path, write_meta_graph=True)
The line
correct_pred = tf.equal(tf.greater(sigmoid_output, 0.5),
                        tf.greater(output_data, 0.5))
is likely to be wrong: your code does not define sigmoid_output anywhere, only pred.
If that is not the problem, I would look into train_data. How many "True" labels (> 0.5) do you have? How many labels that evaluate to false do you have?
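A minimal way to fill the gaps the answer points at (assuming the intent was to threshold the sigmoid of the logits; note also that the Adam minimize op in the question is never assigned to the optimizer variable that the training loop runs) could look like this sketch:
# Assumed fixes, not from the original post: keep a handle on the train op
# and derive probabilities from the logits before thresholding.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

sigmoid_output = tf.nn.sigmoid(pred)   # probabilities in [0, 1]
# (the `sig` fetched in the training loop presumably refers to this same tensor)
correct_pred = tf.equal(tf.greater(sigmoid_output, 0.5),
                        tf.greater(output_data, 0.5))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))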

same batch but different batch size generate different result

For example, we have a 64*100 input batch that is sent to the TensorFlow graph, and it generates a 64*(n_hidden) output before going into the softmax or whatever loss function. If we put a 1*100 slice into the same graph, the result should be the first row of the previous output, but it is not. I used the TensorFlow MNIST example to test the comparison.
'''
A Multilayer Perceptron implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
from __future__ import print_function
import numpy as np

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
import tensorflow as tf

# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1

# Network Parameters
n_hidden_1 = 256  # 1st layer number of features
n_hidden_2 = 256  # 2nd layer number of features
n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)

# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])

# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Output layer with linear activation
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer

# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), name='layer1'),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='layer2'),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]), name='layer3')
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='layer1_b'),
    'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='layer2_b'),
    'out': tf.Variable(tf.random_normal([n_classes]), name='layer3_b')
}

# Construct model
pred = multilayer_perceptron(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
# optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
var = tf.all_variables()
trainer = tf.train.AdamOptimizer(learning_rate=learning_rate)
grads = trainer.compute_gradients(cost, var)
update = trainer.apply_gradients(grads)

# Initializing the variables
init = tf.initialize_all_variables()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            # _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
            # c, v, grad, Pred, bi = sess.run([cost, var, grads, pred, biases], feed_dict={x: batch_x, y: batch_y})
            Pred_2 = sess.run(pred, feed_dict={x: batch_x, y: batch_y})
            Pred_1 = sess.run(pred, feed_dict={x: batch_x[0:1, :], y: batch_y[0:1]})
            print(Pred_2[0] == Pred_1)
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=",
                  "{:.9f}".format(avg_cost))
            # print(len(v))
            # g1 = np.array(grad[0])
            # g2 = np.array(grad[1])
            # g3 = np.array(grad[2])
            # g4 = np.array(grad[3])
            # g5 = np.array(grad[4])
            # g6 = np.array(grad[5])
            # print(g1.shape)
            # print(g2.shape)
            # print(g3.shape)
            # print(g4.shape)
            # print(g5.shape)
            # print(g6.shape)
            # print(g6[0,:])
            # print(g6[1,:])
            # print(bi['out'])
            # print(type(updating))
    print("Optimization Finished!")

    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
print(Pred_2[0] == Pred_1) should print True everywhere, but it does not. It is strange.
The gradient descent path should be different if your weight and bias initialization is random and the gradients are different each time; it might take a path towards a different minimum.
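Note also that exact element-wise equality is a very strict test here: even with identical weights, the batched matrix multiplication and the single-row one can differ by tiny floating-point rounding, so a tolerance-based comparison (a small NumPy sketch) is usually more informative:
import numpy as np

# Compare the first row of the batched forward pass against the single-row pass
# with a small tolerance instead of exact equality.
print(np.allclose(Pred_2[0], Pred_1[0], rtol=1e-5, atol=1e-6))
print(np.max(np.abs(Pred_2[0] - Pred_1[0])))  # magnitude of the discrepancy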

Tensorflow accuracy at .99 but predictions awful

Maybe I'm making predictions wrong?
Here's the project... I have a greyscale input image that I am trying to segment. The segmentation is a simple binary classification (think foreground vs background). So the ground truth (y) is a matrix of 0's and 1's, which gives 2 classes. Oh, and the input image is square, so I just use one variable called n_input.
My accuracy essentially converges to 0.99, but when I make a prediction I get all zeros. EDIT: there is a single 1 in each output matrix, both in the same place...
Here's my session code (everything else is working)...
with tf.Session() as sess:
    sess.run(init)
    summary = tf.train.SummaryWriter('/tmp/logdir/', sess.graph_def)
    step = 1
    from tensorflow.contrib.learn.python.learn.datasets.scroll import scroll_data
    data = scroll_data.read_data('/home/kendall/Desktop/')
    # Keep training until reach max iterations
    flag = 0
    # while flag == 0:
    while step * batch_size < training_iters:
        batch_y, batch_x = data.train.next_batch(batch_size)
        # pdb.set_trace()
        # batch_x = batch_x.reshape((batch_size, n_input))
        batch_x = batch_x.reshape((batch_size, n_input, n_input))
        batch_y = batch_y.reshape((batch_size, n_input, n_input))
        batch_y = convert_to_2_channel(batch_y, batch_size)
        # batch_y = batch_y.reshape((batch_size, n_output, n_classes))
        batch_y = batch_y.reshape((batch_size, 200, 200, n_classes))
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
                                       keep_prob: dropout})
        if step % display_step == 0:
            flag = 1
            # Calculate batch loss and accuracy
            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
                                                              y: batch_y,
                                                              keep_prob: 1.})
            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc)
        step += 1
    print "Optimization Finished!"
    save_path = "model.ckpt"
    saver.save(sess, save_path)

    im = Image.open('/home/kendall/Desktop/HA900_frames/frame0635.tif')
    batch_x = np.array(im)
    pdb.set_trace()
    batch_x = batch_x.reshape((1, n_input, n_input))
    batch_x = batch_x.astype(float)
    # pdb.set_trace()
    prediction = sess.run(pred, feed_dict={x: batch_x, keep_prob: 1.})
    print prediction
    arr1 = np.empty((n_input, n_input))
    arr2 = np.empty((n_input, n_input))
    for i in xrange(n_input):
        for j in xrange(n_input):
            for k in xrange(2):
                if k == 0:
                    arr1[i][j] = prediction[0][i][j][k]
                else:
                    arr2[i][j] = prediction[0][i][j][k]
    # prediction = np.asarray(prediction)
    # prediction = np.reshape(prediction, (200,200))
    # np.savetxt("prediction.csv", prediction, delimiter=",")
    np.savetxt("prediction1.csv", arr1, delimiter=",")
    np.savetxt("prediction2.csv", arr2, delimiter=",")
Since there are two classes, that end part (with the couple of loops) just partitions the prediction into two n_input x n_input matrices.
I saved the prediction arrays to a CSV file, and like I said, they were all zeros.
I have also confirmed all data is correct (dimensions and values).
Why would the training converge, but predictions are awful?
If you want to look at all the code, here it is...
import tensorflow as tf
import pdb
import numpy as np
from numpy import genfromtxt
from PIL import Image

# Import MNIST data
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Parameters
learning_rate = 0.001
training_iters = 20000
batch_size = 128
display_step = 1

# Network Parameters
n_input = 200  # input image is n_input x n_input (200 x 200)
n_output = 40000  # number of output pixels (200*200)
n_classes = 2
# n_input = 200
dropout = 0.75  # Dropout, probability to keep units

# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input, n_input])
y = tf.placeholder(tf.float32, [None, n_input, n_input, n_classes])
keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)

# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
    # Conv2D wrapper, with bias and relu activation
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)

def maxpool2d(x, k=2):
    # MaxPool2D wrapper
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME')

# Create model
def conv_net(x, weights, biases, dropout):
    # Reshape input picture
    x = tf.reshape(x, shape=[-1, n_input, n_input, 1])
    # Convolution Layer
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    # Max Pooling (down-sampling)
    conv1 = maxpool2d(conv1, k=2)
    conv1 = tf.nn.local_response_normalization(conv1)
    # Convolution Layer
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    # Max Pooling (down-sampling)
    conv2 = tf.nn.local_response_normalization(conv2)
    conv2 = maxpool2d(conv2, k=2)
    # Convolution Layer
    conv3 = conv2d(conv2, weights['wc3'], biases['bc3'])
    # Max Pooling (down-sampling)
    conv3 = tf.nn.local_response_normalization(conv3)
    conv3 = maxpool2d(conv3, k=2)
    # pdb.set_trace()
    # Fully connected layer
    # Reshape conv3 output to fit fully connected layer input
    fc1 = tf.reshape(conv3, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Apply Dropout
    fc1 = tf.nn.dropout(fc1, dropout)
    output = []
    for i in xrange(2):
        output.append(tf.nn.softmax(tf.add(tf.matmul(fc1, weights['out']), biases['out'])))
    return output
    # return tf.nn.softmax(tf.add(tf.matmul(fc1, weights['out']), biases['out']))

# Store layers weight & bias
weights = {
    # 5x5 conv, 1 input, 32 outputs
    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
    # 5x5 conv, 32 inputs, 64 outputs
    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
    # 5x5 conv, 64 inputs, 128 outputs
    'wc3': tf.Variable(tf.random_normal([5, 5, 64, 128])),
    # fully connected, 25*25*128 inputs, 1024 outputs
    'wd1': tf.Variable(tf.random_normal([25*25*128, 1024])),
    # 1024 inputs, n_output (40000) outputs (class prediction)
    'out': tf.Variable(tf.random_normal([1024, n_output]))
}
biases = {
    'bc1': tf.Variable(tf.random_normal([32])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bc3': tf.Variable(tf.random_normal([128])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([n_output]))
}

# Construct model
pred = conv_net(x, weights, biases, keep_prob)
# pdb.set_trace()
pred = tf.pack(tf.transpose(pred, [1, 2, 0]))
pred = tf.reshape(pred, [-1, n_input, n_input, n_classes])

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.initialize_all_variables()
saver = tf.train.Saver()

def convert_to_2_channel(x, batch_size):
    # assume input has dimension (batch_size, x, y)
    # output will have dimension (batch_size, x, y, 2)
    output = np.empty((batch_size, 200, 200, 2))
    temp_arr1 = np.empty((batch_size, 200, 200))
    temp_arr2 = np.empty((batch_size, 200, 200))
    for i in xrange(batch_size):
        for j in xrange(200):
            for k in xrange(200):
                if x[i][j][k] == 1:
                    temp_arr1[i][j][k] = 1
                    temp_arr2[i][j][k] = 0
                else:
                    temp_arr1[i][j][k] = 0
                    temp_arr2[i][j][k] = 1
    for i in xrange(batch_size):
        for j in xrange(200):
            for k in xrange(200):
                for l in xrange(2):
                    if l == 0:
                        output[i][j][k][l] = temp_arr1[i][j][k]
                    else:
                        output[i][j][k][l] = temp_arr2[i][j][k]
    return output

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    summary = tf.train.SummaryWriter('/tmp/logdir/', sess.graph_def)
    step = 1
    from tensorflow.contrib.learn.python.learn.datasets.scroll import scroll_data
    data = scroll_data.read_data('/home/kendall/Desktop/')
    # Keep training until reach max iterations
    flag = 0
    # while flag == 0:
    while step * batch_size < training_iters:
        batch_y, batch_x = data.train.next_batch(batch_size)
        # pdb.set_trace()
        # batch_x = batch_x.reshape((batch_size, n_input))
        batch_x = batch_x.reshape((batch_size, n_input, n_input))
        batch_y = batch_y.reshape((batch_size, n_input, n_input))
        batch_y = convert_to_2_channel(batch_y, batch_size)
        # batch_y = batch_y.reshape((batch_size, n_output, n_classes))
        batch_y = batch_y.reshape((batch_size, 200, 200, n_classes))
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
                                       keep_prob: dropout})
        if step % display_step == 0:
            flag = 1
            # Calculate batch loss and accuracy
            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
                                                              y: batch_y,
                                                              keep_prob: 1.})
            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc)
        step += 1
    print "Optimization Finished!"
    save_path = "model.ckpt"
    saver.save(sess, save_path)

    im = Image.open('/home/kendall/Desktop/HA900_frames/frame0635.tif')
    batch_x = np.array(im)
    pdb.set_trace()
    batch_x = batch_x.reshape((1, n_input, n_input))
    batch_x = batch_x.astype(float)
    # pdb.set_trace()
    prediction = sess.run(pred, feed_dict={x: batch_x, keep_prob: 1.})
    print prediction
    arr1 = np.empty((n_input, n_input))
    arr2 = np.empty((n_input, n_input))
    for i in xrange(n_input):
        for j in xrange(n_input):
            for k in xrange(2):
                if k == 0:
                    arr1[i][j] = prediction[0][i][j][k]
                else:
                    arr2[i][j] = prediction[0][i][j][k]
    # prediction = np.asarray(prediction)
    # prediction = np.reshape(prediction, (200,200))
    # np.savetxt("prediction.csv", prediction, delimiter=",")
    np.savetxt("prediction1.csv", arr1, delimiter=",")
    np.savetxt("prediction2.csv", arr2, delimiter=",")

    # Calculate accuracy for 256 test images
    print "Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: data.test.images[:256],
                                      y: data.test.labels[:256],
                                      keep_prob: 1.})
Errors in the code
There are multiple errors in your code:
you shouldn't call tf.nn.sigmoid_cross_entropy_with_logits with the output of a softmax layer, but with the unscaled logits:
WARNING: This op expects unscaled logits, since it performs a softmax on logits internally for efficiency. Do not call this op with the output of softmax, as it will produce incorrect results.
In fact, since you have 2 classes, you should use a softmax loss, i.e. tf.nn.softmax_cross_entropy_with_logits.
When using tf.argmax(pred, 1), you only apply argmax over axis 1, which is the height of the output image. You should use tf.argmax(pred, 3) on the last axis (of size 2).
This might explain why you get 0.99 accuracy: on the output image, the argmax is taken over the height of the image, which is 0 by default (as all values are equal for each channel).
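A rough sketch of what the corrected loss and evaluation ops could look like, assuming pred is kept as unscaled logits of shape [batch, 200, 200, 2] (i.e. the softmax calls inside conv_net are removed); this follows the question's TF version, where the loss op takes (logits, labels) positionally:
# Flatten the spatial dimensions so every pixel is one 2-class example;
# the op applies the softmax internally, so pred must stay unscaled.
logits_flat = tf.reshape(pred, [-1, n_classes])
labels_flat = tf.reshape(y, [-1, n_classes])
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits_flat, labels_flat))

# Per-pixel class decision on the channel axis (size 2), not the height axis.
correct_pred = tf.equal(tf.argmax(pred, 3), tf.argmax(y, 3))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))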
Wrong model
The biggest drawback is that your model in general will be very hard to optimize.
You have a softmax over 40,000 classes, which is huge.
You do not take advantage at all of the fact that you want to output an image (the prediction foreground / background).
For instance, prediction 2,345 is highly correlated with predictions 2,346 and 2,545, but you don't take that into account.
I recommend reading a bit about semantic segmentation first:
this paper: Fully Convolutional Networks for Semantic Segmentation
these slides from CS231n (Stanford): especially the part about upsampling and deconvolution
Recommendations
If you want to work with TensorFlow, you will need to start small. First try a very simple network with maybe 1 hidden layer.
You need to print all the shapes of your tensors to make sure they correspond to what you expected. For instance, if you had printed the shape of tf.argmax(y, 1), you would have realized it is [batch_size, 200, 2] instead of the expected [batch_size, 200, 200].
TensorBoard is your friend; you should try to plot the input image here, as well as your predictions, to see what they look like (a minimal sketch follows at the end of these recommendations).
Try small, with a very small dataset of 10 images and see if you can overfit it and predict almost the exact response.
To conclude, I am not sure about all of my suggestions, but they are worth trying, and I hope this will help you on the path to success!
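As a minimal illustration of the TensorBoard suggestion (the tags and reshapes here are my own, and this uses the TF 1.x summary API rather than the older SummaryWriter API in the question's code):
# Log the input image and the per-pixel prediction as image summaries.
tf.summary.image('input', tf.reshape(x, [-1, n_input, n_input, 1]), max_outputs=3)
pred_mask = tf.cast(tf.argmax(pred, 3), tf.float32)        # [batch, 200, 200]
tf.summary.image('prediction', tf.expand_dims(pred_mask, -1), max_outputs=3)

merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('/tmp/logdir/', tf.get_default_graph())
# Inside the training loop:
#   summ = sess.run(merged, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})
#   writer.add_summary(summ, step)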

TensorFlow: Network output has not expected shape

I am implementing a network using TensorFlow.
The network take as input a binary feature vector, and it should predict a float value as output.
I am expecting a (1, 1) tensor as output from my function multilayer_perceptron(); instead, when running pred, it returns a vector of the same length as my input data, (X, 1).
Since I am new to this framework, I expect the error to be very trivial.
What am I doing wrong?
import tensorflow as tf
import numpy as np

print "**** Defining parameters..."
# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 1
display_step = 1

print "**** Defining Network..."
# Network Parameters
n_hidden_1 = 10  # 1st layer num features
n_hidden_2 = 10  # 2nd layer num features
n_input = Xa.shape[1]  # data input (feature vector length)
n_classes = 1  # total classes (IC50 value)

# tf Graph input
x = tf.placeholder("int32", [batch_size, None])
y = tf.placeholder("float", [None, n_classes])

# Create model
def multilayer_perceptron(_X, _weights, _biases):
    lookup_h1 = tf.nn.embedding_lookup(_weights['h1'], _X)
    layer_1 = tf.nn.relu(tf.add(tf.reduce_sum(lookup_h1, 0), _biases['b1']))  # Hidden layer with RELU activation
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, _weights['h2']), _biases['b2']))  # Hidden layer with RELU activation
    pred = tf.matmul(layer_2, _weights['out']) + _biases['out']
    return pred

# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# Construct model
pred = multilayer_perceptron(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.square(tf.sub(pred, y)))  # MSE
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)  # Gradient descent

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.initialize_all_variables()

print "**** Launching the graph..."
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    print "**** Training..."
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(Xa.tocsc().shape[0]/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            # Extract sample
            batch_xs = Xa.tocsc()[i, :].tocoo()
            batch_ys = np.reshape(Ya.tocsc()[i, 0], (batch_size, 1))
            # ****************************************************************************
            # Extract sparse indices from the input matrix (they will be used as the actual input)
            ids = batch_xs.nonzero()[1]
            # Fit training using batch data
            sess.run(optimizer, feed_dict={x: ids, y: batch_ys})
            # Compute average loss
            avg_cost += sess.run(cost, feed_dict={x: ids, y: batch_ys})/total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
    print "Optimization Finished!"
Your pred should be of shape [n_input, n_class] because you define weights['out'] and biases['out'] that way. The only way you get a (1, 1) tensor from pred is if your n_class = 1...
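One quick way to see where the unexpected first dimension comes from (a generic debugging sketch using the names from the question) is to print the static shape of every intermediate tensor inside multilayer_perceptron:
def multilayer_perceptron(_X, _weights, _biases):
    lookup_h1 = tf.nn.embedding_lookup(_weights['h1'], _X)
    print "lookup_h1:", lookup_h1.get_shape()
    layer_1 = tf.nn.relu(tf.add(tf.reduce_sum(lookup_h1, 0), _biases['b1']))
    print "layer_1:", layer_1.get_shape()
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, _weights['h2']), _biases['b2']))
    print "layer_2:", layer_2.get_shape()   # its first dimension is the "X" that shows up in (X, 1)
    pred = tf.matmul(layer_2, _weights['out']) + _biases['out']
    print "pred:", pred.get_shape()
    return pred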
