Logits and labels have different first dimensions - python

This was the error I got: InvalidArgumentError (see above for traceback): logits and labels must have the same first dimension, got logits shape [30,5] and labels shape [50]
I'm using a batch size of 50. The number of outputs for my classification problem is 5.
I have no idea where the 30 in the logits shape is coming from. This is my architecture:
with tf.name_scope("pool3"):
pool3 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
pool3_flat = tf.reshape(pool3, shape=[-1, 24000]) # must be a multiple of the input
pool3_flat_drop = tf.layers.dropout(pool3_flat, conv2_dropout_rate, training=training)
with tf.name_scope("fc1"):
flattened = tf.layers.flatten(pool3_flat_drop)
fc1 = tf.layers.dense(flattened , n_fc1, activation=tf.nn.relu, name="fc1")
fc1_drop = tf.layers.dropout(fc1, fc1_dropout_rate, training=training)
with tf.name_scope("output"):
# n_outputs = number of possible classes
logits = tf.layers.dense(fc1_drop, n_outputs, name="output")
Y_proba = tf.nn.softmax(logits, name="Y_proba")
with tf.name_scope("train"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
Also, here is how I declared my placeholders:
with tf.name_scope('inputs'):
    X = tf.placeholder(tf.float32, shape=[None, n_inputs], name='X')
    X_reshaped = tf.reshape(X, shape=[-1, height, width, channels])  # make applicable to convolutional layers
    y = tf.placeholder(tf.int32, shape=[None], name='y')
    training = tf.placeholder_with_default(False, shape=[], name='training')

I think it is caused by pool3_flat = tf.reshape(pool3, shape=[-1, 24000]). You should check whether 24000 really is the number of elements per example coming out of pool3; if it is not, the -1 dimension absorbs the difference and the batch dimension silently changes (here from 50 to 30), which is exactly what the error reports.
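A minimal sketch of how to check that (np is numpy, imported as usual):

import numpy as np
# Number of elements per example actually produced by pool3:
# skip the batch dimension, multiply the rest.
n_flat = int(np.prod(pool3.get_shape().as_list()[1:]))
print(n_flat)  # if this prints 14400 rather than 24000 (which is what
               # [30, 5] logits from 50 labels implies), the reshape is the culprit
# Safer: derive the size instead of hard-coding it
pool3_flat = tf.reshape(pool3, shape=[-1, n_flat])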


How do I explain this TensorFlow tf.nn.conv2d() layer shape?

My Tensorflow convolutional layer has a shape I did not expect it to have and I do not see the mistake.
I am new to TensorFlow and want to use this function to create a convolutional layer:
def new_conv_layer(input,              # The previous layer.
                   num_input_channels, # Num. channels in prev. layer.
                   filter_size,        # Width and height of each filter.
                   num_filters,        # Number of filters.
                   use_pooling=True):  # Use 2x2 max-pooling.
    shape = [filter_size, filter_size, num_input_channels, num_filters]
    weights = new_weights(shape=shape)
    biases = new_biases(length=num_filters)
    layer = tf.nn.conv2d(input=input,
                         filters=weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME')
    layer += biases
    if use_pooling:
        layer = tf.nn.max_pool(input=layer,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')
    layer = tf.nn.relu(layer)
    return layer, weights
But when I use it with
num_channels = 1
img_size = 28
x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])

# Convolutional Layer 1.
filter_size1 = 5   # Convolution filters are 5 x 5 pixels.
num_filters1 = 16  # There are 16 of these filters.

layer_conv1, weights_conv1 = new_conv_layer(input=x_image,
                                            num_input_channels=num_channels,
                                            filter_size=filter_size1,
                                            num_filters=num_filters1,
                                            use_pooling=True)
layer_conv1
I get this output:
<tf.Tensor 'Relu:0' shape=(None, 392, 392, 16) dtype=float32>
Because my images are of a square 28x28 shape and I apply 2x2 pooling, I would have expected this shape to be (None, 14, 14, 16).
Why is that not the case and how do I fix it?
In my case this line was incorrect: x = tf.compat.v1.placeholder(tf.float32, shape=[None, img_size_flat], name='x').
In particular, img_size_flat was not the length of each "stretched" image (it should have been img_size * img_size = 784), as it should have been.
img_size_flat = df.drop('label', axis=1).shape[1]
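A quick sanity check along these lines (a sketch, assuming df is a pandas DataFrame with one 'label' column plus one column per pixel):

img_size = 28
img_size_flat = df.drop('label', axis=1).shape[1]
# Each flattened image must contain exactly img_size * img_size values;
# otherwise the placeholder and the later reshape disagree and the
# downstream layer shapes come out wrong.
assert img_size_flat == img_size * img_size, (img_size_flat, img_size * img_size)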

Shape mismatch: shape of labels is incompatible with shape of logits

I am trying to train a model in TensorFlow. I've got a problem with the labels. Here is my input function:
def my_input_fn():
    filenames = tf.constant(glob.glob("C:/test_proje/*.jpg"))
    labels = tf.constant([0, 0, 1, 1, 1, 1, 1, 0, 0, 0])
    labels = tf.one_hot(labels, 2)
    dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
    dataset = dataset.map(_parse_function)
    return dataset
And here is the CNN model
def cnn_model_fn(features, labels, mode):
    """Model function for CNN."""
    print(labels.shape)
    print(labels[0])

    # Input Layer
    input_layer = tf.reshape(features["image"], [-1, 168, 84, 3])

    # Convolutional Layer #1
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)

    # Pooling Layer #1
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    # Convolutional Layer #2 and Pooling Layer #2
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    # Dense Layer
    pool2_flat = tf.reshape(pool2, [-1, 42 * 21 * 64])
    dense = tf.layers.dense(inputs=pool2_flat, units=4, activation=tf.nn.relu)
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)

    # Logits Layer
    logits = tf.layers.dense(inputs=dropout, units=2)

    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by
        # the `logging_hook`.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate Loss (for both TRAIN and EVAL modes)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])}
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
I am getting this error: ValueError: Shape mismatch: The shape of labels (received (2,)) should equal the shape of logits except for the last dimension (received (1, 2)).
When I print the shape of labels before cnn_model_fn, it is (10, 2). But when I print it inside cnn_model_fn, it suddenly becomes (2,).
Thanks.
I think it's your use of sparse_softmax_cross_entropy: you feed it one-hot encoded labels, but it expects plain integer class indices. Switch to the normal softmax_cross_entropy (which does take one-hot labels) and see if that works.
As for the shape change: Dataset.from_tensor_slices slices the (10, 2) label tensor along its first dimension, so without a .batch(...) call the model function receives one (2,)-shaped label at a time.
https://stackoverflow.com/a/37317322/7431458
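A minimal sketch of the two options, using the names from the question (labels_int stands for the integer labels before the tf.one_hot call):

# Option 1: keep tf.one_hot in my_input_fn and use the dense-label loss
loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)

# Option 2: drop the tf.one_hot call and keep the sparse loss,
# which wants integer class indices of shape [batch_size]
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels_int, logits=logits)

Either way, remember to batch the dataset in my_input_fn (e.g. dataset = dataset.batch(10)), otherwise the estimator sees one example at a time.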

CNN model not able to make prediction

I trained a CNN model successfully, however I am getting errors when I feed images to the model for it to predict the labels.
This is my model (I am restoring it with saver.restore)...
# load dataset
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# interactive session
sess = tf.InteractiveSession()
# data and labels placeholder
x = tf.placeholder(tf.float32, shape=[None, 784])
y = tf.placeholder(tf.float32, shape=[None, 10])
# 32 filters of size 5x5 and 32 biases,
# the filters are used to create 32 feature maps
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_img = tf.reshape(x, [-1, 28, 28, 1])
# first layer activated using a Relu activation function
conv1 = tf.nn.relu(conv2d(x_img, W_conv1) + b_conv1)
pool1 = max_pool_2x2(conv1)
# 64 filters of size 5x5
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
# second layer
conv2 = tf.nn.relu(conv2d(pool1, W_conv2) + b_conv2)
pool2 = max_pool_2x2(conv2)
# fully connected layer with 1024 neurons
W_fully = weight_variable([7 * 7 * 64, 1024])
b_fully = bias_variable([1024])
pool2flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
fully = tf.nn.relu(tf.matmul(pool2flat, W_fully) + b_fully)
# dropout layer removes dead neurons
prob_drop = tf.placeholder(tf.float32)
dropout = tf.nn.dropout(fully, prob_drop)
# readout layer that will return the raw values
# of our predictions
W_readout = weight_variable([1024, 10])
b_readout = bias_variable([10])
y_conv = tf.matmul(dropout, W_readout) + b_readout
# loss function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y))
# restore the trained CNN model
saver = tf.train.Saver()
saver.restore(sess, "/tmp/model2.ckpt")
y_conv is the predictor.
The model is trained on the mnist dataset, now I have an image of a number and I want the model to tell me what it thinks it is in terms of accuracy. I tried the following...
prediction = tf.argmax(y_conv, 1)
print(sess.run(prediction, feed_dict={x:two_images[0]}))
After feeding the image two_images[0] to the model, I got the following error...
ValueError: Cannot feed value of shape (784,) for Tensor 'Placeholder:0', which has shape '(?, 784)'
So I fixed it by doing the following...
prediction = tf.argmax(y_conv, 1)
print(sess.run(prediction, feed_dict={x:two_images[0].reshape((1, 784))}))
But now I am getting a whole bunch of errors that I cannot decipher...
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder_2' with dtype float
[[Node: Placeholder_2 = Placeholderdtype=DT_FLOAT, shape=, _device="/job:localhost/replica:0/task:0/device:CPU:0"]]
I am not sure what I am doing wrong.
EDIT
This is how I populate the variable two_images...
# extract the indices of the number 2
two_idxs_list = np.where(mnist.test.labels[:, 2].astype(int) == 1)
two_idxs = two_idxs_list[0][:10]
# use the indices to extract the images of 2 and their corresponding label
two_images = mnist.test.images[two_idxs]
two_labels = mnist.test.labels[two_idxs]
Okay, with the code added I was able to test it on my machine. The issue is that your network expects more inputs than just the image. The prediction path runs through the dropout layer, so its keep-probability placeholder has to be fed as well: 'Placeholder_2' in the error is the third placeholder you created, which is prob_drop. And if you run anything that touches the loss, you also have to supply a label, maybe just some zeroes; obviously the loss calculation will be wrong then, but you're not interested in that, only in the prediction. So your sess.run line should be:
print(sess.run(prediction, feed_dict={
    x: two_images[0].reshape((1, 784)),
    y: np.zeros(shape=(1, 10), dtype=np.float32),
    prob_drop: 1.0}))  # keep probability 1.0 disables dropout at inference

Tensorflow image segmentation via linear regression

Previously I built a network that implemented a binary image segmentation -- foreground & background. I did this by having two classifications. Now instead of a binary classification, I want to do a linear regression of each pixel.
Say there is a 3D surface within the image view, I want to segment the exact middle of that surface with a linear value 10. The edge of the surface will be, let's say, 5. Of course all the voxels in between are within the range 5-10. Then, as the voxels move away from the surface the values quickly go down to zero.
With the binary classification I had an image with 1's in the places of the foreground and an image with 1's in the place of the background -- in other words a classification :) Now I want to have just one ground truth image with values like the following...
Via this linear regression example, I assumed I could simply change the cost function to a least square function -- cost = tf.square(y - pred). And of course I would change the ground truth.
However, when I do this, my predictions output NaN. My last layer is a linear sum of matrix weight values multiplied by the final output. I'm guessing this has something to do with it? I can't make it a tf.nn.softmax() function because that would normalize the values between 0 and 1.
So I believe cost = tf.square(y - pred) is the source of the issue. I tried this next... cost = tf.reduce_sum(tf.square(y - pred)) and that didn't work.
So then I tried this (recommended here) cost = tf.reduce_sum(tf.pow(pred - y, 2))/(2 * batch_size) and that didn't work.
Should I be initializing weights differently? Normalize weights?
Full code looks like this:
import tensorflow as tf
import pdb
import numpy as np
from numpy import genfromtxt
from PIL import Image
from tensorflow.python.ops import rnn, rnn_cell
from tensorflow.contrib.learn.python.learn.datasets.scroll import scroll_data
# Parameters
learning_rate = 0.001
training_iters = 1000000
batch_size = 2
display_step = 1
# Network Parameters
n_input_x = 396 # Input image x-dimension
n_input_y = 396 # Input image y-dimension
n_classes = 1 # Binary classification -- on a surface or not
n_steps = 396
n_hidden = 128
n_output = n_input_y * n_classes
dropout = 0.75 # Dropout, probability to keep units
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input_x, n_input_y])
y = tf.placeholder(tf.float32, [None, n_input_x * n_input_y], name="ground_truth")
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
    # Conv2D wrapper, with bias and relu activation
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)

def maxpool2d(x, k=2):
    # MaxPool2D wrapper
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME')

def deconv2d(prev_layer, w, b, output_shape, strides):
    # Deconv layer
    deconv = tf.nn.conv2d_transpose(prev_layer, w, output_shape=output_shape, strides=strides, padding="VALID")
    deconv = tf.nn.bias_add(deconv, b)
    deconv = tf.nn.relu(deconv)
    return deconv
# Create model
def net(x, cnn_weights, cnn_biases, dropout):
    # Reshape input picture
    x = tf.reshape(x, shape=[-1, 396, 396, 1])

    with tf.name_scope("conv1") as scope:
        # Convolution Layer
        conv1 = conv2d(x, cnn_weights['wc1'], cnn_biases['bc1'])
        # Max Pooling (down-sampling)
        # conv1 = tf.nn.local_response_normalization(conv1)
        conv1 = maxpool2d(conv1, k=2)

    # Convolution Layer
    with tf.name_scope("conv2") as scope:
        conv2 = conv2d(conv1, cnn_weights['wc2'], cnn_biases['bc2'])
        # Max Pooling (down-sampling)
        # conv2 = tf.nn.local_response_normalization(conv2)
        conv2 = maxpool2d(conv2, k=2)

    # Convolution Layer
    with tf.name_scope("conv3") as scope:
        conv3 = conv2d(conv2, cnn_weights['wc3'], cnn_biases['bc3'])
        # Max Pooling (down-sampling)
        # conv3 = tf.nn.local_response_normalization(conv3)
        conv3 = maxpool2d(conv3, k=2)

    temp_batch_size = tf.shape(x)[0]  # batch_size shape

    with tf.name_scope("deconv1") as scope:
        output_shape = [temp_batch_size, 99, 99, 64]
        strides = [1, 2, 2, 1]
        # conv4 = deconv2d(conv3, weights['wdc1'], biases['bdc1'], output_shape, strides)
        deconv = tf.nn.conv2d_transpose(conv3, cnn_weights['wdc1'], output_shape=output_shape, strides=strides, padding="SAME")
        deconv = tf.nn.bias_add(deconv, cnn_biases['bdc1'])
        conv4 = tf.nn.relu(deconv)
        # conv4 = tf.nn.local_response_normalization(conv4)

    with tf.name_scope("deconv2") as scope:
        output_shape = [temp_batch_size, 198, 198, 32]
        strides = [1, 2, 2, 1]
        conv5 = deconv2d(conv4, cnn_weights['wdc2'], cnn_biases['bdc2'], output_shape, strides)
        # conv5 = tf.nn.local_response_normalization(conv5)

    with tf.name_scope("deconv3") as scope:
        output_shape = [temp_batch_size, 396, 396, 1]
        # this time don't use ReLU -- since output layer
        conv6 = tf.nn.conv2d_transpose(conv5, cnn_weights['wdc3'], output_shape=output_shape, strides=[1, 2, 2, 1], padding="VALID")
        x = tf.nn.bias_add(conv6, cnn_biases['bdc3'])

    # Include dropout
    # conv6 = tf.nn.dropout(conv6, dropout)
    x = tf.reshape(conv6, [-1, n_input_x, n_input_y])

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshaping to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input_x])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_hidden)
    # This input shape is required by `rnn` function
    x = tf.split(0, n_steps, x)

    # Define a lstm cell with tensorflow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True, activation=tf.nn.relu)
    # lstm_cell = rnn_cell.MultiRNNCell([lstm_cell] * 12, state_is_tuple=True)
    # lstm_cell = rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=0.8)
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    # pdb.set_trace()
    output = []
    for i in xrange(396):
        output.append(tf.matmul(outputs[i], lstm_weights[i]) + lstm_biases[i])
    return output
cnn_weights = {
    # 5x5 conv, 1 input, 32 outputs
    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
    # 5x5 conv, 32 inputs, 64 outputs
    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
    # 5x5 conv, 64 inputs, 128 outputs
    'wc3': tf.Variable(tf.random_normal([5, 5, 64, 128])),
    'wdc1': tf.Variable(tf.random_normal([2, 2, 64, 128])),
    'wdc2': tf.Variable(tf.random_normal([2, 2, 32, 64])),
    'wdc3': tf.Variable(tf.random_normal([2, 2, 1, 32])),
}
cnn_biases = {
    'bc1': tf.Variable(tf.random_normal([32])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bc3': tf.Variable(tf.random_normal([128])),
    'bdc1': tf.Variable(tf.random_normal([64])),
    'bdc2': tf.Variable(tf.random_normal([32])),
    'bdc3': tf.Variable(tf.random_normal([1])),
}
lstm_weights = {}
lstm_biases = {}
for i in xrange(396):
    lstm_weights[i] = tf.Variable(tf.random_normal([n_hidden, n_output]))
    lstm_biases[i] = tf.Variable(tf.random_normal([n_output]))
# Construct model
# with tf.name_scope("net") as scope:
pred = net(x, cnn_weights, cnn_biases, keep_prob)
# pdb.set_trace()
pred = tf.pack(pred)
pred = tf.transpose(pred, [1,0,2])
pred = tf.reshape(pred, [-1, n_input_x * n_input_y])
with tf.name_scope("opt") as scope:
# cost = tf.reduce_sum(tf.square(y-pred))
cost = tf.reduce_sum(tf.pow((pred-y),2)) / (2*batch_size)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
with tf.name_scope("acc") as scope:
# accuracy is the difference between prediction and ground truth matrices
correct_pred = tf.equal(0,tf.cast(tf.sub(cost,y), tf.int32))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()
saver = tf.train.Saver()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    summary = tf.train.SummaryWriter('/tmp/logdir/', sess.graph)  # initialize graph for tensorboard
    step = 1
    # Import data
    data = scroll_data.read_data('/home/kendall/Desktop/')
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = data.train.next_batch(batch_size)
        # Run optimization op (backprop)
        # pdb.set_trace()
        batch_x = batch_x.reshape((batch_size, n_input_x, n_input_y))
        batch_y = batch_y.reshape(batch_size, n_input_x * n_input_y)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        step = step + 1
        if step % display_step == 0:
            batch_y = batch_y.reshape(batch_size, n_input_x * n_input_y)
            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
                                                              y: batch_y})
            # Make prediction
            im = Image.open('/home/kendall/Desktop/cropped/temp data0001.tif')
            batch_x = np.array(im)
            batch_x = batch_x.reshape((1, n_input_x, n_input_y))
            batch_x = batch_x.astype(float)
            prediction = sess.run(pred, feed_dict={x: batch_x})
            prediction = prediction.reshape((1, n_input_x * n_input_y))
            prediction = tf.nn.softmax(prediction)
            prediction = prediction.eval()
            prediction = prediction.reshape((n_input_x, n_input_y))
            # my_accuracy = accuracy_custom(temp_arr1, batch_y[0,:,:,0])
            # print "Step = " + str(step) + " | Accuracy = " + str(my_accuracy)
            print "Step = " + str(step) + " | Accuracy = " + str(acc)
            # csv_file = "CNN-LSTM-reg/CNNLSTMreg-step-" + str(step) + "-accuracy-" + str(my_accuracy) + ".csv"
            csv_file = "CNN-LSTM-reg/CNNLSTMreg-step-" + str(step) + "-accuracy-" + str(acc) + ".csv"
            np.savetxt(csv_file, prediction, delimiter=",")
As said in the comments, a good weight initialization is key to the success of a model:
too high: the model will not learn and may produce NaN values
too low: the model will learn very slowly, because the gradient will be too small (see vanishing gradients)
There are good initializations already provided in TensorFlow (as a contribution, in tf.contrib.layers), feel free to use them.
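For example, a minimal sketch of swapping the tf.random_normal initializations (stddev 1.0, far too large here) for Xavier initialization from tf.contrib.layers, reusing the question's variable names:

from tensorflow.contrib.layers import xavier_initializer_conv2d

init = xavier_initializer_conv2d()
cnn_weights = {
    # Same filter shapes as before, but the variance is scaled to the
    # fan-in/fan-out of each filter instead of a fixed stddev of 1.0.
    'wc1': tf.get_variable('wc1', shape=[5, 5, 1, 32], initializer=init),
    'wc2': tf.get_variable('wc2', shape=[5, 5, 32, 64], initializer=init),
    'wc3': tf.get_variable('wc3', shape=[5, 5, 64, 128], initializer=init),
}
# Biases can simply start at zero:
cnn_biases = {
    'bc1': tf.Variable(tf.zeros([32])),
    'bc2': tf.Variable(tf.zeros([64])),
    'bc3': tf.Variable(tf.zeros([128])),
}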

Tensorflow reshaping a tensor

I'm trying to use tf.nn.sparse_softmax_cross_entropy_with_logits and I have followed the answer by user Olivier Moindrot [here][1], but I'm getting a dimension error.
I'm building a segmentation network, so the input image is 200x200 and the output image is 200x200. The classification is binary, so foreground and background.
After I build the CNN pred = conv_net(x, weights, biases, keep_prob)
pred looks like this <tf.Tensor 'Add_1:0' shape=(?, 40000) dtype=float32>
The CNN has a couple of conv layers followed by a fully connected layer. The fully connected layer is 40000 because it is 200x200 flattened.
According to the above link, I reshape pred like so...
(side note: I also tried packing tf.pack() two pred's -- like above -- together, but I thought that was wrong)
pred = tf.reshape(pred, [-1, 200, 200, 2])
...so that there are 2 classifications. Continuing the above link...
temp_pred = tf.reshape(pred, [-1,2])
temp_y = tf.reshape(y, [-1])
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(temp_pred, temp_y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
I have the following placeholders and batch data...
x = tf.placeholder(tf.float32, [None, 200, 200])
y = tf.placeholder(tf.int64, [None, 200, 200])
(Pdb) batch_x.shape
(10, 200, 200)
(Pdb) batch_y.shape
(10, 200, 200)
When I run a training session, I get the following dimension error:
tensorflow.python.framework.errors.InvalidArgumentError: logits first dimension must match labels size. logits shape=[3200000,2] labels shape=[400000]
My full code looks like this:
import tensorflow as tf
import pdb
import numpy as np
# Import MINST data
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Parameters
learning_rate = 0.001
training_iters = 200000
batch_size = 10
display_step = 1
# Network Parameters
n_input = 200 # input images are 200x200
n_classes = 2 # two classes: foreground and background
n_output = 40000 # 200*200 outputs, one per pixel
#n_input = 200
dropout = 0.75 # Dropout, probability to keep units
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input, n_input])
y = tf.placeholder(tf.int64, [None, n_input, n_input])
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
    # Conv2D wrapper, with bias and relu activation
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)

def maxpool2d(x, k=2):
    # MaxPool2D wrapper
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME')
# Create model
def conv_net(x, weights, biases, dropout):
    # Reshape input picture
    x = tf.reshape(x, shape=[-1, 200, 200, 1])

    # Convolution Layer
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    # Max Pooling (down-sampling)
    # conv1 = tf.nn.local_response_normalization(conv1)
    # conv1 = maxpool2d(conv1, k=2)

    # Convolution Layer
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    # Max Pooling (down-sampling)
    # conv2 = tf.nn.local_response_normalization(conv2)
    # conv2 = maxpool2d(conv2, k=2)

    # Convolution Layer
    conv3 = conv2d(conv2, weights['wc3'], biases['bc3'])
    # Max Pooling (down-sampling)
    # conv3 = tf.nn.local_response_normalization(conv3)
    # conv3 = maxpool2d(conv3, k=2)
    # return conv3

    # Fully connected layer
    # Reshape conv2 output to fit fully connected layer input
    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Apply Dropout
    fc1 = tf.nn.dropout(fc1, dropout)

    # Output, class prediction
    return tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    # output = []
    # for i in xrange(2):
    #     # output.append(tf.nn.softmax(tf.add(tf.matmul(fc1, weights['out']), biases['out'])))
    #     output.append((tf.add(tf.matmul(fc1, weights['out']), biases['out'])))
    # return output
# Store layers weight & bias
weights = {
    # 5x5 conv, 1 input, 32 outputs
    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
    # 5x5 conv, 32 inputs, 64 outputs
    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
    # 5x5 conv, 64 inputs, 128 outputs
    'wc3': tf.Variable(tf.random_normal([5, 5, 64, 128])),
    # fully connected, 50*50*64 inputs, 1024 outputs
    'wd1': tf.Variable(tf.random_normal([50*50*64, 1024])),
    # 1024 inputs, 40000 outputs (one per pixel)
    'out': tf.Variable(tf.random_normal([1024, n_output]))
}
biases = {
    'bc1': tf.Variable(tf.random_normal([32])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bc3': tf.Variable(tf.random_normal([128])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([n_output]))
}
# Construct model
pred = conv_net(x, weights, biases, keep_prob)
pdb.set_trace()
# pred = tf.pack(tf.transpose(pred,[1,2,0]))
pred = tf.reshape(pred, [-1, n_input, n_input, 2])
temp_pred = tf.reshape(pred, [-1,2])
temp_y = tf.reshape(y, [-1])
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(temp_pred, temp_y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
# correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
temp_pred2 = tf.reshape(pred, [-1,n_input,n_input])
correct_pred = tf.equal(tf.cast(y,tf.float32),tf.sub(temp_pred2,tf.cast(y,tf.float32)))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    summ = tf.train.SummaryWriter('/tmp/logdir/', sess.graph_def)
    step = 1
    from tensorflow.contrib.learn.python.learn.datasets.scroll import scroll_data
    data = scroll_data.read_data('/home/kendall/Desktop/')
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = data.train.next_batch(batch_size)
        # Run optimization op (backprop)
        batch_x = batch_x.reshape((batch_size, n_input, n_input))
        batch_y = batch_y.reshape((batch_size, n_input, n_input))
        batch_y = np.int64(batch_y)
        # y = tf.reshape(y, [-1, n_input, n_input])
        pdb.set_trace()
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})
        if step % display_step == 0:
            # Calculate batch loss and accuracy
            pdb.set_trace()
            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})
            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc)
        step += 1
    print "Optimization Finished!"
    # Calculate accuracy for 256 test images
    print "Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: data.test.images[:256],
                                      y: data.test.labels[:256],
                                      keep_prob: 1.})
[1]: http://stackoverflow.com/questions/35317029/how-to-implement-pixel-wise-classification-for-scene-labeling-in-tensorflow/37294185?noredirect=1#comment63253577_37294185
Let's forget about softmax and use a simpler tf.nn.sigmoid_cross_entropy_with_logits here:
with sigmoid, you only need one prediction per pixel
if pred[pixel] > 0.5, you predict 1
if pred[pixel] < 0.5, you predict 0
the shape of prediction and target should then be [batch_size, 40000]
pred = conv_net(x, weights, biases, keep_prob)  # shape [batch_size, 40000]
flattened_y = tf.cast(tf.reshape(y, [-1, 40000]), tf.float32)  # shape [batch_size, 40000]; cast because the loss needs float targets
loss = tf.nn.sigmoid_cross_entropy_with_logits(pred, flattened_y)
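And at inference time, a sketch of turning those logits into a binary mask (names from the question):

pred_probs = tf.sigmoid(pred)                       # shape [batch_size, 40000]
pred_mask = tf.cast(pred_probs > 0.5, tf.int64)     # 1 = foreground, 0 = background
pred_image = tf.reshape(pred_mask, [-1, 200, 200])  # back to image shape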
Using sparse softmax is going to be of help only if, after the last layer, you resize the image to the original size (200*200); in that case reshaping as you have would keep the code error free. But in your case you don't have to use sparse softmax. To see why, check the dimensions of "pred".
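For completeness, a sketch of what the sparse-softmax route would require, with the question's variable names: the final dense layer has to emit two values per pixel (2 * 200 * 200 = 80000) so that the reshape does not borrow elements from the batch dimension:

n_output = 2 * 200 * 200  # two logits per pixel, not one
weights['out'] = tf.Variable(tf.random_normal([1024, n_output]))
biases['out'] = tf.Variable(tf.random_normal([n_output]))

pred = conv_net(x, weights, biases, keep_prob)  # shape [batch_size, 80000]
temp_pred = tf.reshape(pred, [-1, 2])           # shape [batch_size*40000, 2]
temp_y = tf.reshape(y, [-1])                    # shape [batch_size*40000]
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(temp_pred, temp_y))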
