How to reuse model in Tensorflow - python

I'm new to TensorFlow. I have the following graph. The test accuracy I get is 90%. I'd like to reuse the model. One way I figured out is to initialize new variables from the learned weights (see REUSE MODEL below). But when I run the test dataset through that model, I now get an accuracy of only 2.0%.
What is the problem with the way I'm doing it, and what is the best way to do it?
GRAPH BUILD AND RUN
graph = tf.Graph()

with graph.as_default():
    # input data
    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size, num_channels))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_test_dataset = tf.constant(test_dataset)

    layer1_weights = tf.Variable(tf.truncated_normal([kernel_size, kernel_size, num_channels, num_kernels]))
    layer1_biases = tf.Variable(tf.zeros([num_kernels]))
    layer2_weights = tf.Variable(tf.truncated_normal([kernel_size, kernel_size, num_kernels, num_kernels]))
    layer2_biases = tf.Variable(tf.constant(1.0, shape=[num_kernels]))
    layer3_weights = tf.Variable(tf.truncated_normal([image_size // 4 * image_size // 4 * num_kernels, num_hidden], stddev=0.1))
    layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))
    layer4_weights = tf.Variable(tf.truncated_normal([num_hidden, num_labels], stddev=0.1))
    layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))

    # model
    def model(data):
        conv = tf.nn.conv2d(data, layer1_weights, [1, 2, 2, 1], padding='SAME')
        hidden = tf.nn.relu(conv + layer1_biases)
        conv = tf.nn.conv2d(hidden, layer2_weights, [1, 2, 2, 1], padding='SAME')
        hidden = tf.nn.relu(conv + layer2_biases)
        shape = hidden.get_shape().as_list()
        # reshape is of size batch_size x features_vector; we flatten the output of layer2 to a features vector
        reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])
        hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
        return tf.matmul(hidden, layer4_weights) + layer4_biases

    # training computation
    logits = model(tf_train_dataset)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
    optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)

    # predictions
    train_prediction = tf.nn.softmax(logits)
    test_prediction = tf.nn.softmax(model(tf_test_dataset))
def accuracy(predictions, labels):
    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
            / predictions.shape[0])

num_steps = 1001
num_epochs = 100

with tf.Session(graph=graph) as session:
    tf.global_variables_initializer().run()
    print('Initialized')
    for epoch in range(num_epochs):
        for step in range(num_steps):
            offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
            batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
            batch_labels = train_labels[offset:(offset + batch_size), :]
            feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
            _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
            if (step % 50 == 0):
                print('Minibatch loss at step %d: %f' % (step, l))
                print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))
    print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
REUSE MODEL
with graph.as_default():
    tf_test_dataset2 = tf.constant(test_dataset)

    layer1_weights2 = tf.Variable(layer1_weights.initialized_value())
    layer1_biases2 = tf.Variable(layer1_biases.initialized_value())
    layer2_weights2 = tf.Variable(layer2_weights.initialized_value())
    layer2_biases2 = tf.Variable(layer2_biases.initialized_value())
    layer3_weights2 = tf.Variable(layer3_weights.initialized_value())
    layer3_biases2 = tf.Variable(layer3_biases.initialized_value())
    layer4_weights2 = tf.Variable(layer4_weights.initialized_value())
    layer4_biases2 = tf.Variable(layer4_biases.initialized_value())

    # model
    def model(data):
        conv = tf.nn.conv2d(data, layer1_weights2, [1, 2, 2, 1], padding='SAME')
        hidden = tf.nn.relu(conv + layer1_biases2)
        conv = tf.nn.conv2d(hidden, layer2_weights2, [1, 2, 2, 1], padding='SAME')
        hidden = tf.nn.relu(conv + layer2_biases2)
        shape = hidden.get_shape().as_list()
        # reshape is of size batch_size x features_vector; we flatten the output of layer2 to a features vector
        reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])
        hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights2) + layer3_biases2)
        return tf.matmul(hidden, layer4_weights2) + layer4_biases2

    test_prediction2 = tf.nn.softmax(model(tf_test_dataset2))

with tf.Session(graph=graph) as session:
    tf.global_variables_initializer().run()
    session.run(test_prediction2)
    print('Test accuracy: %.1f%%' % accuracy(test_prediction2.eval(), test_labels))

I think the correct way is to save and restore the MetaGraph. This is the official documentation on it:
https://www.tensorflow.org/api_docs/python/state_ops/exporting_and_importing_meta_graphs
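For illustration, here is a minimal sketch of that workflow with tf.train.Saver (the checkpoint prefix 'my_model' is a placeholder, not from the question). The key point is that after restoring you must not run the variable initializer again; re-running tf.global_variables_initializer() re-executes the original random initializers, which would explain why the REUSE MODEL snippet above drops to near-chance accuracy.

import tensorflow as tf

# --- during training: create the saver inside the graph, then save ---
with graph.as_default():
    saver = tf.train.Saver()  # by default covers every variable in the graph

with tf.Session(graph=graph) as session:
    tf.global_variables_initializer().run()
    # ... run the training loop here ...
    saver.save(session, 'my_model')  # writes my_model.meta plus checkpoint files

# --- later, possibly in a fresh process: rebuild and restore ---
with tf.Session() as session:
    # import_meta_graph recreates the graph structure from the .meta file
    new_saver = tf.train.import_meta_graph('my_model.meta')
    # restore() loads the trained values; do NOT run the initializer again
    new_saver.restore(session, 'my_model')
    # fetch tensors by name, e.g. session.graph.get_tensor_by_name(...)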

Related

Tensorflow: validation prediction outputs the same for each image

I have the following problem.
I am trying to train a 3D CNN in TensorFlow. I have separated the data into three sets: train, validation, and test.
The main problem is that when I test the validation set after 5 epochs of training, the output of the model is nearly the same for all the images.
(This is the output of the last layer, without any softmax.)
2018-04-17 23:30:35.134318 Prediction: [[0.8185656 2.7571523 ]
[0.8200048 2.7590456 ]
[0.8185656 2.7571523 ]
[0.8200048 2.7590458 ]
[0.7751368 2.7532804 ]
[0.82061136 2.7588618 ]
[0.8130686 2.7821052 ]
[0.83537185 2.7514493 ]
[0.8200041 2.7590454 ]
[0.81701267 2.7519925 ]
[0.8424163 2.8674953 ]
[0.82000506 2.7590454 ]
[0.81999433 2.7590487 ]
[0.81701267 2.7519925 ]
However, if I do the same for the training set, I get a conventional prediction.
I have fully checked the data sets; both are correct and in the same conditions.
This is my code used to build the model and do the training:
class Cnn3DMRI(object):
    def weight_variable(self, shape):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

    def bias_variable(self, shape):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    def conv3d(self, x, W):
        return tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='SAME')

    def maxpool3d(self, x):
        # size of window, movement of window
        return tf.nn.max_pool3d(x, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME')

    def dense_to_one_hot(self, labels_dense, num_classes):
        """Convert class labels from scalars to one-hot vectors."""
        num_labels = labels_dense.shape[0]
        index_offset = np.arange(num_labels) * num_classes
        labels_one_hot = np.zeros((num_labels, num_classes))
        labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
        return labels_one_hot

    def wrapper_image(self, full_image_set, full_label_set, last_batch=0, batch_size=5):
        batch_img = full_image_set[last_batch:batch_size + last_batch, :, :, :]
        batch_label = full_label_set[last_batch:batch_size + last_batch]
        return batch_img, batch_label, batch_size + last_batch
    def convolutional_neural_network(self, x, img_sz, n_slices):
        weights = {
            'W_conv1': self.weight_variable([3, 5, 5, 1, 32]),
            'W_conv2': self.weight_variable([2, 5, 5, 32, 48]),
            'W_fc': self.weight_variable(
                [int(math.ceil(n_slices / 8) * math.ceil(img_sz / 8) * math.ceil(img_sz / 8) * 48), 2048]
            ),
            'W_fc2': self.weight_variable([2048, 1024]),
            'out': self.weight_variable([1024, 2])
        }
        biases = {
            'b_conv1': self.bias_variable([32]),
            'b_conv2': self.bias_variable([48]),
            'b_fc': self.bias_variable([2048]),
            'b_fc2': self.bias_variable([1024]),
            'out': self.bias_variable([2])
        }
        self.x_im = tf.reshape(x, shape=[-1, n_slices, img_sz, img_sz, 1])
        conv1 = tf.nn.relu(self.conv3d(self.x_im, weights['W_conv1']) + biases['b_conv1'])
        conv1 = tf.Print(conv1, [conv1], 'The conv1: ')
        conv1 = self.maxpool3d(conv1)
        conv1 = tf.Print(conv1, [conv1], 'The max1: ')
        conv2 = tf.nn.relu(self.conv3d(conv1, weights['W_conv2']) + biases['b_conv2'])
        conv1 = tf.Print(conv2, [conv2], 'The conv2: ')
        conv2 = tf.nn.max_pool3d(conv2, ksize=[1, 4, 4, 4, 1], strides=[1, 4, 4, 4, 1],
                                 padding='SAME')
        conv2 = tf.Print(conv2, [conv2], 'The max2: ')
        fc = tf.reshape(conv2, [-1, int(math.ceil(n_slices/8) * math.ceil(img_sz/8) * math.ceil(img_sz/8)) * 48])
        fc = tf.Print(fc, [fc], 'The reshape: ')
        fc2 = tf.nn.relu(tf.matmul(fc, weights['W_fc']) + biases['b_fc'])
        fc2 = tf.Print(fc2, [fc2], 'The fc: ')
        dp1 = tf.nn.dropout(fc2, self.keep_prob)
        fc3 = tf.nn.relu(tf.matmul(dp1, weights['W_fc2']) + biases['b_fc2'])
        fc3 = tf.Print(fc3, [fc3], 'The fc2: ')
        dp2 = tf.nn.dropout(fc3, self.keep_prob)
        output = tf.matmul(dp2, weights['out']) + biases['out']
        output = tf.Print(output, [output], 'The output: ')
        return output
    def test_validation_set(self, sess, data_validation, label_validation, valid_batch_size=60):
        batch_img, batch_label, last_batch = self.wrapper_image(
            data_validation, label_validation, self.last_valid_batch, valid_batch_size
        )
        batch_label = self.dense_to_one_hot(
            np.array(batch_label, dtype=np.int), 2
        ).astype(np.float32)
        if last_batch + valid_batch_size < len(label_validation):
            self.last_valid_batch = last_batch
        else:
            self.last_valid_batch = 0
        pred, c, validation_accuracy = sess.run(
            [self.prediction, self.cost, self.accuracy], feed_dict={
                self.x: batch_img, self.y_: batch_label, self.keep_prob: 1.0
            }
        )
        self.log("Prediction: " + str(pred))
        self.log("Label: " + str(batch_label))
        self.log("Validation accuracy: " + str(validation_accuracy))
        self.log("Validation cost: " + str(c))
        return validation_accuracy, c
    def train_neural_network(self, data_img, labels, data_validation, label_validation,
                             batch_size, img_sz, n_slices, last_batch,
                             keep_rate, model_path):
        self.prediction = self.convolutional_neural_network(self.x, img_sz, n_slices)
        self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y_,
                                                                           logits=self.prediction))
        optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cost)
        correct_prediction = tf.equal(tf.argmax(self.prediction, 1), tf.argmax(self.y_, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        hm_epochs = 1000
        saver = tf.train.Saver(tf.trainable_variables())
        epoch_loss = 0
        epoch_loss_mean = []
        n_epoch = 0
        learning_rate = 1e-4
        self.last_valid_batch = 0
        min_valid_cost = 0
        all_valid_cost = []
        model_path_train = 'model_train/my_model.ckpt'
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            if model_path:
                pass
                # saver.restore(sess, model_path_train)
            while n_epoch < hm_epochs:
                if len(data_img) > last_batch + batch_size:
                    with tf.device('/cpu:0'):
                        # batch_img, batch_label, last_batch = self.get_image(
                        #     data_img, labels, last_batch, batch_size, img_sz, n_slices
                        # )
                        batch_img, batch_label, last_batch = self.wrapper_image(data_img, labels, last_batch, batch_size)
                    print "Batch label images: " + str(batch_label)
                    batch_label = self.dense_to_one_hot(np.array(batch_label, dtype=np.int),
                                                        2).astype(np.float32)
                else:
                    with tf.device('/cpu:0'):
                        restbatch = last_batch + batch_size - len(data_img)
                        batch_img = np.concatenate((
                            self.wrapper_image(data_img, labels, last_batch, len(data_img) - last_batch)[0],
                            self.wrapper_image(data_img, labels, last_batch, len(data_img) - last_batch)[0]
                        ))
                        batch_label = np.concatenate((
                            self.wrapper_image(data_img, labels, last_batch, len(data_img) - last_batch)[1],
                            self.wrapper_image(data_img, labels, last_batch, len(data_img) - last_batch)[1]
                        ))
                        batch_label = self.dense_to_one_hot(np.array(batch_label, dtype=np.int),
                                                            2).astype(np.float32)
                        last_batch = restbatch
                    ####### at the end of EACH EPOCH ###
                    epoch_loss_mean.append(epoch_loss)
                    print "epoch loss mean: " + str(epoch_loss_mean)
                    epoch_loss = 0
                    n_epoch += 1
                    print "n_epoch: " + str(n_epoch)
                    if model_path:
                        saver.save(sess, model_path_train)
                    if not n_epoch % 5:
                        valid_accuracy, valid_cost = self.test_validation_set(sess, data_validation,
                                                                              label_validation, 60)
                        if valid_cost < min_valid_cost - 2:
                            min_valid_cost = valid_cost
                            if model_path:
                                saver.save(sess, model_path)
                        all_valid_cost.append(valid_cost)
                        print all_valid_cost
                        if self.last_valid_batch == 0:
                            self.shufle_data(data_validation, label_validation)
                    train_accuracy = self.accuracy.eval(
                        feed_dict={self.x: batch_img, self.y_: batch_label, self.keep_prob: 1.0})
                    print "trainning accuracy: " + str(train_accuracy)
                    self.shufle_data(data_img, labels)
                _, c = sess.run(
                    [optimizer, self.cost], feed_dict={
                        self.x: batch_img, self.y_: batch_label, self.keep_prob: keep_rate,
                        self.learning_rate: learning_rate
                    }
                )
                print 'epoch_loss: ' + str(c)
    def main(self, data_dir, labels_dir, img_sz, n_slices, batch_size=5, last_batch=0, train=False,
             model_path=None, keep_rate=0.5):
        """
        Args:
            data_dir (list): directories of the images to be tested
            labels_dir (str): directory of the csv file where the images are labeled; the index
                column is number 2 and the labels header is 'Diag'.
            img_sz: the spatial image size to be transformed to, i.e. the size at which
                the image will be trained; width and height must be the same
            n_slices: the number of slices for the image to be trained
            last_batch: the batch at which you want to start the training
            train: boolean to set training (0) or testing (1)
            model_path: the path where the model is saved; if there is no previous model you can
                set a path here to start a new one
            keep_rate: the keep probability of firing a node by means of dropout

        Returns:
        """
        self.train = train
        data_path_trainning, label_trainning, data_path_validation, label_validation, \
            data_testing, label_testing = self.load_dataset(data_dir, labels_dir)
        data_trainning, label_trainning_final = self.load_image(data_path_trainning,
                                                                label_trainning, img_sz, n_slices)
        data_validation, label_validation_final = self.load_image(
            data_path_validation, label_validation, img_sz, n_slices
        )
        self.x = tf.placeholder(tf.float32, shape=[None, n_slices, img_sz, img_sz])  # batch_size, image_size
        self.y_ = tf.placeholder(tf.float32, shape=[None, 3])  # batch_size, label_size
        self.learning_rate = tf.placeholder(tf.float32)
        self.keep_prob = tf.placeholder(tf.float32)
        if train:
            self.train_neural_network(data_trainning, label_trainning_final, data_validation,
                                      label_validation_final, batch_size, img_sz, n_slices,
                                      last_batch, keep_rate, model_path)
I have already tried tf.set_random_seed(1), but saw no change.
Does anyone have any idea, please?
Thanks so much.
EDITED 22/04/18:
The data to be classified are 3D images of 150x150x40 pixels in a two-class problem. I have a total of approximately 400 images, about half of each class. I have separated the dataset into train (75%), validation (10%) and test (15%).
Edit 2:
I have simplified my model a bit; see above. Also note that we have only 2 classes.
I have tried another check: I trained my model with only 20 images, to see whether a cost of 0 is obtained.
Result after 125 epochs:
2018-04-24 23:58:24.992421 epoch loss mean: [4549.9554141853, 1854.6537470817566, 817.4076923541704, 686.8368729054928, 687.7348744268759, 704.946801304817, 483.6952783479355, 260.2293045549304, 272.66821688037817, 116.57515235748815, 97.86094704543848, 90.43152131629176, 132.54018089070996, 69.62595339218387, 57.412255316681694, 79.66184640157735, 70.99515068903565, 55.75798599421978, 44.14403077028692, 38.901107819750905, 49.75594720244408, 52.6321079954505, 37.70595762133598, 42.07099115010351, 29.01994925737381, 28.365123450756073, 31.93120799213648, 43.9855432882905, 33.242121398448944, 36.57513061538339, 28.828659534454346, 29.847569406032562, 24.078316539525986, 31.630925316363573, 30.5430103354156, 26.18060240149498, 32.86780231446028, 25.42889341711998, 29.355055704712868, 26.269534677267075, 24.921810917556286, 27.15281054377556, 27.343381822109222, 24.293660208582878, 28.212179094552994, 25.07626649737358, 21.650991335511208, 25.7527906447649, 23.42476052045822, 28.350880563259125, 22.57907184958458, 21.601420983672142, 25.28128480911255, 25.550641894340515, 22.444457232952118, 27.660063683986664, 21.863914296031, 25.722180172801018, 24.00674758851528, 21.46472266316414, 26.599679857492447, 23.52132275700569, 26.1786640137434, 24.842691332101822, 25.263965144753456, 22.730938494205475, 22.787407517433167, 23.58866274356842, 25.351682364940643, 23.85272353887558, 23.884423837065697, 24.685379207134247, 22.55106496810913, 25.993630707263947, 21.967322662472725, 22.651918083429337, 21.91003155708313, 23.782021015882492, 21.567724645137787, 22.130879193544388, 21.33636975288391, 25.624440014362335, 23.26347705721855, 22.370914071798325, 22.614411562681198, 24.962509214878082, 22.121410965919495, 20.644148647785187, 24.472172617912292, 21.622991144657135, 21.719978988170624, 21.72349101305008, 21.729621797800064, 22.090826153755188, 21.44688707590103, 22.34817299246788, 22.93226248025894, 22.63547444343567, 22.1306095123291, 22.16277289390564, 22.83771103620529, 24.171751350164413, 22.025538682937622, 21.339059710502625, 22.169043481349945, 24.614955246448517, 22.83159503340721, 21.43451902270317, 21.54544973373413, 22.889380514621735, 24.168621599674225, 21.947510302066803, 22.30243694782257, 22.381454586982727, 22.50485634803772, 22.61657750606537, 22.288170099258423, 21.30070123076439, 22.489792048931122, 21.885000944137573, 21.343613982200623, 23.04211688041687, 24.00969059765339, 21.8588485121727, 22.199619591236115]
2018-04-24 23:58:24.992694 n_epoch: 125
The print output of each layer:
The conv1: [[[[[0.0981627107 0.100793235 0.0934509188]]]]...]
The max1: [[[[[0.102978 0.107030481 0.0977560952]]]]...]
The max2: [[[[[0 0 0.00116439909]]]]...]
The reshape: [[0 0 0.00116439909]...]
The fc: [[0.01167579 0.182256863 0.107154548]...]
The fc2: [[0.773868561 0.364259362 0]...]
The output: [[0.16590938 -0.255491495][0.16590938]...]
The conv1: [[[[[0.0981602222 0.100800745 0.0934513509]]]]...]
The max1: [[[[[0.102975294 0.107038349 0.0977560282]]]]...]
The max2: [[[[[0 0 0.000874094665]]]]...]
The reshape: [[0 0 0.000874094665]...]
The fc: [[0.0117974132 0.182980478 0.106876813]...]
The fc2: [[0.774896204 0.36372292 0]...]
The output: [[0.129838273 -0.210624188][0.129838273]...]
Shouldn't 125 epochs be enough to overfit 60 samples?
Does anyone have an idea about what is happening?
This is more of a comment that did not fit into the comment limit.
As I said before, I can't see anything obviously wrong. You might have to do some debugging. If the pre-softmax outputs are exactly the same, it is probably a bug somewhere, and you can find it by locating the exact place where your presumably different inputs lead to the same layer outputs.
If the pre-softmax outputs are close but not exactly the same, most likely you have a classic case of over-fitting. You mentioned that you have just 300 training examples - that is extremely few to train a whole net on (without using some pre-trained weights). Your net has just "memorized" the 300 training examples and does not generalize to the validation set at all.
EDIT 04/23/18:
So, the issue is not just in validation? I interpreted your "if I do the same for the training set I get a conventional prediction" to mean that training images are classified just fine. If you get the same prediction for your training images too, most likely the data, the loss, or the prediction calculation is wrong. I did not spot anything, and I guess you will need to debug. You might find "eager execution" useful for this - https://www.tensorflow.org/get_started/eager. If you organize your model as in the examples (https://github.com/tensorflow/tensorflow/tree/3f4662e7ca8724f760db4a5ea6e241c99e66e588/tensorflow/contrib/eager/python/examples), you should be able to use the same code with regular TensorFlow graph execution.
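As a small illustration of the eager-execution suggestion (this is a hedged sketch, not code from the question: it assumes TF 1.7+ where tf.enable_eager_execution() is available, and the random arrays below are stand-ins for two real 150x150x40 images):

import tensorflow as tf
tf.enable_eager_execution()  # must be called once, before any graph is built

import numpy as np

# Two stand-in inputs; with real data, load two images you expect to differ.
a = np.random.rand(1, 40, 150, 150, 1).astype(np.float32)
b = np.random.rand(1, 40, 150, 150, 1).astype(np.float32)

kernel = tf.random_normal([3, 5, 5, 1, 32], stddev=0.1)

# With eager execution, ops run immediately, so intermediate activations
# can be inspected directly instead of via tf.Print.
out_a = tf.nn.conv3d(a, kernel, strides=[1, 1, 1, 1, 1], padding='SAME')
out_b = tf.nn.conv3d(b, kernel, strides=[1, 1, 1, 1, 1], padding='SAME')

# If this prints True for genuinely different inputs, the bug is upstream,
# e.g. in the data loading that produced them.
print(np.allclose(out_a.numpy(), out_b.numpy()))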

Tensorflow - Restoring a model

I have the following code, where I'm trying to restore the model at some point, but it seems I'm getting an infinite loop (not sure), as the program does not return any output although it appears to be running:
import tensorflow as tf

data, labels = cifar_tools.read_data('C:\\Users\\abc\\Desktop\\Testing')

x = tf.placeholder(tf.float32, [None, 150 * 150])
y = tf.placeholder(tf.float32, [None, 2])

w1 = tf.Variable(tf.random_normal([5, 5, 1, 64]))
b1 = tf.Variable(tf.random_normal([64]))
w2 = tf.Variable(tf.random_normal([5, 5, 64, 64]))
b2 = tf.Variable(tf.random_normal([64]))
w3 = tf.Variable(tf.random_normal([38*38*64, 1024]))
b3 = tf.Variable(tf.random_normal([1024]))
w_out = tf.Variable(tf.random_normal([1024, 2]))
b_out = tf.Variable(tf.random_normal([2]))

def conv_layer(x, w, b):
    conv = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
    conv_with_b = tf.nn.bias_add(conv, b)
    conv_out = tf.nn.relu(conv_with_b)
    return conv_out

def maxpool_layer(conv, k=2):
    return tf.nn.max_pool(conv, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')

def model():
    x_reshaped = tf.reshape(x, shape=[-1, 150, 150, 1])
    conv_out1 = conv_layer(x_reshaped, w1, b1)
    maxpool_out1 = maxpool_layer(conv_out1)
    norm1 = tf.nn.lrn(maxpool_out1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
    conv_out2 = conv_layer(norm1, w2, b2)
    norm2 = tf.nn.lrn(conv_out2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
    maxpool_out2 = maxpool_layer(norm2)
    maxpool_reshaped = tf.reshape(maxpool_out2, [-1, w3.get_shape().as_list()[0]])
    local = tf.add(tf.matmul(maxpool_reshaped, w3), b3)
    local_out = tf.nn.relu(local)
    out = tf.add(tf.matmul(local_out, w_out), b_out)
    return out

model_op = model()

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(model_op, y))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

correct_pred = tf.equal(tf.argmax(model_op, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    onehot_labels = tf.one_hot(labels, 2, on_value=1., off_value=0., axis=-1)
    onehot_vals = sess.run(onehot_labels)
    batch_size = len(data)
    # Restore model
    saver = tf.train.import_meta_graph('mymodel.meta')
    saver.restore(sess, tf.train.latest_checkpoint('./'))
    all_vars = tf.get_collection('vars')
    for v in all_vars:
        v_ = sess.run(v)
        print(v_)
    for j in range(0, 5):
        print('EPOCH', j)
        for i in range(0, len(data), batch_size):
            batch_data = data[i:i+batch_size, :]
            batch_onehot_vals = onehot_vals[i:i+batch_size, :]
            _, accuracy_val = sess.run([train_op, accuracy], feed_dict={x: batch_data, y: batch_onehot_vals})
            print(i, accuracy_val)
        print('DONE WITH EPOCH')
What could be the issue? Am I restoring the model correctly here?
Thanks.
It seems I had to give the whole path to the model, as follows:
saver = tf.train.import_meta_graph('C:\\Users\\abc\\Desktop\\Testing\\mymodel.meta')
The same mistake I made when saving the model, as shown here :-)
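As a side note, here is a minimal sketch of one way to avoid hard-coding the .meta filename (assuming the checkpoint files live in the same directory that was used for saving):

import tensorflow as tf

model_dir = 'C:\\Users\\abc\\Desktop\\Testing'
# latest_checkpoint returns the full checkpoint prefix (or None if not found)
checkpoint = tf.train.latest_checkpoint(model_dir)
saver = tf.train.import_meta_graph(checkpoint + '.meta')
# then, inside a session: saver.restore(sess, checkpoint)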

Fit and predict in Tensorflow : Session restore for multiple functions

I started learning TensorFlow one week ago and I am struggling a bit with the possibility of restoring a model.
I am constructing a class for a CNN (from Stanford course code) where I want to have a run and a test function (the first one to train the model, the second one to make a prediction on another set of data).
The run function works correctly, but I have a hard time restoring the model to make the prediction in the test function (here I have a shape problem and I guess it comes from the way I get the operation).
import os
import tensorflow as tf
from time import time


class Cnn:
    def __init__(self, batch_size=128, skip_step=10, epochs=1, dropout_ratio=0.75):
        self.batch_size = batch_size
        self.skip_step = skip_step
        self.epochs = epochs
        self.dropout_ratio = dropout_ratio
        self.x = tf.placeholder(tf.float32, [None, 784], name="X_placeholder")
        self.images = tf.reshape(self.x, shape=[-1, 28, 28, 1])
        self.y = tf.placeholder(tf.float32, [None, 10], name="Y_placeholder")
        self.dropout = tf.placeholder(tf.float32, name="dropout")
        self.global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
        self.layers = [self.images]
        self.n_layers = 0
    def convolution(self, kernel_size, n_output, strides, scope_name):
        previous_layer = self.layers[self.n_layers]
        with tf.variable_scope(scope_name) as scope:
            kernel_shape = [kernel_size[0], kernel_size[1], previous_layer.get_shape()[3], n_output]
            kernel = tf.get_variable("kernels", kernel_shape,
                                     initializer=tf.truncated_normal_initializer())
            biases = tf.get_variable("biases", [n_output],
                                     initializer=tf.random_normal_initializer())
            convolution_2d = tf.nn.conv2d(previous_layer, kernel, strides=strides,
                                          padding="SAME")
            convolution = tf.nn.relu(convolution_2d + biases, name=scope.name)
        self.layers.append(convolution)
        self.n_layers += 1
        return convolution

    def pool(self, size, strides, scope_name, padding="SAME"):
        previous_layer = self.layers[self.n_layers]
        with tf.variable_scope(scope_name):
            pool = tf.nn.max_pool(previous_layer, ksize=size, strides=strides,
                                  padding=padding)
        self.layers.append(pool)
        self.n_layers += 1
        return pool

    def fully_connected(self, n_input, n_output, scope_name, relu=False, dropout=True):
        previous_layer = self.layers[self.n_layers]
        with tf.variable_scope(scope_name):
            weights = tf.get_variable("weights", [n_input, n_output],
                                      initializer=tf.truncated_normal_initializer())
            biases = tf.get_variable("biases", [n_output],
                                     initializer=tf.random_normal_initializer())
            pool_reshaped = tf.reshape(previous_layer, [-1, n_input])
            logits = tf.add(tf.matmul(pool_reshaped, weights), biases, name="logits")
            fc = logits
            if relu:
                fc = tf.nn.relu(fc, name="relu")
            if dropout:
                fc = tf.nn.dropout(fc, self.dropout, name="dropout")
            self.layers.append(fc)
            self.n_layers += 1
            if relu is False and dropout is False:
                self.logits = tf.add(tf.matmul(pool_reshaped, weights), biases, name="logits")
                self.predictions = tf.nn.softmax(logits, name="predictions")
        return fc

    def set_loss(self, scope_name):
        previous_layer = self.layers[self.n_layers]
        with tf.name_scope(scope_name):
            entropy = tf.nn.softmax_cross_entropy_with_logits(logits=previous_layer, labels=self.y)
            loss = tf.reduce_mean(entropy, name='loss')
        self.layers.append(loss)
        self.n_layers += 1
        self.loss = loss

    def set_optimizer(self):
        previous_layer = self.layers[self.n_layers]
        optimizer = tf.train.AdamOptimizer(0.001).minimize(previous_layer, global_step=self.global_step)
        self.layers.append(optimizer)
        self.n_layers += 1
        self.optimizer = optimizer
    def run(self, train_x, train_y):
        sess = tf.Session()
        with sess.as_default():
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            writer = tf.summary.FileWriter('./my_graph/mnist', sess.graph)
            ckpt = tf.train.get_checkpoint_state(os.path.dirname('results/checkpoint'))
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            initial_step = self.global_step.eval()
            start_time = time()
            n_batches = int(train_x.shape[0] / self.batch_size)
            total_correct_preds = 0
            total_loss = 0.0
            current_position = 0
            for index in range(initial_step, int(n_batches * self.epochs)):  # train the model n_epochs times
                x_batch = train_x[current_position: current_position + self.batch_size, :]
                y_batch = train_y[current_position: current_position + self.batch_size, :]
                feed_dict = {self.x: x_batch, self.y: y_batch, self.dropout: self.dropout_ratio}
                _, loss_batch, logits_batch = sess.run([self.optimizer, self.loss, self.logits],
                                                       feed_dict=feed_dict)
                print(logits_batch.shape)
                total_loss += loss_batch
                preds = tf.nn.softmax(logits_batch)
                correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(y_batch, 1))
                accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))
                total_correct_preds += sess.run(accuracy)
                if (index + 1) % self.skip_step == 0:
                    print('Average loss at step {}: {:5.1f}'.format(index + 1, total_loss / self.skip_step))
                    total_loss = 0.0
                    saver.save(sess, 'results/mnist-convnet', index)
                current_position += self.batch_size
            print("Optimization Finished!")  # should be around 0.35 after 25 epochs
            print("Total time: {0} seconds".format(time() - start_time))
            print("Accuracy {0}".format(total_correct_preds / train_x.shape[0]))
    def test(self, val_x, val_y):
        checkpoint_file = tf.train.latest_checkpoint("results/")
        graph = tf.Graph()
        with graph.as_default():
            sess = tf.Session()
            with sess.as_default():
                saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
                saver.restore(sess, checkpoint_file)
                self.x = graph.get_operation_by_name("X_placeholder").outputs[0]
                self.y = graph.get_operation_by_name("Y_placeholder").outputs[0]
                self.dropout = graph.get_operation_by_name("dropout").outputs[0]
                self.global_step = graph.get_operation_by_name("global_step").outputs[0]
                self.logits = graph.get_operation_by_name("logits").outputs[0]
                current_position = 0
                n_batches = int(val_x.shape[0] / self.batch_size)
                total_correct_preds = 0
                for i in range(n_batches):
                    x_batch = val_x[current_position: current_position + self.batch_size]
                    y_batch = val_y[current_position: current_position + self.batch_size]
                    feed_dict = {self.x: x_batch, self.y: y_batch, self.dropout: self.dropout_ratio}
                    logits_batch = sess.run([self.logits], feed_dict=feed_dict)
                    preds = tf.nn.softmax(logits_batch)
                    # correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(y_batch, 1))
                    """
                    accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))
                    total_correct_preds += sess.run(accuracy)
                    current_position += self.batch_size
                print("Accuracy {0}".format(total_correct_preds / val_x.shape[0]))
                """
And testing it like this (sorry if there is a lot of code):
from cnn import Cnn
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
tf.set_random_seed(1)
N_CLASSES = 10
# load data
mnist = input_data.read_data_sets("mnist", one_hot=True)
train_x, train_y = mnist.train.images, mnist.train.labels
val_x, val_y = mnist.test.images, mnist.test.labels
model = Cnn(batch_size=128, skip_step=10, epochs=0.25, dropout_ratio=0.75)
model.convolution(kernel_size=[5, 5], strides=[1, 1, 1, 1], n_output=32, scope_name="conv1")
model.pool(size=[1, 2, 2, 1], strides=[1, 2, 2, 1], scope_name="pool1")
model.convolution(kernel_size=[5, 5], strides=[1, 1, 1, 1], n_output=64, scope_name="conv2")
model.pool(size=[1, 2, 2, 1], strides=[1, 2, 2, 1], scope_name="pool2")
input_features = 7 * 7 * 64
model.fully_connected(n_input=input_features, n_output=1024, scope_name="fc", relu=True, dropout=True)
model.fully_connected(n_input=1024, n_output=N_CLASSES, scope_name="softmax_linear", dropout=False)
model.set_loss(scope_name="loss")
model.set_optimizer()
model.run(train_x=train_x, train_y=train_y)
model.test(val_x, val_y)
I would also appreciate any comments regarding a better way to create a class like this (in particular the handling of the placeholders and the sessions).
Thanks!
Nicolas

Tensorflow - Should saving and restoring the model be in the same program?

I have the following code where I restore a previously saved model. Is this correct? I save a model at some point, and when I want to restore it I don't need to save the model again, since I already have a saved one. Is my understanding right?
import tensorflow as tf

data, labels = cifar_tools.read_data('C:\\Users\\abc\\Desktop\\Testing')

x = tf.placeholder(tf.float32, [None, 150 * 150])
y = tf.placeholder(tf.float32, [None, 2])

w1 = tf.Variable(tf.random_normal([5, 5, 1, 64]))
b1 = tf.Variable(tf.random_normal([64]))
w2 = tf.Variable(tf.random_normal([5, 5, 64, 64]))
b2 = tf.Variable(tf.random_normal([64]))
w3 = tf.Variable(tf.random_normal([38*38*64, 1024]))
b3 = tf.Variable(tf.random_normal([1024]))
w_out = tf.Variable(tf.random_normal([1024, 2]))
b_out = tf.Variable(tf.random_normal([2]))

def conv_layer(x, w, b):
    conv = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
    conv_with_b = tf.nn.bias_add(conv, b)
    conv_out = tf.nn.relu(conv_with_b)
    return conv_out

def maxpool_layer(conv, k=2):
    return tf.nn.max_pool(conv, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')

def model():
    x_reshaped = tf.reshape(x, shape=[-1, 150, 150, 1])
    conv_out1 = conv_layer(x_reshaped, w1, b1)
    maxpool_out1 = maxpool_layer(conv_out1)
    norm1 = tf.nn.lrn(maxpool_out1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
    conv_out2 = conv_layer(norm1, w2, b2)
    norm2 = tf.nn.lrn(conv_out2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
    maxpool_out2 = maxpool_layer(norm2)
    maxpool_reshaped = tf.reshape(maxpool_out2, [-1, w3.get_shape().as_list()[0]])
    local = tf.add(tf.matmul(maxpool_reshaped, w3), b3)
    local_out = tf.nn.relu(local)
    out = tf.add(tf.matmul(local_out, w_out), b_out)
    return out

model_op = model()

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(model_op, y))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

correct_pred = tf.equal(tf.argmax(model_op, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    onehot_labels = tf.one_hot(labels, 2, on_value=1., off_value=0., axis=-1)
    onehot_vals = sess.run(onehot_labels)
    batch_size = len(data)
    # Restore model
    saver = tf.train.import_meta_graph('C:\\Users\\abc\\Desktop\\Testing\\mymodel.meta')
    saver.restore(sess, tf.train.latest_checkpoint('./'))
    tf.add_to_collection("vars", w1)
    tf.add_to_collection("vars", b1)
    all_vars = tf.get_collection('vars')
    for v in all_vars:
        v_ = sess.run(v)
        print(v_)
    for j in range(0, 5):
        print('EPOCH', j)
        for i in range(0, len(data), batch_size):
            batch_data = data[i:i+batch_size, :]
            batch_onehot_vals = onehot_vals[i:i+batch_size, :]
            _, accuracy_val = sess.run([train_op, accuracy], feed_dict={x: batch_data, y: batch_onehot_vals})
            print(i, accuracy_val)
        print('DONE WITH EPOCH')

failed sess.run error “Cannot feed value of shape (50, 2352) for Tensor 'Placeholder:0', which has shape '(?, 784)'”

Please help me...
I am learning TensorFlow using my own data, based on the expert tutorial.
Here is my code:
# datasets define
NUM_CLASSES = 65535
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE*IMAGE_SIZE*1

# read datasets
with open(FLAGS.train, 'r') as f:  # train.txt
    train_image = []
    train_label = []
    num = 0
    for line in f:
        if num == 500:
            break
        line = line.rstrip()
        l = line.split(',')
        print(l[0])
        img = cv2.imread(l[0])
        img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))
        train_image.append(img.flatten().astype(np.float32)/255.0)
        tmp = np.zeros(NUM_CLASSES)
        tmp[int(l[1])] = 1
        train_label.append(tmp)
        num += 1
train_image = np.asarray(train_image)
train_label = np.asarray(train_label)
train_len = len(train_image)
def inference(images_placeholder, keep_prob):
    def weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

    def bias_variable(shape):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    def conv2d(x, W):
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

    def max_pool_2x2(x):
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1], padding='SAME')

    x_images = tf.reshape(images_placeholder, [-1, IMAGE_SIZE, IMAGE_SIZE, 1])

    with tf.name_scope('conv1') as scope:
        W_conv1 = weight_variable([5, 5, 1, 32])
        b_conv1 = bias_variable([32])
        h_conv1 = tf.nn.relu(conv2d(x_images, W_conv1) + b_conv1)

    with tf.name_scope('pool1') as scope:
        h_pool1 = max_pool_2x2(h_conv1)

    with tf.name_scope('conv2') as scope:
        W_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variable([64])
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)

    with tf.name_scope('pool2') as scope:
        h_pool2 = max_pool_2x2(h_conv2)

    with tf.name_scope('fc1') as scope:
        W_fc1 = weight_variable([7*7*64, 1024])
        b_fc1 = bias_variable([1024])
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    with tf.name_scope('fc2') as scope:
        W_fc2 = weight_variable([1024, NUM_CLASSES])
        b_fc2 = bias_variable([NUM_CLASSES])

    with tf.name_scope('softmax') as scope:
        y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

    return y_conv
# learn
with tf.Graph().as_default():
    images_placeholder = tf.placeholder("float", shape=(None, IMAGE_PIXELS))
    labels_placeholder = tf.placeholder("float", shape=(None, NUM_CLASSES))
    keep_prob = tf.placeholder("float")

    logits = inference(images_placeholder, keep_prob)
    loss_value = loss(logits, labels_placeholder)
    train_op = training(loss_value, FLAGS.learning_rate)
    print("train_op =", train_op)
    acc = accuracy(logits, labels_placeholder)

    saver = tf.train.Saver()
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    summary_op = tf.merge_all_summaries()
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph_def)

    if train_len % FLAGS.batch_size is 0:
        train_batch = train_len/FLAGS.batch_size
    else:
        train_batch = (train_len/FLAGS.batch_size)+1
    print("train_batch = %d", str(train_batch))
    for step in range(FLAGS.max_steps):
        for i in range(int(train_batch)):
            batch = FLAGS.batch_size*i
            batch_plus = FLAGS.batch_size*(i+1)
            print("batch_plus =", batch_plus)
            if batch_plus > train_len: batch_plus = train_len
            sess.run(train_op, feed_dict={
                images_placeholder: train_image[batch:batch_plus],
                labels_placeholder: train_label[batch:batch_plus],
                keep_prob: 0.5})
        if step % 10 == 0:
            train_accuracy = 0.0
            for i in range(train_batch):
                batch = FLAGS.batch_size*i
                batch_plus = FLAGS.batch_size*(i+1)
                if batch_plus > train_len: batch_plus = train_len
                train_accuracy += sess.run(acc, feed_dict={
                    images_placeholder: train_image[batch:batch_plus],
                    labels_placeholder: train_label[batch:batch_plus],
                    keep_prob: 1.0})
                if i is not 0: train_accuracy /= 2.0
            #summary_str = sess.run(summary_op, feed_dict={
            #    images_placeholder: train_image,
            #    labels_placeholder: train_label,
            #    keep_prob: 1.0})
            #summary_writer.add_summary(summary_str, step)
            print("step %d, training accuracy %g", (step, train_accuracy))

    if test_len % FLAGS.batch_size is 0:
        test_batch = test_len/FLAGS.batch_size
    else:
        test_batch = (test_len/FLAGS.batch_size)+1
    print("test_batch = ", str(test_batch))
    test_accuracy = 0.0
    for i in range(test_batch):
        batch = FLAGS.batch_size*i
        batch_plus = FLAGS.batch_size*(i+1)
        if batch_plus > train_len: batch_plus = train_len
        test_accuracy += sess.run(acc, feed_dict={
            images_placeholder: test_image[batch:batch_plus],
            labels_placeholder: test_label[batch:batch_plus],
            keep_prob: 1.0})
        if i is not 0: test_accuracy /= 2.0
    print("test accuracy %g", (test_accuracy))

    save_path = saver.save(sess, FLAGS.save_model)
But when I try to run it, it gives me an error:
ValueError: Cannot feed value of shape (50, 2352) for Tensor 'Placeholder:0', which has shape '(?, 784)'
I feel like I'm overlooking something small, but I don't see it.
EDIT: Sorry if you read my previous analysis; it was wrong.
2352 / 3 = 784, so I think you have kept the 3 RGB color channels instead of a single pixel intensity (or the resize function outputs 3 color channels by default).
Quite a lot of the complexity of the mnist example is hidden in the data loading; see tensorflow/tensorflow/contrib/learn/python/learn/datasets/mnist.py to understand it better and make something similar for your case. They load images in a 4D tensor [index, x, y, depth], and that's how they can look at a minibatch by taking a subset of the indexes.
Good luck!
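For illustration, a minimal sketch of the fix this answer suggests (the filename is a placeholder): read each image as single-channel grayscale before resizing and flattening, so the flattened vector has 28*28 = 784 values rather than 28*28*3 = 2352.

import cv2
import numpy as np

IMAGE_SIZE = 28

img = cv2.imread('some_image.png', cv2.IMREAD_GRAYSCALE)  # shape (h, w), one channel
img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))           # shape (28, 28)
flat = img.flatten().astype(np.float32) / 255.0           # 784 values
assert flat.shape == (784,)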
