I've been working on implementing this model for months now and finally got it to work. Now I'm looking to calculate metrics for this model such as F-score, recall, and precision. The examples I've looked at do this by splitting the data, which I haven't been able to implement. Can I calculate these metrics in the same way that model loss and total loss are calculated here?
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
from nets import model
from utils.data_provider import data_provider
# logger and cfg are provided elsewhere in the project (their imports are omitted here)
FLAGS = tf.app.flags.FLAGS
gpus = list(range(len(FLAGS.gpu_list.split(','))))
logger.setLevel(cfg.debug)
def tower_loss(images, seg_maps_gt, training_masks, reuse_variables=None):
# Build inference graph
with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
seg_maps_pred = model.model(images, is_training=True)
model_loss = model.loss(seg_maps_gt, seg_maps_pred, training_masks)
total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
# add summary
if reuse_variables is None:
tf.summary.image('input', images)
tf.summary.image('seg_map_0_gt', seg_maps_gt[:, :, :, 0:1] * 255)
tf.summary.image('seg_map_0_pred', seg_maps_pred[:, :, :, 0:1] * 255)
tf.summary.image('training_masks', training_masks)
tf.summary.scalar('model_loss', model_loss)
tf.summary.scalar('total_loss', total_loss)
return total_loss, model_loss
def average_gradients(tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def main(argv=None):
import os
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list
if not tf.gfile.Exists(FLAGS.checkpoint_path):
tf.gfile.MkDir(FLAGS.checkpoint_path)
else:
if not FLAGS.restore:
tf.gfile.DeleteRecursively(FLAGS.checkpoint_path)
tf.gfile.MkDir(FLAGS.checkpoint_path)
input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')
input_seg_maps = tf.placeholder(tf.float32, shape=[None, None, None, 6], name='input_score_maps')
input_training_masks = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_training_masks')
global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, global_step, decay_steps=10000, decay_rate=0.94, staircase=True)
# add summary
tf.summary.scalar('learning_rate', learning_rate)
opt = tf.train.AdamOptimizer(learning_rate)
# split
input_images_split = tf.split(input_images, len(gpus))
input_seg_maps_split = tf.split(input_seg_maps, len(gpus))
input_training_masks_split = tf.split(input_training_masks, len(gpus))
tower_grads = []
reuse_variables = None
for i, gpu_id in enumerate(gpus):
with tf.device('/gpu:%d' % gpu_id):
with tf.name_scope('model_%d' % gpu_id) as scope:
iis = input_images_split[i]
isegs = input_seg_maps_split[i]
itms = input_training_masks_split[i]
total_loss, model_loss = tower_loss(iis, isegs, itms, reuse_variables)
batch_norm_updates_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope))
reuse_variables = True
grads = opt.compute_gradients(total_loss)
tower_grads.append(grads)
grads = average_gradients(tower_grads)
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
summary_op = tf.summary.merge_all()
# save moving average
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# batch norm updates
with tf.control_dependencies([variables_averages_op, apply_gradient_op, batch_norm_updates_op]):
train_op = tf.no_op(name='train_op')
saver = tf.train.Saver(tf.global_variables())
summary_writer = tf.summary.FileWriter(FLAGS.checkpoint_path, tf.get_default_graph())
init = tf.global_variables_initializer()
if FLAGS.pretrained_model_path is not None:
variable_restore_op = slim.assign_from_checkpoint_fn(FLAGS.pretrained_model_path, slim.get_trainable_variables(),
ignore_missing_vars=True)
gpu_options=tf.GPUOptions(allow_growth=True)
#gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.75)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:
if FLAGS.restore:
logger.info('continue training from previous checkpoint')
ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
logger.debug(ckpt)
saver.restore(sess, ckpt)
else:
sess.run(init)
if FLAGS.pretrained_model_path is not None:
variable_restore_op(sess)
data_generator = data_provider.get_batch(num_workers=FLAGS.num_readers,
input_size=FLAGS.input_size,
batch_size=FLAGS.batch_size_per_gpu * len(gpus))
start = time.time()
for step in range(FLAGS.max_steps):
data = next(data_generator)
ml, tl, _ = sess.run([model_loss, total_loss, train_op], feed_dict={input_images: data[0],
input_seg_maps: data[2],
input_training_masks: data[3]})
if np.isnan(tl):
logger.error('Loss diverged, stop training')
break
if step % 10 == 0:
avg_time_per_step = (time.time() - start)/10
avg_examples_per_second = (10 * FLAGS.batch_size_per_gpu * len(gpus))/(time.time() - start)
start = time.time()
logger.info('Step {:06d}, model loss {:.4f}, total loss {:.4f}, {:.2f} seconds/step, {:.2f} examples/second'.format(
step, ml, tl, avg_time_per_step, avg_examples_per_second))
if step % FLAGS.save_checkpoint_steps == 0:
saver.save(sess, os.path.join(FLAGS.checkpoint_path, 'model.ckpt'), global_step=global_step)
if step % FLAGS.save_summary_steps == 0:
_, tl, summary_str = sess.run([train_op, total_loss, summary_op], feed_dict={input_images: data[0],
input_seg_maps: data[2],
input_training_masks: data[3]})
summary_writer.add_summary(summary_str, global_step=step)
Sorry for posting such a large chunk of code, but I really have no idea how to calculate metrics from all of this.
Scikit-learn has a great API for calculating precision, recall and F1 score for each class:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html
import sklearn.metrics
Y_pred = sess.run([nn_output], feed_dict={input_images: input_data[0], input_seg_maps: input_data[2], input_training_masks: input_data[3]}) # Feed according to your model
ClassificationReport = sklearn.metrics.classification_report(Y_true, Y_pred, output_dict=True)
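For the segmentation model in the question, one way to get per-pixel precision, recall and F1 is to binarize and flatten the ground-truth and predicted maps before calling classification_report. This is a sketch under my own assumptions: that seg_maps_pred is exposed from tower_loss (e.g. by returning it alongside the losses) and that a 0.5 threshold is appropriate for your maps; the feed names mirror the training script above.
import numpy as np
from sklearn.metrics import classification_report
data = next(data_generator)
seg_pred = sess.run(seg_maps_pred, feed_dict={input_images: data[0],
                                              input_seg_maps: data[2],
                                              input_training_masks: data[3]})
# binarize and flatten so every pixel becomes one sample
y_true = (data[2][..., 0] > 0.5).astype(np.uint8).ravel()
y_pred = (seg_pred[..., 0] > 0.5).astype(np.uint8).ravel()
print(classification_report(y_true, y_pred))  # precision, recall and F1 per class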
I train an LSTM in TensorFlow, and the whole session is saved with saver = tf.train.Saver().
The whole code is shown below.
def LSTM_RNN(_X, _weights, _biases):
# model architecture based on "guillaume-chevalier" and "aymericdamien" under the MIT license.
_X = tf.transpose(_X, [1, 0, 2]) # permute n_steps and batch_size
_X = tf.reshape(_X, [-1, n_input])
# Rectifies Linear Unit activation function used
_X = tf.nn.relu(tf.matmul(_X, _weights['hidden']) + _biases['hidden'])
# Split data because rnn cell needs a list of inputs for the RNN inner loop
_X = tf.split(_X, n_steps, 0)
# Define two stacked LSTM cells (two recurrent layers deep) with tensorflow
lstm_cell_1 = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
lstm_cell_2 = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2], state_is_tuple=True)
outputs, states = tf.contrib.rnn.static_rnn(lstm_cells, _X, dtype=tf.float32)
# A single output is produced, in style of "many to one" classifier, refer to http://karpathy.github.io/2015/05/21/rnn-effectiveness/ for details
lstm_last_output = outputs[-1]
# Linear activation
return tf.matmul(lstm_last_output, _weights['out']) + _biases['out']
# Graph input/output
x = tf.placeholder(tf.float32, [None, n_steps, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
# Graph weights
weights = {
'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])), # Hidden layer weights
'out': tf.Variable(tf.random_normal([n_hidden, n_classes], mean=1.0))
}
biases = {
'hidden': tf.Variable(tf.random_normal([n_hidden])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
pred = LSTM_RNN(x, weights, biases)
# Loss, optimizer and evaluation
l2 = lambda_loss_amount * sum(
tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
) # L2 loss prevents this overkill neural network to overfit the data
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)) + l2 # Softmax loss
if decaying_learning_rate:
learning_rate = tf.train.exponential_decay(init_learning_rate, global_step*batch_size, decay_steps, decay_rate, staircase=True)
#decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps) #exponentially decayed learning rate
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost,global_step=global_step) # Adam Optimizer
while step * batch_size <= training_iters:
#print (sess.run(learning_rate)) #decaying learning rate
#print (sess.run(global_step)) # global number of iterations
if len(unsampled_indices) < batch_size:
unsampled_indices = list(range(0,len(X_train)))
batch_xs, raw_labels, unsampled_indices = extract_batch_size(X_train, y_train, unsampled_indices, batch_size)
batch_ys = one_hot(raw_labels)
# check that encoded output is same length as num_classes, if not, pad it
if len(batch_ys[0]) < n_classes:
temp_ys = np.zeros((batch_size, n_classes))
temp_ys[:batch_ys.shape[0],:batch_ys.shape[1]] = batch_ys
batch_ys = temp_ys
# Fit training using batch data
_, loss, acc = sess.run(
[optimizer, cost, accuracy],
feed_dict={
x: batch_xs,
y: batch_ys
}
)
train_losses.append(loss)
train_accuracies.append(acc)
# Evaluate network only at some steps for faster training:
if (step*batch_size % display_iter == 0) or (step == 1) or (step * batch_size > training_iters):
# To not spam console, show training accuracy/loss in this "if"
print("Iter #" + str(step*batch_size) + ": Learning rate = " + "{:.6f}".format(sess.run(learning_rate)) + ": Batch Loss = " + "{:.6f}".format(loss) + ", Accuracy = {}".format(acc))
# Evaluation on the test set (no learning made here - just evaluation for diagnosis)
loss, acc = sess.run([cost, accuracy], feed_dict={x: X_test,y: one_hot(y_test)})
test_losses.append(loss)
test_accuracies.append(acc)
print("PERFORMANCE ON TEST SET: " + "Batch Loss = {}".format(loss) + ", Accuracy = {}".format(acc))
step += 1
print("Optimization Finished!")
save_path = saver.save(sess, "ActivityTrainedModels/model.ckpt")
Then I restore the model for deployment.
At that point I need to use it together with another model, a human pose estimator, which is loaded with get_graph_path().
I can't load both; I can only load one or the other. If I load both, I get this error:
NotFoundError (see above for traceback): Restoring from checkpoint failed. This is most likely due to a Variable name or other graph key that is missing from the checkpoint. Please ensure that you have not altered the graph expected based on the checkpoint. Original error:
Key smoothing/gauss_weight not found in checkpoint
[[node save/RestoreV2 (defined at ActivityDetection.py:219) = RestoreV2[dtypes=[DT_INT32, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT], _device="/job:localhost/replica:0/task:0/device:CPU:0"](_arg_save/Const_0_0, save/RestoreV2/tensor_names, save/RestoreV2/shape_and_slices)]]
My deployment code is as follows.
n_steps = 32 # 32 timesteps per series
n_input = 36 # num input parameters per timestep
n_hidden = 34 # Hidden layer num of features
n_classes = 3
global_step = tf.Variable(0, trainable=False)
# Graph input/output
x = tf.placeholder(tf.float32, [None, n_steps, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
# Graph weights
weights = {
'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])), # Hidden layer weights
'out': tf.Variable(tf.random_normal([n_hidden, n_classes], mean=1.0))
}
biases = {
'hidden': tf.Variable(tf.random_normal([n_hidden])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
pred = LSTM_RNN(x, weights, biases)
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
init = tf.global_variables_initializer()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Activity Recognition')
parser.add_argument('--video', type=str, default='../../tf-openpose/TestVideos/2019_01-Feb Recording/C4-13.mp4')
parser.add_argument('--resolution', type=str, default='640x360', help='network input resolution. default=432x368')
parser.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin')
parser.add_argument('--resize', type=str, default='0x0',
help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
parser.add_argument('--resize-out-ratio', type=float, default=4.0,
help='if provided, resize heatmaps before they are post-processed. default=1.0')
parser.add_argument('--show-process', type=bool, default=False,
help='for debug purpose, if enabled, speed for inference is dropped.')
parser.add_argument('--showBG', type=bool, default=True, help='False to show skeleton only.')
parser.add_argument('--s', type=str, default='00:00', help='start time to crop')
parser.add_argument('--e', type=str, default='00:00', help='end time to crop')
args = parser.parse_args()
w, h = model_wh(args.resize)
if w > 0 and h > 0:
e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
else:
e = TfPoseEstimator(get_graph_path(args.model), target_size=(img_w, img_h))
'''with tf.Session() as sess:
sess.run(init)
saver = tf.train.Saver()
saver.restore(sess, tf.train.latest_checkpoint('/home/coie/venvp3/HumanActivityRecognition/HumanActivityRecognition/ActivityTrainedModels/'))
print("Model restored.")
all_vars = tf.trainable_variables()
for i in range(len(all_vars)):
name = all_vars[i].name
values = sess.run(name)
print('name', name)
#print('value', values)
print('shape',values.shape)'''
#result = sess.run(pred, feed_dict={x: X_test[24:27]})
#for r in range(len(result)):
#print("predicted activity:", LABELS[result[r].argmax(0)])
If I load the TfPoseEstimator, I can't restore the LSTM model.
How can I solve this problem?
I made a separate class for the LSTM, and the LSTM graph is loaded with its own session inside that class. The main Python script then has another session with the default graph, and that default graph loads the pose estimator graph.
My LSTM class is defined as
class ActivityRecognition:
#Utility functions for training:
def LSTM_RNN(self,_X, _weights, _biases):
# model architecture based on "guillaume-chevalier" and "aymericdamien" under the MIT license.
_X = tf.transpose(_X, [1, 0, 2]) # permute n_steps and batch_size
_X = tf.reshape(_X, [-1, self.n_input])
# Rectifies Linear Unit activation function used
_X = tf.nn.relu(tf.matmul(_X, _weights['hidden']) + _biases['hidden'])
# Split data because rnn cell needs a list of inputs for the RNN inner loop
_X = tf.split(_X, self.n_steps, 0)
# Define two stacked LSTM cells (two recurrent layers deep) with tensorflow
lstm_cell_1 = tf.contrib.rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=True)
lstm_cell_2 = tf.contrib.rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=True)
lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2], state_is_tuple=True)
outputs, states = tf.contrib.rnn.static_rnn(lstm_cells, _X, dtype=tf.float32)
lstm_last_output = outputs[-1]
return tf.matmul(lstm_last_output, _weights['out']) + _biases['out']
def __init__(self):
self.n_steps = 32 # 32 timesteps per series
self.n_input = 36 # num input parameters per timestep
self.n_hidden = 34 # Hidden layer num of features
self.n_classes = 3
self.global_step = tf.Variable(0, trainable=False)
# Graph input/output
self.x = tf.placeholder(tf.float32, [None, self.n_steps, self.n_input])
self.y = tf.placeholder(tf.float32, [None, self.n_classes])
# Graph weights
self.weights = {
'hidden': tf.Variable(tf.random_normal([self.n_input, self.n_hidden])), # Hidden layer weights
'out': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes], mean=1.0))
}
self.biases = {
'hidden': tf.Variable(tf.random_normal([self.n_hidden])),
'out': tf.Variable(tf.random_normal([self.n_classes]))
}
self.pred = self.LSTM_RNN(self.x, self.weights, self.biases)
self.sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
self.init = tf.global_variables_initializer()
with tf.Session() as sess:
self.sess.run(self.init)
saver = tf.train.Saver()
saver.restore(sess, tf.train.latest_checkpoint('ActivityTrainedModels/'))
print("Model restored.")
#all_vars = tf.trainable_variables()
#for i in range(len(all_vars)):
#name = all_vars[i].name
#values = sess.run(name)
#print('name', name)
#print('value', values)
#print('shape',values.shape)
def inference(self,test):
result = self.sess.run(self.pred, feed_dict={self.x: test})
for r in range(len(result)):
activity=LABELS[result[r].argmax(0)]
return activity
if __name__ == "__main__":
ActivityRecognition()
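One thing I would add (my own observation, not part of the post above): as written, __init__ still builds its variables into the default graph and restores into a temporary session rather than self.sess, so collisions with the pose estimator graph are still possible. Below is a minimal sketch of keeping the LSTM entirely inside its own tf.Graph by restoring it from the checkpoint's .meta file; the checkpoint directory is taken from the code above, while the tensor names are hypothetical placeholders that depend on how x and pred were named when the model was saved.
import tensorflow as tf
lstm_graph = tf.Graph()
lstm_sess = tf.Session(graph=lstm_graph)
with lstm_graph.as_default():
    ckpt = tf.train.latest_checkpoint('ActivityTrainedModels/')
    restorer = tf.train.import_meta_graph(ckpt + '.meta')   # rebuilds the LSTM graph inside lstm_graph only
    restorer.restore(lstm_sess, ckpt)
    x_in = lstm_graph.get_tensor_by_name('input_x:0')       # hypothetical name
    pred_out = lstm_graph.get_tensor_by_name('pred:0')      # hypothetical name
# The pose estimator is then built in the main script's default graph as before,
# and LSTM inference runs as lstm_sess.run(pred_out, feed_dict={x_in: test}).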
As mentioned in the title, I would like to save my tf.graph into a frozen_graph.pb file. This should save space, and later I will try to run it on a Jetson TX2. I have made a short MNIST example describing my problem. I run TF 1.7 on Python 3.5.
Question 1: As far as I understand, my freeze_graph method takes a checkpoint file and converts all variables to constants, except the ones I define with the second parameter. When I tried to get the correct tensor name I wrote logits.name, but I get an error saying no tensor with that name was found in the graph.
Question 2: Once I am able to extract a frozen graph, how can I load it back and run a classification on it?
My code is attached and should work as a single .py file.
Thank you very much in advance.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
import os
import time
import tensorflow as tf
import os
import argparse
#METHODS I WANT TO TEST
#TAKE THE CHECKPOINT FILE AND DELETE ALL NOTES THAT ARE NOT USEFUL
def freeze_graph(checkpoint_directory,output_node_names):
#checkpoint = tf.train.get_checkpoint_state(checkpoint_directory)
print(checkpoint_directory)
checkpoint = tf.train.get_checkpoint_state(checkpoint_directory)
input_checkpoint = checkpoint.model_checkpoint_path
absolute_model_dir = str(os.sep).join(input_checkpoint.split(os.sep)[:-1])
output_graph = absolute_model_dir + "/frozen_model.pb"
clear_devices = True
with tf.Session(graph = tf.Graph()) as sess:
#import the metagraph in default graph
saver = tf.train.import_meta_graph(input_checkpoint + '.meta',clear_devices=clear_devices)
#restore the weights
saver.restore(sess,input_checkpoint)
#wrap variables to constants
[print(n.name) for n in tf.get_default_graph().as_graph_def().node]
output_graph_def = tf.graph_util.convert_variables_to_constants(sess, tf.get_default_graph().as_graph_def(),output_node_names.split(","))
with tf.gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
print("%d ops in the final graph." %len(output_graph_def.node))
return output_graph_def
#HERE IS THE METHOD THAT ALLOWS ME TO LOAD MY FROZEN GRAPH AS GRAPH
def load_graph(frozen_graph_filename):
with tf.gfile.GFile(frozen_graph_filename,"rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def, name = "prefix")
return graph
#get the data
mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)
#NETWORK PARAMETERS
learning_rate = 0.01
dropout = 0.75
display_step = 1
filter_height = 5
filter_width = 5
depth_in = 1
depth_out1 = 64
depth_out2 = 128
#PARAMETERS OF THE DATASET
input_height = 28
input_width = 28
n_classes = 10
#TRAINING PARAMETERS
epochs = 1
batch_size = 256
num_batches = int(mnist.train.num_examples/batch_size)
x = tf.placeholder(tf.float32,[None,28*28],name = "input")
y = tf.placeholder(tf.float32,[None,n_classes])
keep_prob = tf.placeholder(tf.float32)
weights = {'wc1': tf.Variable(tf.random_normal([filter_height,filter_width,depth_in,depth_out1])),
'wc2': tf.Variable(tf.random_normal([filter_height, filter_width, depth_out1, depth_out2])),
'wd1': tf.Variable(tf.random_normal([int(input_height/4)*int(input_height/4)*depth_out2,1024])),
'out': tf.Variable(tf.random_normal([1024,n_classes]))}
biases = {'bc1': tf.Variable(tf.random_normal([depth_out1])),
'bc2': tf.Variable(tf.random_normal([depth_out2])),
'bd1': tf.Variable(tf.random_normal([1024])),
'out': tf.Variable(tf.random_normal([n_classes]))}
#DEFINE YOUR NEURAL NETWORKS LAYER OPERATIONS
def ops_conv2d(x,W,b,strides = 1, add_bias = True, activation = tf.nn.relu, use_activation = True):
x = tf.nn.conv2d(x,W,strides = [1,strides,strides,1],padding = 'SAME')
x = tf.nn.bias_add(x,b)
if use_activation:
return activation(x)
else:
return x
def ops_maxpool2d(x,stride=2):
return tf.nn.max_pool(x,ksize=[1,stride,stride,1],strides = [1,stride,stride,1], padding = 'SAME' )
def ops_dropout(input_fully_connected,dropout):
return tf.nn.dropout(input_fully_connected,dropout)
def ops_fullyconnected(input, activation = tf.nn.relu, use_activation = True):
fc = tf.reshape(input,[-1,weights['wd1'].get_shape().as_list()[0]])
fc = tf.add(tf.matmul(fc,weights['wd1']),biases['bd1'])
if use_activation:
return activation(fc)
else:
return fc
#DEFINE NETWORK ARCHTEKTURE (FORWARDPASS)
def build_network(x,weights,biases,dropout):
x = tf.reshape(x,shape=(-1,28,28,1))
conv_layer_1 = ops_conv2d(x,weights['wc1'],biases['bc1'],activation=tf.nn.relu, use_activation=True)
conv_layer_1 = ops_maxpool2d(conv_layer_1,2)
conv_layer_2 = ops_conv2d(conv_layer_1,weights['wc2'],biases['bc2'],activation=tf.nn.relu, use_activation=True)
conv_layer_2 = ops_maxpool2d(conv_layer_2,2)
fc1 = ops_fullyconnected(conv_layer_2, activation=tf.nn.relu, use_activation=True)
fc1 = ops_dropout(fc1,dropout)
logits = tf.add(tf.matmul(fc1,weights['out']),biases['out'],name = "logits")
return logits
#DEFINE TENSORFLOW BACKPROPAGATION OBJECTS (BACKWARDPASS)
logits = build_network(x,weights,biases,keep_prob)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits,labels = y))
#CHOSE AN OPTIMIZER
optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate).minimize(loss=loss)
predicted_labels = tf.equal(tf.argmax(logits,1),tf.argmax(y,1))
#EVALUATION PARAMETERS
acc = tf.reduce_mean(tf.cast(predicted_labels,tf.float32))
#NOW INITIALIZE ALL TF VARIABLES
init = tf.global_variables_initializer()
saver = tf.train.Saver(max_to_keep=10)
#NOW START THE SESSION AND EXECUTE THE GRAPH
with tf.Session() as sess:
sess.run(init)
for i in range(epochs):
save_path = saver.save(sess, os.curdir + "checkpoints/MNIST_TEST.ckpt")
for j in range(num_batches):
batch_x, batch_y = mnist.train.next_batch(batch_size)
sess.run(optimizer, feed_dict={x:batch_x,y:batch_y,keep_prob:dropout})
losses,accs = sess.run([loss,acc],feed_dict={x:batch_x,y:batch_y,keep_prob:1.})
if epochs % display_step == 0:
print("EPOCH:",'%04d' % (i+1),
"loss =", "{:.9f}".format(losses),
"acc =", "{:.5f}".format(accs))
print("TRAINING COMPLETED")
#START PREDICTIONS
predicted_label = sess.run(logits,feed_dict={x:mnist.test.images[:256],keep_prob:1.})
test_classes = np.argmax(predicted_label,1)
print("TEST ACCURACY:",sess.run(acc,feed_dict={x:mnist.test.images[:256], y:mnist.test.labels[:256],keep_prob:1.}))
f,a = plt.subplots(1,10,figsize = (10,2))
for i in range(10):
a[i].imshow(np.reshape(mnist.test.images[i],(28,28)))
print( test_classes[i])
print("TOTAL EXAMPLE FINNISHED")
freeze_graph(os.curdir + "checkpoints" + os.sep, logits.name)
graph = load_graph(os.curdir + os.sep + "checkpoints" + os.sep + "frozen_model.pb")
with tf.Session(graph) as sess:
sess.run(init)
predicted_label = sess.run(logits, feed_dict={x: mnist.test.images[:256], keep_prob: 1.})
print(predicted_label)
If anybody has the same problem, here is a description of how I solved it.
Saving and Loading the data:
Note that I now use a different pipeline. First I save the session with a saver (ckpt files). Afterwards I write out the graph definition (graph.pb). This graph is then converted into a frozen graph (frozen.pb). To load the frozen graph I use the load_frozen_graph_from_session method; inside that method I also test a forward pass through my network.
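For reference, here is a minimal self-contained toy version of the same three steps (checkpoint, variables-to-constants, serialized frozen graph). This is my own illustration, not code from the project, and the toy graph and file names are made up.
import os
import tensorflow as tf
from tensorflow.python.framework.graph_util import convert_variables_to_constants
os.makedirs("./tmp", exist_ok=True)
g = tf.Graph()
with g.as_default():
    x = tf.placeholder(tf.float32, [None, 2], name="input")
    w = tf.Variable([[1.0], [2.0]], name="w")
    y = tf.matmul(x, w, name="output")
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.save(sess, "./tmp/toy_model")                    # step 1: checkpoint (ckpt files)
        frozen = convert_variables_to_constants(
            sess, g.as_graph_def(), ["output"])                # step 2: variables -> constants
with tf.gfile.GFile("./tmp/toy_frozen.pb", "wb") as f:          # step 3: serialized frozen graph
    f.write(frozen.SerializeToString())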
Running an inference on the loaded graph:
First I name my tensor x (name="input"); this results in the tensor name "input:0".
So when you want to feed this placeholder in the new session, you need predicted_label = sess.run("output:0", feed_dict={"input:0": mnist.test.images[:256], "keep_prob:0": 1.})
The output is the logits, not the predictions of my network. This is because when you run the session it only executes the graph up to the node you fetch; if I wanted the prediction/accuracy comparison instead, I would also need the placeholder for my y (name="label").
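Regarding Question 1 from the original post, my understanding (an assumption on my part, not something stated explicitly above) is that convert_variables_to_constants expects node (op) names such as "output", whereas logits.name returns the tensor name "output:0" with the output index appended, which is why no node with that name is found. So the freeze call would look like:
output_node_names = logits.op.name                               # the op name, e.g. "output", not "output:0"
freeze_graph(os.curdir + "checkpoints" + os.sep, output_node_names)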
Here is the full code:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
import os
import time
import tensorflow as tf
import os
import argparse
from tensorflow.python.platform import gfile
from tensorflow.python.framework.graph_util import convert_variables_to_constants
#METHODS I WANT TO TEST
def freeze_graph_from_Session(sess,saver):
# convert_variables_to_constants(sess, input_graph_def, output_node_names, variable_names_whitelist=None)
save_graph(sess,saver)
with gfile.FastGFile("./tmp/" + "graph.pb", 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
frozen_graph_def = convert_variables_to_constants(sess, graph_def, ["output"])
with tf.gfile.GFile("./tmp/" + "frozen.pb", "wb") as f:
f.write(frozen_graph_def.SerializeToString())
def save_graph(sess, saver):
saver.save(sess, "./tmp/model", write_meta_graph=True, global_step=1)
with open("./tmp/" + "graph.pb", 'wb') as f:
f.write(sess.graph_def.SerializeToString())
#sess.close()
def load_frozen_graph_from_session():
filename = "./tmp/" + "frozen.pb"
print("LOADING GRAPH")
with tf.gfile.GFile(filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
print("OPEN GRAPH")
with tf.Graph().as_default() as graph:
print("DEFINE INPUT")
new_input = tf.placeholder(tf.float32, [None, 28 * 28], name="new_input")
print("DEFINE INPUT MAP")
tf.import_graph_def(
graph_def,
# usually, during training you use queues, but at inference time use placeholders
# this turns into "input
input_map={"input:0": new_input},
return_elements=None,
# if input_map is not None, needs a name
name="bla",
op_dict=None,
producer_op_list=None
)
checkpoint_path = tf.train.latest_checkpoint("./tmp/")
with tf.Session(graph=graph) as sess:
saver = tf.train.import_meta_graph(checkpoint_path + ".meta", import_scope=None)
saver.restore(sess, checkpoint_path)
print("TRY FORWARD RUN THROUGH LOADED GRAPH")
predicted_label = sess.run("output:0", feed_dict={"input:0":mnist.test.images[:256], "keep_prob:0": 1.})
print("output", predicted_label)
f, a = plt.subplots(1, 10, figsize=(10, 2))
test_classes = np.argmax(predicted_label, 1)
for i in range(10):
a[i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
print(test_classes[i])
print ("output:", test_classes)
#TAKE THE CHECKPOINT FILE AND DELETE ALL NOTES THAT ARE NOT USEFUL
def freeze_graph(checkpoint_directory,output_node_names):
#checkpoint = tf.train.get_checkpoint_state(checkpoint_directory)
print(checkpoint_directory)
checkpoint = tf.train.get_checkpoint_state(checkpoint_directory)
input_checkpoint = checkpoint.model_checkpoint_path
absolute_model_dir = str(os.sep).join(input_checkpoint.split(os.sep)[:-1])
output_graph = absolute_model_dir + "/frozen_model.pb"
clear_devices = True
with tf.Session(graph = tf.Graph()) as sess:
#import the metagraph in default graph
saver = tf.train.import_meta_graph(input_checkpoint + '.meta',clear_devices=clear_devices)
#restore the weights
saver.restore(sess,input_checkpoint)
#wrap variables to constants
[print(n.name) for n in tf.get_default_graph().as_graph_def().node]
output_graph_def = tf.graph_util.convert_variables_to_constants(sess, tf.get_default_graph().as_graph_def(),output_node_names.split(","))
with tf.gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
print("%d ops in the final graph." %len(output_graph_def.node))
return output_graph_def
#HERE IS THE METHOD THAT ALLOWS ME TO LOAD MY FROZEN GRAPH AS GRAPH
def load_graph(frozen_graph_filename):
with tf.gfile.GFile(frozen_graph_filename,"rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def, name = "prefix")
return graph
#get the data
mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)
print(mnist.test.labels[:256])
print("load_freeze_graph_from_session: STARTED")
load_frozen_graph_from_session()
print("load_freeze_graph_from_session: ENDED")
exit()
#NETWORK PARAMETERS
learning_rate = 0.01
dropout = 0.75
display_step = 1
filter_height = 5
filter_width = 5
depth_in = 1
depth_out1 = 64
depth_out2 = 128
#PARAMETERS OF THE DATASET
input_height = 28
input_width = 28
n_classes = 10
#TRAINING PARAMETERS
epochs = 1
batch_size = 256
num_batches = int(mnist.train.num_examples/batch_size)
x = tf.placeholder(tf.float32,[None,28*28],name="input")
y = tf.placeholder(tf.float32,[None,n_classes],name = "label")
keep_prob = tf.placeholder(tf.float32,name = "keep_prob")
weights = {'wc1': tf.Variable(tf.random_normal([filter_height,filter_width,depth_in,depth_out1])),
'wc2': tf.Variable(tf.random_normal([filter_height, filter_width, depth_out1, depth_out2])),
'wd1': tf.Variable(tf.random_normal([int(input_height/4)*int(input_height/4)*depth_out2,1024])),
'out': tf.Variable(tf.random_normal([1024,n_classes]))}
biases = {'bc1': tf.Variable(tf.random_normal([depth_out1])),
'bc2': tf.Variable(tf.random_normal([depth_out2])),
'bd1': tf.Variable(tf.random_normal([1024])),
'out': tf.Variable(tf.random_normal([n_classes]))}
#DEFINE YOUR NEURAL NETWORKS LAYER OPERATIONS
def ops_conv2d(x,W,b,strides = 1, add_bias = True, activation = tf.nn.relu, use_activation = True):
x = tf.nn.conv2d(x,W,strides = [1,strides,strides,1],padding = 'SAME')
x = tf.nn.bias_add(x,b)
if use_activation:
return activation(x)
else:
return x
def ops_maxpool2d(x,stride=2):
return tf.nn.max_pool(x,ksize=[1,stride,stride,1],strides = [1,stride,stride,1], padding = 'SAME' )
def ops_dropout(input_fully_connected,dropout):
return tf.nn.dropout(input_fully_connected,dropout)
def ops_fullyconnected(input, activation = tf.nn.relu, use_activation = True):
fc = tf.reshape(input,[-1,weights['wd1'].get_shape().as_list()[0]])
fc = tf.add(tf.matmul(fc,weights['wd1']),biases['bd1'])
if use_activation:
return activation(fc)
else:
return fc
#DEFINE NETWORK ARCHTEKTURE (FORWARDPASS)
def build_network(x,weights,biases,dropout):
x = tf.reshape(x,shape=(-1,28,28,1))
conv_layer_1 = ops_conv2d(x,weights['wc1'],biases['bc1'],activation=tf.nn.relu, use_activation=True)
conv_layer_1 = ops_maxpool2d(conv_layer_1,2)
conv_layer_2 = ops_conv2d(conv_layer_1,weights['wc2'],biases['bc2'],activation=tf.nn.relu, use_activation=True)
conv_layer_2 = ops_maxpool2d(conv_layer_2,2)
fc1 = ops_fullyconnected(conv_layer_2, activation=tf.nn.relu, use_activation=True)
fc1 = ops_dropout(fc1,dropout)
logits = tf.add(tf.matmul(fc1,weights['out']),biases['out'],name = "output")
return logits
#DEFINE TENSORFLOW BACKPROPAGATION OBJECTS (BACKWARDPASS)
logits = build_network(x,weights,biases,keep_prob)
#freeze_graph(os.curdir + "checkpoints" + os.sep, logits.name)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits,labels = y))
#CHOSE AN OPTIMIZER
optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate).minimize(loss=loss)
predicted_labels = tf.equal(tf.argmax(logits,1),tf.argmax(y,1))
#EVALUATION PARAMETERS
acc = tf.reduce_mean(tf.cast(predicted_labels,tf.float32))
#NOW INITIALIZE ALL TF VARIABLES
init = tf.global_variables_initializer()
saver = tf.train.Saver(max_to_keep=10)
#NOW START THE SESSION AND EXECUTE THE GRAPH
with tf.Session() as sess:
sess.run(init)
for i in range(epochs):
save_path = saver.save(sess, os.curdir + "checkpoints/MNIST_TEST.ckpt")
for j in range(num_batches):
batch_x, batch_y = mnist.train.next_batch(batch_size)
sess.run(optimizer, feed_dict={x:batch_x,y:batch_y,keep_prob:dropout})
losses,accs = sess.run([loss,acc],feed_dict={x:batch_x,y:batch_y,keep_prob:1.})
if epochs % display_step == 0:
print("EPOCH:",'%04d' % (i+1),
"loss =", "{:.9f}".format(losses),
"acc =", "{:.5f}".format(accs))
print("TRAINING COMPLETED")
#START PREDICTIONS
predicted_label = sess.run(logits,feed_dict={x:mnist.test.images[:256],keep_prob:1.})
test_classes = np.argmax(predicted_label,1)
print("TEST ACCURACY:",sess.run(acc,feed_dict={x:mnist.test.images[:256], y:mnist.test.labels[:256],keep_prob:1.}))
f,a = plt.subplots(1,10,figsize = (10,2))
for i in range(10):
a[i].imshow(np.reshape(mnist.test.images[i],(28,28)))
print( test_classes[i])
print("TOTAL EXAMPLE FINNISHED")
#freeze_graph(os.curdir + "checkpoints"+os.sep,logits)
print("freeze_graph_from_session: STARTED")
freeze_graph_from_Session(sess,saver)
print("freeze_graph_from_session: ENDED")
print("load_freeze_graph_from_session: STARTED")
load_frozen_graph_from_session()
print("load_freeze_graph_from_session: ENDED")
#with tf.Session() as sess:
#
# sess.run(init)
# graph = load_graph(os.curdir + os.sep + "checkpoints" + os.sep + "frozen_model.pb")
# predicted_label = sess.run(logits, feed_dict={x: mnist.test.images[:256], keep_prob: 1.})
# print(predicted_label)
Thanks goes out to myself. :)
I use tf.data to fetch images, labels, and edges during training on the GPU. But I find the Dataset API does not load all the data.
I use this code:
def get_dataset(filenames, shuffle_buffer, repeat_times, batch_size):
dataset = tf.data.TFRecordDataset([filenames])
dataset = dataset.map(tfrecord_preprocess)
if repeat_times is None:
dataset = dataset.repeat()
else:
dataset = dataset.repeat(repeat_times)
dataset = dataset.shuffle(shuffle_buffer).batch(batch_size)
return dataset
def tfrecord_preprocess(example):
feature = {"image": tf.FixedLenFeature((), tf.string, default_value=""),
"label": tf.FixedLenFeature((), tf.string, default_value=""),
"edge": tf.FixedLenFeature((), tf.string, default_value="")}
parsed_feature = tf.parse_single_example(example, feature)
image = tf.decode_raw(parsed_feature["image"], out_type=tf.uint8)
label = tf.decode_raw(parsed_feature["label"], out_type=tf.uint8)
edge = tf.decode_raw(parsed_feature["edge"], out_type=tf.uint8)
image = tf.cast(tf.reshape(image, shape=[1, 128, 128]), tf.float32)
label = tf.cast(tf.reshape(label, shape=[1, 128, 128]), tf.float32)
edge = tf.cast(tf.reshape(edge, shape=[128, 128]), tf.float32)
return image, label, edge
I wrote a simple piece of code to test the API:
dataset = get_dataset(filenames, shuffle_buffer, repeat_times, batchsize)
#shuffle=1000, repeat_times=2, batchsize=13
iter = dataset.make_one_shot_iterator()
images, labels, edges = iter.get_next()
count = 0
with tf.Session() as sess:
for _ in xrange(40):
try:
edges_value = sess.run(edges)
count = count+len(edges_value)
print count
except tf.errors.OutOfRangeError:
break
There are 260 examples, so after repeating twice and batching by 13 there should be 260 × 2 / 13 = 40 batches. It works.
However, when I use similar code for training, the total number of examples consumed is less than 260, only 140 (according to the count variable). Does anyone know how to solve this problem? Please help me.
I use tensorflow-gpu 1.4.
My training code is:
shuffle_buffer = params["shuffle_buffer"] #1000
repeat_times = params["repeat_times"] #1
batch_size = params["batch_size"] #26
num_classes = params["num_classes"] #2
dataset = model.get_dataset(filenames, shuffle_buffer, repeat_times, batch_size)
iterator = dataset.make_one_shot_iterator()
with tf.device('/gpu:1'):
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(params["learning_rate"],
global_step, 100, 0.99)
optimizer = tf.train.AdamOptimizer(learning_rate)
images, labels, edges = iterator.get_next()
_, probs = model.interence(features=images, training=True)
loss, reg = model.get_loss(probs, labels, edges, num_classes)
_, acc_mean, _ = model.get_acc(probs, labels)
train_op = optimizer.minimize(loss, global_step=global_step)
variables_average = tf.train.ExponentialMovingAverage(0.99, global_step)
var_list = tf.trainable_variables(scope='.*(kernel|bias)')
variables_average_op = variables_average.apply(var_list)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_all_op = tf.group(train_op, variables_average_op)
tf.summary.scalar("loss", loss)
tf.summary.scalar("reg", reg)
tf.summary.scalar("acc_mean", acc_mean)
merged = tf.summary.merge_all()
saver = tf.train.Saver(max_to_keep=5)
config = tf.ConfigProto(log_device_placement=True,
allow_soft_placement=True)
config.gpu_options.allow_growth = True
count = 0
with tf.Session(config=config) as sess:
tf.global_variables_initializer().run()
writer = tf.summary.FileWriter('./train', sess.graph)
for _ in xrange(10):
try:
edges_value = sess.run(edges)
count = count+len(edges_value)
_, step, summary = sess.run([train_all_op, global_step, merged])
writer.add_summary(summary, step)
if step % 5 == 0:
loss_value = sess.run(loss)
print loss_value
acc_mean_value = sess.run(acc_mean)
print acc_mean_value
saver.save(sess, params["save_dir"], step)
except tf.errors.OutOfRangeError:
print "end of data"
break
print count
print "the final step is %d" % step
loss_value = sess.run(loss)
print loss_value
acc_mean_value = sess.run(acc_mean)
print acc_mean_value
saver.save(sess, params["save_dir"], step)
writer.close()
Finally I got this output in the terminal:
end of data
130
the final step is 5
To test the code I set repeat_times to 1. But when I use this test code:
def test():
dataset = get_dataset("train_output.tfrecords", 1000, 1, 26)
iterator = dataset.make_one_shot_iterator()
images, labels, edges = iterator.get_next()
count = 0
with tf.Session() as sess:
for i in xrange(10):
try:
images_value, labels_value, edges_value = sess.run([images, labels, edges])
count = count+len(edges_value)
except tf.errors.OutOfRangeError:
print "end of data"
print count
print i
test()
The terminal shows:
260
9
The problem is that sess.run(edges) causes this part of the graph to execute again: images, labels, edges = iterator.get_next(). Therefore, every time you run it you consume one extra batch that is not counted by your counter.
To get the count of edges, keep a counter inside the with tf.device('/gpu:1') block. You can even graph it on TensorBoard using a tf.summary.scalar, similar to how you do with loss.
Declare an edges_count variable:
edges_count = tf.Variable(0, name='edges_count', trainable=False, dtype=tf.int32)
images, labels, edges = iterator.get_next()
edges_count_update_op = tf.assign_add(edges_count, tf.shape(edges)[0])  # len() does not work on a Tensor here
Then add edges_count_update_op to your train_op group.
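Alternatively (my own suggestion, not part of the answer above), you can avoid consuming an extra batch by fetching edges in the same sess.run call as the training op, since all fetches in a single call share one execution of iterator.get_next(). The same applies to the separate sess.run(loss) and sess.run(acc_mean) calls in the training loop, which also pull fresh batches.
_, step, summary, edges_value = sess.run([train_all_op, global_step, merged, edges])
count = count + len(edges_value)   # counts examples without draining an extra batch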
I'm new to TensorFlow and machine learning (and to Python as well).
As a first step toward creating an image recognition program, I hit a wall of confusion when preparing the data to feed. Can someone please help me with this?
I looked into this tutorial, but the data preparation is obfuscated:
MNIST softmax for beginners
I don't expect to get a whole, perfect program out of this question; instead I would love to hear whether you can tell me how TensorFlow works with feed_dict. For now, my mental model is: "It works like a for loop: go through imageHolder, take the 2352 bytes of one image and put them into the training op, where a prediction is made with the current model, compared with the entry at the same index in labelHolder, and the model is then corrected." So I expected to be able to put in a single 2352-byte image (another image of the same size) and get a prediction. I will also put the code here, in case my idea is correct and the error comes from a bad implementation.
To be specific: I have a dataset of 5 classes, with 3670 images in total.
When loading the data into the feed_dict for training, I converted every image to 28x28 pixels with 3 channels. That gives me a tensor of shape (3670, 2352) for the image holder in the feed_dict. After that, I prepared a tensor of shape (3670,) for the label holder in the feed_dict.
The training code looks like this:
for step in xrange(FLAGS.max_steps):
feed_dict = {
imageHolder: imageTrain,
labelHolder: labelTrain,
}
_, loss_rate = sess.run([train_op, loss_op], feed_dict=feed_dict)
Then I have my code to predict a new image with the model above:
testing_dataset = do_get_file_list(FLAGS.guess_dir)
x = tf.placeholder(tf.float32, shape=(IMAGE_PIXELS))
for data in testing_dataset:
image = Image.open(data)
image = image.resize((IMAGE_SIZE, IMAGE_SIZE))
image = np.array(image).reshape(IMAGE_PIXELS)
prediction = session.run(tf.argmax(logits, 1), feed_dict={x: image})
But the problem is that the predict line always raises a "Cannot feed value of shape ..." error, no matter what shape my testing data has: (2352,) or (1, 2352) (it asks for a (3670, 2352) shape, but there's no way).
These are some flags I have used:
IMAGE_SIZE = 28
CHANNELS = 3
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE * CHANNELS
The training op and loss computation:
def do_get_op_compute_loss(logits, labels):
labels = tf.to_int64(labels)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='xentropy')
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
return loss
def do_get_op_training(loss_op, training_rate):
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = optimizer.minimize(loss_op, global_step=global_step)
return train_op
The placeholders:
imageHolder = tf.placeholder(tf.float32, [data_count, IMAGE_PIXELS])
labelHolder = tf.placeholder(tf.int32, [data_count])
The complete program:
import os
import math
import tensorflow as tf
from PIL import Image
import numpy as np
from six.moves import xrange
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('max_steps', 200, 'Number of steps to run trainer.')
flags.DEFINE_integer('hidden1', 128, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', 32, 'Number of units in hidden layer 2.')
flags.DEFINE_integer('batch_size', 4, 'Batch size. '
'Must divide evenly into the dataset sizes.')
flags.DEFINE_string('train_dir', 'data', 'Directory to put the training data.')
flags.DEFINE_string('save_file', '.\\data\\model.ckpt', 'Directory to put the training data.')
flags.DEFINE_string('guess_dir', 'work', 'Directory to put the testing data.')
#flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data '
# 'for unit testing.')
IMAGE_SIZE = 28
CHANNELS = 3
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE * CHANNELS
def do_inference(images, hidden1_units, hidden2_units, class_count):
#HIDDEN LAYER 1
with tf.name_scope('hidden1'):
weights = tf.Variable(
tf.truncated_normal([IMAGE_PIXELS, hidden1_units], stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
name='weights')
biases = tf.Variable(tf.zeros([hidden1_units]), name='biases')
hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
#HIDDEN LAYER 2
with tf.name_scope('hidden1'):
weights = tf.Variable(
tf.truncated_normal([hidden1_units, hidden2_units], stddev=1.0 / math.sqrt(float(hidden1_units))),
name='weights')
biases = tf.Variable(tf.zeros([hidden2_units]), name='biases')
hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
#LINEAR
with tf.name_scope('softmax_linear'):
weights = tf.Variable(
tf.truncated_normal([hidden2_units, class_count], stddev=1.0 / math.sqrt(float(hidden2_units))),
name='weights')
biases = tf.Variable(tf.zeros([class_count]), name='biases')
logits = tf.matmul(hidden2, weights) + biases
return logits
def do_get_op_compute_loss(logits, labels):
labels = tf.to_int64(labels)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='xentropy')
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
return loss
def do_get_op_training(loss_op, training_rate):
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = optimizer.minimize(loss_op, global_step=global_step)
return train_op
def do_get_op_evaluate(logits, labels):
correct = tf.nn.in_top_k(logits, labels, 1)
return tf.reduce_sum(tf.cast(correct, tf.int32))
def do_evaluate(session, eval_correct_op, imageset_holder, labelset_holder, train_images, train_labels):
true_count = 0
num_examples = FLAGS.batch_size * FLAGS.batch_size
for step in xrange(FLAGS.batch_size):
feed_dict = {imageset_holder: train_images, labelset_holder: train_labels,}
true_count += session.run(eval_correct_op, feed_dict=feed_dict)
precision = true_count / num_examples
# print(' Num examples: %d Num correct: %d Precision # 1: %0.04f' %
# (num_examples, true_count, precision))
def do_init_param(data_count, class_count):
# Generate placeholder
imageHolder = tf.placeholder(tf.float32, shape=(data_count, IMAGE_PIXELS))
labelHolder = tf.placeholder(tf.int32, shape=(data_count))
# Build a graph for prediction from inference model
logits = do_inference(imageHolder, FLAGS.hidden1, FLAGS.hidden2, class_count)
# Add loss calculating op
loss_op = do_get_op_compute_loss(logits, labelHolder)
# Add training op
train_op = do_get_op_training(loss_op, FLAGS.learning_rate)
# Add evaluate correction op
evaluate_op = do_get_op_evaluate(logits, labelHolder)
# Create session for op operating
sess = tf.Session()
# Init param
init = tf.initialize_all_variables()
sess.run(init)
return sess, train_op, loss_op, evaluate_op, imageHolder, labelHolder, logits
def do_get_class_list():
return [{'name': name, 'path': os.path.join(FLAGS.train_dir, name)} for name in os.listdir(FLAGS.train_dir)
if os.path.isdir(os.path.join(FLAGS.train_dir, name))]
def do_get_file_list(folderName):
return [os.path.join(folderName, name) for name in os.listdir(folderName)
if (os.path.isdir(os.path.join(folderName, name)) == False)]
def do_init_data_list():
file_list = []
for classItem in do_get_class_list():
for dataItem in do_get_file_list(classItem['path']):
file_list.append({'name': classItem['name'], 'path': dataItem})
# Renew data feeding dictionary
imageTrainList, labelTrainList = do_seperate_data(file_list)
imageTrain = []
for imagePath in imageTrainList:
image = Image.open(imagePath)
image = image.resize((IMAGE_SIZE, IMAGE_SIZE))
imageTrain.append(np.array(image))
imageCount = len(imageTrain)
imageTrain = np.array(imageTrain)
imageTrain = imageTrain.reshape(imageCount, IMAGE_PIXELS)
id_list, id_map = do_generate_id_label(labelTrainList)
labelTrain = np.array(id_list)
return imageTrain, labelTrain, id_map
def do_init():
imageTrain, labelTrain, id_map = do_init_data_list()
sess, train_op, loss_op, evaluate_op, imageHolder, labelHolder, logits = do_init_param(len(imageTrain), len(id_map))
return sess, train_op, loss_op, evaluate_op, imageHolder, labelHolder, imageTrain, labelTrain, id_map, logits
def do_seperate_data(data):
images = [item['path'] for item in data]
labels = [item['name'] for item in data]
return images, labels
def do_generate_id_label(label_list):
trimmed_label_list = list(set(label_list))
id_map = {trimmed_label_list.index(label): label for label in trimmed_label_list}
reversed_id_map = {label: trimmed_label_list.index(label) for label in trimmed_label_list}
id_list = [reversed_id_map.get(item) for item in label_list]
return id_list, id_map
def do_training(sess, train_op, loss_op, evaluate_op, imageHolder, labelHolder, imageTrain, labelTrain):
# Training state checkpoint saver
saver = tf.train.Saver()
# feed_dict = {
# imageHolder: imageTrain,
# labelHolder: labelTrain,
# }
for step in xrange(FLAGS.max_steps):
feed_dict = {
imageHolder: imageTrain,
labelHolder: labelTrain,
}
_, loss_rate = sess.run([train_op, loss_op], feed_dict=feed_dict)
if step % 100 == 0:
print('Step {0}: loss = {1}'.format(step, loss_rate))
if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
saver.save(sess, FLAGS.save_file, global_step=step)
print('Evaluate training data')
do_evaluate(sess, evaluate_op, imageHolder, labelHolder, imageTrain, labelTrain)
def do_predict(session, logits):
# xentropy
testing_dataset = do_get_file_list(FLAGS.guess_dir)
x = tf.placeholder(tf.float32, shape=(IMAGE_PIXELS))
print('Perform predict')
print('==================================================================================')
# TEMPORARY CODE
for data in testing_dataset:
image = Image.open(data)
image = image.resize((IMAGE_SIZE, IMAGE_SIZE))
image = np.array(image).reshape(IMAGE_PIXELS)
print(image.shape)
prediction = session.run(logits, {x: image})
print('{0}: {1}'.format(data, prediction))
def main(_):
# TF notice default graph
with tf.Graph().as_default():
sess, train_op, loss_op, evaluate_op, imageHolder, labelHolder, imageTrain, labelTrain, id_map, logits = do_init()
print("done init")
do_training(sess, train_op, loss_op, evaluate_op, imageHolder, labelHolder, imageTrain, labelTrain)
print("done training")
do_predict(sess, logits)
# NO IDEA
if __name__ == '__main__':
tf.app.run()
It's important to understand the error. You say:
But the problem is that the predict line always raises a "Cannot feed value of shape ..." error, no matter what shape my testing data has: (2352,) or (1, 2352) (it asks for a (3670, 2352) shape, but there's no way).
Oh yes there is a way, my friend. It says there's a problem with your shape, and you need to inspect it. It asks for 3670. Why?
Because your model accepts inputs with shape (data_count, IMAGE_PIXELS), which you declare below:
def do_init_param(data_count, class_count):
# Generate placeholder
imageHolder = tf.placeholder(tf.float32, shape=(data_count, IMAGE_PIXELS))
labelHolder = tf.placeholder(tf.int32, shape=(data_count))
This function is called here:
sess, train_op, loss_op, evaluate_op, imageHolder, labelHolder, logits = do_init_param(len(imageTrain), len(id_map))
len(imageTrain) is the length of your dataset, probably 3670 images.
Then you have your prediction function:
def do_predict(session, logits):
# xentropy
testing_dataset = do_get_file_list(FLAGS.guess_dir)
x = tf.placeholder(tf.float32, shape=(IMAGE_PIXELS))
...
prediction = session.run(logits, {x: image})
Note that x here is useless. You are feeding the image you want to predict to a model that does not expect that shape; it expects the original placeholder shape of (3670, 2352), because that's what you declared.
The solution is to declare the model's input placeholder with a non-specific first dimension, such as:
imageHolder = tf.placeholder(tf.float32, shape=(None, IMAGE_PIXELS))
When you predict the label of your image, you can pass a single image or multiple images (a mini-batch), but the input must always have shape [number_images, IMAGE_PIXELS].
Makes sense?
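For example, a minimal sketch of the prediction step with the None-shaped placeholder (reusing names from the question's code, and assuming the imageHolder placeholder is passed into, or otherwise visible from, do_predict; the reshape to a batch of one is my addition):
image = np.array(image).reshape(1, IMAGE_PIXELS)                  # a batch containing a single image
prediction = session.run(logits, feed_dict={imageHolder: image})
print(np.argmax(prediction, axis=1))                              # predicted class index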