I was able to train a model in TensorFlow on my own data. The input and output of the model are images. I then tried to get the output of the predictions and save it to a png image file to see what's going on. Unfortunately I am getting an error when running the function below, which I created to test predictions. My goal is to save the prediction, which is also an image, so I can open it with a normal image viewer.
Some more context on the code: in my main I create an estimator and hand it to this function.
def predict_element(my_model, features):
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x=features,
        num_epochs=1,
        shuffle=False)
    eval_results = my_model.predict(input_fn=eval_input_fn)
    predictions = eval_results.next()  # this returns a dict with my tensors
    prediction_tensor = predictions["y"]  # get the tensor from the dict
    image_tensor = tf.reshape(prediction_tensor, [IM_WIDTH, -1])  # reshape to a matrix, since my returned tensor is a flat 1D one
    encoded_image = tf.image.encode_png(image_tensor)
    write_image = tf.write_file("output/my_output_image.png", encoded_image)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(write_image))
def get_input():
    filename_dataset = tf.data.Dataset.list_files("features/*.png")
    label_dataset = tf.data.Dataset.list_files("labels/*.png")
    # Make a Dataset of image tensors by reading and decoding the files.
    image_dataset = filename_dataset.map(lambda x: tf.cast(tf.image.decode_png(tf.read_file(x), channels=1), tf.float32))
    l_dataset = label_dataset.map(lambda x: tf.cast(tf.image.decode_png(tf.read_file(x), channels=1), tf.float32))
    image_reshape = image_dataset.map(lambda x: tf.reshape(x, [IM_WIDTH * IM_HEIGHT]))
    label_reshape = l_dataset.map(lambda x: tf.reshape(x, [IM_WIDTH * IM_HEIGHT]))
    iterator = image_reshape.make_one_shot_iterator()
    iterator2 = label_reshape.make_one_shot_iterator()
    next_img = iterator.get_next()
    next_lbl = iterator2.get_next()
    features = []
    labels = []
    # read all 10 images and labels and put them in the arrays
    # so we can pass them to the estimator
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(10):
            t1, t2 = sess.run([next_img, next_lbl])
            features.append(t1)
            labels.append(t2)
    return {"x": np.array(features)}, np.array(labels)
def main(unused_argv):
    features, labels = get_input()  # creating the features dict {"x": }
    my_estimator = tf.estimator.Estimator(model_fn=my_cnn_model, model_dir="/tmp/my_model")
    predict_element(my_estimator, features)
The error is

Graph is finalized and cannot be modified

With some simple print() statements I could see that retrieving the dict with

eval_results = my_model.predict(input_fn=eval_input_fn)

is probably the call that finalizes the graph.

I absolutely don't know what to do or where to look for a solution here. How can I save the output?
I tried this in my model_fn:
# the last layer of my network is dropout
predictions = {
    "y": dropout
}
if mode == tf.estimator.ModeKeys.PREDICT:
    reshape1 = tf.reshape(dropout, [-1, IM_WIDTH, IM_HEIGHT])
    sliced = tf.slice(reshape1, [0, 0, 0], [1, IM_WIDTH, IM_HEIGHT])
    encoded = tf.image.encode_png(tf.cast(sliced, dtype=tf.uint8))
    outputfile = tf.write_file(params["output_path"], encoded)
    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
My problem here is that I can't pass back the "outputfile" node so I can work with it.
Well, your graph is finalized and cannot be modified. You can either add these TensorFlow operations to your model (before running it) or simply write some Python code which saves the images separately (without using TensorFlow). Maybe I'll find some old code of mine as an example.
You could also create a second graph; then you can use TensorFlow without changing the existing model graph.
You have to distinguish between graph nodes and evaluated objects. tf.reshape doesn't take an array as input, but a graph node.
https://www.tensorflow.org/programmers_guide/graphs
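For example, here is a minimal sketch of the no-TensorFlow route, assuming Pillow is installed and that the prediction values already fit the 0-255 range (IM_WIDTH and IM_HEIGHT are the constants from your own code):

import numpy as np
from PIL import Image

pred_dict = eval_results.next()  # dict from the predict() generator
preds = pred_dict["y"]           # flat 1D numpy array
img = preds.reshape((IM_HEIGHT, IM_WIDTH)).astype(np.uint8)  # assumes values in 0..255
Image.fromarray(img, mode="L").save("output/my_output_image.png")  # "L" = 8-bit grayscale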
For everyone with the same problem, here is my solution. I don't know if this is the proper way, but it works.
In my predict function I created a second graph for the reshaping, slicing, encoding and saving, like:
pred_dict = eval_results.next()  # generator the predict function returns
preds = pred_dict["y"]  # get the predictions from the dict
# create the second graph
g = tf.Graph()
with g.as_default():
    inp = tf.Variable(preds)
    reshape1 = tf.reshape(inp, [IM_WIDTH, IM_HEIGHT, -1])
    sliced = tf.slice(reshape1, [0, 0, 0], [IM_WIDTH, IM_HEIGHT, 1])
    reshaped = tf.reshape(sliced, [IM_HEIGHT, IM_WIDTH, 1])
    encoded = tf.image.encode_png(tf.image.convert_image_dtype(reshaped, tf.uint16))
    outputfile = tf.write_file("/tmp/pred_output/prediction_img.png", encoded)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(outputfile)
I have recently started using TensorFlow (TF), and I have come across a problem that I need some help with. Basically, I've restored a pre-trained model, and I need to modify the weights and biases of one of its layers before I retest its accuracy. Now, my problem is the following:
how can I change the weights and biases using the assign method in TF? Is modifying the weights of a restored model even possible in TF?
Here is my code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data  # Imports the MNIST dataset

# Data Set:
# ---------
mnist = input_data.read_data_sets("/home/frr/MNIST_data", one_hot=True)  # An object where data is stored
ImVecDim = 784  # The number of elements in an image vector (flattening a 28x28 2D image)
NumOfClasses = 10
g = tf.get_default_graph()
with tf.Session() as sess:
    LoadMod = tf.train.import_meta_graph('simple_mnist.ckpt.meta')  # This object loads the model
    LoadMod.restore(sess, tf.train.latest_checkpoint('./'))  # Loading weights, biases and other stuff into the model
    # ( Here I'd like to modify the weights and biases of layer 1, set them to one for example, before I go ahead and test the accuracy ) #
    # Testing the accuracy of the model:
    X = g.get_tensor_by_name('ImageIn:0')
    Y = g.get_tensor_by_name('LabelIn:0')
    KP = g.get_tensor_by_name('KeepProb:0')
    Accuracy = g.get_tensor_by_name('NetAccuracy:0')
    feed_dict = {X: mnist.test.images[:256], Y: mnist.test.labels[:256], KP: 1.0}
    print('Model Accuracy = ')
    print(sess.run(Accuracy, feed_dict))
In addition to the existing answer, a tensor can be updated via the tf.assign function.
v1 = sess.graph.get_tensor_by_name('v1:0')
print(sess.run(v1)) # 1.0
sess.run(tf.assign(v1, v1 + 1))
print(sess.run(v1)) # 2.0
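One design note worth adding: every call to tf.assign adds a new node to the graph, so for repeated updates it is cheaper to build the assign op once and rerun it:

increment_op = tf.assign(v1, v1 + 1)  # build the op once
for _ in range(3):
    sess.run(increment_op)  # rerunning it does not grow the graph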
Thanks to everyone who responded. I'd just like to put the pieces together. This is the code that helped me accomplish what I want:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data  # Imports the MNIST dataset

# Data Set:
# ---------
mnist = input_data.read_data_sets("/home/frr/MNIST_data", one_hot=True)  # An object where data is stored
ImVecDim = 784  # The number of elements in an image vector (flattening a 28x28 2D image)
NumOfClasses = 10
g = tf.get_default_graph()
with tf.Session() as sess:
    LoadMod = tf.train.import_meta_graph('simple_mnist.ckpt.meta')  # This object loads the model
    LoadMod.restore(sess, tf.train.latest_checkpoint('./'))  # Loading weights, biases and other stuff into the model
    wc1 = g.get_tensor_by_name('wc1:0')
    sess.run(tf.assign(wc1, tf.multiply(wc1, 0)))  # Setting the values of the variable 'wc1' in the model to zero.
    # Testing the accuracy of the model:
    X = g.get_tensor_by_name('ImageIn:0')
    Y = g.get_tensor_by_name('LabelIn:0')
    KP = g.get_tensor_by_name('KeepProb:0')
    Accuracy = g.get_tensor_by_name('NetAccuracy:0')
    feed_dict = {X: mnist.test.images[:256], Y: mnist.test.labels[:256], KP: 1.0}
    print('Model Accuracy = ')
    print(sess.run(Accuracy, feed_dict))
Yes, it is possible. Your weights and biases are already loaded after you load the meta graph. You need to find their names (see the list_variables function) and then assign them to a Python variable.
For that, use tf.get_variable with the variable name. You might have to set reuse=True on your variable scope. See this answer for more detail on reusing variables.
Once you have them in a weights variable, you can call sess.run(weights.assign(...)).
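A minimal sketch of that flow (the variable name 'wc1' is hypothetical). One caveat: tf.get_variable only sees variables created through the variable-scope mechanism, so after import_meta_graph it can be simpler to pick the variable out of the restored global-variables collection:

# 1. List the variable names and shapes stored in the checkpoint
for name, shape in tf.train.list_variables('./'):
    print(name, shape)

# 2. Grab the restored variable object by name
wc1 = [v for v in tf.global_variables() if v.name == 'wc1:0'][0]

# 3. Overwrite its value in the running session
sess.run(wc1.assign(tf.ones_like(wc1)))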
An update to this for TensorFlow 2.4, using a different example than the OP's.
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

# Step 0 - Init
model = # some tf.keras.Model
model_folder = # path to model files
ckpt_obj = tf.train.Checkpoint(model=model)
ckpt_obj.restore(save_path=tf.train.latest_checkpoint(str(model_folder))).expect_partial()

# Step 1 - Loop over all layers
for layer in model.layers:
    # Step 2 - Loop over submodules of a layer
    for submodule in layer.submodules:
        # Step 3 - Find a particular type of submodule (alternatively use submodule.name == 'SomeName')
        if type(submodule) == tfp.layers.Convolution3DFlipout:  # kernel=N(loc,scale) --> N=Normal distro
            # Step 4 - Extract numpy weights using .get_weights()
            # Note: different from submodule.weights, which returns tensors that also have names, e.g. wc1:0
            weights = submodule.get_weights()  # [scale, rho, bias] --> kernel=N(loc,scale=tfp.bijectors.Softplus(rho)) --> output=input*kernel + bias
            # Step 5 - Set weights as a new numpy array of your choice
            weights[1] = np.full(weights[1].shape, -np.inf)
            # Step 6 - Update weights
            submodule.set_weights(weights)

input = tf.random.normal((1, 100, 100, 100, 1))  # 3D input with batch=1, channels=1
_ = model(input)
I'd like to perform image classification on our own large image library (millions of labeled images) with TensorFlow. I'm new to Stack Overflow, Python and TensorFlow, worked myself through a few tutorials (MNIST etc.) and got to the point where I was able to prepare a TensorFlow dataset from a dictionary containing the absolute paths to the images and the corresponding labels. However, I'm stuck at the point of using the dataset in a TensorFlow session. Here is my (example) code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
import time
import mymodule  # I built my own module to read the images and labels
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.contrib.data import Iterator

beginTime = time.time()
batch_size = 100
learning_rate = 0.005
max_steps = 2
NUM_CLASSES = 25

def input_parser(img_path, label):
    one_hot = tf.one_hot(label, NUM_CLASSES)
    img_file = tf.read_file(img_path)
    img_decoded = tf.image.decode_jpeg(img_file, channels=3)
    return img_decoded, one_hot

# Import training data (returns the dictionary with paths and labels)
train_dict = mymodule.getFileMap(labelList, imageList)
# Import test data
test_dict = mymodule.getFileMap(labelList, imageList)

# Get train data
train_file_list, train_label_list = get_file_label_list(train_dict)
train_images_tensor = ops.convert_to_tensor(train_file_list, dtype=dtypes.string)
train_labels_tensor = ops.convert_to_tensor(train_label_list, dtype=dtypes.int64)

# Get test data
test_file_list, test_label_list = get_file_label_list(test_dict)
test_images_tensor = ops.convert_to_tensor(test_file_list, dtype=dtypes.string)
test_labels_tensor = ops.convert_to_tensor(test_label_list, dtype=dtypes.int64)

# Create TensorFlow Dataset objects
train_data = tf.data.Dataset.from_tensor_slices((train_images_tensor, train_labels_tensor))
test_data = tf.data.Dataset.from_tensor_slices((test_images_tensor, test_labels_tensor))

# Transform the datasets so that they contain decoded images
# and one-hot vector labels
train_data = train_data.map(input_parser)
test_data = test_data.map(input_parser)

# Batching --> How to do it right?
#train_data = train_data.batch(batch_size = 100)
#test_data = test_data.batch(batch_size = 100)

# Define input placeholders
image_size = 990*990*3
images_placeholder = tf.placeholder(tf.float32, shape=[None, image_size])
labels_placeholder = tf.placeholder(tf.int64, shape=[None])

# Define variables (these are the values we want to optimize)
weights = tf.Variable(tf.zeros([image_size, NUM_CLASSES]))
biases = tf.Variable(tf.zeros([NUM_CLASSES]))

# Define the classifier's result
logits = tf.matmul(images_placeholder, weights) + biases

# Define the loss function
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels_placeholder))

# Define the training operation
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

# Operation comparing prediction with true label
correct_prediction = tf.equal(tf.argmax(logits, 1), labels_placeholder)

# Operation calculating the accuracy of our predictions
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Create TensorFlow Iterator object
iterator = Iterator.from_structure(train_data.output_types,
                                   train_data.output_shapes)
next_element = iterator.get_next()

# Create two initialization ops to switch between the datasets
train_init_op = iterator.make_initializer(train_data)
test_init_op = iterator.make_initializer(test_data)

with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())
    sess.run(train_init_op)
    for _ in range(10):
        try:
            elem = sess.run(next_element)
            print(elem)
        except tf.errors.OutOfRangeError:
            print("End of training dataset.")
            break
Following this and this tutorial, I could not solve the problem of how to use the (image and label) dataset in a TensorFlow session for training. I was able to print out the dataset by iterating through it, but wasn't able to use it for learning.
I don't understand how to access the images and labels separately after they have been merged in the train_data = tf.data.Dataset.from_tensor_slices((train_images_tensor, train_labels_tensor)) operation, as required by the 2nd tutorial. Also I don't know how to implement batching correctly.
What I want to do in the session is basically this (from the 2nd tutorial):
# Generate input data batch
indices = np.random.choice(data_sets['images_train'].shape[0], batch_size)
images_batch = data_sets['images_train'][indices]
labels_batch = data_sets['labels_train'][indices]

# Periodically print out the model's current accuracy
if i % 100 == 0:
    train_accuracy = sess.run(accuracy, feed_dict={
        images_placeholder: images_batch, labels_placeholder: labels_batch})
    print('Step {:5d}: training accuracy {:g}'.format(i, train_accuracy))

# Perform a single training step
sess.run(train_step, feed_dict={images_placeholder: images_batch,
                                labels_placeholder: labels_batch})

# After finishing the training, evaluate on the test set
test_accuracy = sess.run(accuracy, feed_dict={
    images_placeholder: data_sets['images_test'],
    labels_placeholder: data_sets['labels_test']})
print('Test accuracy {:g}'.format(test_accuracy))

endTime = time.time()
print('Total time: {:5.2f}s'.format(endTime - beginTime))
If anyone can tell me how to access images and labels in the dataset separately and use them for training, I would be really thankful. Also, a tip on where and how to do the batching would be appreciated.
Thank you.
In your code, next_element is a tuple of two tensors, matching the structure of your datasets: i.e. it is a tuple whose first element is an image and whose second element is a label. To access the individual tensors, you can do the following:
next_element = iterator.get_next()
next_image = next_element[0]
next_label = next_element[1]
# Or, in a single line:
next_image, next_label = iterator.get_next()
To batch a tf.data.Dataset, you can use the Dataset.batch() transformation. Your commented-out code for this should simply work:
train_data = train_data.batch(batch_size=100)
test_data = test_data.batch(batch_size=100)
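After batching, each sess.run(next_element) yields a whole batch instead of a single example. A small sketch of what to expect, assuming all your images share one size (Dataset.batch() requires equal shapes within a batch, so variable-sized images would need a resize or crop in input_parser first):

sess.run(train_init_op)
images_batch, labels_batch = sess.run(next_element)
print(images_batch.shape)  # (100, height, width, 3)
print(labels_batch.shape)  # (100, NUM_CLASSES) -- one-hot labels

These evaluated numpy arrays can then be fed into the placeholder-based training step from your tutorial code.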
I have a model that trains my network using an Iterator, following the new Dataset API pipeline that is now recommended by Google.
I read tfrecord files, feed data to the network, and training goes nicely; all is going well. I save my model at the end of the training so I can run inference on it later. A simplified version of the code is as follows:
""" Training and saving """
training_dataset = tf.contrib.data.TFRecordDataset(training_record)
training_dataset = training_dataset.map(ds._path_records_parser)
training_dataset = training_dataset.batch(BATCH_SIZE)
with tf.name_scope("iterators"):
training_iterator = Iterator.from_structure(training_dataset.output_types, training_dataset.output_shapes)
next_training_element = training_iterator.get_next()
training_init_op = training_iterator.make_initializer(training_dataset)
def train(num_epochs):
# compute for the number of epochs
for e in range(1, num_epochs+1):
session.run(training_init_op) #initializing iterator here
while True:
try:
images, labels = session.run(next_training_element)
session.run(optimizer, feed_dict={x: images, y_true: labels})
except tf.errors.OutOfRangeError:
saver_name = './saved_models/ucf-model'
print("Finished Training Epoch {}".format(e))
break
""" Restoring """
# restoring the saved model and its variables
session = tf.Session()
saver = tf.train.import_meta_graph(r'saved_models\ucf-model.meta')
saver.restore(session, tf.train.latest_checkpoint('.\saved_models'))
graph = tf.get_default_graph()
# restoring relevant tensors/ops
accuracy = graph.get_tensor_by_name("accuracy/Mean:0") #the tensor that when evaluated returns the mean accuracy of the batch
testing_iterator = graph.get_operation_by_name("iterators/Iterator") #my iterator used in testing.
next_testing_element = graph.get_operation_by_name("iterators/IteratorGetNext") #the GetNext operator for my iterator
# loading my testing set tfrecords
testing_dataset = tf.contrib.data.TFRecordDataset(testing_record_path)
testing_dataset = testing_dataset.map(ds._path_records_parser, num_threads=4, output_buffer_size=BATCH_SIZE*20)
testing_dataset = testing_dataset.batch(BATCH_SIZE)
testing_init_op = testing_iterator.make_initializer(testing_dataset) #to initialize the dataset
with tf.Session() as session:
session.run(testing_init_op)
while True:
try:
images, labels = session.run(next_testing_element)
accuracy = session.run(accuracy, feed_dict={x: test_images, y_true: test_labels}) #error here, x, y_true not defined
except tf.errors.OutOfRangeError:
break
My problem is mainly when I restore the model: how do I feed testing data to the network?
When I restore my Iterator using testing_iterator = graph.get_operation_by_name("iterators/Iterator"), next_testing_element = graph.get_operation_by_name("iterators/IteratorGetNext"), I get the following error:
GetNext() failed because the iterator has not been initialized. Ensure that you have run the initializer operation for this iterator before getting the next element.
So I did try to initialize my dataset using testing_init_op = testing_iterator.make_initializer(testing_dataset). I got this error: AttributeError: 'Operation' object has no attribute 'make_initializer'
Another issue is that, since an iterator is being used, there's no need to use placeholders in the training model, as the iterator feeds data directly to the graph. But then how do I restore my feed_dict keys in the 3rd-to-last line, when I feed data to the "accuracy" op?
EDIT: if someone could suggest a way to add placeholders between the Iterator and the network input, then I could try running the graph by evaluating the "accuracy" tensor while feeding data to the placeholders and ignoring the iterator altogether.
When restoring a saved meta graph, you can restore the initialization operation by name and then use it again to initialize the input pipeline for inference.
That is, when creating the graph, you can do
dataset_init_op = iterator.make_initializer(dataset, name='dataset_init')
And then restore this operation by doing:
dataset_init_op = graph.get_operation_by_name('dataset_init')
Here is a self-contained code snippet that compares the results of a randomly initialized model before and after restoring.
Saving an Iterator
import numpy as np
import tensorflow as tf

np.random.seed(42)
data = np.random.random([4, 4])
X = tf.placeholder(dtype=tf.float32, shape=[4, 4], name='X')
dataset = tf.data.Dataset.from_tensor_slices(X)
iterator = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes)
dataset_next_op = iterator.get_next()

# name the operation
dataset_init_op = iterator.make_initializer(dataset, name='dataset_init')

w = np.random.random([1, 4])
W = tf.Variable(w, name='W', dtype=tf.float32)
output = tf.multiply(W, dataset_next_op, name='output')

sess = tf.Session()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(dataset_init_op, feed_dict={X: data})
while True:
    try:
        print(sess.run(output))
    except tf.errors.OutOfRangeError:
        saver.save(sess, 'tmp/', global_step=1002)
        break
And then you can restore the same model for inference as follows:
Restoring saved iterator
import os

import numpy as np
import tensorflow as tf

np.random.seed(42)
data = np.random.random([4, 4])
tf.reset_default_graph()
sess = tf.Session()
saver = tf.train.import_meta_graph('tmp/-1002.meta')
ckpt = tf.train.get_checkpoint_state(os.path.dirname('tmp/checkpoint'))
saver.restore(sess, ckpt.model_checkpoint_path)
graph = tf.get_default_graph()

# Restore the init operation
dataset_init_op = graph.get_operation_by_name('dataset_init')
X = graph.get_tensor_by_name('X:0')
output = graph.get_tensor_by_name('output:0')
sess.run(dataset_init_op, feed_dict={X: data})
while True:
    try:
        print(sess.run(output))
    except tf.errors.OutOfRangeError:
        break
I would suggest using tf.contrib.data.make_saveable_from_iterator, which has been designed precisely for this purpose. It is much less verbose and does not require you to change existing code, in particular how you define your iterator.
A working example, where we save everything after step 5 has completed. Note how I don't even bother knowing what seed is used.
import tensorflow as tf

iterator = (
    tf.data.Dataset.range(100)
    .shuffle(10)
    .make_one_shot_iterator())
batch = iterator.get_next(name='batch')

saveable_obj = tf.contrib.data.make_saveable_from_iterator(iterator)
tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable_obj)
saver = tf.train.Saver()

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for step in range(10):
        print('{}: {}'.format(step, sess.run(batch)))
        if step == 5:
            saver.save(sess, './foo', global_step=step)
# 0: 1
# 1: 6
# 2: 7
# 3: 3
# 4: 8
# 5: 10
# 6: 12
# 7: 14
# 8: 5
# 9: 17
Then later, if we resume from step 6, we get the same output.
import tensorflow as tf

saver = tf.train.import_meta_graph('./foo-5.meta')
with tf.Session() as sess:
    saver.restore(sess, './foo-5')
    for step in range(6, 10):
        print('{}: {}'.format(step, sess.run('batch:0')))
# 6: 12
# 7: 14
# 8: 5
# 9: 17
I couldn't solve the problem related to initializing the iterator, but since I pre-process my dataset using the map method and apply transformations defined by Python operations wrapped with py_func, which cannot be serialized for storing/restoring, I'll have to initialize my dataset when I want to restore it anyway.
So the problem that remains is how to feed data to my graph when I restore it. I placed a tf.identity node between the iterator output and my network input. Upon restoring, I feed my data to the identity node. A better solution that I discovered later is using placeholder_with_default(), as described in this answer; a minimal sketch follows.
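A minimal sketch of the placeholder_with_default() idea, with hypothetical shapes: if nothing is fed, the placeholder evaluates to the iterator output; at inference you override it through feed_dict and bypass the iterator entirely.

next_images, next_labels = training_iterator.get_next()
x = tf.placeholder_with_default(next_images, shape=[None, 224, 224, 3], name='x')
y_true = tf.placeholder_with_default(next_labels, shape=[None, NUM_CLASSES], name='y_true')
# training: sess.run(optimizer) pulls data from the iterator
# inference after restoring:
# sess.run(accuracy, feed_dict={'x:0': test_images, 'y_true:0': test_labels})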
I would suggest having a look at CheckpointInputPipelineHook, which implements saving iterator state for further training with tf.Estimator.
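For illustration, a short sketch of how the hook is wired in, assuming an Estimator and an input function (my_model_fn and train_input_fn are hypothetical names):

est = tf.estimator.Estimator(model_fn=my_model_fn, model_dir='/tmp/my_model')
hook = tf.contrib.data.CheckpointInputPipelineHook(est)
# iterator state is checkpointed alongside the model variables, so a later
# call to train() resumes the input pipeline where it left off
est.train(input_fn=train_input_fn, hooks=[hook], steps=100)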
So I've been stuck on this problem for weeks. I want to make an image batch from a list of image filenames. I insert the filename list into a queue and use a reader to get the file. The reader then returns the filename and the read image file.
My problem is that when I make a batch using the decoded jpg and the labels from the reader, tf.train.shuffle_batch() mixes up the images and the filenames so that the labels end up in the wrong order for the image files. Is there something I am doing wrong with the queue/shuffle_batch, and how can I fix it such that the batch comes out with the right labels for the right files?
Many thanks!
import tensorflow as tf
from tensorflow.python.framework import ops

def preprocess_image_tensor(image_tf):
    image = tf.image.convert_image_dtype(image_tf, dtype=tf.float32)
    image = tf.image.resize_image_with_crop_or_pad(image, 300, 300)
    image = tf.image.per_image_standardization(image)
    return image

# original image names and labels
image_paths = ["image_0.jpg", "image_1.jpg", "image_2.jpg", "image_3.jpg", "image_4.jpg", "image_5.jpg", "image_6.jpg", "image_7.jpg", "image_8.jpg"]
labels = [0, 1, 2, 3, 4, 5, 6, 7, 8]

# converting arrays to tensors
image_paths_tf = ops.convert_to_tensor(image_paths, dtype=tf.string, name="image_paths_tf")
labels_tf = ops.convert_to_tensor(labels, dtype=tf.int32, name="labels_tf")

# getting tensor slices
image_path_tf, label_tf = tf.train.slice_input_producer([image_paths_tf, labels_tf], shuffle=False)

# getting image tensors from jpeg and performing preprocessing
image_buffer_tf = tf.read_file(image_path_tf, name="image_buffer")
image_tf = tf.image.decode_jpeg(image_buffer_tf, channels=3, name="image")
image_tf = preprocess_image_tensor(image_tf)

# creating a batch of images and labels
batch_size = 5
num_threads = 4
images_batch_tf, labels_batch_tf = tf.train.batch([image_tf, label_tf], batch_size=batch_size, num_threads=num_threads)

# running testing session to check order of images and labels
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    print(image_path_tf.eval())
    print(label_tf.eval())
    coord.request_stop()
    coord.join(threads)
Wait... isn't your tf usage a little weird?
You are basically running the graph twice by calling:
print(image_path_tf.eval())
print(label_tf.eval())
And since you are only asking for image_path_tf and label_tf, anything downstream of this line is not even run:
image_path_tf, label_tf = tf.train.slice_input_producer([image_paths_tf, labels_tf], shuffle=False)
Maybe try this?
image_paths, labels = sess.run([images_batch_tf, labels_batch_tf])
print(image_paths)
print(labels)
From your code I'm unsure how your labels are encoded/extracted from the jpeg images. I used to encode everything in the same file, but have since found a much more elegant solution. Assuming you can get a list of filenames image_paths and a numpy array of labels labels, you can bind them together and operate on individual examples with tf.train.slice_input_producer, then batch them together using tf.train.batch.
import tensorflow as tf
from tensorflow.python.framework import ops

shuffle = True
batch_size = 128
num_threads = 8

def get_data():
    """
    Return image_paths, labels such that labels[i] corresponds to image_paths[i].

    image_paths: list of strings
    labels: list/np array of labels
    """
    raise NotImplementedError()

def preprocess_image_tensor(image_tf):
    """Preprocess a single image."""
    image = tf.image.convert_image_dtype(image_tf, dtype=tf.float32)
    image = tf.image.resize_image_with_crop_or_pad(image, 300, 300)
    image = tf.image.per_image_standardization(image)
    return image

image_paths, labels = get_data()

image_paths_tf = ops.convert_to_tensor(image_paths, dtype=tf.string, name='image_paths')
labels_tf = ops.convert_to_tensor(labels, dtype=tf.int32, name='labels')

image_path_tf, label_tf = tf.train.slice_input_producer([image_paths_tf, labels_tf], shuffle=shuffle)

# preprocess single image paths
image_buffer_tf = tf.read_file(image_path_tf, name='image_buffer')
image_tf = tf.image.decode_jpeg(image_buffer_tf, channels=3, name='image')
image_tf = preprocess_image_tensor(image_tf)

# batch the results
image_batch_tf, labels_batch_tf = tf.train.batch([image_tf, label_tf], batch_size=batch_size, num_threads=num_threads)
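For completeness, a small sketch of consuming the batch with the same session/queue-runner pattern as in your original snippet; since slice_input_producer pairs each path with its label before any shuffling happens, the batch stays aligned:

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    images, batch_labels = sess.run([image_batch_tf, labels_batch_tf])
    print(images.shape)        # (128, 300, 300, 3)
    print(batch_labels.shape)  # (128,)
    coord.request_stop()
    coord.join(threads)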
EDIT: I'm using TensorFlow version 0.10.0rc0
I'm currently trying to get tf.contrib.learn.read_batch_examples working while using a TensorFlow (SKFlow/tf.contrib) Estimator, specifically the LinearClassifier. I create a read_batch_examples op, feeding in a CSV file with tf.decode_csv as the parse_fn parameter and appropriate default records. I then feed that op to my input_fn for fitting the Estimator, but when that's run I receive the following error:
ValueError: Tensor("centered_bias_weight:0", shape=(1,), dtype=float32_ref) must be from the same graph as Tensor("linear/linear/BiasAdd:0", shape=(?, 1), dtype=float32).
I'm confused because neither of those Tensors appears to come from the read_batch_examples op. The code works if I run the op beforehand and then feed the input instead as an array of values. While this workaround exists, it is unhelpful because I am working with large datasets in which I need to batch in my inputs. Currently going over the data with Estimator.fit (currently equivalent to Estimator.partial_fit) in iterations isn't nearly as fast as being able to feed in data as it trains, so having this working is ideal. Any ideas? I'll post the non-functioning code below.
def input_fn(examples_dict):
    continuous_cols = {k: tf.cast(examples_dict[k], dtype=tf.float32)
                       for k in CONTINUOUS_FEATURES}
    categorical_cols = {
        k: tf.SparseTensor(
            indices=[[i, 0] for i in xrange(examples_dict[k].get_shape()[0])],
            values=examples_dict[k],
            shape=[int(examples_dict[k].get_shape()[0]), 1])
        for k in CATEGORICAL_FEATURES}
    feature_cols = dict(continuous_cols)
    feature_cols.update(categorical_cols)
    label = tf.contrib.layers.one_hot_encoding(labels=examples_dict[LABEL],
                                               num_classes=2,
                                               on_value=1,
                                               off_value=0)
    return feature_cols, label

filenames = [...]
csv_headers = [...]  # features and label headers
batch_size = 50
min_after_dequeue = int(num_examples * min_fraction_of_examples_in_queue)
queue_capacity = min_after_dequeue + 3 * batch_size
examples = tf.contrib.learn.read_batch_examples(
    filenames,
    batch_size=batch_size,
    reader=tf.TextLineReader,
    randomize_input=True,
    queue_capacity=queue_capacity,
    num_threads=1,
    read_batch_size=1,
    parse_fn=lambda x: tf.decode_csv(x, [tf.constant([''], dtype=tf.string) for _ in xrange(len(csv_headers))]))

examples_dict = {}
for i, header in enumerate(csv_headers):
    examples_dict[header] = examples[:, i]

categorical_cols = []
for header in CATEGORICAL_FEATURES:
    categorical_cols.append(tf.contrib.layers.sparse_column_with_keys(
        header,
        keys  # Keys for that particular feature, source not shown here
    ))
continuous_cols = []
for header in CONTINUOUS_FEATURES:
    continuous_cols.append(tf.contrib.layers.real_valued_column(header))
feature_columns = categorical_cols + continuous_cols

model = tf.contrib.learn.LinearClassifier(
    model_dir=model_dir,
    feature_columns=feature_columns,
    optimizer=optimizer,
    n_classes=num_classes)

# Above code is ok up to this point
model.fit(input_fn=lambda: input_fn(examples_dict),
          steps=200)  # This line causes the error ****
Any alternatives for batching would be appreciated as well!
I was able to figure out my mistake with the help of the great TensorFlow team! read_batch_examples has to be called within input_fn; otherwise the op has to be run beforehand, as it'll be from a different graph.
Edit
Here is the modified code that functions properly for those who are interested:
def input_fn(file_names, batch_size):
    examples_dict = read_csv_examples(file_names, batch_size)

    # Continuous features
    feature_cols = {k: tf.string_to_number(examples_dict[k], dtype=tf.float32)
                    for k in CONTINUOUS_FEATURES}

    # Categorical features
    feature_cols.update({
        k: tf.SparseTensor(
            indices=[[i, 0] for i in range(examples_dict[k].get_shape()[0])],
            values=examples_dict[k],
            shape=[int(examples_dict[k].get_shape()[0]), 1])
        for k in CATEGORICAL_FEATURES})

    # Change out type for classification/regression
    out_type = tf.int32 if CLASSIFICATION else tf.float32
    label = tf.string_to_number(examples_dict[LABEL], out_type=out_type)

    return feature_cols, label

def read_csv_examples(file_names, batch_size):
    def parse_fn(record):
        record_defaults = [tf.constant([''], dtype=tf.string)] * len(FEATURE_HEADERS)
        return tf.decode_csv(record, record_defaults)

    examples_op = tf.contrib.learn.read_batch_examples(
        file_names,
        batch_size=batch_size,
        reader=tf.TextLineReader,
        parse_fn=parse_fn)

    # Important: convert examples to a dict for ease of use in `input_fn`.
    # Map each header to its respective column (FEATURE_HEADERS order matters!)
    examples_dict_op = {}
    for i, header in enumerate(FEATURE_HEADERS):
        examples_dict_op[header] = examples_op[:, i]

    return examples_dict_op
This code is near minimal for producing a generic input function for your data. Also note that if you would like to pass num_epochs to read_batch_examples, you'll need to do something different for your categorical features (see this answer for details). Disclaimer: I wrote that answer. Hope this helps!
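To tie it back to the original error: with this version every op created by read_batch_examples lives inside the graph that the Estimator builds for fit(), so the call that used to fail becomes (batch size of 50 as in the original snippet):

model.fit(input_fn=lambda: input_fn(file_names, batch_size=50), steps=200)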