Efficient way to load .npz with Tensorflow iterator - python

I have a large .npz numpy training file that I want to read more efficiently. I tried to follow the approach from Tensorflow documentation (https://www.tensorflow.org/guide/datasets#consuming_numpy_arrays):
As an alternative, you can define the Dataset in terms of
tf.placeholder() tensors, and feed the NumPy arrays when you
initialize an Iterator over the dataset.
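For reference, the pattern described there boils down to roughly this minimal sketch (TF 1.x; the shapes and names below are only illustrative, not my real data):

import numpy as np
import tensorflow as tf

features = np.random.rand(1000, 32).astype(np.float32)   # stand-in for the .npz arrays
labels = np.random.randint(0, 10, size=(1000,))

features_ph = tf.placeholder(features.dtype, features.shape)
labels_ph = tf.placeholder(labels.dtype, labels.shape)

dataset = tf.data.Dataset.from_tensor_slices((features_ph, labels_ph)).batch(64)
iterator = dataset.make_initializable_iterator()
next_features, next_labels = iterator.get_next()

with tf.Session() as sess:
    # The arrays are fed once, when the iterator is initialized, instead of at every step.
    sess.run(iterator.initializer,
             feed_dict={features_ph: features, labels_ph: labels})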
However, after implementing the iterator, my model consumes about 2x more memory. Do you have any clues what might be wrong here?
def model(batch_size):
    x = tf.placeholder(tf.float32, [None, IMGSIZE, IMGSIZE, 1])
    y = tf.placeholder(tf.float32, [None, n_landmark * 2])
    z = tf.placeholder(tf.int32, [None, ])

    dataset = tf.data.Dataset.from_tensor_slices((x, y, z)).batch(batch_size)
    iter_ = dataset.make_initializable_iterator()
    InputImage, GroundTruth, GroundTruth_Em = iter_.get_next()

    Ret_dict['x'] = x
    Ret_dict['y'] = y
    Ret_dict['z'] = z
    Ret_dict['iterator'] = iter_

    Conv1a = tf.layers.conv2d(InputImage, 64, 3, 1, ..)
    (...)
def main():
    trainSet = np.load(args.datasetDir)
    Xtrain = trainSet['Image']
    Ytrain = trainSet['Label_1']
    Ytrain_em = trainSet['Label_2']

    with tf.Session() as sess:
        my_model = model(BATCH_SIZE)
        Saver = tf.train.Saver()
        Saver.restore(sess, args.pretrainedModel)

        sess.run(
            [my_model['Optimizer'], my_model['iterator'].initializer],
            feed_dict={my_model['x']: Xtrain,
                       my_model['y']: Ytrain,
                       my_model['z']: Ytrain_em})

Related

Tensorflow: how to save a model before the end of optimization?

I'm using ScipyOptimizerInterface for training a tensorflow model. (tensorflow 1.13.1)
During the process of training, if the loss value is below a threshold, I want the training process to stop and save the model right before the threshold was crossed.
Below is the script that I tried. The idea is to raise an exception to exit optimizer.minimize, then save the model using tf.train.Saver.
However, this does not work, as you can see by comparing the initial loss value with the loss value computed by the saved model: the two values are the same, which indicates it is the initial random model that is saved, not the desired model.
From @Patol75's answer, I understand the best model is not saved because the updated tf.Variables die when the training session is interrupted.
How can the desired model be saved?
import numpy as np
import tensorflow as tf
from tensorflow.contrib.opt import ScipyOptimizerInterface

class test(Exception):
    pass

def construct_graph():
    graph = tf.Graph()
    with graph.as_default():
        x = tf.placeholder('float', shape = [None, 1])
        w = tf.get_variable('w_0', shape = [1, 1], initializer = tf.contrib.layers.xavier_initializer())
        b = tf.get_variable('b_0', shape = [1], initializer = tf.contrib.layers.xavier_initializer())
        y_out = tf.matmul(x, w) + b
        y = tf.placeholder('float', shape = [None, 1])
        loss = tf.reduce_mean(tf.square(y - y_out))
    return graph, x, y, loss

# create example datasets
x_train = np.linspace(1, 6, 100) + 0.1 * np.random.random(100)
x_train = x_train.reshape(100, 1)
y_train = np.sin(x_train)
x_val = np.linspace(6, 11, 100)
x_val = x_val.reshape(100, 1)
y_val = np.sin(x_val)

tf.reset_default_graph()
graph, x, y, loss = construct_graph()
feeddict_train = {x: x_train, y: y_train}
feeddict_val = {x: x_val, y: y_val}

with graph.as_default():
    def step_callbackfun(x):
        global iteration
        train_part, val_part = valfunc_train(x), valfunc_val(x)
        print('%10.5f %10.5f' % (*train_part, *val_part))
        iteration += 1
        if iteration == 5:
            raise test()

    sess = tf.Session()
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    optimizer = ScipyOptimizerInterface(loss, method='l-BFGS-b')
    iteration = 0
    valfunc_train = optimizer._make_eval_func(tensors=loss, session=sess, feed_dict=feeddict_train, fetches=[])
    valfunc_val = optimizer._make_eval_func(tensors=loss, session=sess, feed_dict=feeddict_val, fetches=[])

    print('The initial loss is %f' % sess.run(loss, feeddict_train))
    try:
        optimizer.minimize(sess, feeddict_train, step_callback=step_callbackfun)
    except test:
        saver.save(sess, 'model/model.ckpt')

graph2, x2, y2, loss2 = construct_graph()
with tf.Session(graph=graph2) as sess2:
    feeddict_two = {x2: x_train, y2: y_train}
    sess2.run(tf.global_variables_initializer())
    saver2 = tf.train.Saver()
    saver2.restore(sess2, 'model/model.ckpt')
    loss_val2 = sess2.run(loss2, feeddict_two)
    print('Outside', loss_val2)
Take a look at the doc
Variables subject to optimization are updated in-place at the end of optimization.
This is why interrupting the process the way you do gives you the original value. I have never used TensorFlow, but the keyword loss_callback sounds promising.
A function to be called every time the loss and gradients are computed, with evaluated fetches supplied as positional arguments.
Have a look here.
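For illustration, a rough sketch of what wiring up loss_callback could look like (this only follows the documented minimize() signature and monitors the loss; actually saving the best parameters would still need the variable values to be captured separately, since the tf.Variables are only updated in place at the end of optimization):

# Rough sketch (untested): monitor the loss via loss_callback.
best_loss = [np.inf]

def loss_callbackfun(loss_value):
    # Called every time the loss and gradients are computed; loss_value is
    # the evaluated `loss` tensor passed through `fetches` below.
    if loss_value < best_loss[0]:
        best_loss[0] = loss_value
        print('New best loss: %f' % loss_value)

optimizer.minimize(sess, feeddict_train,
                   fetches=[loss],
                   loss_callback=loss_callbackfun,
                   step_callback=step_callbackfun)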

The Way to Connect Multiple Neural Networks in a Series (Not Parallel)

I wonder whether there is any way to connect multiple NNs in series in TensorFlow.
For example, feed input features into a DNN structure, and use its output values as the input to an RNN structure.
Example code:
import tensorflow as tf
import numpy as np

a = 50  # batch_size
b = 60  # sequence in RNN
c = 40  # features
d = 6   # label classes
rnn_size = b

x_data = np.random.rand(a, b, c)
y_data = np.random.randint(0, high=d, size=[a, 1])

tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=[None, b, c])
Y = tf.placeholder(tf.float32, shape=[None, d])
X = tf.transpose(X, (1, 0, 2))
X = tf.reshape(X, (-1, c))
X = tf.split(X, b)

hidden_units = [40, 20, 10]

# DNN Structure
dnn = []
for i in range(len(hidden_units)):
    if i == 0:
        T = X
    else:
        T = dnn[-1]
    dnn.append(tf.layers.dense(T, hidden_units[i], activation=tf.nn.relu,
                               kernel_initializer=tf.contrib.layers.xavier_initializer()))

# RNN Structure
rnn = {'w': tf.Variable(tf.random_normal([rnn_size, d], stddev = 0.01), dtype=tf.float32),
       'b': tf.Variable(tf.random_normal([d], stddev = 0.01), dtype=tf.float32)}
cell = tf.nn.rnn_cell.BasicLSTMCell(rnn_size)
outputs, states = tf.contrib.rnn.static_rnn(cell, dnn[-1], dtype=tf.float32)
output = tf.matmul(outputs[-1], rnn['w']) + rnn['b']

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=output))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
correct = tf.equal(tf.argmax(output, 1), tf.argmax(cost, 1))
acc = tf.reduce_mean(tf.cast(correct, tf.float32))

# Run Session
sess = tf.Session()
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
_, c = sess.run([optimizer, cost], feed_dict={X: x_data, Y: tf.Session().run(tf.one_hot(y_data), d)})
print('Accuracy: ', sess.run(acc, feed_dict={X: x_data, Y: tf.Session().run(tf.one_hot(y_data), d)}))
When I run this code, there is an error raised:
File "C:\Anaconda3\Lib\site-packages\tensorflow\python\layers\core.py", line 250, in dense
dtype=inputs.dtype.base_dtype,
AttributeError: 'list' object has no attribute 'dtype'
It seems to be related to the type of 'dnn[-1]'.
Is there a connective function or data type controller for the connection of the neural networks?
I've finally solved the problem.
The cause of the error was a bit ambiguous, but X becomes a Python 'list' after running 'tf.split'.
I then generated a list of DNN structures with the same length as the sequence, as follows:
seq = []
for i in range(b):
    ### dnn structure for i-th array of split ###
    seq.append(dnn_structure)
After tuning some of the code, the whole thing worked well.
Thanks for your attention :)
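For what it's worth, a minimal sketch of that idea might look like the following (illustrative only; the layer names and the weight sharing via reuse are my own assumptions, not the original poster's code, and it assumes X is the list produced by tf.split above):

# Sketch: run each element of the split input through shared dense layers,
# then feed the resulting list of per-step tensors to static_rnn.
seq = []
for step_input in X:                            # each element has shape [batch, c]
    h = step_input
    for j, units in enumerate(hidden_units):
        h = tf.layers.dense(h, units, activation=tf.nn.relu,
                            name='dnn_layer_%d' % j,
                            reuse=tf.AUTO_REUSE)  # share weights across time steps
    seq.append(h)

cell = tf.nn.rnn_cell.BasicLSTMCell(rnn_size)
outputs, states = tf.contrib.rnn.static_rnn(cell, seq, dtype=tf.float32)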

Tensorflow interactive prediction using dataset API

I have created a Tensorflow model which uses the Dataset API in order to feed the data into the network.
After the training phase, I would like to restore this model and to perform inference on it once in a while.
Currently I am re-initializing the dataset iterator each time, but I'm wondering if there is an alternative way.
Moreover, at training time my dataset contains x and y data, while at prediction time I only have x. As a temporary solution I am providing a fake y, but again, this does not seem like the best solution.
Here is pseudocode of what I'm doing:
#### NETWORK
input_x = tf.placeholder(tf.int32, [None, None], name="input_x")
input_y = tf.placeholder(tf.int32, [None, 2], name="input_y")

dataset = tf.data.Dataset.from_tensor_slices((input_x, input_y))
iterator = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes)
dataset_init_op = iterator.make_initializer(dataset, name='dataset_init')
x_data, y_data = iterator.get_next()

output = tf.Variable(x_data, name='output')
.....

### INFERENCE
while True:
    x = new_input

    x_operation = session.graph.get_operation_by_name("input_x").outputs[0]
    y_operation = session.graph.get_operation_by_name("input_y").outputs[0]
    dataset_operation = session.graph.get_operation_by_name("dataset_init")
    output_operation = session.graph.get_operation_by_name("output").outputs[0]

    fake_y = np.array([[0, 0]])
    dic = {x_operation: x, y_operation: fake_y}

    session.run(dataset_operation, feed_dict=dic)
    prediction = session.run(output_operation)
Thank you for your help

How to use dataset in TensorFlow session for training

I would like to perform image classification on our own large image library (millions of labeled images) with TensorFlow. I'm new to Stack Overflow, Python, and TensorFlow. I worked through a few tutorials (MNIST etc.) and got to the point where I was able to prepare a TensorFlow dataset from a dictionary containing the absolute paths to the images and the corresponding labels. However, I'm stuck at the point of using the dataset in a TensorFlow session. Here is my (example) code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
import time
import mymodule  # my module to read the images and labels
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.contrib.data import Iterator

beginTime = time.time()
batch_size = 100
learning_rate = 0.005
max_steps = 2
NUM_CLASSES = 25

def input_parser(img_path, label):
    one_hot = tf.one_hot(label, NUM_CLASSES)
    img_file = tf.read_file(img_path)
    img_decoded = tf.image.decode_jpeg(img_file, channels=3)
    return img_decoded, one_hot

# Import training data (returns the dictionary with paths and labels)
train_dict = mymodule.getFileMap(labelList, imageList)

# Import test data
test_dict = mymodule.getFileMap(labelList, imageList)

# Get train data
train_file_list, train_label_list = get_file_label_list(train_dict)
train_images_tensor = ops.convert_to_tensor(train_file_list, dtype=dtypes.string)
train_labels_tensor = ops.convert_to_tensor(train_label_list, dtype=dtypes.int64)

# Get test data
test_file_list, test_label_list = get_file_label_list(test_dict)
test_images_tensor = ops.convert_to_tensor(test_file_list, dtype=dtypes.string)
test_labels_tensor = ops.convert_to_tensor(test_label_list, dtype=dtypes.int64)

# Create TensorFlow Dataset objects
train_data = tf.data.Dataset.from_tensor_slices((train_images_tensor, train_labels_tensor))
test_data = tf.data.Dataset.from_tensor_slices((test_images_tensor, test_labels_tensor))

# Transform the datasets so that they contain decoded images
# and one-hot vector labels
train_data = train_data.map(input_parser)
test_data = test_data.map(input_parser)

# Batching --> How to do it right?
#train_data = train_data.batch(batch_size = 100)
#test_data = test_data.batch(batch_size = 100)

# Define input placeholders
image_size = 990*990*3
images_placeholder = tf.placeholder(tf.float32, shape=[None, image_size])
labels_placeholder = tf.placeholder(tf.int64, shape=[None])

# Define variables (these are the values we want to optimize)
weights = tf.Variable(tf.zeros([image_size, NUM_CLASSES]))
biases = tf.Variable(tf.zeros([NUM_CLASSES]))

# Define the classifier's result
logits = tf.matmul(images_placeholder, weights) + biases

# Define the loss function
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels_placeholder))

# Define the training operation
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

# Operation comparing prediction with true label
correct_prediction = tf.equal(tf.argmax(logits, 1), labels_placeholder)

# Operation calculating the accuracy of our predictions
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Create TensorFlow Iterator object
iterator = Iterator.from_structure(train_data.output_types,
                                   train_data.output_shapes)
next_element = iterator.get_next()

# Create two initialization ops to switch between the datasets
train_init_op = iterator.make_initializer(train_data)
test_init_op = iterator.make_initializer(test_data)

with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())
    sess.run(train_init_op)

    for _ in range(10):
        try:
            elem = sess.run(next_element)
            print(elem)
        except tf.errors.OutOfRangeError:
            print("End of training dataset.")
            break
Following this and this tutorial, I could not solve the problem of how to use the (image and label) dataset in a TensorFlow session for training. I was able to print out the dataset by iterating through it, but wasn't able to use it for learning.
I don't understand how to access the images and labels separately after they have been merged in the train_data = tf.data.Dataset.from_tensor_slices((train_images_tensor, train_labels_tensor)) operation, as required by the 2nd tutorial. Also, I don't know how to implement batching correctly.
What I want to do in the session is basically this (from the 2nd tutorial):
# Generate input data batch
indices = np.random.choice(data_sets['images_train'].shape[0], batch_size)
images_batch = data_sets['images_train'][indices]
labels_batch = data_sets['labels_train'][indices]

# Periodically print out the model's current accuracy
if i % 100 == 0:
    train_accuracy = sess.run(accuracy, feed_dict={
        images_placeholder: images_batch, labels_placeholder: labels_batch})
    print('Step {:5d}: training accuracy {:g}'.format(i, train_accuracy))

# Perform a single training step
sess.run(train_step, feed_dict={images_placeholder: images_batch,
                                labels_placeholder: labels_batch})

# After finishing the training, evaluate on the test set
test_accuracy = sess.run(accuracy, feed_dict={
    images_placeholder: data_sets['images_test'],
    labels_placeholder: data_sets['labels_test']})
print('Test accuracy {:g}'.format(test_accuracy))

endTime = time.time()
print('Total time: {:5.2f}s'.format(endTime - beginTime))
If anyone can tell me how to access the images and labels in the dataset separately and use them for training, I would be really thankful. Also, a tip on where and how to do the batching would be appreciated.
Thank you.
In your code, next_element is a tuple of two tensors, matching the structure of your datasets: i.e. it is a tuple whose first element is an image, and second element is a label. To access the individual tensors, you can do the following:
next_element = iterator.get_next()
next_image = next_element[0]
next_label = next_element[1]
# Or, in a single line:
next_image, next_label = iterator.get_next()
To batch a tf.data.Dataset, you can use the Dataset.batch() transformation. Your commented-out code for this should simply work:
train_data = train_data.batch(batch_size=100)
test_data = test_data.batch(batch_size=100)
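Putting the two pieces together, a rough sketch of how the batched iterator output could drive the training ops directly, instead of the feed_dict placeholders, might look like this (illustrative only; it assumes every decoded image really is 990x990x3, and converts the one-hot labels back to class indices for the sparse loss):

# Rough sketch: batch the mapped datasets and feed the model from the iterator.
train_batched = train_data.batch(batch_size)   # train_data already maps input_parser
iterator = Iterator.from_structure(train_batched.output_types,
                                   train_batched.output_shapes)
next_images, next_labels = iterator.get_next()
train_init_op = iterator.make_initializer(train_batched)

# Flatten the image batch to match the [image_size, NUM_CLASSES] weight matrix.
# (Assumes every decoded image is 990x990x3.)
flat_images = tf.reshape(tf.cast(next_images, tf.float32), [-1, image_size])
logits = tf.matmul(flat_images, weights) + biases
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=tf.argmax(next_labels, 1)))  # one-hot back to class indices
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_init_op)
    for step in range(max_steps):
        _, loss_value = sess.run([train_step, loss])
        print('Step {}: loss {:g}'.format(step, loss_value))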

Using Datasets to consume Numpy arrays

I'm trying to use Numpy arrays within a graph, feeding in the data using a Dataset.
I've read through this, but can't quite make sense of how I should feed placeholder arrays within a Dataset.
If we take a simple example, I start with:
A = np.arange(4)
B = np.arange(10, 14)

a = tf.placeholder(tf.float32, [None])
b = tf.placeholder(tf.float32, [None])
c = tf.add(a, b)

with tf.Session() as sess:
    for i in range(10):
        x = sess.run(c, feed_dict={a: A, b: B})
        print(i, x)
Then I attempt to modify it to use a Dataset as follows:
A = np.arange(4)
B = np.arange(10, 14)

a = tf.placeholder(tf.int32, A.shape)
b = tf.placeholder(tf.int32, B.shape)
c = tf.add(a, b)

dataset = tf.data.Dataset.from_tensors((a, b))
iterator = dataset.make_initializable_iterator()

with tf.Session() as sess3:
    sess3.run(tf.global_variables_initializer())
    sess3.run(iterator.initializer, feed_dict={a: A, b: B})
    for i in range(10):
        x = sess3.run(c)
        print(i, x)
If I run this I get 'InvalidArgumentError: You must feed a value for placeholder tensor ...'
The code until the for loop mimics the example here, but I don't get how I can then employ the placeholders a & b without supplying a feed_dict to every call to sess3.run(c) [which would be expensive]. I suspect I have to somehow use the iterator, but I don't understand how.
Update
It appears I oversimplified too much when picking the example. What I am really trying to do is use Datasets when training a neural network, or similar.
For a more sensible question: how would I go about using Datasets to feed the placeholders in the code below (imagine X and Y_true are much longer...)? The documentation takes me to the point where the loop starts, and then I'm not sure.
X = np.arange(8.).reshape(4, 2)
Y_true = np.array([0, 0, 1, 1])

x = tf.placeholder(tf.float32, [None, 2], name='x')
y_true = tf.placeholder(tf.float32, [None], name='y_true')

w = tf.Variable(np.random.randn(2, 1), name='w', dtype=tf.float32)
y = tf.squeeze(tf.matmul(x, w), name='y')

loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=y_true, logits=y),
    name='x_entropy')

# set optimiser
optimiser = tf.train.AdamOptimizer().minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(100):
        _, loss_out = sess.run([optimiser, loss], feed_dict={x: X, y_true: Y_true})
        print(i, loss_out)
Trying the following only gets me an InvalidArgumentError:
X = np.arange(8.).reshape(4, 2)
Y_true = np.array([0, 0, 1, 1])

x = tf.placeholder(tf.float32, [None, 2], name='x')
y_true = tf.placeholder(tf.float32, [None], name='y_true')

dataset = tf.data.Dataset.from_tensor_slices((x, y_true))
iterator = dataset.make_initializable_iterator()

w = tf.Variable(np.random.randn(2, 1), name='w', dtype=tf.float32)
y = tf.squeeze(tf.matmul(x, w), name='y')

loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=y_true, logits=y),
    name='x_entropy')

# set optimiser
optimiser = tf.train.AdamOptimizer().minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(iterator.initializer, feed_dict={x: X,
                                              y_true: Y_true})
    for i in range(100):
        _, loss_out = sess.run([optimiser, loss])
        print(i, loss_out)
Use iterator.get_next() to get elements from the Dataset, like:
next_element = iterator.get_next()
then initialize the iterator:
sess.run(iterator.initializer, feed_dict={a: A, b: B})
and finally get the values from the Dataset:
value = sess.run(next_element)
EDIT:
The code above just returns the elements from the Dataset. The Dataset API is intended to serve features and labels for an input_fn, therefore all additional computations for preprocessing should be performed within the Dataset API. If you want to add elements, you should define a function that is applied to the elements, like:
def add_fn(exp1, exp2):
    return tf.add(exp1, exp2)
and then you can map this function onto your Dataset:
dataset = dataset.map(add_fn)
Complete code example:
A = np.arange(4)
B = np.arange(10, 14)

a = tf.placeholder(tf.int32, A.shape)
b = tf.placeholder(tf.int32, B.shape)
#c = tf.add(a, b)

def add_fn(exp1, exp2):
    return tf.add(exp1, exp2)

dataset = tf.data.Dataset.from_tensors((a, b))
dataset = dataset.map(add_fn)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()

with tf.Session() as sess:
    sess.run(iterator.initializer, feed_dict={a: A, b: B})
    # just one element in the dataset
    x = sess.run(next_element)
    print(x)
The problem in your more complicated example is that you use the same tf.placeholder() nodes as the input to both Dataset.from_tensor_slices() (which is correct) and the network itself (which causes the InvalidArgumentError). Instead, as J.E.K points out in their answer, you should use iterator.get_next() as the input to your network, as follows (note that there are a couple of other fixes I added to make the code run as-is):
X = np.arange(8.).reshape(4, 2)
Y_true = np.array([0, 0, 1, 1])

x = tf.placeholder(tf.float32, [None, 2], name='x')
y_true = tf.placeholder(tf.float32, [None], name='y_true')

dataset = tf.data.Dataset.from_tensor_slices((x, y_true))

# You will need to repeat the input (which has 4 elements) to be able to take
# 100 steps.
dataset = dataset.repeat()

iterator = dataset.make_initializable_iterator()

# Use `iterator.get_next()` to create tensors that will consume values from the
# dataset.
x_next, y_true_next = iterator.get_next()

w = tf.Variable(np.random.randn(2, 1), name='w', dtype=tf.float32)

# The `x_next` tensor is a vector (i.e. a row of `X`), so you will need to
# convert it to a matrix or apply batching in the dataset to make it work with
# `tf.matmul()`
x_next = tf.expand_dims(x_next, 0)

y = tf.squeeze(tf.matmul(x_next, w), name='y')  # Use `x_next` here.

loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=y_true_next, logits=y),  # Use `y_true_next` here.
    name='x_entropy')

# set optimiser
optimiser = tf.train.AdamOptimizer().minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(iterator.initializer, feed_dict={x: X,
                                              y_true: Y_true})
    for i in range(100):
        _, loss_out = sess.run([optimiser, loss])
        print(i, loss_out)
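As an aside (my own variation, not part of the answer above), the batching alternative mentioned in the comment would look roughly like this; each element then already comes out as a [batch, 2] matrix, so the expand_dims step is no longer needed:

# Variation: batch the dataset instead of expanding dims on a single row.
dataset = tf.data.Dataset.from_tensor_slices((x, y_true))
dataset = dataset.repeat().batch(4)            # each element is now a [4, 2] / [4] pair
iterator = dataset.make_initializable_iterator()
x_next, y_true_next = iterator.get_next()      # x_next already has shape [4, 2]

y = tf.squeeze(tf.matmul(x_next, w), name='y_batched')
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true_next, logits=y),
    name='x_entropy_batched')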
