Using Datasets to consume Numpy arrays - python

I'm trying to use Numpy arrays within a graph, feeding in the data using a Dataset.
I've read through this, but can't quite make sense of how I should feed placeholder arrays within a Dataset.
If we take a simple example, I start with:
import numpy as np
import tensorflow as tf
A = np.arange(4)
B = np.arange(10, 14)
a = tf.placeholder(tf.float32, [None])
b = tf.placeholder(tf.float32, [None])
c = tf.add(a, b)
with tf.Session() as sess:
    for i in range(10):
        x = sess.run(c, feed_dict={a: A, b: B})
        print(i, x)
Then I attempt to modify it to use a Dataset as follows:
A = np.arange(4)
B = np.arange(10, 14)
a = tf.placeholder(tf.int32, A.shape)
b = tf.placeholder(tf.int32, B.shape)
c = tf.add(a, b)
dataset = tf.data.Dataset.from_tensors((a, b))
iterator = dataset.make_initializable_iterator()
with tf.Session() as sess3:
    sess3.run(tf.global_variables_initializer())
    sess3.run(iterator.initializer, feed_dict={a: A, b: B})
    for i in range(10):
        x = sess3.run(c)
        print(i, x)
If I run this I get 'InvalidArgumentError: You must feed a value for placeholder tensor ...'
The code up to the for loop mimics the example here, but I don't see how I can then use the placeholders a and b without supplying a feed_dict to every call to sess3.run(c) (which would be expensive). I suspect I have to somehow use the iterator, but I don't understand how.
Update
It appears I oversimplified too much when picking the example. What I am really trying to do is use Datasets when training a neural network, or similar.
For a more sensible question, how would I go about using Datasets to feed the placeholders in the code below (though imagine X and Y_true are much longer...)? The documentation takes me to the point where the loop starts, and then I'm not sure.
X = np.arange(8.).reshape(4, 2)
Y_true = np.array([0, 0, 1, 1])
x = tf.placeholder(tf.float32, [None, 2], name='x')
y_true = tf.placeholder(tf.float32, [None], name='y_true')
w = tf.Variable(np.random.randn(2, 1), name='w', dtype=tf.float32)
y = tf.squeeze(tf.matmul(x, w), name='y')
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=y_true, logits=y),
    name='x_entropy')
# set optimiser
optimiser = tf.train.AdamOptimizer().minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(100):
        _, loss_out = sess.run([optimiser, loss], feed_dict={x: X, y_true: Y_true})
        print(i, loss_out)
Trying the following only gets me an InvalidArgumentError:
X = np.arange(8.).reshape(4, 2)
Y_true = np.array([0, 0, 1, 1])
x = tf.placeholder(tf.float32, [None, 2], name='x')
y_true = tf.placeholder(tf.float32, [None], name='y_true')
dataset = tf.data.Dataset.from_tensor_slices((x, y_true))
iterator = dataset.make_initializable_iterator()
w = tf.Variable(np.random.randn(2, 1), name='w', dtype=tf.float32)
y = tf.squeeze(tf.matmul(x, w), name='y')
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=y_true, logits=y),
    name='x_entropy')
# set optimiser
optimiser = tf.train.AdamOptimizer().minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(iterator.initializer, feed_dict={x: X,
                                              y_true: Y_true})
    for i in range(100):
        _, loss_out = sess.run([optimiser, loss])
        print(i, loss_out)

Use iterator.get_next() to get elements from the Dataset:
next_element = iterator.get_next()
then initialize the iterator:
sess.run(iterator.initializer, feed_dict={a: A, b: B})
and finally get the values from the Dataset:
value = sess.run(next_element)
EDIT:
The code above just returns the elements from the Dataset. The Dataset API is intended to serve features and labels to an input_fn, so all additional preprocessing computations should be performed within the Dataset API. If you want to add the elements, you should define a function that is applied to them, like:
def add_fn(exp1, exp2):
    return tf.add(exp1, exp2)
and then you can map this function over your Dataset:
dataset = dataset.map(add_fn)
Complete code example:
A = np.arange(4)
B = np.arange(10, 14)
a = tf.placeholder(tf.int32, A.shape)
b = tf.placeholder(tf.int32, B.shape)
#c = tf.add(a, b)
def add_fn(exp1, exp2):
    return tf.add(exp1, exp2)
dataset = tf.data.Dataset.from_tensors((a, b))
dataset = dataset.map(add_fn)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
with tf.Session() as sess:
    sess.run(iterator.initializer, feed_dict={a: A, b: B})
    # just one element in the dataset
    x = sess.run(next_element)
    print(x)
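As a side note (my addition, not part of the original answer), if you want one element per array entry rather than a single 4-element tuple, a minimal sketch using Dataset.from_tensor_slices with the same A, B and add_fn would loop until the iterator is exhausted:
dataset = tf.data.Dataset.from_tensor_slices((a, b))
dataset = dataset.map(add_fn)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
with tf.Session() as sess:
    sess.run(iterator.initializer, feed_dict={a: A, b: B})
    while True:
        try:
            print(sess.run(next_element))  # prints 10, 12, 14, 16
        except tf.errors.OutOfRangeError:
            break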

The problem in your more complicated example is that you use the same tf.placeholder() nodes both as the input to Dataset.from_tensor_slices() (which is correct) and as the input to the network itself (which causes the InvalidArgumentError). Instead, as J.E.K points out in their answer, you should use iterator.get_next() as the input to your network, as follows (note that there are a couple of other fixes I added to make the code run as-is):
X = np.arange(8.).reshape(4, 2)
Y_true = np.array([0, 0, 1, 1])
x = tf.placeholder(tf.float32, [None, 2], name='x')
y_true = tf.placeholder(tf.float32, [None], name='y_true')
dataset = tf.data.Dataset.from_tensor_slices((x, y_true))
# You will need to repeat the input (which has 4 elements) to be able to take
# 100 steps.
dataset = dataset.repeat()
iterator = dataset.make_initializable_iterator()
# Use `iterator.get_next()` to create tensors that will consume values from the
# dataset.
x_next, y_true_next = iterator.get_next()
w = tf.Variable(np.random.randn(2, 1), name='w', dtype=tf.float32)
# The `x_next` tensor is a vector (i.e. a row of `X`), so you will need to
# convert it to a matrix or apply batching in the dataset to make it work with
# `tf.matmul()` (see the batching sketch after this block).
x_next = tf.expand_dims(x_next, 0)
y = tf.squeeze(tf.matmul(x_next, w), name='y') # Use `x_next` here.
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=y_true_next, logits=y),  # Use `y_true_next` here.
    name='x_entropy')
# set optimiser
optimiser = tf.train.AdamOptimizer().minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(iterator.initializer, feed_dict={x: X,
                                              y_true: Y_true})
    for i in range(100):
        _, loss_out = sess.run([optimiser, loss])
        print(i, loss_out)
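Batching sketch (my addition, a rough illustration rather than part of the original answer): instead of taking one row at a time and calling tf.expand_dims(), you could batch the dataset, in which case x_next already has shape [batch_size, 2] and works with tf.matmul() directly:
dataset = tf.data.Dataset.from_tensor_slices((x, y_true))
dataset = dataset.repeat().batch(4)             # each element is now a batch of 4 rows
iterator = dataset.make_initializable_iterator()
x_next, y_true_next = iterator.get_next()       # shapes: [4, 2] and [4]
w = tf.Variable(np.random.randn(2, 1), name='w', dtype=tf.float32)
y = tf.squeeze(tf.matmul(x_next, w), name='y')  # no tf.expand_dims needed
The rest of the loss, optimiser and session code stays the same.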


Augment size of a tf.variable

I am training an autoencoder by giving 2 placeholders that store the following:
x1 = [x1]
X = [x1,x2,x3...xn]
It holds that:
y1 = W*x1 + b_encoding1
Therefore, I have a variable named b_encoder1 (the b)
(When I print it I get: <tf.Variable 'b_encoder1:0' shape=(10,) dtype=float32_ref>)
But it also holds that:
Y = W*X + b_encoding1
The size of the second b_encoding1 has to be (10, n) instead of (10,). How can I augment it and pass it in tensorflow?
Y = tf.compat.v1.nn.xw_plus_b(X, W1, b_encoder1, name='Y')
The whole code looks like this:
x1 = tf.compat.v1.placeholder( tf.float32, [None,input_shape], name = 'x1')
X = tf.compat.v1.placeholder( tf.float32, [None,input_shape,sp], name = 'X')
W1 = tf.Variable(tf.initializers.GlorotUniform()(shape=[input_shape,code_length]),name='W1')
b_encoder1 = tf.compat.v1.get_variable(name='b_encoder1',shape=[code_length],initializer=tf.compat.v1.initializers.zeros(), use_resource=False)
K = tf.Variable(tf.initializers.GlorotUniform()(shape=[code_length,code_length]),name='K')
b_decoder1 = tf.compat.v1.get_variable(name='b_decoder1',shape=[input_shape],initializer=tf.compat.v1.initializers.zeros(), use_resource=False)
y1 = tf.compat.v1.nn.xw_plus_b(x1, W1, b_encoder1, name='y1')
Y = tf.compat.v1.nn.xw_plus_b(X, W1, b_encoder1, name='Y')
I also declare the loss function and so on and then train with:
with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    for epoch_i in range(epochs):
        for batch_i in range(number_of_batches):
            batch_data = getBatch(shuffled_data, batch_i, batch_size)
            sess.run(optimizer, feed_dict={x1: batch_data[:,:,0], X: batch_data})
        train_loss = sess.run(loss, feed_dict={x1: aug_data[:,:,0], X: aug_data})
        print(epoch_i, train_loss)
You can consider X as a batch of x. X can take in an arbitrary number of samples:
import tensorflow as tf
import numpy as np
X = tf.placeholder(shape=(None, 100), dtype=tf.float32)
W = tf.get_variable('kernel', [100,10])
b = tf.get_variable('bias',[10])
Y = tf.nn.xw_plus_b(X, W,b, name='Y')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # tf version < 1.13
    out = sess.run(Y, {X: np.random.rand(128, 100)})  # here n=128
Note that the bias b still has shape (10,) regardless of the value of n; it is broadcast across the batch dimension.
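A quick NumPy sketch of that broadcasting behaviour (my illustration, not from the original answer):
import numpy as np
b = np.arange(10)           # shape (10,)
XW = np.zeros((128, 10))    # a batch of 128 samples after the matmul
print((XW + b).shape)       # (128, 10) -- b is broadcast over the batch dimension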
Please try:
b_encoding1 = tf.expand_dims(b_encoding1, axis = 1)

Tensorflow CNN test data splitting and array sizing problems

I've tried to figure things out myself and not fall back on actually creating an account here, but as a self-taught beginner I've reached a wall with this code.
I'm having two major issues, besides optimizing the net architecture once everything is working:
Every time I've tried to create a new dataset for a test batch I've run into an 'xTensor is not a Tensor' error and couldn't run a session through it, unlike with the iterator, which works just fine. I'm loading custom data with directory names as labels, with no manually created train and test directories. I'm probably missing a proper method for tf.
I can't work around the current first error I get, which is:
ValueError: Cannot feed value of shape (100,) for Tensor 'Placeholder_1:0', which has shape '(?, 1)' when feeding feed_dict={y: batch_y}. I've tried some of the solutions posted on SO but couldn't get it to work.
I'm pasting the whole thing; the ########## lines mark the problem-triggering zones at the very bottom, in the session.
import tensorflow as tf
import numpy as np
import os
# load custom imageset directory
data_path = r"..\datasets\images\flowers"
# setup hypervariables for labels and images format
n_classes = 5
img_width = 64
img_length = 64
channels = 3
# setup hypervariables for network
learning_rate = 0.0001
epochs = 2
batch_size = 100
drop_rate = 0.6
imagepaths = list()
labels = list()
label = 0
classes = sorted(os.walk(data_path).__next__()[1])
# List each sub-directory (the classes)
for c in classes:
    c_dir = os.path.join(data_path, c)
    walk = os.walk(c_dir).__next__()
    # Add each image to the training set
    for sample in walk[2]:
        imagepaths.append(os.path.join(c_dir, sample))
        labels.append(label)
    label += 1
total_input = len(labels)
# Convert to Tensor
imagepaths = tf.convert_to_tensor(imagepaths, dtype=tf.string)
labels = tf.convert_to_tensor(labels, dtype=tf.int32)
# Build a TF Queue, shuffle data
dataset = tf.data.Dataset.from_tensor_slices((imagepaths, labels))
# read, decode, resize and normalize images on RGB range
def parse(imagepath, label):
    image = tf.read_file(imagepath)
    image = tf.image.decode_jpeg(image, channels=channels)
    image = tf.image.resize_images(image, [img_length, img_width])
    image = image * 1.0/255
    return image, label
dataset = dataset.map(parse)
dataset = dataset.shuffle(buffer_size=batch_size*10)
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
next_batch = iterator.get_next()
# hypervariables for layers' output size
K = 16
L = 32
M = 200
x = tf.placeholder(tf.float32, [None, 4326])
x_shaped = tf.reshape(x, [-1, img_length, img_width, 3])
y = tf.placeholder(tf.float32, [None, 1])
# weight, bias with stride size and activation method after convolution for layer 1
W1 = tf.Variable(tf.truncated_normal([5, 5, 3, K], stddev=0.03))
b1 = tf.Variable(tf.truncated_normal([K], stddev=0.01))
stride = 1
y1 = tf.nn.relu(tf.nn.conv2d(x_shaped, W1, strides=[1, stride, stride, 1], padding='SAME') + b1)
# weight, bias with stride size and activation method after convolution for layer 2
W2 = tf.Variable(tf.truncated_normal([5, 5, K, L], stddev=0.03))
b2 = tf.Variable(tf.truncated_normal([L], stddev=0.01))
stride = 2 # output is 14x14
y2 = tf.nn.relu(tf.nn.conv2d(y1, W2, strides=[1, stride, stride, 1], padding='SAME') + b2)
yflat = tf.reshape(y2, [-1, 7 * 7 * L])
W3 = tf.Variable(tf.truncated_normal([7 * 7 * L, M], stddev=0.1))
b3 = tf.Variable(tf.truncated_normal([M], stddev=0.01))
y3 = tf.nn.relu(tf.matmul(yflat, W3) + b3)
W4 = tf.Variable(tf.truncated_normal([M, 10], stddev=0.1))
b4 = tf.Variable(tf.truncated_normal([10], stddev=0.01))
ylogits = tf.matmul(y3, W4) + b4
y_ = tf.nn.softmax(ylogits)
# add cross entropy for back prop
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=ylogits, labels=y_))
# add an optimiser for back prop
optimiser = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
# define an accuracy assessment operation
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    ########## temporary solution for test_x, test_y
    test_x, test_y = sess.run(next_batch)
    total_batch = int(total_input / batch_size)
    # define the iterator for the network
    for epoch in range(epochs):
        avg_cost = 0
        for i in range(total_batch):
            batch_x, batch_y = sess.run(next_batch)
            ########## ValueError: Cannot feed value of shape (100,) for Tensor 'Placeholder_1:0' -> y: batch_y
            _, c = sess.run([optimiser, cross_entropy], feed_dict={x_shaped: batch_x, y: batch_y})
            avg_cost += c / total_batch
        test_acc = sess.run(accuracy, feed_dict={x: test_x, y: test_y})
        print("Epoch:", (epoch + 1), "cost =", "{:.3f}".format(avg_cost), " test accuracy: {:.3f}".format(test_acc))
        summary = sess.run(merged, feed_dict={x: test_x, y: test_y})
    print("\nTraining complete!")
    print(sess.run(accuracy, feed_dict={x: test_x, y: test_y}))
Are you sure that this part:
_, c = sess.run([optimiser, cross_entropy], feed_dict={x_shaped: batch_x, y: batch_y})
doesn't have to be:
_, c = sess.run([optimiser, cross_entropy], feed_dict={x: batch_x, y: batch_y})
Furthermore, you have a batch size of 100; the data in the array is right, but its shape is not complete.
What you have (dummy example):
np.zeros((100,)).shape
>>> (100,)
Here the 100 matches the '?' of the required shape '(?, 1)'; the trailing 1 can easily be added, and it often happens that NumPy does not do this for you. See the following code:
np.expand_dims(np.zeros((100,)), axis=-1).shape
>>> (100, 1)
axis=-1 stands for the last axis; you basically tell NumPy to add a dimension at the end. This does not affect the data itself, only the shape of the array. So your code should be:
_, c = sess.run([optimiser, cross_entropy], feed_dict={x_shaped: batch_x, y:np.expand_dims(batch_y, axis=-1)})
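As a side note (my addition), reshaping gives the same result:
np.zeros((100,)).reshape(-1, 1).shape
>>> (100, 1)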

ValueError: Cannot feed value of shape (2,) for Tensor 'Placeholder:0', which has shape '(1, 2)'

I'm new to tensorflow and trying to create a linear regression model. My code is
import numpy as np
import tensorflow as tf
bias = np.ones((50, 1))
trainX = np.arange(0, 10, 0.2).reshape(50, 1)
trainY = (3 * trainX + np.random.rand(trainX.shape[0]) * 20 - 10) + 10
trainX = np.append(bias, trainX, axis=1)
X = tf.placeholder("float", shape=(1, 2))
Y = tf.placeholder("float")
w = tf.Variable([[0.0, 0.0]], name="weights")
model = tf.matmul(X, tf.transpose(w))
cost = tf.pow((Y - model), 2)
init = tf.global_variables_initializer()
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
with tf.Session() as sess:
    sess.run(init)
    for i in range(50):
        for (x, y) in zip(trainX, trainY):
            sess.run(train_op, feed_dict={X: x, Y: y})
    print(sess.run(w))
I don't know what I'm doing wrong. I think the problem is in initializing the weights. The idea is simply to predict the two weight constants that best fit a line to the data.
I'm getting this error in a Colaboratory notebook.
Several things are conspiring here. I assume that you want the shape of trainY to be (50,), but since you add the noise only after reshaping, broadcasting causes trainX + np.random.rand(trainX.shape[0]) to have shape (50, 50). If you change the initial part of the code to
bias = np.ones(50)
trainX = np.arange(0, 10, 0.2)
trainY = (3 * trainX + np.random.rand(trainX.shape[0]) * 20 - 10) + 10
trainX = np.vstack([bias, trainX]).T
and ensure that the shapes are set up properly through
sess.run(train_op, feed_dict={X: x.reshape((1, 2)), Y: y})
then your code will run.
However, since you are only dealing with the inner product of 2-dimensional vectors, you can avoid the reshaping altogether by instead simply using tf.tensordot:
X = tf.placeholder("float", shape=(2,))
Y = tf.placeholder("float")
w = tf.Variable([0.0, 0.0], name="weights")
model = tf.tensordot(X, w, 1)
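With those definitions (and the same cost, init and train_op rebuilt on top of the new model), the training loop can feed each 1-D sample directly; a minimal sketch:
with tf.Session() as sess:
    sess.run(init)
    for i in range(50):
        for (x, y) in zip(trainX, trainY):
            sess.run(train_op, feed_dict={X: x, Y: y})  # x has shape (2,), no reshape needed
    print(sess.run(w))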
Finally, note that while the approach of splitting up the sample before passing it to the optimizer (in what is typically referred to as batching) works well for large datasets, in your case you could just as well pass the entire sample at once; that is, something that would amount to
X = tf.placeholder("float", shape=(50, 2))
Y = tf.placeholder("float", shape=(50, 1))
w = tf.Variable(tf.zeros([2, 1], "float"), name="weights")
model = tf.matmul(X, w)
cost = tf.reduce_sum(tf.pow((Y - model), 2))
init = tf.global_variables_initializer()
train_op = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
with tf.Session() as sess:
    sess.run(init)
    for i in range(10000):
        sess.run(train_op, feed_dict={X: trainX, Y: trainY.reshape((50, 1))})
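As in the original loop, the fitted weights can then be inspected inside the session, e.g.:
    print(sess.run(w))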

Implementation of a neural model on TensorFlow

I am trying to implement a neural network model on TensorFlow but seem to be having problems with the shape of the placeholders. I'm new to TF, so it could just be a simple misunderstanding. Here's my code and a data sample:
_data=[[0.4,0.5,0.6,1],[0.7,0.8,0.9,0],....]
The data comprises arrays of 4 columns; the last column of each array is the label. I want to classify each array as label 0, label 1 or label 2.
import tensorflow as tf
import numpy as np
_data = datamatrix
X = tf.placeholder(tf.float32, [None, 3])
W = tf.Variable(tf.zeros([3, 1]))
b = tf.Variable(tf.zeros([3]))
init = tf.global_variables_initializer()
Y = tf.nn.softmax(tf.matmul(X, W) + b)
# placeholder for correct labels
Y_ = tf.placeholder(tf.float32, [None, 1])
# loss function
import time
start=time.time()
cross_entropy = -tf.reduce_sum(Y_ * tf.log(Y))
# % of correct answers found in batch
is_correct = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
optimizer = tf.train.GradientDescentOptimizer(0.003)
train_step = optimizer.minimize(cross_entropy)
sess = tf.Session()
sess.run(init)
for i in range(1000):
    # load batch of images and correct answers
    batch_X, batch_Y = [x[:3] for x in _data[:2000]], [x[-1] for x in _data[:2000]]
    train_data = {X: batch_X, Y_: batch_Y}
    # train
    sess.run(train_step, feed_dict=train_data)
    # success ?
    a, c = sess.run([accuracy, cross_entropy], feed_dict=train_data)
I got the following error message after running my code:
ValueError: Cannot feed value of shape (2000,) for Tensor 'Placeholder_1:0', which has shape '(?, 1)'
My desired output should be the performance of the model using cross-entropy; the accuracy value from the codeline below:
a,c = sess.run([accuracy, cross_entropy], feed_dict=train_data)
I would also appreciate any suggestions on how to improve the model, or a model that is more suitable for my data.
The shapes of Placeholder_1:0 (your Y_) and the input data batch_Y are mismatched, as the error message says. Notice the 1-D vs 2-D array.
So you should either define a 1-D placeholder:
Y_ = tf.placeholder(tf.float32, [None])
or prepare 2-D data:
batch_X, batch_Y = [x[:3] for x in _data[:2000]],[x[-1:] for x in _data[:2000]]
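A small sketch of the difference between the two slices (my illustration, using a two-row dummy _data):
_data = [[0.4, 0.5, 0.6, 1], [0.7, 0.8, 0.9, 0]]
print([x[-1] for x in _data])   # [1, 0]      -> 1-D, shape (2,)
print([x[-1:] for x in _data])  # [[1], [0]]  -> 2-D, shape (2, 1)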

TypeError: The value of a feed cannot be a tf.Tensor object

I am a newbie on tensorflow and learning from the very beginning. I tried to read a csv file and decode it using tf.decode_csv, and I am trying to understand batching and the graph through examples.
My example csv has 8 feature columns and 1 binary (logistic) label column, separated by commas.
These are the first four rows of my file:
-0.294118,0.487437,0.180328,-0.292929,0,0.00149028,-0.53117,-0.0333333,0
-0.882353,-0.145729,0.0819672,-0.414141,0,-0.207153,-0.766866,-0.666667,1
-0.0588235,0.839196,0.0491803,0,0,-0.305514,-0.492741,-0.633333,0
-0.882353,-0.105528,0.0819672,-0.535354,-0.777778,-0.162444,-0.923997,0,1
My code is:
import tensorflow as tf
filename_queue = tf.train.string_input_producer(['data-03-diabetes.csv'],
                                                shuffle=False,
                                                name='filename_queue')
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
record_defaults = [[0.],[0.],[0.],[0.],[0.],[0.],[0.],[0.],[0.]]
xy = tf.decode_csv(value, record_defaults=record_defaults, field_delim=',')
train_x_batch, train_y_batch = tf.train.batch([xy[0:-1], xy[-1:]],
                                              batch_size=10)
X = tf.placeholder(tf.float32, shape=[None, 8])
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([8,1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
hypothesis = tf.sigmoid(tf.matmul(X, W) + b)
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))
train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
predicted = tf.cast(hypothesis > 0.5, dtype = tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    sess.run(tf.global_variables_initializer())
    feed = {X: train_x_batch, Y: train_y_batch}
    for step in range(10001):
        x_batch, y_batch = sess.run([train_x_batch, train_y_batch])
        if step % 200 == 0:
            print(step, sess.run(cost, feed_dict=feed))
    h, c, a = sess.run([hypothesis, predicted, accuracy], feed_dict=feed)
    print("\nHypothesis", h, "\nCorrect (Y):", c, "\nAccuracy", a)
and then I got this error:
TypeError: The value of a feed cannot be a tf.Tensor object. Acceptable feed values include Python scalars, strings, lists, numpy ndarrays, or TensorHandles.
Does it mean that I declared the placeholder and Variable incorrectly?
Thanks in advance!
