tensorflow feed_dict with an array and not a single value - python

I am currently trying to use TensorFlow to train a neural network for regression, so that I can map my input to an output. The input consists of an audio file which has been sampled and framed; each frame has a certain output.
The input (audio frames) is stored as follows (shown schematically, as the actual data is pretty large):
[array([[frame],[frame],...,[frame]],dtype=float32), ...]
And the output is stored as:
[array([[ 4.53255900e+01, -9.82247700e+00, -1.22920200e+00,
-6.81728800e+00, -8.23808400e+00, -1.42367900e+01,
-4.64904100e+00, -1.49673100e+01, -4.75058700e+00,
-7.54071600e+00, 5.11365500e+00, 9.80618400e+00,
-6.03554700e-01],....,[...]])]
This is my code:
number_of_examples = len(train_data)/2
train_set_data = train_data[:number_of_examples]
train_set_output = train_output_data[:number_of_examples]
test_set_data = train_data[number_of_examples:]
test_set_output = train_output_data[number_of_examples:]
############################# Training setup ##################################
# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")
n_samples = train_set_data[0].shape[0]
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Construct a linear model
pred = tf.add(tf.mul(X, W), b)
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Fit all training data
    for epoch in range(training_epochs):
        for i in range(len(train_set_data)):
            for (x, y) in (train_set_data[i], train_set_output[i]):
                sess.run(optimizer, feed_dict={X: x, Y: y})
        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: train_set_data, Y: train_set_output})
            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
                "W=", sess.run(W), "b=", sess.run(b)
    print "Optimization Finished!"
    training_cost = sess.run(cost, feed_dict={X: train_set_data, Y: train_set_output})
    print "Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n'
The problem is that I get an error message:
File "tensorflow_datapreprocess_mfcc_extraction_rnn.py", line 191, in <module>
for (x, y) in (train_set_data[i], train_set_output[i]):
ValueError: too many values to unpack
I am not sure I understand the error message. Is it saying that I can't pass an array, and have to pass single values instead?

for (x, y) in (train_set_data[i], train_set_output[i])
is probably not doing what you think it is doing. Did you mean to use zip()?

That loop is not doing what you want: (train_set_data[i], train_set_output[i]) is a 2-tuple of arrays, so the for loop yields each array in turn and then fails when it tries to unpack a whole frame into (x, y). To pair each input with its output, use zip():
...
for epoch in range(training_epochs):
    for (x, y) in zip(train_set_data, train_set_output):
        sess.run(optimizer, feed_dict={X: x, Y: y})
...
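To see why the original line fails and zip() works, here is a small standalone Python demonstration (the toy arrays below are made up for illustration, not the asker's data):

import numpy as np

data = [np.array([1.0, 2.0, 3.0], dtype=np.float32), np.array([4.0, 5.0, 6.0], dtype=np.float32)]
output = [np.array([0.1]), np.array([0.2])]

# This mirrors the failing line: iterating over the 2-tuple (data[0], output[0])
# yields each array in turn, and unpacking the 3-element frame into (x, y)
# raises "ValueError: too many values to unpack".
try:
    for (x, y) in (data[0], output[0]):
        pass
except ValueError as e:
    print(e)

# zip() pairs the i-th frame with the i-th output, which is what was intended.
for (x, y) in zip(data, output):
    print(x, y)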

ValueError: Cannot feed value of shape 'x' for Tensor 'y', which has shape 'z'

A complete rookie here, trying to run the code. The problem is that my shapes' dimensions do not coincide. Does anyone know which variables' dimensions should be changed?
I tried changing the dimensions of x or y right after assigning values to them, e.g. with np.expand_dims(x, axis=1), but I still keep getting the error.
The main method:
def main():
    #tf.reset_default_graph()
    sess = tf.Session()
    x = tf.placeholder(tf.float32, shape=[None, HEIGHT, WIDTH], name="input")
    y = tf.placeholder(tf.float32, shape=[None, NUM_LABELS], name="labels")
    dropout = tf.placeholder(tf.float32, name="dropout")
    np.expand_dims(input, axis=1)  # note: this line has no effect; its result is discarded
    logits = get_model(x, dropout)
    with tf.name_scope('loss'):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y), name=None)
        tf.summary.scalar('loss', loss)
    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)
    with tf.name_scope('accuracy'):
        predicted = tf.argmax(logits, 1)
        truth = tf.argmax(y, 1)
        correct_prediction = tf.equal(predicted, truth)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        confusion_matrix = tf.confusion_matrix(truth, predicted, num_classes=NUM_LABELS)
        tf.summary.scalar('accuracy', accuracy)
    summ = tf.summary.merge_all()
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(LOGDIR)
    writer.add_graph(sess.graph)
    test_writer = tf.summary.FileWriter(TEST_LOGDIR)
    print('Starting training\n')
    batch = get_batch(BATCH_SIZE, PATH_TRAIN)
    start_time = time.time()
    for i in range(1, ITERATIONS + 1):
        X, Y = next(batch)
        if i % EVAL_EVERY == 0:
            [train_accuracy, train_loss, s] = sess.run([accuracy, loss, summ], feed_dict={x: X, y: Y, dropout: 0.5})
            acc_and_loss = [i, train_loss, train_accuracy * 100]
            print('Iteration # {}. Train Loss: {:.2f}. Train Acc: {:.0f}%'.format(*acc_and_loss))
            writer.add_summary(s, i)
        if i % (EVAL_EVERY * 20) == 0:
            train_confusion_matrix = sess.run(confusion_matrix, feed_dict={x: X, y: Y, dropout: 1.0})
            header = LABEL_TO_INDEX_MAP.keys()
            df = pd.DataFrame(np.reshape(train_confusion_matrix, (NUM_LABELS, NUM_LABELS)), index=header, columns=header)
            print('\nConfusion Matrix:\n {}\n'.format(df))
            saver.save(sess, os.path.join(LOGDIR, "model.ckpt"), i)
        sess.run(train_step, feed_dict={x: X, y: Y, dropout: 0.5})
    print('\nTotal training time {:0f} seconds\n'.format(time.time() - start_time))
    batch = get_batch(BATCH_SIZE, PATH_TEST)
    total_accuracy = 0
    for i in range(ITERATIONS_TEST):
        X, Y = next(batch)
        test_accuracy, s = sess.run([accuracy, summ], feed_dict={x: X, y: Y, dropout: 1.0})
        print('Iteration # {}. Test Accuracy {:.0f}%'.format(i + 1, test_accuracy * 100))
        total_accuracy += (test_accuracy / ITERATIONS_TEST)
        test_writer.add_summary(s, i)
    print('\nFinal Test Accuracy: {:.0f}%'.format(total_accuracy * 100))

if __name__ == '__main__':
    init(PATH_TRAIN)
    main()
The result I get:
ValueError: Cannot feed value of shape (100,) for Tensor 'input_19:0', which has shape '(?, 20, 44)'
It seems like it is complaining about feeding X, which has shape (100,), into x, which is required to have shape (anything, 20, 44). This is the placeholder named "input" in the error.
x and y are TensorFlow placeholders, not numpy arrays, and their shapes are not changed that way. A placeholder's shape tells TensorFlow what shape of numpy array to expect (in your case, X and Y). Since the shapes mismatch, you might be feeding the wrong data, so simply reshaping X might give you wrong results.
You will have to figure out what the shapes of X and Y actually are, and where the 20x44 data should come from in your dataset (or, if it should not require 20x44 data, what it should require).
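As a first diagnostic step, print the shapes on both sides before feeding. The sketch below is illustrative only: it assumes the question's get_batch generator and the 20x44 frame layout, neither of which is verified here:

import numpy as np

X, Y = next(batch)
print(np.shape(X), np.shape(Y))  # here: (100,) versus the placeholder's (?, 20, 44)

# A reshape is only safe if each example really holds 20 * 44 = 880 values;
# a (100,) batch usually means the pipeline returns something else entirely.
X = np.asarray(X)
if X.ndim == 2 and X.shape[1] == 20 * 44:
    X = X.reshape(-1, 20, 44)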

Tensorflow precision metrics not initializing

I have written the following Tensorflow code that performs logistic regression on a custom dataset.
def logi_regression(data, labels, test_data, test_labels, learning_rate,
                    batch_size, training_epochs, display_step):
    x = tf.placeholder(tf.float32, [None, data.shape[1]])
    y = tf.placeholder(tf.float32, [None, labels.shape[1]])
    # Weights
    W = tf.Variable(tf.zeros([data.shape[1], 1]))
    b = tf.Variable(tf.zeros([1, 1]))
    # Logistic Model
    pred = tf.nn.sigmoid(tf.matmul(x, W) + b)
    # Error function
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pred,
                                                                  labels=y))
    # Gradient Descent
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    # Initialise global variables
    init = tf.global_variables_initializer()
    init_l = tf.local_variables_initializer()
    # Training
    with tf.Session() as sess:
        # Run the initializer
        sess.run(init)
        sess.run(init_l)
        # Training cycle
        for epoch in range(training_epochs):
            avg_cost = 0.
            total_batch = int(data.shape[0]/batch_size)
            # Loop over all batches
            for i in range(total_batch):
                # next_data_batch is a custom-made function
                batch_xs, batch_ys = next_data_batch(batch_size, data, labels)
                # Run optimization op (backprop) and cost op (to get loss value)
                _, c = sess.run([optimizer, loss], feed_dict={x: batch_xs,
                                                              y: batch_ys})
                # Compute average loss
                avg_cost += c / total_batch
            # Display logs per epoch step
            if (epoch+1) % display_step == 0:
                print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
        print("Optimization Finished!")
        # Test model
        prediction = tf.round(tf.sigmoid(pred))
        correct = tf.cast(tf.equal(prediction, y), dtype=tf.float32)
        _, precision = tf.metrics.precision(y, prediction)
        # Calculate accuracy
        accuracy = tf.reduce_mean(correct)
        avg_prec = tf.reduce_mean(precision)
        print("Accuracy:", accuracy.eval({x: test_data, y: test_labels}))
        print("Average Precision Score:", avg_prec.eval({x: test_data, y: test_labels}))
Even though I get the correct output from training (Epoch: xxxx cost= 0.xxxx) and from the test set (Accuracy: 0.xxx), the program returns an error when it tries to calculate the precision:
FailedPreconditionError (see above for traceback): Attempting to use
uninitialized value precision/true_positives/count
[[node precision/true_positives/AssignAdd (defined at :54) ]]
Therefore, the problem is in the final lines that I added (_, precision = tf.metrics.precision(y, prediction)). I have tried various suggestions from Stack Overflow posts, but nothing has worked. It must be a silly coding mistake, but due to my inexperience with TensorFlow I cannot figure out what it is.
The lines creating nodes in the TensorFlow graph should come before the tf.global_variables_initializer() and tf.local_variables_initializer() statements, so that the variables they create (here, the internal counters of tf.metrics.precision) are covered by the initializers. Move the following lines above the initializers and it will work:
# Test model
prediction = tf.round(tf.sigmoid(pred))
correct = tf.cast(tf.equal(prediction, y), dtype=tf.float32)
_, precision = tf.metrics.precision(y, prediction)
# Calculate accuracy
accuracy = tf.reduce_mean(correct)
avg_prec = tf.reduce_mean(precision)
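Concretely, the corrected ordering looks like this (a condensed sketch of the relevant part of the function, not the complete code):

# Build ALL graph nodes first, including the metric ops...
prediction = tf.round(tf.sigmoid(pred))
correct = tf.cast(tf.equal(prediction, y), dtype=tf.float32)
_, precision = tf.metrics.precision(y, prediction)  # creates local counter variables
accuracy = tf.reduce_mean(correct)
avg_prec = tf.reduce_mean(precision)

# ...then create the initializers, so they cover the metric's variables...
init = tf.global_variables_initializer()
init_l = tf.local_variables_initializer()

# ...and only then run them inside the session.
with tf.Session() as sess:
    sess.run(init)
    sess.run(init_l)
    # training loop and evaluation as before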

Incorrect value of placeholder

I am trying to execute the following code, which uses the MNIST dataset in TensorFlow, with images of shape 28 * 28 = 784 and 10 classes (digits 0-9) as output. I am getting the error shown below:
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_33' with dtype float and shape [?,10]
# Import MNIST data
#import input_data
#mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

import tensorflow as tf

# Set parameters
learning_rate = 0.01
training_iteration = 30
batch_size = 100
display_step = 2

# TF graph input
x = tf.placeholder("float", [None, 784]) # mnist data image of shape 28*28=784
y = tf.placeholder("float", [None, 10]) # 0-9 digits recognition => 10 classes

# Create a model
# Set model weights
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

with tf.name_scope("Wx_b") as scope:
    # Construct a linear model
    model = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax

# Add summary ops to collect data
w_h = tf.summary.histogram("weights", W)
b_h = tf.summary.histogram("biases", b)

# More name scopes will clean up graph representation
with tf.name_scope("cost_function") as scope:
    # Minimize error using cross entropy
    cost_function = -tf.reduce_sum(y*tf.log(model))
    # Create a summary to monitor the cost function
    tf.summary.scalar("cost_function", cost_function)

with tf.name_scope("train") as scope:
    # Gradient descent
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)

# Initializing the variables
init = tf.initialize_all_variables()

# Merge all summaries into a single operator
merged_summary_op = tf.summary.merge_all()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    summary_writer = tf.summary.FileWriter('/home/raed/Tensorflow/tensorflow_demo', graph_def=sess.graph_def)

    # Training cycle
    for iteration in range(training_iteration):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Fit training using batch data
            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
            # Compute the average loss
            avg_cost += sess.run(cost_function, feed_dict={x: batch_xs, y: batch_ys})/total_batch
            # Write logs for each iteration
            summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys})
            summary_writer.add_summary(summary_str, iteration*total_batch + i)
        # Display logs per iteration step
        if iteration % display_step == 0:
            print("Iteration:", "%04d" % (iteration + 1), "cost=", "{:.9f}".format(avg_cost))

    print("Tuning completed!")

    # Test the model
    predictions = tf.equal(tf.argmax(model, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(predictions, "float"))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

Single Layer Perceptron Error while optimizing and minimising cost using Tensorflow

I am trying to implement a single layer perceptron using TensorFlow. The input, expected output, weights, and bias are all defined properly, but while running the program an error is thrown during optimization, when minimizing the cost.
Here is the code:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

learning_rate = 0.001
training_epochs = 200
display_step = 50

# X must be 1x5
# Y must be 1x3
# W must be 5x3
# b must be 1x3
train_X = np.random.rand(1, 5)
train_Y = np.random.rand(1, 3)

X = tf.placeholder(tf.float32, [1, 5])
Y = tf.placeholder(tf.float32, [1, 3])
W = tf.Variable([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
b = tf.Variable([0.0, 0.0, 0.0])

pred = tf.add(tf.matmul(X, W), b)
cost = tf.reduce_sum(tf.pow(pred - Y, 2))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
init = tf.global_variables_initializer()

avg_set = []
epoch_set = []
with tf.Session() as sess:
    for i in range(training_epochs):
        sess.run(optimizer, feed_dict={X: train_X, Y: train_Y})
        # Here the error is thrown from the line above
        if (i + 1) % display_step == 0:
            cc = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            # Again, the error is also thrown from the line above
            print("Epoch:", '%04d' % (i + 1), "cost=", "{:.9f}".format(cc),
                  "W=", sess.run(W), "b=", sess.run(b))
            avg_set.append(cc)
            epoch_set.append(i + 1)
    print("Optimization Finished!")
    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')

plt.plot(epoch_set, avg_set, 'o', label='SLP Training phase')
plt.ylabel('cost')
plt.xlabel('epochs')
plt.legend()
plt.show()
Please help me get rid of this error. Thanks.
I found that the error came from the with tf.Session() as sess: block before the for loop: the variables were never initialized in it. Declaring the session separately and running sess.run(init) before the loop made it work.
Here is the code, which works:
main.py
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

learning_rate = 0.001
training_epochs = 1000
display_step = 50

# taking input as an array from the numpy package and converting it into a tensor
# X must be 1x5
# Y must be 1x3
# W must be 5x3
# b must be 1x3
train_X = np.random.rand(1, 5)
train_Y = np.random.rand(1, 3)

X = tf.placeholder(tf.float32, [1, 5])
Y = tf.placeholder(tf.float32, [1, 3])
W = tf.Variable([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
b = tf.Variable([0.0, 0.0, 0.0])

pred = tf.add(tf.matmul(X, W), b)
cost = tf.reduce_sum(tf.pow(pred - Y, 2))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
init = tf.global_variables_initializer()

avg_set = []
epoch_set = []
sess = tf.Session()
sess.run(init)  # initialize the variables before training
for i in range(training_epochs):
    sess.run(optimizer, feed_dict={X: train_X, Y: train_Y})
    if (i + 1) % display_step == 0:
        cc = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
        # check what it thinks when you give it the input data
        print("Epoch:", '%04d' % (i + 1), "cost=", "{:.9f}".format(cc),
              "W=", sess.run(W), "b=", sess.run(b))
        avg_set.append(cc)
        epoch_set.append(i + 1)
print("Optimization Finished!")
training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')

plt.plot(epoch_set, avg_set, 'o', label='SLP Training phase')
plt.ylabel('cost')
plt.xlabel('epochs')
plt.legend()
plt.show()
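Note that a separate session is not strictly required; the original with block works too, as long as the variables are initialized before training. A minimal sketch of that alternative:

with tf.Session() as sess:
    sess.run(init)  # initializing the variables is the actual fix for the FailedPreconditionError
    for i in range(training_epochs):
        sess.run(optimizer, feed_dict={X: train_X, Y: train_Y})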

Tensorflow: Simple Linear Regression using CSV data

I am an extreme beginner at TensorFlow, and I was tasked to do a simple linear regression using my CSV data, which contains two columns, Height and State of Charge (SoC), where both values are floats.
In the CSV file, Height is the first column and SoC is the second.
Using Height, I'm supposed to predict SoC.
I'm completely lost as to what I have to add in the "Fit all training data" portion of the code. I've looked at other linear regression models, and their code is mind-boggling, such as this one:
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        sess.run(training_step, feed_dict={X: train_x, Y: train_y})
        cost_history = np.append(cost_history, sess.run(cost, feed_dict={X: train_x, Y: train_y}))

    # calculate mean square error
    pred_y = sess.run(y_, feed_dict={X: test_x})
    mse = tf.reduce_mean(tf.square(pred_y - test_y))
    print("MSE: %.4f" % sess.run(mse))

    # plot cost
    plt.plot(range(len(cost_history)), cost_history)
    plt.axis([0, training_epochs, 0, np.max(cost_history)])
    plt.show()

    fig, ax = plt.subplots()
    ax.scatter(test_y, pred_y)
    ax.plot([test_y.min(), test_y.max()], [test_y.min(), test_y.max()], 'k--', lw=3)
    ax.set_xlabel('Measured')
    ax.set_ylabel('Predicted')
    plt.show()
I've just been able to get data from my CSV file without error using this guide:
TensorFlow: Reading and using data from CSV file
Full Code:
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
rng = np.random
from numpy import genfromtxt
from sklearn.datasets import load_boston

# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50
n_samples = 221

X = tf.placeholder("float") # create symbolic variables
Y = tf.placeholder("float")

filename_queue = tf.train.string_input_producer(["battdata.csv"], shuffle=False)
reader = tf.TextLineReader(skip_header_lines=1)
key, value = reader.read(filename_queue)

# Default values, in case of empty columns. Also specifies the type of the
# decoded result.
record_defaults = [[1.], [1.]]
col1, col2 = tf.decode_csv(value, record_defaults=record_defaults)
features = tf.stack([col1])

# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")

# Construct a linear model
pred = tf.add(tf.multiply(col1, W), b) # XW + b <- y = mx + b where W is gradient, b is intercept

# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-col2, 2))/(2*n_samples)

# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()

with tf.Session() as sess:
    # Start populating the filename queue.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    sess.run(init)

    # Fit all training data
    for epoch in range(training_epochs):
        _, cost_value = sess.run([optimizer, cost])
        for (x, y) in zip(col2, col1):
            sess.run(optimizer, feed_dict={X: x, Y: y})

        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: col2, Y: col1})
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c),
                  "W=", sess.run(W), "b=", sess.run(b))

    print("Optimization Finished!")
    training_cost = sess.run(cost, feed_dict={X: col2, Y: col1})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')

    # Graphic display
    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.plot(train_X, sess.run(W) * col2 + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()

    coord.request_stop()
    coord.join(threads)
Error:
INFO:tensorflow:Error reported to Coordinator: , Attempted to use a closed Session.
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
 in ()
      8 for epoch in range(training_epochs):
      9     _, cost_value = sess.run([optimizer,cost])
---> 10     for (x, y) in zip(*col1, col2):
     11         sess.run(optimizer, feed_dict={X: x, Y: y})
     12
C:\Users\Shiina\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\framework\ops.py in __iter__(self)
    514       TypeError: when invoked.
    515     """
--> 516     raise TypeError("'Tensor' object is not iterable.")
    517
    518   def __bool__(self):
TypeError: 'Tensor' object is not iterable.
The error is because you are trying to iterate over tensors in for (x, y) in zip(col2, col1), which is not allowed. The other issue with the code is that you have input pipeline queues set up and then you're also trying to feed in through feed_dict{}, which is wrong. Your training part should look something like this:
with tf.Session() as sess:
    # Start populating the filename queue.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    sess.run(init)

    # Fit all training data
    for epoch in range(training_epochs):
        _, cost_value = sess.run([optimizer, cost])

        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            c = sess.run(cost)
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c),
                  "W=", sess.run(W), "b=", sess.run(b))

    print("Optimization Finished!")
    training_cost = sess.run(cost)
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')

    # Plot data after completing training
    train_X = []
    train_Y = []
    for i in range(input_size): # your input data size, to loop through once
        X, Y = sess.run([col1, pred]) # call pred to get the prediction with the updated weights
        train_X.append(X)
        train_Y.append(Y)

    # Graphic display
    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.legend()
    plt.show()

    coord.request_stop()
    coord.join(threads)
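For a dataset this small, an alternative worth considering is to skip the queue pipeline entirely: load the CSV with pandas and feed plain numpy arrays through the placeholders, which makes the "Fit all training data" part a single full-batch step per epoch. This is a sketch under assumptions (the battdata.csv name and Height/SoC column order are taken from the question):

import numpy as np
import pandas as pd
import tensorflow as tf

# First column Height (input), second column SoC (target).
data = pd.read_csv("battdata.csv")
train_x = data.iloc[:, 0].values.astype(np.float32)
train_y = data.iloc[:, 1].values.astype(np.float32)

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
W = tf.Variable(tf.random_normal([]), name="weight")
b = tf.Variable(tf.zeros([]), name="bias")

pred = tf.add(tf.multiply(X, W), b)
cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * len(train_x))
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(1000):
        # "Fit all training data": one full-batch gradient step per epoch
        sess.run(optimizer, feed_dict={X: train_x, Y: train_y})
    print("W =", sess.run(W), "b =", sess.run(b))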
