I am new to TensorFlow and I still have trouble understanding how it works. I saw some examples, but I am still not sure. I am trying to print the predictions and the accuracy.
I have this code:
def linear_function(x, w, b):
    y_est = tf.add(tf.matmul(w, x), b)
    y_est = tf.reshape(y_est, [])
    return y_est

def initialize_parameters():
    W = tf.get_variable('W', [1, num_of_features],
                        initializer=tf.contrib.layers.xavier_initializer())
    b = tf.get_variable("b1", [1, 1], initializer=tf.zeros_initializer())
    return W, b
if __name__ == '__main__':
    trainSetX, trainSetY = utils.load_train_set(num_of_examples)

    # create placeholders & variables
    X = tf.placeholder(tf.float32, shape=(num_of_features,))
    X_reshaped = tf.reshape(X, [num_of_features, 1])
    y = tf.placeholder(tf.float32, shape=())
    W, b = initialize_parameters()

    # prediction
    y_estim = linear_function(X_reshaped, W, b)
    y_pred = tf.sigmoid(y_estim)

    # set the optimizer
    loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=y_pred)
    loss_mean = tf.reduce_mean(loss)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=alpha).minimize(loss_mean)

    # training phase
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for idx in range(num_of_examples):
            cur_x, cur_y = trainSetX[idx], trainSetY[idx]
            _, c = sess.run([optimizer, loss_mean], feed_dict={X: cur_x, y: cur_y})
So, now I want to actually read the values of y_pred and calculate the accuracy.
In some other sources I saw people adding these lines inside the with tf.Session() as sess: block:
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(feed_dict={X: trainSetX.T, y: trainSetY}))
Clearly, it does not work for me, because my trainSetX has all the examples, while X is a placeholder for only one example at a time. I have tried to add correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) and modify the training loop like this:
for idx in range(num_of_examples):
    cur_x, cur_y = trainSetX[idx], trainSetY[idx]
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    _, c, acc = sess.run([optimizer, loss_mean, correct_prediction], feed_dict={X: cur_x, y: cur_y})
But it just gives the following error for ArgMax (why?):
InvalidArgumentError (see above for traceback): Expected dimension in the range [0, 0), but got 1
[[Node: ArgMax_1 = ArgMax[T=DT_FLOAT, Tidx=DT_INT32, output_type=DT_INT64, _device="/job:localhost/replica:0/task:0/device:CPU:0"](_arg_Placeholder_1_0_1, ArgMax/dimension)]]
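For context: tf.argmax(..., 1) fails here because y and y_pred are scalars, so there is no axis 1 to reduce over. A minimal sketch, assuming the scalar placeholders above and 0/1 labels in trainSetY (an illustration only, not the original code), of accumulating per-example accuracy by thresholding the sigmoid output:
correct = tf.equal(tf.cast(y_pred > 0.5, tf.float32), y)

with tf.Session() as sess:
    sess.run(init)
    num_correct = 0
    for idx in range(num_of_examples):
        cur_x, cur_y = trainSetX[idx], trainSetY[idx]
        # run the training step and check this single prediction at the same time
        _, c, is_correct = sess.run([optimizer, loss_mean, correct],
                                    feed_dict={X: cur_x, y: cur_y})
        num_correct += int(is_correct)
    print("Train accuracy:", num_correct / num_of_examples)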
Related
A complete rookie here, trying to run the code. The problem is that the dimensions of my shapes do not match. Does anyone know which variables' dimensions should be changed?
I tried changing the dimensions of x or y right after assigning values to them, for example with the line below, but I still keep getting the error.
np.expand_dims(x, axis=1)
The main method:
def main():
    #tf.reset.default.graph()
    sess = tf.Session()

    x = tf.placeholder(tf.float32, shape=[None, HEIGHT, WIDTH], name="input")
    y = tf.placeholder(tf.float32, shape=[None, NUM_LABELS], name="labels")
    dropout = tf.placeholder(tf.float32, name="dropout")
    np.expand_dims(input, axis=1)
    logits = get_model(x, dropout)

    with tf.name_scope('loss'):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y), name=None)
        tf.summary.scalar('loss', loss)

    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

    with tf.name_scope('accuracy'):
        predicted = tf.argmax(logits, 1)
        truth = tf.argmax(y, 1)
        correct_prediction = tf.equal(predicted, truth)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        confusion_matrix = tf.confusion_matrix(truth, predicted, num_classes=NUM_LABELS)
        tf.summary.scalar('accuracy', accuracy)

    summ = tf.summary.merge_all()
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(LOGDIR)
    writer.add_graph(sess.graph)
    test_writer = tf.summary.FileWriter(TEST_LOGDIR)

    print('Starting training\n')
    batch = get_batch(BATCH_SIZE, PATH_TRAIN)
    start_time = time.time()
    for i in range(1, ITERATIONS + 1):
        X, Y = next(batch)
        if i % EVAL_EVERY == 0:
            [train_accuracy, train_loss, s] = sess.run([accuracy, loss, summ],
                                                       feed_dict={x: X, y: Y, dropout: 0.5})
            acc_and_loss = [i, train_loss, train_accuracy * 100]
            print('Iteration # {}. Train Loss: {:.2f}. Train Acc: {:.0f}%'.format(*acc_and_loss))
            writer.add_summary(s, i)
        if i % (EVAL_EVERY * 20) == 0:
            train_confusion_matrix = sess.run(confusion_matrix, feed_dict={x: X, y: Y, dropout: 1.0})
            header = LABEL_TO_INDEX_MAP.keys()
            df = pd.DataFrame(np.reshape(train_confusion_matrix, (NUM_LABELS, NUM_LABELS)), index=i)
            print('\nConfusion Matrix:\n {}\n'.format(df))
            saver.save(sess, os.path.join(LOGDIR, "model.ckpt"), i)
        sess.run(train_step, feed_dict={x: X, y: Y, dropout: 0.5})
    print('\nTotal training time {:0f} seconds\n'.format(time.time() - start_time))
    batch = get_batch(BATCH_SIZE, PATH_TEST)
    total_accuracy = 0
    for i in range(ITERATIONS_TEST):
        X, Y = next(batch, PATH_TEST)
        test_accuracy, s = sess.run([accuracy, summ], feed_dict={x: X, y: Y, dropout: 1.0})
        print('Iteration # {}. Test Accuracy {:.0f}%'.format(i + 1, test_accuracy * 100))
        total_accuracy += (test_accuracy / ITERATIONS_TEST)
        test_writer.add_summary(s, i)
    print('\nFinal Test Accuracy: {:.0f}%'.format(total_accuracy * 100))
if __name__ == '__main__':
    init(PATH_TRAIN)
    main()
The Result I get:
ValueError: Cannot feed value of shape (100,) for Tensor 'input_19:0', which has shape '(?, 20, 44)'
It seems like it is complaining about feeding X, which has shape (100,), into x, which is required to have shape (anything, 20, 44). This is the variable named "input" in the error.
x and y are TensorFlow placeholders rather than numpy arrays, and their shapes are not changed that way. A placeholder tells TensorFlow to expect a numpy array (in your case, perhaps X or Y) with the specified shape. Since the shapes don't match, you might be feeding the wrong data, so simply reshaping X might give you wrong results.
You will have to figure out what the shapes of X and Y actually are, and where the 20x44 data should be coming from in your dataset (or, if it should not require 20x44 data, what it should require).
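As an illustration only (a sketch reusing the names from the question; the right fix depends on what get_batch actually returns), a first step is to print the shapes being fed and, only if each example really holds HEIGHT * WIDTH values, reshape on the numpy side before feeding:
# Sketch: check what the batch generator yields; the placeholder expects (?, 20, 44).
X, Y = next(batch)
print(np.asarray(X).shape, np.asarray(Y).shape)

# Only if each example genuinely contains HEIGHT * WIDTH values is a reshape
# like this appropriate; otherwise get_batch itself needs to be fixed.
X = np.reshape(X, (-1, HEIGHT, WIDTH))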
I've tried to figure things out myself and not fall back to actually creating an account here, but as a self-taught beginner I've reached a wall with this code.
I'm having two major issues, besides optimizing the net architecture once everything is working:
Every time I've tried to create a new dataset for a test batch I've run into an 'xTensor is not a Tensor' error and couldn't run a session through it, unlike with the iterator, which works just fine. I'm loading custom data with directory names as labels, with no manually created train and test directories. I'm probably missing a proper method for tf.
I can't work around the first error I currently get, which is:
ValueError: Cannot feed value of shape (100,) for Tensor 'Placeholder_1:0', which has shape '(?, 1)'
It is raised while feeding feed_dict={y: batch_y}. I've tried some of the solutions posted on SO but couldn't get it to work.
I'm pasting the whole thing; the ########## markers flag the problem-triggering zones, at the very bottom inside the session.
import tensorflow as tf
import numpy as np
import os
# load custom imageset directory
data_path = r"..\datasets\images\flowers"
# setup hypervariables for labels and images format
n_classes = 5
img_width = 64
img_length = 64
channels = 3
# setup hypervariables for network
learning_rate = 0.0001
epochs = 2
batch_size = 100
drop_rate = 0.6
imagepaths = list()
labels = list()
label = 0
classes = sorted(os.walk(data_path).__next__()[1])
# List each sub-directory (the classes)
for c in classes:
    c_dir = os.path.join(data_path, c)
    walk = os.walk(c_dir).__next__()
    # Add each image to the training set
    for sample in walk[2]:
        imagepaths.append(os.path.join(c_dir, sample))
        labels.append(label)
    label += 1
total_input = len(labels)
# Convert to Tensor
imagepaths = tf.convert_to_tensor(imagepaths, dtype=tf.string)
labels = tf.convert_to_tensor(labels, dtype=tf.int32)
# Build a TF Queue, shuffle data
dataset = tf.data.Dataset.from_tensor_slices((imagepaths, labels))
# read, decode, resize and normalize images on RGB range
def parse(imagepath, label):
    image = tf.read_file(imagepath)
    image = tf.image.decode_jpeg(image, channels=channels)
    image = tf.image.resize_images(image, [img_length, img_width])
    image = image * 1.0 / 255
    return image, label
dataset = dataset.map(parse)
dataset = dataset.shuffle(buffer_size=batch_size*10)
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
next_batch = iterator.get_next()
# hypervariables for layers' output size
K = 16
L = 32
M = 200
x = tf.placeholder(tf.float32, [None, 4326])
x_shaped = tf.reshape(x, [-1, img_length, img_width, 3])
y = tf.placeholder(tf.float32, [None, 1])
# weight, bias with stride size and activation method after convolution for layer 1
W1 = tf.Variable(tf.truncated_normal([5, 5, 3, K], stddev=0.03))
b1 = tf.Variable(tf.truncated_normal([K], stddev=0.01))
stride = 1
y1 = tf.nn.relu(tf.nn.conv2d(x_shaped, W1, strides=[1, stride, stride, 1], padding='SAME') + b1)
# weight, bias with stride size and activation method after convolution for layer 2
W2 = tf.Variable(tf.truncated_normal([5, 5, K, L], stddev=0.03))
b2 = tf.Variable(tf.truncated_normal([L], stddev=0.01))
stride = 2 # output is 14x14
y2 = tf.nn.relu(tf.nn.conv2d(y1, W2, strides=[1, stride, stride, 1], padding='SAME') + b2)
yflat = tf.reshape(y2, [-1, 7 * 7 * L])
W3 = tf.Variable(tf.truncated_normal([7 * 7 * L, M], stddev=0.1))
b3 = tf.Variable(tf.truncated_normal([M], stddev=0.01))
y3 = tf.nn.relu(tf.matmul(yflat, W3) + b3)
W4 = tf.Variable(tf.truncated_normal([M, 10], stddev=0.1))
b4 = tf.Variable(tf.truncated_normal([10], stddev=0.01))
ylogits = tf.matmul(y3, W4) + b4
y_ = tf.nn.softmax(ylogits)
# add cross entropy for back prop
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=ylogits, labels=y_))
# add an optimiser for back prop
optimiser = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
# define an accuracy assessment operation
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    ########## temporary solution for test_x, test_y
    test_x, test_y = sess.run(next_batch)

    total_batch = int(total_input / batch_size)
    # define the iterator for the network
    for epoch in range(epochs):
        avg_cost = 0
        for i in range(total_batch):
            batch_x, batch_y = sess.run(next_batch)
            ########## ValueError: Cannot feed value of shape (100,) for Tensor 'Placeholder_1:0' -> y: batch_y
            _, c = sess.run([optimiser, cross_entropy], feed_dict={x_shaped: batch_x, y: batch_y})
            avg_cost += c / total_batch
        test_acc = sess.run(accuracy, feed_dict={x: test_x, y: test_y})
        print("Epoch:", (epoch + 1), "cost =", "{:.3f}".format(avg_cost), " test accuracy: {:.3f}".format(test_acc))
        summary = sess.run(merged, feed_dict={x: test_x, y: test_y})
    print("\nTraining complete!")
    print(sess.run(accuracy, feed_dict={x: test_x, y: test_y}))
Are you sure that this part:
_, c = sess.run([optimiser, cross_entropy], feed_dict={x_shaped: batch_x, y: batch_y})
doesn't have to be:
_, c = sess.run([optimiser, cross_entropy], feed_dict={x: batch_x, y: batch_y})
Furthermore, you have a batch size of 100; the data in the array is right, but its shape is not complete.
What you have (dummy example):
np.zeros((100,)).shape
>>> (100,)
Here the 100 matches the '?' of the required shape '(?, 1)'. The trailing 1 can easily be added, and it often happens that numpy does not do this by itself. See the following code:
np.expand_dims(np.zeros((100,)), axis=-1).shape
>>> (100, 1)
axis=-1 stands for the last axis; you basically tell numpy to add a dimension at the end. This does not affect the data itself, only the shape of the array. So your code should be:
_, c = sess.run([optimiser, cross_entropy], feed_dict={x_shaped: batch_x, y: np.expand_dims(batch_y, axis=-1)})
I am relatively new to Python and machine learning. When I run this code, it comes up with this value error. Unfortunately, I cannot seem to be able to reshape the input to match both shapes. I have tried to change the shape of the label to (None, None, 2), but when I run it, the train cost comes up as 'nan'. What am I doing wrong? How can I fix it? Any help will be much appreciated.
import csv
import tensorflow as tf
import numpy as np
import pandas as pd
import urllib.request as request
import matplotlib.pyplot as plt
train_data = pd.read_csv("C:/Python35/train_data.csv", sep=',', header = None)
test_data = pd.read_csv("C:/Python35/test_data.csv", sep=',', header = None)
X_train = np.asarray(train_data)
X_test = np.asarray(test_data)
train_label = pd.read_csv("C:/Python35/train_label.csv", sep=',', header = None)
test_label = pd.read_csv("C:/Python35/test_label.csv", sep=',', header = None)
y_train = np.asarray(train_label)
y_test = np.asarray(test_label)
labels_train = (np.arange(2) == y_train[:,None]).astype(np.float32)
labels_test = (np.arange(2) == y_test[:,None]).astype(np.float32)
inputs = tf.placeholder(tf.float32, shape=(None, X_train.shape[1]), name='inputs')
label = tf.placeholder(tf.float32, shape=(None, 2), name='labels')
hid1_size = 128
w1 = tf.Variable(tf.random_normal([hid1_size, X_train.shape[1]], stddev=0.01), name='w1')
b1 = tf.Variable(tf.constant(0.1, shape=(hid1_size, 1)), name='b1')
y1 = tf.nn.dropout(tf.nn.relu(tf.add(tf.matmul(w1, tf.transpose(inputs)), b1)), keep_prob=0.5)
hid2_size = 256
w2 = tf.Variable(tf.random_normal([hid2_size, hid1_size], stddev=0.01), name='w2')
b2 = tf.Variable(tf.constant(0.1, shape=(hid2_size, 1)), name='b2')
y2 = tf.nn.dropout(tf.nn.relu(tf.add(tf.matmul(w2, y1), b2)), keep_prob=0.5)
wo = tf.Variable(tf.random_normal([2, hid2_size], stddev=0.01), name='wo')
bo = tf.Variable(tf.random_normal([2, 1]), name='bo')
yo = tf.transpose(tf.add(tf.matmul(wo, y2), bo))
lr = tf.placeholder(tf.float32, shape=(), name='learning_rate')
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=yo, labels=label))
optimizer = tf.train.GradientDescentOptimizer(lr).minimize(loss)
pred = tf.nn.softmax(yo)
pred_label = tf.argmax(pred, 1)
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
init = tf.global_variables_initializer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)
sess.run(init)
for learning_rate in [0.05, 0.01]:
    for epoch in range(50):
        avg_cost = 0.0
        for i in range(X_train.shape[0]):
            _, c = sess.run([optimizer, loss], feed_dict={lr: learning_rate,
                                                          inputs: X_train[i, None],
                                                          label: labels_train[i, None]})
            avg_cost += c
        avg_cost /= X_train.shape[0]
        if epoch % 10 == 0:
            print("Epoch: {:3d} Train Cost: {:.4f}".format(epoch, avg_cost))
acc_train = accuracy.eval(feed_dict={inputs: X_train, label: labels_train})
print("Train accuracy: {:3.2f}%".format(acc_train*100.0))
acc_test = accuracy.eval(feed_dict={inputs: X_test, label: labels_test})
print("Test accuracy: {:3.2f}%".format(acc_test*100.0))
sess.close()
What the error says is that while your labels placeholder requires an array of shape (None, 2), you are passing one with a shape of (1, 1, 2).
This happens in the line where you run your session (sess.run()), and more specifically in the feed_dict, where you pass labels_train[i, None] to the label placeholder.
From your code I can't tell exactly what you are trying to do with your labels, but a simple reshape should do the trick:
_, c = sess.run([optimizer, loss], feed_dict={lr: learning_rate,
                                              inputs: X_train[i, None],
                                              label: labels_train[i, None].reshape(-1, 2)})
This is far from the optimal solution to the problem; think of it as a workaround until you get better acquainted with Python and TensorFlow.
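As a further sketch (assuming, as the question's one-hot construction suggests, that y_train and y_test hold 0/1 class indices), the label arrays can also be built with shape (N, 2) up front, so no per-step reshape is needed:
# Flatten the class indices first so the comparison broadcasts to (N, 2)
# instead of (N, 1, 2).
labels_train = (np.arange(2) == y_train.reshape(-1, 1)).astype(np.float32)
labels_test = (np.arange(2) == y_test.reshape(-1, 1)).astype(np.float32)
# Now labels_train[i, None] already has shape (1, 2) and can be fed directly.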
I'm trying to use Numpy arrays within a graph, feeding in the data using a Dataset.
I've read through this, but can't quite make sense of how I should feed placeholder arrays within a Dataset.
If we take a simple example, I start with:
A = np.arange(4)
B = np.arange(10, 14)
a = tf.placeholder(tf.float32, [None])
b = tf.placeholder(tf.float32, [None])
c = tf.add(a, b)
with tf.Session() as sess:
    for i in range(10):
        x = sess.run(c, feed_dict={a: A, b: B})
        print(i, x)
Then I attempt to modify it to use a Dataset as follows:
A = np.arange(4)
B = np.arange(10, 14)
a = tf.placeholder(tf.int32, A.shape)
b = tf.placeholder(tf.int32, B.shape)
c = tf.add(a, b)
dataset = tf.data.Dataset.from_tensors((a, b))
iterator = dataset.make_initializable_iterator()
with tf.Session() as sess3:
    sess3.run(tf.global_variables_initializer())
    sess3.run(iterator.initializer, feed_dict={a: A, b: B})
    for i in range(10):
        x = sess3.run(c)
        print(i, x)
If I run this I get 'InvalidArgumentError: You must feed a value for placeholder tensor ...'
The code until the for loop mimics the example here, but I don't get how I can then employ the placeholders a & b without supplying a feed_dict to every call to sess3.run(c) [which would be expensive]. I suspect I have to somehow use the iterator, but I don't understand how.
Update
It appears I oversimplified when picking the example. What I am really trying to do is use Datasets when training a neural network, or something similar.
For a more sensible question: how would I go about using Datasets to feed placeholders in the code below (imagine X and Y_true are much longer...)? The documentation takes me to the point where the loop starts and then I'm not sure.
X = np.arange(8.).reshape(4, 2)
Y_true = np.array([0, 0, 1, 1])
x = tf.placeholder(tf.float32, [None, 2], name='x')
y_true = tf.placeholder(tf.float32, [None], name='y_true')
w = tf.Variable(np.random.randn(2, 1), name='w', dtype=tf.float32)
y = tf.squeeze(tf.matmul(x, w), name='y')
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
labels=y_true, logits=y),
name='x_entropy')
# set optimiser
optimiser = tf.train.AdamOptimizer().minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(100):
        _, loss_out = sess.run([optimiser, loss], feed_dict={x: X, y_true: Y_true})
        print(i, loss_out)
Trying the following only gets me an InvalidArgumentError:
X = np.arange(8.).reshape(4, 2)
Y_true = np.array([0, 0, 1, 1])
x = tf.placeholder(tf.float32, [None, 2], name='x')
y_true = tf.placeholder(tf.float32, [None], name='y_true')
dataset = tf.data.Dataset.from_tensor_slices((x, y_true))
iterator = dataset.make_initializable_iterator()
w = tf.Variable(np.random.randn(2, 1), name='w', dtype=tf.float32)
y = tf.squeeze(tf.matmul(x, w), name='y')
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
labels=y_true, logits=y),
name='x_entropy')
# set optimiser
optimiser = tf.train.AdamOptimizer().minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(iterator.initializer, feed_dict={x: X,
                                              y_true: Y_true})
    for i in range(100):
        _, loss_out = sess.run([optimiser, loss])
        print(i, loss_out)
Use iterator.get_next() to get elements from the Dataset, like:
next_element = iterator.get_next()
then initialize the iterator:
sess.run(iterator.initializer, feed_dict={a:A, b:B})
and finally get the values from the Dataset:
value = sess.run(next_element)
EDIT:
The code above just returns the elements from the Dataset. The Dataset API is intended to serve features and labels for an input_fn, so all additional preprocessing computations should be performed within the Dataset API. If you want to add elements, you should define a function that is applied to the elements, like:
def add_fn(exp1, exp2):
    return tf.add(exp1, exp2)
and then you can map this function onto your Dataset:
dataset = dataset.map(add_fn)
Complete code example:
A = np.arange(4)
B = np.arange(10, 14)
a = tf.placeholder(tf.int32, A.shape)
b = tf.placeholder(tf.int32, B.shape)
#c = tf.add(a, b)
def add_fn(exp1, exp2):
    return tf.add(exp1, exp2)
dataset = tf.data.Dataset.from_tensors((a, b))
dataset = dataset.map(add_fn)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
with tf.Session() as sess:
    sess.run(iterator.initializer, feed_dict={a: A, b: B})
    # just one element at dataset
    x = sess.run(next_element)
    print(x)
The problem in your more complicated example is that you use the same tf.placeholder() nodes both as the input to Dataset.from_tensor_slices() (which is correct) and as the input to the network itself (which causes the InvalidArgumentError). Instead, as J.E.K points out in their answer, you should use iterator.get_next() as the input to your network, as follows (note that there are a couple of other fixes I added to make the code run as-is):
X = np.arange(8.).reshape(4, 2)
Y_true = np.array([0, 0, 1, 1])
x = tf.placeholder(tf.float32, [None, 2], name='x')
y_true = tf.placeholder(tf.float32, [None], name='y_true')
dataset = tf.data.Dataset.from_tensor_slices((x, y_true))
# You will need to repeat the input (which has 4 elements) to be able to take
# 100 steps.
dataset = dataset.repeat()
iterator = dataset.make_initializable_iterator()
# Use `iterator.get_next()` to create tensors that will consume values from the
# dataset.
x_next, y_true_next = iterator.get_next()
w = tf.Variable(np.random.randn(2, 1), name='w', dtype=tf.float32)
# The `x_next` tensor is a vector (i.e. a row of `X`), so you will need to
# convert it to a matrix or apply batching in the dataset to make it work with
# `tf.matmul()`
x_next = tf.expand_dims(x_next, 0)
y = tf.squeeze(tf.matmul(x_next, w), name='y') # Use `x_next` here.
loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
labels=y_true_next, logits=y), # Use `y_true_next` here.
name='x_entropy')
# set optimiser
optimiser = tf.train.AdamOptimizer().minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(iterator.initializer, feed_dict={x: X,
                                              y_true: Y_true})
    for i in range(100):
        _, loss_out = sess.run([optimiser, loss])
        print(i, loss_out)
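As a small follow-up sketch (reusing x, y_true and w from the code above), the batching route mentioned in the comment works too and makes the tf.expand_dims call unnecessary:
# Alternative: batch the dataset so that get_next() already yields
# matrices of shape (batch_size, 2) and label vectors of shape (batch_size,).
dataset = tf.data.Dataset.from_tensor_slices((x, y_true))
dataset = dataset.repeat().batch(4)
iterator = dataset.make_initializable_iterator()
x_next, y_true_next = iterator.get_next()       # x_next: (4, 2), y_true_next: (4,)
y = tf.squeeze(tf.matmul(x_next, w), name='y')  # no tf.expand_dims needed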
I have a training model in TensorFlow (see below code).
My cumulative 'Test Accuracy' shows 0.92357 after training my model, and I want to check it to be sure I didn't mess something up.
How do I print a predicted output matrix or 'y' after I train the model given the code I have below?
# x will be the input matrix flattened (28x29)
x = tf.placeholder(tf.float32, [None, 812])
# Define the weights (initial value doesn't matter since these will be learned)
W = tf.Variable(tf.random_uniform([812, 812], minval=0, dtype=tf.float32))
b = tf.Variable(tf.random_uniform([812], minval=0, dtype=tf.float32))
# Predict output matrix
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Actual output matrix from the training set
y_ = tf.placeholder(tf.float32, [None, 812])
# Calculate loss and optimize
cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.AdamOptimizer(0.025).minimize(cross_entropy)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
a, b = get_batch()
train_len = len(a)
correct_prediction = tf.equal(y_, y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Training
for i in range(train_len):
    batch_xs = a[i]
    batch_ys = b[i]
    _, loss, acc = sess.run([train_step, cross_entropy, accuracy], feed_dict={x: batch_xs, y_: batch_ys})
    print("Loss= " + "{:.6f}".format(loss) + " Accuracy= " + "{:.5f}".format(acc))
# Test trained model
cumulative_accuracy = 0.0
for i in range(train_len):
    acc_batch_xs = a[i]
    acc_batch_ys = b[i]
    cumulative_accuracy += accuracy.eval(feed_dict={x: acc_batch_xs, y_: acc_batch_ys})
print("Test Accuracy= {}".format(cumulative_accuracy / train_len))
The value of any tensor object can be obtained by using
tensorFlowObject.eval()
Hence, you can use y.eval() to get the value of y. Note that y here depends on the placeholder x, so eval() needs a feed_dict supplying x (and a session, unless you are inside an InteractiveSession as in the code above).
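For example, a minimal sketch reusing the names from the code above (a[0] is assumed to be one input batch from get_batch()):
# y depends on the placeholder x, so a feed_dict is required.
predicted = y.eval(feed_dict={x: a[0]}, session=sess)
print(predicted)             # the predicted output matrix for that batch
print(predicted.argmax(1))   # index of the largest entry in each row, if that is what is needed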