Restoring a model takes too long - Python

I have a problem restoring my model. I trained and saved a model using the code below. I'm not really sure this is the proper method, so I would be grateful for suggestions. The problem occurs when I try to restore the model. I need it only for prediction; it won't be trained further. Restoring the parameters takes forever. How can I improve my saving or restoring code to make it quick, given that I only need the model for predicting?
X = tf.placeholder(tf.float32, [None, 56, 56, 1])
Y_ = tf.placeholder(tf.float32, [None, 36])
L1 = 432
L2 = 72
L3 = 36
W1 = tf.Variable(tf.truncated_normal([3136, L1], stddev=0.1))
b1 = tf.Variable(tf.zeros([L1]))
W2 = tf.Variable(tf.truncated_normal([L1, L2], stddev=0.1))
b2 = tf.Variable(tf.zeros([L2]))
W3 = tf.Variable(tf.truncated_normal([L2, L3], stddev=0.1))
b3 = tf.Variable(tf.zeros([L3]))
XX = tf.reshape(X, [-1, 3136])
Y1 = tf.nn.sigmoid(tf.matmul(XX, W1) + b1)
Y1 = tf.nn.dropout(Y1, keep_prob=0.8)
Y2 = tf.nn.sigmoid(tf.matmul(Y1, W2) + b2)
Y2 = tf.nn.dropout(Y2, keep_prob=0.8)
Ylogits = tf.matmul(Y2, W3) + b3
Y = tf.nn.softmax(Ylogits)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy) * 100
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
train_step = tf.train.GradientDescentOptimizer(0.0001).minimize(cross_entropy)
allweights = tf.concat([tf.reshape(W1, [-1]), tf.reshape(W2, [-1]), tf.reshape(W3, [-1])], 0)
allbiases = tf.concat([tf.reshape(b1, [-1]), tf.reshape(b2, [-1]), tf.reshape(b3, [-1])], 0)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
def next_batch(x, y, batch, step):
    # slice the next batch starting at index `step`
    # (the original sliced x with the global `cur_step` instead of `step`)
    x_temp = x[step:(step + batch)]
    y_temp = np.squeeze(y[step:(step + batch)])
    return x_temp, y_temp
with tf.Session() as sess:
    sess.run(init)
    cur_step = 0
    for i in range(NUM_ITERS + 1):
        batch_X, batch_Y = next_batch(train_xx, train_yy, BATCH, cur_step)
        if i % DISPLAY_STEP == 0:
            acc_trn, loss_trn, w, b = sess.run([accuracy, cross_entropy, allweights, allbiases], feed_dict={X: batch_X, Y_: batch_Y})
            acc_tst, loss_tst = sess.run([accuracy, cross_entropy], feed_dict={X: test_xx, Y_: test_yy})
        sess.run(train_step, feed_dict={X: batch_X, Y_: batch_Y})
    save_path = saver.save(sess, "abc/model")
Restore:
X = tf.placeholder(tf.float32, [None, 56, 56, 1])
Y_ = tf.placeholder(tf.float32, [None, 36])
L1 = 432
L2 = 72
L3 = 36
W1 = tf.Variable(tf.truncated_normal([3136, L1], stddev=0.1))
b1 = tf.Variable(tf.zeros([L1]))
W2 = tf.Variable(tf.truncated_normal([L1, L2], stddev=0.1))
b2 = tf.Variable(tf.zeros([L2]))
W3 = tf.Variable(tf.truncated_normal([L2, L3], stddev=0.1))
b3 = tf.Variable(tf.zeros([L3]))
XX = tf.reshape(X, [-1, 3136])
Y1 = tf.nn.sigmoid(tf.matmul(XX, W1) + b1)
Y1 = tf.nn.dropout(Y1, keep_prob=0.8)
Y2 = tf.nn.sigmoid(tf.matmul(Y1, W2) + b2)
Y2 = tf.nn.dropout(Y2, keep_prob=0.8)
Ylogits = tf.matmul(Y2, W3) + b3
Y = tf.nn.softmax(Ylogits)
with tf.Session() as sess:
    saver = tf.train.Saver()
    saver = tf.train.import_meta_graph('model.meta')
    saver.restore(sess, 'model')
EDIT: Maybe it matters that the model was trained on Google Colab's GPU and I'm restoring it on my PC.

This is a duplicate of: Tensorflow: how to save/restore a model?
Your saving of the model is right, but not your restoring. What you are doing is creating a new graph with the same nodes as the saved model, instead of restoring it from the saved graph. The following steps show how to restore a model:
# Start by resetting the default graph
tf.reset_default_graph()
with tf.Session() as sess:
    # Nodes: before loading the graph
    print([n.name for n in tf.get_default_graph().as_graph_def().node])
    # Output is [] as no graph is loaded yet.

    # First, load the meta graph
    saver = tf.train.import_meta_graph("abc/model.meta")

    # Nodes: after loading the graph
    print([n.name for n in tf.get_default_graph().as_graph_def().node])
    # Output is ['save/RestoreV2/shape_and_slices', 'save/RestoreV2/tensor_ ...]

    # The above step doesn't load the weights, which can be checked by
    print(sess.run('Variable_1:0'))
    # Error: attempting to use an uninitialized value

    # Load the weights
    saver.restore(sess, tf.train.latest_checkpoint('./abc/'))
    print(sess.run('Variable_1:0'))
    # Output: [-2.80421402e-04  3.53254407e-04 ...]
Now that the nodes are loaded and ready, you need to access some of them for inference. But since the nodes are not named properly, it's not easy to figure out which nodes are the inputs and outputs. To avoid this, name the tensors/ops properly when saving the model, using the name argument, like:
X = tf.placeholder(tf.float32, [None, 56, 56, 1], name='X')
Y = tf.identity(tf.nn.softmax(Ylogits), name='logits')
In your inference graph, once you have loaded the graph and weights, you can get these tensors using get_tensor_by_name:
with tf.Session() as sess:
    # Load the graph and weights as above
    ...
    graph = tf.get_default_graph()
    X_infer = graph.get_tensor_by_name('X:0')
    Y_infer = graph.get_tensor_by_name('logits:0')
    sess.run(Y_infer, {X_infer: new_input})
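Putting those pieces together, a minimal inference-only script could look like this sketch. It assumes the model was saved to abc/ with the named tensors from above, and uses a dummy zero batch where real input would go:

import numpy as np
import tensorflow as tf

tf.reset_default_graph()
with tf.Session() as sess:
    # Rebuild the graph structure from the meta file, then load the weights
    saver = tf.train.import_meta_graph('abc/model.meta')
    saver.restore(sess, tf.train.latest_checkpoint('./abc/'))
    # Look up the named input and output tensors
    graph = tf.get_default_graph()
    X_infer = graph.get_tensor_by_name('X:0')
    Y_infer = graph.get_tensor_by_name('logits:0')
    # Predict on a dummy batch (stands in for real 56x56 grayscale images)
    new_input = np.zeros((1, 56, 56, 1), dtype=np.float32)
    print(sess.run(Y_infer, {X_infer: new_input}))  # shape (1, 36) of class probabilities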

Related

How do I use TensorFlow Neural Network output

After running the code below I get values for accuracy, and I can get the values for all the Ws and bs. My question is: how do I use the output to classify things in the future, and how do I save the model?
#########################################
################ SETUP ##################
training_epochs = 500
n_neurons_in_h1 = 60
n_neurons_in_h2 = 60
learning_rate = 0.01
n_features = 3
n_classes = 3
X = tf.placeholder(tf.float32, [None, n_features], name='features')
Y = tf.placeholder(tf.float32, [None, n_classes], name='labels')
W1 = tf.Variable(tf.truncated_normal([n_features, n_neurons_in_h1], mean=0, stddev=1 / np.sqrt(n_features)), name='weights1')
b1 = tf.Variable(tf.truncated_normal([n_neurons_in_h1],mean=0, stddev=1 / np.sqrt(n_features)), name='biases1')
y1 = tf.nn.tanh((tf.matmul(X, W1)+b1), name='activationLayer1')
W2 = tf.Variable(tf.random_normal([n_neurons_in_h1, n_neurons_in_h2],mean=0,stddev=1/np.sqrt(n_features)),name='weights2')
b2 = tf.Variable(tf.random_normal([n_neurons_in_h2],mean=0,stddev=1/np.sqrt(n_features)),name='biases2')
y2 = tf.nn.sigmoid((tf.matmul(y1,W2)+b2),name='activationLayer2')
Wo = tf.Variable(tf.random_normal([n_neurons_in_h2, n_classes], mean=0, stddev=1/np.sqrt(n_features)), name='weightsOut')
bo = tf.Variable(tf.random_normal([n_classes], mean=0, stddev=1/np.sqrt(n_features)), name='biasesOut')
a = tf.nn.softmax((tf.matmul(y2, Wo) + bo), name='activationOutputLayer')
cross_entropy = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(a),reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(a, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy")
#########################################
############### GET DATA ################
(Removed for sake of conciseness)
Data is in the form [var1, var2, var3, label]. It is split 80% into training and 20% into testing.
#########################################
################ RUN ####################
# initialization of all variables
initial = tf.global_variables_initializer()
# creating a session
with tf.Session() as sess:
    sess.run(initial)
    # training loop over the number of epochs
    batchsize = 10
    for epoch in range(training_epochs):
        # step through the training set one batch at a time
        # (the original used `for i in range(len(tr_features))` with `i += batchsize`
        # inside, which has no effect since `for` rebinds `i` each iteration)
        for i in range(0, len(tr_features), batchsize):
            x_batch = tr_features[i:i + batchsize]
            y_batch = tr_labels[i:i + batchsize]
            # feeding training data/examples
            sess.run(train_step, feed_dict={X: x_batch, Y: y_batch})
        # feeding testing data to determine model accuracy
        y_pred = sess.run(tf.argmax(a, 1), feed_dict={X: ts_features})
        y_true = sess.run(tf.argmax(ts_labels, 1))
        acc = sess.run(accuracy, feed_dict={X: ts_features, Y: ts_labels})
        # print accuracy for each epoch
        print('epoch', epoch, acc)
        print('---------------')
        print(y_pred, y_true)
You can use the tf.train.Saver() class to save your model (weights + architecture). The save method of this class saves everything, and the restore method loads the file created by save, so you can predict without having to retrain.
Look at the doc: Save and Restore - Tensorflow
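As a minimal sketch of that workflow applied to the graph above (the checkpoint path 'my_model/model' and the new_samples array are placeholders of my own, not from the question):

saver = tf.train.Saver()

# Training run: save the variables once training is done
with tf.Session() as sess:
    sess.run(initial)
    # ... training loop as above ...
    saver.save(sess, 'my_model/model')  # writes checkpoint files to my_model/

# Later, after rebuilding the same graph definition (e.g. in another script):
with tf.Session() as sess:
    saver.restore(sess, 'my_model/model')  # loads the trained weights, no retraining
    predicted_class = sess.run(tf.argmax(a, 1), feed_dict={X: new_samples})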

Tensorflow CNN test data splitting and array sizing problems

I've tried to figure things out myself and not fall back to actually creating an account here, but as a self-taught beginner I've reached a wall with this code.
I'm having two major issues, besides optimizing the net architecture once everything is working:
Every time I've tried to create a new dataset for a test batch I've run into an 'xTensor is not a Tensor' error and couldn't run a session through it, unlike with the iterator, which works just fine. I'm loading custom data with directory names as labels and no manually created train and test directories. I'm probably missing a proper method for tf.
I can't work around the first error I currently get:
'ValueError: Cannot feed value of shape (100,) for Tensor 'Placeholder_1:0', which has shape '(?, 1)'' when feeding feed_dict={y: batch_y}. I've tried some of the solutions posted on SO but couldn't get them to work.
I'm pasting the whole thing; the ########## comments mark the problem-triggering zones at the very bottom, in the session.
import tensorflow as tf
import numpy as np
import os
# load custom imageset directory
data_path = r"..\datasets\images\flowers"
# setup hypervariables for labels and images format
n_classes = 5
img_width = 64
img_length = 64
channels = 3
# setup hypervariables for network
learning_rate = 0.0001
epochs = 2
batch_size = 100
drop_rate = 0.6
imagepaths = list()
labels = list()
label = 0
classes = sorted(os.walk(data_path).__next__()[1])
# List each sub-directory (the classes)
for c in classes:
    c_dir = os.path.join(data_path, c)
    walk = os.walk(c_dir).__next__()
    # Add each image to the training set
    for sample in walk[2]:
        imagepaths.append(os.path.join(c_dir, sample))
        labels.append(label)
    label += 1
total_input = len(labels)
# Convert to Tensor
imagepaths = tf.convert_to_tensor(imagepaths, dtype=tf.string)
labels = tf.convert_to_tensor(labels, dtype=tf.int32)
# Build a TF Queue, shuffle data
dataset = tf.data.Dataset.from_tensor_slices((imagepaths, labels))
# read, decode, resize and normalize images on RGB range
def parse(imagepath, label):
    image = tf.read_file(imagepath)
    image = tf.image.decode_jpeg(image, channels=channels)
    image = tf.image.resize_images(image, [img_length, img_width])
    image = image * 1.0/255
    return image, label
dataset = dataset.map(parse)
dataset = dataset.shuffle(buffer_size=batch_size*10)
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
next_batch = iterator.get_next()
# hypervariables for layers' output size
K = 16
L = 32
M = 200
x = tf.placeholder(tf.float32, [None, 4326])
x_shaped = tf.reshape(x, [-1, img_length, img_width, 3])
y = tf.placeholder(tf.float32, [None, 1])
# weight, bias with stride size and activation method after convolution for layer 1
W1 = tf.Variable(tf.truncated_normal([5, 5, 3, K], stddev=0.03))
b1 = tf.Variable(tf.truncated_normal([K], stddev=0.01))
stride = 1
y1 = tf.nn.relu(tf.nn.conv2d(x_shaped, W1, strides=[1, stride, stride, 1], padding='SAME') + b1)
# weight, bias with stride size and activation method after convolution for layer 2
W2 = tf.Variable(tf.truncated_normal([5, 5, K, L], stddev=0.03))
b2 = tf.Variable(tf.truncated_normal([L], stddev=0.01))
stride = 2 # output is 14x14
y2 = tf.nn.relu(tf.nn.conv2d(y1, W2, strides=[1, stride, stride, 1], padding='SAME') + b2)
yflat = tf.reshape(y2, [-1, 7 * 7 * L])
W3 = tf.Variable(tf.truncated_normal([7 * 7 * L, M], stddev=0.1))
b3 = tf.Variable(tf.truncated_normal([M], stddev=0.01))
y3 = tf.nn.relu(tf.matmul(yflat, W3) + b3)
W4 = tf.Variable(tf.truncated_normal([M, 10], stddev=0.1))
b4 = tf.Variable(tf.truncated_normal([10], stddev=0.01))
ylogits = tf.matmul(y3, W4) + b4
y_ = tf.nn.softmax(ylogits)
# add cross entropy for back prop
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=ylogits, labels=y_))
# add an optimiser for back prop
optimiser = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
# define an accuracy assessment operation
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    ########## temporary solution for test_x, test_y
    test_x, test_y = sess.run(next_batch)
    total_batch = int(total_input / batch_size)
    # define the iterator for the network
    for epoch in range(epochs):
        avg_cost = 0
        for i in range(total_batch):
            batch_x, batch_y = sess.run(next_batch)
            ########## ValueError: Cannot feed value of shape (100,) for Tensor 'Placeholder_1:0' -> y: batch_y
            _, c = sess.run([optimiser, cross_entropy], feed_dict={x_shaped: batch_x, y: batch_y})
            avg_cost += c / total_batch
        test_acc = sess.run(accuracy, feed_dict={x: test_x, y: test_y})
        print("Epoch:", (epoch + 1), "cost =", "{:.3f}".format(avg_cost), " test accuracy: {:.3f}".format(test_acc))
        summary = sess.run(merged, feed_dict={x: test_x, y: test_y})
    print("\nTraining complete!")
    print(sess.run(accuracy, feed_dict={x: test_x, y: test_y}))
Are you sure that this part:
_, c = sess.run([optimiser, cross_entropy], feed_dict={x_shaped: batch_x, y: batch_y})
doesn't have to be:
_, c = sess.run([optimiser, cross_entropy], feed_dict={x: batch_x, y: batch_y})
Furthermore, you have a batch size of 100. The data in the array is right, but its shape is not complete.
What you have (dummy example):
np.zeros((100,)).shape
>>> (100,)
Here the 100 matches the '?' of the required shape '(?, 1)'. The 1 can easily be added, and it often happens that numpy does not do this for you. See the following code:
np.expand_dims(np.zeros((100,)), axis=-1).shape
>>> (100, 1)
axis=-1 stands for the last axis; you basically tell numpy to add a dimension at the end. This does not affect the data itself, only the shape of the array. So your code should be:
_, c = sess.run([optimiser, cross_entropy], feed_dict={x_shaped: batch_x, y:np.expand_dims(batch_y, axis=-1)})
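For what it's worth, an equivalent fix is to reshape the labels instead; both just add the trailing axis of length 1:

_, c = sess.run([optimiser, cross_entropy], feed_dict={x_shaped: batch_x, y: batch_y.reshape(-1, 1)})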

Learn the sum of two numbers in Tensorflow

I'm trying to train a neural network to predict the sum of two numbers, but I don't understand what's wrong with my model. The model consists of 2 input, 2 hidden, and 1 output layers. Every 1000 iterations I print a test prediction, and the result keeps getting smaller and smaller.
import numpy as np
import tensorflow as tf

input_size = 2
hidden_size = 3
out_size = 1

def generate_test_data():
    inp = 0.5*np.random.rand(10, 2)
    oup = np.zeros((10, 1))
    for idx, val in enumerate(inp):
        oup[idx] = np.array([val[0] + val[1]])
    return inp, oup

def create_network():
    x = tf.placeholder(tf.float32, [None, input_size])
    w01 = tf.Variable(tf.truncated_normal([input_size, hidden_size], stddev=0.1))
    y1 = tf.sigmoid(tf.matmul(tf.sigmoid(x), w01))
    w12 = tf.Variable(tf.truncated_normal([hidden_size, out_size], stddev=0.1))
    y2 = tf.sigmoid(tf.matmul(y1, w12))
    y_ = tf.placeholder(tf.float32, [None, out_size])
    return x, y_, y2

def train(x, y_, y2):
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y2)
    )
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # Train
    for i in range(100000):
        batch_xs, batch_ys = generate_test_data()
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        # Test
        if i % 1000 == 0:
            out_batch = sess.run(y2, {x: batch_xs})
            inx = 0
            print(batch_xs[inx][0], " + ", batch_xs[inx][1], " = ", out_batch[inx][0])

(x, y_, y2) = create_network()
train(x, y_, y2)
Output every 1000 iterations:
0.37301352864927173 + 0.28949461772342683 = 0.49111518
0.050899466843458474 + 0.006174158992116541 = 0.0025260744
0.3974852369427063 + 0.22402098418952499 = 0.00090828544
0.15735921047969498 + 0.39645077887600294 = 0.0005903727
0.23560825884336228 + 0.29010766384718145 = 0.0004317883
0.4250063393420791 + 0.24181166029062096 = 0.00031525563
... and the output keeps getting smaller.
Cross-entropy loss is used for classification problems, while your task is clearly a regression. The computed cross_entropy value doesn't make sense, hence the result.
Change your loss to:
cross_entropy = tf.reduce_mean(
    tf.nn.l2_loss(y_ - y2)
)
... and you'll see much more sensible results.
Maxim, thanks a lot. Now it works.
import numpy as np
import tensorflow as tf

input_size = 2
hidden_size = 3
out_size = 1

def generate_test_data():
    inp = 0.5*np.random.rand(10, 2)
    oup = np.zeros((10, 1))
    for idx, val in enumerate(inp):
        oup[idx] = np.array([val[0] + val[1]])
    return inp, oup

def create_network():
    x = tf.placeholder(tf.float32, [None, input_size])
    w01 = tf.Variable(tf.truncated_normal([input_size, hidden_size], stddev=0.1))
    y1 = tf.matmul(x, w01)
    w12 = tf.Variable(tf.truncated_normal([hidden_size, out_size], stddev=0.1))
    y2 = tf.matmul(y1, w12)
    y_ = tf.placeholder(tf.float32, [None, out_size])
    return x, y_, y2

def train(x, y_, y2):
    cross_entropy = tf.reduce_mean(
        tf.nn.l2_loss(y_ - y2)
    )
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # Train
    for i in range(100000):
        batch_xs, batch_ys = generate_test_data()
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        # Test
        if i % 2000 == 0:
            out_batch = sess.run(y2, {x: batch_xs})
            inx = 0
            print(batch_xs[inx][0], " + ", batch_xs[inx][1], " = ", out_batch[inx][0], "|", batch_xs[inx][0] + batch_xs[inx][1])

(x, y_, y2) = create_network()
train(x, y_, y2)
If you consider predicting each digit to be a classification problem, where you predict a value in "0123456789 ", you can use cross-entropy as your loss. For reference, see the Keras Addition RNN example.
But as Maxim said, cross-entropy shouldn't be used for a regression problem.
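For illustration, here is a minimal sketch of the digit-classification idea in plain TensorFlow (the integer setup, layer sizes, and all names are my own, not from the question or the Keras example): the sum of two integers in [0, 50) is predicted as two 10-way classifications, one softmax for the tens digit and one for the ones digit.

import numpy as np
import tensorflow as tf

def generate_batch(n=64):
    a = np.random.randint(0, 50, (n, 2))   # two integer addends
    s = a.sum(axis=1)                       # sums in [0, 98]
    tens = np.eye(10)[s // 10]              # one-hot tens digit
    ones = np.eye(10)[s % 10]               # one-hot ones digit
    return a.astype(np.float32) / 50.0, tens, ones

x = tf.placeholder(tf.float32, [None, 2])
t_tens = tf.placeholder(tf.float32, [None, 10])
t_ones = tf.placeholder(tf.float32, [None, 10])
h = tf.layers.dense(x, 32, activation=tf.nn.relu)
logits_tens = tf.layers.dense(h, 10)        # classifier for the tens digit
logits_ones = tf.layers.dense(h, 10)        # classifier for the ones digit
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=t_tens, logits=logits_tens) +
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=t_ones, logits=logits_ones))
train_step = tf.train.AdamOptimizer(0.01).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(2000):
        bx, bt, bo = generate_batch()
        sess.run(train_step, feed_dict={x: bx, t_tens: bt, t_ones: bo})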

2 layer neural network in tensorflow

I am new to tensorflow and am trying to train the following two-layer network. It doesn't seem to be working, as the cross entropy is not decreasing over iterations. I think I screwed up connecting the hidden layer to the output layer. Please help me if you can see the problem.
import tensorflow as tf
from scipy.io import loadmat
import numpy as np
import sys
x = loadmat('../mnist_data/ex4data1.mat')
X = x['X']
# one hot conversion
y_temp = x['y']
y_temp = np.reshape(y_temp, (len(y_temp),))
y = np.zeros((len(y_temp),10))
y[np.arange(len(y_temp)), y_temp-1] = 1.
input_size = 400
hidden1_size = 25
output_size = 10
num_iters = 50
reg_alpha = 0.05
x = tf.placeholder(tf.float32, [None, input_size], name='data')
W1 = tf.Variable(tf.zeros([hidden1_size, input_size], tf.float32, name='weights_1st_layer'))
b1 = tf.Variable(tf.zeros([hidden1_size], tf.float32), name='bias_layer_1')
W2 = tf.Variable(tf.zeros([output_size, hidden1_size], tf.float32, name='weights_2nd_layer'))
b2 = tf.Variable(tf.zeros([output_size], tf.float32), name='bias_layer_2')
hidden_op = tf.nn.relu(tf.add(tf.matmul(x, W1, transpose_b=True), b1))
output_op = tf.matmul(hidden_op, W2, transpose_b=True) + b2
pred = tf.nn.softmax(output_op)
y_ = tf.placeholder(tf.float32, [None, 10], name='actual_labels')
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels=y_, logits=output_op))
train_step = tf.train.GradientDescentOptimizer(reg_alpha).minimize(cross_entropy)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
for _ in range(50):
    print('training..', _)
    print(sess.run([train_step, cross_entropy], feed_dict={x: X, y_: y}))
corr_pred = tf.equal(tf.argmax(pred, axis=1), tf.argmax(y_, axis=1))
acc = tf.reduce_mean(tf.cast(corr_pred, tf.float32))
print (sess.run(acc, feed_dict={x:X, y_:y}))
sess.close()
Try initialising your weights as randoms, not zeros.
So instead of:
W1 = tf.Variable(tf.zeros([hidden1_size, input_size], tf.float32, name='weights_1st_layer'))
W2 = tf.Variable(tf.zeros([output_size, hidden1_size], tf.float32, name='weights_2nd_layer'))
use:
W1 = tf.Variable(tf.truncated_normal([hidden1_size, input_size], stddev=0.1), name='weights_1st_layer')
W2 = tf.Variable(tf.truncated_normal([output_size, hidden1_size], stddev=0.1), name='weights_2nd_layer')
Check this nice summary of why initialising all the weights to zero prevents the network from training: with identical weights, every neuron in a layer computes the same output and receives the same gradient update, so the neurons can never differentiate from one another (and with all-zero weights here, the gradients are zero outright, so nothing moves at all).
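To make the symmetry argument concrete, here is a tiny numpy sketch (my own construction, not from the linked summary): with zero-initialised weights the gradients come out all zero, so gradient descent never moves, while a random initialisation breaks the tie.

import numpy as np

# One hidden layer with two tanh units and a squared loss: L = 0.5 * (y - t)**2
def gradients(W1, W2, x, t):
    h = np.tanh(W1 @ x)              # hidden activations, shape (2,)
    y = W2 @ h                       # scalar output
    dy = y - t                       # dL/dy
    dW2 = dy * h                     # gradient w.r.t. output weights
    dh = dy * W2 * (1 - h ** 2)      # backprop through tanh
    dW1 = np.outer(dh, x)            # gradient w.r.t. hidden weights
    return dW1, dW2

x = np.array([1.0, 2.0, 3.0])
t = 1.0

# Zero init: hidden outputs are 0, so every gradient is 0 and nothing updates
print(gradients(np.zeros((2, 3)), np.zeros(2), x, t))

# Random init: gradients are nonzero and differ between the two hidden units
rng = np.random.default_rng(0)
print(gradients(rng.normal(0, 0.1, (2, 3)), rng.normal(0, 0.1, 2), x, t))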

tensorflow nn mnist example with modified layer dimensions

I modified this mnist example so that it has two outputs and a middle layer of 10 nodes. It doesn't work; it gives me a 0.50 score all the time. I think it just picks one of the outputs and responds with that no matter what the input is. How could I fix this so that it actually does some learning? The outputs are supposed to represent 0 for 'skin tone' and 1 for 'no skin tone'. I use png input.
def nn_setup(self):
    input_num = 784 * 3  # like mnist but with three channels
    mid_num = 10
    output_num = 2
    x = tf.placeholder(tf.float32, [None, input_num])
    W_1 = tf.Variable(tf.random_normal([input_num, mid_num], stddev=0.04))
    b_1 = tf.Variable(tf.random_normal([mid_num], stddev=0.5))
    y_mid = tf.nn.relu(tf.matmul(x, W_1) + b_1)
    W_2 = tf.Variable(tf.random_normal([mid_num, output_num], stddev=0.4))
    b_2 = tf.Variable(tf.random_normal([output_num], stddev=0.5))
    y_logits = tf.matmul(y_mid, W_2) + b_2
    y = tf.nn.softmax(y_logits)
    y_ = tf.placeholder(tf.float32, [None, output_num])
    # note: softmax_cross_entropy_with_logits requires keyword arguments in TF 1.x
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_logits, labels=y_))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
    init = tf.global_variables_initializer()  # replaces the deprecated initialize_all_variables
    self.sess = tf.Session()
    self.sess.run(init)
    for i in range(1000):
        batch_xs, batch_ys = self.get_nn_next_train()
        self.sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    self.nn_test.images, self.nn_test.labels = self.get_nn_next_test()
    print(self.sess.run(accuracy, feed_dict={x: self.nn_test.images, y_: self.nn_test.labels}))
