TensorFlow AI not progressing, gym Breakout-ram-v4 - python

I need help: when I run the code below, my AI is not learning. I think the problem is in the reward system, but I'm not sure. Could someone help me get it to learn, either by telling me how to make it better or by fixing it directly? Thank you for your help.
Imports
import tensorflow as tf
import gym
import os
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Load an environment
env = gym.make('Breakout-ram-v4')

Making my agent
class Agent:
    def __init__(self, num_actions, state_size):
        initializer = tf.contrib.layers.xavier_initializer()
        self.input_layer = tf.placeholder(dtype=tf.float32, shape=[None, state_size])
        # Neural net starts here
        hidden_layer = tf.layers.dense(self.input_layer, 64, activation=tf.nn.relu, kernel_initializer=initializer)
        hidden_layer_2 = tf.layers.dense(hidden_layer, 32, activation=tf.nn.relu, kernel_initializer=initializer)
        dropout1 = tf.layers.dropout(hidden_layer_2, rate=0.1, training=True)
        hidden_layer_3 = tf.layers.dense(dropout1, 16, activation=tf.nn.relu, kernel_initializer=initializer)
        dropout = tf.layers.dropout(hidden_layer_3, rate=0.2, training=True)
        # Output of neural net
        out = tf.layers.dense(dropout, num_actions, activation=None)
        self.outputs = tf.nn.softmax(out)
        self.choice = tf.argmax(self.outputs, axis=1)
        # Training procedure
        self.rewards = tf.placeholder(shape=[None, ], dtype=tf.float32)
        self.actions = tf.placeholder(shape=[None, ], dtype=tf.int32)
        one_hot_actions = tf.one_hot(self.actions, num_actions)
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=out, labels=one_hot_actions)
        self.loss = tf.reduce_mean(cross_entropy * self.rewards)
        self.gradients = tf.gradients(self.loss, tf.trainable_variables())
        # Create a placeholder list for gradients
        self.gradients_to_apply = []
        for index, variable in enumerate(tf.trainable_variables()):
            gradient_placeholder = tf.placeholder(tf.float32)
            self.gradients_to_apply.append(gradient_placeholder)
        # Create the operation to update the variables with the accumulated gradients
        optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
        self.update_gradients = optimizer.apply_gradients(zip(self.gradients_to_apply, tf.trainable_variables()))
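One hyperparameter worth flagging before the reward code: learning_rate=0.1 is very aggressive for Adam in a policy-gradient setting and can by itself keep the agent from learning. Something around 1e-3 is a more common starting point, e.g.:

optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)  # typical policy-gradient range: 1e-4 to 1e-3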
Reward system
discount_rate = 0.95

# discount and normalize the episode's rewards
def discount_normalize_rewards(rewards):
    discount_rewards = np.zeros_like(rewards)
    total_rewards = 0
    for i in reversed(range(len(rewards))):
        total_rewards = total_rewards * discount_rate + rewards[i]
        discount_rewards[i] = total_rewards
    if discount_rewards[i] == 0:
        # after the loop i == 0, so this checks the full discounted return;
        # skip normalization when it is zero to avoid dividing by zero
        return discount_rewards
    else:
        discount_rewards /= np.std(discount_rewards)
        discount_rewards -= np.mean(discount_rewards)
        return discount_rewards
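One thing to check against the standard REINFORCE recipe: normalization usually subtracts the mean before dividing by the standard deviation, while the version above divides first and then shifts, and the early return skips normalization entirely for zero-return episodes. A minimal sketch of the conventional variant (the eps guard is my addition so all-zero episodes don't divide by zero):

def discount_normalize_rewards_v2(rewards, eps=1e-8):
    discounted = np.zeros_like(rewards, dtype=np.float32)
    running = 0.0
    for i in reversed(range(len(rewards))):
        running = running * discount_rate + rewards[i]
        discounted[i] = running
    # standardize: zero mean first, then unit variance
    discounted -= np.mean(discounted)
    discounted /= (np.std(discounted) + eps)
    return discounted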
Training the AI
tf.reset_default_graph()

# Modify these to match the shape of actions and states in your environment
num_actions = 4
state_size = 128
path = "./breakout-pg/"
training_episodes = 10000
max_steps_per_episode = 100000
episode_batch_size = 5

agent = Agent(num_actions, state_size)
init = tf.global_variables_initializer()
saver = tf.train.Saver(max_to_keep=2)

if not os.path.exists(path):
    os.makedirs(path)

with tf.Session() as sess:
    sess.run(init)
    total_episode_rewards = []
    # Create a buffer of zeroed gradients
    gradient_buffer = sess.run(tf.trainable_variables())
    for index, gradient in enumerate(gradient_buffer):
        gradient_buffer[index] = gradient * 0
    for episode in range(training_episodes):
        state = env.reset()
        episode_history = []
        episode_rewards = 0
        for step in range(max_steps_per_episode):
            env.render()
            # Get the action probabilities for the current state
            action_probabilities = sess.run(agent.outputs, feed_dict={agent.input_layer: [state]})
            action_choice = np.random.choice(range(num_actions), p=action_probabilities[0])
            state_next, reward, done, _ = env.step(action_choice)
            episode_history.append([state, action_choice, reward, state_next])
            state = state_next
            episode_rewards += reward
            if done or step + 1 == max_steps_per_episode:
                total_episode_rewards.append(episode_rewards)
                episode_history = np.array(episode_history)
                episode_history[:, 2] = discount_normalize_rewards(episode_history[:, 2])
                ep_gradients = sess.run(agent.gradients,
                                        feed_dict={agent.input_layer: np.vstack(episode_history[:, 0]),
                                                   agent.actions: episode_history[:, 1],
                                                   agent.rewards: episode_history[:, 2]})
                # Add the episode gradients to the gradient buffer
                for index, gradient in enumerate(ep_gradients):
                    gradient_buffer[index] += gradient
                break
        if episode % episode_batch_size == 0:
            feed_dict_gradients = dict(zip(agent.gradients_to_apply, gradient_buffer))
            sess.run(agent.update_gradients, feed_dict=feed_dict_gradients)
            for index, gradient in enumerate(gradient_buffer):
                gradient_buffer[index] = gradient * 0
        # save checkpoints more frequently
        if episode % 10 == 0:
            saver.save(sess, path + "pg-checkpoint", episode)
            print("stage: " + str(episode) + " ||| Average reward / 10 eps: " + str(np.mean(total_episode_rewards[-10:])))

print("done!")

Related

Tensorflow - Changing dropout value has no effect on network

I trained a network to perform semantic segmentation with dropout, and my understanding is that as you vary the dropout keep_prob value, the output prediction should change. However, after saving the model using the tensorflow-serving method, loading it with tf.saved_model.loader.load, and varying the dropout value, I get the same output prediction value (dice score).
I followed the suggestions in this SO post, but I still get the same prediction results even if I enter 0.0.
I didn't know whether it was a tensorflow issue or a bug in my code, so I tried downgrading from v1.15 to v1.10 to check the former, and I still got the same results. I am now sure it is a bug in my code, but I am not sure where it is. A minimal working example is shown below. Could someone help me? Thank you!
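For reference, a tiny self-contained check of the behavior being described: feeding different values to a keep_prob placeholder through tf.nn.dropout should change the output at run time (TF 1.x style; the names here are illustrative):

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 4])
keep_prob = tf.placeholder(tf.float32, shape=[])
y = tf.nn.dropout(x, keep_prob)  # kept units are scaled by 1/keep_prob

with tf.Session() as sess:
    data = np.ones((1, 4), dtype=np.float32)
    print(sess.run(y, feed_dict={x: data, keep_prob: 1.0}))  # unchanged
    print(sess.run(y, feed_dict={x: data, keep_prob: 0.5}))  # some zeros, survivors scaled to 2.0

If varying the fed value does nothing in a graph like this, the dropout op is probably not actually wired to the placeholder being fed (see the note after the dropout helper further down).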
This is a snippet of my training script:
#===============
def run_iteration(self, feed_dict, op_list, summaries):
    output_args = self.sess.run(op_list, feed_dict=feed_dict)
    return output_args

#===============
def run_epoch_train(self, curr_epoch):
    print('Training over all batches')
    num_total_batches = self.num_total_batches_train
    curr_batch_counter = 0
    # for each batch in training images
    for batch in self.batch_iterator_train:
        # dropout is included
        if self.dropout_training_Flag == 1:
            _, loss, dice = self.run_iteration(
                feed_dict={
                    self.placeholders['images']: batch['images'],
                    self.placeholders['labels']: batch['labels'],
                    self.placeholders['is_training']: True,
                    self.placeholders['dropout_prob']: self.dropout_prob_training,
                },
                op_list=[
                    self.fitting_op,
                    self.losses[self.active_loss],
                    #self.outputs['sigmoid'],
                    self.outputs['dice'],
                ],
                summaries=[],
            )
        curr_batch_counter = curr_batch_counter + 1
        if (self.iteration % 5) == 0:
            print('Saving model in training session')
            self.saver.save(curr_epoch + 1)
This is a snippet of my testing script:
#===============
path_to_model = self.root_path_to_models + '/' + '25'
print(path_to_model)
model = tf.saved_model.loader.load(
    sess,
    [tf.saved_model.tag_constants.SERVING],
    path_to_model
)
inputImage_name = model.signature_def['prediction'].inputs['images'].name
x_inp = tf.get_default_graph().get_tensor_by_name(inputImage_name)
isTraining_name = model.signature_def['prediction'].inputs['is_training'].name
tflag_op = tf.get_default_graph().get_tensor_by_name(isTraining_name)
outputs_name = model.signature_def['prediction'].outputs['sigmoid'].name
y_op = tf.get_default_graph().get_tensor_by_name(outputs_name)
if self.dropout_training_Flag == 1:
    dropoutProb_name = model.signature_def['prediction'].inputs['dropout_prob'].name
    dropout_prob_op = tf.get_default_graph().get_tensor_by_name(dropoutProb_name)
    print(dropout_prob_op)

# iterate over batches of images
# iterate over motion category
for moCat in self.motion_categories:
    # get datasets in motion category
    datasets_in_moCat = d_ffn_images_labels[moCat]
    dataset_name = list(datasets_in_moCat.keys())[-1]
    #print(dataset_name)
    loss_for_each_image = []
    final_vol = np.zeros((self.original_input_image_width, self.original_input_image_height, self.num_vol_slices), dtype=np.uint8)
    # get images
    curr_dataset_images = datasets_in_moCat[dataset_name][0][0]
    # get labels
    curr_dataset_labels = datasets_in_moCat[dataset_name][0][1]
    # current dataset label numbers
    curr_dataset_label_numbers = d_bfnumber_images_labels[moCat][dataset_name]
    #print('curr_dataset_label_numbers', curr_dataset_label_numbers)
    # number of images/labels in current dataset, for current category
    num_images = len(curr_dataset_images)
    num_labels = len(curr_dataset_labels)
    # check if num-images/labels are the same
    assert(num_images == num_labels)
    # load each image
    for elem_idx in range(num_images):
        img_path = curr_dataset_images[elem_idx]
        lab_path = curr_dataset_labels[elem_idx]
        xn = nib.load(img_path)
        x = np.array(xn.dataobj)
        labn = nib.load(lab_path)
        lab = np.array(labn.dataobj)
        data_affine_tform = xn.affine
        # resize
        xr = cv2.resize(x, (self.network_input_image_width, self.network_input_image_height), interpolation=cv2.INTER_LANCZOS4)
        # standardize
        y = standardize_zeroMeanUnitVar_image(copy.deepcopy(xr), self.network_input_image_width, self.network_input_image_height, self.network_input_channels)
        #y = cv2.normalize(copy.deepcopy(xr), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
        # match network input -- [height, width, channels]
        y = np.reshape(y, newshape=(self.network_input_image_height, self.network_input_image_width, self.network_input_channels))
        # append to match network input -- [batch, height, width, channels]
        input_list = []
        input_list.append(y)
        input_list = np.asarray(input_list).astype(np.float32)
        # ======================
        # MODIFY DROPOUT HERE FROM JSON FILE
        # CHANGED VALUES FROM 0.0, 0.5, 1.0 -- same prediction score
        # ======================
        # run and get output
        if self.dropout_training_Flag == 1:
            output = sess.run(y_op, feed_dict={x_inp: input_list, tflag_op: True, dropout_prob_op: self.dropout_prob_testing})
        else:
            output = sess.run(y_op, feed_dict={x_inp: input_list, tflag_op: False})
        tmpOut = cv2.resize(output[0, :, :, 0], (self.original_input_image_width, self.original_input_image_height), interpolation=cv2.INTER_LANCZOS4)
        prediction = np.asarray((tmpOut > 0.5))
        labels = np.asarray((lab > 0))
        EPS = 0.0000001
        #output_original = cv2.resize(output[0,:,:,0], (original_input_image_width, original_input_image_height), interpolation=cv2.INTER_LANCZOS4)
        loss = 2.0 * np.sum(labels * prediction, axis=(0, 1)) / (np.sum(labels ** 2 + prediction ** 2, axis=(0, 1)) + EPS)
        loss_for_each_image.append(loss)
        # place slice in final_vol
        #print(curr_dataset_label_numbers[elem_idx][1])
        #print(type(curr_dataset_label_numbers[elem_idx][1]))
        final_vol[:, :, curr_dataset_label_numbers[elem_idx][1] - 1] = np.asarray(prediction * 255.0).astype(np.uint8)
    # dice mean over dataset
    dice_mean_for_dataset = np.mean(loss_for_each_image)
    print(dataset_name, dice_mean_for_dataset)
    self.diceScore_for_each_dataset.append(dice_mean_for_dataset)
    self.list_dataset_name.append(dataset_name)
This is the code for the inputs/outputs:
#===============
def create_placeholders(self):
    self.placeholders['images'] = tf.placeholder(
        shape=[None] + self.network_input_size + [self.network_input_channels],
        name='images',
        dtype=tf.float32
    )
    self.placeholders['labels'] = tf.placeholder(
        shape=[None] + self.network_input_size + [self.network_output_channels],
        name='labels',
        dtype=tf.float32
    )
    self.placeholders['is_training'] = tf.placeholder(
        shape=[],
        name='is_training',
        dtype=tf.bool
    )
    # dropout is included
    if self.dropout_training_Flag == 1:
        self.placeholders['dropout_prob'] = tf.placeholder(
            shape=[],
            name='dropout_prob',
            dtype=tf.float32
        )
#===============
def create_outputs(self):
    if self.network_name == 'UNet':
        print('\n')
        print('Training UNet')
        # dropout is included
        if self.dropout_training_Flag == 1:
            # train with dropout
            unet_output = unet_dropout(
                self.placeholders['images'],
                self.placeholders['is_training'],
                self.placeholders['dropout_prob'],
                self.network_output_channels
            )
        if self.network_output_channels == 1:
            self.outputs['sigmoid'] = unet_output
        else:
            self.outputs['sigmoid'] = unet_output
This is the code for my model:
#===============
def batch_norm_relu(inputs, is_training):
    net = slim.batch_norm(inputs, is_training=is_training)
    net = tf.nn.relu(net)
    return net

#===============
def dropout(input, keep_prob, is_training):
    if is_training == True:
        dropout = tf.nn.dropout(input, keep_prob)
    else:
        dropout = input
    return dropout
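A likely culprit is right here: if is_training is the tf.bool placeholder rather than a Python bool, the expression `if is_training == True` is evaluated once at graph-construction time, and comparing a Tensor to True with == is never Python True in TF 1.x, so the else branch (no dropout) gets baked into the graph regardless of what you feed later. The run-time equivalent is a graph-level conditional; a minimal sketch:

def dropout(inputs, keep_prob, is_training):
    # tf.cond selects a branch at run time based on the is_training tensor
    return tf.cond(
        is_training,
        lambda: tf.nn.dropout(inputs, keep_prob),
        lambda: inputs
    )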
#===============
def model(inputs, is_training, keep_prob, num_classes):
    with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
        base_num_kernels = 64
        # =================================
        # encoder
        # 256
        x = conv2d_fixed_padding(inputs=inputs, filters=base_num_kernels, kernel_size=3, stride=1)
        x = batch_norm_relu(x, is_training)
        x = conv2d_fixed_padding(inputs=x, filters=base_num_kernels, kernel_size=3, stride=1)
        x = batch_norm_relu(x, is_training)
        output_b1 = x
        output_list_b1 = [x]
        output_b1 = dropout(output_b1, keep_prob, is_training)
        output_b1 = tf.layers.max_pooling2d(inputs=output_b1, pool_size=2, strides=2, padding='SAME')
        # =================================
        # 128
        x = conv2d_fixed_padding(inputs=output_b1, filters=2*base_num_kernels, kernel_size=3, stride=1)
        x = batch_norm_relu(x, is_training)
        x = conv2d_fixed_padding(inputs=x, filters=2*base_num_kernels, kernel_size=3, stride=1)
        x = batch_norm_relu(x, is_training)
        output_b2 = x
        output_list_b2 = [x]
        output_b2 = dropout(output_b2, keep_prob, is_training)
        # =================================
        # decoder
        # 128 -> 256
        output_b3 = conv2d_transpose(output_b2, kernel_size=2, output_channels=base_num_kernels)
        output_b4 = tf.concat([output_b3, x], axis=3)
        # =================================
        # 256
        conv_final = conv2d_fixed_padding(inputs=output_b4, filters=base_num_kernels, kernel_size=3, stride=1)
        conv_final = batch_norm_relu(conv_final, is_training)
        conv_final = conv2d_fixed_padding(inputs=conv_final, filters=base_num_kernels, kernel_size=3, stride=1)
        conv_final = batch_norm_relu(conv_final, is_training)
        # =================================
        # output
        outputs = conv2d_fixed_padding(inputs=conv_final, filters=num_classes, kernel_size=3, stride=1)
        if num_classes == 1:
            outputs = tf.nn.sigmoid(outputs)
        else:
            h = outputs.get_shape().as_list()[1]
            w = outputs.get_shape().as_list()[2]
            outputs_reshaped = tf.reshape(outputs, np.asarray([-1, num_classes]))
            outputs_final = tf.nn.softmax(outputs_reshaped)
            outputs = tf.reshape(outputs_final, np.asarray([-1, h, w, num_classes]))
        return outputs
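Minor aside: the reshape-softmax-reshape in the multi-class branch computes a softmax over the channel axis, which newer TF 1.x releases (1.6+) can express directly:

outputs = tf.nn.softmax(outputs, axis=-1)  # per-pixel softmax over num_classes; shape is preserved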
This is how I save the network weights:
#===============
def __create_summary_manager(self):
    self.saver = Saver(
        self.sess,
        self.placeholders,
        self.outputs,
        self.savepath
    )

#===============
import os
import tensorflow as tf

class Saver(object):
    def __init__(self, sess, input_dict, output_dict, path):
        self.sess = sess
        self.input_dict = input_dict
        self.output_dict = output_dict
        self.path = path
        self.iteration = 0
        self.input_dict_info = {}
        self.output_dict_info = {}
        for key in input_dict.keys():
            self.input_dict_info[key] = \
                tf.saved_model.utils.build_tensor_info(
                    self.input_dict[key]
                )
        for key in output_dict.keys():
            self.output_dict_info[key] = \
                tf.saved_model.utils.build_tensor_info(
                    self.output_dict[key]
                )
        self.prediction_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs=self.input_dict_info,
                outputs=self.output_dict_info)
        )

    def save(self, iteration_val):
        self.iteration += 1
        export_path = os.path.join(
            tf.compat.as_bytes(self.path),
            tf.compat.as_bytes(str(iteration_val))
        )
        self.builder = tf.saved_model.builder.SavedModelBuilder(export_path)
        self.builder.add_meta_graph_and_variables(
            self.sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                'prediction': self.prediction_signature,
            }
        )
        self.builder.save()

VGG16 Tensorflow implementation does not learn on cifar-10

This VGGNet was implemented from scratch using the Tensorflow framework, with all of the layers defined in the code.
The main problem I am facing is that the training accuracy, not to mention validation accuracy, does not go up even though I wait a decent amount of time. There are a few problems that I suspect are causing this. First, I think the network is too deep and wide for the cifar-10 dataset. Second, extracting data batches out of the whole dataset is not exhaustive, i.e. batch selection is used over and over again across the whole dataset without eliminating the examples that were already selected in the ongoing epoch.
However, I still could not get this code to work after many hours and days of experiments.
I wish I could extract the problematic code section to ask a question, but since I cannot pinpoint the exact section, let me upload my whole code.
import os
import sys
import tensorflow as tf
import numpy as np
import scipy as sci
import math
import matplotlib.pyplot as plt
import time
import random
import imageio
import pickle
import cv2
import json
from pycocotools.coco import COCO
class SVGG:
    def __init__(self, num_output_classes):
        self.input_layer_size = 0
        self.num_output_classes = num_output_classes
        # Data
        self.X = []
        self.Y = []
        self.working_x = []
        self.working_y = []
        self.testX = []
        self.testY = []
        # hard coded for now. Have to change.
        self.input_data_size = 32  # 32 x 32
        self.input_data_size_flat = 3072  # 32 x 32 x 3 == 3072
        self.num_of_channels = 3  # 3 for colour image
        self.input_data_size = 32  # 32 x 32
        self.input_data_size_flat = self.input_data_size * self.input_data_size  # 32 x 32 x 3 == 3072
        self.num_of_channels = 3  # 3 for colour image
        self.convolution_layers = []
        self.convolution_weights = []
        self.fully_connected_layers = []
        self.fully_connected_weights = []

    def feed_examples(self, input_X, input_Y):
        """
        Feed examples to be learned
        :param input_X: Training dataset X
        :param input_Y: Training dataset label
        :return:
        """
        # Take first input and calculate its size
        # hard code size
        self.X = input_X
        self.Y = input_Y
        self.input_data_size_flat = len(self.X[0]) * len(self.X[0][0]) * len(self.X[0][0][0])

    def feed_test_data(self, test_X, test_Y):
        self.testX = test_X
        self.testY = test_Y
    def run(self):
        x = tf.placeholder(tf.float32, [None, self.input_data_size_flat], name='x')
        x_data = tf.reshape(x, [-1, self.input_data_size, self.input_data_size, 3])
        y_true = tf.placeholder(tf.float32, [None, self.num_output_classes], name='y_true')
        y_true_cls = tf.argmax(y_true, axis=1)
        """
        VGG layers
        """
        # Create layers
        ######################################## Input Layer ########################################
        input_layer, input_weight = self.create_convolution_layer(x_data, num_input_channels=3, filter_size=3,
                                                                  num_filters=64, use_pooling=True)  # False
        ######################################## Convolutional Layers ########################################
        ############### Conv Layer 1 #################
        conv_1_1, w_1_1 = self.create_convolution_layer(input=input_layer, num_input_channels=64, filter_size=3, num_filters=64, use_pooling=False)
        conv_1_2, w_1_2 = self.create_convolution_layer(input=conv_1_1, num_input_channels=64, filter_size=3, num_filters=128, use_pooling=True)
        ############### Conv Layer 2 #################
        conv_2_1, w_2_1 = self.create_convolution_layer(input=conv_1_2, num_input_channels=128, filter_size=3, num_filters=128, use_pooling=False)
        conv_2_2, w_2_2 = self.create_convolution_layer(input=conv_2_1, num_input_channels=128, filter_size=3, num_filters=256, use_pooling=True)
        ############### Conv Layer 3 #################
        conv_3_1, w_3_1 = self.create_convolution_layer(input=conv_2_2, num_input_channels=256, filter_size=3, num_filters=256, use_pooling=False)
        conv_3_2, w_3_2 = self.create_convolution_layer(input=conv_3_1, num_input_channels=256, filter_size=3, num_filters=256, use_pooling=False)
        conv_3_3, w_3_3 = self.create_convolution_layer(input=conv_3_2, num_input_channels=256, filter_size=3, num_filters=512, use_pooling=True)
        ############### Conv Layer 4 #################
        conv_4_1, w_4_1 = self.create_convolution_layer(input=conv_3_3, num_input_channels=512, filter_size=3, num_filters=512, use_pooling=False)
        conv_4_2, w_4_2 = self.create_convolution_layer(input=conv_4_1, num_input_channels=512, filter_size=3, num_filters=512, use_pooling=False)
        conv_4_3, w_4_3 = self.create_convolution_layer(input=conv_4_2, num_input_channels=512, filter_size=3, num_filters=512, use_pooling=True)
        ############### Conv Layer 5 #################
        conv_5_1, w_5_1 = self.create_convolution_layer(input=conv_4_3, num_input_channels=512, filter_size=3, num_filters=512, use_pooling=False)
        conv_5_2, w_5_2 = self.create_convolution_layer(input=conv_5_1, num_input_channels=512, filter_size=3, num_filters=512, use_pooling=False)
        conv_5_3, w_5_3 = self.create_convolution_layer(input=conv_5_2, num_input_channels=512, filter_size=3, num_filters=512, use_pooling=True)
        layer_flat, num_features = self.flatten_layer(conv_5_3)
        ######################################## Fully Connected Layers ########################################
        fc_1 = self.create_fully_connected_layer(input=layer_flat, num_inputs=num_features, num_outputs=4096)
        fc_2 = self.create_fully_connected_layer(input=fc_1, num_inputs=4096, num_outputs=4096)
        fc_3 = self.create_fully_connected_layer(input=fc_2, num_inputs=4096, num_outputs=self.num_output_classes, use_dropout=False)
        # Normalize prediction
        y_prediction = tf.nn.softmax(fc_3)
        # The class number is the index of the largest element
        y_prediction_class = tf.argmax(y_prediction, axis=1)
        # Cost function to be optimized
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=fc_3, labels=y_true)
        # => Now we have a measure of how well the model performs on each image individually. But in order to use the
        # cross entropy to guide the optimization of the model's variables we need a single value, so we simply take the
        # average of the cross-entropy over all the image classifications.
        cost = tf.reduce_mean(cross_entropy)
        # Optimizer
        optimizer_adam = tf.train.AdamOptimizer(learning_rate=0.002).minimize(cost)
        # Performance measure
        correct_prediction = tf.equal(y_prediction_class, y_true_cls)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        total_iterations = 0
        num_iterations = 100000
        start_time = time.time()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for i in range(num_iterations):
                x_batch, y_true_batch, _ = self.get_batch(X=self.X, Y=self.Y, low=0, high=40000, batch_size=128)
                feed_dict_train = {x: x_batch, y_true: y_true_batch}
                sess.run(optimizer_adam, feed_dict_train)
                if i % 100 == 99:
                    # Calculate the accuracy on the validation split
                    x_batch, y_true_batch, _ = self.get_batch(X=self.X, Y=self.Y, low=40000, high=50000, batch_size=1000)
                    feed_dict_validate = {x: x_batch, y_true: y_true_batch}
                    acc = sess.run(accuracy, feed_dict=feed_dict_validate)
                    # Message for printing
                    msg = "Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}"
                    # print(sess.run(y_prediction, feed_dict=feed_dict_train))
                    # print(sess.run(y_prediction_class, feed_dict=feed_dict_train))
                    print(msg.format(i + 1, acc))
                if i % 10000 == 9999:
                    oSaver = tf.train.Saver()
                    oSess = sess
                    path = "./model/_" + "iteration_" + str(i) + ".ckpt"
                    oSaver.save(oSess, path)
                if i == num_iterations - 1:
                    x_batch, y_true_batch, _ = self.get_batch(X=self.testX, Y=self.testY, low=0, high=10000, batch_size=10000)
                    feed_dict_test = {x: x_batch, y_true: y_true_batch}
                    test_accuracy = sess.run(accuracy, feed_dict=feed_dict_test)
                    msg = "Test Accuracy: {0:>6.1%}"
                    print(msg.format(test_accuracy))
    def get_batch(self, X, Y, low=0, high=50000, batch_size=128):
        x_batch = []
        y_batch = np.ndarray(shape=(batch_size, self.num_output_classes))
        index = np.random.randint(low=low, high=high, size=batch_size)
        counter = 0
        for idx in index:
            x_batch.append(X[idx].flatten())
            y_batch[counter] = one_hot_encoded(Y[idx], self.num_output_classes)
            y_batch_cls = Y[idx]
            counter += 1
        return x_batch, y_batch, y_batch_cls
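On the second suspicion raised above: get_batch draws indices with replacement, so within one pass some examples repeat and others are never visited. An exhaustive epoch is usually done with a shuffled index permutation; a minimal standalone sketch (not wired into the class):

def epoch_batches(X, Y, batch_size=128):
    # visit every example exactly once per epoch, in random order
    order = np.random.permutation(len(X))
    for start in range(0, len(order), batch_size):
        idx = order[start:start + batch_size]
        # one-hot encode the labels as get_batch does before feeding
        yield [X[i].flatten() for i in idx], [Y[i] for i in idx]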
    def generate_new_weights(self, shape):
        w = tf.Variable(tf.truncated_normal(shape, stddev=0.05))
        return w

    def generate_new_biases(self, shape):
        b = tf.Variable(tf.constant(0.05, shape=[shape]))
        return b

    def create_convolution_layer(self, input, num_input_channels, filter_size, num_filters, use_pooling):
        """
        :param input: The previous layer
        :param num_input_channels: Number of channels in previous layer
        :param filter_size: W and H of each filter
        :param num_filters: Number of filters
        :return:
        """
        shape = [filter_size, filter_size, num_input_channels, num_filters]
        weights = self.generate_new_weights(shape)
        biases = self.generate_new_biases(num_filters)
        layer = tf.nn.conv2d(input=input, filter=weights, strides=[1, 1, 1, 1], padding='SAME')
        layer += biases
        # Max Pooling
        if use_pooling:
            layer = tf.nn.max_pool(layer, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        # ReLu. Using elu for better performance
        layer = tf.nn.elu(layer)
        return layer, weights

    def create_fully_connected_layer(self, input, num_inputs, num_outputs, use_dropout=True):
        weights = self.generate_new_weights(shape=[num_inputs, num_outputs])
        biases = self.generate_new_biases(shape=num_outputs)
        layer = tf.matmul(input, weights) + biases
        layer = tf.nn.elu(layer)
        if use_dropout:
            keep_prob = tf.placeholder(tf.float32)
            keep_prob = 0.5
            layer = tf.nn.dropout(layer, keep_prob)
        return layer
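Note that inside create_fully_connected_layer the keep_prob placeholder is created and then immediately rebound to the float 0.5, so the placeholder is dead code and dropout runs at a fixed 50% during validation and testing as well, which drags measured accuracy down. A sketch of the usual pattern (assuming keep_prob is stored once on self and added to every feed_dict):

# at graph construction
self.keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_prob')
layer = tf.nn.dropout(layer, self.keep_prob)

# at run time: dropout on for training, off for evaluation
sess.run(optimizer_adam, {x: x_batch, y_true: y_true_batch, self.keep_prob: 0.5})
acc = sess.run(accuracy, {x: x_batch, y_true: y_true_batch, self.keep_prob: 1.0})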
    def flatten_layer(self, layer):
        """
        Flattens the output of a convolution layer.
        Flattening is needed to feed into a fully-connected layer.
        :param layer:
        :return:
        """
        # shape [num_images, img_height, img_width, num_channels]
        layer_shape = layer.get_shape()
        # Number of features: h x w x channels
        num_features = layer_shape[1:4].num_elements()
        # Reshape
        layer_flat = tf.reshape(layer, [-1, num_features])
        # Shape is now [num_images, img_height * img_width * num_channels]
        return layer_flat, num_features
def unpickle(file):
    with open(file, 'rb') as file:
        dict = pickle.load(file, encoding='bytes')
    return dict

def convert_to_individual_image(flat):
    img_R = flat[0:1024].reshape((32, 32))
    img_G = flat[1024:2048].reshape((32, 32))
    img_B = flat[2048:3072].reshape((32, 32))
    # B G R
    mean = [125.3, 123.0, 113.9]
    img = np.dstack((img_R - mean[0], img_G - mean[1], img_B - mean[2]))
    img = np.array(img)
    # img = cv2.resize(img, (224, 224), img)
    return img

def read_coco_data(img_path, annotation_path):
    coco = COCO(annotation_path)
    ids = list(coco.imgs.keys())
    ann_keys = list(coco.anns.keys())
    print(coco.imgs[ids[0]])
    print(coco.anns[ann_keys[0]])

def one_hot_encoded(class_numbers, num_classes=None):
    if num_classes is None:
        num_classes = np.max(class_numbers) + 1
    return np.eye(num_classes, dtype=float)[class_numbers]
if __name__ == '__main__':
    data = []
    labels = []
    val_data = []
    val_label = []
    # cifar-10
    counter = 0
    for i in range(1, 6):
        unpacked = unpickle("./cifar10/data_batch_" + str(i))
        tmp_data = unpacked[b'data']
        tmp_label = unpacked[b'labels']
        inner_counter = 0
        for flat in tmp_data:
            converted = convert_to_individual_image(flat)
            data.append(converted)
            labels.append(tmp_label[inner_counter])
            counter += 1
            inner_counter += 1
            cv2.imwrite("./img/" + str(counter) + ".jpg", converted)
    # Test data
    unpacked = unpickle("./cifar10/test_batch")
    test_data = []
    test_data_flat = unpacked[b'data']
    test_label = unpacked[b'labels']
    for flat in test_data_flat:
        test_data.append(convert_to_individual_image(flat))
    svgg = SVGG(10)
    svgg.feed_examples(input_X=data, input_Y=labels)
    svgg.feed_test_data(test_X=test_data, test_Y=test_label)
    svgg.run()

ValueError: Cannot feed value of shape (1, 4, 84, 84) for Tensor 'Placeholder:0', which has shape '(?, 84, 84, 4)'

I am running a DQN to learn to play Atari games, and am training it on GPU. I noticed that the 'data_format' for my model was NHWC (which is slower than NCHW for GPU training). I changed the data_format to NCHW, but it gave this error:
ValueError: Cannot feed value of shape (1, 4, 84, 84) for Tensor 'Placeholder:0', which has shape '(?, 84, 84, 4)'
This is my code:
from __future__ import division, print_function, unicode_literals
from functools import reduce
# Handle arguments (before slow imports so --help can be fast)
import argparse
parser = argparse.ArgumentParser(
    description="Train a DQN net.")
parser.add_argument("-n", "--number-steps", type=int, default=1000000,  # 4000000 CHANGED
                    help="total number of training steps")
parser.add_argument("-l", "--learn-iterations", type=int, default=4,
                    help="number of game iterations between each training step")
parser.add_argument("-s", "--save-steps", type=int, default=1000,
                    help="number of training steps between saving checkpoints")
parser.add_argument("-c", "--copy-steps", type=int, default=10000,
                    help="number of training steps between copies of online DQN to target DQN")
parser.add_argument("-r", "--render", action="store_true", default=False,
                    help="render the game during training or testing")
parser.add_argument("-p", "--path", default="model",
                    help="path of the checkpoint file")
parser.add_argument("-m", "--model_fname", default="model.ckpt",
                    help="name of the checkpoint file")
parser.add_argument("-t", "--test", action="store_true", default=False,
                    help="test (no learning and minimal epsilon)")
parser.add_argument("-tg", "--test_games", type=int, default=20,
                    help="How many games to test across (no learning and minimal epsilon)")
parser.add_argument("-v", "--verbosity", action="count", default=0,
                    help="increase output verbosity")
args = parser.parse_args()
from collections import deque
import gym
import numpy as np
import os
import tensorflow as tf
import random
import sys
import time
import sys
from history import History
from replay_memory import ReplayMemory
from utils import rgb2gray, imresize
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
env = gym.make("Breakout-v0")
done = True # env needs to be reset
# First let's build the two DQNs (online & target)
input_height = 84
input_width = 84
input_channels = history_length = 4
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [(8,8), (4,4), (3,3)]
conv_strides = [4, 2, 1]
conv_paddings = ["VALID"] * 3
conv_activation = [tf.nn.relu] * 3
n_hidden = 256 #512 CHANGED
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n # 9 discrete actions are available (for MsPacman at least)
initializer = tf.truncated_normal_initializer(0, 0.02)
tf.device("/gpu:0")
# Deep-Q network
def q_network(X_state, name):
    prev_layer = X_state
    with tf.variable_scope(name) as scope:
        for n_maps, kernel_size, strides, padding, activation in zip(
                conv_n_maps, conv_kernel_sizes, conv_strides,
                conv_paddings, conv_activation):
            prev_layer = tf.layers.conv2d(
                prev_layer, filters=n_maps, kernel_size=kernel_size,
                strides=strides, padding=padding, activation=activation,
                kernel_initializer=initializer)
        prev_layer_shape = prev_layer.get_shape().as_list()
        n_hidden_in = reduce(lambda x, y: x * y, prev_layer_shape[1:])
        last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_in])
        hidden = tf.layers.dense(last_conv_layer_flat, n_hidden,
                                 activation=hidden_activation,
                                 kernel_initializer=initializer)
        outputs = tf.layers.dense(hidden, n_outputs,
                                  kernel_initializer=initializer)
    trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                       scope=scope.name)
    trainable_vars_by_name = {var.name[len(scope.name):]: var
                              for var in trainable_vars}
    return outputs, trainable_vars_by_name
# Placeholder for input
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width,
                                            input_channels])
# Create two Deep-Q networks
online_q_values, online_vars = q_network(X_state, name="q_networks/online")
target_q_values, target_vars = q_network(X_state, name="q_networks/target")
# We need an operation to copy the online DQN to the target DQN
copy_ops = [target_var.assign(online_vars[var_name])
            for var_name, target_var in target_vars.items()]
copy_online_to_target = tf.group(*copy_ops)
# Parameters for optimizer
learning_rate = 0.001  # 0.00025 seems low? CHANGED
learning_rate_minimum = 0.001  # 0.00025 CHANGED
learning_rate_decay = 0.96
learning_rate_decay_step = 50000
momentum = 0.95

# Huber loss (tf.select was renamed tf.where in TF 1.0, hence the fallback)
def clipped_error(x):
    try:
        return tf.select(tf.abs(x) < 1.0, 0.5 * tf.square(x), tf.abs(x) - 0.5)
    except:
        return tf.where(tf.abs(x) < 1.0, 0.5 * tf.square(x), tf.abs(x) - 0.5)
# Initialize optimizer for training
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None]) # Action based on Q-value from Online network
y = tf.placeholder(tf.float32, shape=[None]) # Q-value from Target network
q_value = tf.reduce_sum(online_q_values * tf.one_hot(X_action, n_outputs),
axis=1)
delta = y - q_value
loss = tf.reduce_mean(clipped_error(delta))
global_step = tf.Variable(0, trainable=False, name='global_step')
learning_rate_step = tf.placeholder('int64', None, name='learning_rate_step')
learning_rate_op = tf.maximum(
learning_rate_minimum,
tf.train.exponential_decay(
learning_rate,
learning_rate_step,
learning_rate_decay_step,
learning_rate_decay,
staircase=True
)
)
training_op = tf.train.RMSPropOptimizer(
learning_rate_op, momentum=momentum, epsilon=0.01
).minimize(loss, global_step=global_step)
# Summary for Tensorboard
summary_steps = 100
with tf.variable_scope('summary'):
summary_tags = ['average.reward']
summary_placeholders = {}
summary_ops = {}
for tag in summary_tags:
summary_placeholders[tag] = tf.placeholder('float32', None, name=tag.replace(' ', '_'))
summary_ops[tag] = tf.summary.scalar(tag, summary_placeholders[tag])
histogram_summary_tags = ['episode.rewards', 'episode.actions']
for tag in histogram_summary_tags:
summary_placeholders[tag] = tf.placeholder('float32', None, name=tag.replace(' ', '_'))
summary_ops[tag] = tf.summary.histogram(tag, summary_placeholders[tag])
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# TensorFlow - Execution phase
training_start = 10000 # start training after 10,000 game iterations
discount_rate = 0.99
skip_start = 90 # Skip the start of every game (it's just waiting time). -- Is this just for msPacman??
batch_size = 64 #32
iteration = 0 # game iterations
done = True # env needs to be reset
min_reward = -1.
max_reward = 1.
exp_moving_avg_reward = 0.
first_train_step = True
iterationStep = 0
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 1
# We will keep track of the max Q-Value over time and compute the mean per game
loss_val = np.infty
game_length = 0
total_max_q = 0
mean_max_q = 0.0
# Initialize history
history = History(
    data_format='NCHW',
    batch_size=batch_size,
    history_length=history_length,
    screen_height=input_height,
    screen_width=input_width
)
# Initialize Replay Memory
replay_memory_size = 1000000
replay_memory = ReplayMemory(
    data_format='NCHW',
    batch_size=batch_size,
    history_length=history_length,
    screen_height=input_height,
    screen_width=input_width,
    memory_size=replay_memory_size,
    model_dir='model'
)
# And on to the epsilon-greedy policy with decaying epsilon
eps_min = 0.1
eps_max = 1.0
test_eps = eps_min if args.test else None
eps_decay_steps = 1000000
# eps_min = 0.1
# eps_max = 1.0 if not args.test else eps_min
# eps_decay_steps = args.number_steps // 2
def epsilon_greedy(q_values, step):
    epsilon = test_eps or \
        (
            eps_min + max(
                0.,
                (eps_max - eps_min) * (eps_decay_steps - max(0., step - training_start)) / eps_decay_steps
            )
        )
    # epsilon = max(eps_min, eps_max - (eps_max - eps_min) * step / eps_decay_steps)
    if np.random.rand() < epsilon:
        return np.random.randint(n_outputs)  # random action
    else:
        return np.argmax(q_values)  # optimal action

def preprocess_observation(obs):
    return imresize(rgb2gray(obs) / 255., (input_width, input_height))
with tf.Session() as sess:
    summary_writer = tf.summary.FileWriter(os.path.join(args.path, "logs"), sess.graph)  # Logdir

    def inject_summary(tag_dict, step):
        summary_str_lists = sess.run(
            [summary_ops[tag] for tag in tag_dict.keys()],
            {summary_placeholders[tag]: value for tag, value in tag_dict.items()}
        )
        for summary_str in summary_str_lists:
            summary_writer.add_summary(summary_str, step)

    # Resume the training (if possible)
    ckpt = tf.train.get_checkpoint_state(args.path)
    if ckpt and ckpt.model_checkpoint_path:
        ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
        fname = os.path.join(args.path, ckpt_name)
        saver.restore(sess, fname)
        print(" [*] Load SUCCESS: %s" % fname)
    else:
        init.run()
        copy_online_to_target.run()
        print(" [!] Load FAILED: %s" % args.path)

    # ----------- Training ----------- #
    if not args.test:
        print("TRAINING")
        exp_moving_avg_reward = 0.0
        current_rewards = []
        while True:
            step = global_step.eval()
            if step >= args.number_steps:
                break
            iteration += 1
            if args.verbosity > 0:
                print("\rIter {}, training step {}/{} ({:.1f})%, "
                      "loss {:5f}, exp-moving-avg reward {:5f}, "
                      "mean max-Q {:5f}".format(
                          iteration, step, args.number_steps, step * 100 / args.number_steps,
                          loss_val, exp_moving_avg_reward,
                          mean_max_q),
                      end="")
            # Game over, start again
            if done:
                obs = env.reset()
                # Randomly skip the start of each game
                for skip in range(random.randint(0, skip_start - 1)):
                    obs, _, done, _ = env.step(0)
                state = preprocess_observation(obs)
                for _ in range(history_length):
                    history.add(state)
            if args.render:
                env.render()  # render training option
            # Online DQN evaluates what to do
            q_values = online_q_values.eval(feed_dict={X_state: [history.get()]})[0]
            action = epsilon_greedy(q_values, step)
            # Online DQN plays
            obs, reward, done, info = env.step(action)
            next_state = preprocess_observation(obs)
            # Reward clipping
            reward = max(min_reward, min(max_reward, reward))
            # Update history
            history.add(next_state)
            # Let's memorize what happened
            replay_memory.add(next_state, reward, action, done)
            state = next_state
            current_rewards.append(reward)
            # Compute statistics for tracking progress (not shown in the book)
            total_max_q += q_values.max()
            game_length += 1
            if done:
                mean_max_q = total_max_q / game_length
                total_max_q = 0.0
                game_length = 0
            if iteration < training_start or iteration % args.learn_iterations != 0:
                continue  # only train after the warmup period and at regular intervals
            # Sample memories and use the target DQN to produce the target Q-value
            X_state_val, X_action_val, rewards, X_next_state_val, terminal = \
                replay_memory.sample()
            next_q_values = target_q_values.eval(
                feed_dict={X_state: X_next_state_val}
            )
            max_next_q_values = np.max(next_q_values, axis=1)
            y_val = rewards + (1. - terminal) * discount_rate * max_next_q_values
            # Update exponential moving average of rewards
            if first_train_step:
                exp_moving_avg_reward = np.mean(current_rewards)
                first_train_step = False
            else:
                exp_moving_avg_reward = (exp_moving_avg_reward * 0.99) + (0.01 * np.mean(current_rewards))
            current_rewards = []
            # Train the online DQN
            _, loss_val = sess.run([training_op, loss], feed_dict={
                X_state: X_state_val,
                X_action: X_action_val,
                y: y_val,
                learning_rate_step: step,
            })
            # Regularly inject summary
            if step % summary_steps == 0:
                inject_summary(
                    {
                        'average.reward': exp_moving_avg_reward
                    },
                    step
                )
            # Regularly copy the online DQN to the target DQN
            if step % args.copy_steps == 0:
                # print("Copying the weights from online DQN to target DQN ...")
                copy_online_to_target.run()
            # And save regularly
            if step % args.save_steps == 0:
                # print("Saving model ...")
                saver.save(sess, os.path.join(args.path, args.model_fname), global_step=step)

    # ----------- Testing ----------- #
    if args.test:
        print("TESTING")
        test_games_played = -1  # -1 to offset the first env.reset() call
        test_game_reward = []
        test_game_reward_average = []
        best_score = 0
        sys.stdout.flush()
        current_rewards = []
        while args.test_games >= test_games_played:
            sys.stdout.flush()
            step = global_step.eval()
            # Game over, start again - we've won or lost the game at this point
            if done:
                test_games_played += 1
                print("\n# --------------------------------------------------------- #")
                print("GAMES PLAYED SO FAR: {}".format(test_games_played))
                print("GAME SCORE: {}".format(sum(x for x in test_game_reward if x > 0)))  # total score from the game (positive values only)
                test_game_reward_average.append(sum(x for x in test_game_reward if x > 0))
                if test_games_played > 0:
                    print("AVERAGE SCORE: {:5f}".format(sum(test_game_reward_average) / test_games_played))  # average score across games
                if sum(x for x in test_game_reward if x > 0) > best_score:
                    best_score = sum(x for x in test_game_reward if x > 0)
                print("BEST SCORE: {}".format(best_score))
                test_game_reward.clear()  # clear the list of game scores for the next game to be played
                print("Game Finished. Resetting Env...")
                print("# --------------------------------------------------------- #\n")
                obs = env.reset()
                state = preprocess_observation(obs)
                for _ in range(history_length):
                    history.add(state)
            if args.render:
                env.render()
                # time.sleep(0.1)  # slow down the render
            # Online DQN evaluates what to do
            q_values = online_q_values.eval(feed_dict={X_state: [history.get()]})[0]
            action = epsilon_greedy(q_values, step)
            # Online DQN plays
            obs, reward, done, info = env.step(action)
            next_state = preprocess_observation(obs)
            # Reward clipping
            reward = max(min_reward, min(max_reward, reward))
            # Update history
            history.add(next_state)
            # Let's memorize what happened
            replay_memory.add(next_state, reward, action, done)
            state = next_state
            current_rewards.append(reward)
            if args.test:
                test_game_reward.append(reward)
            continue  # back to the top of the while loop
I think that the placeholder for the input (line 89), X_state, potentially needs to be changed. However, I'm not sure how or exactly where the change needs to be made.
Thanks for your help.
From your code:
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width, input_channels])
You're hardcoding the shape of the placeholder to be in the NHWC format. If you want to feed arrays in NCHW format, change X_state to tf.placeholder(tf.float32, shape=[None, input_channels, input_height, input_width])
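For completeness, a sketch of the NCHW variant: besides the placeholder, each conv layer also needs to be told the layout via data_format='channels_first', and everything fed in (history stacks, replay samples) must already be laid out as [batch, channels, height, width]:

X_state = tf.placeholder(tf.float32,
                         shape=[None, input_channels, input_height, input_width])

# inside q_network, tell each conv layer about the layout
prev_layer = tf.layers.conv2d(
    prev_layer, filters=n_maps, kernel_size=kernel_size,
    strides=strides, padding=padding, activation=activation,
    kernel_initializer=initializer, data_format='channels_first')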

How to initialize a neural network by user defined parameters in tensorflow

I am trying to implement a Neural Network using tensorflow. I want my model to initialize itself using default parameters or any weight-size list passed by the user. But the compute graph generated by my code seems incorrect. How can I initialize a NN with user-defined parameters in tensorflow?
def setUp(self):
    self.hidden_layer_ = len(self.hidden_layer_sizes)
    self.weights = []
    self.biases = []
    size_list = [self.input_size]
    size_list.extend(list(self.hidden_layer_sizes))
    i = 0
    while i + 1 < len(size_list):
        prev_size = size_list[i]
        size = size_list[i + 1]
        w = tf.Variable(tf.truncated_normal([prev_size, size], stddev=0.1))
        b = tf.Variable(tf.zeros([size]))
        self.weights.append(w)
        self.biases.append(b)
        i += 1
    self.w_out = tf.Variable(tf.truncated_normal([size_list[-1], self.output_size], stddev=0.1))
    self.b_out = tf.Variable(tf.zeros([self.output_size]))
    self.input_x = tf.placeholder(tf.float32, [None, self.input_size], name='input')
    self.input_y = tf.placeholder(tf.float32, [None, self.output_size], name='label')
    self.keep_prob_plh = tf.placeholder(tf.float32)
    self.layers = [self.input_x]
    for w, b in zip(self.weights, self.biases):
        hidden = ACT_FUN_DICT[self.activation](tf.matmul(self.layers[-1], w) + b)
        hidden_drop = tf.nn.dropout(hidden, self.keep_prob_plh)
        self.layers.append(hidden_drop)
    with tf.variable_scope('output'):
        self.output_layer = tf.nn.sigmoid(tf.matmul(self.layers[-1], self.w_out) + self.b_out)
    self.cost_func = tf.reduce_mean(tf.reduce_sum(tf.pow((self.input_y - self.output_layer), 2)))
    self.cost_summary = tf.summary.scalar('Cost', self.cost_func)
    self.optimizer = SOLVER_DICT[self.solver](self.learning_rate).minimize(self.cost_func)
    root_logdir = './tf_logs'
    now = datetime.utcnow().strftime('%Y%m%d%H%M%S')
    log_dir = "{}/run-{}/".format(root_logdir, now)
    self.file_writer = tf.summary.FileWriter(log_dir, tf.get_default_graph())
    self.sess.run(tf.global_variables_initializer())
I implement the NN using a class; the sizes of the weights are stored in the member self.hidden_layer_sizes.
class MLPClassifier(BaseEstimator, TransformerMixin):
    def __init__(self, hidden_layer_sizes=(100,), activation='relu', solver='sgd', alpha=0.0001,
                 learning_rate=0.001, max_iter=200, random_state=42, keep_prob=0.75, logged=True):
        self.hidden_layer_sizes = hidden_layer_sizes
        self.activation = activation
        self.solver = solver
        self.alpha = alpha
        self.learning_rate = learning_rate
        self.max_iter = max_iter
        self.random_state = random_state
        self.keep_prob = keep_prob
        self.fitted = False
        self.logged = True
        self.sess = tf.Session()
        self.sess.as_default()
That's all my code. (The compute-graph screenshot is not reproduced here.)
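One guess about why the graph looks wrong: every weight here is created in the default graph with auto-generated names, so repeated construction (or several estimator instances sharing one process) piles anonymous Variable nodes side by side in TensorBoard. Scoping each layer usually untangles the picture; a sketch of the construction loop with scopes (scope names are illustrative):

for idx, (prev_size, size) in enumerate(zip(size_list[:-1], size_list[1:])):
    with tf.variable_scope('hidden_%d' % idx):
        w = tf.Variable(tf.truncated_normal([prev_size, size], stddev=0.1), name='w')
        b = tf.Variable(tf.zeros([size]), name='b')
        self.weights.append(w)
        self.biases.append(b)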

Check Failed: vec.size() == NDIMS (1 vs. 2) Aborted error in a RaspberryPi Convolutional Neural Network

I'm trying to train a Tensorflow Convolutional Neural Network on my RaspberryPi 3B. I get the following error:
2018-08-19 18:57:07.720022: F ./tensorflow/core/util/bcast.h:111] Check failed: vec.size() == NDIMS (1 vs. 2)
Aborted
Not sure what's going wrong, but here's the code:
import tensorflow as tf
import numpy as np
import csv
import pandas as pd
import os
image_height = 60
image_width = 1
image1_height = 15
image2_height = 1
model_name = "TensorflowCNN"
# Training Data Configuration
train_data = np.asarray(pd.read_csv("/media/pi/DISK_IMG/TrainingInput.csv", usecols=[1]))
lis = train_data.tolist()
lis = lis[0:60]
lis = [x[0].strip('[]\n,') for x in lis]
nlis = []
for i in lis:
    nlis.append(i.split())
for i in range(len(nlis)):
    nlis[i] = [float(x) for x in nlis[i] if x != "...,"]
nlis = [np.mean(x) for x in nlis]
train_data = np.asarray(nlis)

# Training Labels Configuration
train_labels = np.asarray(pd.read_csv("/media/pi/DISK_IMG/TrainingInput.csv", usecols=[2]))
mylist = train_labels.tolist()
mylist = mylist[0:60]
mylist = [x[0] for x in mylist]
index = 0
while index < len(mylist):
    if mylist[index] == "GravelTraining":
        mylist[index] = 1
    elif mylist[index] == "WaterTraining":
        mylist[index] = 2
    else:
        mylist[index] = 3
    index = index + 1
train_labels = np.asarray(mylist)

# Validation Data Configuration
eval_data = np.asarray(pd.read_csv("/media/pi/DISK_IMG/TestingInput.csv", usecols=[1]))
List = eval_data.tolist()
List = List[0:15]
eval_data = np.asarray(List)

# Validation Labels Configuration
eval_labels = np.asarray(pd.read_csv("/media/pi/DISK_IMG/TestingInput.csv", usecols=[2]))
myList = eval_labels.tolist()
myList = myList[0:15]
index = 0
while index < len(myList):
    if myList[index] == "GravelTesting":
        myList[index] = 1
    elif myList[index] == "WaterTesting":
        myList[index] = 2
    else:
        myList[index] = 3
    index = index + 1
eval_labels = np.asarray(myList)
category_names = list(map(str, range(3)))
#Processing and reshaping data
train_data = np.reshape(train_data, (-1, image_height, image_width, 1))
train_labels = np.reshape(train_labels, (-1, image_height, image_width, 1))
eval_data = np.reshape(eval_data, (-1, image1_height, image2_height, 1))
eval_labels = np.reshape(eval_labels, (-1, image1_height, image2_height, 1))
# CLASS FOR THE CONVOLUTIONAL NEURAL NETWORK
class ConvNet:
    def __init__(self, image_height, Image_width, num_classes, chan):
        self.input_layer = tf.placeholder(dtype=tf.float32, shape=[1, image_height, Image_width, chan], name="inputs")
        conv_layer_1 = tf.layers.conv2d(self.input_layer, filters=32, kernel_size=[5, 5], padding="same", activation=tf.nn.relu)
        pooling_layer_1 = tf.layers.max_pooling2d(conv_layer_1, pool_size=[2, 1], strides=1)
        flattened_pooling = tf.layers.flatten(pooling_layer_1)
        dense_layer = tf.layers.dense(flattened_pooling, 60, activation=tf.nn.relu)
        dropout = tf.layers.dropout(dense_layer, rate=0.4, training=True)
        output_dense_layer = tf.layers.dense(dropout, num_classes)
        self.choice = tf.argmax(output_dense_layer, axis=1)
        self.probabilities = tf.nn.softmax(output_dense_layer)
        self.labels = tf.placeholder(dtype=tf.float32, name="labels")
        self.accuracy, self.accuracy_op = tf.metrics.accuracy(self.labels, self.choice)
        one_hot_labels = tf.one_hot(indices=tf.cast(self.labels, dtype=tf.int32), depth=num_classes)
        self.loss = tf.losses.softmax_cross_entropy(onehot_labels=one_hot_labels, logits=output_dense_layer)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-2)
        self.train_operation = optimizer.minimize(loss=self.loss, global_step=tf.train.get_global_step())
# Training process: variables
training_steps = 20000
batch_size = 60
path = "./" + model_name + "-cnn/"
load_checkpoint = False
tf.reset_default_graph()
dataset = tf.data.Dataset.from_tensor_slices((train_data, train_labels))
dataset = dataset.shuffle(buffer_size=train_labels.shape[0])
dataset = dataset.batch(batch_size)
dataset = dataset.repeat()
dataset_iterator = dataset.make_initializable_iterator()
next_element = dataset_iterator.get_next()
# Final initialization of Neural Network and Training Process
cnn = ConvNet(image_height, image_width, 1, 1)
print("milestone1")
saver = tf.train.Saver(max_to_keep=2)
print('milestone2')
if not os.path.exists(path):
    os.makedirs(path)
print('milestone3')
# Training loop for neural network
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('milestone4')
    sess.run(tf.local_variables_initializer())
    sess.run(dataset_iterator.initializer)
    for step in range(training_steps):
        current_batch = sess.run(next_element)
        batch_inputs = current_batch[0]
        batch_labels = current_batch[1]
        print("milestone5")
        sess.run((cnn.train_operation, cnn.accuracy_op), feed_dict={cnn.input_layer: batch_inputs, cnn.labels: batch_labels})
        if step % 1 == 0 and step > 0:
            current_acc = sess.run(cnn.accuracy)
            print("Accuracy at step " + str(step) + ": " + str(current_acc))
            saver.save(sess, path + model_name, step)
    print("Saving final checkpoint for training session.")
    saver.save(sess, path + model_name, step)
I know it's long, but I would appreciate it if someone could let me know what's wrong with my program. There is no traceback. I'm getting the notification from Stack Overflow that my post is mostly code, but there's really not much more to say about the problem. Also, I have looked at many questions discussing this error and have not found an answer in any of them that I can use. Thanks in advance!
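Since a Check failed abort gives no Python traceback, a quick first step is to print the shapes that actually reach the graph; the labels reshaped to four dimensions look like a plausible source of a broadcast-rank mismatch, because tf.metrics.accuracy and tf.one_hot are being fed labels shaped like images rather than a flat class vector. A sketch of the kind of check I mean:

print("train_data:", train_data.shape)      # e.g. (1, 60, 1, 1) after the reshape
print("train_labels:", train_labels.shape)  # a 1-D vector of class ids is what the loss expects
print("batch_inputs:", batch_inputs.shape)
print("batch_labels:", batch_labels.shape)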
