I am trying to use the inception_v1 module written in tf.slim, provided here, to train the model on the CIFAR-10 dataset.
The code to train and evaluate the model on the dataset is below.
# test_data = (data['images_test'], data['labels_test'])
train_data = (train_x, train_y)
val_data = (val_x, val_y)
# create two datasets, one for training and one for test
train_dataset = tf.data.Dataset.from_tensor_slices(train_data).shuffle(buffer_size=10000).batch(BATCH_SIZE).map(preprocess)
# train_dataset = train_dataset.shuffle(buffer_size=10000).batch(BATCH_SIZE).map(preprocess)
val_dataset = tf.data.Dataset.from_tensor_slices(val_data).batch(BATCH_SIZE).map(preprocess)
# test_dataset = tf.data.Dataset.from_tensor_slices(test_data).batch(BATCH_SIZE).map(preprocess)
# create a _iterator of the correct shape and type
_iter = tf.data.Iterator.from_structure(
    train_dataset.output_types,
    train_dataset.output_shapes
)
features, labels = _iter.get_next()
# create the initialization operations
train_init_op = _iter.make_initializer(train_dataset)
val_init_op = _iter.make_initializer(val_dataset)
# test_init_op = _iter.make_initializer(test_dataset)
# Placeholders which evaluate in the session
training_mode = tf.placeholder(shape=None, dtype=tf.bool)
dropout_prob = tf.placeholder_with_default(1.0, shape=())
reuse_bool = tf.placeholder_with_default(True, shape=())
# Init the saver Object which handles saves and restores of
# model weights
# saver = tf.train.Saver()
# Initialize the model inside the arg_scope to define the batch
# normalization layer and the appropriate parameters
with slim.arg_scope(inception_v1_arg_scope(use_batch_norm=True)) as scope:
    logits, end_points = inception_v1(features,
                                      reuse=None,
                                      dropout_keep_prob=dropout_prob,
                                      is_training=training_mode)
# Create the cross entropy loss function
cross_entropy = tf.reduce_mean(
    tf.losses.softmax_cross_entropy(tf.one_hot(labels, 10), logits))
train_op = tf.train.AdamOptimizer(1e-2).minimize(loss=cross_entropy)
# train_op = slim.learning.create_train_op(cross_entropy, optimizer, global_step=)
# Define the accuracy metric
preds = tf.argmax(logits, axis=-1, output_type=tf.int64)
acc = tf.reduce_mean(tf.cast(tf.equal(preds, labels), tf.float32))
# Count the iterations for each set
n_train_batches = train_y.shape[0] // BATCH_SIZE
n_val_batches = val_y.shape[0] // BATCH_SIZE
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # saver = tf.train.Saver([v for v in tf.all_variables()][:-1])
    # for v in tf.all_variables():
    #     print(v.name)
    # saver.restore(sess, tf.train.latest_checkpoint('./', latest_filename='inception_v1.ckpt'))
    for i in range(EPOCHS):
        total_loss = 0
        total_acc = 0
        # Init train session
        sess.run(train_init_op)
        with tqdm(total=n_train_batches * BATCH_SIZE) as pbar:
            for batch in range(n_train_batches):
                _, loss, train_acc = sess.run([train_op, cross_entropy, acc],
                                              feed_dict={training_mode: True, dropout_prob: 0.2})
                total_loss += loss
                total_acc += train_acc
                pbar.update(BATCH_SIZE)
        print("Epoch: {} || Loss: {:.5f} || Acc: {:.5f} %".format(
            i + 1, total_loss / n_train_batches, (total_acc / n_train_batches) * 100))
        # Switch to validation
        total_val_loss = 0
        total_val_acc = 0
        sess.run(val_init_op)
        for batch in range(n_val_batches):
            val_loss, val_acc = sess.run([cross_entropy, acc],
                                         feed_dict={training_mode: False})
            total_val_loss += val_loss
            total_val_acc += val_acc
        print("Epoch: {} || Validation Loss: {:.5f} || Val Acc: {:.5f} %".format(
            i + 1, total_val_loss / n_val_batches, (total_val_acc / n_val_batches) * 100))
The paradox is that I get the following results when training and evaluating the model on the validation set:
Epoch: 1 || Loss: 2.29436 || Acc: 23.61750 %
Epoch: 1 || Validation Loss: 1158854431554614016.00000 || Val Acc: 10.03000 %
100%|███████████████████████████████████████████████████| 40000/40000 [03:52<00:00, 173.21it/s]
Epoch: 2 || Loss: 1.68389 || Acc: 36.49250 %
Epoch: 2 || Validation Loss: 27997399226326712.00000 || Val Acc: 10.03000 %
100%|██████████████████████████████████████████████████▋| 39800/40000 [03:51<00:01, 174.11it/s]
I have set training_mode to True during training and False during validation. However, although train_op is only run during the training phase, the model behaves as if it were uninitialized on the validation set. My guess is that the is_training flag does not handle this situation well and does not keep the batch-normalization variables properly initialized during validation. Has anyone experienced a similar situation before?
I found the solution to my problem. Two things were involved.
The first was to use a smaller batch-norm decay; since my dataset is smaller than ImageNet, I had to lower it to 0.99.
batch_norm_decay=0.99
The other was to use the following line so that the batch-normalization update ops (the moving statistics) are run as part of each training step.
train_op = slim.learning.create_train_op(cross_entropy, optimizer)
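Putting the two changes together, the relevant part of the setup now looks roughly like this (a sketch; the import path for the slim Inception v1 model and the num_classes argument are assumptions, and features, labels, dropout_prob and training_mode are the tensors defined in the question):

import tensorflow as tf
import tensorflow.contrib.slim as slim
# assumed import path for the TF-slim model zoo version of Inception v1
from nets.inception_v1 import inception_v1, inception_v1_arg_scope

# lower the batch-norm decay because the dataset is much smaller than ImageNet
with slim.arg_scope(inception_v1_arg_scope(use_batch_norm=True, batch_norm_decay=0.99)):
    logits, end_points = inception_v1(features,
                                      num_classes=10,
                                      dropout_keep_prob=dropout_prob,
                                      is_training=training_mode)

cross_entropy = tf.reduce_mean(
    tf.losses.softmax_cross_entropy(tf.one_hot(labels, 10), logits))
optimizer = tf.train.AdamOptimizer(1e-2)

# create_train_op also collects the batch-norm UPDATE_OPS, so the moving
# mean/variance are updated with every training step
train_op = slim.learning.create_train_op(cross_entropy, optimizer)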
Related
I am following a PyTorch deep learning code in which I saw model evaluation taking place within the training epoch loop!
Q) Should torch.no_grad() and model.eval() be outside the training epoch loop?
Q) And how can I determine which parameters (weights) are being optimised by the optimiser during back-propagation?
...
for l in range(1):
    model = GTN(num_edge=A.shape[-1],
                num_channels=num_channels, w_in=node_features.shape[1], w_out=node_dim,
                num_class=num_classes, num_layers=num_layers, norm=norm)
    if adaptive_lr == 'false':
        optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=0.001)
    else:
        optimizer = torch.optim.Adam([{'params': model.weight},
                                      {'params': model.linear1.parameters()},
                                      {'params': model.linear2.parameters()},
                                      {"params": model.layers.parameters(), "lr": 0.5}],
                                     lr=0.005, weight_decay=0.001)
    loss = nn.CrossEntropyLoss()
    # Train & Valid & Test
    best_val_loss = 10000
    best_train_loss = 10000
    best_train_f1 = 0
    best_val_f1 = 0
    for i in range(epochs):
        print('Epoch: ', i + 1)
        model.zero_grad()
        model.train()
        loss, y_train, Ws = model(A, node_features, train_node, train_target)
        train_f1 = torch.mean(f1_score(torch.argmax(y_train.detach(), dim=1), train_target, num_classes=num_classes)).cpu().numpy()
        print('Train - Loss: {}, Macro_F1: {}'.format(loss.detach().cpu().numpy(), train_f1))
        loss.backward()
        optimizer.step()
        model.eval()
        # Valid
        with torch.no_grad():
            val_loss, y_valid, _ = model.forward(A, node_features, valid_node, valid_target)
            val_f1 = torch.mean(f1_score(torch.argmax(y_valid, dim=1), valid_target, num_classes=num_classes)).cpu().numpy()
            if val_f1 > best_val_f1:
                best_val_loss = val_loss.detach().cpu().numpy()
                best_train_loss = loss.detach().cpu().numpy()
                best_train_f1 = train_f1
                best_val_f1 = val_f1
    print('---------------Best Results--------------------')
    print('Train - Loss: {}, Macro_F1: {}'.format(best_train_loss, best_train_f1))
    print('Valid - Loss: {}, Macro_F1: {}'.format(best_val_loss, best_val_f1))
    final_f1 += best_test_f1
For each epoch, you are doing training followed by validation/test. For validation/test you move the model to evaluation mode using model.eval() and then do the forward pass under torch.no_grad(), which is correct. Likewise, you move the model back to training mode with model.train() at the start of the training part. There is no issue with the code; you are using the model modes correctly.
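As a minimal, generic sketch of that pattern (a toy model, not your GTN code):

import torch
import torch.nn as nn

model = nn.Linear(10, 2)                      # stand-in for any nn.Module
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()
x_train, y_train = torch.randn(8, 10), torch.randint(0, 2, (8,))
x_val, y_val = torch.randn(8, 10), torch.randint(0, 2, (8,))

for epoch in range(3):
    model.train()                             # training mode: dropout/batch-norm updates active
    optimizer.zero_grad()
    loss = criterion(model(x_train), y_train)
    loss.backward()
    optimizer.step()

    model.eval()                              # eval mode: dropout off, running stats used
    with torch.no_grad():                     # no gradient tracking during validation
        val_loss = criterion(model(x_val), y_val)
    print(epoch, loss.item(), val_loss.item())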
In your code, if adaptive_lr is 'false' then you are optimizing the parameters given by model.parameters(), and otherwise you are optimizing the following (a quick way to verify this at runtime is sketched after the list):
model.weight
model.linear1.parameters()
model.linear2.parameters()
model.layers.parameters()
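To check exactly which tensors a given optimizer will update, you can inspect its param_groups; a small sketch (works for either branch above, using the optimizer you constructed):

# list every parameter the optimizer will update, group by group
for gi, group in enumerate(optimizer.param_groups):
    print('group', gi, 'lr =', group['lr'])
    for p in group['params']:
        # only parameters with requires_grad=True (and a non-None .grad after
        # loss.backward()) actually receive updates in optimizer.step()
        print('  shape:', tuple(p.shape), 'requires_grad:', p.requires_grad)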
1 ) Problem
I observe an odd behaviour during training where my validation-accuracy is above 100% right from the start.
Epoch 0/3
----------
100%|██████████| 194/194 [00:50<00:00, 3.82it/s]
train Loss: 1.8653 Acc: 0.4796
100%|██████████| 194/194 [00:32<00:00, 5.99it/s]
val Loss: 1.7611 Acc: 1.2939
Epoch 1/3
----------
100%|██████████| 194/194 [00:42<00:00, 4.61it/s]
train Loss: 0.8704 Acc: 0.7467
100%|██████████| 194/194 [00:31<00:00, 6.11it/s]
val Loss: 1.0801 Acc: 1.4694
The output indicates that one epoch iterates over 194 batches, which does seem to be correct for the training data (which has a length of 6186, batch_size is 32, hence 32*194 = 6208 ≈ 6186) but does not match the size of the validation data (length of 3447, batch_size = 32).
Hence I would expect my validation loop to generate 108 (3447 / 32 ≈ 108) batches instead of 194.
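As a quick sanity check on the expected batch counts (assuming the default drop_last=False, so the DataLoader length rounds up):

import math

batch_size = 32
print(math.ceil(6186 / batch_size))   # 194 -> expected batches for the training loader
print(math.ceil(3447 / batch_size))   # 108 -> expected batches for the validation loader
# len(train_loader) and len(val_loader) should report these numbers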
I thought this behaviour is handled within my for loop at:
for dataset in tqdm(dataloaders[phase]):
But somehow I can't figure out what is wrong here. See point 3) below for my entire code.
2 ) Question
If my assumption above is correct, i.e. that this error stems from the for-loop within my code, then I would like to know the following:
How do I need to adjust the for-loop during the validation phase to handle the number of batches that are being used for validation correctly?
3 ) Background:
Following two tutorials, one on how to do transfer-learning (https://discuss.pytorch.org/t/transfer-learning-using-vgg16/20653) and one on how to do data-loading (https://pytorch.org/tutorials/beginner/data_loading_tutorial.html) in pytorch, I am trying to customize the code such that I can perform transfer-learning on a new custom dataset which I want to provide via pandas dataframes.
As such, my training- and validation-data is provided via two dataframes (df_train & df_val) which both contain two columns, one for the path and one for the target. E.g. like this:
url target
0 C:/Users/aaron/Desktop/pics/4ebd... 9
1 C:/Users/aaron/Desktop/pics/7153... 3
2 C:/Users/aaron/Desktop/pics/3ee6... 3
3 C:/Users/aaron/Desktop/pics/4652... 16
4 C:/Users/aaron/Desktop/pics/28ce... 15
...
And their respective length:
print(len(df_train))
print(len(df_val))
>> 6186
>> 3447
My pipeline looks like this:
class CustomDataset(Dataset):
    def __init__(self, df, transform=None):
        self.dataframe = df_train
        self.transform = transform

    def __len__(self):
        return len(self.dataframe)

    def __getitem__(self, idx):
        img_name = self.dataframe.iloc[idx, 0]
        img = Image.open(img_name)
        img_normalized = self.transform(img)
        landmarks = self.dataframe.iloc[idx, 1]
        sample = {'data': img_normalized, 'label': int(landmarks)}
        return sample

train_dataset = CustomDataset(df_train, transform=transforms.Compose([
    transforms.Resize(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]))

val_dataset = CustomDataset(df_val, transform=transforms.Compose([
    transforms.Resize(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]))

train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=0)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=32, shuffle=True, num_workers=0)

dataloaders = {'train': train_loader, 'val': val_loader}
dataset_sizes = {'train': len(df_train), 'val': len(df_val)}
################### Training
from tqdm import tqdm
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                scheduler.step()
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data.
            for dataset in tqdm(dataloaders[phase]):
                inputs, labels = dataset["data"], dataset["label"]
                #print(inputs.type())
                inputs = inputs.to(device, dtype=torch.float)
                labels = labels.to(device, dtype=torch.long)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))

            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, len(le.classes_))
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=4)
Your problem appears to be here:
class CustomDataset(Dataset):
def __init__(self, df, transform=None):
>>>>> self.dataframe = df_train
This should be
self.dataframe = df
In your case, you are inadvertently setting both the train and val CustomDataset to df_train ...
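With that change, the constructor uses whichever dataframe is passed in (the rest of the class stays exactly as in your code):

from torch.utils.data import Dataset

class CustomDataset(Dataset):
    def __init__(self, df, transform=None):
        self.dataframe = df        # use the dataframe passed to the constructor
        self.transform = transform
    # __len__ and __getitem__ unchanged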
I'm running into an issue where I don't know how to define my network to run on two datasets at once with tf.feature_column.input_layer. In the "traditional" layout, I'd just use the feed_dict and manually pass in the training and testing data via some input-placeholder and output-placeholder but I thought it would be interesting to try and use the input_layer.
Datasets
features, labels = dataset_iterator(training_files, config)
features_test, labels_test = dataset_iterator(testing_files, config)
Network
dense_tensor = tf.feature_column.input_layer(features=features, feature_columns=columns)
for units in [256, 16]:
    dense_tensor = tf.layers.dense(dense_tensor, units, tf.nn.relu)
logits = tf.layers.dense(dense_tensor, 8)
# Verification
correct_pred = tf.equal(tf.cast(logits, tf.int32), labels)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Training
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels))
optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss_op)
Is there a way for me to use the features_test and labels_test?
My training process looks like the following:
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    keep_iterating = True
    i = 0
    print('Accuracy: {}'.format(sess.run(accuracy)))
    while keep_iterating:
        i += 1
        try:
            _, loss_val, accuracy_val = sess.run([train_op, loss_op, accuracy])
            if i % 1000 == 1:
                print('Iteration: {}: Loss: {} Accuracy: {}'.format(i, loss_val, accuracy_val))
        except tf.errors.OutOfRangeError:
            print('Iteration: {}: Loss: {} Accuracy: {}'.format(i, loss_val, accuracy_val))
            keep_iterating = False
        except Exception as e:
            keep_iterating = False
To clarify: I'm asking whether it is possible to feed separate inputs into
dense_tensor = tf.feature_column.input_layer(features=features, feature_columns=columns)
such that I can call train_op and have it run with the training iterator (features, labels), and call accuracy and have it run with the testing iterator (features_test, labels_test).
Currently, calling accuracy still uses "features" from the training iterator.
So, the solution was to do the following:
1) Wrap the two dataset configurations into functions:
def train_func():
    return dataset_config(filenames=filename_list, batch_size=64, mapper=feature_proto.unpack, num_cpus=num_cpus)

def test_func():
    return dataset_config(filenames=evaluation_list, batch_size=4096, mapper=feature_proto.unpack, num_cpus=num_cpus)
2) Use
is_training = tf.placeholder_with_default(True, shape=(), name='Is_Training')
features, labels = tf.cond(is_training, train_func, test_func)
3) Modify the network inputs to
dense_tensor = tf.feature_column.input_layer(features=features, feature_columns=columns)
4) Modify correct_pred into
correct_pred = tf.equal(tf.cast(logits, tf.int32), labels)
such that it now uses whatever labels were given
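With this in place, switching between the two input pipelines is just a matter of feeding the placeholder; a minimal usage sketch (using the train_op, loss_op, accuracy and is_training tensors defined above):

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # training step: is_training defaults to True, so tf.cond selects train_func
    _, loss_val = sess.run([train_op, loss_op])

    # evaluation: feed is_training=False so tf.cond selects test_func
    test_acc = sess.run(accuracy, feed_dict={is_training: False})
    print('Test accuracy: {}'.format(test_acc))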
I took a tutorial available here and tried to run it on my dataset. It compiles and begins training, but the model does not seem to be saved at each iteration.
I also tried with 100 epochs and it did not change anything; it keeps giving the output of the first iteration.
Do you have an idea of what the problem might be? (I know the code is long, sorry.)
def train(model, epochs, log_string):
    '''Train the RNN'''
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Used to determine when to stop the training early
        valid_loss_summary = []
        # Keep track of which batch iteration is being trained
        iteration = 0

        print()
        print("Training Model: {}".format(log_string))

        train_writer = tf.summary.FileWriter('./logs/3/train/{}'.format(log_string), sess.graph)
        valid_writer = tf.summary.FileWriter('./logs/3/valid/{}'.format(log_string))

        for e in range(epochs):
            state = sess.run(model.initial_state)

            # Record progress with each epoch
            train_loss = []
            train_acc = []
            val_acc = []
            val_loss = []

            with tqdm(total=len(x_train)) as pbar:
                for _, (x, y) in enumerate(get_batches(x_train, y_train, batch_size), 1):
                    feed = {model.inputs: x,
                            model.labels: y[:, None],
                            model.keep_prob: dropout,
                            model.initial_state: state}
                    summary, loss, acc, state, _ = sess.run([model.merged,
                                                             model.cost,
                                                             model.accuracy,
                                                             model.final_state,
                                                             model.optimizer],
                                                            feed_dict=feed)

                    # Record the loss and accuracy of each training batch
                    train_loss.append(loss)
                    train_acc.append(acc)

                    # Record the progress of training
                    train_writer.add_summary(summary, iteration)

                    iteration += 1
                    pbar.update(batch_size)

            avg_train_loss = np.mean(train_loss)
            avg_train_acc = np.mean(train_acc)

            val_state = sess.run(model.initial_state)
            with tqdm(total=len(x_valid)) as pbar:
                for x, y in get_batches(x_valid, y_valid, batch_size):
                    feed = {model.inputs: x,
                            model.labels: y[:, None],
                            model.keep_prob: 1,
                            model.initial_state: val_state}
                    summary, batch_loss, batch_acc, val_state = sess.run([model.merged,
                                                                          model.cost,
                                                                          model.accuracy,
                                                                          model.final_state],
                                                                         feed_dict=feed)

                    # Record the validation loss and accuracy of each epoch
                    val_loss.append(batch_loss)
                    val_acc.append(batch_acc)
                    pbar.update(batch_size)

            # Average the validation loss and accuracy of each epoch
            avg_valid_loss = np.mean(val_loss)
            avg_valid_acc = np.mean(val_acc)
            valid_loss_summary.append(avg_valid_loss)

            # Record the validation data's progress
            valid_writer.add_summary(summary, iteration)

            # Print the progress of each epoch
            print("Epoch: {}/{}".format(e, epochs),
                  "Train Loss: {:.3f}".format(avg_train_loss),
                  "Train Acc: {:.3f}".format(avg_train_acc),
                  "Valid Loss: {:.3f}".format(avg_valid_loss),
                  "Valid Acc: {:.3f}".format(avg_valid_acc))

            # Stop training if the validation loss does not decrease after 3 epochs
            if avg_valid_loss > min(valid_loss_summary):
                print("No Improvement.")
                stop_early += 1
                if stop_early == 3:
                    break

            # Reset stop_early if the validation loss finds a new low
            # Save a checkpoint of the model
            else:
                print("New Record!")
                stop_early = 0
                checkpoint = "sauvegarde/controverse_{}.ckpt".format(log_string)
                saver.save(sess, checkpoint)
Thank you very much for your answers :)
I am very new to PyTorch and am implementing my own image classifier network. However, I see that for each epoch the training accuracy is very good while the validation accuracy is 0 (I noted this up to the 5th epoch). I am using the Adam optimizer with a learning rate of .001, and I am resampling the whole dataset into training and validation sets after each epoch. Please help me find where I am going wrong.
Here is my code:
### where is data?
data_dir_train = '/home/sup/PycharmProjects/deep_learning/CNN_Data/training_set'
data_dir_test = '/home/sup/PycharmProjects/deep_learning/CNN_Data/test_set'
# Define your batch_size
batch_size = 64
allData = datasets.ImageFolder(root=data_dir_train,transform=transformArr)
# We need to further split our training dataset into training and validation sets.
def split_train_validation():
    # Define the indices
    num_train = len(allData)
    indices = list(range(num_train))  # start with all the indices in training set
    split = int(np.floor(0.2 * num_train))  # define the split size
    #train_idx, valid_idx = indices[split:], indices[:split]

    # Random, non-contiguous split
    validation_idx = np.random.choice(indices, size=split, replace=False)
    train_idx = list(set(indices) - set(validation_idx))

    # define our samplers -- we use a SubsetRandomSampler because it will return
    # a random subset of the split defined by the given indices without replacement
    train_sampler = SubsetRandomSampler(train_idx)
    validation_sampler = SubsetRandomSampler(validation_idx)

    #train_loader = DataLoader(allData,batch_size=batch_size,sampler=train_sampler,shuffle=False,num_workers=4)
    #validation_loader = DataLoader(dataset=allData,batch_size=1, sampler=validation_sampler)

    return (train_sampler, validation_sampler)
Training
from torch.optim import Adam
import torch
import createNN
import torch.nn as nn
import loadData as ld
from torch.autograd import Variable
from torch.utils.data import DataLoader
# check if cuda - GPU support available
cuda = torch.cuda.is_available()
#create model, optimizer and loss function
model = createNN.ConvNet(class_num=2)
optimizer = Adam(model.parameters(),lr=.001,weight_decay=.0001)
loss_func = nn.CrossEntropyLoss()
if cuda:
    model.cuda()
# function to save model
def save_model(epoch):
    torch.save(model.load_state_dict(), 'imageClassifier_{}.model'.format(epoch))
    print('saved model at epoch', epoch)

def exp_lr_scheduler(epoch, init_lr=args.lr, weight_decay=args.weight_decay, lr_decay_epoch=cf.lr_decay_epoch):
    lr = init_lr * (0.5 ** (epoch // lr_decay_epoch))
def train(num_epochs):
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('\n\nEpoch {}'.format(epoch))
        train_sampler, validation_sampler = ld.split_train_validation()
        train_loader = DataLoader(ld.allData, batch_size=30, sampler=train_sampler, shuffle=False)
        validation_loader = DataLoader(dataset=ld.allData, batch_size=1, sampler=validation_sampler)

        model.train()
        acc = 0.0
        loss = 0.0
        total = 0
        # train model with training data
        for i, (images, labels) in enumerate(train_loader):
            # if cuda then move to GPU
            if cuda:
                images = images.cuda()
                labels = labels.cuda()
            # Variable class wraps a tensor and we can calculate grad
            images = Variable(images)
            labels = Variable(labels)
            # reset accumulated gradients for each batch
            optimizer.zero_grad()
            # pass images to model which returns prediction
            output = model(images)
            # calculate the loss based on prediction and actual
            loss = loss_func(output, labels)
            # backpropagate the loss and compute gradient
            loss.backward()
            # update weights as per the computed gradients
            optimizer.step()

            # prediction class
            predVal, predClass = torch.max(output.data, 1)
            acc += torch.sum(predClass == labels.data)
            loss += loss.cpu().data[0]
            total += labels.size(0)

        # print the statistics
        train_acc = acc / total
        train_loss = loss / total
        print('Mean train acc = {} over epoch = {}'.format(epoch, acc))
        print('Mean train loss = {} over epoch = {}'.format(epoch, loss))

        # Validate model with validation data
        model.eval()
        acc = 0.0
        loss = 0.0
        total = 0
        for i, (images, labels) in enumerate(validation_loader):
            # if cuda then move to GPU
            if cuda:
                images = images.cuda()
                labels = labels.cuda()
            # Variable class wraps a tensor and we can calculate grad
            images = Variable(images)
            labels = Variable(labels)
            # reset accumulated gradients for each batch
            optimizer.zero_grad()
            # pass images to model which returns prediction
            output = model(images)
            # calculate the loss based on prediction and actual
            loss = loss_func(output, labels)
            # backpropagate the loss and compute gradient
            loss.backward()
            # update weights as per the computed gradients
            optimizer.step()
            # prediction class
            predVal, predClass = torch.max(output.data, 1)
            acc += torch.sum(predClass == labels.data)
            loss += loss.cpu().data[0]
            total += labels.size(0)

        # print the statistics
        valid_acc = acc / total
        valid_loss = loss / total
        print('Mean train acc = {} over epoch = {}'.format(epoch, valid_acc))
        print('Mean train loss = {} over epoch = {}'.format(epoch, valid_loss))

        if best_acc < valid_acc:
            best_acc = valid_acc
            save_model(epoch)

        # at 30th epoch we save the model
        if epoch == 30:
            save_model(epoch)

train(20)
I think you did not take into account that acc += torch.sum(predClass == labels.data) returns a tensor instead of a float value. Depending on the version of PyTorch you are using, I think you should change it to:
acc += torch.sum(predClass == labels.data).cpu().data[0] #pytorch 0.3
acc += torch.sum(predClass == labels.data).item() #pytorch 0.4
Although your code seems to work with an older PyTorch version, I would recommend you upgrade to version 0.4.
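To illustrate the difference (a standalone sketch for PyTorch 0.4+):

import torch

preds = torch.tensor([1, 0, 1])
labels = torch.tensor([1, 1, 1])

correct = torch.sum(preds == labels)   # a 0-dim tensor, e.g. tensor(2), not a number
acc = 0.0
acc += correct.item()                  # .item() extracts the plain Python number
print(correct, acc)                    # tensor(2) 2.0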
Also, I noticed some other problems/typos in your code.
You are loading the dataset for every epoch.
for epoch in range(num_epochs):
    print('\n\nEpoch {}'.format(epoch))
    train_sampler, validation_sampler = ld.split_train_validation()
    train_loader = DataLoader(ld.allData, batch_size=30, sampler=train_sampler, shuffle=False)
    validation_loader = DataLoader(dataset=ld.allData, batch_size=1, sampler=validation_sampler)
    ...
That should not happen; it should be enough to load it once:
train_sampler, validation_sampler = ld.split_train_validation()
train_loader = DataLoader(ld.allData, batch_size=30, sampler=train_sampler, shuffle=False)
validation_loader = DataLoader(dataset=ld.allData, batch_size=1, sampler=validation_sampler)

for epoch in range(num_epochs):
    print('\n\nEpoch {}'.format(epoch))
    ...
In the training part you have the following (this does not happen in the validation part):
train_acc = acc/total
train_loss = loss / total
print('Mean train acc = {} over epoch = {}'.format(epoch,acc))
print('Mean train loss = {} over epoch = {}'.format(epoch, loss))
Here you are printing acc instead of train_acc.
Also, in the validation part, you print 'Mean train acc = {} over epoch = {}'.format(epoch, valid_acc) when the label should be something like 'Mean val acc' (see the corrected sketch below).
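For reference, the corrected statistics block could look like this (a sketch using your variable names; note I also put the format arguments in the order the message expects, which is another small typo in the original prints):

# training statistics
train_acc = acc / total
train_loss = loss / total
print('Mean train acc = {} over epoch = {}'.format(train_acc, epoch))
print('Mean train loss = {} over epoch = {}'.format(train_loss, epoch))

# validation statistics
valid_acc = acc / total
valid_loss = loss / total
print('Mean val acc = {} over epoch = {}'.format(valid_acc, epoch))
print('Mean val loss = {} over epoch = {}'.format(valid_loss, epoch))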
Changing these lines of code and using a standard model I created with the CIFAR dataset, the training seems to converge: accuracy increases at every epoch while the mean loss value decreases.
I hope I could help you!