I am working through the Fashion-MNIST data, and I would like to see the output gradient, which might be the mean squared sum between the first and second layer.
My code is below:
# import the necessary libs
import numpy as np
import torch
import time
# Loading the Fashion-MNIST dataset
from torchvision import datasets, transforms
# Get GPU Device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.cuda.get_device_name(0)
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))
])
# Download and load the training data
trainset = datasets.FashionMNIST('MNIST_data/', download = True, train = True, transform = transform)
testset = datasets.FashionMNIST('MNIST_data/', download = True, train = False, transform = transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size = 128, shuffle = True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size = 128, shuffle = True, num_workers=4)
# Examine a sample
dataiter = iter(trainloader)
images, labels = next(dataiter)
# Define the network architecture
from torch import nn, optim
import torch.nn.functional as F
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 10),
nn.LogSoftmax(dim = 1)
)
model.to(device)
# Define the loss
criterion = nn.MSELoss()
# Define the optimizer
optimizer = optim.Adam(model.parameters(), lr = 0.001)
# Define the epochs
epochs = 5
train_losses, test_losses = [], []
squared_sum = []
# start = time.time()
for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:
        # Flatten Fashion-MNIST images into a 784 long vector
        images = images.to(device)
        labels = labels.to(device)
        images = images.view(images.shape[0], -1)
        optimizer.zero_grad()
        output = model[0].forward(images)
        loss = criterion(output[0], labels.float())
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    else:
        print(running_loss)
        test_loss = 0
        accuracy = 0
        # Turn off gradients for validation, saves memory and computation
        with torch.no_grad():
            # Set the model to evaluation mode
            model.eval()
            # Validation pass
            for images, labels in testloader:
                images = images.to(device)
                labels = labels.to(device)
                images = images.view(images.shape[0], -1)
                ps = model(images[0])
                test_loss += criterion(ps, labels)
                top_p, top_class = ps.topk(1, dim = 1)
                equals = top_class == labels.view(*top_class.shape)
                accuracy += torch.mean(equals.type(torch.FloatTensor))
        model.train()
        print("Epoch: {}/{}..".format(e+1, epochs),
              "Training loss: {:.3f}..".format(running_loss/len(trainloader)),
              "Test loss: {:.3f}..".format(test_loss/len(testloader)),
              "Test Accuracy: {:.3f}".format(accuracy/len(testloader)))
What I want to get:
for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:
        # Flatten Fashion-MNIST images into a 784 long vector
        images = images.to(device)
        labels = labels.to(device)
        images = images.view(images.shape[0], -1)
        optimizer.zero_grad()
        output = model[0].forward(images)
        loss = criterion(output[0], labels.float())
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
Here, model[0] should be the first layer, nn.Linear(784, 128). I would like to get the mean squared error for the first and second layers only.
If I run this code, I receive the error below:
RuntimeError: The size of tensor a (128) must match the size of tensor b (96) at non-singleton dimension 0
What do I need to do to run this code correctly and get the MSELoss?
The error is caused by the mismatch between the number of samples in the dataset and the batch size.
In more detail: the Fashion-MNIST training set contains 60,000 samples and your current batch_size is 128, so one epoch needs 60000/128 = 468.75 iterations. That is where the problem comes from: for the first 468 iterations each batch holds 128 samples, but the last batch contains only 60000 - 468*128 = 96 samples.
To solve this, I think you need to choose a suitable batch_size, and adjust the number of neurons in your model to match.
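You can confirm this arithmetic by iterating over the loader once and printing the batch sizes; a minimal sketch, using the trainloader defined above:
# Count the batches and check the size of the last, incomplete one
batch_sizes = [images.shape[0] for images, labels in trainloader]
print(len(batch_sizes), batch_sizes[-1])  # expected: 469 96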
I think the following should work for computing the loss:
trainloader = torch.utils.data.DataLoader(trainset, batch_size = 96, shuffle = True, num_workers=0)
testloader = torch.utils.data.DataLoader(testset, batch_size = 96, shuffle = True, num_workers=0)
model = nn.Sequential(nn.Linear(784, 96),
nn.ReLU(),
nn.Linear(96, 10),
nn.LogSoftmax(dim = 1)
)
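Alternatively, if you would rather keep batch_size = 128, the DataLoader has a drop_last flag that simply discards the incomplete final batch; a minimal sketch:
# Drop the final batch of 96 samples so every batch seen during training has exactly 128 samples
trainloader = torch.utils.data.DataLoader(trainset, batch_size = 128, shuffle = True,
                                          num_workers = 4, drop_last = True)
testloader = torch.utils.data.DataLoader(testset, batch_size = 128, shuffle = True,
                                         num_workers = 4, drop_last = True)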
Related
I'm having trouble understanding why this is throwing an error. This code is pulled directly from the PyTorch documentation for an NN classifier on the Fashion-MNIST dataset. However, when I try to switch it over to the MNIST handwritten digits dataset, it produces the following error:
RuntimeError: The size of tensor a (10) must match the size of tensor b (64) at non-singleton dimension 1
This occurs when the loss function is called inside the training loop. Can anyone help me understand why this is happening? Thanks!
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda
import torchvision.models as models
import matplotlib.pyplot as plt
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#device = "cpu"
print(f"Using {device} device")
training_data = datasets.MNIST(
root="data",
train=True,
download=True,
transform=ToTensor()
)
test_data = datasets.MNIST(
root="data",
train=False,
download=True,
transform=ToTensor()
)
train_dataloader = DataLoader(training_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
class NeuralNetwork(nn.Module):
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28*28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
        )

    def forward(self, x):
        x = self.flatten(x)
        logits = self.linear_relu_stack(x)
        return logits
def train_loop(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    for batch, (X, y) in enumerate(dataloader):
        # Compute prediction and loss
        X, y = X.to(device), y.to(device)
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
def test_loop(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss, correct = 0, 0

    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
def save_checkpoint(state, filename = "checkpoint.pth.tar"):
    print("=> Saving checkpoint")
    torch.save(state, filename)
model = NeuralNetwork().to(device)
learning_rate = 1e-3
batch_size = 64
epochs = 10
# Initialize the loss function
loss_fn = nn.MSELoss()
optimiser = torch.optim.Adam(model.parameters(), lr=learning_rate)
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train_loop(train_dataloader, model, loss_fn, optimiser)
    test_loop(test_dataloader, model, loss_fn)
print("Done!")
torch.nn.MSELoss is an implementation of mean squared error. You can't measure the difference between two tensors if they have different sizes (MSELoss does not allow for broadcasting). So if you're using MSELoss, the predictions and the targets must have the same shape. In your case, preds is a tensor of shape [64, 10], and y is a tensor of shape [64].
The reason y is of shape [64] rather than [64, 10] is that most classification dataset implementations represent targets as integer labels rather than one-hot encoded vectors. In theory, you could convert these integer label targets to one-hot encoded targets.
But in reality, since this is a classification problem, you should probably be using something like nn.CrossEntropyLoss rather than nn.MSELoss. The former is a conventional classification loss function, and it allows the targets to be integer labels rather than one-hot labels (so just swapping out MSELoss for CrossEntropyLoss should solve your problem). MSELoss is better suited for regression tasks and such.
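A minimal sketch of both options, assuming a batch of logits preds of shape [64, 10] and integer labels y of shape [64]:
import torch
import torch.nn.functional as F
from torch import nn

preds = torch.randn(64, 10)            # model outputs (logits), shape [64, 10]
y = torch.randint(0, 10, (64,))        # integer class labels, shape [64]

# Recommended: CrossEntropyLoss works directly on integer labels
ce_loss = nn.CrossEntropyLoss()(preds, y)

# Possible in theory: one-hot encode the labels so the shapes match for MSELoss
y_onehot = F.one_hot(y, num_classes=10).float()   # shape [64, 10]
mse_loss = nn.MSELoss()(preds, y_onehot)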
I have experimented with training a simple TensorFlow model under two scenarios: passing my model to my training loop (and to the subfunctions called from the training loop), versus not passing my model to the training loop. The two scenarios give different results. When I pass my model to the training functions, the model is trained properly. In the second scenario, something is wrong because the model is apparently not trained. I am baffled, and I wonder if it is a scoping issue.
To be more specific, my setup dynamically creates a new, larger model at each iteration of a for loop (adding some layers each time) and then trains it. As stated before, I train the model in two scenarios: passing the model to the training subfunctions and not doing so, and I get different results depending on which one I use. I verify this by giving the model test samples (class 0 MNIST images) and checking whether the correct classification is output. The models trained by passing the model as an argument are trained correctly, but if I do not do this, only the first model created by the for loop is trained correctly; the later ones produce incorrect class predictions. Can this be explained?
The code below is the version that trains without passing the model as an argument.
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from tensorflow.keras.optimizers import Adam
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import time
epochs = 200
input_shape = (28,28,1)
num_classes=10
batch_size = 64
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
x_train = np.expand_dims(x_train, -1)
x_val = np.expand_dims(x_val, -1)
x_test = np.expand_dims(x_test, -1)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_val = x_val.astype("float32")
y_test_sorted_args_0=np.where(y_test == 0)
x_test_0=x_test[y_test_sorted_args_0]
y_test_0=np.full( (x_test_0.shape)[0],0)
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(batch_size)
acc_metric = keras.metrics.SparseCategoricalAccuracy()
val_acc_metric = keras.metrics.SparseCategoricalAccuracy()
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=False)
#tf.function
def train_step(x, y):
    with tf.GradientTape() as tape:
        mod_output = model(x, training=True)
        loss_value = loss_fn(y, mod_output)
    grads = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(grads, model.trainable_weights))
    acc_metric.update_state(y, mod_output)
    return loss_value

#tf.function
def test_step(x, y):
    val = model(x, training=False)
    acc_metric.update_state(y, val)
def train(epochs):
    for epoch in range(epochs):
        print("\nStart of epoch %d" % (epoch,))
        start_time = time.time()
        # Iterate over the batches of the dataset.
        for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
            loss_value = train_step(x_batch_train, y_batch_train)
            # Log every 200 batches.
            if step % 200 == 0:
                print(
                    "Training loss (for one batch) at step %d: %.4f"
                    % (step, float(loss_value))
                )
                print("Seen so far: %d samples" % ((step + 1) * batch_size))
        # Display metrics at the end of each epoch.
        train_acc = acc_metric.result()
        print("Training acc over epoch: %.4f" % (float(train_acc),))
        # Reset training metrics at the end of each epoch
        acc_metric.reset_states()
        # Run a validation loop at the end of each epoch.
        for x_batch_val, y_batch_val in val_dataset:
            test_step(x_batch_val, y_batch_val)
        val_acc = acc_metric.result()
        val_acc_metric.reset_states()
        print("Validation acc: %.4f" % (float(val_acc),))
        print("Time taken: %.2fs" % (time.time() - start_time))
max_hidden = 7
for num_hidden_layers in range(1, max_hidden, 3):
    model1 = keras.Sequential(
        [
            keras.Input(shape=input_shape),
            layers.Flatten(),
        ]
    )
    for i in range(1, num_hidden_layers+1):
        model1.add(layers.Dense(150, activation="relu"))
    model1.add(layers.Dense(num_classes, activation="softmax"))
    model = model1
    train(epochs)
    # Verify that the model is properly trained by checking that it correctly predicts images from class 0.
    # The more class 0 predictions we have, the better.
    for sample_index in range(0, 10):
        x_sample = x_test_0[sample_index, :, :, :]
        x_sample = np.expand_dims(x_sample, 0)
        print(tf.math.argmax(model(x_sample), axis=1))
    time.sleep(1)
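For reference, the working scenario described above is the one where the model is passed to the training functions explicitly instead of being read from the enclosing scope; a rough sketch of how those signatures differ (not the exact code used):
def train_step(model, x, y):
    with tf.GradientTape() as tape:
        mod_output = model(x, training=True)
        loss_value = loss_fn(y, mod_output)
    grads = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(grads, model.trainable_weights))
    acc_metric.update_state(y, mod_output)
    return loss_value

def train(model, epochs):
    for epoch in range(epochs):
        for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
            loss_value = train_step(model, x_batch_train, y_batch_train)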
When I train my PyTorch-based CNN model, I can get the classification (BCELoss) error to go from ~0.6 to 0.1 or better on the training data. But when I rerun the training or test data through the trained model, the results are essentially random. This isn't even an issue of how good the model is -- I'm not even looking at testing data. I can crank the learning rate up high and get complete overfitting on training, but when I check the predictions on the same training data, it's just junk.
I must have a bug somewhere in my code, but I can't figure out where.
If anyone can take a look at this code and spot the error, I would be very appreciative!
## data loader
def create_balanced_DataLoader(X, Y, batch_size, train=True, num_workers=0):
    classes, counts = np.unique(Y, return_counts=True)
    class_weights = [sum(counts) / c for c in counts]
    sample_weights = [class_weights[x] for x in Y]
    sample_weights = torch.from_numpy(np.array(sample_weights)).float()
    wrs = utils.WeightedRandomSampler(sample_weights, len(sample_weights))
    td = utils.TensorDataset(
        torch.from_numpy(np.moveaxis(X, 2, 1)).float(),
        torch.from_numpy(Y).float()
    )
    dl = utils.DataLoader(td, batch_size=batch_size, num_workers=num_workers, sampler=wrs)
    return dl
batch_size = 128
train_full_dl = create_balanced_DataLoader(X_train, Y_train, batch_size, train=True, num_workers=4)
test_full_dl = create_balanced_DataLoader(X_test, Y_test, batch_size, train=False, num_workers=4)
## training code
def train(epochs, optimizer, scheduler, model, loss_fn, train_loader, test_loader):
    for epoch in range(1, epochs + 1):
        train_losses = []
        model.train()
        for images, labels in train_loader:
            optimizer.zero_grad()
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            loss = loss_fn(torch.flatten(outputs), labels)
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
        test_loss = test(model, test_loader, loss_fn)
        test_loss = np.round(test_loss, 4)
        train_loss = np.round(np.mean(np.asarray(train_losses)), 4)
def test(model, test_loader, loss_fn):
    model.eval()
    test_losses = []
    with torch.no_grad():
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            loss = loss_fn(torch.flatten(outputs), labels)
            test_losses.append(loss.item())
    mean_loss = np.mean(np.asarray(test_losses))
    return mean_loss
## run training
loss_fn = torch.nn.BCELoss()
epoch_n = 100
lr = 1e-4
betas = (0.9, 0.999)
weight_decay = 0.5e-4
eps = 1e-8
optimizer = torch.optim.AdamW(model.parameters(), lr=lr, betas=betas, weight_decay=weight_decay, eps=eps)
train(
epochs = epoch_n,
optimizer = optimizer,
scheduler = None,
model = model,
loss_fn = loss_fn,
train_loader = train_full_dl,
test_loader = test_short_dl,
weights_and_baises = wandb
)
## check predictions
with torch.no_grad():
    model.eval()
    test_predictions = list()
    test_actual = list()
    for i, (images, labels) in enumerate(test_full_dl):
        images = images.to(device)
        test_actual += copy.deepcopy(labels)  # fix pty issue
        labels = labels.to(device)
        images = images.to(device)
        outputs = model(images)
        test_predictions += outputs.cpu()
    test_predictions = torch.stack(test_predictions).cpu().detach().numpy()
    test_actual = torch.stack(test_actual).cpu().detach().numpy()

    train_predictions = list()
    train_actual = list()
    for i, (images, labels) in enumerate(train_full_dl):
        images = images.to(device)
        train_actual += copy.deepcopy(labels)  # fix pty issue
        labels = labels.to(device)
        outputs = model(images)
        train_predictions += outputs.cpu()
    train_predictions = torch.stack(train_predictions).cpu().detach().numpy()
    train_actual = torch.stack(train_actual).cpu().detach().numpy()
## now compare predictions and actual data...
## ...
The following code (MNIST MLP in PyTorch) delivers approximately the same training loss regardless of having the last layer in the forward pass returning:
F.log_softmax(x)
(x)
Option 1 should be incorrect, because I already use criterion = nn.CrossEntropyLoss(), and yet the results are almost identical. Am I missing anything?
import torch
import numpy as np
import time
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # linear layer (784 -> 512 hidden nodes)
        self.fc1 = nn.Linear(28 * 28, 512)
        self.dropout1 = nn.Dropout(p=0.2, inplace=False)
        self.fc2 = nn.Linear(512, 256)
        self.dropout2 = nn.Dropout(p=0.2, inplace=False)
        self.dropout = nn.Dropout(p=0.2, inplace=False)
        self.fc3 = nn.Linear(256, 10)

    def forward(self, x):
        # flatten image input
        x = x.view(-1, 28 * 28)
        # add hidden layer, with relu activation function
        x = F.relu(self.fc1(x))
        x = self.dropout1(x)
        x = F.relu(self.fc2(x))
        x = self.dropout2(x)
        x = self.fc3(x)
        # return F.log_softmax(x)
        return x
# initialize the NN
model = Net()
print(model)
model.to('cuda')
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
n_epochs = 10
model.train() # prep model for training
for epoch in range(n_epochs):
    # monitor training loss
    train_loss = 0.0
    start = time.time()
    for data, target in train_loader:
        data, target = data.to('cuda'), target.to('cuda')
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()*data.size(0)
    train_loss = train_loss/len(train_loader.dataset)
    print('Epoch: {} \tTraining Loss: {:.6f} \ttime: {:.6f}'.format(
        epoch+1,
        train_loss,
        time.time()-start
    ))
For numerical stability, nn.CrossEntropyLoss() is implemented with the softmax computation inside it, so you should NOT apply a softmax layer in your forward pass.
From the docs (https://pytorch.org/docs/stable/nn.html#crossentropyloss):
This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class.
Using the softmax layer in the forward pass will lead to worse metrics because the gradient magnitudes are lowered (thus, the weight updates are also lowered). I learned it the hard way!
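A minimal sketch illustrating the quoted statement, using a small batch of random logits:
import torch
import torch.nn.functional as F
from torch import nn

logits = torch.randn(4, 10)           # raw network outputs, no softmax applied
targets = torch.randint(0, 10, (4,))  # integer class labels

ce = nn.CrossEntropyLoss()(logits, targets)
nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)
print(torch.allclose(ce, nll))        # True: CrossEntropyLoss == LogSoftmax + NLLLoss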
I guess your point is that the losses are similar at the beginning of training, but by the end of training they should not be. It is usually a good sanity check to overfit your model on a single batch of data: the model should reach 100% accuracy if the batch is small enough. If the model is taking too long to train, then you probably have a bug somewhere.
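A minimal sketch of that sanity check, reusing the model, criterion, optimizer and train_loader defined in the question; the loss should drop towards zero if there is no bug:
# Overfit a single batch as a sanity check
data, target = next(iter(train_loader))
data, target = data.to('cuda'), target.to('cuda')
for step in range(500):
    optimizer.zero_grad()
    loss = criterion(model(data), target)
    loss.backward()
    optimizer.step()
print(loss.item())  # should be close to 0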
Hope that helps =)
Assume that I have 77 samples to train my CNN, and my batch size is 10. Then the last batch has a batch size of 7 instead of 10. Somehow when I pass it to the loss function such as nn.MSELoss(), it gives me the error:
RuntimeError: The size of tensor a (10) must match the size of tensor
b (7) at non-singleton dimension 1
So PyTorch doesn't support batches with different sizes?
My code in question:
import numpy as np
import torch
from torch import nn
import torchvision
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, (5,4))
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(64, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, x.shape[1] * x.shape[2] * x.shape[3])
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
model = Net()
batch_size = 10
# Generating Artifical data
x_train = torch.randn((77,1,20,20))
y_train = torch.randint(0,10,size=(77,),dtype=torch.float)
trainset = torch.utils.data.TensorDataset(x_train,y_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=0)
# testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=0)
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
for epoch in range(20):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs
        inputs, labels = data
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.item()
        if i % 10 == 0:
            print('epoch{}, step{}, loss: {}'.format(epoch + 1, i + 1, running_loss))
            # print("frac post = {}".format(frac_post))
            running_loss = 0.0
The problem is not due to the batch size, but to a failure to broadcast properly between the 10 outputs of your CNN and the single label provided in each example.
If you look at the model output and label tensor shapes during the batch where the error is thrown,
print(outputs.shape, labels.shape)
#out: torch.Size([7, 10]) torch.Size([7])
you'll see that each label is stored as a single scalar rather than a vector of ten values. According to PyTorch broadcasting rules, two tensors are broadcastable only if they are compatible in all trailing dimensions. In this case, the trailing dimension of the model output (10) is incompatible with that of the labels (7).
To fix, either add a dummy dimension to the label (assuming you actually want to broadcast the labels to match your ten network outputs), or define a network with scalar outputs. For example:
y_train = torch.randint(0,10,size=(77,1),dtype=torch.float)
results in
print(outputs.shape, labels.shape)
#out: torch.Size([7, 10]) torch.Size([7,1])
# these are broadcastable
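A minimal sketch of the other option (a network with scalar outputs), assuming the rest of the code above stays the same; only the last layer changes, and the extra output dimension is squeezed away before the loss:
# One scalar output per sample instead of ten
model = Net()
model.fc3 = nn.Linear(84, 1)

# inside the training loop:
outputs = model(inputs).squeeze(1)    # shape [batch], matches labels of shape [batch]
loss = criterion(outputs, labels)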