Multiclass classification, IndexError: Target 2 is out of bounds

I am facing a multiclass classification problem related to the activity of some drugs using a PyTorch neural net. I have three activity classes (0, 1 and 2). To tackle the problem I adopted the one-vs-one approach, creating three binary classifiers: 0 vs. 1, 1 vs. 2 and 2 vs. 0. When I train the second classifier (class 1 vs. class 2) I get the following error:
IndexError: Target 2 is out of bounds.
Is there a way to solve it without reassigning the labels? Thank you all!
This is my net, a Graph Isomorphism Network built with PyTorch Geometric:
class GIN1(torch.nn.Module):
    def __init__(self, h):
        super(GIN1, self).__init__()
        dim_h_conv = h
        dim_h_fc = dim_h_conv * 5
        # Convolutional layers
        self.conv1 = GINConv(Sequential(Linear(14, dim_h_conv),
                                        BatchNorm1d(dim_h_conv), ReLU(),
                                        Linear(dim_h_conv, dim_h_conv), ReLU()))
        self.conv2 = GINConv(Sequential(Linear(dim_h_conv, dim_h_conv),
                                        BatchNorm1d(dim_h_conv), ReLU(),
                                        Linear(dim_h_conv, dim_h_conv), ReLU()))
        self.conv3 = GINConv(Sequential(Linear(dim_h_conv, dim_h_conv),
                                        BatchNorm1d(dim_h_conv), ReLU(),
                                        Linear(dim_h_conv, dim_h_conv), ReLU()))
        self.conv4 = GINConv(Sequential(Linear(dim_h_conv, dim_h_conv),
                                        BatchNorm1d(dim_h_conv), ReLU(),
                                        Linear(dim_h_conv, dim_h_conv), ReLU()))
        self.conv5 = GINConv(Sequential(Linear(dim_h_conv, dim_h_conv),
                                        BatchNorm1d(dim_h_conv), ReLU(),
                                        Linear(dim_h_conv, dim_h_conv), ReLU()))
        # Fully connected layers
        self.lin1 = Linear(dim_h_fc, dim_h_fc)
        self.lin2 = Linear(dim_h_fc, 2)
        self.initialize_w()

    def forward(self, x, edge_index, batch):
        h1 = self.conv1(x, edge_index)
        h2 = self.conv2(h1, edge_index)
        h3 = self.conv3(h2, edge_index)
        h4 = self.conv4(h3, edge_index)
        h5 = self.conv5(h4, edge_index)
        # Graph-level readout
        h1 = global_add_pool(h1, batch)
        h2 = global_add_pool(h2, batch)
        h3 = global_add_pool(h3, batch)
        h4 = global_add_pool(h4, batch)
        h5 = global_add_pool(h5, batch)
        # Concatenate graph embeddings
        h = torch.cat((h1, h2, h3, h4, h5), dim=1)
        # Classifier
        h = self.lin1(h)
        h = h.relu()
        h = F.dropout(h, p=hp_gin1['p'], training=self.training)
        h = self.lin2(h)
        h = F.log_softmax(h, dim=1)
        return h

    def initialize_w(self):
        for m in self.modules():
            if isinstance(m, Linear):
                torch.nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
                torch.nn.init.constant_(m.bias, 0)
            if isinstance(m, BatchNorm1d):
                torch.nn.init.constant_(m.weight, 1)
                torch.nn.init.constant_(m.bias, 0)
And this is my training loop:
gin2 = GIN2(h=hp_gin2['h'])  # 40
optimizer = torch.optim.Adam(gin2.parameters(), lr=hp_gin2['lr'])
criterion = torch.nn.CrossEntropyLoss()

def train(train_loader):
    gin2.train()
    loss_all = 0
    for data in train_loader:
        output = gin2(data.x, data.edge_index, data.batch)
        loss = criterion(output, data.y)
        l2_lambda = hp_gin2['lambda']
        l2_norm = sum(p.pow(2.0).sum()
                      for p in gin2.parameters())
        loss = loss + l2_lambda * l2_norm
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_all += loss.item() * data.num_graphs
    return loss_all / len(train_loader.dataset)

def test_loss(loader):
    total_loss_val = 0
    with torch.no_grad():
        for data in loader:
            output = gin2(data.x, data.edge_index, data.batch)
            batch_loss = criterion(output, data.y)
            total_loss_val += batch_loss.item() * data.num_graphs
    return total_loss_val / len(loader.dataset)

def test(loader):
    gin2.eval()
    correct = 0
    for data in loader:
        output = gin2(data.x, data.edge_index, data.batch)
        accuracy = Accuracy(average='macro', num_classes=2)
        acc = accuracy(output, data.y)
    return acc

OP needed to match the output dimension of their model with the number of label classes (see discussion): nn.CrossEntropyLoss expects targets in the range [0, C-1], where C is the size of the final layer, so a 2-unit output layer cannot accept the target 2.
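A minimal sketch of the two options, keeping the posted code's names (the example label batch is hypothetical):

import torch

# Option 1: widen the head so the original labels {1, 2} stay within [0, C-1]
# (class 0 simply never occurs in this one-vs-one split):
# self.lin2 = Linear(dim_h_fc, 3)

# Option 2: remap labels {1, 2} -> {0, 1} and keep the 2-unit head:
y = torch.tensor([1, 2, 2, 1])  # hypothetical batch of labels for the 1 vs. 2 task
y_remapped = y - 1              # now valid targets for CrossEntropyLoss with C = 2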

Related

How to perform weight regularization in pytorch?

I have a model implemented in PyTorch that applies a final fully connected layer before running the softmax function.
The architecture is defined to solve a 4-class Speech Emotion Recognition task: given an audio track, it transforms it into its spectrogram and uses it to predict the emotion among happiness, sadness, neutrality and anger.
Unlike the architecture in the paper, it adapts the Compact Convolutional Transformer implementation found on GitHub at https://github.com/SHI-Labs/Compact-Transformers/blob/main/src/cct.py.
To improve the performance of the model I am following some tricks defined in the paper https://arxiv.org/abs/2104.07288.
As described in the paper, however, my model also suffers from a "class collapse" problem: even with a balanced dataset, it predicts the anger and sadness classes well and the other two badly.
In the paper, to solve this problem, they apply a particular weight regularization technique to the fully connected layer, described in section 2.4.
Unfortunately, I cannot understand how to modify my fully connected layer in PyTorch to implement this type of regularization.
Code of the model:
class CCT(nn.Module):
    def __init__(self,
                 img_size=224,
                 embedding_dim=768,
                 n_input_channels=3,
                 n_conv_layers=1,
                 kernel_size=7,
                 stride=2,
                 padding=3,
                 pooling_kernel_size=3,
                 pooling_stride=2,
                 pooling_padding=1,
                 dropout=0.,
                 attention_dropout=0.1,
                 stochastic_depth=0.1,
                 num_layers=14,
                 num_heads=6,
                 mlp_ratio=4.0,
                 num_classes=1000,
                 positional_embedding='learnable',
                 *args, **kwargs):
        super(CCT, self).__init__()
        self.tokenizer = Tokenizer(n_input_channels=n_input_channels,
                                   n_output_channels=embedding_dim,
                                   kernel_size=kernel_size,
                                   stride=stride,
                                   padding=padding,
                                   pooling_kernel_size=pooling_kernel_size,
                                   pooling_stride=pooling_stride,
                                   pooling_padding=pooling_padding,
                                   max_pool=True,
                                   activation=nn.ReLU,
                                   n_conv_layers=n_conv_layers,
                                   conv_bias=False)
        self.classifier = TransformerClassifier(
            sequence_length=self.tokenizer.sequence_length(n_channels=n_input_channels,
                                                           height=img_size,
                                                           width=img_size),
            embedding_dim=embedding_dim,
            seq_pool=True,
            dropout=dropout,
            attention_dropout=attention_dropout,
            stochastic_depth=stochastic_depth,
            num_layers=num_layers,
            num_heads=num_heads,
            mlp_ratio=mlp_ratio,
            num_classes=num_classes,
            positional_embedding=positional_embedding
        )

    def forward(self, x):
        x = self.tokenizer(x)
        return self.classifier(x)

class Tokenizer(nn.Module):
    def __init__(self,
                 kernel_size, stride, padding,
                 pooling_kernel_size=3, pooling_stride=2, pooling_padding=1,
                 n_conv_layers=1,
                 n_input_channels=3,
                 n_output_channels=64,
                 in_planes=64,
                 activation=None,
                 max_pool=True,
                 conv_bias=False):
        super(Tokenizer, self).__init__()
        n_filter_list = [n_input_channels] + \
                        [in_planes for _ in range(n_conv_layers - 1)] + \
                        [n_output_channels]
        self.conv_layers = nn.Sequential(
            *[nn.Sequential(
                nn.Conv2d(n_filter_list[i], n_filter_list[i + 1],
                          kernel_size=(kernel_size, kernel_size),
                          stride=(stride, stride),
                          padding=(padding, padding), bias=conv_bias),
                nn.Identity() if activation is None else activation(),
                nn.MaxPool2d(kernel_size=pooling_kernel_size,
                             stride=pooling_stride,
                             padding=pooling_padding) if max_pool else nn.Identity()
            )
                for i in range(n_conv_layers)
            ])
        self.flattener = nn.Flatten(2, 3)
        self.apply(self.init_weight)

    def sequence_length(self, n_channels=3, height=224, width=224):
        return self.forward(torch.zeros((1, n_channels, height, width))).shape[1]

    def forward(self, x):
        return self.flattener(self.conv_layers(x)).transpose(-2, -1)

    @staticmethod
    def init_weight(m):
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight)

class TransformerClassifier(Module):
    def __init__(self,
                 seq_pool=True,
                 embedding_dim=768,
                 num_layers=12,
                 num_heads=12,
                 mlp_ratio=4.0,
                 num_classes=1000,
                 dropout=0.1,
                 attention_dropout=0.1,
                 stochastic_depth=0.1,
                 positional_embedding='learnable',
                 sequence_length=None):
        super().__init__()
        positional_embedding = positional_embedding if \
            positional_embedding in ['sine', 'learnable', 'none'] else 'sine'
        dim_feedforward = int(embedding_dim * mlp_ratio)
        self.embedding_dim = embedding_dim
        self.sequence_length = sequence_length
        self.seq_pool = seq_pool
        self.num_tokens = 0
        assert sequence_length is not None or positional_embedding == 'none', \
            f"Positional embedding is set to {positional_embedding} and" \
            f" the sequence length was not specified."
        if not seq_pool:
            sequence_length += 1
            self.class_emb = Parameter(torch.zeros(1, 1, self.embedding_dim),
                                       requires_grad=True)
            self.num_tokens = 1
        else:
            self.attention_pool = Linear(self.embedding_dim, 1)
        if positional_embedding != 'none':
            if positional_embedding == 'learnable':
                self.positional_emb = Parameter(torch.zeros(1, sequence_length, embedding_dim),
                                                requires_grad=True)
                init.normal_(self.positional_emb, std=0.2)
            else:
                self.positional_emb = Parameter(self.sinusoidal_embedding(sequence_length, embedding_dim),
                                                requires_grad=False)
        else:
            self.positional_emb = None
        self.dropout = Dropout(p=dropout)
        dpr = [x.item() for x in torch.linspace(0, stochastic_depth, num_layers)]
        self.blocks = ModuleList([
            TransformerEncoderLayer(d_model=embedding_dim, nhead=num_heads,
                                    dim_feedforward=dim_feedforward, dropout=dropout,
                                    attention_dropout=attention_dropout, drop_path_rate=dpr[i])
            for i in range(num_layers)])
        self.norm = LayerNorm(embedding_dim)
        self.fc = Linear(embedding_dim, num_classes)
        self.apply(self.init_weight)

    def forward(self, x):
        if self.positional_emb is None and x.size(1) < self.sequence_length:
            x = F.pad(x, (0, 0, 0, self.n_channels - x.size(1)), mode='constant', value=0)
        if not self.seq_pool:
            cls_token = self.class_emb.expand(x.shape[0], -1, -1)
            x = torch.cat((cls_token, x), dim=1)
        if self.positional_emb is not None:
            x += self.positional_emb
        x = self.dropout(x)
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        if self.seq_pool:
            x = torch.matmul(F.softmax(self.attention_pool(x), dim=1).transpose(-1, -2), x).squeeze(-2)
        else:
            x = x[:, 0]
        x = self.fc(x)
        return x
Can someone help me?
As you have not shared your network architecture, I will give a basic example. I am not sure about the regularization described in the paper, but here is a simple example that applies L1 regularization to a specific layer (e.g. layer 0):
import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Sequential(
    nn.Linear(5, 5),
    nn.ReLU(),
    nn.Linear(5, 2)
)
x = torch.randn(5, 5)
target = torch.ones(5, dtype=torch.long)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-2)

for epoch in range(10):
    optimizer.zero_grad()
    output = model(x)
    loss = criterion(output, target)
    # This is your weight regularization: choose your layer, like model[0],
    # and apply the norm that you want on that layer's weights
    l1_norm = torch.norm(model[0].weight, p=1)
    loss += l1_norm
    loss.backward()
    optimizer.step()
    print('Epoch {}, loss {}, norm layer {}'.format(
        epoch, loss.item(), l1_norm.item()))
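In practice you would usually scale the penalty by a small coefficient so it does not dominate the task loss, e.g. loss += l1_lambda * l1_norm with something like l1_lambda = 1e-3 (a hypothetical value you would tune on validation data).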

How to plot Receptive Fields for a CNN/FashionMNIST?

I created my CNN with PyTorch Lightning and I am looking to plot the receptive fields.
Do you have any suggestions?
I have looked for different solutions here and there, but I can't make them work with PyTorch Lightning.
Is it possible to visualize the receptive fields directly inside TensorBoard?
I'll share with you my Dataset:
train_dataset = torchvision.datasets.FashionMNIST('classifier_data', train=True, download=True, transform=transforms.ToTensor())
train, val = train_test_split(train_dataset, test_size=.2)

train_loader = DataLoader(train, batch_size=32)
val_loader = DataLoader(val, batch_size=32)  # wrap the validation split, not the training split

test_dataset = torchvision.datasets.FashionMNIST('classifier_data', train=False, download=True, transform=transforms.ToTensor())
test_loader = DataLoader(test_dataset, batch_size=32)
and CNN:
def __init__(self, dropout, learn_rate, momentum, weight_decay, optimizer):
#def __init__(self, dropout, learn_rate, weight_decay, optimizer):
    super().__init__()
    self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)
    self.conv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5)
    self.fc1 = nn.Linear(in_features=12*4*4, out_features=120)
    self.fc2 = nn.Linear(in_features=120, out_features=60)
    self.out = nn.Linear(in_features=60, out_features=10)
    self.do = nn.Dropout(dropout)  # for overfitting issues
    self.loss = nn.CrossEntropyLoss()
    self.accuracy = torchmetrics.Accuracy()
    self.learn_rate = learn_rate
    self.momentum = momentum  # with Adam we don't have momentum. To check the best optimizer with Optuna, comment this line.
    self.weight_decay = weight_decay
    self.optimizer = optimizer
    self.train_loss = []
    self.val_loss = []
    self.train_acc = []
    self.test_acc = []
    # plot into TensorBoard
    log_dir = pathlib.Path.cwd() / "lightning_logs"
    self.writer = SummaryWriter(log_dir)

# forward step
# I add each layer to the histogram. It's plotted into TensorBoard
def forward(self, x, additional_out=False):
    # conv1
    x = self.conv1(x)
    self.writer.add_histogram("First convolutional layer CNN", x)
    x = F.relu(x)
    x = F.max_pool2d(x, kernel_size=2, stride=2)
    # conv2
    x = self.conv2(x)
    self.writer.add_histogram("Second convolutional layer CNN", x)
    x = F.relu(x)
    x = F.max_pool2d(x, kernel_size=2, stride=2)
    # fully connected 1
    x = x.reshape(-1, 12*4*4)
    x = self.fc1(x)
    self.writer.add_histogram("First linear layer CNN", x)
    x = F.relu(x)
    x = self.do(x)
    # fully connected 2
    x = self.fc2(x)
    self.writer.add_histogram("Second linear layer CNN", x)
    x = F.relu(x)
    x = self.do(x)
    # output
    x = self.out(x)
    self.writer.add_histogram("Output layer CNN", x)
    return x

# optimizer
def configure_optimizers(self):
    #optimizer = self.optimizer(self.parameters(), lr=self.learn_rate, momentum=self.momentum, weight_decay=self.weight_decay)
    optimizer = self.optimizer(self.parameters(), lr=self.learn_rate, weight_decay=self.weight_decay)
    return optimizer

# training step
def training_step(self, batch, batch_idx):
    x, y = batch
    b = x.size(0)
    x = x.view(b, -1, 28, 28)
    logit = self(x)
    J = self.loss(logit, y)  # loss
    #self.train_loss.append(J)  # no need to append
    acc = self.accuracy(logit, y)  # accuracy
    #self.train_acc.append(acc)  # no need to append
    self.log("train_loss_cnn", J.item())
    self.log("train_acc_cnn", acc.item())
    return {'loss': J}

# Since I use TensorBoard, I don't have to append the losses
def test_step(self, batch, batch_idx):
    p, q = batch
    b = p.size(0)
    p = p.view(b, -1, 28, 28)
    logit = self(p)
    J = self.loss(logit, q)  # loss
    acc_test = self.accuracy(logit, q)  # accuracy
    #self.train_acc.append(acc_test)  # no need to append
    #self.train_loss.append(J)  # no need to append
    self.log("test_acc_cnn", acc_test.item())
    self.log("test_loss_cnn", J.item())

def validation_step(self, batch, batch_idx=None):
    u, v = batch
    b = u.size(0)
    u = u.view(b, -1, 28, 28)
    logit = self(u)
    J = self.loss(logit, v)  # loss
    #self.val_loss.append(J)  # no need to append
    acc_val = self.accuracy(logit, v)  # accuracy
    #self.train_acc.append(acc_val)  # no need to append
    self.log("val_loss_cnn", J.item())
    self.log("val_acc_cnn", acc_val.item())
    return {"loss": J, "pred": logit, "target": v}

# Using what validation_step returns, I can plot the confusion matrix inside TensorBoard
def validation_epoch_end(self, outputs):
    preds = torch.cat([tmp['pred'] for tmp in outputs])
    targets = torch.cat([tmp['target'] for tmp in outputs])
    conf_mat = confusion_matrix(preds, targets, num_classes=10)
    df_cm = pd.DataFrame(conf_mat.numpy(), index=range(10), columns=range(10))
    plt.figure(figsize=(10, 7))
    fig_ = sns.heatmap(df_cm, annot=True, cmap='Spectral').get_figure()
    plt.close(fig_)
    self.logger.experiment.add_figure("Confusion matrix CNN", fig_, self.current_epoch)

binary classification but only one class gives results

I am trying to train a neural network. I have two classes. Precision and recall for one of the classes are 0 all the time.
Here's the code for the neural network.
class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv1d(
                in_channels=1,
                out_channels=200,
                kernel_size=4,
                stride=3,
                padding=0),
            nn.ReLU()
        )
        self.maxpool = nn.MaxPool1d(2)
        random_input = torch.rand(1, 1, 1500, requires_grad=False)
        random_input = self.conv1(random_input)
        random_input = self.maxpool(random_input)
        maxpool_out = random_input.view(1, -1).shape[1]
        self.fc1 = nn.Sequential(
            nn.Linear(
                in_features=maxpool_out,
                out_features=200
            ),
            nn.Dropout(p=0.05),
            nn.ReLU()
        )
        self.fc2 = nn.Sequential(
            nn.Linear(
                in_features=200,
                out_features=100
            ),
            nn.Dropout(p=0.05),
            nn.ReLU()
        )
        self.fc3 = nn.Sequential(
            nn.Linear(
                in_features=100,
                out_features=50
            ),
            nn.Dropout(p=0.05),
            nn.ReLU()
        )
        self.lastlayer = nn.Linear(
            in_features=50,
            out_features=1
        )

    def forward(self, x):
        # adding 1 dimension
        x = x.unsqueeze(1)
        # conv layers
        x = self.conv1(x)
        x = self.maxpool(x)
        # flatten
        x = x.reshape(x.shape[0], -1)
        # 3 fc
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        # output
        x = self.lastlayer(x)
        return x
Here's the training loop:
def binary_acc(y_pred, y_test):
    y_pred_tag = torch.round(torch.sigmoid(y_pred))
    correct_results_sum = (y_pred_tag == y_test).sum().float()
    acc = correct_results_sum / y_test.shape[0]
    acc = torch.round(acc * 100)
    return acc

def Training(model, train_loader, criterion, optimizer, epochs):
    train_losses = []
    Acc = []
    for epoch in range(epochs):
        epoch_accuracy = 0
        train_loss = 0
        total_pcaps = 0
        model.train()
        for elem in train_loader:
            pcap_byte = elem['feature'].to(device)
            labels = elem['label'].to(device)
            optimizer.zero_grad()
            outputs = model(pcap_byte)
            loss = criterion(outputs, labels.unsqueeze(1).float())
            loss.backward()
            # Updating parameters
            optimizer.step()
            total_pcaps += labels.size(0)
            acc = binary_acc(outputs, labels.unsqueeze(1).float())
            train_loss += loss.item()
            epoch_accuracy += acc.item()
        Acc.append(epoch_accuracy)
        average_loss = train_loss / len(train_loader)
        train_losses.append(train_loss)
        print('epoch %d, train_loss: %.3f' % (epoch + 1, average_loss))
After training, the precision and recall of one of the classes equal 0, while the other has precision = 1 and recall = 0.9.
Is there something wrong with the data? I collected the data myself and I can't tell whether the problem is with the data or with my code.
The problem was with the data. I was using undersampling in the pre-processing step. I deleted that part and the model performed well.
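For anyone who wants to keep an imbalanced dataset rather than resampling it, a common alternative is to weight the positive class in the loss. A minimal sketch, assuming a single-logit output like the model above and nn.BCEWithLogitsLoss (the pos_weight ratio here is hypothetical):

import torch
import torch.nn as nn

# pos_weight ~ (# negative samples) / (# positive samples)
criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([3.0]))

logits = torch.randn(8, 1)                    # one logit per sample
labels = torch.randint(0, 2, (8, 1)).float()  # binary targets
loss = criterion(logits, labels)              # mistakes on the minority class cost more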

Accuracy, training, test loss doesn't change throughout all the epochs

I am working on a text classification problem with a binary output (0 or 1). The accuracy, train loss and test loss remain the same; the accuracy is exactly the same for all the epochs. All the steps look correct. I have tried several ways to figure out what is going wrong, but nothing has worked. Please help.
class RNN(nn.Module):
    def __init__(self, num_layers, num_classes, input_size, hidden_size, vocab, dropout):
        super(RNN, self).__init__()
        self.num_layers = num_layers
        self.num_classes = num_classes
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.sequence_len = sequence_len
        self.embedding = nn.Embedding(len(vocab), input_size)
        nn.init.xavier_normal_(self.embedding.weight)
        self.rnn = nn.RNN(input_size, hidden_size, num_layers, dropout=dropout, nonlinearity='tanh', batch_first=True, bias=True, bidirectional=False)
        self.linear = nn.Linear(hidden_size, 1)
        nn.init.xavier_normal_(self.linear.weight)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        lens = list(map(len, x))
        padded = pad_sequence(x, batch_first=True)
        output_embedding = self.embedding(padded)
        packed = pack_padded_sequence(input=output_embedding, lengths=lens, batch_first=True, enforce_sorted=False)
        input_rnn = packed
        h011 = torch.zeros(1, 32, 3)
        output_11, hidden_11 = self.rnn(input_rnn, h011)
        output_padded, output_lengths = pad_packed_sequence(output_11, batch_first=True)
        final_output_11 = self.linear(hidden_11)
        prob_11 = self.sigmoid(final_output_11)
        return output_padded, hidden_11, prob_11
num_layers = 1
num_classes = 2
input_size = 5
hidden_size = 3
criterion = nn.BCELoss()
sequence_len = 1
dropout = 0.5
rnn = RNN(num_layers, num_classes, input_size, hidden_size,vocab,dropout)
epochs = 10
lr = 0.01
weight_decay=0.011
def train_loop(model, criterion, optimizer, train_loader, valid_loader, epochs):
    train_losses = []
    valid_losses = []
    for epoch in range(epochs):
        train_loss = 0
        for label, text in train_loader:
            output, hidden, prob = rnn.forward(text)
            prob = torch.tensor([item.item() for sublist in prob for item in sublist], dtype=torch.float32)
            label = torch.tensor(label, dtype=torch.float32)
            loss = criterion(prob, label)
            optimizer.zero_grad()
            loss.requires_grad = True
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
        train_loss = train_loss / len(train_loader)
        valid_loss = 0
        with torch.no_grad():
            correct = 0
            total = 0
            for label, text in valid_loader:
                output, hidden, prob = rnn.forward(text)
                prob = torch.tensor([item.item() for sublist in prob for item in sublist], dtype=torch.float32)
                label = torch.tensor(label, dtype=torch.float32)
                loss = criterion(prob, label)
                valid_loss += loss.item()
                p = torch.tensor([1 if i > 0.5 else 0 for i in prob.data], dtype=torch.float32)
                #predicted = torch.max(p, 1)
                total += label.size(0)
                correct += (p == label).sum().item()
            valid_loss = valid_loss / len(valid_loader)
            accuracy = 100 * correct / total
            print(accuracy)
            scheduler.step(accuracy)
        train_losses.append(train_loss)
        valid_losses.append(valid_loss)
        print(f'Epoch {epoch+1:<2d}/{epochs} --> Train Loss: {train_loss:.4f} | Valid Loss: {valid_loss:.4f}')

from torch.optim.lr_scheduler import StepLR
from torch.optim.lr_scheduler import ReduceLROnPlateau

optimizer = torch.optim.Adam(rnn.parameters(), lr=0.01)
scheduler = ReduceLROnPlateau(optimizer, mode='max', factor=0.4, patience=5,
                              verbose=True)  # need to change scheduler.step
train_loop(rnn, criterion, optimizer, train_loader, valid_loader, epochs)

How to make prediction on pytorch emotion detection model

I made a CNN model for emotion recognition on 5 emotions. I want to test it on a single image to get the individual class predictions for each emotion.
Evaluating the model works, but I can't seem to find out how to make a prediction with a single image. How can I do that?
The Model
def conv_block(in_channels, out_channels, pool=False):
    layers = [nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
              nn.BatchNorm2d(out_channels),
              nn.ELU(inplace=True)]
    if pool: layers.append(nn.MaxPool2d(2))
    return nn.Sequential(*layers)

class ResNet(ImageClassificationBase):
    def __init__(self, in_channels, num_classes):
        super().__init__()
        self.conv1 = conv_block(in_channels, 128)
        self.conv2 = conv_block(128, 128, pool=True)
        self.res1 = nn.Sequential(conv_block(128, 128), conv_block(128, 128))
        self.drop1 = nn.Dropout(0.5)
        self.conv3 = conv_block(128, 256)
        self.conv4 = conv_block(256, 256, pool=True)
        self.res2 = nn.Sequential(conv_block(256, 256), conv_block(256, 256))
        self.drop2 = nn.Dropout(0.5)
        self.conv5 = conv_block(256, 512)
        self.conv6 = conv_block(512, 512, pool=True)
        self.res3 = nn.Sequential(conv_block(512, 512), conv_block(512, 512))
        self.drop3 = nn.Dropout(0.5)
        self.classifier = nn.Sequential(nn.MaxPool2d(6),
                                        nn.Flatten(),
                                        nn.Linear(512, num_classes))

    def forward(self, xb):
        out = self.conv1(xb)
        out = self.conv2(out)
        out = self.res1(out) + out
        out = self.drop1(out)
        out = self.conv3(out)
        out = self.conv4(out)
        out = self.res2(out) + out
        out = self.drop2(out)
        out = self.conv5(out)
        out = self.conv6(out)
        out = self.res3(out) + out
        out = self.drop3(out)
        out = self.classifier(out)
        return out
The fit_one_cycle function is called to train the model:
@torch.no_grad()
def evaluate(model, val_loader):
    model.eval()
    outputs = [model.validation_step(batch) for batch in val_loader]
    return model.validation_epoch_end(outputs)

def get_lr(optimizer):
    for param_group in optimizer.param_groups:
        return param_group['lr']

def fit_one_cycle(epochs, max_lr, model, train_loader, val_loader,
                  weight_decay=0, grad_clip=None, opt_func=torch.optim.SGD):
    torch.cuda.empty_cache()
    history = []
    # Set up custom optimizer with weight decay
    optimizer = opt_func(model.parameters(), max_lr, weight_decay=weight_decay)
    # Set up one-cycle learning rate scheduler
    sched = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr, epochs=epochs,
                                                steps_per_epoch=len(train_loader))
    for epoch in range(epochs):
        # Training phase
        model.train()
        train_losses = []
        lrs = []
        for batch in train_loader:
            loss = model.training_step(batch)
            train_losses.append(loss)
            loss.backward()
            # Gradient clipping
            if grad_clip:
                nn.utils.clip_grad_value_(model.parameters(), grad_clip)
            optimizer.step()
            optimizer.zero_grad()
            # Record & update learning rate
            lrs.append(get_lr(optimizer))
            sched.step()
        # Validation phase
        result = evaluate(model, val_loader)
        result['train_loss'] = torch.stack(train_losses).mean().item()
        result['lrs'] = lrs
        model.epoch_end(epoch, result)
        history.append(result)
    return history
This returns the accuracy and loss; I want to change it so that it returns the prediction percentages for each class.
def accuracy(outputs, labels):
    _, preds = torch.max(outputs, dim=1)
    return torch.tensor(torch.sum(preds == labels).item() / len(preds))

class ImageClassificationBase(nn.Module):
    def training_step(self, batch):
        images, labels = batch
        out = self(images)
        loss = F.cross_entropy(out, labels)
        return loss

    def validation_step(self, batch):
        images, labels = batch
        out = self(images)
        loss = F.cross_entropy(out, labels)
        acc = accuracy(out, labels)
        return {'val_loss': loss, 'val_acc': acc}

    def validation_epoch_end(self, outputs):
        batch_losses = [x['val_loss'] for x in outputs]
        epoch_loss = torch.stack(batch_losses).mean()
        batch_accs = [x['val_acc'] for x in outputs]
        epoch_acc = torch.stack(batch_accs).mean()
        return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}

    def epoch_end(self, epoch, result):
        print("Epoch [{}], last_lr: {:.5f}, train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format(
            epoch, result['lrs'][-1], result['train_loss'], result['val_loss'], result['val_acc']))
Evaluating the model works, but I can't seem to find how to make a prediction with a single image. How can I do that?

Simply, if you have a single image, make sure to add an extra dimension of size 1 at the beginning (the batch dimension), and to use CHW format instead of HWC (or specify that within PyTorch; check out how to do that here).
For example:
my_model = CNN(...)
random_image = torch.randn(1, 3, 100, 100)  # batch of 1, 3 channels, 100x100 img
output = my_model(random_image)             # forward pass on the single image
BTW, your accuracy could be written a little more simply, like this:
def accuracy(outputs, labels):
    preds = torch.argmax(outputs, dim=1)
    return torch.sum(preds == labels) / len(preds)
Getting class probability
Similar to argmax, you can use softmax, which transforms logits (the unnormalized scores output by your network) into probabilities:
def probability(outputs):
    return torch.nn.functional.softmax(outputs, dim=1)
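Putting the pieces together, a minimal single-image prediction sketch (assuming my_model is trained as above, there are 5 emotion classes, and img is a hypothetical 3×H×W image tensor):

import torch
import torch.nn.functional as F

my_model.eval()                        # disable dropout / use running batchnorm stats
with torch.no_grad():
    batch = img.unsqueeze(0)           # (3, H, W) -> (1, 3, H, W): add the batch dim
    logits = my_model(batch)           # shape: (1, num_classes)
    probs = F.softmax(logits, dim=1)   # per-class prediction percentages
    pred = probs.argmax(dim=1).item()  # index of the most likely emotion
print(probs.squeeze().tolist(), pred)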
