I am trying to implement the code from a PyTorch beginner's tutorial, but I have written the code for loading the saved model in a separate Python file.
The FashionClassify file contains the code exactly as it is in the tutorial.
Below is the code:
from FashionClassify import NeuralNetwork
from FashionClassify import test_data
import torch
model = NeuralNetwork()
model.load_state_dict(torch.load("model.pth"))
classes = [
    "T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
    "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot",
]
model.eval()
x, y = test_data[0][0], test_data[0][1]
with torch.no_grad():
    pred = model(x)
    predicted, actual = classes[pred[0].argmax(0)], classes[y]
    print(f'Predicted: "{predicted}", Actual: "{actual}"')
However, when I run this, the entire training process starts again. Why is that?
Or is it expected behavior?
(I have gone through a couple of web pages and Stack Overflow answers but couldn't find an answer to my problem.)
FashionClassify file code:
import torch
from torch import nn
from torch.utils.data import DataLoader # wraps an iterable around dataset
from torchvision import datasets # stores samples and their label
from torchvision.transforms import ToTensor, Lambda, Compose
import matplotlib as plt
training_data = datasets.FashionMNIST(root='data', train=True, download=True, transform=ToTensor(), )
test_data = datasets.FashionMNIST(root='data', train=False, download=True, transform=ToTensor(), )
batch_size = 64
train_dataLoader = DataLoader(training_data, batch_size=batch_size)
test_dataLoader = DataLoader(test_data, batch_size=batch_size)
for X, y in test_dataLoader:
    print('Shape of X [N,C,H,W]:', X.size())
    print('Shape of y:', y.shape, y.dtype)
    break
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Using {} device'.format(device))
# to define a NN, we inherit a class from nn.Module
class NeuralNetwork(nn.Module):
    def __init__(self):
        # will specify how data will proceed in the forward pass
        super(NeuralNetwork, self).__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28 * 28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
            nn.ReLU()
        )

    def forward(self, x):
        x = self.flatten(x)
        logits = self.linear_relu_stack(x)
        return logits
model = NeuralNetwork().to(device)
print(model)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # compute prediction error
        pred = model(X)
        loss = loss_fn(pred, y)

        # backprop
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")
def test(dataloader, model):
    size = len(dataloader.dataset)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= size
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
epochs = 5
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train(train_dataLoader, model, loss_fn, optimizer)
    test(test_dataLoader, model)
print("Done!")
torch.save(model.state_dict(), "model.pth")
print("Saved PyTorch Model State to model.pth")
That's what happens when you import another file: all of its top-level code gets run again. Instead, structure your training file like this:
class FancyNetwork(nn.Module):
    [...]

def train():
    [train code]

if __name__ == "__main__":
    train()
Now train() gets called when you run this file directly, but when you import this file from another one, train() won't be called automatically.
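Applied to your files, a minimal sketch of that restructuring might look like this (it keeps the tutorial's names and assumes the training code itself is unchanged and just moves inside main()), so the inference script can import NeuralNetwork and test_data without kicking off training:

# FashionClassify.py (sketch): definitions stay at module level,
# training only runs when the file is executed directly
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor

training_data = datasets.FashionMNIST(root='data', train=True, download=True, transform=ToTensor())
test_data = datasets.FashionMNIST(root='data', train=False, download=True, transform=ToTensor())

class NeuralNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28 * 28, 512), nn.ReLU(),
            nn.Linear(512, 512), nn.ReLU(),
            nn.Linear(512, 10), nn.ReLU(),
        )

    def forward(self, x):
        x = self.flatten(x)
        return self.linear_relu_stack(x)

def main():
    # dataloaders, device, model, loss_fn, optimizer and the epoch loop
    # (i.e. everything after the class definition in your current file) go here
    ...

if __name__ == "__main__":
    main()  # runs only for "python FashionClassify.py", not on import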
Related
I am working on a multiclass image classification problem with 4k labeled images. Currently I am using cross-validation. However, I want to try nested cross-validation for this problem in PyTorch, but I couldn't find any examples.
Is it possible to use nested cross-validation in PyTorch? If so, how?
I built a simple pipeline below. Is it correct to implement nested cross-validation like this?
import torch
from torch.utils.data import DataLoader,SubsetRandomSampler
from sklearn.model_selection import KFold
from torchvision import datasets
input_size = (256,3,224,244)
target_size = (256,)
class Dataset(datasets.VisionDataset):
    def __init__(self):
        super().__init__(self)
        self.images = torch.rand(input_size).float()
        self.targets = torch.randint(0, 3, target_size)

    def __getitem__(self, index: int) -> any:
        return self.images[index], self.targets[index]

    def __len__(self) -> int:
        return len(self.images)
class BasicModel(torch.nn.Module):
    def __init__(self) -> None:
        super(BasicModel, self).__init__()
        self.conv = torch.nn.Conv2d(3, 16, kernel_size=(5, 5))
        self.adp = torch.nn.AdaptiveAvgPool2d(1)
        self.linear = torch.nn.Linear(16, 3)

    def forward(self, x):
        x = self.conv(x)
        x = self.adp(x)
        x = x.view(x.size(0), -1)
        x = self.linear(x)
        return x
data_ids = [*range(input_size[0])]
data = Dataset()
model = BasicModel()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(),lr=0.01)
k_fold = 5
kfold_test = KFold(n_splits=k_fold, shuffle=True)
num_epochs = 2
for test_fold, (remain_ids, test_ids) in enumerate(kfold_test.split(data_ids)):
    test_sampler = SubsetRandomSampler(test_ids)
    testLoader = DataLoader(data, sampler=test_sampler, batch_size=2)

    kfold_val = KFold(n_splits=k_fold - 1, shuffle=True)
    for epoch in range(num_epochs):
        for val_fold, (train_ids, val_ids) in enumerate(kfold_val.split(remain_ids)):
            train_sampler = SubsetRandomSampler(train_ids)
            trainLoader = DataLoader(data, sampler=train_sampler, batch_size=2)
            val_sampler = SubsetRandomSampler(val_ids)
            valLoader = DataLoader(data, sampler=val_sampler, batch_size=2)

            model.train()
            for image, target in trainLoader:
                with torch.cuda.amp.autocast():
                    output = model(image)
                    loss = criterion(output, target)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            print(f"train loss:{loss.item()}")

            model.eval()
            with torch.no_grad():
                for image, target in valLoader:
                    output = model(image)
                    loss = criterion(output, target)
            print(f"test loss:{loss.item()}")

    model.eval()
    with torch.no_grad():
        for image, target in testLoader:
            output = model(image)
            loss = criterion(output, target)
    print(f"test loss:{loss.item()}")
I tried to make a copy of a neural network in PyTorch and subsequently train the copied network, but training does not seem to change the weights in the network after copying. This post suggests that deepcopy is a convenient way to make a copy of a neural network, so I tried using that in my code.
The code below works just fine and shows that the weights and accuracy of the network are different after training than before training. However, when I switch to network_cp = deepcopy(network) and optimizer_cp = deepcopy(optimizer), the accuracy and weights before and after training are exactly the same.
# torch settings
torch.backends.cudnn.enabled = True
device = torch.device("cpu")
# training settings
learning_rate = 0.01
momentum = 0.5
batch_size_train = 64
batch_size_test = 1000
# get MNIST data set
train_loader, test_loader = load_mnist(batch_size_train=batch_size_train,
batch_size_test=batch_size_test)
# make a network
network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
momentum=momentum)
network.to(device)
# train network
train(network, optimizer, train_loader, device)
# copy network
network_cp = network
#network_cp = deepcopy(network)
optimizer_cp = optimizer
#optimizer_cp = deepcopy(optimizer)
# get edge weights and accuracy of the copied network
acc1 = float(test(network_cp, optimizer_cp, test_loader, device))
weights1 = np.array(get_edge_weights(network_cp))
# train copied network
train(network_cp, optimizer_cp, train_loader, device)
# get edge weights and accuracy of the copied network after training
acc2 = float(test(network_cp, optimizer_cp, test_loader, device))
weights2 = np.array(get_edge_weights(network_cp))
# compare edge weights and accuracy of copied network before and after training
print('accuracy', acc1, acc2)
print('abs diff of weights for net1 and net2', np.sum(np.abs(weights1-weights2)))
To run the code above, include these imports and function definitions:
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as tnn
import torch.nn.functional as tnf
from copy import deepcopy
import numpy as np
def load_mnist(batch_size_train=64, batch_size_test=1000):
    train_loader = torch.utils.data.DataLoader(
        torchvision.datasets.MNIST('temp/',  # '/data/users/alice/pytorch_training_files/',
                                   train=True, download=True,
                                   transform=torchvision.transforms.Compose([
                                       torchvision.transforms.ToTensor(),
                                       torchvision.transforms.Normalize(
                                           (0.1307,), (0.3081,))
                                   ])),
        batch_size=batch_size_train, shuffle=True)
    test_loader = torch.utils.data.DataLoader(
        torchvision.datasets.MNIST('temp/',  # '/data/users/alice/pytorch_training_files/',
                                   train=False, download=True,
                                   transform=torchvision.transforms.Compose([
                                       torchvision.transforms.ToTensor(),
                                       torchvision.transforms.Normalize(
                                           (0.1307,), (0.3081,))
                                   ])),
        batch_size=batch_size_test, shuffle=True)
    return train_loader, test_loader
def train(network, optimizer, train_loader, device, n_epochs=5):
    network.train()
    for epoch in range(1, n_epochs + 1):
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = network(data)
            loss = tnf.nll_loss(output, target)
            loss.backward()
            optimizer.step()
def test(network, optimizer, test_loader, device):
    network.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = network(data)
            test_loss += tnf.nll_loss(output, target, size_average=False).item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).sum()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    return float(correct) / float(len(test_loader.dataset))
def get_edge_weights(network):
    layers = [module for module in network.modules()][1:]
    output = np.zeros(1)
    for j, layer in enumerate(layers):
        weights = list(layer.parameters())[0]
        weights_arr = weights.detach().numpy()
        weights_arr = weights_arr.flatten()
        output = np.concatenate((output, weights_arr))
    return output[1:]
class Net(tnn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = tnn.Linear(784, 264)
        self.fc2 = tnn.Linear(264, 10)

    def forward(self, x):
        x = tnf.relu(self.fc1(x.view(-1, 784)))
        x = tnf.relu(self.fc2(x))
        return tnf.log_softmax(x)
After optimizer_cp = deepcopy(optimizer), optimizer_cp still holds (copies of) the old model's parameters, the ones it was built with in optimizer = optim.SGD(network.parameters(), lr=learning_rate, momentum=momentum), not the parameters of network_cp.
After deep copying the model, create a new optimizer that is told to optimize the new model's parameters:
optimizer_cp = optim.SGD(network_cp.parameters(), lr=learning_rate, momentum=momentum)
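Putting it together, a minimal sketch of the corrected copy step, reusing the names from the snippets above:

from copy import deepcopy
import torch.optim as optim

network_cp = deepcopy(network)                     # copy of the trained weights
optimizer_cp = optim.SGD(network_cp.parameters(),  # fresh optimizer bound to the copy's parameters
                         lr=learning_rate, momentum=momentum)

# training the copy now updates network_cp, leaving the original network untouched
train(network_cp, optimizer_cp, train_loader, device)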
I am trying to create a logistic model using the CIFAR10 data in PyTorch. After running the model for evaluation I run into this error:
RuntimeError: size mismatch, m1: [750 x 4096], m2: [1024 x 10] at C:\w\1\s\tmp_conda_3.7_100118\conda\conda-bld\pytorch_1579082551706\work\aten\src\TH/generic/THTensorMath.cpp:136
It seems like input_size is causing the problem, but I don't know; I am new to this. Please let me know what changes I should make in order to overcome this error.
These are the hyperparameters:
batch_size = 100
learning_rate = 0.001
# Other constants
input_size = 4*4*64
num_classes = 10
This is the cell that downloads and splits the dataset into train, validation and test.
transform = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))])
testset = torchvision.datasets.CIFAR10(root='D:\PyTorch\cifar-10-python', train=False,download=False, transform=transform)
trainvalset = torchvision.datasets.CIFAR10(root='D:\PyTorch\cifar-10-python', train=True,download=False, transform=transform)
trainset, valset = torch.utils.data.random_split(trainvalset, [45000, 5000]) # 10% for validation
train_loader = torch.utils.data.DataLoader(trainset, batch_size=50, shuffle=True)
test_loader = torch.utils.data.DataLoader(testset, batch_size=1000, shuffle=False)
val_loader = torch.utils.data.DataLoader(valset, batch_size=1000, shuffle=False)
This is the architecture of my model.
class CifarModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, xb):
        xb = xb.view(-1, 64*8*8)
        #xb = xb.reshape(-1, 784)
        print(xb.shape)
        out = self.linear(xb)
        return out

    def training_step(self, batch):
        images, labels = batch
        out = self(images)                   # Generate predictions
        loss = F.cross_entropy(out, labels)  # Calculate loss
        return loss

    def validation_step(self, batch):
        images, labels = batch
        out = self(images)                   # Generate predictions
        loss = F.cross_entropy(out, labels)  # Calculate loss
        acc = accuracy(out, labels)          # Calculate accuracy
        return {'val_loss': loss.detach(), 'val_acc': acc.detach()}

    def validation_epoch_end(self, outputs):
        batch_losses = [x['val_loss'] for x in outputs]
        epoch_loss = torch.stack(batch_losses).mean()  # Combine losses
        batch_accs = [x['val_acc'] for x in outputs]
        epoch_acc = torch.stack(batch_accs).mean()     # Combine accuracies
        return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}

    def epoch_end(self, epoch, result):
        print("Epoch [{}], val_loss: {:.4f}, val_acc: {:.4f}".format(epoch, result['val_loss'], result['val_acc']))
model = CifarModel()
def accuracy(outputs, labels):
    _, preds = torch.max(outputs, dim=1)
    return torch.tensor(torch.sum(preds == labels).item() / len(preds))

def evaluate(model, val_loader):
    outputs = [model.validation_step(batch) for batch in val_loader]
    return model.validation_epoch_end(outputs)
def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):
    history = []
    optimizer = opt_func(model.parameters(), lr)
    for epoch in range(epochs):
        # Training Phase
        for batch in train_loader:
            loss = model.training_step(batch)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        # Validation phase
        result = evaluate(model, val_loader)
        model.epoch_end(epoch, result)
        history.append(result)
    return history
evaluate(model, val_loader)
Here you are specifying that the number of output classes should be 10:
num_classes = 10
Your forward function does not reflect this: it flattens the input to a different width than the linear layer expects:
xb = xb.view(-1, 64*8*8)   # you get 750 x 4096
out = self.linear(xb)      # but the linear layer was built with
                           # input_size = 4*4*64 = 1024 and num_classes = 10,
                           # so m2 is 1024 x 10 and the shapes don't match
Modify it like this:
xb = xb.view(-1, 64*4*4)   # now the width is 1024
out = self.linear(xb)      # m1: ... x 1024, m2: 1024 x 10 -- the widths match
                           # input_size = 4*4*64 = 1024
                           # num_classes = 10
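For reference, a minimal sketch of the forward pass with that change applied (this assumes the flattened feature size per sample really is 4*4*64 = 1024, i.e. that it matches the input_size the linear layer was built with):

def forward(self, xb):
    xb = xb.view(-1, 64 * 4 * 4)   # flatten to (N, 1024) so it matches self.linear's in_features
    return self.linear(xb)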
F.nll_loss: I am getting
AttributeError: 'int' object has no attribute 'size'
when I try to run this code. The traceback also shows a snippet of the module code:
    raise ValueError('Expected 2 or more dimensions (got {})'.format(dim))

if input.size(0) != target.size(0):
    raise ValueError('Expected input batch_size ({}) to match target batch_size ({}).'
                     .format(input.size(0), target.size(0)))
import torch
from torchvision import transforms, datasets
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pylab as plt
train_dataset = datasets.MNIST(root = '', train =True, download = True,
transform =transforms.Compose([transforms.ToTensor()]))
test_dataset = datasets.MNIST(root ='', download =True, train =False,
transform =transforms.Compose([transforms.ToTensor()]))
batch_size = 10
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size, shuffle =True)
test_dataset = torch.utils.data.DataLoader(test_dataset, batch_size, shuffle =True)
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(28*28, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 64)
        self.fc4 = nn.Linear(64, 10)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc2(x))
        x = self.fc4(x)
        return F.log_softmax(x, dim=1)
x=torch.rand((28,28))
x=x.view(-1,28*28)
net =Net()
out=net(x)
out
import torch.optim as optim
optimizer =optim.Adam(net.parameters(), lr=0.001)
EPOCHS = 3
for epoch in range(EPOCHS):
    for data in train_dataset:
        x, y = data
        net.zero_grad()
        x = x.view(-1, 28*28)
        output = net(x)
        loss = F.nll_loss(output, y)
        loss.backward()
        optimizer.step()
    print(loss)
Just change the for loop from:
for data in train_dataset:
to
for data in train_loader:
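The reason the original loop fails: iterating train_dataset directly yields one (image, label) sample at a time, and the label is a plain Python int, so F.nll_loss raises the AttributeError when it calls target.size(0). The DataLoader batches the samples and converts the labels to a tensor. A minimal sketch of the corrected loop:

EPOCHS = 3
for epoch in range(EPOCHS):
    for data in train_loader:          # each data is a batch: (images, labels)
        x, y = data                    # x: (10, 1, 28, 28) tensor, y: (10,) LongTensor
        net.zero_grad()
        x = x.view(-1, 28*28)
        output = net(x)
        loss = F.nll_loss(output, y)   # target is now a tensor, so .size() works
        loss.backward()
        optimizer.step()
    print(loss)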
Environment:
TF2.0
Python 3.5
Ubuntu 16.04
Problem:
I am trying to use the pre-trained MobileNetV2, but the accuracy doesn't increase:
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
The script is copied from the TensorFlow 2.0 tutorial (https://www.tensorflow.org/tutorials/images/transfer_learning?hl=zh-cn).
The only change I made is the dataset fed into the network. The original code does binary classification between dogs and cats, and everything works. However, the accuracy never increases when using multi-class datasets like 'mnist' or 'tf_flowers'. Please note that I used the correct loss function and metrics.
Naive model and results:
Keras MobileNetV2:
Here is the code:
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Flatten, Conv2D, GlobalAveragePooling2D
from tensorflow.keras import Model
keras = tf.keras
import tensorflow_datasets as tfds
# tfds.disable_progress_bar()
IMG_SIZE = 224
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
def format_example(image, label):
    if image.shape[-1] == 1:
        image = tf.concat([image, image, image], 2)
    image = tf.cast(image, tf.float32)
    image = (image/127.5) - 1
    image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
    return image, label
##----functional model----##
class TinyModel():
    def __init__(self, num_classes, hiddens=32, input_shape=IMG_SHAPE):
        import tensorflow as tf
        self.num_classes = num_classes
        self.input_shape = input_shape
        self.hiddens = hiddens

    def build(self):
        inputs = Input(shape=self.input_shape)
        x = Conv2D(16, 3, activation="relu", strides=2)(inputs)
        x = Conv2D(32, 3, activation="relu", strides=2)(x)
        x = Conv2D(32, 3, activation="relu", strides=2)(x)
        x = Conv2D(16, 3, activation="relu")(x)
        x = Flatten()(x)
        x = Dense(self.hiddens, activation="relu")(x)
        outputs = Dense(self.num_classes, activation="softmax")(x)
        model = Model(inputs=inputs, outputs=outputs, name='my_model')
        return model
def assemble_model(num_classes, model_name='MobileNetV2'):
    import tensorflow as tf
    base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
                                                   weights='imagenet',
                                                   include_top=False)
    model = tf.keras.Sequential([
        base_model,
        GlobalAveragePooling2D(),
        Dense(num_classes, activation='softmax')
    ])
    model.trainable = True
    return model
## ---- dataset preparation -----##
SPLIT_WEIGHTS = (8, 1, 1)
splits = tfds.Split.TRAIN.subsplit(weighted=SPLIT_WEIGHTS)
(raw_train, raw_validation, raw_test), metadata = tfds.load(
'tf_flowers', split=list(splits),
with_info=True, as_supervised=True)
get_label_name = metadata.features['label'].int2str
train = raw_train.map(format_example)
validation = raw_validation.map(format_example)
test = raw_test.map(format_example)
BATCH_SIZE = 32
SHUFFLE_BUFFER_SIZE = 1000
train_ds = train.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE)
validation_ds = validation.batch(BATCH_SIZE)
test_ds = test.batch(BATCH_SIZE)
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
## ----- model config ---- ##
# Create an instance of the model
model = TinyModel(num_classes=5).build() # model 1
# model = assemble_model(num_classes=5) # model 2
model.summary()
## ----- training config -----##
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
## ----- training loop -----##
#tf.function
def train_step(images, labels):
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    train_loss(loss)
    train_accuracy(labels, predictions)
#tf.function
def test_step(images, labels):
    predictions = model(images)
    t_loss = loss_object(labels, predictions)

    test_loss(t_loss)
    test_accuracy(labels, predictions)
EPOCHS = 5
for epoch in range(EPOCHS):
    # Reset the metrics at the start of the next epoch
    train_loss.reset_states()
    train_accuracy.reset_states()
    test_loss.reset_states()
    test_accuracy.reset_states()

    for images, labels in train_ds:
        train_step(images, labels)

    for test_images, test_labels in test_ds:
        test_step(test_images, test_labels)

    template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
    print(template.format(epoch+1,
                          train_loss.result(),
                          train_accuracy.result()*100,
                          test_loss.result(),
                          test_accuracy.result()*100))
---------------------- SOLVED -----------------------
Solution: add the argument training=True when calling a keras.applications model during training. For example:
model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE, weights="imagenet", include_top=False)
pred = model(inputs, training=True)
The cause is most likely the BatchNorm layers. Models that contain BN layers work well in the built-in Keras training loop, model.fit(), with nothing extra to take care of. However, they cannot learn anything in a custom training loop if you forget to set training=True when calling model().
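Concretely, in the custom loop above only the forward calls need to change; a minimal sketch (reusing the model, loss_object, optimizer and metric objects already defined in the script) passes training=True during training and training=False at evaluation so the BatchNorm layers use the right statistics:

def train_step(images, labels):
    with tf.GradientTape() as tape:
        predictions = model(images, training=True)   # run BN (and dropout) in training mode
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(labels, predictions)

def test_step(images, labels):
    predictions = model(images, training=False)      # use the moving BN statistics at eval time
    t_loss = loss_object(labels, predictions)
    test_loss(t_loss)
    test_accuracy(labels, predictions)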
The problem is that you set all your parameters to be non-trainable; check the summary of the model and you will see the parameters counted as non-trainable. Change this line (or just delete it):
base_model.trainable = False
to
base_model.trainable = True
and everything will work fine.