pytorch vgg model test on one image - python

I've trained a VGG model; this is how I transformed the test data:
test_transform_2 = transforms.Compose([transforms.RandomResizedCrop(224),
                                       transforms.ToTensor()])
test_data = datasets.ImageFolder(test_dir, transform=test_transform_2)
The model has finished training; now I want to test it on a single image:
from scipy import misc
test_image = misc.imread('flower_data/valid/1/image_06739.jpg')
vgg16(torch.from_numpy(test_image))
Error
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-60-b83587325fea> in <module>
----> 1 vgg16(torch.from_numpy(test_image))
c:\users\sam\mydocu~1\code\envs\data-science\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
475 result = self._slow_forward(*input, **kwargs)
476 else:
--> 477 result = self.forward(*input, **kwargs)
478 for hook in self._forward_hooks.values():
479 hook_result = hook(self, input, result)
c:\users\sam\mydocu~1\code\envs\data-science\lib\site-packages\torchvision\models\vgg.py in forward(self, x)
40
41 def forward(self, x):
---> 42 x = self.features(x)
43 x = x.view(x.size(0), -1)
44 x = self.classifier(x)
c:\users\sam\mydocu~1\code\envs\data-science\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
475 result = self._slow_forward(*input, **kwargs)
476 else:
--> 477 result = self.forward(*input, **kwargs)
478 for hook in self._forward_hooks.values():
479 hook_result = hook(self, input, result)
c:\users\sam\mydocu~1\code\envs\data-science\lib\site-packages\torch\nn\modules\container.py in forward(self, input)
89 def forward(self, input):
90 for module in self._modules.values():
---> 91 input = module(input)
92 return input
93
c:\users\sam\mydocu~1\code\envs\data-science\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
475 result = self._slow_forward(*input, **kwargs)
476 else:
--> 477 result = self.forward(*input, **kwargs)
478 for hook in self._forward_hooks.values():
479 hook_result = hook(self, input, result)
c:\users\sam\mydocu~1\code\envs\data-science\lib\site-packages\torch\nn\modules\conv.py in forward(self, input)
299 def forward(self, input):
300 return F.conv2d(input, self.weight, self.bias, self.stride,
--> 301 self.padding, self.dilation, self.groups)
302
303
RuntimeError: Expected 4-dimensional input for 4-dimensional weight [64, 3, 3, 3], but got input of size [628, 500, 3] instead
I can tell I need to reshape the input, but I don't know how, given that the model seems to expect the input in the form of a batch.

Your image is [h, w, 3], where 3 is the RGB channels, and PyTorch expects [b, 3, h, w], where b is the batch size. You can do that reshaping by calling reshaped = img.permute(2, 0, 1).unsqueeze(0). I think there is also a utility function for that somewhere, but I can't find it right now.
So in your case
tensor = torch.from_numpy(test_image).float() / 255  # conv weights are float; scale to [0, 1] to match ToTensor used in training
reshaped = tensor.permute(2, 0, 1).unsqueeze(0)
your_result = vgg16(reshaped)
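For completeness, the utility function alluded to above is most likely transforms.ToTensor (or torchvision.transforms.functional.to_tensor), which does the HWC-to-CHW move and the uint8-to-float [0, 1] scaling in one step. A minimal sketch of the whole single-image path, assuming the same flower image and a deterministic Resize/CenterCrop in place of the random training-time crop:
from PIL import Image
import torch
from torchvision import transforms

# Deterministic test-time preprocessing; ToTensor converts the uint8
# HxWxC image to a float CxHxW tensor scaled to [0, 1].
test_transform = transforms.Compose([transforms.Resize(256),
                                     transforms.CenterCrop(224),
                                     transforms.ToTensor()])

img = Image.open('flower_data/valid/1/image_06739.jpg')
batch = test_transform(img).unsqueeze(0)  # [3, 224, 224] -> [1, 3, 224, 224]

vgg16.eval()             # disable dropout for inference
with torch.no_grad():    # no gradients needed for a single prediction
    logits = vgg16(batch)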

Related

How do I resolve the error "IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)"?

I tried my best to make a minimal reproducible example: there's an issue in my train() function where, on the line output = model(data), I get the error IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1). I also get an error before that, but I cannot decipher its meaning. I've included the full traceback in this message.
I've seen other users post about the same error message, but each one has a different solution; I used the debugger to look into data.location but I'm still unable to resolve the problem. I'm using PySyft v0.2.9.
import torch
import syft as sy
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from auto_esn.esn.esn import ESNBase
import torch.nn.functional as F
import torch.optim as optim
from auto_esn.esn.esn import GroupedDeepESN
from auto_esn.esn.reservoir.util import NRMSELoss
class CarHackingDataset(Dataset):
    """
    Loading the Car-Hacking Dataset from
    https://ocslab.hksecurity.net/Datasets/car-hacking-dataset

    Args:
        csv_file: A path to the dataset file which has the extension CSV.
        root_dir: The directory of the parent folder of the dataset.
        transform (callable, optional): Optional transform to be applied on a sample.
    """
    def __init__(self, csv_file: str, root_dir: str, transform=None):
        self.car_hacking_frame = pd.read_csv(csv_file)[:10000]
        self.root_dir = root_dir
        self.transform = transform

    def __getitem__(self, idx):
        '''Grabs relevant features from the dataset.'''
        if torch.is_tensor(idx):
            idx = idx.tolist()
        features = ['Timestamp', 'DLC', 'CAN_ID', 'Data']
        X_train = self.car_hacking_frame.loc[:, features].values
        X_train_scaled = StandardScaler().fit_transform(X_train)
        X_train_scaled = torch.as_tensor(X_train_scaled)

        # It looks like it's a bad idea to encode features.
        # https://stackoverflow.com/questions/61217713/labelencoder-for-categorical-features
        class_le = LabelEncoder()
        target = class_le.fit_transform(self.car_hacking_frame['Flag'].values)
        target = torch.as_tensor(target)

        return X_train_scaled[idx], target[idx]

    def __len__(self):
        return len(self.car_hacking_frame)
train_dataset = CarHackingDataset(csv_file='/content/car_hacking_data/clean_fuzzy_dataset.csv',
                                  root_dir='/content/car_hacking_data')

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

hook = sy.TorchHook(torch)
car1 = sy.VirtualWorker(hook, id="car1")
car2 = sy.VirtualWorker(hook, id="car2")

args = {
    'batch_size': 32,
    'epochs': 1
}

federated_train_loader = sy.FederatedDataLoader(train_dataset.federate((car1, car2)),
                                                batch_size=args['batch_size'], shuffle=True)

# Initializing the loss function, which is probably a variation of mean squared error.
nrmse = NRMSELoss()

def train(model, device, federated_train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(federated_train_loader):
        model = model.send(data.location)
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = nrmse(output, target)
        loss.backward()
        optimizer.step()
        model.get()
        if batch_idx % 10 == 0:
            loss = loss.get()
            print(f'''Train Epoch: {epoch} [{(batch_idx * args['batch_size'])}/{(len(federated_train_loader) * args['batch_size'])}'''
                  + f'''({100. * batch_idx / len(federated_train_loader):.0f}%)]\tLoss: {loss.item():.6f}''')

model = GroupedDeepESN().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01)

for epoch in range(1, args['batch_size'] + 1):
    train(model, device, federated_train_loader, optimizer, epoch)
Traceback of the error message:
---------------------------------------------------------------------------
PureFrameworkTensorFoundError Traceback (most recent call last)
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
336 new_args, new_kwargs, new_type, args_type = hook_args.unwrap_args_from_function(
--> 337 cmd, args_, kwargs_, return_args_type=True
338 )
20 frames
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook_args.py in unwrap_args_from_function(attr, args_, kwargs_, return_args_type)
166 # Run it
--> 167 new_args = args_hook_function(args_)
168
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook_args.py in <lambda>(x)
355
--> 356 return lambda x: f(lambdas, x)
357
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook_args.py in three_fold(lambdas, args_, **kwargs)
534 lambdas[0](args_[0], **kwargs),
--> 535 lambdas[1](args_[1], **kwargs),
536 lambdas[2](args_[2], **kwargs),
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook_args.py in <lambda>(i)
330 # Last if not, rule is probably == 1 so use type to return the right transformation.
--> 331 else lambda i: forward_func[type(i)](i)
332 for a, r in zip(args_, rules) # And do this for all the args / rules provided
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/hook/hook_args.py in <lambda>(i)
23 if hasattr(i, "child")
---> 24 else (_ for _ in ()).throw(PureFrameworkTensorFoundError),
25 torch.nn.Parameter: lambda i: i.child
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/hook/hook_args.py in <genexpr>(.0)
23 if hasattr(i, "child")
---> 24 else (_ for _ in ()).throw(PureFrameworkTensorFoundError),
25 torch.nn.Parameter: lambda i: i.child
PureFrameworkTensorFoundError:
During handling of the above exception, another exception occurred:
IndexError Traceback (most recent call last)
<ipython-input-6-c9ac87b98598> in <module>
24
25 for epoch in range(1, args['batch_size'] + 1):
---> 26 train(model, device, federated_train_loader, optimizer, epoch)
<ipython-input-6-c9ac87b98598> in train(model, device, federated_train_loader, optimizer, epoch)
7 data, target = data.to(device), target.to(device)
8 optimizer.zero_grad()
----> 9 output = model(data)
10
11 loss = nrmse(output, target)
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
--> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/dist-packages/auto_esn/esn/esn.py in forward(self, input)
31 mapped_input = self.reservoir(input)
32
---> 33 return self.readout(mapped_input)
34
35 def reset_hidden(self):
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
--> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/dist-packages/auto_esn/esn/readout/svr_readout.py in forward(self, input)
10
11 def forward(self, input: Tensor) -> Tensor:
---> 12 return self.readout(input)
13
14 def fit(self, input: Tensor, target: Tensor):
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
--> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/linear.py in forward(self, input)
85
86 def forward(self, input):
---> 87 return F.linear(input, self.weight, self.bias)
88
89 def extra_repr(self):
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook.py in overloaded_func(*args, **kwargs)
333 handle_func_command = syft.framework.Tensor.handle_func_command
334
--> 335 response = handle_func_command(command)
336
337 return response
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
378 # in the execute_command function
379 try:
--> 380 response = cls._get_response(cmd, args_, kwargs_)
381 except AttributeError:
382 # Change the library path to avoid errors on layers like AvgPooling
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/tensors/interpreters/native.py in _get_response(cmd, args_, kwargs_)
412
413 if isinstance(args_, tuple):
--> 414 response = command_method(*args_, **kwargs_)
415 else:
416 response = command_method(args_, **kwargs_)
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in linear(input, weight, bias)
1368 if input.dim() == 2 and bias is not None:
1369 # fused op is marginally faster
-> 1370 ret = torch.addmm(bias, input, weight.t())
1371 else:
1372 output = input.matmul(weight.t())
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook.py in overloaded_func(*args, **kwargs)
333 handle_func_command = syft.framework.Tensor.handle_func_command
334
--> 335 response = handle_func_command(command)
336
337 return response
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
378 # in the execute_command function
379 try:
--> 380 response = cls._get_response(cmd, args_, kwargs_)
381 except AttributeError:
382 # Change the library path to avoid errors on layers like AvgPooling
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/tensors/interpreters/native.py in _get_response(cmd, args_, kwargs_)
412
413 if isinstance(args_, tuple):
--> 414 response = command_method(*args_, **kwargs_)
415 else:
416 response = command_method(args_, **kwargs_)
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
I thought it might also be important to include the shape of my data inside the train function's loop over the federated data: Data shape: torch.Size([32, 4]), Target shape: torch.Size([32]).

Pytorch: non-positive stride is not supported

I have some MRI scans, where each scan is a set of 31 RGB images. The dimensions of the input data are (Channels, Depth, Height, Width). The images are png, and each scan is a folder containing its 31 images.
I created a custom Dataset class:
class TrainImages(Dataset):
    def __init__(self, csv_file, root_dir, transform=None):
        self.annotations = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.annotations)

    def __getitem__(self, index):
        img_path = os.path.join(self.root_dir, str(self.annotations.iloc[index, 0]).zfill(5))
        image = torch.from_numpy(np.array([np.array(Image.open(os.path.join(str(img_path), "rgb-" + str(i) + ".png")))
                                           for i in range(31)]).transpose(3, 0, 1, 2).astype(np.float32))
        y_label = torch.tensor(int(self.annotations.iloc[index, 1]))
        return (image, y_label)
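To double-check that layout, a small sketch (assuming 512x512 frames) of what the transpose in __getitem__ produces:
import numpy as np

# 31 frames of (H, W, 3) stacked -> (31, H, W, 3); transpose(3, 0, 1, 2)
# reorders that to (3, 31, H, W), i.e. (Channels, Depth, Height, Width).
stack = np.zeros((31, 512, 512, 3), dtype=np.uint8)
print(stack.transpose(3, 0, 1, 2).shape)  # (3, 31, 512, 512)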
Then, I created a small 3D CNN class:
class CNN2(nn.Module):
    def __init__(self):
        super(CNN2, self).__init__()
        self.conv_layer1 = self._conv_layer(3, 12)

    def _conv_layer(self, in_c, out_c, conv_kernel_size=3, padding=0):
        layer = nn.Sequential(
            nn.Conv3d(in_c, out_c, conv_kernel_size, padding),
        )
        return layer

    def forward(self, x):
        out = self.conv_layer1(x)
        return out
Then, I tried to feed one scan into the CNN2 object:
x = torch.unsqueeze(dataset[0][0], 0)
x.shape  # torch.Size([1, 3, 31, 512, 512])
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data = x.to(device)
model = CNN2().to(device)
model(x)
But it produces this error:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-24-c89306854a22> in <module>
1 model = CNN_test().to(device)
----> 2 model(x)
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
<ipython-input-22-ac66ca7a2459> in forward(self, x)
14
15 def forward(self, x):
---> 16 out = self.conv_layer1(x)
17
18 return out
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/container.py in forward(self, input)
115 def forward(self, input):
116 for module in self:
--> 117 input = module(input)
118 return input
119
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/conv.py in forward(self, input)
571 self.dilation, self.groups)
572 return F.conv3d(input, self.weight, self.bias, self.stride,
--> 573 self.padding, self.dilation, self.groups)
574
575
RuntimeError: non-positive stride is not supported
However, when I just create a Conv3d object and pass the same scan in, no error results:
x = torch.unsqueeze(dataset[0][0], 0)
m = nn.Conv3d(3, 12, 3)
out = m(x)
I think the error might have to do with the dimensions of the input data, but I don't understand what "non-positive stride" means. I'm also confused about why no error occurs when I just pass the data into a Conv3d object, but an error occurs when I pass the same data into an instance of the CNN class that does the same thing.
The issue is not with your input shape; it has to do with your layer initialization. You have essentially defined your 3D convolution with this line:
nn.Conv3d(in_c, out_c, conv_kernel_size, padding)
The problem is that the signature of nn.Conv3d is the following:
torch.nn.Conv3d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', device=None, dtype=None)
Notice how stride is placed before padding. In your code, the variable padding ends up being assigned to the stride argument.
To solve the issue, pass the argument by name, i.e. padding=padding. This removes the ambiguity between the positional arguments stride and padding.
class CNN2(nn.Module):
    def __init__(self):
        super(CNN2, self).__init__()
        self.conv_layer1 = self._conv_layer(3, 12)

    def _conv_layer(self, in_c, out_c, conv_kernel_size=3, padding=0):
        layer = nn.Sequential(
            nn.Conv3d(in_c, out_c, conv_kernel_size, padding=padding))
        return layer

    def forward(self, x):
        out = self.conv_layer1(x)
        return out
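To reproduce the pitfall in isolation, here is a minimal sketch (in the PyTorch version from the traceback, the complaint only surfaces on the forward pass, since that is where stride is actually used):
import torch
import torch.nn as nn

x = torch.randn(1, 3, 31, 64, 64)       # small dummy scan, (N, C, D, H, W)

# The 0 meant as padding lands in the stride slot; depending on the
# PyTorch version the error is raised here or on the forward pass.
bad = nn.Conv3d(3, 12, 3, 0)
# bad(x)                                # -> RuntimeError: non-positive stride is not supported

good = nn.Conv3d(3, 12, 3, padding=0)   # stride keeps its default of 1
print(good(x).shape)                    # torch.Size([1, 12, 29, 62, 62])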

How to create new layer for Resnet?

I'm practicing transfer learning by adapting and fine-tuning ResNet18 for CIFAR10. I want to replace the last fc layer with a new fc layer, but I haven't managed to do it. How can I create a new layer?
Download ResNet18:
OrigResNet18 = None
OrigResNet18 = torch.hub.load('pytorch/vision:v0.9.0', 'resnet18', pretrained=True)
Last layer:
(fc): Linear(in_features=512, out_features=1000, bias=True)
I tried this code, but I am not sure about it:
num_in_features = OrigResNet18.fc.in_features
num_out_features = OrigResNet18.fc.out_features
NewResNet18.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=(3, 3))
NewResNet18.fc = nn.Linear(in_features=num_in_features, out_features=num_out_features)
and I get this error:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-58-f8fe748d1e75> in <module>()
32 NewResNet18 = NewResNet18.to(device)
33 epochs = 1
---> 34 loss_history = train(NewResNet18, criterion, optimizer, epochs, trainloader)
6 frames
<ipython-input-57-a35bfc25b940> in train(model, criterion, optimizer, epochs, dataloader, verbose)
19
20 # Obtain the scores
---> 21 outputs = model(inputs)
22
23 # Calculate loss
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
887 result = self._slow_forward(*input, **kwargs)
888 else:
--> 889 result = self.forward(*input, **kwargs)
890 for hook in itertools.chain(
891 _global_forward_hooks.values(),
/usr/local/lib/python3.7/dist-packages/torchvision/models/resnet.py in forward(self, x)
247
248 def forward(self, x: Tensor) -> Tensor:
--> 249 return self._forward_impl(x)
250
251
/usr/local/lib/python3.7/dist-packages/torchvision/models/resnet.py in _forward_impl(self, x)
230 def _forward_impl(self, x: Tensor) -> Tensor:
231 # See note [TorchScript super()]
--> 232 x = self.conv1(x)
233 x = self.bn1(x)
234 x = self.relu(x)
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
887 result = self._slow_forward(*input, **kwargs)
888 else:
--> 889 result = self.forward(*input, **kwargs)
890 for hook in itertools.chain(
891 _global_forward_hooks.values(),
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/conv.py in forward(self, input)
397
398 def forward(self, input: Tensor) -> Tensor:
--> 399 return self._conv_forward(input, self.weight, self.bias)
400
401 class Conv3d(_ConvNd):
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/conv.py in _conv_forward(self, input, weight, bias)
394 _pair(0), self.dilation, self.groups)
395 return F.conv2d(input, weight, bias, self.stride,
--> 396 self.padding, self.dilation, self.groups)
397
398 def forward(self, input: Tensor) -> Tensor:
RuntimeError: Given groups=1, weight of size [16, 1, 3, 3], expected input[8, 3, 224, 224] to have 1 channels, but got 3 channels instead
The error is coming from this line:
NewResNet18.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=(3, 3))
You are changing the first convolution to have 1 input channel (i.e. a grayscale image), but you are feeding it a 3-channel image (i.e. an RGB image).
If you just want to change the size of the classifier, you can use:
num_in_features = OrigResNet18.fc.in_features
num_out_features = OrigResNet18.fc.out_features
NewResNet18.fc = nn.Linear(in_features=num_in_features, out_features=num_out_features)
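One caveat, since the goal is CIFAR10: the snippet above keeps the original 1000 output features. CIFAR10 has 10 classes, so the head would normally be resized, e.g. (a sketch reusing the same torch.hub checkpoint as above):
import torch
import torch.nn as nn

NewResNet18 = torch.hub.load('pytorch/vision:v0.9.0', 'resnet18', pretrained=True)

# Keep the pretrained 3-channel stem untouched and swap only the classifier
# head: 512 incoming features, 10 outputs for the 10 CIFAR10 classes.
NewResNet18.fc = nn.Linear(in_features=NewResNet18.fc.in_features, out_features=10)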

Pytorch got CUDA error: device-side assert triggered when training conv1d classifier

I'm implementing a CNN multi-label classifier with PyTorch, but it always shows this error:
"CUDA error: device-side assert triggered". The traceback points at the loss function, but when I change the loss function the error still occurs and points at other parts (seemingly picked at random). When I switched to CPU, it said "index out of range in self"; however, when I inspected my dataloader, nothing looked weird.
I have 15 classes, 59462 unique tokens, and each document is 30,000 tokens long.
My model and loss function are like this:
class model(nn.Module):
    def __init__(self, num_classes=15):
        super(model, self).__init__()
        self.embedding = nn.Sequential(nn.Embedding(59462, 400), nn.Dropout(0.15))
        self.features = nn.Sequential(
            nn.Conv1d(400, 500, kernel_size=3, stride=1, padding=False), nn.ReLU(),
            nn.Dropout(0.05), nn.MaxPool1d(kernel_size=2), nn.Dropout(0.15))
        self.linear = nn.Linear(500 * 14999, 15)

    def forward(self, x):
        x = self.embedding(x)
        x = x.permute(0, 2, 1)
        x = self.features(x)
        x = x.view(x.size(0), 500 * 14999)
        x = self.linear(x)
        return x

model = model()
model = model.to(device)

def loss_fn(outputs, targets):
    return torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight.to(device))(outputs, targets).to(device)
# pos_weight is for my unbalanced data
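For reference, this is roughly how I checked the dataloader (a sketch; data_loader and the "tokens" key stand in for my actual objects). As far as I understand, nn.Embedding(59462, 400) only accepts indices in [0, 59461]:
lo, hi = float('inf'), float('-inf')
for d in data_loader:
    t = d["tokens"]
    lo = min(lo, t.min().item())
    hi = max(hi, t.max().item())
# An id outside [0, 59461] would explain "index out of range in self" on CPU
# and the device-side assert on CUDA.
print(lo, hi)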
This is the error message when using CPU:
/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py in run_cell_magic(self, magic_name, line, cell)
2115 magic_arg_s = self.var_expand(line, stack_depth)
2116 with self.builtin_trap:
-> 2117 result = fn(magic_arg_s, cell)
2118 return result
2119
<decorator-gen-60> in time(self, line, cell, local_ns)
/usr/local/lib/python3.6/dist-packages/IPython/core/magic.py in <lambda>(f, *a, **k)
186 # but it's overkill for just that one bit of state.
187 def magic_deco(arg):
--> 188 call = lambda f, *a, **k: f(*a, **k)
189
190 if callable(arg):
/usr/local/lib/python3.6/dist-packages/IPython/core/magics/execution.py in time(self, line, cell, local_ns)
1191 else:
1192 st = clock2()
-> 1193 exec(code, glob, local_ns)
1194 end = clock2()
1195 out = None
<timed exec> in <module>()
<ipython-input-67-09829392983d> in train_epoch(model, data_loader, loss_fn, optimizer, device, scheduler, n_examples)
27 targets = d["targets"].to(device)
28 outputs = model(
---> 29 tokens
30 )
31
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
<ipython-input-59-2c2ec554cb05> in forward(self, x)
16
17 def forward(self, x):
---> 18 x = self.embedding(x)
19 x = x.permute(0,2,1)
20 x = self.features(x)
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/container.py in forward(self, input)
115 def forward(self, input):
116 for module in self:
--> 117 input = module(input)
118 return input
119
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/sparse.py in forward(self, input)
124 return F.embedding(
125 input, self.weight, self.padding_idx, self.max_norm,
--> 126 self.norm_type, self.scale_grad_by_freq, self.sparse)
127
128 def extra_repr(self) -> str:
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
1850 # remove once script supports set_grad_enabled
1851 _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
-> 1852 return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
1853
1854
IndexError: index out of range in self
Does anyone know what this problem is, and how I can solve it?
Thanks in advance.

RuntimeError: size mismatch, m1: [672 x 224], m2: [672 x 224]

While working on Udacity's AI course I encountered the error in the title.
At first it looked like the image wasn't formatted for the model; however, the course instructions have us format the images using resize, so I get an image of dimensions 3x224x224, that is, 3 color channels and 224 by 224 pixels. The image array is in the required dimensions, [3, 224, 224].
Return/Output:
(3, 224, 224)
(3, 224, 224)
torch.Size([1, 3, 224, 224])
Next, I figured it was the model, but when I go back before the training loop and change the original model to fix this error, I get a similar error in reverse, "upstream".
I want this predict function to use my model to predict the category of an image.
Posted are:
Original classifier
Save checkpoint block
Process image function
Predict image function
classifier = nn.Sequential(OrderedDict([
    ('fc1', nn.Linear(25088, 500)),
    ('relu', nn.ReLU()),
    ('drop', nn.Dropout(0.25)),
    ('fc2', nn.Linear(500, 102)),
    ('output', nn.LogSoftmax(dim=1))
]))
model.load_state_dict(state_dict)

checkpoint = {'input_size': 672,
              'output_size': 102,
              'hidden_layers': 224,
              'state_dict': model.state_dict()}
torch.save(checkpoint, 'checkpoint.pth')
def predict(image_path, model, topk=5):
    ''' Predict the class (or classes) of an image using a trained deep learning model.
    '''
    model = model.cuda()
    model.eval()

    # TODO: Implement the code to predict the class from an image file
    pil_img = Image.open(image_path)
    #print(pil_img)
    processed_image = process_image(pil_img)
    print(processed_image.shape)
    torch_image = torch.from_numpy(processed_image)
    torch_image = torch_image.unsqueeze_(0)
    print(torch_image.shape)
    torch_image = torch_image.float().to('cuda')
    output = model.forward(torch_image)
    #top_k= predict.topk(topk)
    #print(top_k)
    return torch_img
image_path = 'flowers/test/8/image_03299.jpg'
predict(image_path, model, 5)
RuntimeError Traceback (most recent call last)
<ipython-input-21-d84c5999ad68> in <module>()
23
24 image_path = 'flowers/test/8/image_03299.jpg'
---> 25 predict(image_path, model, 5)
<ipython-input-21-d84c5999ad68> in predict(image_path, model, topk)
16 print(torch_image.shape)
17 torch_image= torch_image.float().to('cuda')
---> 18 output= model.forward(torch_image)
19 #top_k= predict.topk(topk)
20 #print(top_k)
/opt/conda/lib/python3.6/site-packages/torch/nn/modules/container.py in forward(self, input)
89 def forward(self, input):
90 for module in self._modules.values():
---> 91 input = module(input)
92 return input
93
/opt/conda/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
489 result = self._slow_forward(*input, **kwargs)
490 else:
--> 491 result = self.forward(*input, **kwargs)
492 for hook in self._forward_hooks.values():
493 hook_result = hook(self, input, result)
/opt/conda/lib/python3.6/site-packages/torch/nn/modules/linear.py in forward(self, input)
53
54 def forward(self, input):
---> 55 return F.linear(input, self.weight, self.bias)
56
57 def extra_repr(self):
/opt/conda/lib/python3.6/site-packages/torch/nn/functional.py in linear(input, weight, bias)
992 return torch.addmm(bias, input, weight.t())
993
--> 994 output = input.matmul(weight.t())
995 if bias is not None:
996 output += bias
RuntimeError: size mismatch, m1: [672 x 224], m2: [672 x 224] at /opt/conda/conda-bld/pytorch_1524584710464/work/aten/src/THC/generic/THCTensorMathBlas.cu:249
