Having a problem creating a DataLoader class in PyTorch

I am building a neural network for Bengali numerical digit classification using PyTorch. I am facing difficulties building the dataset class to load my dataset with a data loader. I have a folder with all the images (numerical digits from 0-9) and a CSV file with 2 columns: the first column contains the name of the image, and the second contains the label (0-9). This is my dataset class, which probably isn't what is causing the error.
class BDRWDataset(Dataset):
    """BDRW dataset."""

    def __init__(self, csv_file, imgs_dir, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file with labels.
            imgs_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.labels = pd.read_csv(csv_file).iloc[:, 1].to_numpy().reshape(-1, 1)
        self.imgs_dir = imgs_dir
        self.transform = transform

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_path = '/content/BDRW_train/digit_' + str(idx) + '.jpg'
        image = io.imread(img_name, plugin='matplotlib')
        image = Image.fromarray(np.uint8(image))
        label = self.labels[idx]
        label = float(label)
        if self.transform:
            image = self.transform(image)
        return (image, label)
I create an instance of this class.
transformed_dataset = BDRWDataset(csv_file='/content/labels.csv', imgs_dir='/content/BDRW_train',
                                  transform=transforms.Compose([
                                      Rescale((28, 28)),
                                      transforms.Normalize((0.5,), (0.5,)),
                                      ToTensor()
                                  ]))
I have defined Rescale and ToTensor as follows:
class Rescale(object):
    """Rescale the image in a sample to a given size.

    Args:
        output_size (tuple or int): Desired output size. If tuple, output is
            matched to output_size. If int, smaller of image edges is matched
            to output_size keeping aspect ratio the same.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, sample):
        image, label = sample
        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            if h > w:
                new_h, new_w = self.output_size * h / w, self.output_size
            else:
                new_h, new_w = self.output_size, self.output_size * w / h
        else:
            new_h, new_w = self.output_size
        new_h, new_w = int(new_h), int(new_w)
        img = transform.resize(image, (new_h, new_w))
        return img, label
class ToTensor(object):
    """Convert ndarrays in sample to Tensors."""

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # swap color axis because
        # numpy image: H x W x C
        # torch image: C x H x W
        image = image.transpose((2, 0, 1))
        return (torch.from_numpy(image), torch.from_numpy(label))
I split the dataset into train and test sets and created a train loader and a validation loader using torch.utils.data.DataLoader.
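The exact split code is in the notebook linked at the end of the question; a typical version of what is meant here, with the split sizes and batch size assumed for illustration only, looks like:

from torch.utils.data import DataLoader, random_split

# Assumed 80/20 split and batch size -- the actual values are in the notebook.
train_size = int(0.8 * len(transformed_dataset))
val_size = len(transformed_dataset) - train_size
train_dataset, val_dataset = random_split(transformed_dataset, [train_size, val_size])

trainloader = DataLoader(train_dataset, batch_size=32, shuffle=True)
valloader = DataLoader(val_dataset, batch_size=32, shuffle=False)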
The neural network is
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.fc = nn.Linear(7*7*32, 10)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out
I create an instance of this class and start training
cnn = CNN()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)
losses = []
for epoch in range(num_epochs):
    for i, (image, label) in enumerate(valloader):
        image = Variable(image.float())
        label = Variable(label)
        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = cnn(image)
        loss = criterion(output, label)
        loss.backward()
        optimizer.step()
        losses.append(loss.data[0])
        if (i+1) % 100 == 0:
            print('Epoch : %d/%d, Iter : %d/%d, Loss: %.4f'
                  % (epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, loss.data[0]))
This is where I get the error
AttributeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/PIL/Image.py in open(fp, mode)
2812 try:
-> 2813 fp.seek(0)
2814 except (AttributeError, io.UnsupportedOperation):
AttributeError: 'str' object has no attribute 'seek'
During handling of the above exception, another exception occurred:
AttributeError Traceback (most recent call last)
9 frames
/usr/local/lib/python3.6/dist-packages/PIL/Image.py in open(fp, mode)
2813 fp.seek(0)
2814 except (AttributeError, io.UnsupportedOperation):
-> 2815 fp = io.BytesIO(fp.read())
2816 exclusive_fp = True
2817
AttributeError: 'str' object has no attribute 'read'
The traceback refers to PIL Image, which I use in the dataset class, so it seems to me that's where I am doing something wrong.
This is the notebook I am working on: https://colab.research.google.com/drive/17XdP7gUoMNLxPCJ6PHEi3B09UQItzKyf?usp=sharing
This is the dataset I am working on: https://drive.google.com/open?id=1DznuHV9Fi5jVEbGdP-tg3ckmp5CNDOj1
Please help me debug the errors in the code.

Related

NotImplementedError when using a custom generator

I use a custom generator for training my data. It should inherit from keras.utils.Sequence and define these methods:
__init__, __len__, on_epoch_end, __getitem__.
When I fit my model, a NotImplementedError occurs. I know it is about one of these overridden functions, but I don't know how to handle it.
class DataGenerator(tf.keras.utils.Sequence):
    def __init__(self, root_dir=r'../data/val_test', image_folder='img/', mask_folder='masks/',
                 batch_size=4, image_size=288, nb_y_features=1,
                 augmentation=None,
                 suffle=True):
        # self.image_filenames = listdir_fullpath(os.path.join(root_dir, image_folder))
        self.image_filenames = np.sort([os.path.join(os.path.join(root_dir, image_folder), f)
                                        for f in os.listdir(os.path.join(root_dir, image_folder))])
        # self.mask_names = listdir_fullpath(os.path.join(root_dir, mask_folder))
        self.mask_names = np.sort([os.path.join(os.path.join(root_dir, mask_folder), f)
                                   for f in os.listdir(os.path.join(root_dir, mask_folder))])
        self.batch_size = batch_size
        self.augmentation = augmentation
        self.image_size = image_size
        self.nb_y_features = nb_y_features
        self.suffle = suffle

    # def listdir_fullpath(d):
    #     return np.sort([os.path.join(d, f) for f in os.listdir(d)])

    def __getitem__(self, index):
        data_index_min = int(index*self.batch_size)
        data_index_max = int(min((index+1)*self.batch_size, len(self.image_filenames)))
        indexes = self.image_filenames[data_index_min:data_index_max]
        this_batch_size = len(indexes)  # The last batch can be smaller than the others
        X = np.empty((this_batch_size, self.image_size, self.image_size, 3), dtype=np.float32)
        y = np.empty((this_batch_size, self.image_size, self.image_size, self.nb_y_features), dtype=np.uint8)
        for i, sample_index in enumerate(indexes):
            X_sample, y_sample = self.read_image_mask(self.image_filenames[index * self.batch_size + i],
                                                      self.mask_names[index * self.batch_size + i])
            # if augmentation is defined, we assume it's a train set
            if self.augmentation is not None:
                # Augmentation code
                augmented = self.augmentation(self.image_size)(image=X_sample, mask=y_sample)
                image_augm = augmented['image']
                mask_augm = augmented['mask'].reshape(self.image_size, self.image_size, self.nb_y_features)
                # divide by 255 to normalize images from 0 to 1
                X[i, ...] = image_augm/255
                y[i, ...] = mask_augm/255
            else:
                ...
        return X, y

history = model.fit(train_generator,
                    epochs=EPOCHS,
                    steps_per_epoch=spe_train,
                    callbacks=callbacks,
                    validation_data=validation_generator,
                    validation_steps=spe_val)
This is the error:
NotImplementedError Traceback (most recent call last)
<ipython-input-36-fa9c887c02c7> in <module>
17 callbacks=callbacks,
18 validation_data = validation_generator,
---> 19 validation_steps=spe_val)
1 frames
/usr/local/lib/python3.7/dist-packages/keras/utils/data_utils.py in __len__(self)
489 The number of batches in the Sequence.
490 """
--> 491 raise NotImplementedError
492
493 def on_epoch_end(self):
NotImplementedError:
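The traceback shows Keras' base Sequence.__len__ raising NotImplementedError, which suggests the subclass above never defines __len__ (and on_epoch_end). A minimal sketch of the missing methods, reusing the attribute names from the generator above; the __getitem__ body here is only a placeholder, not the asker's loading code:

import math
import numpy as np
import tensorflow as tf

class MinimalGenerator(tf.keras.utils.Sequence):
    def __init__(self, image_filenames, batch_size=4, shuffle=True):
        self.image_filenames = list(image_filenames)
        self.batch_size = batch_size
        self.shuffle = shuffle

    def __len__(self):
        # Number of batches per epoch -- the method Keras calls before fitting.
        return math.ceil(len(self.image_filenames) / self.batch_size)

    def on_epoch_end(self):
        if self.shuffle:
            np.random.shuffle(self.image_filenames)

    def __getitem__(self, index):
        batch = self.image_filenames[index * self.batch_size:(index + 1) * self.batch_size]
        # Placeholder shapes; real code would load the images and masks listed in `batch`.
        X = np.zeros((len(batch), 288, 288, 3), dtype=np.float32)
        y = np.zeros((len(batch), 288, 288, 1), dtype=np.float32)
        return X, y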

Variational Autoencoder: weird mosaic reconstruction result

I want to train a VAE network, but somehow after 150 epochs the output is very weird and the loss has converged to ~0.07. In my DataLoader the images are converted from int16 to grayscale; my code is below. The input is [batch_size, 5, 512, 512] (5 grayscale images). I have no idea why the output looks like that.
class MyDataset(Dataset):
    def __init__(self, transform):
        data_path = "/content/drive/MyDrive/sub_sample/imgs_train_data_0_5_5bands.npy"
        # train_ = CustomImageDataset(data_path, transform=None)
        imgs_test = np.load(data_path)
        # x = torch.zeros(3, 244, 395, dtype=torch.uint8)
        print(imgs_test.dtype)
        img = imgs_test.astype('float32')
        imgs_test = (img - np.min(img, axis=(0, 1))) / np.ptp(img, axis=(0, 1))
        print(imgs_test.dtype)
        print(imgs_test.max(), imgs_test.min())
        self.training = imgs_test
        # print(max(imgs_test[0]), min(imgs_test[0]))
        self.img = torch.from_numpy(self.training)
        # self.img = F.interpolate(self.img, size=(256, 256))
        print(imgs_test.shape)
        # self.transform = transform
        # print('****', self.subset.shape[0])

    def __getitem__(self, index):
        # img_resize = np.random.random((13, 256, 256))
        img_resize = self.img[index, :, :, :]  # read each npy sample
        # print((img_resize[0, :, :].max()))
        return img_resize

    def __len__(self):
        # print('the shape of the total dataset', len(self.training))
        return len(self.training)
The reconstructed result:
BTW, the loss function is F.mse_loss(input, reconstructed_image).
I would very much appreciate it if anyone could give me some advice.
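(Not part of the question, but for reference: a VAE is usually trained with a reconstruction term plus a KL divergence term rather than MSE alone. A minimal sketch, where mu and logvar are assumed to be the encoder's outputs:)

import torch
import torch.nn.functional as F

def vae_loss(recon_x, x, mu, logvar):
    # Reconstruction term (the question uses plain F.mse_loss).
    recon = F.mse_loss(recon_x, x, reduction='mean')
    # KL divergence between N(mu, sigma^2) and the standard normal prior.
    kld = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp())
    return recon + kld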

PyTorch | getting "RuntimeError: Found dtype Long but expected Float"

I'm trying to train a CNN on a custom dataset. Code:
Dataset.py
class MyDataset(Dataset):
    def __init__(self, csv_file, root_dir):
        self.annotations = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.annotations)

    def __getitem__(self, index):
        img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0])
        y_label = torch.tensor(int(self.annotations.iloc[index, 1]))
        img = cv2.imread(img_path)
        # resize
        res = cv2.resize(img, dsize=(50, 50), interpolation=cv2.INTER_CUBIC)
        # convert image to tensor
        res = torch.from_numpy(res)
        return (res, y_label)
Model.py
class ConvNet(torch.nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        f2 = 4
        self.layer2 = nn.Sequential(
            nn.Conv2d(50, f2, kernel_size=5, padding=2),
            nn.ReLU(),
            nn.BatchNorm2d(f2),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.fc1 = nn.Linear(100, 200)
        self.fc2 = nn.Linear(200, 20)
        self.fc3 = nn.Linear(20, 1)

    def forward(self, x):
        x = self.layer2(x.float())
        x = x.reshape(x.size(0), -1)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x
and here is my training code:
dataset = MyDataset(
    csv_file='dataset.csv',
    root_dir='tmp')
train_set, test_set = torch.utils.data.random_split(dataset, lengths=[500, 70])
train_loader = DataLoader(dataset=train_set, batch_size=16, shuffle=True)
test_loader = DataLoader(dataset=test_set, batch_size=16, shuffle=True)

model = ConvNet()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)

for epoch in range(20):
    losses = []
    for batch_idx, (data, targets) in enumerate(train_loader):
        data = data.to(device=device)
        targets = targets.to(device=device)
        # forward
        scores = model(data)
        loss = criterion(scores, targets)
        losses.append(loss.item())
        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print('Cost: {0} = {1}'.format(epoch, sum(losses)/len(losses)))
But I get RuntimeError: Found dtype Long but expected Float. This probably comes from the fact that I do x = self.layer2(x.float()) to avoid overflow.
I would like to know how to fix that error; it's difficult to pinpoint where exactly the problem comes from.
How can I solve this?
The problem might be caused by the data tensor. When the data loader fetches images via the __getitem__() method, the image is read with OpenCV and converted to a tensor. I think at that point the type of your data tensor is long, but it should be float. If you cast your numpy array (named res) to float, it should work fine. You can see my solution below.
res = cv2.resize(img, dsize=(50, 50), interpolation=cv2.INTER_CUBIC)
res = res.astype(np.float32)  # you should add this line: cast to float32 before building the tensor
res = torch.from_numpy(res)
res = res.permute(2, 0, 1)    # HWC -> CHW, the channel order PyTorch convolutions expect
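Putting the answer's lines back into the dataset's __getitem__, a sketch using the question's own names:

def __getitem__(self, index):
    img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0])
    y_label = torch.tensor(int(self.annotations.iloc[index, 1]))
    img = cv2.imread(img_path)
    res = cv2.resize(img, dsize=(50, 50), interpolation=cv2.INTER_CUBIC)
    res = res.astype(np.float32)   # cast before building the tensor
    res = torch.from_numpy(res)
    res = res.permute(2, 0, 1)     # HWC -> CHW
    return res, y_label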

No performance improvement using a quantized model in PyTorch

I have trained a model in PyTorch with the float data type. I want to improve my inference time by converting this model to a quantized model. I have used the torch.quantization.convert API to convert my model's weights to the uint8 data type. However, when I use this model for inference, I do not get any performance improvement. Am I doing something wrong here?
The UNet model code:
def gen_initialization(m):
    if type(m) == nn.Conv2d:
        sh = m.weight.shape
        nn.init.normal_(m.weight, std=math.sqrt(2.0 / (sh[0]*sh[2]*sh[3])))
        nn.init.constant_(m.bias, 0)
    elif type(m) == nn.BatchNorm2d:
        nn.init.constant_(m.weight, 1)
        nn.init.constant_(m.bias, 0)

class TripleConv(nn.Module):
    def __init__(self, in_ch, out_ch):
        super(TripleConv, self).__init__()
        mid_ch = (in_ch + out_ch) // 2
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, mid_ch, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(num_features=mid_ch),
            nn.LeakyReLU(negative_slope=0.1),
            nn.Conv2d(mid_ch, mid_ch, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(num_features=mid_ch),
            nn.LeakyReLU(negative_slope=0.1),
            nn.Conv2d(mid_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(num_features=out_ch),
            nn.LeakyReLU(negative_slope=0.1)
        )
        self.conv.apply(gen_initialization)

    def forward(self, x):
        return self.conv(x)

class Down(nn.Module):
    def __init__(self, in_ch, out_ch):
        super(Down, self).__init__()
        self.triple_conv = TripleConv(in_ch, out_ch)
        self.avg_pool_conv = nn.AvgPool2d(2, 2)
        self.in_ch = in_ch
        self.out_ch = out_ch

    def forward(self, x):
        self.cache = self.triple_conv(x)
        pad = torch.zeros(x.shape[0], self.out_ch - self.in_ch, x.shape[2], x.shape[3], device=x.device)
        x = torch.cat((x, pad), dim=1)
        self.cache += x
        return self.avg_pool_conv(self.cache)

class Center(nn.Module):
    def __init__(self, in_ch, out_ch):
        super(Center, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(num_features=out_ch),
            nn.LeakyReLU(negative_slope=0.1, inplace=True)
        )
        self.conv.apply(gen_initialization)

    def forward(self, x):
        return self.conv(x)

class Up(nn.Module):
    def __init__(self, in_ch, out_ch):
        super(Up, self).__init__()
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear',
                                    align_corners=True)
        self.triple_conv = TripleConv(in_ch, out_ch)

    def forward(self, x, cache):
        x = self.upsample(x)
        x = torch.cat((x, cache), dim=1)
        x = self.triple_conv(x)
        return x

class UNet(nn.Module):
    def __init__(self, in_ch, first_ch=None):
        super(UNet, self).__init__()
        if not first_ch:
            first_ch = 32
        self.down1 = Down(in_ch, first_ch)
        self.down2 = Down(first_ch, first_ch*2)
        self.down3 = Down(first_ch*2, first_ch*4)
        self.down4 = Down(first_ch*4, first_ch*8)
        self.center = Center(first_ch*8, first_ch*8)
        self.up4 = Up(first_ch*8*2, first_ch*4)
        self.up3 = Up(first_ch*4*2, first_ch*2)
        self.up2 = Up(first_ch*2*2, first_ch)
        self.up1 = Up(first_ch*2, first_ch)
        self.output = nn.Conv2d(first_ch, in_ch, kernel_size=3, stride=1,
                                padding=1, bias=True)
        self.output.apply(gen_initialization)

    def forward(self, x):
        x = self.down1(x)
        x = self.down2(x)
        x = self.down3(x)
        x = self.down4(x)
        x = self.center(x)
        x = self.up4(x, self.down4.cache)
        x = self.up3(x, self.down3.cache)
        x = self.up2(x, self.down2.cache)
        x = self.up1(x, self.down1.cache)
        return self.output(x)
The inference code:
from tqdm import tqdm
import os
import numpy as np
import torch
import gan_network
import torch.nn.parallel
from torch.utils.data import DataLoader
import torch.utils.data as data
import random
import glob
import scipy.io
import time

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

class DataFolder(data.Dataset):
    def __init__(self, file):
        super(DataFolder, self).__init__()
        self.image_names = []
        fid = file
        for line in fid:
            # line = line[:-1]
            if line == '':
                continue
            # print(line)
            self.image_names.append(line)
        random.shuffle(self.image_names)
        self.image_names = self.image_names[0:]

    def __len__(self):
        return len(self.image_names)

    def __getitem__(self, index):
        path = self.image_names[index]
        img = np.load(path)
        img = np.rollaxis(img, 2, 0)
        img = torch.from_numpy(img[:, :, :])
        return img, path

if __name__ == '__main__':
    batch_size = 1
    image_size = 2048
    channels = 6
    model_path = 'D:/WorkProjects/Network_Training_Aqusens/FullFovReconst/network/network_epoch9.pth'
    test_data = glob.glob('D:/save/temp/*.npy')
    dest_dir = 'D:/save/temp/results/'

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    net = gan_network.UNet(6, 32)
    if torch.cuda.device_count() > 1:
        net = torch.nn.DataParallel(net)
    net.to(device)
    net.load_state_dict(torch.load(model_path))
    quantized_model = torch.quantization.convert(net, {torch.nn.Conv2d, torch.nn.BatchNorm2d}, inplace=False)

    dataset = DataFolder(file=test_data)
    print(f'{len(dataset)}')
    data_loader = DataLoader(dataset=dataset, num_workers=4,
                             batch_size=batch_size, shuffle=False,
                             drop_last=False, pin_memory=True)

    input = torch.Tensor(batch_size, channels, image_size, image_size).to(device)
    t0 = time.time()
    with torch.no_grad():
        for i, batch in enumerate(tqdm(data_loader)):
            input.copy_(batch[0])
            output = net(input).cpu().clone().numpy()
            np.array(output)
            output = np.rollaxis(output, 1, 4)
            for num in range(batch_size):
                arr = output[num, :, :, :]
                file_name = os.path.basename(batch[1][num])
                save_name = os.path.join(dest_dir, file_name)
                save_name = save_name.replace(".npy", "")
                scipy.io.savemat(save_name+'.mat', {'output': arr})
    t1 = time.time()
    print(f'Elapsed time = {t1-t0}')
For both net and quantized_model, I get an elapsed time of around 30 seconds for 12 images passed through them.
PyTorch documentation suggests three ways to perform quantization. You are doing post-training dynamic quantization (the simplest quantization method available) which only supports torch.nn.Linear and torch.nn.LSTM layers as listed here. To quantize CNN layers, you would want to check out the other two techniques (these are the ones that support CNN layers): post-training static quantization and quantization aware training. This tutorial shows both these techniques applied on CNNs.
I have tried the static quantization approach on YOLOv5; it cuts 73% of the size and decreases inference time by ~13-15%. It seems to me that conv layers gain less in latency than linear layers, because the same static approach on a toy fully-dense MNIST net decreased inference time by 4x.
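For reference, a minimal sketch of post-training static quantization in eager mode (this is not the asker's UNet; the module shapes, the calibration data, and the fbgemm backend are assumptions for illustration):

import torch
import torch.nn as nn

class SmallNet(nn.Module):
    def __init__(self):
        super(SmallNet, self).__init__()
        self.quant = torch.quantization.QuantStub()      # fp32 -> int8 boundary
        self.conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)
        self.relu = nn.ReLU()
        self.dequant = torch.quantization.DeQuantStub()  # int8 -> fp32 boundary

    def forward(self, x):
        x = self.quant(x)
        x = self.relu(self.conv(x))
        return self.dequant(x)

model_fp32 = SmallNet().eval()
model_fp32.qconfig = torch.quantization.get_default_qconfig('fbgemm')  # x86 backend
prepared = torch.quantization.prepare(model_fp32)         # insert observers
with torch.no_grad():
    for _ in range(10):                                    # calibrate with representative data
        prepared(torch.randn(1, 3, 64, 64))
model_int8 = torch.quantization.convert(prepared)          # swap in quantized modules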

PyTorch: why does the forward function run multiple times, and can I change the input shape?

import torch
import torch.nn as nn
import torchvision.datasets as dsets
from skimage import transform
import torchvision.transforms as transforms
from torch.autograd import Variable
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
import statistics
import random
import math

class FashionMNISTDataset(Dataset):
    '''Fashion MNIST Dataset'''
    def __init__(self, csv_file, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file
            transform (callable): Optional transform to apply to sample
        """
        data = pd.read_csv(csv_file)
        self.X = np.array(data.iloc[:, 1:]).reshape(-1, 1, 28, 28)
        self.Y = np.array(data.iloc[:, 0])
        del data
        self.transform = transform

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        item = self.X[idx]
        label = self.Y[idx]
        if self.transform:
            item = self.transform(item)
        return (item, label)

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Linear(616, 300),
            nn.ReLU())
        self.layer2 = nn.Sequential(
            nn.Linear(300, 100),
            nn.ReLU())
        self.fc = nn.Linear(100, 10)

    def forward(self, x):
        print("x shape", x.shape)
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.fc(out)
        return out

def run():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    num_epochs = 15
    batch_size = 100
    learning_rate = 0.0001

    train_dataset = FashionMNISTDataset(csv_file='fashion-mnist_train.csv')
    test_dataset = FashionMNISTDataset(csv_file='fashion-mnist_test.csv')
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)

    # instance of the Conv Net
    cnn = CNN()
    cnn.to(device)

    # loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)

    losses = []
    for epoch in range(num_epochs):
        l = 0
        for i, (images, labels) in enumerate(train_loader):
            images = Variable(images.float())
            labels = Variable(labels)
            # print(images[0])
            images = images.to(device)
            labels = labels.to(device)
            print("img shape=", images.shape, "label shape=", labels.shape)
            images = images.resize_((100, 616))
            print("img shape=", images.shape, "label shape=", labels.shape)

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            outputs = cnn(images)
            loss = criterion(outputs, labels)
            # print(loss)
            loss.backward()
            optimizer.step()
            # print(loss.item())
            losses.append(loss.item())
            l = loss.item()

            cnn.eval()
            with torch.no_grad():
                val_loss = []
                for images, labels in test_loader:
                    images = Variable(images.float()).to(device)
                    labels = labels.to(device)
                    outputs = cnn.forward(images)
                    batch_loss = criterion(outputs, labels)
                    val_loss.append(batch_loss.item())
                avgloss = statistics.mean(val_loss)
                if avgloss < min(losses):
                    torch.save(cnn.state_dict(), 'model')
            cnn.train()

            if (i+1) % 100 == 0:
                print('Epoch : %d/%d, Iter : %d/%d, Loss: %.4f'
                      % (epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, loss.item()))
        print(l)

    final_model = CNN()
    final_model.load_state_dict(torch.load('model'))
    final_model.eval()
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = Variable(images.float()).to(device)
        outputs = final_model(images).to(device)
        labels.to(device)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum()
    print('Test Accuracy of the model on the 10000 test images: %.4f %%' % (100 * correct / total))

if __name__ == '__main__':
    run()
I have enclosed all the code for testing purposes. Here is the error I get:
img shape= torch.Size([100, 1, 28, 28]) label shape= torch.Size([100])
img shape= torch.Size([100, 616]) label shape= torch.Size([100])
x shape torch.Size([100, 616])
x shape torch.Size([100, 1, 28, 28])
Traceback (most recent call last):
  File "test.py", line 145, in <module>
    run()
  File "test.py", line 115, in run
    outputs = cnn.forward(images)
  File "test.py", line 56, in forward
    out = self.layer1(x)
  File "/usr/share/anaconda3/envs/DL/lib/python3.6/site-packages/torch/nn/modules/module.py", line 489, in __call__
    result = self.forward(*input, **kwargs)
  File "/usr/share/anaconda3/envs/DL/lib/python3.6/site-packages/torch/nn/modules/container.py", line 92, in forward
    input = module(input)
  File "/usr/share/anaconda3/envs/DL/lib/python3.6/site-packages/torch/nn/modules/module.py", line 489, in __call__
    result = self.forward(*input, **kwargs)
  File "/usr/share/anaconda3/envs/DL/lib/python3.6/site-packages/torch/nn/modules/linear.py", line 67, in forward
    return F.linear(input, self.weight, self.bias)
  File "/usr/share/anaconda3/envs/DL/lib/python3.6/site-packages/torch/nn/functional.py", line 1354, in linear
    output = input.matmul(weight.t())
RuntimeError: size mismatch, m1: [2800 x 28], m2: [616 x 300] at /opt/conda/conda-bld/pytorch_1549630534704/work/aten/src/THC/generic/THCTensorMathBlas.cu:266
The problem here is that I want all 616 pixels to be fed as input into the neural network, but I don't know how to do so. I tried to reshape the input to solve the problem, but then model.forward ran twice, once with the correct shape and then with the wrong shape.
You are calling forward twice in run:
Once for the training data
Once for the validation data
However, you do not appear to have applied the following transformation to your validation data:
images = images.resize_((100,616))
Maybe consider doing the resize in the forward function.
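A sketch of that suggestion: flatten inside forward so both the training and validation paths see the same shape. Note that in_features then has to match the flattened image size, which for 1x28x28 Fashion-MNIST images is 1*28*28 = 784 (that value is my assumption here, not the asker's 616):

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        # in_features must equal the flattened input size (1*28*28 = 784 for these images)
        self.layer1 = nn.Sequential(nn.Linear(784, 300), nn.ReLU())
        self.layer2 = nn.Sequential(nn.Linear(300, 100), nn.ReLU())
        self.fc = nn.Linear(100, 10)

    def forward(self, x):
        x = x.view(x.size(0), -1)  # flatten [N, 1, 28, 28] -> [N, 784] inside forward
        out = self.layer1(x)
        out = self.layer2(out)
        return self.fc(out)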
