Transform DVS128Gesture dataset - python

I am making a neural network in Python using the DVS128Gesture dataset. I want to transform the default 128x128 trinary frames to 32x32 binary frames, but when I try to use torchvision.transforms in the dataset, I get this error:
img should be PIL Image. Got <class 'numpy.lib.npyio.NpzFile'>
My code:
import torch
import torchvision
from spikingjelly.datasets.dvs128_gesture import DVS128Gesture
train_data = DVS128Gesture(root_dir, train=True, data_type='event',
                           transform=torchvision.transforms.Compose([
                               torchvision.transforms.Resize(32),
                               torchvision.transforms.Normalize((0.0,), (0.8,)),
                               torchvision.transforms.ToTensor()
                           ]))
test_data = DVS128Gesture(root_dir, train=False, data_type='event',
                          transform=torchvision.transforms.Compose([
                              torchvision.transforms.Resize(32),
                              torchvision.transforms.Normalize((0.0,), (0.8,)),
                              torchvision.transforms.ToTensor()
                          ]))
train_loader = torch.utils.data.DataLoader(train_data, batch_size=bs, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=bs, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
example_data.shape
I have done the same with the MNIST dataset and everything worked as expected. I think the problem is that I use torchvision.transforms in DVS128Gesture, but I am not sure what else I can use.
The same with MNIST:
train_data = torchvision.datasets.MNIST(root_dir, train=True, download=True,
                                        transform=torchvision.transforms.Compose([
                                            torchvision.transforms.Resize(28),
                                            torchvision.transforms.ToTensor(),
                                            torchvision.transforms.Normalize((0.0,), (0.8,))
                                        ]))
test_data = torchvision.datasets.MNIST(root_dir, train=False, download=True,
                                       transform=torchvision.transforms.Compose([
                                           torchvision.transforms.Resize(28),
                                           torchvision.transforms.ToTensor(),
                                           torchvision.transforms.Normalize((0.0,), (0.8,))
                                       ]))
What am I doing wrong?
Stack trace of the error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In [10], line 2
1 examples = enumerate(test_loader)
----> 2 batch_idx, (example_data, example_targets) = next(examples)
3 example_data.shape
File p:\Programs\Anaconda3\lib\site-packages\torch\utils\data\dataloader.py:681, in _BaseDataLoaderIter.__next__(self)
678 if self._sampler_iter is None:
679 # TODO(https://github.com/pytorch/pytorch/issues/76750)
680 self._reset() # type: ignore[call-arg]
--> 681 data = self._next_data()
682 self._num_yielded += 1
683 if self._dataset_kind == _DatasetKind.Iterable and \
684 self._IterableDataset_len_called is not None and \
685 self._num_yielded > self._IterableDataset_len_called:
File p:\Programs\Anaconda3\lib\site-packages\torch\utils\data\dataloader.py:721, in _SingleProcessDataLoaderIter._next_data(self)
719 def _next_data(self):
720 index = self._next_index() # may raise StopIteration
--> 721 data = self._dataset_fetcher.fetch(index) # may raise StopIteration
722 if self._pin_memory:
723 data = _utils.pin_memory.pin_memory(data, self._pin_memory_device)
File p:\Programs\Anaconda3\lib\site-packages\torch\utils\data\_utils\fetch.py:49, in _MapDatasetFetcher.fetch(self, possibly_batched_index)
47 def fetch(self, possibly_batched_index):
48 if self.auto_collation:
---> 49 data = [self.dataset[idx] for idx in possibly_batched_index]
50 else:
51 data = self.dataset[possibly_batched_index]
File p:\Programs\Anaconda3\lib\site-packages\torch\utils\data\_utils\fetch.py:49, in <listcomp>(.0)
47 def fetch(self, possibly_batched_index):
48 if self.auto_collation:
---> 49 data = [self.dataset[idx] for idx in possibly_batched_index]
50 else:
51 data = self.dataset[possibly_batched_index]
File p:\Programs\Anaconda3\lib\site-packages\torchvision\datasets\folder.py:232, in DatasetFolder.__getitem__(self, index)
230 sample = self.loader(path)
231 if self.transform is not None:
--> 232 sample = self.transform(sample)
233 if self.target_transform is not None:
234 target = self.target_transform(target)
File p:\Programs\Anaconda3\lib\site-packages\torchvision\transforms\transforms.py:94, in Compose.__call__(self, img)
92 def __call__(self, img):
93 for t in self.transforms:
---> 94 img = t(img)
95 return img
File p:\Programs\Anaconda3\lib\site-packages\torch\nn\modules\module.py:1130, in Module._call_impl(self, *input, **kwargs)
1126 # If we don't have any hooks, we want to skip the rest of the logic in
1127 # this function, and just call forward.
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
File p:\Programs\Anaconda3\lib\site-packages\torchvision\transforms\transforms.py:349, in Resize.forward(self, img)
341 def forward(self, img):
342 """
343 Args:
344 img (PIL Image or Tensor): Image to be scaled.
(...)
347 PIL Image or Tensor: Rescaled image.
348 """
--> 349 return F.resize(img, self.size, self.interpolation, self.max_size, self.antialias)
File p:\Programs\Anaconda3\lib\site-packages\torchvision\transforms\functional.py:430, in resize(img, size, interpolation, max_size, antialias)
428 warnings.warn("Anti-alias option is always applied for PIL Image input. Argument antialias is ignored.")
429 pil_interpolation = pil_modes_mapping[interpolation]
--> 430 return F_pil.resize(img, size=size, interpolation=pil_interpolation, max_size=max_size)
432 return F_t.resize(img, size=size, interpolation=interpolation.value, max_size=max_size, antialias=antialias)
File p:\Programs\Anaconda3\lib\site-packages\torchvision\transforms\functional_pil.py:249, in resize(img, size, interpolation, max_size)
240 @torch.jit.unused
241 def resize(
242 img: Image.Image,
(...)
245 max_size: Optional[int] = None,
246 ) -> Image.Image:
248 if not _is_pil_image(img):
--> 249 raise TypeError(f"img should be PIL Image. Got {type(img)}")
250 if not (isinstance(size, int) or (isinstance(size, Sequence) and len(size) in (1, 2))):
251 raise TypeError(f"Got inappropriate size arg: {size}")
TypeError: img should be PIL Image. Got <class 'numpy.lib.npyio.NpzFile'>

The error comes from the Resize transform, as the stack trace confirms: Resize.forward is where the TypeError is raised.
Resize is an image-specific transform that expects a PIL Image (or a torch Tensor, see the transforms documentation), while your DVS128Gesture dataset with data_type='event' outputs another object type, a numpy NpzFile holding the raw events.
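One way around this is to let spikingjelly integrate the events into frames first (data_type='frame' with frames_number and split_by, as I understand its API), so each sample is a numpy array of shape [T, 2, 128, 128], and then do the resizing and binarization with tensor ops instead of PIL transforms. The sketch below is untested against your setup; the frames_number value and the "any event -> 1" binarization rule are assumptions you would adapt:

import torch
import torchvision
from spikingjelly.datasets.dvs128_gesture import DVS128Gesture

def to_binary_32x32(frames):
    # frames: numpy array of shape [T, 2, 128, 128] when data_type='frame'
    x = torch.from_numpy(frames).float()
    # functional.resize works on tensors of shape [..., H, W]
    x = torchvision.transforms.functional.resize(x, [32, 32])
    return (x > 0).float()  # assumed binarization: any accumulated event becomes 1

train_data = DVS128Gesture(root_dir, train=True, data_type='frame',
                           frames_number=16, split_by='number',  # frames_number is an arbitrary choice
                           transform=to_binary_32x32)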

Related

How do I resolve the error "IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)"?

I tried my best to make a minimal reproducible example. There's an issue in my train() function: on the line output = model(data) I get the error IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1). I also get an error before that, but I cannot decipher its meaning. I've included the full traceback in this message.
I've seen other users post about the same error message, but each one has a different solution; I used the debugger to look into data.location but I'm still unable to resolve the problem. I'm using PySyft v0.2.9.
import torch
import syft as sy
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from auto_esn.esn.esn import ESNBase
import torch.nn.functional as F
import torch.optim as optim
from auto_esn.esn.esn import GroupedDeepESN
from auto_esn.esn.reservoir.util import NRMSELoss

class CarHackingDataset(Dataset):
    """
    Loading the Car-Hacking Dataset from
    https://ocslab.hksecurity.net/Datasets/car-hacking-dataset

    Args:
        csv_file: A path to the dataset file which has the extension CSV.
        root_dir: The directory of the parent folder of the dataset.
        transform (callable, optional): Optional transform to be applied on a sample.
    """
    def __init__(self, csv_file: str, root_dir: str, transform=None):
        self.car_hacking_frame = pd.read_csv(csv_file)[:10000]
        self.root_dir = root_dir
        self.transform = transform

    def __getitem__(self, idx):
        '''Grabs relevant features from the dataset.'''
        if torch.is_tensor(idx):
            idx = idx.tolist()
        features = ['Timestamp', 'DLC', 'CAN_ID', 'Data']
        X_train = self.car_hacking_frame.loc[:, features].values
        X_train_scaled = StandardScaler().fit_transform(X_train)
        X_train_scaled = torch.as_tensor(X_train_scaled)
        # It looks like it's a bad idea to encode features.
        # https://stackoverflow.com/questions/61217713/labelencoder-for-categorical-features
        class_le = LabelEncoder()
        target = class_le.fit_transform(self.car_hacking_frame['Flag'].values)
        target = torch.as_tensor(target)
        return X_train_scaled[idx], target[idx]

    def __len__(self):
        return len(self.car_hacking_frame)

train_dataset = CarHackingDataset(csv_file='/content/car_hacking_data/clean_fuzzy_dataset.csv',
                                  root_dir='/content/car_hacking_data')

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

hook = sy.TorchHook(torch)
car1 = sy.VirtualWorker(hook, id="car1")
car2 = sy.VirtualWorker(hook, id="car2")

args = {
    'batch_size': 32,
    'epochs': 1
}

federated_train_loader = sy.FederatedDataLoader(train_dataset.federate((car1, car2)),
                                                batch_size=args['batch_size'], shuffle=True)

# Initializing the loss function, which is probably a variation of mean squared error.
nrmse = NRMSELoss()

def train(model, device, federated_train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(federated_train_loader):
        model = model.send(data.location)
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = nrmse(output, target)
        loss.backward()
        optimizer.step()
        model.get()
        if batch_idx % 10 == 0:
            loss = loss.get()
            print(f'''Train Epoch: {epoch} [{(batch_idx * args['batch_size'])}/{(len(federated_train_loader) * args['batch_size'])}'''
                  + f'''({100. * batch_idx / len(federated_train_loader):.0f}%)]\tLoss: {loss.item():.6f}''')

model = GroupedDeepESN().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01)

for epoch in range(1, args['batch_size'] + 1):
    train(model, device, federated_train_loader, optimizer, epoch)
Traceback of the error message:
---------------------------------------------------------------------------
PureFrameworkTensorFoundError Traceback (most recent call last)
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
336 new_args, new_kwargs, new_type, args_type = hook_args.unwrap_args_from_function(
--> 337 cmd, args_, kwargs_, return_args_type=True
338 )
20 frames
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook_args.py in unwrap_args_from_function(attr, args_, kwargs_, return_args_type)
166 # Run it
--> 167 new_args = args_hook_function(args_)
168
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook_args.py in <lambda>(x)
355
--> 356 return lambda x: f(lambdas, x)
357
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook_args.py in three_fold(lambdas, args_, **kwargs)
534 lambdas[0](args_[0], **kwargs),
--> 535 lambdas[1](args_[1], **kwargs),
536 lambdas[2](args_[2], **kwargs),
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook_args.py in <lambda>(i)
330 # Last if not, rule is probably == 1 so use type to return the right transformation.
--> 331 else lambda i: forward_func[type(i)](i)
332 for a, r in zip(args_, rules) # And do this for all the args / rules provided
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/hook/hook_args.py in <lambda>(i)
23 if hasattr(i, "child")
---> 24 else (_ for _ in ()).throw(PureFrameworkTensorFoundError),
25 torch.nn.Parameter: lambda i: i.child
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/hook/hook_args.py in <genexpr>(.0)
23 if hasattr(i, "child")
---> 24 else (_ for _ in ()).throw(PureFrameworkTensorFoundError),
25 torch.nn.Parameter: lambda i: i.child
PureFrameworkTensorFoundError:
During handling of the above exception, another exception occurred:
IndexError Traceback (most recent call last)
<ipython-input-6-c9ac87b98598> in <module>
24
25 for epoch in range(1, args['batch_size'] + 1):
---> 26 train(model, device, federated_train_loader, optimizer, epoch)
<ipython-input-6-c9ac87b98598> in train(model, device, federated_train_loader, optimizer, epoch)
7 data, target = data.to(device), target.to(device)
8 optimizer.zero_grad()
----> 9 output = model(data)
10
11 loss = nrmse(output, target)
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
--> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/dist-packages/auto_esn/esn/esn.py in forward(self, input)
31 mapped_input = self.reservoir(input)
32
---> 33 return self.readout(mapped_input)
34
35 def reset_hidden(self):
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
--> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/dist-packages/auto_esn/esn/readout/svr_readout.py in forward(self, input)
10
11 def forward(self, input: Tensor) -> Tensor:
---> 12 return self.readout(input)
13
14 def fit(self, input: Tensor, target: Tensor):
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
--> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/linear.py in forward(self, input)
85
86 def forward(self, input):
---> 87 return F.linear(input, self.weight, self.bias)
88
89 def extra_repr(self):
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook.py in overloaded_func(*args, **kwargs)
333 handle_func_command = syft.framework.Tensor.handle_func_command
334
--> 335 response = handle_func_command(command)
336
337 return response
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
378 # in the execute_command function
379 try:
--> 380 response = cls._get_response(cmd, args_, kwargs_)
381 except AttributeError:
382 # Change the library path to avoid errors on layers like AvgPooling
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/tensors/interpreters/native.py in _get_response(cmd, args_, kwargs_)
412
413 if isinstance(args_, tuple):
--> 414 response = command_method(*args_, **kwargs_)
415 else:
416 response = command_method(args_, **kwargs_)
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in linear(input, weight, bias)
1368 if input.dim() == 2 and bias is not None:
1369 # fused op is marginally faster
-> 1370 ret = torch.addmm(bias, input, weight.t())
1371 else:
1372 output = input.matmul(weight.t())
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook.py in overloaded_func(*args, **kwargs)
333 handle_func_command = syft.framework.Tensor.handle_func_command
334
--> 335 response = handle_func_command(command)
336
337 return response
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
378 # in the execute_command function
379 try:
--> 380 response = cls._get_response(cmd, args_, kwargs_)
381 except AttributeError:
382 # Change the library path to avoid errors on layers like AvgPooling
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/tensors/interpreters/native.py in _get_response(cmd, args_, kwargs_)
412
413 if isinstance(args_, tuple):
--> 414 response = command_method(*args_, **kwargs_)
415 else:
416 response = command_method(args_, **kwargs_)
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
I thought it might also be important to include the shape of my data inside the train function's loop over the federated data: Data shape: torch.Size([32, 4]), Target shape: torch.Size([32]).

multiclass sequence classification with fastai and huggingface

I am looking to implement DistilBERT via fastai and huggingface for a multiclass sequence classification problem. I found a useful tutorial that gave a good example of how to do this with binary classification. The code is below:
# !pip install torch==1.9.0
# !pip install torchtext==0.10
# !pip install transformers==4.7
# !pip install fastai==2.4
from fastai.text.all import *
from sklearn.model_selection import train_test_split
import pandas as pd
import glob
from transformers import AutoTokenizer, AutoModelForSequenceClassification
hf_tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
hf_model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")
"""
train_df and val_df looks like this:
label text
4240 5 whoa interesting.
13 7 you could you could we just
4639 4 you set the goal,
28 1 because ive already agreed to that
66 8 oh hey freshman thats you gona need
"""
print(list(train_df.label.value_counts().index))
"""
[4, 1, 5, 6, 7, 0, 2, 3, 8]
"""
class HF_Dataset(torch.utils.data.Dataset):
    def __init__(self, df, hf_tokenizer):
        self.df = df
        self.hf_tokenizer = hf_tokenizer
        self.label_map = {
            0: 0,
            1: 0,
            2: 0,
            3: 0,
            4: 1,
            5: 1,
            6: 1,
            7: 1,
            8: 1
        }

    def __len__(self):
        return len(self.df)

    def decode(self, token_ids):
        return ' '.join([hf_tokenizer.decode(x) for x in tokenizer_outputs['input_ids']])

    def decode_to_original(self, token_ids):
        return self.hf_tokenizer.decode(token_ids.squeeze())

    def __getitem__(self, index):
        label, text = self.df.iloc[index]
        label = self.label_map[label]
        label = torch.tensor(label)
        tokenizer_output = self.hf_tokenizer(text, return_tensors="pt", padding='max_length', truncation=True, max_length=512)
        tokenizer_output['input_ids'].squeeze_()
        tokenizer_output['attention_mask'].squeeze_()
        return tokenizer_output, label

train_dataset = HF_Dataset(train_df, hf_tokenizer)
valid_dataset = HF_Dataset(valid_df, hf_tokenizer)

train_dl = DataLoader(train_dataset, bs=16, shuffle=True)
valid_dl = DataLoader(valid_dataset, bs=16)
dls = DataLoaders(train_dl, valid_dl)

hf_model(**batched_data)

class HF_Model(nn.Module):
    def __init__(self, hf_model):
        super().__init__()
        self.hf_model = hf_model

    def forward(self, tokenizer_outputs):
        model_output = self.hf_model(**tokenizer_outputs)
        return model_output.logits

model = HF_Model(hf_model)

# Manually popping the model onto the gpu since the data is in a dictionary format
# (doesn't automatically place model + data on gpu otherwise)
learn = Learner(dls, model, loss_func=nn.CrossEntropyLoss(), metrics=[accuracy])
learn.fit_one_cycle(3, 1e-4)
This works fine. However, I mapped my multiclass labels down to 2 labels to make this work. I actually have 9 classes. I tried adjusting the label mapping scheme in the HF_Dataset class to match my actual labels, like below:
class HF_Dataset(torch.utils.data.Dataset):
    def __init__(self, df, hf_tokenizer):
        self.df = df
        self.hf_tokenizer = hf_tokenizer
        self.label_map = {
            0: 0,
            1: 1,
            2: 2,
            3: 3,
            4: 4,
            5: 5,
            6: 6,
            7: 7,
            8: 8
        }

    def __len__(self):
        return len(self.df)

    def decode(self, token_ids):
        return ' '.join([hf_tokenizer.decode(x) for x in tokenizer_outputs['input_ids']])

    def decode_to_original(self, token_ids):
        return self.hf_tokenizer.decode(token_ids.squeeze())

    def __getitem__(self, index):
        label, text = self.df.iloc[index]
        label = self.label_map[label]
        label = torch.tensor(label)
        tokenizer_output = self.hf_tokenizer(text, return_tensors="pt", padding='max_length', truncation=True, max_length=512)
        tokenizer_output['input_ids'].squeeze_()
        tokenizer_output['attention_mask'].squeeze_()
        return tokenizer_output, label
Every line works until learn.fit_one_cycle.
Here is the full stack trace from this line:
0.00% [0/3 00:00<00:00]
epoch train_loss valid_loss accuracy time
0.00% [0/519 00:00<00:00]
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-21-0ec2ff9e12e1> in <module>
----> 1 learn.fit_one_cycle(3, 1e-4)
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/callback/schedule.py in fit_one_cycle(self, n_epoch, lr_max, div, div_final, pct_start, wd, moms, cbs, reset_opt)
111 scheds = {'lr': combined_cos(pct_start, lr_max/div, lr_max, lr_max/div_final),
112 'mom': combined_cos(pct_start, *(self.moms if moms is None else moms))}
--> 113 self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
114
115 # Cell
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in fit(self, n_epoch, lr, wd, cbs, reset_opt)
219 self.opt.set_hypers(lr=self.lr if lr is None else lr)
220 self.n_epoch = n_epoch
--> 221 self._with_events(self._do_fit, 'fit', CancelFitException, self._end_cleanup)
222
223 def _end_cleanup(self): self.dl,self.xb,self.yb,self.pred,self.loss = None,(None,),(None,),None,None
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
161
162 def _with_events(self, f, event_type, ex, final=noop):
--> 163 try: self(f'before_{event_type}'); f()
164 except ex: self(f'after_cancel_{event_type}')
165 self(f'after_{event_type}'); final()
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in _do_fit(self)
210 for epoch in range(self.n_epoch):
211 self.epoch=epoch
--> 212 self._with_events(self._do_epoch, 'epoch', CancelEpochException)
213
214 def fit(self, n_epoch, lr=None, wd=None, cbs=None, reset_opt=False):
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
161
162 def _with_events(self, f, event_type, ex, final=noop):
--> 163 try: self(f'before_{event_type}'); f()
164 except ex: self(f'after_cancel_{event_type}')
165 self(f'after_{event_type}'); final()
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in _do_epoch(self)
204
205 def _do_epoch(self):
--> 206 self._do_epoch_train()
207 self._do_epoch_validate()
208
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in _do_epoch_train(self)
196 def _do_epoch_train(self):
197 self.dl = self.dls.train
--> 198 self._with_events(self.all_batches, 'train', CancelTrainException)
199
200 def _do_epoch_validate(self, ds_idx=1, dl=None):
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
161
162 def _with_events(self, f, event_type, ex, final=noop):
--> 163 try: self(f'before_{event_type}'); f()
164 except ex: self(f'after_cancel_{event_type}')
165 self(f'after_{event_type}'); final()
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in all_batches(self)
167 def all_batches(self):
168 self.n_iter = len(self.dl)
--> 169 for o in enumerate(self.dl): self.one_batch(*o)
170
171 def _do_one_batch(self):
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in one_batch(self, i, b)
192 b = self._set_device(b)
193 self._split(b)
--> 194 self._with_events(self._do_one_batch, 'batch', CancelBatchException)
195
196 def _do_epoch_train(self):
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
161
162 def _with_events(self, f, event_type, ex, final=noop):
--> 163 try: self(f'before_{event_type}'); f()
164 except ex: self(f'after_cancel_{event_type}')
165 self(f'after_{event_type}'); final()
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in _do_one_batch(self)
173 self('after_pred')
174 if len(self.yb):
--> 175 self.loss_grad = self.loss_func(self.pred, *self.yb)
176 self.loss = self.loss_grad.clone()
177 self('after_loss')
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/nn/modules/loss.py in forward(self, input, target)
1119 def forward(self, input: Tensor, target: Tensor) -> Tensor:
1120 return F.cross_entropy(input, target, weight=self.weight,
-> 1121 ignore_index=self.ignore_index, reduction=self.reduction)
1122
1123
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
2822 if size_average is not None or reduce is not None:
2823 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2824 return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
2825
2826
IndexError: Target 6 is out of bounds.
This seems like it should be a simple fix. Do I need to adjust something in the model architecture to allow it to accept 9 labels? Or do I need to one-hot encode my labels? If so, is there a prebuilt solution to do this in the pipeline?
You need to define num_labels=9 when loading the model:
hf_model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=9)
The default value is 2, which suits the first use case, but breaks once you switch to 9 classes.
Note that the library explicitly says that the classifier (which generates the .logits you are interested in) is randomly initialized:
Some weights of DistilBertForSequenceClassification were not initialized from the model checkpoint at distilbert-base-uncased and are newly initialized: ['classifier.bias', 'classifier.weight', 'pre_classifier.weight', 'pre_classifier.bias']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
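As a quick sanity check, here is a minimal sketch (reusing the model name from above) confirming that the classification head now has nine outputs, which is what lets CrossEntropyLoss accept targets 0 through 8:

from transformers import AutoTokenizer, AutoModelForSequenceClassification

hf_tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
hf_model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=9)

enc = hf_tokenizer("a quick shape check", return_tensors="pt")
logits = hf_model(**enc).logits
print(logits.shape)  # torch.Size([1, 9]) -> one score per class, so "Target 6" is now in bounds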

Fine-tuning BERT on next sentence prediction task

I am trying to fine-tune BERT using the Huggingface library on the next sentence prediction task. I looked at the tutorial and I am trying to use DataCollatorForNextSentencePrediction and TextDatasetForNextSentencePrediction. When I do, I get the following error. I have provided my code below.
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-18-7678758b2c9c> in <module>()
56 train(bert_model,bert_tokenizer,train_data_set_path)
57 #prepare_data_set(bert_tokenizer)
---> 58 main()
9 frames
<ipython-input-18-7678758b2c9c> in main()
54 bert_model = BertForNextSentencePrediction.from_pretrained("bert-base-cased")
55 train_data_set_path = "/content/drive/My Drive/next_sentence/line_data_set_file.txt"
---> 56 train(bert_model,bert_tokenizer,train_data_set_path)
57 #prepare_data_set(bert_tokenizer)
58 main()
<ipython-input-18-7678758b2c9c> in train(bert_model, bert_tokenizer, path, eval_path)
47
48 )
---> 49 trainer.train()
50 trainer.save_model(out_dir)
51 def main():
/usr/local/lib/python3.6/dist-packages/transformers/trainer.py in train(self, model_path, trial)
697
698 epoch_pbar = tqdm(epoch_iterator, desc="Iteration", disable=disable_tqdm)
--> 699 for step, inputs in enumerate(epoch_iterator):
700
701 # Skip past any already trained steps if resuming training
/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in __next__(self)
361
362 def __next__(self):
--> 363 data = self._next_data()
364 self._num_yielded += 1
365 if self._dataset_kind == _DatasetKind.Iterable and \
/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in _next_data(self)
401 def _next_data(self):
402 index = self._next_index() # may raise StopIteration
--> 403 data = self._dataset_fetcher.fetch(index) # may raise StopIteration
404 if self._pin_memory:
405 data = _utils.pin_memory.pin_memory(data)
/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/fetch.py in fetch(self, possibly_batched_index)
45 else:
46 data = self.dataset[possibly_batched_index]
---> 47 return self.collate_fn(data)
/usr/local/lib/python3.6/dist-packages/transformers/data/data_collator.py in __call__(self, examples)
356 for i, doc in enumerate(examples):
357 input_id, segment_id, attention_mask, label = self.create_examples_from_document(doc, i, examples)
--> 358 input_ids.extend(input_id)
359 segment_ids.extend(segment_id)
360 attention_masks.extend(attention_mask)
/usr/local/lib/python3.6/dist-packages/transformers/data/data_collator.py in create_examples_from_document(self, document, doc_index, examples)
444 random_document = examples[random_document_index]
445 random_start = random.randint(0, len(random_document) - 1)
--> 446 for j in range(random_start, len(random_document)):
447 tokens_b.extend(random_document[j])
448 if len(tokens_b) >= target_b_length:
/usr/lib/python3.6/random.py in randint(self, a, b)
219 """
220
--> 221 return self.randrange(a, b+1)
222
223 def _randbelow(self, n, int=int, maxsize=1<<BPF, type=type,
/usr/lib/python3.6/random.py in randrange(self, start, stop, step, _int)
197 return istart + self._randbelow(width)
198 if step == 1:
--> 199 raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width))
200
201 # Non-unit step argument supplied.
ValueError: empty range for randrange() (0,0, 0)
def train(bert_model, bert_tokenizer, path, eval_path=None):
    out_dir = "/content/drive/My Drive/next_sentence/"
    training_args = TrainingArguments(
        output_dir=out_dir,
        overwrite_output_dir=True,
        num_train_epochs=1,
        per_device_train_batch_size=30,
        save_steps=10000,
        save_total_limit=2,
    )
    data_collator = DataCollatorForNextSentencePrediction(
        tokenizer=bert_tokenizer, mlm=False, block_size=512, nsp_probability=0.5
    )
    dataset = TextDatasetForNextSentencePrediction(
        tokenizer=bert_tokenizer,
        file_path=path,
        block_size=512,
    )
    trainer = Trainer(
        model=bert_model,
        args=training_args,
        train_dataset=dataset,
        data_collator=data_collator,
    )
    trainer.train()
    trainer.save_model(out_dir)

def main():
    print("Running main")
    bert_tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
    bert_model = BertForNextSentencePrediction.from_pretrained("bert-base-cased")
    train_data_set_path = "/content/drive/My Drive/next_sentence/line_data_set_file.txt"
    train(bert_model, bert_tokenizer, train_data_set_path)
    # prepare_data_set(bert_tokenizer)

main()

Tensorflow: Error when trying transfer learning: Invalid JPEG data or crop window

I am trying to shape my own custom image dataset into the correct input shape for the pretrained MobileNet model on Tensorflow using their tutorial here.
My code:
batch_size = 256
epochs = 15
IMG_HEIGHT = 160
IMG_WIDTH = 160
BATCH_SIZE = 256
SHUFFLE_BUFFER_SIZE = 1000

traindir = pathlib.Path('/train')
valdir = pathlib.Path('/validation')

list_ds = tf.data.Dataset.list_files(str(traindir/'*/*'))
val_list_ds = tf.data.Dataset.list_files(str(valdir/'*/*'))

CLASS_NAMES = np.array([item.name for item in valdir.glob('*') if item.name != "LICENSE.txt"])

def get_label(file_path):
    # convert the path to a list of path components
    parts = tf.strings.split(file_path, os.path.sep)
    # The second to last is the class-directory
    return parts[-2] == CLASS_NAMES

def decode_img(img):
    # convert the compressed string to a 3D uint8 tensor
    img = tf.image.decode_jpeg(img, channels=3)
    # Use `convert_image_dtype` to convert to floats in the [0,1] range.
    img = tf.image.convert_image_dtype(img, tf.float32)
    # resize the image to the desired size.
    return tf.image.resize(img, [IMG_HEIGHT, IMG_WIDTH])

def process_path(file_path):
    label = get_label(file_path)
    # load the raw data from the file as a string
    img = tf.io.read_file(file_path)
    img = decode_img(img)
    return img, label

# Set `num_parallel_calls` so multiple images are loaded/processed in parallel.
labeled_ds = list_ds.map(process_path, num_parallel_calls=5)
labeled_val_ds = val_list_ds.map(process_path, num_parallel_calls=5)

train_batches = labeled_ds.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE)
validation_batches = labeled_val_ds.batch(BATCH_SIZE)

for image_batch, label_batch in train_batches.take(1):
    pass

image_batch.shape
After this I continue with the TF tutorial on transfer learning here. However, I ran into the following problem, where I suspect that a JPEG image is corrupted or that there is a problem with the iterator:
Epoch 1/10
21/330 [>.............................] - ETA: 14:02 - loss: 3.9893 - accuracy: 0.0326
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-87-11afdc6d5aef> in <module>
1 history = model.fit(train_batches,
2 epochs=initial_epochs,
----> 3 validation_data=validation_batches)
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\keras\engine\training.py in _method_wrapper(self, *args, **kwargs)
64 def _method_wrapper(self, *args, **kwargs):
65 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
---> 66 return method(self, *args, **kwargs)
67
68 # Running inside `run_distribute_coordinator` already.
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
846 batch_size=batch_size):
847 callbacks.on_train_batch_begin(step)
--> 848 tmp_logs = train_function(iterator)
849 # Catch OutOfRangeError for Datasets of unknown size.
850 # This blocks until the batch has finished executing.
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
578 xla_context.Exit()
579 else:
--> 580 result = self._call(*args, **kwds)
581
582 if tracing_count == self._get_tracing_count():
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
609 # In this case we have created variables on the first call, so we run the
610 # defunned version which is guaranteed to never create variables.
--> 611 return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
612 elif self._stateful_fn is not None:
613 # Release the lock early so that multiple threads can perform the call
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\function.py in __call__(self, *args, **kwargs)
2418 with self._lock:
2419 graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
-> 2420 return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
2421
2422 #property
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\function.py in _filtered_call(self, args, kwargs)
1663 if isinstance(t, (ops.Tensor,
1664 resource_variable_ops.BaseResourceVariable))),
-> 1665 self.captured_inputs)
1666
1667 def _call_flat(self, args, captured_inputs, cancellation_manager=None):
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\function.py in _call_flat(self, args, captured_inputs, cancellation_manager)
1744 # No tape is watching; skip to running the function.
1745 return self._build_call_outputs(self._inference_function.call(
-> 1746 ctx, args, cancellation_manager=cancellation_manager))
1747 forward_backward = self._select_forward_and_backward_functions(
1748 args,
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\function.py in call(self, ctx, args, cancellation_manager)
596 inputs=args,
597 attrs=attrs,
--> 598 ctx=ctx)
599 else:
600 outputs = execute.execute_with_cancellation(
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
58 ctx.ensure_initialized()
59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
62 if name is not None:
InvalidArgumentError: 2 root error(s) found.
(0) Invalid argument: Invalid JPEG data or crop window, data size 34228
[[{{node DecodeJpeg}}]]
[[IteratorGetNext]]
(1) Invalid argument: Invalid JPEG data or crop window, data size 34228
[[{{node DecodeJpeg}}]]
[[IteratorGetNext]]
[[IteratorGetNext/_4]]
0 successful operations.
0 derived errors ignored. [Op:__inference_train_function_30787]
Function call stack:
train_function -> train_function
Thank you for your time!
Edit: After re-running the code a few times, it produces the same error with different data sizes, like 16384...
Edit:
Yes, the problem lies with the fact that some .jpeg files are actually .png files in disguise, or are just plain corrupted. I highly recommend checking data integrity before training any model with the data.
I faced a similar problem: there is a problem in some of your training data. You can use the code below to check which JPEG images are corrupted and delete them.
from struct import unpack
from tqdm import tqdm
import os
import os.path as osp

marker_mapping = {
    0xffd8: "Start of Image",
    0xffe0: "Application Default Header",
    0xffdb: "Quantization Table",
    0xffc0: "Start of Frame",
    0xffc4: "Define Huffman Table",
    0xffda: "Start of Scan",
    0xffd9: "End of Image"
}

class JPEG:
    def __init__(self, image_file):
        with open(image_file, 'rb') as f:
            self.img_data = f.read()

    def decode(self):
        # Walk the JPEG marker segments; raises on malformed data.
        data = self.img_data
        while True:
            marker, = unpack(">H", data[0:2])
            # print(marker_mapping.get(marker))
            if marker == 0xffd8:
                data = data[2:]
            elif marker == 0xffd9:
                return
            elif marker == 0xffda:
                data = data[-2:]
            else:
                lenchunk, = unpack(">H", data[2:4])
                data = data[2 + lenchunk:]
            if len(data) == 0:
                break

root_img = '/train'  # the original answer left this undefined: set it to your image directory
images = os.listdir(root_img)  # likewise assumed: the images sit directly under root_img

bads = []
for img in tqdm(images):
    image = osp.join(root_img, img)
    image = JPEG(image)
    try:
        image.decode()
    except Exception:
        bads.append(img)

for name in bads:
    os.remove(osp.join(root_img, name))
I used yasoob's script to decode the JPEG images.
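For a shorter check along the same lines, here is a sketch using Pillow's built-in verification instead of a hand-rolled JPEG parser (the directory path and the *.jpg pattern are placeholders to adapt):

from pathlib import Path
from PIL import Image

root = Path('/train')  # placeholder: your image directory
for path in root.rglob('*.jpg'):
    try:
        with Image.open(path) as im:
            if im.format != 'JPEG':
                print(f'{path} is {im.format} in disguise')
            im.verify()  # raises on truncated or corrupted files
    except Exception as e:
        print(f'corrupted: {path} ({e})')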

pytorch vgg model test on one image

I've trained a VGG model; this is how I transformed the test data:
test_transform_2 = transforms.Compose([transforms.RandomResizedCrop(224),
                                       transforms.ToTensor()])
test_data = datasets.ImageFolder(test_dir, transform=test_transform_2)
The model has finished training; now I want to test it on a single image:
from scipy import misc
test_image = misc.imread('flower_data/valid/1/image_06739.jpg')
vgg16(torch.from_numpy(test_image))
Error
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-60-b83587325fea> in <module>
----> 1 vgg16(torch.from_numpy(test_image))
c:\users\sam\mydocu~1\code\envs\data-science\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
475 result = self._slow_forward(*input, **kwargs)
476 else:
--> 477 result = self.forward(*input, **kwargs)
478 for hook in self._forward_hooks.values():
479 hook_result = hook(self, input, result)
c:\users\sam\mydocu~1\code\envs\data-science\lib\site-packages\torchvision\models\vgg.py in forward(self, x)
40
41 def forward(self, x):
---> 42 x = self.features(x)
43 x = x.view(x.size(0), -1)
44 x = self.classifier(x)
c:\users\sam\mydocu~1\code\envs\data-science\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
475 result = self._slow_forward(*input, **kwargs)
476 else:
--> 477 result = self.forward(*input, **kwargs)
478 for hook in self._forward_hooks.values():
479 hook_result = hook(self, input, result)
c:\users\sam\mydocu~1\code\envs\data-science\lib\site-packages\torch\nn\modules\container.py in forward(self, input)
89 def forward(self, input):
90 for module in self._modules.values():
---> 91 input = module(input)
92 return input
93
c:\users\sam\mydocu~1\code\envs\data-science\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
475 result = self._slow_forward(*input, **kwargs)
476 else:
--> 477 result = self.forward(*input, **kwargs)
478 for hook in self._forward_hooks.values():
479 hook_result = hook(self, input, result)
c:\users\sam\mydocu~1\code\envs\data-science\lib\site-packages\torch\nn\modules\conv.py in forward(self, input)
299 def forward(self, input):
300 return F.conv2d(input, self.weight, self.bias, self.stride,
--> 301 self.padding, self.dilation, self.groups)
302
303
RuntimeError: Expected 4-dimensional input for 4-dimensional weight [64, 3, 3, 3], but got input of size [628, 500, 3] instead
I can tell I need to reshape the input, but I don't know how, given that it seems to expect the input in the form of a batch.
Your image is [h, w, 3], where 3 is the RGB channel dimension, while PyTorch expects [b, 3, h, w], where b is the batch size. So you can reshape it by calling reshaped = img.permute(2, 0, 1).unsqueeze(0). There is also a utility function for the conversion part: torchvision.transforms.ToTensor() turns an HWC numpy image into a CHW float tensor.
So in your case
tensor = torch.from_numpy(test_image).float()  # conv layers expect float input, not uint8
reshaped = tensor.permute(2, 0, 1).unsqueeze(0)
your_result = vgg16(reshaped)
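Alternatively, a sketch that reuses the test_transform_2 pipeline from the question (assuming PIL is available), so the single image gets the same preprocessing the model saw on the test set:

from PIL import Image

img = Image.open('flower_data/valid/1/image_06739.jpg')
batch = test_transform_2(img).unsqueeze(0)  # ToTensor yields [3, 224, 224]; unsqueeze adds the batch dim
your_result = vgg16(batch)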
