multiclass sequence classification with fastai and huggingface - python

I am looking to implement DistilBERT via fastai and huggingface for a multiclass sequence classification problem. I found a useful tutorial that gave a good example of how to do this with binary classification. The code is below:
# !pip install torch==1.9.0
# !pip install torchtext==0.10
# !pip install transformers==4.7
# !pip install fastai==2.4
from fastai.text.all import *
from sklearn.model_selection import train_test_split
import pandas as pd
import glob
from transformers import AutoTokenizer, AutoModelForSequenceClassification
hf_tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
hf_model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")
"""
train_df and valid_df look like this:

      label  text
4240      5  whoa interesting.
13        7  you could you could we just
4639      4  you set the goal,
28        1  because ive already agreed to that
66        8  oh hey freshman thats you gona need
"""
print(list(train_df.label.value_counts().index))
"""
[4, 1, 5, 6, 7, 0, 2, 3, 8]
"""
class HF_Dataset(torch.utils.data.Dataset):
    def __init__(self, df, hf_tokenizer):
        self.df = df
        self.hf_tokenizer = hf_tokenizer
        self.label_map = {
            0: 0,
            1: 0,
            2: 0,
            3: 0,
            4: 1,
            5: 1,
            6: 1,
            7: 1,
            8: 1
        }

    def __len__(self):
        return len(self.df)

    def decode(self, token_ids):
        # join the per-token decodings back into a string
        return ' '.join([self.hf_tokenizer.decode(x) for x in token_ids])

    def decode_to_original(self, token_ids):
        return self.hf_tokenizer.decode(token_ids.squeeze())

    def __getitem__(self, index):
        label, text = self.df.iloc[index]
        label = self.label_map[label]
        label = torch.tensor(label)
        tokenizer_output = self.hf_tokenizer(text, return_tensors="pt", padding='max_length',
                                             truncation=True, max_length=512)
        tokenizer_output['input_ids'].squeeze_()
        tokenizer_output['attention_mask'].squeeze_()
        return tokenizer_output, label

train_dataset = HF_Dataset(train_df, hf_tokenizer)
valid_dataset = HF_Dataset(valid_df, hf_tokenizer)
train_dl = DataLoader(train_dataset, bs=16, shuffle=True)
valid_dl = DataLoader(valid_dataset, bs=16)
dls = DataLoaders(train_dl, valid_dl)

class HF_Model(nn.Module):
    def __init__(self, hf_model):
        super().__init__()
        self.hf_model = hf_model

    def forward(self, tokenizer_outputs):
        model_output = self.hf_model(**tokenizer_outputs)
        return model_output.logits

model = HF_Model(hf_model)
# Manually popping the model onto the gpu since the data is in a dictionary format
# (doesn't automatically place model + data on gpu otherwise)
learn = Learner(dls, model, loss_func=nn.CrossEntropyLoss(), metrics=[accuracy])
learn.fit_one_cycle(3, 1e-4)
This works fine. However, I had to map my multiclass labels down to 2 labels to make it work. I actually have 9 classes, so I tried adjusting the label mapping scheme in the HF_Dataset class to match my actual labels, like below:
class HF_Dataset(torch.utils.data.Dataset):
    def __init__(self, df, hf_tokenizer):
        self.df = df
        self.hf_tokenizer = hf_tokenizer
        self.label_map = {
            0: 0,
            1: 1,
            2: 2,
            3: 3,
            4: 4,
            5: 5,
            6: 6,
            7: 7,
            8: 8
        }

    def __len__(self):
        return len(self.df)

    def decode(self, token_ids):
        return ' '.join([self.hf_tokenizer.decode(x) for x in token_ids])

    def decode_to_original(self, token_ids):
        return self.hf_tokenizer.decode(token_ids.squeeze())

    def __getitem__(self, index):
        label, text = self.df.iloc[index]
        label = self.label_map[label]
        label = torch.tensor(label)
        tokenizer_output = self.hf_tokenizer(text, return_tensors="pt", padding='max_length',
                                             truncation=True, max_length=512)
        tokenizer_output['input_ids'].squeeze_()
        tokenizer_output['attention_mask'].squeeze_()
        return tokenizer_output, label
Every line works until learn.fit_one_cycle.
Here is the full stack trace from this line:
0.00% [0/3 00:00<00:00]
epoch train_loss valid_loss accuracy time
0.00% [0/519 00:00<00:00]
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-21-0ec2ff9e12e1> in <module>
----> 1 learn.fit_one_cycle(3, 1e-4)
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/callback/schedule.py in fit_one_cycle(self, n_epoch, lr_max, div, div_final, pct_start, wd, moms, cbs, reset_opt)
111 scheds = {'lr': combined_cos(pct_start, lr_max/div, lr_max, lr_max/div_final),
112 'mom': combined_cos(pct_start, *(self.moms if moms is None else moms))}
--> 113 self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
114
115 # Cell
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in fit(self, n_epoch, lr, wd, cbs, reset_opt)
219 self.opt.set_hypers(lr=self.lr if lr is None else lr)
220 self.n_epoch = n_epoch
--> 221 self._with_events(self._do_fit, 'fit', CancelFitException, self._end_cleanup)
222
223 def _end_cleanup(self): self.dl,self.xb,self.yb,self.pred,self.loss = None,(None,),(None,),None,None
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
161
162 def _with_events(self, f, event_type, ex, final=noop):
--> 163 try: self(f'before_{event_type}'); f()
164 except ex: self(f'after_cancel_{event_type}')
165 self(f'after_{event_type}'); final()
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in _do_fit(self)
210 for epoch in range(self.n_epoch):
211 self.epoch=epoch
--> 212 self._with_events(self._do_epoch, 'epoch', CancelEpochException)
213
214 def fit(self, n_epoch, lr=None, wd=None, cbs=None, reset_opt=False):
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
161
162 def _with_events(self, f, event_type, ex, final=noop):
--> 163 try: self(f'before_{event_type}'); f()
164 except ex: self(f'after_cancel_{event_type}')
165 self(f'after_{event_type}'); final()
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in _do_epoch(self)
204
205 def _do_epoch(self):
--> 206 self._do_epoch_train()
207 self._do_epoch_validate()
208
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in _do_epoch_train(self)
196 def _do_epoch_train(self):
197 self.dl = self.dls.train
--> 198 self._with_events(self.all_batches, 'train', CancelTrainException)
199
200 def _do_epoch_validate(self, ds_idx=1, dl=None):
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
161
162 def _with_events(self, f, event_type, ex, final=noop):
--> 163 try: self(f'before_{event_type}'); f()
164 except ex: self(f'after_cancel_{event_type}')
165 self(f'after_{event_type}'); final()
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in all_batches(self)
167 def all_batches(self):
168 self.n_iter = len(self.dl)
--> 169 for o in enumerate(self.dl): self.one_batch(*o)
170
171 def _do_one_batch(self):
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in one_batch(self, i, b)
192 b = self._set_device(b)
193 self._split(b)
--> 194 self._with_events(self._do_one_batch, 'batch', CancelBatchException)
195
196 def _do_epoch_train(self):
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in _with_events(self, f, event_type, ex, final)
161
162 def _with_events(self, f, event_type, ex, final=noop):
--> 163 try: self(f'before_{event_type}'); f()
164 except ex: self(f'after_cancel_{event_type}')
165 self(f'after_{event_type}'); final()
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/fastai/learner.py in _do_one_batch(self)
173 self('after_pred')
174 if len(self.yb):
--> 175 self.loss_grad = self.loss_func(self.pred, *self.yb)
176 self.loss = self.loss_grad.clone()
177 self('after_loss')
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/nn/modules/loss.py in forward(self, input, target)
1119 def forward(self, input: Tensor, target: Tensor) -> Tensor:
1120 return F.cross_entropy(input, target, weight=self.weight,
-> 1121 ignore_index=self.ignore_index, reduction=self.reduction)
1122
1123
~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
2822 if size_average is not None or reduce is not None:
2823 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2824 return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
2825
2826
IndexError: Target 6 is out of bounds.
This seems like it should be a simple fix. Do I need to adjust something in the model architecture so it accepts 9 labels? Or do I need to one-hot encode my labels? If so, is there a prebuilt solution to do this in the pipeline?

You need to define num_labels=9 when loading the model:
hf_model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=9)
The default value is 2, which suits the binary use-case in the tutorial, but breaks as soon as you change the number of classes.
Note that the library explicitly warns that the classifier head (which produces the .logits you are interested in) is randomly initialized:
Some weights of DistilBertForSequenceClassification were not initialized from the model checkpoint at distilbert-base-uncased and are newly initialized: ['classifier.bias', 'classifier.weight', 'pre_classifier.weight', 'pre_classifier.bias']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
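A minimal sketch of the corrected loading (everything else in the pipeline above can stay the same, and the label_map becomes the identity mapping):

from transformers import AutoTokenizer, AutoModelForSequenceClassification

hf_tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
# num_labels=9 sizes the freshly initialized classification head for 9 classes
hf_model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased", num_labels=9)

# Quick sanity check: the logits should now be (batch, 9),
# so nn.CrossEntropyLoss will accept targets 0..8.
enc = hf_tokenizer("whoa interesting.", return_tensors="pt")
print(hf_model(**enc).logits.shape)  # torch.Size([1, 9])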

Related

How do I resolve the error "IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)"?

I tried my best to make a minimal reproducible example: there's an issue in my train() function where, on the line output = model(data), I get the error IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1). I also get an error before that, but I cannot decipher its meaning. I've included the full traceback in this message.
I've seen other users post about the same error message, but each one has a different solution; I used the debugger to look into data.location but I'm still unable to resolve the problem. I'm using PySyft v0.2.9.
import torch
import syft as sy
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from auto_esn.esn.esn import ESNBase
import torch.nn.functional as F
import torch.optim as optim
from auto_esn.esn.esn import GroupedDeepESN
from auto_esn.esn.reservoir.util import NRMSELoss

class CarHackingDataset(Dataset):
    """
    Loading the Car-Hacking Dataset from
    https://ocslab.hksecurity.net/Datasets/car-hacking-dataset

    Args:
        csv_file: A path to the dataset file which has the extension CSV.
        root_dir: The directory of the parent folder of the dataset.
        transform (callable, optional): Optional transform to be applied on a sample.
    """
    def __init__(self, csv_file: str, root_dir: str, transform=None):
        self.car_hacking_frame = pd.read_csv(csv_file)[:10000]
        self.root_dir = root_dir
        self.transform = transform

    def __getitem__(self, idx):
        '''Grabs relevant features from the dataset.'''
        if torch.is_tensor(idx):
            idx = idx.tolist()

        features = ['Timestamp', 'DLC', 'CAN_ID', 'Data']
        X_train = self.car_hacking_frame.loc[:, features].values
        X_train_scaled = StandardScaler().fit_transform(X_train)
        X_train_scaled = torch.as_tensor(X_train_scaled)

        # It looks like it's a bad idea to encode features.
        # https://stackoverflow.com/questions/61217713/labelencoder-for-categorical-features
        class_le = LabelEncoder()
        target = class_le.fit_transform(self.car_hacking_frame['Flag'].values)
        target = torch.as_tensor(target)

        return X_train_scaled[idx], target[idx]

    def __len__(self):
        return len(self.car_hacking_frame)

train_dataset = CarHackingDataset(csv_file='/content/car_hacking_data/clean_fuzzy_dataset.csv',
                                  root_dir='/content/car_hacking_data')

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

hook = sy.TorchHook(torch)
car1 = sy.VirtualWorker(hook, id="car1")
car2 = sy.VirtualWorker(hook, id="car2")

args = {
    'batch_size': 32,
    'epochs': 1
}

federated_train_loader = sy.FederatedDataLoader(train_dataset.federate((car1, car2)),
                                                batch_size=args['batch_size'], shuffle=True)

# Initializing the loss function, which is probably a variation of mean squared error.
nrmse = NRMSELoss()

def train(model, device, federated_train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(federated_train_loader):
        model = model.send(data.location)
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = nrmse(output, target)
        loss.backward()
        optimizer.step()
        model.get()
        if batch_idx % 10 == 0:
            loss = loss.get()
            print(f'''Train Epoch: {epoch} [{(batch_idx * args['batch_size'])}/{(len(federated_train_loader) * args['batch_size'])}'''
                  + f'''({100. * batch_idx / len(federated_train_loader):.0f}%)]\tLoss: {loss.item():.6f}''')

model = GroupedDeepESN().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01)

for epoch in range(1, args['batch_size'] + 1):
    train(model, device, federated_train_loader, optimizer, epoch)
Traceback of the error message:
---------------------------------------------------------------------------
PureFrameworkTensorFoundError Traceback (most recent call last)
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
336 new_args, new_kwargs, new_type, args_type = hook_args.unwrap_args_from_function(
--> 337 cmd, args_, kwargs_, return_args_type=True
338 )
20 frames
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook_args.py in unwrap_args_from_function(attr, args_, kwargs_, return_args_type)
166 # Run it
--> 167 new_args = args_hook_function(args_)
168
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook_args.py in <lambda>(x)
355
--> 356 return lambda x: f(lambdas, x)
357
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook_args.py in three_fold(lambdas, args_, **kwargs)
534 lambdas[0](args_[0], **kwargs),
--> 535 lambdas[1](args_[1], **kwargs),
536 lambdas[2](args_[2], **kwargs),
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook_args.py in <lambda>(i)
330 # Last if not, rule is probably == 1 so use type to return the right transformation.
--> 331 else lambda i: forward_func[type(i)](i)
332 for a, r in zip(args_, rules) # And do this for all the args / rules provided
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/hook/hook_args.py in <lambda>(i)
23 if hasattr(i, "child")
---> 24 else (_ for _ in ()).throw(PureFrameworkTensorFoundError),
25 torch.nn.Parameter: lambda i: i.child
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/hook/hook_args.py in <genexpr>(.0)
23 if hasattr(i, "child")
---> 24 else (_ for _ in ()).throw(PureFrameworkTensorFoundError),
25 torch.nn.Parameter: lambda i: i.child
PureFrameworkTensorFoundError:
During handling of the above exception, another exception occurred:
IndexError Traceback (most recent call last)
<ipython-input-6-c9ac87b98598> in <module>
24
25 for epoch in range(1, args['batch_size'] + 1):
---> 26 train(model, device, federated_train_loader, optimizer, epoch)
<ipython-input-6-c9ac87b98598> in train(model, device, federated_train_loader, optimizer, epoch)
7 data, target = data.to(device), target.to(device)
8 optimizer.zero_grad()
----> 9 output = model(data)
10
11 loss = nrmse(output, target)
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
--> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/dist-packages/auto_esn/esn/esn.py in forward(self, input)
31 mapped_input = self.reservoir(input)
32
---> 33 return self.readout(mapped_input)
34
35 def reset_hidden(self):
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
--> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/dist-packages/auto_esn/esn/readout/svr_readout.py in forward(self, input)
10
11 def forward(self, input: Tensor) -> Tensor:
---> 12 return self.readout(input)
13
14 def fit(self, input: Tensor, target: Tensor):
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
--> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/linear.py in forward(self, input)
85
86 def forward(self, input):
---> 87 return F.linear(input, self.weight, self.bias)
88
89 def extra_repr(self):
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook.py in overloaded_func(*args, **kwargs)
333 handle_func_command = syft.framework.Tensor.handle_func_command
334
--> 335 response = handle_func_command(command)
336
337 return response
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
378 # in the execute_command function
379 try:
--> 380 response = cls._get_response(cmd, args_, kwargs_)
381 except AttributeError:
382 # Change the library path to avoid errors on layers like AvgPooling
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/tensors/interpreters/native.py in _get_response(cmd, args_, kwargs_)
412
413 if isinstance(args_, tuple):
--> 414 response = command_method(*args_, **kwargs_)
415 else:
416 response = command_method(args_, **kwargs_)
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in linear(input, weight, bias)
1368 if input.dim() == 2 and bias is not None:
1369 # fused op is marginally faster
-> 1370 ret = torch.addmm(bias, input, weight.t())
1371 else:
1372 output = input.matmul(weight.t())
/usr/local/lib/python3.7/dist-packages/syft/generic/frameworks/hook/hook.py in overloaded_func(*args, **kwargs)
333 handle_func_command = syft.framework.Tensor.handle_func_command
334
--> 335 response = handle_func_command(command)
336
337 return response
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
378 # in the execute_command function
379 try:
--> 380 response = cls._get_response(cmd, args_, kwargs_)
381 except AttributeError:
382 # Change the library path to avoid errors on layers like AvgPooling
/usr/local/lib/python3.7/dist-packages/syft/frameworks/torch/tensors/interpreters/native.py in _get_response(cmd, args_, kwargs_)
412
413 if isinstance(args_, tuple):
--> 414 response = command_method(*args_, **kwargs_)
415 else:
416 response = command_method(args_, **kwargs_)
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
I thought it might also be important to include the shape of my data inside the train function's loop over the federated data: Data shape: torch.Size([32, 4]), Target shape: torch.Size([32]).
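For what it's worth, this particular message just means that a 1-D tensor was indexed along dimension 1, i.e. something in the pipeline is one-dimensional where the code expects at least two dimensions. A minimal sketch in plain PyTorch (no syft involved) that reproduces the same IndexError:

import torch

t = torch.randn(32)  # 1-D tensor: the only valid dims are 0 and -1
t.mean(dim=1)        # IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)

So a first debugging step would be to check the dimensionality of every tensor that reaches the failing F.linear call in the readout layer.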

AttributeError: 'list' object has no attribute 'view' while training network

I have a PyTorch Lightning model which I am trying to train, but I am getting the error AttributeError: 'list' object has no attribute 'view'. I don't know why I am getting this.
sample data
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, random_split

data = np.random.rand(400, 46, 55, 46)
ds = TensorDataset(torch.from_numpy(data))
train_ds, valid_ds = random_split(ds, (350, 50))
train_dl, valid_dl = DataLoader(train_ds), DataLoader(valid_ds)
model
import pytorch_lightning as pl
import torch.nn as nn
import torch.nn.functional as F

class AutoEncoder(pl.LightningModule):
    def __init__(self):
        super(AutoEncoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(46*55*46, 400),
            nn.Tanh())
        self.decoder = nn.Sequential(
            nn.Linear(400, 46*55*46),
            nn.Sigmoid())

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
        return optimizer

    def training_step(self, train_batch, batch_idx):
        x = train_batch
        x = x.view(x.size(0), -1)
        z = self.encoder(x)
        x_hat = self.decoder(z)
        loss = F.mse_loss(x_hat, x)
        self.log('train_loss', loss)
        return loss

    def validation_step(self, val_batch, batch_idx):
        x = val_batch
        x = x.view(x.size(0), -1)
        z = self.encoder(x)
        x_hat = self.decoder(z)
        loss = F.mse_loss(x_hat, x)
        self.log('val_loss', loss)

model = AutoEncoder()
Error
AttributeError Traceback (most recent call last)
<ipython-input-18-11e725b78922> in <module>()
1 trainer = pl.Trainer()
----> 2 trainer.fit(model, train_dl, valid_dl)
16 frames
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloaders, val_dataloaders, datamodule, train_dataloader, ckpt_path)
739 train_dataloaders = train_dataloader
740 self._call_and_handle_interrupt(
--> 741 self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
742 )
743
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _call_and_handle_interrupt(self, trainer_fn, *args, **kwargs)
683 """
684 try:
--> 685 return trainer_fn(*args, **kwargs)
686 # TODO: treat KeyboardInterrupt as BaseException (delete the code below) in v1.7
687 except KeyboardInterrupt as exception:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
775 # TODO: ckpt_path only in v1.7
776 ckpt_path = ckpt_path or self.resume_from_checkpoint
--> 777 self._run(model, ckpt_path=ckpt_path)
778
779 assert self.state.stopped
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run(self, model, ckpt_path)
1197
1198 # dispatch `start_training` or `start_evaluating` or `start_predicting`
-> 1199 self._dispatch()
1200
1201 # plugin will finalized fitting (e.g. ddp_spawn will load trained model)
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _dispatch(self)
1277 self.training_type_plugin.start_predicting(self)
1278 else:
-> 1279 self.training_type_plugin.start_training(self)
1280
1281 def run_stage(self):
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in start_training(self, trainer)
200 def start_training(self, trainer: "pl.Trainer") -> None:
201 # double dispatch to initiate the training loop
--> 202 self._results = trainer.run_stage()
203
204 def start_evaluating(self, trainer: "pl.Trainer") -> None:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in run_stage(self)
1287 if self.predicting:
1288 return self._run_predict()
-> 1289 return self._run_train()
1290
1291 def _pre_training_routine(self):
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run_train(self)
1309 self.progress_bar_callback.disable()
1310
-> 1311 self._run_sanity_check(self.lightning_module)
1312
1313 # enable train mode
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run_sanity_check(self, ref_model)
1373 # run eval step
1374 with torch.no_grad():
-> 1375 self._evaluation_loop.run()
1376
1377 self.call_hook("on_sanity_check_end")
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/base.py in run(self, *args, **kwargs)
143 try:
144 self.on_advance_start(*args, **kwargs)
--> 145 self.advance(*args, **kwargs)
146 self.on_advance_end()
147 self.restarting = False
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/dataloader/evaluation_loop.py in advance(self, *args, **kwargs)
108 dl_max_batches = self._max_batches[dataloader_idx]
109
--> 110 dl_outputs = self.epoch_loop.run(dataloader, dataloader_idx, dl_max_batches, self.num_dataloaders)
111
112 # store batch level output per dataloader
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/base.py in run(self, *args, **kwargs)
143 try:
144 self.on_advance_start(*args, **kwargs)
--> 145 self.advance(*args, **kwargs)
146 self.on_advance_end()
147 self.restarting = False
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py in advance(self, data_fetcher, dataloader_idx, dl_max_batches, num_dataloaders)
120 # lightning module methods
121 with self.trainer.profiler.profile("evaluation_step_and_end"):
--> 122 output = self._evaluation_step(batch, batch_idx, dataloader_idx)
123 output = self._evaluation_step_end(output)
124
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py in _evaluation_step(self, batch, batch_idx, dataloader_idx)
215 self.trainer.lightning_module._current_fx_name = "validation_step"
216 with self.trainer.profiler.profile("validation_step"):
--> 217 output = self.trainer.accelerator.validation_step(step_kwargs)
218
219 return output
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/accelerators/accelerator.py in validation_step(self, step_kwargs)
237 """
238 with self.precision_plugin.val_step_context():
--> 239 return self.training_type_plugin.validation_step(*step_kwargs.values())
240
241 def test_step(self, step_kwargs: Dict[str, Union[Any, int]]) -> Optional[STEP_OUTPUT]:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in validation_step(self, *args, **kwargs)
217
218 def validation_step(self, *args, **kwargs):
--> 219 return self.model.validation_step(*args, **kwargs)
220
221 def test_step(self, *args, **kwargs):
<ipython-input-12-16d602e3e66b> in validation_step(self, val_batch, batch_idx)
29 def validation_step(self, val_batch, batch_idx):
30 x = val_batch
---> 31 x = x.view(x.size(0), -1)
32 z = self.encoder(x)
33 x_hat = self.decoder(z)
AttributeError: 'list' object has no attribute 'view'
As indicated by the error log, it is in this line:
29 def validation_step(self, val_batch, batch_idx):
30 x = val_batch
31 x = x.view(x.size(0), -1) # here is your problem
x (i.e. val_batch) is a list object, and a list does not have the attribute view() since it is not a tensor. If you want to convert a list to a tensor, you can simply use:
x = torch.tensor(val_batch)
Or you can convert val_batch to a tensor earlier in your code, while loading and processing the data.
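Since the DataLoader in the question wraps a TensorDataset, each collated batch actually arrives as a list containing a single tensor, so (a sketch under that assumption) unpacking the batch also avoids the error:

def validation_step(self, val_batch, batch_idx):
    # TensorDataset yields a tuple per sample, so the collated batch
    # is a list whose first (and only) element is the input tensor
    x = val_batch[0]
    x = x.view(x.size(0), -1)
    z = self.encoder(x)
    x_hat = self.decoder(z)
    loss = F.mse_loss(x_hat, x)
    self.log('val_loss', loss)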

AttributeError: 'list' object has no attribute 'rank' When converting Keras Model To CoreML

I am trying to convert my Keras model, which contains GRU layers and generates Shakespeare-style text, to a Core ML model. However, when I try to convert it, I get the error "AttributeError: 'list' object has no attribute 'rank'". I followed the instructions on this website. Here is my code:
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import coremltools as ct
model = keras.models.load_model("checkpointshakespear.h5")
mlmodel = ct.convert(model)
This is my model:
model = keras.models.Sequential([
    keras.layers.GRU(128, return_sequences=True, input_shape=[None, max_id],
                     dropout=0.2, recurrent_dropout=0.2),
    keras.layers.GRU(128, return_sequences=True,
                     dropout=0.2, recurrent_dropout=0.2),
    keras.layers.TimeDistributed(keras.layers.Dense(max_id,
                                                    activation="softmax"))
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam")
This is the full error:
Running TensorFlow Graph Passes: 100%|██████████| 5/5 [00:00<00:00, 9.78 passes/s]
Converting Frontend ==> MIL Ops: 0%| | 0/84 [00:00<?, ? ops/s]
Converting Frontend ==> MIL Ops: 0%| | 0/95 [00:00<?, ? ops/s]
Converting Frontend ==> MIL Ops: 0%| | 0/84 [00:00<?, ? ops/s]
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-2-8240f4ae502a> in <module>
6 import coremltools as ct
7 model = keras.models.load_model("checkpointshakespear.h5")
----> 8 mlmodel = ct.convert(model)
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/_converters_entry.py in convert(model, source, inputs, outputs, classifier_config, minimum_deployment_target, **kwargs)
256 outputs=outputs,
257 classifier_config=classifier_config,
--> 258 **kwargs
259 )
260
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/converter.py in _convert(model, convert_from, convert_to, converter_registry, **kwargs)
118 backend_converter = backend_converter_type()
119
--> 120 prog = frontend_converter(model, **kwargs)
121 common_pass(prog)
122 out = backend_converter(prog, **kwargs)
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/converter.py in __call__(self, *args, **kwargs)
50
51 tf2_loader = TF2Loader(*args, **kwargs)
---> 52 return tf2_loader.load()
53
54
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/frontend/tensorflow/load.py in load(self)
78 )
79
---> 80 program = self._program_from_tf_ssa()
81 logging.debug("program:\n{}".format(program))
82 return program
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/frontend/tensorflow2/load.py in _program_from_tf_ssa(self)
179
180 converter = TF2Converter(self._tf_ssa, **self.kwargs)
--> 181 return converter.convert()
182
183 def _populate_sub_graph_input_shapes(self, graph, graph_fns):
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/frontend/tensorflow/converter.py in convert(self)
392 for g_name in self.graph_stack[1:]:
393 self.context.add_graph(g_name, self.tfssa.functions[g_name].graph)
--> 394 self.convert_main_graph(prog, graph)
395
396 # Apply TF frontend passes on Program. These passes are different
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/frontend/tensorflow/converter.py in convert_main_graph(self, prog, graph)
337 for name in func_inputs.keys():
338 self.context.add(name, ssa_func.inputs[name])
--> 339 outputs = convert_graph(self.context, graph, self.outputs)
340 ssa_func.set_outputs(outputs)
341 prog.add_function("main", ssa_func)
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/frontend/tensorflow/convert_utils.py in convert_graph(context, graph, outputs)
179 )
180 raise NotImplementedError(msg)
--> 181 _add_op(context, node)
182
183 if len(node.outputs) > 0:
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/frontend/tensorflow2/ops.py in StatelessWhile(context, node)
98 return body_output_vars
99
--> 100 x = mb.while_loop(_cond=cond, _body=body, loop_vars=loop_vars, name=node.name)
101
102 # wraps x as tuple for get_tuple that always follow the while node.
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/mil/ops/registry.py in add_op(cls, **kwargs)
60 #classmethod
61 def add_op(cls, **kwargs):
---> 62 return cls._add_op(op_cls, **kwargs)
63
64 setattr(Builder, op_type, add_op)
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/mil/builder.py in _add_op(cls, op_cls, **kwargs)
188 new_op = op_cls(**kwargs)
189 curr_block()._insert_op_before(new_op, before_op=before_op)
--> 190 new_op.build_nested_blocks()
191 new_op.type_value_inference()
192 if len(new_op.outputs) == 1:
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/mil/ops/defs/control_flow.py in build_nested_blocks(self)
302 v.consuming_blocks = list()
303
--> 304 block, exit_vars = self.build_block(block_inputs)
305
306 # Verify exit_vars has the same types as loop_vars
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/mil/ops/defs/control_flow.py in build_block(self, block_inputs)
271 # Body func
272 body_func = self._body.val
--> 273 exit_vars = body_func(*block.inputs)
274
275 # Cond func:
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/frontend/tensorflow2/ops.py in body(*loop_vars)
94 def body(*loop_vars):
95 context.stack_func_inputs(loop_vars)
---> 96 body_output_vars = convert_graph(context, body_graph)
97 context.unstack_func_inputs()
98 return body_output_vars
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/frontend/tensorflow/convert_utils.py in convert_graph(context, graph, outputs)
179 )
180 raise NotImplementedError(msg)
--> 181 _add_op(context, node)
182
183 if len(node.outputs) > 0:
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/frontend/tensorflow/ops.py in StridedSlice(context, node)
1351 squeeze_mask,
1352 ellipsis_mask,
-> 1353 new_axis_mask,
1354 )
1355
~/opt/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/frontend/tensorflow/ops.py in _pad_mask(x, begin, end, stride, begin_mask, end_mask, squeeze_mask, ellipsis_mask, new_axis_mask)
1257 x_rank = x.rank + new_dims
1258 else:
-> 1259 x_rank = x.rank
1260
1261 def pad_array(arr, max_rank, idx, default_value):
AttributeError: 'list' object has no attribute 'rank'
Looks like the error is caused by the recurrent_dropout parameter; removing that parameter resolves it.
Also note that I have added a batch_size parameter to the first GRU layer. This is necessary because CoreML inputs for RNNs must be either rank 3 (Seq, B, C) or rank 5 (Seq, B, C, H, W).
This is the working code snippet.
import tensorflow.keras as keras
import coremltools as ct

max_id = 1000
model = keras.models.Sequential([
    keras.layers.GRU(128, return_sequences=True, input_shape=[None, max_id], batch_size=64,
                     dropout=0.2),
    keras.layers.GRU(128, return_sequences=True,
                     dropout=0.2),
    keras.layers.TimeDistributed(keras.layers.Dense(max_id,
                                                    activation="softmax"))
])
model.save('tf_keras_model.h5')
mlmodel = ct.convert('tf_keras_model.h5', source="tensorflow")
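From there, the converted model can be persisted with coremltools' standard MLModel API (the filename here is just an example, not from the original post):

mlmodel.save('shakespeare_gru.mlmodel')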

TypeError: take(): argument 'index' (position 1) must be Tensor, not numpy.ndarray

I'm new to PyTorch. I'm trying to do cross validation, and I found the skorch library, which allows users to use sklearn functions with a torch model. So, I define a neural network class:
import torch
import torch.nn as nn
from torch.autograd import Variable
from skorch import NeuralNet

torch.manual_seed(42)

class Netcross(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(5, 30)
        self.sig1 = nn.Tanh()
        #self.dout = nn.Dropout(0.2)
        self.fc2 = nn.Linear(30, 30)
        self.sig2 = nn.Sigmoid()
        self.out = nn.Linear(30, 1)
        self.out_act = nn.Sigmoid()
        #self.fc1.weight = torch.nn.Parameter(torch.rand(50,5))

    def forward(self, x):
        x = self.fc1(x)
        x = self.sig1(x)
        #x = self.dout(x)
        x = self.fc2(x)
        x = self.sig2(x)
        x = self.out(x)
        y = self.out_act(x)
        return y

crossnet1 = NeuralNet(
    Netcross,
    max_epochs=5,
    criterion=torch.nn.BCELoss,
    # user-defined callbacks
    callbacks=[epoch_acc, epoch_f1, epoch_phi],
    optimizer=torch.optim.SGD,
    optimizer__momentum=0.9,
    lr=0.85,
)

inputs = Variable(x_traintensor)
labels = Variable(y_traintensor)

crossnet1.fit(inputs, labels)
So far everything is fine: the function returns credible results without any errors. The problem appears when I try to use the GridSearchCV function:
from sklearn.model_selection import GridSearchCV

param_grid = {
    'max_epochs': [5, 10, 20],
    'lr': [0.1, 0.65, 0.8],
}

gs = GridSearchCV(estimator=crossnet1, param_grid=param_grid, refit=False, cv=3, scoring="accuracy")
gs.fit(inputs, labels)
I get the following error:
TypeError Traceback (most recent call last)
<ipython-input-41-e1f3dbd9a2b0> in <module>
3 labels1 = torch.from_numpy(np.array(labels))
4
----> 5 gs.fit(inputs1, labels1)
~\Anaconda3\lib\site-packages\sklearn\model_selection\_search.py in fit(self, X, y, groups, **fit_params)
720 return results_container[0]
721
--> 722 self._run_search(evaluate_candidates)
723
724 results = results_container[0]
~\Anaconda3\lib\site-packages\sklearn\model_selection\_search.py in _run_search(self, evaluate_candidates)
1189 def _run_search(self, evaluate_candidates):
1190 """Search all candidates in param_grid"""
-> 1191 evaluate_candidates(ParameterGrid(self.param_grid))
1192
1193
~\Anaconda3\lib\site-packages\sklearn\model_selection\_search.py in evaluate_candidates(candidate_params)
709 for parameters, (train, test)
710 in product(candidate_params,
--> 711 cv.split(X, y, groups)))
712
713 all_candidate_params.extend(candidate_params)
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in __call__(self, iterable)
915 # remaining jobs.
916 self._iterating = False
--> 917 if self.dispatch_one_batch(iterator):
918 self._iterating = self._original_iterator is not None
919
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in dispatch_one_batch(self, iterator)
757 return False
758 else:
--> 759 self._dispatch(tasks)
760 return True
761
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in _dispatch(self, batch)
714 with self._lock:
715 job_idx = len(self._jobs)
--> 716 job = self._backend.apply_async(batch, callback=cb)
717 # A job can complete so quickly than its callback is
718 # called before we get here, causing self._jobs to
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\_parallel_backends.py in apply_async(self, func, callback)
180 def apply_async(self, func, callback=None):
181 """Schedule a func to be run"""
--> 182 result = ImmediateResult(func)
183 if callback:
184 callback(result)
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\_parallel_backends.py in __init__(self, batch)
547 # Don't delay the application, to avoid keeping the input
548 # arguments in memory
--> 549 self.results = batch()
550
551 def get(self):
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in __call__(self)
223 with parallel_backend(self._backend, n_jobs=self._n_jobs):
224 return [func(*args, **kwargs)
--> 225 for func, args, kwargs in self.items]
226
227 def __len__(self):
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in <listcomp>(.0)
223 with parallel_backend(self._backend, n_jobs=self._n_jobs):
224 return [func(*args, **kwargs)
--> 225 for func, args, kwargs in self.items]
226
227 def __len__(self):
~\Anaconda3\lib\site-packages\sklearn\model_selection\_validation.py in _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score, return_parameters, return_n_test_samples, return_times, return_estimator, error_score)
516 start_time = time.time()
517
--> 518 X_train, y_train = _safe_split(estimator, X, y, train)
519 X_test, y_test = _safe_split(estimator, X, y, test, train)
520
~\Anaconda3\lib\site-packages\sklearn\utils\metaestimators.py in _safe_split(estimator, X, y, indices, train_indices)
201 X_subset = X[np.ix_(indices, train_indices)]
202 else:
--> 203 X_subset = safe_indexing(X, indices)
204
205 if y is not None:
~\Anaconda3\lib\site-packages\sklearn\utils\__init__.py in safe_indexing(X, indices)
214 indices.dtype.kind == 'i'):
215 # This is often substantially faster than X[indices]
--> 216 return X.take(indices, axis=0)
217 else:
218 return X[indices]
TypeError: take(): argument 'index' (position 1) must be Tensor, not numpy.ndarray
What is wrong?
Change your input and labels to np.ndarray (see examples here).
Those will be cast to torch.Tensor automatically by skorch when needed.
All in all change your
inputs = Variable(x_traintensor)
labels = Variable(y_traintensor)
to:
inputs = x_traintensor.numpy() # assuming x is torch.Tensor
labels = y_traintensor.numpy() # assuming y is torch.Tensor
BTW, torch.autograd.Variable is deprecated; you should use torch.tensor(data, requires_grad=True) instead. In this case, inputs and labels do not need gradients, so Variable is even more out of place.
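Putting the fix together (a sketch, reusing x_traintensor, y_traintensor, crossnet1 and param_grid from the question):

inputs = x_traintensor.numpy()   # skorch converts numpy arrays back to tensors internally
labels = y_traintensor.numpy()

gs = GridSearchCV(estimator=crossnet1, param_grid=param_grid,
                  refit=False, cv=3, scoring="accuracy")
gs.fit(inputs, labels)           # sklearn can now index the arrays for its CV splits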

DNNClassifier: 'DataFrame' object has no attribute 'dtype'

I am trying to run a TensorFlow DNNClassifier model with some data that I read from a csv. Even though I converted the datatype of each column to float32, I keep getting the 'DataFrame' object has no attribute 'dtype' error. I would really appreciate it if you could help me.
Data format:
27 columns; 23 inputs, 4 classes
Thank you
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline

factors = pd.read_csv('xxx.csv')

# Formatting data to float32
factors['1'] = factors['1'].astype('float32')
factors['2'] = factors['2'].astype('float32')
...
factors['27'] = factors['27'].astype('float32')

# Definition of in- and output
feat_data = factors[['1', '2', ... '23']]
labels = factors[['24', '25', '26', '27']]

# Train-test split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(feat_data, labels, test_size=0.3, random_state=101)

from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaled_x_train = scaler.fit_transform(X_train)
scaled_x_test = scaler.transform(X_test)

# Model
from tensorflow import estimator

feat_cols = [tf.feature_column.numeric_column('x', shape=[23], dtype=tf.float32)]

deep_model = estimator.DNNClassifier(hidden_units=[23, 23, 23],
                                     feature_columns=feat_cols,
                                     n_classes=4,
                                     optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.01))

input_fn = estimator.inputs.numpy_input_fn(x={'x': scaled_x_train}, y=y_train,
                                           shuffle=True, batch_size=10, num_epochs=5)

deep_model.train(input_fn=input_fn, steps=50)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-169-9b2e050e4e40> in <module>()
----> 1 deep_model.train(input_fn=input_fn,steps=50)
~\Anaconda\envs\tfdeeplearning\lib\site- packages\tensorflow\python\estimator\estimator.py in train(self, input_fn, hooks, steps, max_steps)
239 hooks.append(training.StopAtStepHook(steps, max_steps))
240
--> 241 loss = self._train_model(input_fn=input_fn, hooks=hooks)
242 logging.info('Loss for final step: %s.', loss)
243 return self
~\Anaconda\envs\tfdeeplearning\lib\site-packages\tensorflow\python\estimator\estimator.py in _train_model(self, input_fn, hooks)
626 global_step_tensor = self._create_and_assert_global_step(g)
627 features, labels = self._get_features_and_labels_from_input_fn(
--> 628 input_fn, model_fn_lib.ModeKeys.TRAIN)
629 estimator_spec = self._call_model_fn(features, labels,
630 model_fn_lib.ModeKeys.TRAIN)
~\Anaconda\envs\tfdeeplearning\lib\site-packages\tensorflow\python\estimator\estimator.py in _get_features_and_labels_from_input_fn(self, input_fn, mode)
497
498 def _get_features_and_labels_from_input_fn(self, input_fn, mode):
--> 499 result = self._call_input_fn(input_fn, mode)
500 if isinstance(result, (list, tuple)):
501 if len(result) != 2:
~\Anaconda\envs\tfdeeplearning\lib\site-packages\tensorflow\python\estimator\estimator.py in _call_input_fn(***failed resolving arguments***)
583 kwargs['config'] = self.config
584 with ops.device('/cpu:0'):
--> 585 return input_fn(**kwargs)
586
587 def _call_model_fn(self, features, labels, mode):
~\Anaconda\envs\tfdeeplearning\lib\site-packages\tensorflow\python\estimator\inputs\numpy_io.py in input_fn()
122 num_threads=num_threads,
123 enqueue_size=batch_size,
--> 124 num_epochs=num_epochs)
125
126 features = (queue.dequeue_many(batch_size) if num_epochs is None
~\Anaconda\envs\tfdeeplearning\lib\site-packages\tensorflow\python\estimator\inputs\queues\feeding_functions.py in _enqueue_data(data, capacity, shuffle, min_after_dequeue, num_threads, seed, name, enqueue_size, num_epochs)
315 elif isinstance(data, collections.OrderedDict):
316 types = [dtypes.int64] + [
--> 317 dtypes.as_dtype(col.dtype) for col in data.values()
318 ]
319 queue_shapes = [()] + [col.shape[1:] for col in data.values()]
~\Anaconda\envs\tfdeeplearning\lib\site-packages\tensorflow\python\estimator\inputs\queues\feeding_functions.py in <listcomp>(.0)
315 elif isinstance(data, collections.OrderedDict):
316 types = [dtypes.int64] + [
--> 317 dtypes.as_dtype(col.dtype) for col in data.values()
318 ]
319 queue_shapes = [()] + [col.shape[1:] for col in data.values()]
~\Anaconda\envs\tfdeeplearning\lib\site-packages\pandas\core\generic.py in __getattr__(self, name)
3079 if name in self._info_axis:
3080 return self[name]
-> 3081 return object.__getattribute__(self, name)
3082
3083 def __setattr__(self, name, value):
AttributeError: 'DataFrame' object has no attribute 'dtype'
TensorFlow's input functions assume that you pass numpy arrays (which have a dtype attribute), not pandas DataFrames (which don't). So, you should pass df.values instead of df to TensorFlow functions.
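Applied to the code above, a minimal sketch (y_train is still the DataFrame produced by train_test_split):

input_fn = estimator.inputs.numpy_input_fn(
    x={'x': scaled_x_train},  # already a numpy array, since MinMaxScaler returns one
    y=y_train.values,         # .values turns the DataFrame into a numpy array
    shuffle=True, batch_size=10, num_epochs=5)

Note that with n_classes=4, DNNClassifier expects integer class ids rather than the four one-hot label columns, so the labels may additionally need something like y_train.values.argmax(axis=1).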
