I am trying to implement MNIST digit classification using PyTorch Lightning.
My train function looks like this:
def train(epochs, train_loader, test_loader, model):
    early_stopping = EarlyStopping('train_loss', mode='min', patience=5)
    # the naming pattern belongs in `filename`, not in `dirpath`
    model_checkpoint = ModelCheckpoint(dirpath=model_path, filename='mnist_{epoch}-{train_loss:.2f}',
                                       monitor='train_loss', mode='min', save_top_k=3)
    # early_stopping has to be passed to the trainer to have any effect
    trainer = pl.Trainer(max_epochs=epochs, profiler=False,
                         callbacks=[early_stopping, model_checkpoint], default_root_dir=model_path)
    trainer.fit(model, train_dataloader=train_loader)
    trainer.test(test_dataloaders=test_loader, ckpt_path=None)
The test_step function looks like this:
def test_step(self, test_batch, batch_idx):
    x, y = test_batch
    logits = self.forward(x)
    loss = self.mean_squared_error_loss(logits.squeeze(-1), y.float())
    # I want to calculate R2, MAPE, etc., save them in a pandas df and
    # return them to the train function
    self.log('test_loss', loss)
    return {'test_loss': loss}
I can calculate R2, MAPE, etc. using TorchMetrics, but I am not sure how (or whether it is even possible) to save them in a pandas DataFrame (or maybe in a list) for the whole test dataset. I have gone through this post but am not sure how I should go about it!
Any suggestions are appreciated.
You can aggregate the test results in test_epoch_end:
def test_step(self, test_batch, batch_idx):
    x, y = test_batch
    logits = self.forward(x)
    loss = self.mean_squared_error_loss(logits.squeeze(-1), y.float())
    self.log('test_loss', loss)
    return {'test_loss': loss, 'logits': logits, 'labels': y}
def test_epoch_end(self, outputs):
    all_preds, all_labels = [], []
    for output in outputs:
        probs = list(output['logits'].cpu().detach().numpy())  # predicted values
        labels = list(output['labels'].flatten().cpu().detach().numpy())
        all_preds.extend(probs)
        all_labels.extend(labels)
    # you can calculate R2 here or save results as file
    r2 = ...
Note that this only works on a single GPU. If you are using multiple GPUs, you need some function to gather results from different GPUs.
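For example, a minimal single-GPU sketch of that aggregation using the functional TorchMetrics API and pandas might look like the following (the attribute names self.test_results and self.test_metrics are just illustrative choices, not Lightning API):

import pandas as pd
import torch
from torchmetrics.functional import mean_absolute_percentage_error, r2_score

def test_epoch_end(self, outputs):
    # concatenate per-batch predictions and labels into single tensors
    preds = torch.cat([o['logits'].squeeze(-1) for o in outputs]).detach().cpu()
    labels = torch.cat([o['labels'] for o in outputs]).float().detach().cpu()
    # aggregate metrics over the whole test set
    r2 = r2_score(preds, labels)
    mape = mean_absolute_percentage_error(preds, labels)
    # keep per-sample results in a DataFrame and the metrics on the module,
    # so they can be read from the train() function after trainer.test()
    self.test_results = pd.DataFrame({'pred': preds.numpy(), 'label': labels.numpy()})
    self.test_metrics = {'r2': r2.item(), 'mape': mape.item()}
    self.log_dict(self.test_metrics)

After trainer.test(...) returns, the train function can then read model.test_results and model.test_metrics directly.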
To get model predictions, you need to add a predict_step() in the model class.
def predict_step(self, test_batch, batch_idx):
    x, y = test_batch
    logits = self.forward(x)
    return {'logits': logits, 'labels': y}
And run:
outputs = trainer.predict(model, test_loader, return_predictions=True)
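outputs is then a list with one dictionary per batch, which you can flatten into a DataFrame in the same way; a sketch, assuming the predict_step above:

import pandas as pd
import torch

preds = torch.cat([o['logits'].squeeze(-1) for o in outputs]).cpu().numpy()
labels = torch.cat([o['labels'] for o in outputs]).cpu().numpy()
df = pd.DataFrame({'pred': preds, 'label': labels})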
I’m trying to constrain the weights of my model by explicitly applying the gradients; however, this is not working and I can’t figure out why.
I’m defining the model with the following function:
def init_model(num_hidden_layers=2, num_neurons_per_layer=64):
    model = tf.keras.Sequential()
    model.add(tf.keras.Input(shape=(2,)))
    for _ in range(num_hidden_layers):
        model.add(tf.keras.layers.Dense(num_neurons_per_layer,
                                        activation=tf.keras.layers.LeakyReLU(),
                                        kernel_initializer="glorot_uniform"))
    model.add(tf.keras.layers.Dense(1, kernel_initializer="glorot_uniform"))
    return model
When using the fit method, the loss function decreases and the model fits the data:
Nepochs = 1500
lr = 0.001

def my_loss(u_true, u_pred):
    return tf.math.reduce_mean(tf.math.square(u_true - u_pred))

model_0 = init_model(num_hidden_layers=2, num_neurons_per_layer=64)
optim_0 = tf.keras.optimizers.Adam(learning_rate=lr)
model_0.compile(loss=my_loss, optimizer=optim_0)
model_0.summary()

history_0 = model_0.fit(X_train, u_train,
                        validation_data=(X_test.numpy(), u_test.numpy()),
                        epochs=Nepochs, batch_size=X_train.shape[0])
When I explicitly specify and apply the gradient, the loss function stagnates and the output does not fit the data (it is uniform everywhere):
Nepochs = 1500
lr = 0.001

def compute_loss(model, X_data, u_data):
    u_pred = model(X_data)
    loss = tf.math.reduce_mean(tf.math.square(u_data - u_pred))
    return loss

@tf.function
def training(model, optim, X_train, u_train, X_test=None, u_test=None):
    if X_test is not None:
        validation_loss = compute_loss(model, X_test, u_test)
    else:
        validation_loss = None
    with tf.GradientTape(persistent=True) as tape:
        tape.watch(model.trainable_variables)
        loss = compute_loss(model, X_train, u_train)
    grad_theta = tape.gradient(loss, model.trainable_variables)
    optim.apply_gradients(zip(grad_theta, model.trainable_variables))
    return loss, validation_loss

model_G = init_model(num_hidden_layers=2, num_neurons_per_layer=64)
optim_G = tf.keras.optimizers.Adam(learning_rate=lr)
model_G.summary()

hist = {'val_loss': [], 'loss': []}
for i in range(Nepochs + 1):
    loss, val_loss = training(model_G, optim_G, X_train, u_train, X_test, u_test)
    hist['loss'].append(loss.numpy())
    if val_loss is not None:
        hist['val_loss'].append(val_loss.numpy())
        print('It {:05d}: loss = {:10.8e}, validation loss = {:10.8e}'.format(i, loss, val_loss))
    else:
        print('It {:05d}: loss = {:10.8e}'.format(i, loss))
Why do the two versions provide different results?
Thanks for the help.
Cesare
Finally, I found that by expanding the dimension of the targets as follows:
u_train = tf.expand_dims(u_train,axis=-1)
u_test = tf.expand_dims(u_test,axis=-1)
the model trains properly and the loss functions are evaluated correctly.
u_train and u_test previously had shape (N,), i.e. just the number of entries N; after expanding the dimension, their shape is (N, 1).
With fit the code works with both shapes; when applying the gradients explicitly, it only works with targets of shape (N, 1).
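The underlying issue is broadcasting: with targets of shape (N,) and predictions of shape (N, 1), the difference u_data - u_pred silently broadcasts to an (N, N) matrix, so reduce_mean no longer computes a per-sample MSE. A minimal check:

import tensorflow as tf

u_true = tf.zeros((4,))     # targets of shape (N,)
u_pred = tf.ones((4, 1))    # model output of shape (N, 1)
diff = u_true - u_pred      # broadcasts to shape (N, N)
print(diff.shape)           # (4, 4)
# reduce_mean over the square of this matrix gives a different (wrong) loss,
# which is consistent with the hand-written training loop stagnating while the
# same code fits once the targets are reshaped to (N, 1).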
I have a data set like this:
edge_origins = np.array([[0,1,2,3,4],[6,7,8]])
edge_destinations = np.array([[1,2,3,4,5],[7,8,9]])
target = np.array([0,1])
x = [[np.array([0.1,0.5,0.2]),np.array([0.5,0.6,0.23]),
np.array([0.1,0.5,0.5]),np.array([0.1,0.6,0.23]),
np.array([0.1,0.4,0.4]),np.array([0.52,0.6,0.23])],
[np.array([0.1,0.3,0.3]),np.array([0.3,0.6,0.23]),
np.array([0.1,0.1,0.2]),np.array([0.4,0.6,0.23])]]
This is a list of two networks. The first network has 6 nodes and 5 edges with class 0, and the second has 4 nodes and 3 edges with class 1.
I want to develop a model in PyTorch that will classify each network into its class, and then I'll give it a new set of networks to classify.
So ultimately, I want to be able to shuffle these lists (simultaneously, i.e. maintaining the correspondence between the data and the classes), split them into train and test sets, read the train and test data into two data loaders, and feed these into a PyTorch network.
I wrote this:
edge_origins = np.array([[0,1,2,3,4],[6,7,8]])
edge_destinations = np.array([[1,2,3,4,5],[7,8,9]])
target = np.array([0,1])
x = [[np.array([0.1,0.5,0.2]),np.array([0.5,0.6,0.23]),
np.array([0.1,0.5,0.5]),np.array([0.1,0.6,0.23]),
np.array([0.1,0.4,0.4]),np.array([0.52,0.6,0.23])],
[np.array([0.1,0.3,0.3]),np.array([0.3,0.6,0.23]),
np.array([0.1,0.1,0.2]),np.array([0.4,0.6,0.23])]]
edge_index = torch.tensor([edge_origins, edge_destinations], dtype=torch.long)
dataset = Data(x=x, edge_index=edge_index, y=y, num_classes = len(set(target)))
print(dataset)
And the error is:
edge_index = torch.tensor([edge_origins, edge_destinations], dtype=torch.long)
ValueError: expected sequence of length 5 at dim 2 (got 3)
But then once that is fixed I think the next step is:
torch.manual_seed(12345)
dataset = dataset.shuffle()
train_dataset = dataset[:1] #for toy example
test_dataset = dataset[1:]
print(f'Number of training graphs: {len(train_dataset)}')
print(f'Number of test graphs: {len(test_dataset)}')
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
class GCN(torch.nn.Module):
    def __init__(self, hidden_channels):
        super(GCN, self).__init__()
        torch.manual_seed(12345)
        self.conv1 = GCNConv(dataset.num_node_features, hidden_channels)
        self.conv2 = GCNConv(hidden_channels, hidden_channels)
        self.conv3 = GCNConv(hidden_channels, hidden_channels)
        self.lin = Linear(hidden_channels, dataset.num_classes)

    def forward(self, x, edge_index, batch):
        # 1. Obtain node embeddings
        x = self.conv1(x, edge_index)
        x = x.relu()
        x = self.conv2(x, edge_index)
        x = x.relu()
        x = self.conv3(x, edge_index)
        # 2. Readout layer
        x = global_mean_pool(x, batch)  # [batch_size, hidden_channels]
        # 3. Apply a final classifier
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin(x)
        return x
model = GCN(hidden_channels=64)
print(model)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
criterion = torch.nn.CrossEntropyLoss()
def train():
    model.train()
    for data in train_loader:  # Iterate in batches over the training dataset.
        out = model(data.x, data.edge_index, data.batch)  # Perform a single forward pass.
        loss = criterion(out, data.y)  # Compute the loss.
        loss.backward()  # Derive gradients.
        optimizer.step()  # Update parameters based on gradients.
        optimizer.zero_grad()  # Clear gradients.

def test(loader):
    model.eval()
    correct = 0
    for data in loader:  # Iterate in batches over the training/test dataset.
        out = model(data.x, data.edge_index, data.batch)
        pred = out.argmax(dim=1)  # Use the class with highest probability.
        correct += int((pred == data.y).sum())  # Check against ground-truth labels.
    return correct / len(loader.dataset)  # Derive ratio of correct predictions.

for epoch in range(1, 171):
    train()
    train_acc = test(train_loader)
    test_acc = test(test_loader)
    print(f'Epoch: {epoch:03d}, Train Acc: {train_acc:.4f}, Test Acc: {test_acc:.4f}')
Could someone show me how to get my data running through the PyTorch network above?
In PyTorch Geometric, a Data object holds exactly one graph. So you can iterate over your arrays and build one Data object per graph:
data_list = []
for i in range(2):
    edge_index_curr = torch.tensor([edge_origins[i],
                                    edge_destinations[i]],
                                   dtype=torch.long)
    data = Data(x=torch.tensor(np.array(x[i]), dtype=torch.float),
                edge_index=edge_index_curr,
                y=torch.tensor(target[i]))
    data_list.append(data)
You can then use this list of Data objects to create your own DataLoader:
loader = DataLoader(data_list, batch_size=32)
If you need to split into train/val/test (I would advise having more than 2 samples for this), you can do it manually or with sklearn.model_selection; see the sketch below.
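A minimal sketch of such a split using sklearn plus PyG's DataLoader (with only two graphs this is purely illustrative; the import path is torch_geometric.loader.DataLoader in recent versions, torch_geometric.data.DataLoader in older ones):

from sklearn.model_selection import train_test_split
from torch_geometric.loader import DataLoader

# shuffle and split the graphs while keeping each graph paired with its label (stored in data.y)
train_list, test_list = train_test_split(data_list, test_size=0.5, shuffle=True, random_state=12345)
train_loader = DataLoader(train_list, batch_size=64, shuffle=True)
test_loader = DataLoader(test_list, batch_size=64, shuffle=False)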
If you really do have very little data, PyTorch Geometric also comes with transforms for data augmentation.
So basically, I am subclassing the PyTorch Lightning LightningModule. My issue is that I'm loading my data using a PyTorch DataLoader:
def train_dataloader(self):
    train_dir = f"{self.img_dir_gender}/train"
    # train_transforms: from PIL to TENSOR + DATA AUG
    train_transforms = T.Compose([
        T.ToTensor(),
        # T.Pad(25, padding_mode='symmetric'),
        # T.RandomHorizontalFlip(),
        # T.RandomVerticalFlip()
    ])
    train_dataset = ImageFolder(train_dir, transform=train_transforms)
    print(train_dataset.class_to_idx)
    print(Counter(train_dataset.targets))

    # oversampling: give more weight to minority classes
    class_counts = Counter(train_dataset.targets)
    class_weights_adjusted = [0] * len(train_dataset)
    for idx, (data, label) in enumerate(train_dataset):
        # inverse count gives more weight to minority classes
        class_weight = 1 / class_counts[label]
        class_weights_adjusted[idx] = class_weight
    sampler = WeightedRandomSampler(class_weights_adjusted, num_samples=self.num_samples, replacement=True)

    train_loader = DataLoader(train_dataset, batch_size=self.hparams.batch_size, num_workers=4,
                              sampler=sampler, shuffle=False)
    return train_loader
There I manage to compute per-sample weights and do some oversampling.
However, I cannot manage to retrieve those class weights and, say, take their inverse to pass them to the cross_entropy loss function in my training_step and val_step methods, with the aim of tackling class imbalance in my val dataset:
def training_step(self, batch, batch_idx):
    # x: torch.Size([bs, 3, 224, 224])
    # x = batch["pixel_values"]
    # y: torch.Size([bs])
    # y = batch["labels"]
    x, y = batch
    # unfreeze the backbone after a certain number of epochs;
    # before that, no graph with grads is kept for the backbone (memory efficient)
    if self.trainer.current_epoch < self.hparams.unfreeze_epoch:
        with torch.no_grad():
            features = self.backbone(x)
    else:
        features = self.backbone(x)
    preds = self.finetune_layer(features)
    # pred_probs = softmax(preds, dim=-1)
    # pred_labels = torch.argmax(pred_probs, dim=-1)
    train_loss = cross_entropy(preds, y, weight=?)
    self.log("train_loss", train_loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
    self.log("train_accuracy", self.train_accuracy(preds, y), on_step=True, on_epoch=True, prog_bar=True, logger=True)
    self.log("train_f1_score", self.train_f1(preds, y), on_step=True, on_epoch=True, prog_bar=True, logger=True)
    # self.log("train_accuracy", self.train_accuracy(preds, y), prog_bar=True)
    # self.log("train_precision", self.train_precision(preds, y), prog_bar=True)
    # self.log("train_recall", self.train_recall(preds, y), prog_bar=True)
    # self.log("train_f1", self.train_f1(preds, y), prog_bar=True)
    return train_loss
So I know that I should use the weight= parameter in the cross_entropy function, but how can I retrieve my class weights from my training dataset?
Let me know if I should add some clarifications.
You could do the following:

dm = DataModule()

# write a weights getter function in your pl.LightningDataModule
weights = dm.get_weights()

# set the loss function in your pl.LightningModule's __init__:
#
#     self.loss = nn.CrossEntropyLoss(weight=weights)
#
# and then call it in training_step as self.loss(preds, y)
model = Model(weights)

trainer.fit(model, dm)

This way you don't need to pass the weights to your loss function on every call.
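A minimal sketch of what such a getter could look like, computing inverse class frequencies once from the ImageFolder targets (the DataModule fields mirror the train_dataloader above and are assumptions about your setup):

from collections import Counter

import pytorch_lightning as pl
import torch
from torchvision.datasets import ImageFolder

class DataModule(pl.LightningDataModule):
    # ... __init__, train_dataloader, etc. as in your code ...
    def get_weights(self):
        dataset = ImageFolder(f"{self.img_dir_gender}/train")
        counts = Counter(dataset.targets)  # {class_index: number of samples}
        weights = torch.tensor([1.0 / counts[c] for c in range(len(counts))], dtype=torch.float)
        return weights / weights.sum()     # normalized inverse class frequencies

Inside the LightningModule you would then store self.loss = nn.CrossEntropyLoss(weight=weights) in __init__ and call self.loss(preds, y) in training_step and validation_step.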
I use TensorFlow 2.2.0. In my data pipeline, I use multiple datasets to train a neural net. Something like:
# these are all tf.data.Dataset objects:
paired_data = get_dataset(id=0, repeat=False, shuffle=True)
unpaired_images = get_dataset(id=1, repeat=True, shuffle=True)
unpaired_masks = get_dataset(id=2, repeat=True, shuffle=True)
In the training loop, I want to iterate over paired_data to define one epoch. But I also want to iterate over unpaired_images and unpaired_masks to optimize other objectives (classic semi-supervised learning for semantic segmentation, with a mask discriminator).
In order to do this, my current code looks like:
def train_one_epoch(self, writer, step, paired_data, unpaired_images, unpaired_masks):
    unpaired_images = unpaired_images.as_numpy_iterator()
    unpaired_masks = unpaired_masks.as_numpy_iterator()

    for images, labels in paired_data:
        with tf.GradientTape() as sup_tape, \
             tf.GradientTape() as gen_tape, \
             tf.GradientTape() as disc_tape:

            # paired data (supervised cost):
            predictions = segmentor(images, training=True)
            sup_loss = weighted_cross_entropy(predictions, labels)

            # unpaired data (adversarial cost):
            pred_real = discriminator(next(unpaired_masks), training=True)
            pred_fake = discriminator(segmentor(next(unpaired_images), training=True), training=True)
            gen_loss = generator_loss(pred_fake)
            disc_loss = discriminator_loss(pred_real, pred_fake)

        gradients = sup_tape.gradient(sup_loss, self.segmentor.trainable_variables)
        generator_optimizer.apply_gradients(zip(gradients, self.segmentor.trainable_variables))

        gradients = gen_tape.gradient(gen_loss, self.segmentor.trainable_variables)
        generator_optimizer.apply_gradients(zip(gradients, self.segmentor.trainable_variables))

        gradients = disc_tape.gradient(disc_loss, self.discriminator.trainable_variables)
        discriminator_optimizer.apply_gradients(zip(gradients, self.discriminator.trainable_variables))
However, this results in the error:
main.py:275 train_one_epoch *
unpaired_images = unpaired_images.as_numpy_iterator()
/home/venvs/conda/miniconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/data/ops/dataset_ops.py:476 as_numpy_iterator **
raise RuntimeError("as_numpy_iterator() is not supported while tracing "
RuntimeError: as_numpy_iterator() is not supported while tracing functions
Any idea what is wrong with this? Is this the correct way of optimizing over multiple losses/datasets in TensorFlow 2?
I've added my current solution below. Any suggestion for a more optimized way is more than welcome! :)
My current solution:
def train_one_epoch(self, writer, step, paired_data, unpaired_images, unpaired_masks):
    # create a new dataset zipping the three original dataset objects
    dataset = tf.data.Dataset.zip((paired_data, unpaired_images, unpaired_masks))

    for (images, labels), unpaired_images, unpaired_masks in dataset:
        # go ahead and train:
        with tf.GradientTape() as tape:
            # [...]
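Because unpaired_images and unpaired_masks are built with repeat=True, the zipped dataset is exhausted exactly when paired_data is, so one pass over the zip still corresponds to one epoch of paired data. An alternative sketch that avoids as_numpy_iterator() altogether is to create the iterators once in eager code and only call next() on them outside the traced step (names here are illustrative, not from the original code):

image_iter = iter(unpaired_images)  # eager iterators, created outside any tf.function
mask_iter = iter(unpaired_masks)

@tf.function
def train_step(images, labels, unpaired_image_batch, unpaired_mask_batch):
    # compute the supervised and adversarial losses and apply gradients here,
    # exactly as in the tape blocks above
    ...

for images, labels in paired_data:
    train_step(images, labels, next(image_iter), next(mask_iter))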
I want to train, evaluate the accuracy and eventually predict with my model. This is my first time using high level APIs such as tf.estimator.
I'm getting a value error from estimator.train(train_input_fn):
'ValueError: features should be a dictionary of `Tensor's. Given type: '
I'm not sure what is going on here. My model is taking 3 inputs and producing a binary output from one neuron.
Before this error I was getting an error about the requested shape not being equal to the actual shape, or something along those lines. I fixed that by reducing batchSize to 1 instead of 100. I'm sure this isn't going to do so well when it comes to training, though.
Any ideas? Here's my code:
import tensorflow as tf
import numpy as np
import sys
sys.path.insert(0, '/Users/blairburns/Documents/DeepLearning/BackgroundColourPredictor/Dataset/Testing/')
sys.path.insert(0, '/Users/blairburns/Documents/DeepLearning/BackgroundColourPredictor/Dataset/Training/')
#other files
from TestDataNormaliser import *
from TrainDataNormaliser import *
learning_rate = 0.01
trainingIteration = 15
batchSize = 1
displayStep = 2
#Layers using tf.layers
def get_logits(features):
    l1 = tf.layers.dense(features, 3, activation=tf.nn.relu)
    l2 = tf.layers.dense(l1, 4, activation=tf.nn.relu)
    l3 = tf.layers.dense(l2, 1, activation=None)
    a = l3
    return a

#cost function
def get_loss(a, labels):
    #cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(a)))
    return tf.nn.sigmoid_cross_entropy_with_logits(logits=a, labels=labels)
    #cross_entropy = tf.reduce_mean((l3 - y)**2)
    #cross_entropy = -tf.reduce_sum(y*tf.log(a))-tf.reduce_sum((1-y)*tf.log(1-a))

#optimizer
def get_train_op(loss):
    learning_rate = 1e-3
    optimizer = tf.train.RMSPropOptimizer(learning_rate)
    return optimizer.minimize(loss, global_step=tf.train.get_global_step())

#training
####
def get_inputs(feature_data, label_data, batch_size, n_epochs=None, shuffle=True):
    dataset = tf.data.Dataset.from_tensor_slices(
        (feature_data, label_data))
    dataset = dataset.repeat(n_epochs)
    if shuffle:
        dataset = dataset.shuffle(len(feature_data))
    dataset = dataset.batch(batch_size)
    features, labels = dataset.make_one_shot_iterator().get_next()
    return features, labels

def model_fn(features, labels, mode):
    a = get_logits(features)
    loss = get_loss(a, labels)
    train_op = get_train_op(loss)
    predictions = tf.greater(a, 0)
    accuracy = tf.metrics.accuracy(labels, predictions)
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=train_op,
        eval_metric_ops={'Accuracy': accuracy},
        predictions=predictions
    )

def train_input_fn():
    return get_inputs(
        trainArrayValues,
        trainArrayLabels,
        batchSize
    )

def eval_input_fn():
    return get_inputs(
        testArrayValues,
        testArrayLabels,
        batchSize,
        n_epochs=1,
        shuffle=False
    )
model_dir = './savedModel'
estimator = tf.estimator.LinearRegressor(feature_columns=[model_fn, model_dir])
#estimator.train(train_input_fn, max_steps=1)
estimator.train(train_input_fn)
estimator.evaluate(eval_input_fn)
Your problem is this line:
estimator = tf.estimator.LinearRegressor(feature_columns=[model_fn, model_dir])
You need to set the feature_columns argument to an array of feature columns. A feature column tells the estimator about the data you're feeding it.
It looks like all your input data is numeric, so I'd call tf.feature_column.numeric_column to create your feature column(s). The documentation is here. For example, the following code creates a numeric feature column containing x-coordinates:
xcol = tf.feature_column.numeric_column('x')
If all your estimator needs are x-coordinates, then you could create the estimator with the following code:
estimator = tf.estimator.LinearRegressor(feature_columns=[xcol])
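Putting this together with the rest of your code, a hedged sketch of the feature-column route might look like this (the column names f1/f2/f3 are made up, and it assumes trainArrayValues is an (N, 3) NumPy array; the input_fn has to return the features as a dict keyed by those same names, which also resolves the "features should be a dictionary of Tensors" error):

feature_names = ['f1', 'f2', 'f3']
feature_columns = [tf.feature_column.numeric_column(name) for name in feature_names]

def train_input_fn():
    # one dict entry per feature column, keyed by the column name
    features = {name: trainArrayValues[:, i] for i, name in enumerate(feature_names)}
    dataset = tf.data.Dataset.from_tensor_slices((features, trainArrayLabels))
    return dataset.shuffle(len(trainArrayLabels)).repeat().batch(batchSize)

estimator = tf.estimator.LinearRegressor(feature_columns=feature_columns, model_dir=model_dir)
estimator.train(train_input_fn, steps=1000)

If you meant to use your custom model_fn instead of a canned estimator, you would construct a tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir) rather than a LinearRegressor.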