TensorBoard: the scalar value is not recorded - python

The scalar values are not being recorded, as shown in the attached screenshots (the TensorBoard scalars page shows no data).
I don't know whether the problem is in the training code or in the TensorBoard code.
It's my first time using TensorBoard, so I'm not sure I wrote the code for viewing it correctly.
If the TensorBoard code is written correctly and the scalars are still not recorded as shown in the screenshots, then I will need to look at the training code instead.
So I would appreciate it if you could check only the part of the code that records the scalar values and displays them in TensorBoard.
Part of the main function of run.py:
def main():
    args = parser.parse_args()
    assert args.n_views == 2, "Only two view training is supported. Please use --n-views 2."
    # check if gpu training is available
    if not args.disable_cuda and torch.cuda.is_available():
        args.device = torch.device('cuda')
        cudnn.deterministic = True
        cudnn.benchmark = True
    else:
        args.device = torch.device('cpu')
        args.gpu_index = -1

    dataset = ContrastiveLearningDataset(args.data)
    train_dataset = dataset.__getitem__(args.dataset_name, args.n_views, 1)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True, drop_last=True)
    #f(*)
    model = ResNetSimCLR(base_model=args.arch, out_dim=args.out_dim)
    optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=len(train_loader), eta_min=0,
                                                           last_epoch=-1)

    with torch.cuda.device(args.gpu_index):
        simclr = SimCLR(model=model, optimizer=optimizer, scheduler=scheduler, args=args)
        simclr.train(train_loader)
        simclr.writer.flush()
The train function called in main() is defined in simclr.py:
class SimCLR(object):

    def __init__(self, *args, **kwargs):
        self.args = kwargs['args']
        self.model = kwargs['model'].to(self.args.device)
        self.optimizer = kwargs['optimizer']
        self.scheduler = kwargs['scheduler']
        self.writer = SummaryWriter()
        logging.basicConfig(filename=os.path.join(self.writer.log_dir, 'training.log'), level=logging.DEBUG)
        self.criterion = torch.nn.CrossEntropyLoss().to(self.args.device)

    def info_nce_loss(self, features):
        labels = torch.cat([torch.arange(self.args.batch_size) for i in range(self.args.n_views)], dim=0)
        labels = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()
        labels = labels.to(self.args.device)

        features = F.normalize(features, dim=1)
        similarity_matrix = torch.matmul(features, features.T)

        mask = torch.eye(labels.shape[0], dtype=torch.bool).to(self.args.device)
        labels = labels[~mask].view(labels.shape[0], -1)
        similarity_matrix = similarity_matrix[~mask].view(similarity_matrix.shape[0], -1)

        positives = similarity_matrix[labels.bool()].view(labels.shape[0], -1)
        negatives = similarity_matrix[~labels.bool()].view(similarity_matrix.shape[0], -1)

        logits = torch.cat([positives, negatives], dim=1)
        labels = torch.zeros(logits.shape[0], dtype=torch.long).to(self.args.device)

        logits = logits / self.args.temperature
        return logits, labels

    def train(self, train_loader):
        scaler = GradScaler(enabled=self.args.fp16_precision)

        # save config file
        save_config_file(self.writer.log_dir, self.args)

        n_iter = 0  # global optimization step
        logging.info(f"Start SimCLR training for {self.args.epochs} epochs.")
        logging.info(f"Training with gpu: {self.args.disable_cuda}.")

        for epoch_counter in range(self.args.epochs):
            for images, _ in tqdm(train_loader):
                images = torch.cat(images, dim=0)
                images = images.to(self.args.device)

                with autocast(enabled=self.args.fp16_precision):
                    features = self.model(images)
                    logits, labels = self.info_nce_loss(features)
                    loss = self.criterion(logits, labels)

                self.optimizer.zero_grad()
                scaler.scale(loss).backward()
                scaler.step(self.optimizer)
                scaler.update()

                if n_iter % self.args.log_every_n_steps == 0:
                    top1, top5 = accuracy(logits, labels, topk=(1, 5))
                    self.writer.add_scalar('loss', loss, global_step=n_iter)
                    self.writer.add_scalar('acc/top1', top1[0], global_step=n_iter)
                    self.writer.add_scalar('acc/top5', top5[0], global_step=n_iter)
                    self.writer.add_scalar('learning_rate', self.scheduler.get_lr()[0], global_step=n_iter)

                n_iter += 1

            # warmup for the first 10 epochs
            if epoch_counter >= 1:
                try:
                    self.scheduler.step()
                except ZeroDivisionError:
                    print("ZeroDivision")

            logging.debug(f"Epoch: {epoch_counter}\tLoss: {loss}\tTop1 accuracy: {top1[0]}")

        logging.info("Training has finished.")
        # save model checkpoints
        checkpoint_name = 'checkpoint_{:04d}.pth.tar'.format(self.args.epochs)
        save_checkpoint({
            'epoch': self.args.epochs,
            'arch': self.args.arch,
            'state_dict': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
        }, is_best=False, filename=os.path.join(self.writer.log_dir, checkpoint_name))
        logging.info(f"Model checkpoint and metadata has been saved at {self.writer.log_dir}.")

Related

Why is my Kaggle notebook not showing GPU usage even with CUDA enabled?

I'm running a Kaggle Notebook, but while my training loop is running it shows GPU usage as zero. In my training loop I print the line:
print("Using", torch.cuda.get_device_name(0))
which returns:
Using Tesla P100-PCIE-16GB
But when I click the meters at the top, it shows CPU at 189% and GPU at 0%. Why would this show zero if the GPU is enabled?
Below are my DataLoader, Model, and Training Loop:
DataLoader:
def read_xray(file_path, img_size=None):
    dicom = pydicom.read_file(file_path)
    img = dicom.pixel_array
    if dicom.PhotometricInterpretation == "MONOCHROME1":
        img = np.max(img) - img
    if img_size:
        img_size = (img_size, img_size)  # Convert img_size to a tuple
        img = cv2.resize(img, dsize=img_size)
    # Add channel dim at first
    img = img[np.newaxis]
    # Convert img to float32
    img = img / np.max(img)
    img = img.astype("float32")
    return img

class ImageDataset(Dataset):
    def __init__(self, root_dir, df, transform=None):
        self.root_dir = root_dir
        self.transform = transform
        self.folder_list = os.listdir(self.root_dir)
        self.image_counts = [len(os.listdir(os.path.join(self.root_dir, folder))) for folder in self.folder_list]
        self.df = df

    def __len__(self):
        return sum(self.image_counts)

    def __getitem__(self, index):
        row = self.df.iloc[index]
        folder_index = 0
        while index >= self.image_counts[folder_index]:
            index -= self.image_counts[folder_index]
            folder_index += 1
        image_path = os.path.join(self.root_dir, self.folder_list[folder_index],
                                  os.listdir(os.path.join(self.root_dir, self.folder_list[folder_index]))[index])
        img = read_xray(image_path, 128)
        target = torch.tensor(row.cancer).float()
        img = np.repeat(img, 3, axis=0)
        img = torch.from_numpy(img)
        if self.transform:
            img = self.transform(img)
        return img, target

transformed_dataset = ImageDataset(root_dir="/kaggle/input/rsna-breast-cancer-detection/train_images", df=train_df)
dataloader = DataLoader(transformed_dataset, batch_size=128, shuffle=True, num_workers=2)
Model:
model = models.resnet18(pretrained=True)
num_features = model.fc.in_features
model.fc = nn.Linear(num_features, 2)
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=.001)
Train Loop:
def train(model, dataloader, loss_fn, optimizer, epochs):
    for epoch in tqdm(range(epochs)):
        print("Using", torch.cuda.get_device_name(0))
        running_loss = 0.0
        for i, data in tqdm(enumerate(dataloader, 0)):
            inputs, labels = data
            model = model.to(device)
            labels = labels.type(torch.LongTensor)
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            print(f"Output done {i}")
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        print('[Epoch %d] loss: %.3f' % (epoch + 1, running_loss / len(dataloader)))
    print('Finished Training')

train(model, dataloader, criterion, optimizer, 4)
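As a sanity check (not part of the original notebook), it can help to confirm that both the model and a batch of inputs actually end up on the GPU, and to keep in mind that GPU utilisation can still read near zero when the DataLoader is the bottleneck. A rough sketch using the names from the code above:

import torch

print(torch.cuda.is_available())            # should print True
print(next(model.parameters()).device)      # expect cuda:0 once model.to(device) has run

inputs, labels = next(iter(dataloader))     # one batch from the DataLoader above
print(inputs.to(device).device)             # expect cuda:0

If everything reports cuda:0 and the meter still shows 0%, the CPU-side work in read_xray (per-image DICOM decoding plus cv2.resize) is likely dominating each step; increasing num_workers or pre-resizing the images offline usually makes the GPU usage visible.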

RuntimeError: mat1 and mat2 shapes cannot be multiplied CNN

While running this code on my dataset I am getting an error. The head of my data looks like this:

   fridge_temperature temp_condition  label
0               13.10           high      0
1                8.65           high      0
2                2.00            low      0
3                4.80            low      0
4               10.70           high      0

and the shape of my data is (587076, 3).
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, random_split

# --------------- Dataset ---------------
class StudentsPerformanceDataset(Dataset):
    """Students Performance dataset."""

    def __init__(self, csv_file):
        """Initializes instance of class StudentsPerformanceDataset.

        Args:
            csv_file (str): Path to the csv file with the students data.
        """
        df = pd.read_csv("Z:/new_file.csv")
        # Drop the columns you want to remove
        df = df.drop('date_time', axis=1)
        df = df.drop('type', axis=1)
        print(df.head())
        print(df.shape)

        # Grouping variable names
        self.categorical = ["temp_condition"]
        self.target = "label"

        # One-hot encoding of categorical variables
        self.students_frame = pd.get_dummies(df, prefix=self.categorical)

        # Save target and predictors
        self.X = self.students_frame.drop(self.target, axis=1)
        self.y = self.students_frame[self.target]

    def __len__(self):
        return len(self.students_frame)

    def __getitem__(self, idx):
        # Convert idx from tensor to list due to pandas bug (that arises when using pytorch's random_split)
        if isinstance(idx, torch.Tensor):
            idx = idx.tolist()
        return [self.X.iloc[idx].values, self.y[idx]]

# --------------- Model ---------------
class Net(nn.Module):

    def __init__(self, D_in, H=15, D_out=1):
        super().__init__()
        self.fc1 = nn.Linear(D_in, H)
        self.fc2 = nn.Linear(H, D_out)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        return x.squeeze()

""" class Net(nn.Module):
    def __init__(self, D_in, H=15, D_out=1):
        super().__init__()
        self.fc1 = nn.Linear(D_in, H)
        self.fc2 = nn.Linear(H, D_out)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        return x.squeeze() """
# --------------- Training ---------------
def train(csv_file, n_epochs=100):
    """Trains the model.

    Args:
        csv_file (str): Absolute path of the dataset used for training.
        n_epochs (int): Number of epochs to train.
    """
    # Load dataset
    dataset = StudentsPerformanceDataset(csv_file)

    # Split into training and test
    train_size = int(0.8 * len(dataset))
    test_size = len(dataset) - train_size
    trainset, testset = random_split(dataset, [train_size, test_size])

    # Dataloaders
    trainloader = DataLoader(trainset, batch_size=200, shuffle=True)
    testloader = DataLoader(testset, batch_size=200, shuffle=False)

    # Use gpu if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Define the model
    D_in, H = 3, 15
    net = Net(D_in, H).to(device)
    #D_in, H = 19, 15
    #net = Net(D_in, H).to(device)

    # Loss function
    criterion = nn.MSELoss()

    # Optimizer
    optimizer = optim.Adam(net.parameters(), weight_decay=0.0001)

    # Train the net
    loss_per_iter = []
    loss_per_batch = []
    for epoch in range(n_epochs):
        running_loss = 0.0
        for i, (inputs, labels) in enumerate(trainloader):
            inputs = inputs.to(device)
            labels = labels.to(device)

            # Zero the parameter gradients
            optimizer.zero_grad()

            # Forward + backward + optimize
            outputs = net(inputs.float())
            loss = criterion(outputs, labels.float())
            loss.backward()
            optimizer.step()

            # Save loss to plot
            running_loss += loss.item()
            loss_per_iter.append(loss.item())

        loss_per_batch.append(running_loss / (i + 1))
        running_loss = 0.0

    # Comparing training to test
    dataiter = iter(testloader)
    inputs, labels = dataiter.next()
    inputs = inputs.to(device)
    labels = labels.to(device)
    outputs = net(inputs.float())
    print("Root mean squared error")
    print("Training:", np.sqrt(loss_per_batch[-1]))
    print("Test", np.sqrt(criterion(labels.float(), outputs).detach().cpu().numpy()))

    # Plot training loss curve
    plt.plot(np.arange(len(loss_per_iter)), loss_per_iter, "-", alpha=0.5, label="Loss per epoch")
    plt.plot(np.arange(len(loss_per_iter), step=4) + 3, loss_per_batch, ".-", label="Loss per mini-batch")
    plt.xlabel("Number of epochs")
    plt.ylabel("Loss")
    plt.legend()
    plt.show()


if __name__ == "__main__":
    import os
    import sys
    import argparse

    # By default, read csv file in the same directory as this script
    csv_file = os.path.join(sys.path[0], "Z:/new_file.csv")

    # Parsing arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--file", "-f", nargs="?", const=csv_file, default=csv_file,
                        help="Dataset file used for training")
    parser.add_argument("--epochs", "-e", type=int, nargs="?", default=100, help="Number of epochs to train")
    args = parser.parse_args()

    # Call the main function of the script
    train(args.file, args.epochs)
I am getting this error:

    return F.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (200x7 and 3x15)
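A hedged note on the usual cause of this error: after pd.get_dummies the input batch has 7 columns (per the error message, mat1 is 200x7), while the first Linear layer was built with D_in = 3. Deriving D_in from the data instead of hard-coding it avoids the mismatch; the snippet below reuses the names from the code above:

dataset = StudentsPerformanceDataset(csv_file)
D_in = dataset.X.shape[1]          # 7 for this data: fridge_temperature plus the one-hot temp_condition columns
net = Net(D_in, H=15).to(device)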

Implementing Early Stopping in Pytorch without Torchsample

As a PyTorch newbie (coming from TensorFlow), I am unsure how to implement early stopping. My research has led me to discover that PyTorch does not have a native way to do this. I have also discovered torchsample, but am unable to install it in my conda environment for whatever reason. Is there a simple way to apply early stopping without it? Here is my current setup:
class RegressionDataset(Dataset):

    def __init__(self, X_data, y_data):
        self.X_data = X_data
        self.y_data = y_data

    def __getitem__(self, index):
        return self.X_data[index], self.y_data[index]

    def __len__(self):
        return len(self.X_data)

train_dataset = RegressionDataset(torch.from_numpy(X_train).float(), torch.from_numpy(y_train).float())
val_dataset = RegressionDataset(torch.from_numpy(X_val).float(), torch.from_numpy(y_val).float())
test_dataset = RegressionDataset(torch.from_numpy(X_test).float(), torch.from_numpy(y_test).float())

# Model Params
EPOCHS = 100
BATCH_SIZE = 1000
LEARNING_RATE = 0.001
NUM_FEATURES = np.shape(X_test)[1]

# Initialize Dataloaders
train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)
val_loader = DataLoader(dataset=val_dataset, batch_size=BATCH_SIZE)
test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE)

# Define Neural Network Architecture
class MultipleRegression(nn.Module):

    def __init__(self, num_features):
        super(MultipleRegression, self).__init__()

        # Define architecture
        self.layer_1 = nn.Linear(num_features, 16)
        self.layer_2 = nn.Linear(16, 32)
        self.layer_3 = nn.Linear(32, 25)
        self.layer_4 = nn.Linear(25, 20)
        self.layer_5 = nn.Linear(20, 16)
        self.layer_out = nn.Linear(16, 1)
        self.relu = nn.ReLU()  # ReLU applied to all layers

        # Initialize weights and biases
        nn.init.xavier_uniform_(self.layer_1.weight)
        nn.init.zeros_(self.layer_1.bias)
        nn.init.xavier_uniform_(self.layer_2.weight)
        nn.init.zeros_(self.layer_2.bias)
        nn.init.xavier_uniform_(self.layer_3.weight)
        nn.init.zeros_(self.layer_3.bias)
        nn.init.xavier_uniform_(self.layer_4.weight)
        nn.init.zeros_(self.layer_4.bias)
        nn.init.xavier_uniform_(self.layer_5.weight)
        nn.init.zeros_(self.layer_5.bias)
        nn.init.xavier_uniform_(self.layer_out.weight)
        nn.init.zeros_(self.layer_out.bias)

    def forward(self, inputs):
        x = self.relu(self.layer_1(inputs))
        x = self.relu(self.layer_2(x))
        x = self.relu(self.layer_3(x))
        x = self.relu(self.layer_4(x))
        x = self.relu(self.layer_5(x))
        x = self.layer_out(x)
        return x

    def predict(self, test_inputs):
        x = self.relu(self.layer_1(test_inputs))
        x = self.relu(self.layer_2(x))
        x = self.relu(self.layer_3(x))
        x = self.relu(self.layer_4(x))
        x = self.relu(self.layer_5(x))
        x = self.layer_out(x)
        return x

# Check for GPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

model = MultipleRegression(NUM_FEATURES)
model.to(device)
print(model)

criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)

# Define dictionary to store loss/epochs for training and validation
loss_stats = {
    "train": [],
    "val": []
}

# Begin training
print("Begin Training")
for e in tqdm(range(1, EPOCHS + 1)):
    # Training
    train_epoch_loss = 0
    model.train()
    for X_train_batch, y_train_batch in train_loader:
        X_train_batch, y_train_batch = X_train_batch.to(device), y_train_batch.to(device)
        optimizer.zero_grad()
        y_train_pred = model(X_train_batch)
        train_loss = criterion(y_train_pred, y_train_batch.unsqueeze(1))
        train_loss.backward()
        optimizer.step()
        train_epoch_loss += train_loss.item()

    # Validation
    with torch.no_grad():
        val_epoch_loss = 0
        model.eval()
        for X_val_batch, y_val_batch in val_loader:
            X_val_batch, y_val_batch = X_val_batch.to(device), y_val_batch.to(device)
            y_val_pred = model(X_val_batch)
            val_loss = criterion(y_val_pred, y_val_batch.unsqueeze(1))
            val_epoch_loss += val_loss.item()

    loss_stats["train"].append(train_epoch_loss / len(train_loader))
    loss_stats["val"].append(val_epoch_loss / len(val_loader))
    print(f"Epoch {e}: Train loss: {train_epoch_loss/len(train_loader):.5f} | Val loss: {val_epoch_loss/len(val_loader):.5f}")

# Visualize loss and accuracy
train_val_loss_df = pd.DataFrame.from_dict(loss_stats).reset_index().melt(id_vars=["index"]).rename(columns={"index": "epochs"})
plt.figure()
sns.lineplot(data=train_val_loss_df, x="epochs", y="value", hue="variable").set_title("Train-Val Loss/Epoch")

# Test model
y_pred_list = []
with torch.no_grad():
    model.eval()
    for X_batch, _ in test_loader:
        X_batch = X_batch.to(device)
        y_test_pred = model(X_batch)
        y_pred_list.append(y_test_pred.cpu().numpy())
y_pred_list = [a.squeeze().tolist() for a in y_pred_list]
y_pred_list = [item for sublist in y_pred_list for item in sublist]
y_pred_list = np.array(y_pred_list)

mse = mean_squared_error(y_test, y_pred_list)
r_square = r2_score(y_test, y_pred_list)
print("Mean Squared Error :", mse)
print("R^2 :", r_square)
A basic way to do this is to keep track of the best validation loss obtained so far.
You can have a variable best_loss = float('inf') initialized before your loop over epochs (or you could do other things, like tracking the best loss per epoch, etc.).
After each validation pass, do:

if val_loss < best_loss:
    best_loss = val_loss
    # At this point also save a snapshot of the current model
    torch.save(model, 'my_model_best_loss.pth')

Then, if best_loss does not improve significantly after some number of training steps, or by the end of the epoch, or if val_loss keeps getting worse, break out of the loop and terminate the training there.
For implementing algorithms like early stopping (and your training loop in general) you may find it easier to give PyTorch Lightning a try (no affiliation, but it's much easier than trying to roll everything by hand).
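For reference, a minimal early-stopping helper along the lines described above; the patience value and checkpoint filename are arbitrary choices, not from the original post:

import numpy as np
import torch

class EarlyStopping:
    """Stop training when the validation loss has not improved for `patience` epochs."""
    def __init__(self, patience=10, min_delta=0.0, path="best_model.pth"):
        self.patience = patience
        self.min_delta = min_delta
        self.path = path
        self.best_loss = np.inf
        self.counter = 0
        self.should_stop = False

    def step(self, val_loss, model):
        if val_loss < self.best_loss - self.min_delta:
            # Validation loss improved: reset the counter and snapshot the model
            self.best_loss = val_loss
            self.counter = 0
            torch.save(model.state_dict(), self.path)
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self.should_stop = True

Inside the epoch loop from the question this would be used as early_stopping.step(val_epoch_loss / len(val_loader), model), followed by if early_stopping.should_stop: break.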

How to save a Keras Model that is an instance of a custom class

I am using a Seq2Seq project from Google that uses an Encoder/Decoder. Here are the two encoder and decoder classes:
#ENCODER
class EncoderNetwork(tf.keras.Model):
    def __getstate__(self):
        d = self.__dict__.copy()
        d.pop('_parents', None)
        return d

    def __init__(self, input_vocab_size, embedding_dims, rnn_units):
        super().__init__()
        self.encoder_embedding = tf.keras.layers.Embedding(input_dim=input_vocab_size,
                                                           output_dim=embedding_dims)
        self.encoder_rnnlayer = tf.keras.layers.LSTM(rnn_units, return_sequences=True,
                                                     return_state=True)
        encoder_embedding = self.encoder_embedding
        encoder_rnnlayer = self.encoder_rnnlayer

#DECODER
class DecoderNetwork(tf.keras.Model):
    def __getstate__(self):
        d = self.__dict__.copy()
        d.pop('_parents', None)
        return d

    def __init__(self, output_vocab_size, embedding_dims, rnn_units):
        super().__init__()
        self.decoder_embedding = tf.keras.layers.Embedding(input_dim=output_vocab_size,
                                                           output_dim=embedding_dims)
        self.dense_layer = tf.keras.layers.Dense(output_vocab_size)
        self.decoder_rnncell = tf.keras.layers.LSTMCell(rnn_units)
        # Sampler
        self.sampler = tfa.seq2seq.sampler.TrainingSampler()
        # Create attention mechanism with memory = None
        self.attention_mechanism = self.build_attention_mechanism(dense_units, None, BATCH_SIZE * [Tx])
        self.rnn_cell = self.build_rnn_cell(BATCH_SIZE)
        self.decoder = tfa.seq2seq.BasicDecoder(self.rnn_cell, sampler=self.sampler,
                                                output_layer=self.dense_layer)

    def build_attention_mechanism(self, units, memory, memory_sequence_length):
        return tfa.seq2seq.LuongAttention(units, memory=memory,
                                          memory_sequence_length=memory_sequence_length)
        #return tfa.seq2seq.BahdanauAttention(units, memory=memory, memory_sequence_length=memory_sequence_length)

    # wrap the decoder rnn cell
    def build_rnn_cell(self, batch_size):
        rnn_cell = tfa.seq2seq.AttentionWrapper(self.decoder_rnncell, self.attention_mechanism,
                                                attention_layer_size=dense_units)
        return rnn_cell

    def build_decoder_initial_state(self, batch_size, encoder_state, Dtype):
        decoder_initial_state = self.rnn_cell.get_initial_state(batch_size=batch_size,
                                                                dtype=Dtype)
        decoder_initial_state = decoder_initial_state.clone(cell_state=encoder_state)
        return decoder_initial_state
I create an instance of EncoderNetwork and DecoderNetwork with my arguments, and use the loss_function and train_step already defined to train my model:
def loss_function(y_pred, y):
    #shape of y [batch_size, ty]
    #shape of y_pred [batch_size, Ty, output_vocab_size]
    sparsecategoricalcrossentropy = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True,
                                                                                  reduction='none')
    loss = sparsecategoricalcrossentropy(y_true=y, y_pred=y_pred)
    mask = tf.logical_not(tf.math.equal(y, 0))  # output 0 for y=0, else output 1
    mask = tf.cast(mask, dtype=loss.dtype)
    loss = mask * loss
    loss = tf.reduce_mean(loss)
    return loss

def train_step(input_batch, output_batch, encoder_initial_cell_state):
    # initialize loss = 0
    loss = 0
    with tf.GradientTape() as tape:
        encoder_emb_inp = encoderNetwork.encoder_embedding(input_batch)
        a, a_tx, c_tx = encoderNetwork.encoder_rnnlayer(encoder_emb_inp,
                                                        initial_state=encoder_initial_cell_state)
        # [last step activations, last memory_state] of encoder passed as input to decoder network

        # Prepare correct decoder input & output sequence data
        decoder_input = output_batch[:, :-1]   # ignore <end>
        # compare logits with timestepped +1 version of decoder_input
        decoder_output = output_batch[:, 1:]   # ignore <start>

        # Decoder embeddings
        decoder_emb_inp = decoderNetwork.decoder_embedding(decoder_input)

        # Setting up decoder memory from encoder output and zero state for AttentionWrapperState
        decoderNetwork.attention_mechanism.setup_memory(a)
        decoder_initial_state = decoderNetwork.build_decoder_initial_state(BATCH_SIZE,
                                                                           encoder_state=[a_tx, c_tx],
                                                                           Dtype=tf.float32)

        # BasicDecoderOutput
        outputs, _, _ = decoderNetwork.decoder(decoder_emb_inp, initial_state=decoder_initial_state,
                                               sequence_length=BATCH_SIZE * [Ty - 1])
        logits = outputs.rnn_output

        # Calculate loss
        loss = loss_function(logits, decoder_output)

    # Returns the list of all layer variables / weights.
    variables = encoderNetwork.trainable_variables + decoderNetwork.trainable_variables
    # differentiate loss wrt variables
    gradients = tape.gradient(loss, variables)

    # grads_and_vars – list of (gradient, variable) pairs
    grads_and_vars = zip(gradients, variables)
    optimizer.apply_gradients(grads_and_vars)
    return loss
The training does not use the fit() method; it looks like this:
epochs = 20
for i in range(1, epochs + 1):
    encoder_initial_cell_state = initialize_initial_state()
    total_loss = 0.0

    for (batch, (input_batch, output_batch)) in enumerate(dataset.take(steps_per_epoch)):
        batch_loss = train_step(input_batch, output_batch, encoder_initial_cell_state)
        total_loss += batch_loss
        if (batch + 1) % 5 == 0:
            print("total loss: {} epoch {} batch {} ".format(batch_loss.numpy(), i, batch + 1))
The results are fine and the predict function works perfectly (it is a custom predict function), but how can I save the model? I tried pickle and Keras save(), but neither works. Any idea?
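A hedged sketch of one common approach for subclassed models trained with a custom loop: save the weights, or a tf.train.Checkpoint that covers both networks and the optimizer, rather than pickling or calling save() on the whole model. The names below follow the code in the question; the directory path is an arbitrary choice:

import tensorflow as tf

checkpoint = tf.train.Checkpoint(encoder=encoderNetwork,
                                 decoder=decoderNetwork,
                                 optimizer=optimizer)
manager = tf.train.CheckpointManager(checkpoint, directory="./ckpts", max_to_keep=3)
manager.save()                                   # call once per epoch, or as often as needed

# later, after rebuilding the networks with the same constructor arguments:
# checkpoint.restore(manager.latest_checkpoint)

# alternatively, plain weight files per network:
# encoderNetwork.save_weights("encoder_weights")
# decoderNetwork.save_weights("decoder_weights")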

Using the tf.data API in training, I can't get the whole dataset

I use tf.data to fetch images, labels, and edges during training on the GPU, but I find the dataset API does not deliver all of the data.
I use this code:
def get_dataset(filenames, shuffle_buffer, repeat_times, batch_size):
    dataset = tf.data.TFRecordDataset([filenames])
    dataset = dataset.map(tfrecord_preprocess)
    if repeat_times is None:
        dataset = dataset.repeat()
    else:
        dataset = dataset.repeat(repeat_times)
    dataset = dataset.shuffle(shuffle_buffer).batch(batch_size)
    return dataset

def tfrecord_preprocess(example):
    feature = {"image": tf.FixedLenFeature((), tf.string, default_value=""),
               "label": tf.FixedLenFeature((), tf.string, default_value=""),
               "edge": tf.FixedLenFeature((), tf.string, default_value="")}
    parsed_feature = tf.parse_single_example(example, feature)
    image = tf.decode_raw(parsed_feature["image"], out_type=tf.uint8)
    label = tf.decode_raw(parsed_feature["label"], out_type=tf.uint8)
    edge = tf.decode_raw(parsed_feature["edge"], out_type=tf.uint8)
    image = tf.cast(tf.reshape(image, shape=[1, 128, 128]), tf.float32)
    label = tf.cast(tf.reshape(label, shape=[1, 128, 128]), tf.float32)
    edge = tf.cast(tf.reshape(edge, shape=[128, 128]), tf.float32)
    return image, label, edge
I wrote some simple code to test the API:

dataset = get_dataset(filenames, shuffle_buffer, repeat_times, batchsize)
# shuffle=1000, repeat_times=2, batchsize=13
iterator = dataset.make_one_shot_iterator()
images, labels, edges = iterator.get_next()
count = 0
with tf.Session() as sess:
    for _ in xrange(40):
        try:
            edges_value = sess.run(edges)
            count = count + len(edges_value)
            print count
        except tf.errors.OutOfRangeError:
            break

The number of records is 260, so after repeat(2) and batching by 13 there should be 40 batches, and this works.
However, when I use similar code for training, the total number of records consumed is less than 260, only 140 (measured through the count variable). Does anyone know how to solve this problem?
I use tensorflow-gpu 1.4.
My training code is:
shuffle_buffer = params["shuffle_buffer"]  # 1000
repeat_times = params["repeat_times"]      # 1
batch_size = params["batch_size"]          # 26
num_classes = params["num_classes"]        # 2

dataset = model.get_dataset(filenames, shuffle_buffer, repeat_times, batch_size)
iterator = dataset.make_one_shot_iterator()

with tf.device('/gpu:1'):
    global_step = tf.train.get_or_create_global_step()
    learning_rate = tf.train.exponential_decay(params["learning_rate"],
                                               global_step, 100, 0.99)
    optimizer = tf.train.AdamOptimizer(learning_rate)

    images, labels, edges = iterator.get_next()
    _, probs = model.interence(features=images, training=True)
    loss, reg = model.get_loss(probs, labels, edges, num_classes)
    _, acc_mean, _ = model.get_acc(probs, labels)

    train_op = optimizer.minimize(loss, global_step=global_step)
    variables_average = tf.train.ExponentialMovingAverage(0.99, global_step)
    var_list = tf.trainable_variables(scope='.*(kernel|bias)')
    variables_average_op = variables_average.apply(var_list)

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_all_op = tf.group(train_op, variables_average_op)

tf.summary.scalar("loss", loss)
tf.summary.scalar("reg", reg)
tf.summary.scalar("acc_mean", acc_mean)
merged = tf.summary.merge_all()

saver = tf.train.Saver(max_to_keep=5)
config = tf.ConfigProto(log_device_placement=True,
                        allow_soft_placement=True)
config.gpu_options.allow_growth = True

count = 0
with tf.Session(config=config) as sess:
    tf.global_variables_initializer().run()
    writer = tf.summary.FileWriter('./train', sess.graph)
    for _ in xrange(10):
        try:
            edges_value = sess.run(edges)
            count = count + len(edges_value)
            _, step, summary = sess.run([train_all_op, global_step, merged])
            writer.add_summary(summary, step)
            if step % 5 == 0:
                loss_value = sess.run(loss)
                print loss_value
                acc_mean_value = sess.run(acc_mean)
                print acc_mean_value
                saver.save(sess, params["save_dir"], step)
        except tf.errors.OutOfRangeError:
            print "end of data"
            break

    print count
    print "the final step is %d" % step
    loss_value = sess.run(loss)
    print loss_value
    acc_mean_value = sess.run(acc_mean)
    print acc_mean_value
    saver.save(sess, params["save_dir"], step)
    writer.close()
finally I got info in the terminal:
end of data
130
the final step is 5
To test the code I set the repeat times to 1.
But when I use this test code:
def test():
    dataset = get_dataset("train_output.tfrecords", 1000, 1, 26)
    iterator = dataset.make_one_shot_iterator()
    images, labels, edges = iterator.get_next()
    count = 0
    with tf.Session() as sess:
        for i in xrange(10):
            try:
                images_value, labels_value, edges_value = sess.run([images, labels, edges])
                count = count + len(edges_value)
            except tf.errors.OutOfRangeError:
                print "end of data"
        print count
        print i

test()
The terminal shows:
260
9
The problem is that sess.run(edges) causes this part of the graph to execute again: images, labels, edges = iterator.get_next(). Therefore, every time you run it you consume one iteration that is not counted by your counter.
To count the edges, keep a counter inside the with tf.device('/gpu:1') block. You can even graph it on TensorBoard using a tf.summary.scalar, similar to how you do with loss.
Declare a counter variable and an update op:

edges_count = tf.Variable(0, name='edges_count', trainable=False, dtype=tf.int32)
images, labels, edges = iterator.get_next()
edges_count_update_op = tf.assign_add(edges_count, tf.shape(edges)[0])

Then add edges_count_update_op to your train_op group.
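As a side note on the counting itself, fetching everything you need in a single sess.run call avoids consuming extra iterator steps. A rough sketch in the style of the training code above (not a drop-in replacement):

count = 0
with tf.Session(config=config) as sess:
    tf.global_variables_initializer().run()
    writer = tf.summary.FileWriter('./train', sess.graph)
    while True:
        try:
            # one sess.run consumes exactly one batch from the iterator
            _, step, summary, edges_value = sess.run([train_all_op, global_step, merged, edges])
            count = count + edges_value.shape[0]
            writer.add_summary(summary, step)
        except tf.errors.OutOfRangeError:
            break
    print count
    # count should now equal repeat_times * number of records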
