How to group the histograms shown on tensorboard in Keras? - python

I am trying to come to grips with the graphs shown on TensorBoard when using Keras and tuning hyperparameters with the HParams plugin (HPARAMS).
As an example, take a neural network with dense layers and batch normalization layers that classifies the MNIST digits from 0 to 4.
The function producing the summaries shown on tensorboard is the following:
def run(run_dir, hparams):
    with tf.summary.create_file_writer(run_dir).as_default() as summ:
        hp.hparams(hparams)  # record the values used in this trial
        num_layers = 5
        m = train_test_model(hparams, run_dir, num_layers=num_layers)
        with tf.name_scope("init_lr_{}".format(hparams[HP_INITIAL_LEARNING_RATE])):
            for i, l in enumerate(m.layers[1:]):
                #print(l.get_config())
                if "batch_normalization" in l.get_config()['name']:
                    w, b, c, d = l.get_weights()
                    tf.summary.histogram(name='W%d' % i, data=w, step=1, description="weights of " + l.get_config()['name'])
                    tf.summary.histogram(name='B%d' % i, data=b, step=1, description="biases of " + l.get_config()['name'])
                else:
                    w, b = l.get_weights()
                    tf.summary.histogram(name='W%d' % i, data=w, step=1, description="weights of " + l.get_config()['name'])
                    tf.summary.histogram(name='B%d' % i, data=b, step=1, description="biases of " + l.get_config()['name'])
train_test_model is a function that trains different models using hp.HParam for the hyperparameter variables. The problem is with the histograms. With this code the histogram plots (of weights and biases) are named sequentially: for the first model I get W_1 to W_6 and B_1 to B_6, for the second W_7 to W_12, and so on. The model they belong to is shown at the top of the corresponding figure, but I think it would be better to group the weights and biases according to the model they belong to and, within each model, name the histograms W_1 to W_6 and B_1 to B_6.
An example of what I get is the following, where each model actually has only 5 Dense layers and 5 BatchNormalization layers (excluding the one after the Flatten layer):
To solve this problem I tried wrapping the loop in with tf.name_scope("init_lr_{}".format(hparams[HP_INITIAL_LEARNING_RATE])): but this does not solve the problem: it keeps showing the previous plots with the previous names and adds other plots with a different layout, as in the following picture:
How can I make tensorboard group the histograms according to the NN model they belong to? Should I create a specific folder for each model trained?
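To make the folder idea concrete, here is a minimal, untested sketch of what I have in mind: each trial writes its summaries into its own subdirectory, so TensorBoard would list every model as a separate run. It reuses the names defined in the full code below; run_name is a hypothetical naming helper, not something in my current code.
import os
import tensorflow as tf

def run(run_dir, hparams, run_name):
    # e.g. run_name = "init_lr_0.0001" (hypothetical naming scheme per trial)
    trial_dir = os.path.join(run_dir, run_name)
    with tf.summary.create_file_writer(trial_dir).as_default():
        hp.hparams(hparams)
        m = train_test_model(hparams, trial_dir, num_layers=5)
        for i, l in enumerate(m.layers[1:]):
            weights = l.get_weights()
            if weights:                 # skip layers without trainable weights
                tf.summary.histogram(name='W_%d' % i, data=weights[0], step=1)
            if len(weights) > 1:
                tf.summary.histogram(name='B_%d' % i, data=weights[1], step=1)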
FULL CODE
#SET UP
import tensorflow as tf
from tensorflow import keras
from tensorboard.plugins.hparams import api as hp
import os
keras.backend.clear_session()
#LOAD DATA
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
X_train_reduced = X_train_full[y_train_full<5]
y_train_reduced = y_train_full[y_train_full<5]
X_test_reduced = X_test[y_test<5]
y_test_reduced = y_test[y_test<5]
X_train = X_train_reduced[5000:]
y_train = y_train_reduced[5000:]
X_valid = X_train_reduced[:5000]
y_valid = y_train_reduced[:5000]
#set the hyperparameters to tune
keras.backend.clear_session()
HP_INITIAL_LEARNING_RATE = hp.HParam("initial_learning_rate", hp.Discrete([0.0001, 0.00012, 0.00015]))
HP_NUM_BATCH_SIZE = hp.HParam("batch_size", hp.Discrete([32]))
HP_NUM_EPOCHS = hp.HParam("epochs", hp.Discrete([180]))
HP_BETA_1 = hp.HParam("beta_1", hp.Discrete([0.95]))
HP_BETA_2 = hp.HParam("beta_2", hp.Discrete([0.9994]))
HP_DECAY_STEP = hp.HParam("decay_step", hp.Discrete([10000]))
HP_DECAY_RATE = hp.HParam("decay_rate", hp.Discrete([0.8]))
#function creating the summaries
def run(run_dir, hparams):
    with tf.summary.create_file_writer(run_dir).as_default() as summ:
        hp.hparams(hparams)  # record the values used in this trial
        num_layers = 5
        m = train_test_model(hparams, run_dir, num_layers=num_layers)
        with tf.name_scope("init_lr_{}".format(hparams[HP_INITIAL_LEARNING_RATE])):
            for i, l in enumerate(m.layers[1:]):
                #print(l.get_config())
                if "batch_normalization" in l.get_config()['name']:
                    w, b, c, d = l.get_weights()
                    tf.summary.histogram(name='W%d' % i, data=w, step=1, description="weights of " + l.get_config()['name'])
                    tf.summary.histogram(name='B%d' % i, data=b, step=1, description="biases of " + l.get_config()['name'])
                    #tf.summary.histogram(name='3rd type of weight in batch %d' % i, data=c, step=1, description="3rd argument of " + l.get_config()['name'])
                    #tf.summary.histogram(name='4th type of weight in batch %d' % i, data=d, step=1, description="4th argument of " + l.get_config()['name'])
                else:
                    w, b = l.get_weights()
                    tf.summary.histogram(name='W%d' % i, data=w, step=1, description="weights of " + l.get_config()['name'])
                    tf.summary.histogram(name='B%d' % i, data=b, step=1, description="biases of " + l.get_config()['name'])
#function creating different models according to the hyperparameters passed with
# hparams
def train_test_model(hparams, folder, num_layers=5):
    act_fun = "elu"
    initializer = "he_normal"
    NAME = "MNIST_0-4-earl_stp-nDens_{}-{}-{}-batch_norm-btch_sz_{}-epo_{}-Adam_opt-b1_{}-b2_{}-lr_exp_dec-in_lr_{}-lr_decRate_{}-lr_decStep_{}".format(
        num_layers, act_fun, initializer, hparams[HP_NUM_BATCH_SIZE], hparams[HP_NUM_EPOCHS], hparams[HP_BETA_1],
        hparams[HP_BETA_2], hparams[HP_INITIAL_LEARNING_RATE], hparams[HP_DECAY_RATE], hparams[HP_DECAY_STEP])
    model = keras.models.Sequential()
    model.add(keras.layers.Flatten(input_shape=[28, 28]))
    input_size = 28**2
    model.add(keras.layers.BatchNormalization())
    hidden_layer_neurons = 100
    for i in range(num_layers):
        model.add(keras.layers.Dense(hidden_layer_neurons, activation=act_fun, kernel_initializer="he_normal"))
        model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Dense(5, activation='softmax'))
    lr_schedule = keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=hparams[HP_INITIAL_LEARNING_RATE],
        decay_steps=hparams[HP_DECAY_STEP],
        decay_rate=hparams[HP_DECAY_RATE])
    my_opt = keras.optimizers.Adam(learning_rate=lr_schedule, beta_1=hparams[HP_BETA_1], beta_2=hparams[HP_BETA_2])
    early_stopping_cb = keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)
    #model_checkpoint_cb = keras.callbacks.ModelCheckpoint("GridSearchCV.h5", save_best_only=True)
    run_index = 1  # increment every time you train the model
    logdir = os.path.join(os.curdir, folder, NAME)
    tensorboard_cb = keras.callbacks.TensorBoard(logdir, histogram_freq=1)
    callbacks = [
        early_stopping_cb,
        tensorboard_cb,
        hp.KerasCallback(logdir, hparams),  # log hparams
    ]
    model.compile(loss='sparse_categorical_crossentropy', metrics=["accuracy"], optimizer=my_opt)
    model.fit(X_train, y_train, epochs=hparams[HP_NUM_EPOCHS], batch_size=hparams[HP_NUM_BATCH_SIZE],
              validation_data=(X_valid, y_valid), callbacks=callbacks)
    return model
session_num = 0
LOG_FOLDER = 'logs/batch_normalization/'
for num_batch in HP_NUM_BATCH_SIZE.domain.values:
    for epoch in HP_NUM_EPOCHS.domain.values:
        for beta_1 in HP_BETA_1.domain.values:
            for beta_2 in HP_BETA_2.domain.values:
                for initial_learning_rate in HP_INITIAL_LEARNING_RATE.domain.values:
                    for decay_rate in HP_DECAY_RATE.domain.values:
                        for decay_step in HP_DECAY_STEP.domain.values:
                            hparams = {
                                HP_NUM_BATCH_SIZE: num_batch,
                                HP_NUM_EPOCHS: epoch,
                                HP_BETA_1: beta_1,
                                HP_BETA_2: beta_2,
                                HP_INITIAL_LEARNING_RATE: initial_learning_rate,
                                HP_DECAY_RATE: decay_rate,
                                HP_DECAY_STEP: decay_step
                            }
                            #print('--- Starting trial: %s' % run_name)
                            print({h.name: hparams[h] for h in hparams})
                            run(LOG_FOLDER, hparams)
                            session_num += 1

Related

RuntimeError: mat1 and mat2 shapes cannot be multiplied CNN

While running this code for my dataset, I am getting errors. My data head looks like this:
fridge_temperature temp_condition label
0 13.10 high 0
1 8.65 high 0
2 2.00 low 0
3 4.80 low 0
4 10.70 high 0
and this is the shape of my data: (587076, 3)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, random_split

# --------------- Dataset ---------------
class StudentsPerformanceDataset(Dataset):
    """Students Performance dataset."""

    def __init__(self, csv_file):
        """Initializes instance of class StudentsPerformanceDataset.
        Args:
            csv_file (str): Path to the csv file with the students data.
        """
        df = pd.read_csv("Z:/new_file.csv")
        # Drop the column you want to remove
        df = df.drop('date_time', axis=1)
        df = df.drop('type', axis=1)
        print(df.head())
        print(df.shape)
        # Grouping variable names
        self.categorical = ["temp_condition"]
        self.target = "label"
        # One-hot encoding of categorical variables
        self.students_frame = pd.get_dummies(df, prefix=self.categorical)
        # Save target and predictors
        self.X = self.students_frame.drop(self.target, axis=1)
        self.y = self.students_frame[self.target]

    def __len__(self):
        return len(self.students_frame)

    def __getitem__(self, idx):
        # Convert idx from tensor to list due to pandas bug (that arises when using pytorch's random_split)
        if isinstance(idx, torch.Tensor):
            idx = idx.tolist()
        return [self.X.iloc[idx].values, self.y[idx]]
# --------------- Model ---------------
class Net(nn.Module):
    def __init__(self, D_in, H=15, D_out=1):
        super().__init__()
        self.fc1 = nn.Linear(D_in, H)
        self.fc2 = nn.Linear(H, D_out)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        return x.squeeze()

""" class Net(nn.Module):
    def __init__(self, D_in, H=15, D_out=1):
        super().__init__()
        self.fc1 = nn.Linear(D_in, H)
        self.fc2 = nn.Linear(H, D_out)
        self.relu = nn.ReLU()
    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        return x.squeeze() """
# --------------- Training ---------------
def train(csv_file, n_epochs=100):
    """Trains the model.
    Args:
        csv_file (str): Absolute path of the dataset used for training.
        n_epochs (int): Number of epochs to train.
    """
    # Load dataset
    dataset = StudentsPerformanceDataset(csv_file)
    # Split into training and test
    train_size = int(0.8 * len(dataset))
    test_size = len(dataset) - train_size
    trainset, testset = random_split(dataset, [train_size, test_size])
    # Dataloaders
    trainloader = DataLoader(trainset, batch_size=200, shuffle=True)
    testloader = DataLoader(testset, batch_size=200, shuffle=False)
    # Use gpu if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Define the model
    D_in, H = 3, 15
    net = Net(D_in, H).to(device)
    #D_in, H = 19, 15
    #net = Net(D_in, H).to(device)
    # Loss function
    criterion = nn.MSELoss()
    # Optimizer
    optimizer = optim.Adam(net.parameters(), weight_decay=0.0001)
    # Train the net
    loss_per_iter = []
    loss_per_batch = []
    for epoch in range(n_epochs):
        running_loss = 0.0
        for i, (inputs, labels) in enumerate(trainloader):
            inputs = inputs.to(device)
            labels = labels.to(device)
            # Zero the parameter gradients
            optimizer.zero_grad()
            # Forward + backward + optimize
            outputs = net(inputs.float())
            loss = criterion(outputs, labels.float())
            loss.backward()
            optimizer.step()
            # Save loss to plot
            running_loss += loss.item()
            loss_per_iter.append(loss.item())
        loss_per_batch.append(running_loss / (i + 1))
        running_loss = 0.0
    # Comparing training to test
    dataiter = iter(testloader)
    inputs, labels = dataiter.next()
    inputs = inputs.to(device)
    labels = labels.to(device)
    outputs = net(inputs.float())
    print("Root mean squared error")
    print("Training:", np.sqrt(loss_per_batch[-1]))
    print("Test", np.sqrt(criterion(labels.float(), outputs).detach().cpu().numpy()))
    # Plot training loss curve
    plt.plot(np.arange(len(loss_per_iter)), loss_per_iter, "-", alpha=0.5, label="Loss per epoch")
    plt.plot(np.arange(len(loss_per_iter), step=4) + 3, loss_per_batch, ".-", label="Loss per mini-batch")
    plt.xlabel("Number of epochs")
    plt.ylabel("Loss")
    plt.legend()
    plt.show()
if __name__ == "__main__":
    import os
    import sys
    import argparse
    # By default, read csv file in the same directory as this script
    csv_file = os.path.join(sys.path[0], "Z:/new_file.csv")
    # Parsing arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--file", "-f", nargs="?", const=csv_file, default=csv_file,
                        help="Dataset file used for training")
    parser.add_argument("--epochs", "-e", type=int, nargs="?", default=100, help="Number of epochs to train")
    args = parser.parse_args()
    # Call the main function of the script
    train(args.file, args.epochs)
I am getting this error:
return F.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (200x7 and 3x15)
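A note for context (not from the original post): the shapes in the error say each batch row has 7 features (200x7) while the first linear layer was built with D_in=3 (3x15). A hedged sketch of sizing D_in from the dataset defined above, rather than hard-coding it, might look like this; it assumes the StudentsPerformanceDataset, Net, csv_file and device names from the code above.
# Sketch only (an assumption, not the original author's fix):
dataset = StudentsPerformanceDataset(csv_file)
D_in = dataset.X.shape[1]        # number of predictors after one-hot encoding (7 here)
net = Net(D_in, H=15).to(device)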

Using tensorflow and TFBertForNextSentencePrediction to further train bert on a specific corpus

I'm trying to train TFBertForNextSentencePrediction on my own corpus, not from scratch, but rather taking the existing bert model with only a next sentence prediction head and further training it on a specific corpus of text (pairs of sentences). Then I want to use the model I trained to extract sentence embeddings from the last hidden state for other texts.
Currently the problem I encounter is that after I train the keras model I am not able to extract the hidden states of the last layer before the next sentence prediction head.
Below is the code. Here I only train it on a few sentences just to make sure the code works.
Any help will be greatly appreciated.
Thanks,
Ayala
import os  # needed for the path handling below (missing from the original imports)
import numpy as np
import pandas as pd
import tensorflow as tf
from datetime import datetime
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.callbacks import ModelCheckpoint
from transformers import BertTokenizer, PreTrainedTokenizer, BertConfig, TFBertForNextSentencePrediction
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score

PRETRAINED_MODEL = 'bert-base-uncased'

# set paths and file names
time_stamp = str(datetime.now().year) + "_" + str(datetime.now().month) + "_" + str(datetime.now().day) + "_" + \
             str(datetime.now().hour) + "_" + str(datetime.now().minute)
model_name = "pretrained_nsp_model"
model_dir_data = model_name + "_" + time_stamp
model_fn = model_dir_data + ".h5"

base_path = os.path.dirname(__file__)
input_path = os.path.join(base_path, "input_data")
output_path = os.path.join(base_path, "output_models")
model_path = os.path.join(output_path, model_dir_data)
if not os.path.exists(model_path):
    os.makedirs(model_path)

# set model checkpoint
checkpoint = ModelCheckpoint(os.path.join(model_path, model_fn), monitor="val_loss", verbose=1, save_best_only=True,
                             save_weights_only=True, mode="min")
# read data
max_length = 512

def get_tokenizer(pretrained_model_name):
    tokenizer = BertTokenizer.from_pretrained(pretrained_model_name)
    return tokenizer

def tokenize_nsp_data(A, B, max_length):
    data_inputs = tokenizer(A, B, add_special_tokens=True, max_length=max_length, truncation=True,
                            pad_to_max_length=True, return_attention_mask=True,
                            return_tensors="tf")
    return data_inputs

def get_data_features(data_inputs, max_length):
    data_features = {}
    for key in data_inputs:
        data_features[key] = sequence.pad_sequences(data_inputs[key], maxlen=max_length, truncating="post",
                                                    padding="post", value=0)
    return data_features

def get_transformer_model(transformer_model_name):
    # get transformer model
    config = BertConfig(output_attentions=True)
    config.output_hidden_states = True
    config.return_dict = True
    transformer_model = TFBertForNextSentencePrediction.from_pretrained(transformer_model_name, config=config)
    return transformer_model

def get_keras_model(transformer_model):
    # get keras model
    input_ids = tf.keras.layers.Input(shape=(max_length,), name='input_ids', dtype='int32')
    input_masks_ids = tf.keras.layers.Input(shape=(max_length,), name='attention_mask', dtype='int32')
    token_type_ids = tf.keras.layers.Input(shape=(max_length,), name='token_type_ids', dtype='int32')
    X = transformer_model({'input_ids': input_ids, 'attention_mask': input_masks_ids, 'token_type_ids': token_type_ids})[0]
    model = tf.keras.Model(inputs=[input_ids, input_masks_ids, token_type_ids], outputs=X)
    model.summary()
    model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
                  optimizer=tf.optimizers.Adam(learning_rate=0.00005), metrics=['accuracy'])
    return model

def get_metrices(true_values, pred_values):
    cm = confusion_matrix(true_values, pred_values)
    acc_score = accuracy_score(true_values, pred_values)
    f1 = f1_score(true_values, pred_values, average="binary")
    precision = precision_score(true_values, pred_values, average="binary")
    recall = recall_score(true_values, pred_values, average="binary")
    metrices = {'confusion_matrix': cm,
                'acc_score': acc_score,
                'f1': f1,
                'precision': precision,
                'recall': recall
                }
    for k, v in metrices.items():
        print(k, ':\n', v)
    return metrices
# get tokenizer
tokenizer = get_tokenizer(PRETRAINED_MODEL)

# train
prompt = ["Hello", "Hello", "Hello", "Hello"]
next_sentence = ["How are you?", "Pizza", "How are you?", "Pizza"]
train_labels = [0, 1, 0, 1]
train_labels = to_categorical(train_labels)
train_inputs = tokenize_nsp_data(prompt, next_sentence, max_length)
train_data_features = get_data_features(train_inputs, max_length)

# val
prompt = ["Hello", "Hello", "Hello", "Hello"]
next_sentence = ["How are you?", "Pizza", "How are you?", "Pizza"]
val_labels = [0, 1, 0, 1]
val_labels = to_categorical(val_labels)
val_inputs = tokenize_nsp_data(prompt, next_sentence, max_length)
val_data_features = get_data_features(val_inputs, max_length)

# get transformer model
transformer_model = get_transformer_model(PRETRAINED_MODEL)

# get keras model
model = get_keras_model(transformer_model)

callback_list = []
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=4, min_delta=0.005, verbose=1)
callback_list.append(early_stop)
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=2, epsilon=0.001)
callback_list.append(reduce_lr)
callback_list.append(checkpoint)

history = model.fit([train_data_features['input_ids'], train_data_features['attention_mask'],
                     train_data_features['token_type_ids']], np.array(train_labels), batch_size=2, epochs=3,
                    validation_data=([val_data_features['input_ids'], val_data_features['attention_mask'],
                                      val_data_features['token_type_ids']], np.array(val_labels)), verbose=1,
                    callbacks=callback_list)

model.layers[3].save_pretrained(model_path)  # need to save this and make sure i can get the hidden states

## predict
# load model
transformer_model = get_transformer_model(model_path)
model = get_keras_model(transformer_model)
model.summary()
model.load_weights(os.path.join(model_path, model_fn))

# test
prompt = ["Hello", "Hello"]
next_sentence = ["How are you?", "Pizza"]
test_labels = [0, 1]
test_df = pd.DataFrame({'A': prompt, 'B': next_sentence, 'label': test_labels})
test_labels = to_categorical(val_labels)
test_inputs = tokenize_nsp_data(prompt, next_sentence, max_length)
test_data_features = get_data_features(test_inputs, max_length)

# predict
pred_test = model.predict([test_data_features['input_ids'], test_data_features['attention_mask'], test_data_features['token_type_ids']])
preds = tf.keras.activations.softmax(tf.convert_to_tensor(pred_test)).numpy()
true_test = test_df['label'].to_list()
pred_test = [1 if p[1] > 0.5 else 0 for p in preds]
test_df['pred_val'] = pred_test
metrices = get_metrices(true_test, pred_test)
I am also attaching a picture from debugging mode in which I try (with no success) to view the hidden states. The problem is that I am not able to see and save the transformer model I trained and view the embeddings of the last hidden state. I tried converting the KerasTensor to a numpy array but without success.
The issue resides in your get_keras_model() function. You defined there that you are only interested in the first element of the output (i.e. the logits) with:
X = transformer_model({'input_ids': input_ids, 'attention_mask': input_masks_ids, 'token_type_ids': token_type_ids})[0]
Just make the index selection conditional, like this, to get the whole output of the model:
def get_keras_model(transformer_model, is_training=True):
    ### your other code
    X = transformer_model({'input_ids': input_ids, 'attention_mask': input_masks_ids, 'token_type_ids': token_type_ids})
    if is_training:
        X = X[0]
    ### your other code
    return model

# predict
### your other code
model = get_keras_model(transformer_model, is_training=False)
### your other code
print(pred_test.keys())
Output:
odict_keys(['logits', 'hidden_states', 'attentions'])
P.S.: The BertTokenizer can truncate and add padding by itself (see the documentation).
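As an illustrative follow-up (my sketch, not part of the original answer): with is_training=False the prediction output contains the per-layer hidden states, so the sentence embeddings the question asks about could be read out roughly as below, under the assumption that the hidden_states entry comes back as a sequence of per-layer arrays. The last lines also show the tokenizer doing truncation and padding itself (padding='max_length' is the non-deprecated spelling of pad_to_max_length=True in recent transformers versions); names such as model, tokenizer and test_data_features are the ones defined in the post.
# Sketch under the assumptions stated above.
pred_test = model.predict([test_data_features['input_ids'],
                           test_data_features['attention_mask'],
                           test_data_features['token_type_ids']])
last_hidden = pred_test['hidden_states'][-1]   # assumed shape: (batch, seq_len, hidden_size)
cls_embeddings = last_hidden[:, 0, :]          # one vector per sentence pair

# Letting the tokenizer truncate and pad directly, without get_data_features:
inputs = tokenizer(prompt, next_sentence, add_special_tokens=True, max_length=max_length,
                   truncation=True, padding='max_length', return_tensors="tf")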

Saving a 'fine-tuned' bert model

I am trying to save a fine-tuned bert model. I have run the code correctly - it works fine, and in the ipython console I am able to call getPrediction and have it return the result.
I have my weight files saved (the highest being model.ckpt-333.data-00000-of-00001).
I have no idea how I would go about saving the model to be reusable.
I am using bert-tensorflow.
import json
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from datetime import datetime
from sklearn.model_selection import train_test_split
import os

print("tensorflow version : ", tf.__version__)
print("tensorflow_hub version : ", hub.__version__)

#Importing BERT modules
import bert
from bert import run_classifier
from bert import optimization
from bert import tokenization

#set output directory of the model
OUTPUT_DIR = 'model'

##markdown Whether or not to clear/delete the directory and create a new one
DO_DELETE = False  ##param {type:"boolean"}
if DO_DELETE:
    try:
        tf.gfile.DeleteRecursively(OUTPUT_DIR)
    except:
        pass
tf.io.gfile.makedirs(OUTPUT_DIR)
print('***** Model output directory: {} *****'.format(OUTPUT_DIR))

### Load the data
data = pd.read_csv("data/bbc-text.csv")
data.columns = ['category', 'text']
print('*****Data Loaded: {} *****'.format(data.head()))

#check to see if any null values are present.
print('*****Empty Data: {} *****'.format(data[data.isnull().any(axis=1)]))

#encode category variable into numeric
data.category = pd.Categorical(data.category)
data['code'] = data.category.cat.codes

from sklearn.model_selection import train_test_split
train, test = train_test_split(data, test_size=0.2, random_state=200)

## 2 -- Data Visualisation
print(data.code.unique())
import matplotlib.pyplot as plt
train['code'].value_counts().plot(kind='bar')
DATA_COLUMN = 'text'
LABEL_COLUMN = 'code'
label_list = [0, 1, 2, 3, 4]
plt.show()
## 2 -- Data Preprocessing
train_InputExamples = train.apply(lambda x: bert.run_classifier.InputExample(guid=None,
                                                                             text_a=x[DATA_COLUMN],
                                                                             text_b=None,
                                                                             label=x[LABEL_COLUMN]), axis=1)
test_InputExamples = test.apply(lambda x: bert.run_classifier.InputExample(guid=None,
                                                                           text_a=x[DATA_COLUMN],
                                                                           text_b=None,
                                                                           label=x[LABEL_COLUMN]), axis=1)

# This is a path to an uncased (all lowercase) version of BERT
BERT_MODEL_HUB = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1"

def create_tokenizer_from_hub_module():
    """Get the vocab file and casing info from the Hub module."""
    with tf.Graph().as_default():
        bert_module = hub.Module(BERT_MODEL_HUB)
        tokenization_info = bert_module(signature="tokenization_info", as_dict=True)
        with tf.compat.v1.Session() as sess:
            vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"],
                                                  tokenization_info["do_lower_case"]])
    return bert.tokenization.FullTokenizer(
        vocab_file=vocab_file, do_lower_case=do_lower_case)

tokenizer = create_tokenizer_from_hub_module()

# We'll set sequences to be at most 128 tokens long.
MAX_SEQ_LENGTH = 128

# Convert our train and validation features to InputFeatures that BERT understands.
train_features = bert.run_classifier.convert_examples_to_features(train_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)
test_features = bert.run_classifier.convert_examples_to_features(test_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)

#Example on first observation in the training set
print("Example of train[0] as a training set")
print("Sentence : ", train_InputExamples.iloc[0].text_a)
print("-" * 30)
print("Tokens : ", tokenizer.tokenize(train_InputExamples.iloc[0].text_a))
print("-" * 30)
print("Input IDs : ", train_features[0].input_ids)
print("-" * 30)
print("Input Masks : ", train_features[0].input_mask)
print("-" * 30)
print("Segment IDs : ", train_features[0].segment_ids)
## 3. Creating a Multiclass Classifier
def create_model(is_predicting, input_ids, input_mask, segment_ids, labels,
                 num_labels):
    bert_module = hub.Module(
        BERT_MODEL_HUB,
        trainable=True)
    bert_inputs = dict(
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids)
    bert_outputs = bert_module(
        inputs=bert_inputs,
        signature="tokens",
        as_dict=True)
    # Use "pooled_output" for classification tasks on an entire sentence.
    # Use "sequence_outputs" for token-level output.
    output_layer = bert_outputs["pooled_output"]
    hidden_size = output_layer.shape[-1].value
    # Create our own layer to tune for politeness data.
    output_weights = tf.compat.v1.get_variable(
        "output_weights", [num_labels, hidden_size],
        initializer=tf.truncated_normal_initializer(stddev=0.02))
    output_bias = tf.compat.v1.get_variable(
        "output_bias", [num_labels], initializer=tf.zeros_initializer())
    with tf.compat.v1.variable_scope("loss"):
        # Dropout helps prevent overfitting
        output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
        logits = tf.matmul(output_layer, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        log_probs = tf.nn.log_softmax(logits, axis=-1)
        # Convert labels into one-hot encoding
        one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
        predicted_labels = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32))
        # If we're predicting, we want predicted labels and the probabilities.
        if is_predicting:
            return (predicted_labels, log_probs)
        # If we're training/evaluating, compute loss between predicted and actual label
        per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
        loss = tf.reduce_mean(per_example_loss)
        return (loss, predicted_labels, log_probs)
#A function that adapts our model to work for training, evaluation, and prediction.
# model_fn_builder actually creates our model function
# using the passed parameters for num_labels, learning_rate, etc.
def model_fn_builder(num_labels, learning_rate, num_train_steps,
                     num_warmup_steps):
    """Returns `model_fn` closure for TPUEstimator."""

    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""
        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]
        label_ids = features["label_ids"]
        is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)

        # TRAIN and EVAL
        if not is_predicting:
            (loss, predicted_labels, log_probs) = create_model(
                is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)
            train_op = bert.optimization.create_optimizer(
                loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)

            # Calculate evaluation metrics.
            def metric_fn(label_ids, predicted_labels):
                accuracy = tf.compat.v1.metrics.accuracy(label_ids, predicted_labels)
                true_pos = tf.compat.v1.metrics.true_positives(
                    label_ids,
                    predicted_labels)
                true_neg = tf.compat.v1.metrics.true_negatives(
                    label_ids,
                    predicted_labels)
                false_pos = tf.compat.v1.metrics.false_positives(
                    label_ids,
                    predicted_labels)
                false_neg = tf.compat.v1.metrics.false_negatives(
                    label_ids,
                    predicted_labels)
                return {
                    "eval_accuracy": accuracy,
                    "true_positives": true_pos,
                    "true_negatives": true_neg,
                    "false_positives": false_pos,
                    "false_negatives": false_neg
                }

            eval_metrics = metric_fn(label_ids, predicted_labels)
            if mode == tf.estimator.ModeKeys.TRAIN:
                return tf.estimator.EstimatorSpec(mode=mode,
                                                  loss=loss,
                                                  train_op=train_op)
            else:
                return tf.estimator.EstimatorSpec(mode=mode,
                                                  loss=loss,
                                                  eval_metric_ops=eval_metrics)
        else:
            (predicted_labels, log_probs) = create_model(
                is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)
            predictions = {
                'probabilities': log_probs,
                'labels': predicted_labels
            }
            return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    # Return the actual model function in the closure
    return model_fn
# Compute train and warmup steps from batch size
# These hyperparameters are copied from this colab notebook (https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)
BATCH_SIZE = 16
LEARNING_RATE = 2e-5
NUM_TRAIN_EPOCHS = 3.0
# Warmup is a period of time where the learning rate is small and gradually increases -- usually helps training.
WARMUP_PROPORTION = 0.1
# Model configs
SAVE_CHECKPOINTS_STEPS = 300
SAVE_SUMMARY_STEPS = 100

# Compute train and warmup steps from batch size
num_train_steps = int(len(train_features) / BATCH_SIZE * NUM_TRAIN_EPOCHS)
num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)

# Specify output directory and number of checkpoint steps to save
run_config = tf.estimator.RunConfig(
    model_dir=OUTPUT_DIR,
    save_summary_steps=SAVE_SUMMARY_STEPS,
    save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS)

#Initializing the model and the estimator
model_fn = model_fn_builder(
    num_labels=len(label_list),
    learning_rate=LEARNING_RATE,
    num_train_steps=num_train_steps,
    num_warmup_steps=num_warmup_steps)
estimator = tf.estimator.Estimator(
    model_fn=model_fn,
    config=run_config,
    params={"batch_size": BATCH_SIZE})

# Create an input function for training. drop_remainder = True for using TPUs.
train_input_fn = bert.run_classifier.input_fn_builder(
    features=train_features,
    seq_length=MAX_SEQ_LENGTH,
    is_training=True,
    drop_remainder=False)

# Create an input function for validating. drop_remainder = True for using TPUs.
test_input_fn = run_classifier.input_fn_builder(
    features=test_features,
    seq_length=MAX_SEQ_LENGTH,
    is_training=False,
    drop_remainder=False)
# Training the model
print(f'Beginning Training!')
current_time = datetime.now()
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
print("Training took time ", datetime.now() - current_time)

# Evaluating the model with the validation set
accuracy = estimator.evaluate(input_fn=test_input_fn, steps=None)

# A method to get predictions
def getPrediction(in_sentences):
    # A list to map the actual labels to the predictions
    labels = ["business", "entertainment", "politics", "sports", "tech"]
    # Transforming the test data into BERT accepted form
    input_examples = [run_classifier.InputExample(guid="", text_a=x, text_b=None, label=0) for x in in_sentences]
    # Creating input features for test data
    input_features = run_classifier.convert_examples_to_features(input_examples, label_list, MAX_SEQ_LENGTH, tokenizer)
    # Predicting the classes
    predict_input_fn = run_classifier.input_fn_builder(features=input_features, seq_length=MAX_SEQ_LENGTH,
                                                       is_training=False, drop_remainder=False)
    predictions = estimator.predict(predict_input_fn)
    return [(sentence, prediction['probabilities'], prediction['labels'], labels[prediction['labels']]) for
            sentence, prediction in zip(in_sentences, predictions)]

pred_sentences = list(test['text'])
predictions = getPrediction(pred_sentences)
enc_labels = []
act_labels = []
for i in range(len(predictions)):
    enc_labels.append(predictions[i][2])
    act_labels.append(predictions[i][3])
pd.DataFrame(enc_labels, columns=['category']).to_excel('data/submission_bert.xlsx', index=False)

## Random tester
#Classifying random sentences
tests = getPrediction(['Mr.Modi is the Indian Prime Minister',
                       'Gaming machines are powered by efficient micro processores and GPUs',
                       'That HBO TV series is really good',
                       'A trillion dollar economy '
                       ])
As the question asks how to save the model, here is how it works:
import torch
torch.save(model, 'path/to/model')
saved_model = torch.load('path/to/model')
I think you can just rename your model.ckpt-333.data-00000-of-00001 to bert_model.ckpt and then use it in the same way you would use a non-finetuned model. For example, run
python run_classifier.py \
--task_name=MRPC \
--do_predict=true \
--data_dir=$GLUE_DIR/MRPC \
--vocab_file=$BERT_BASE_DIR/vocab.txt \
--bert_config_file=$BERT_BASE_DIR/bert_config.json \
--init_checkpoint=$TRAINED_CLASSIFIER
with --init_checkpoint pointing to your model's dir, or run bert-as-service
bert-serving-start -model_dir $TRAINED_CLASSIFIER
with the right -model_dir.
You can use this method:
model = MyModel(num_classes).to(device)
optimizer = AdamW(model.parameters(), lr=2e-5, weight_decay=1e-2)
output_model = './models/nameOfYourModel.pth'

# save
def save(model, optimizer):
    # save
    torch.save({
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict()
    }, output_model)

save(model, optimizer)

# load
checkpoint = torch.load(output_model, map_location='cpu')
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
Source: https://github.com/huggingface/transformers/issues/7849#issuecomment-718726121

LSTM implementation / overfitting

I am having a problem with an implementation of an LSTM. I am not sure if I have the right implementation or whether this is just an overfitting problem. I am doing essay grading using an LSTM, scoring text with a score from 0 to 10 (or another range). I am using the ASAP kaggle competition data as one of the training datasets.
However, the main goal is to achieve good performance on a private dataset with around 500 samples. The 500 samples include the validation and training sets. I have previously done some experiments and got the model to work, but after fiddling with something the model doesn't fit anymore. The model does not improve at all. I have also re-implemented the code in a cleaner, much more object-oriented manner and still can't reproduce my previous results.
However, I am getting the model to fit my data, just with tremendous overfitting. I am not sure if this is an implementation problem of some sort or just overfitting, but I cannot get the model to work. The maximum I can get it to is 0.35 kappa using an LSTM on ASAP essay set 1. For some bizarre reason, I can get a single-layer fully connected model to reach 0.75 kappa. I think this is an implementation problem but I am not sure.
Here is my old code:
train.py
import gensim
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import cohen_kappa_score
from torch import nn
import torch.utils.data as data_utils
from torch.optim import Adam
from dataset import AESDataset
from network import Network
from optimizer import Ranger
from qwk import quadratic_weighted_kappa, kappa

batch_size = 32
device = "cuda:0"
torch.manual_seed(1000)

# Load data from csv
file_name = "data/data_new.csv"
data = pd.read_csv(file_name)
arr = data.to_numpy()
text = arr[:, :2]
text = [str(line[0]) + str(line[1]) for line in text]
text = [gensim.utils.simple_preprocess(line) for line in text]
score = arr[:, 2]
score = [sco * 6 for sco in score]
score = np.asarray(score, dtype=int)

train_dataset = AESDataset(text_arr=text[:400], scores=score[:400])
test_dataset = AESDataset(text_arr=text[400:], scores=score[400:])
score = torch.tensor(score).view(-1, 1).long().to(device)

train_loader = data_utils.DataLoader(train_dataset, shuffle=True, batch_size=batch_size, drop_last=True)
test_loader = data_utils.DataLoader(test_dataset, shuffle=True, batch_size=batch_size, drop_last=True)

out_class = 61
epochs = 1000

model = Network(out_class).to(device)
model.load_state_dict(torch.load("model/best_model"))
y_onehot = torch.FloatTensor(batch_size, out_class).to(device)
optimizer = Adam(model.parameters())
criti = torch.nn.CrossEntropyLoss()
# model, optimizer = amp.initialize(model, optimizer, opt_level="O2")

step = 0
for i in range(epochs):
    #Testing
    if i % 1 == 0:
        total_loss = 0
        total_kappa = 0
        total_batches = 0
        model.eval()
        for (text, score) in test_loader:
            out = model(text)
            out_score = torch.argmax(out, 1)
            y_onehot.zero_()
            y_onehot.scatter_(1, score, 1)
            kappa_l = cohen_kappa_score(score.view(batch_size).tolist(), out_score.view(batch_size).tolist())
            score = score.view(-1)
            loss = criti(out, score.view(-1))
            total_loss += loss
            total_kappa += kappa_l
            total_batches += 1
        print(f"Epoch {i} Testing kappa {total_kappa/total_batches} loss {total_loss/total_batches}")
        with open(f"model/epoch_{i}", "wb") as f:
            torch.save(model.state_dict(), f)
        model.train()
    #Training
    for (text, score) in train_loader:
        optimizer.zero_grad()
        step += 1
        out = model(text)
        out_score = torch.argmax(out, 1)
        y_onehot.zero_()
        y_onehot.scatter_(1, score, 1)
        kappa_l = cohen_kappa_score(score.view(batch_size).tolist(), out_score.view(batch_size).tolist())
        loss = criti(out, score.view(-1))
        print(f"Epoch {i} step {step} kappa {kappa_l} loss {loss}")
        loss.backward()
        optimizer.step()
dataset.py
import gensim
import torch
import numpy as np

class AESDataset(torch.utils.data.Dataset):
    def __init__(self, text_arr, scores):
        self.data = text_arr
        self.scores = scores
        self.w2v_model = ("w2vec_model_all")
        self.max_len = 500

    def __getitem__(self, item):
        vector = []
        essay = self.data[item]
        pad_vec = [1 for i in range(300)]
        for i in range(self.max_len - len(essay)):
            vector.append(pad_vec)
        for word in essay:
            word_vec = pad_vec
            try:
                word_vec = self.w2v_model[word]
            except:
                #print(f"Skipping word as word {word} not in dictionary")
                word_vec = pad_vec
            vector.append(word_vec)
        #print(len(vector))
        vector = np.stack(vector)
        tensor = torch.tensor(vector[:self.max_len]).float().to("cuda")
        score = self.scores[item]
        score = torch.tensor(score).long().to("cuda").view(1)
        return tensor, score

    def __len__(self):
        return len(self.scores)
network.py
import torch.nn as nn
import torch
import torch.nn.functional as F

class Network(nn.Module):
    def __init__(self, output_size):
        super(Network, self).__init__()
        self.lstm = nn.LSTM(300, 500, 1, batch_first=True)
        self.dropout = nn.Dropout(p=0.5)
        #self.l2 = nn.L2
        self.linear = nn.Linear(500, output_size)

    def forward(self, x):
        x, _ = self.lstm(x)
        x = x[:, -1, :]
        x = self.dropout(x)
        x = self.linear(x)
        return x
My new code: https://github.com/Clement-Hui/EssayGrading
I think the problem is in the training code: since you are using an LSTM, you are supposed to flush the hidden and cell state at every epoch and detach them from the computation graph after each batch.
network.py
import torch.nn as nn
import torch
import torch.nn.functional as F

class Network(nn.Module):
    def __init__(self, output_size):
        super(Network, self).__init__()
        self.lstm = nn.LSTM(300, 500, 1, batch_first=True)
        self.dropout = nn.Dropout(p=0.5)
        #self.l2 = nn.L2
        self.linear = nn.Linear(500, output_size)

    def forward(self, x, hidden):
        x, hidden = self.lstm(x, hidden)
        x = x.contiguous().view(-1, 500)
        x = self.dropout(x)
        x = self.linear(x)
        return x, hidden

    def init_hidden(self, batch_size):
        weights = next(self.parameters()).data
        hidden = (weights.new(1, batch_size, 500).zero_().cuda(),
                  weights.new(1, batch_size, 500).zero_().cuda())
        return hidden
train.py
# your code for initializing the model and data and all other stuff
for i in range(epochs):
    #Testing
    if i % 1 == 0:
        total_loss = 0
        total_kappa = 0
        total_batches = 0
        model.eval()
        val_h = model.init_hidden(batch_size)  # initialize the hidden state
        for (text, score) in test_loader:
            # Creating new variables for the hidden state, otherwise
            # we'd backprop through the entire training history
            val_h = tuple([each.data for each in val_h])
            out, val_h = model(text, val_h)
            out_score = torch.argmax(out, 1)
            y_onehot.zero_()
            y_onehot.scatter_(1, score, 1)
            kappa_l = cohen_kappa_score(score.view(batch_size).tolist(), out_score.view(batch_size).tolist())
            score = score.view(-1)
            loss = criti(out, score.view(-1))
            total_loss += loss
            total_kappa += kappa_l
            total_batches += 1
        print(f"Epoch {i} Testing kappa {total_kappa/total_batches} loss {total_loss/total_batches}")
        with open(f"model/epoch_{i}", "wb") as f:
            torch.save(model.state_dict(), f)
        model.train()
    #Training
    h = model.init_hidden(batch_size)  # initialize the hidden state
    for (text, score) in train_loader:
        optimizer.zero_grad()
        step += 1
        # Creating new variables for the hidden state, otherwise
        # we'd backprop through the entire training history
        h = tuple([each.data for each in h])
        out, h = model(text, h)
        out_score = torch.argmax(out, 1)
        y_onehot.zero_()
        y_onehot.scatter_(1, score, 1)
        kappa_l = cohen_kappa_score(score.view(batch_size).tolist(), out_score.view(batch_size).tolist())
        loss = criti(out, score.view(-1))
        print(f"Epoch {i} step {step} kappa {kappa_l} loss {loss}")
        loss.backward()
        optimizer.step()
Do let me know whether the changes mentioned work or not.

Tensorflow: How to setup a CheckpointSaverHook

When I start the training on my tf.estimator.Estimator object,
Tensorflow automatically creates a CheckpointSaverHook whilst printing
INFO:tensorflow:Create CheckpointSaverHook.
This automatically created SaverHook will save my model at the very start and the end of the training.
What I want though is to create a checkpoint every n training steps. For this I created my own saving hook and passed it to my estimator when training.
saver_hook = tf.train.CheckpointSaverHook(
    checkpoint_dir=model_dir,
    save_steps=100
)
model.train(input_fn, steps=1500, hooks=[saver_hook])
This works in theory, but my own CheckpointSaverHook will just save *.meta files, while the automatically created one saves *.meta, *.index and *.data-XXXXX-of-XXXXX files.
How can I configure my own SaverHook to do that as well?
EDIT:
Added my whole network definition
network.py
import pickle
import random
import numpy as np
import tensorflow as tf

LEARNING_RATE = 0.002

class TFDotNet:
    def __init__(self, model_dir):
        # model def
        self.model_dir = model_dir
        self.model = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir)
        # hooks
        self.summary_hook = tf.train.SummarySaverHook(
            save_steps=50,
            output_dir=model_dir,
            scaffold=tf.train.Scaffold()
        )
        self.saver_hook = tf.train.CheckpointSaverHook(
            checkpoint_dir=model_dir,
            save_steps=100,
        )

    def train(self, x_train, y_train, steps=1500, batch_size=128):
        """ train the neural network """
        tf.logging.set_verbosity(tf.logging.INFO)
        input_fn = tf.estimator.inputs.numpy_input_fn(
            x={'x': x_train}, y=y_train, batch_size=batch_size, num_epochs=None, shuffle=True
        )
        self.model.train(input_fn, steps=steps, hooks=[self.summary_hook, self.saver_hook])

    def predict(self, x_predict):
        """ predict some inputs """
        input_fn = tf.estimator.inputs.numpy_input_fn(
            x={'x': x_predict}, y=None, batch_size=1, shuffle=False
        )
        return list(self.model.predict(input_fn))

    def evaluate(self, x_test, y_test):
        """ evaluate network on testset """
        input_fn = tf.estimator.inputs.numpy_input_fn(
            x={'x': x_test}, y=y_test, batch_size=1, shuffle=False
        )
        return self.model.evaluate(input_fn)

    def load_dataset(self, dataset_path):
        """ loads a dataset from a serialized data file """
        with open(dataset_path, 'rb') as f:
            return pickle.load(f)

    def split_dataset(self, dataset, ratio, random_state=42):
        """ splits a loaded dataset into training and testset """
        random.seed(random_state)
        random.shuffle(dataset)
        length = int(ratio * len(dataset))
        test_data = dataset[:length]
        training_data = dataset[length:]
        x_train = np.hstack([x for (x, y) in training_data]).transpose().astype('float32')
        y_train = np.asarray([y for (x, y) in training_data]).reshape(-1, 1).astype('float32')
        x_test = np.hstack([x for (x, y) in test_data]).transpose().astype('float32')
        y_test = np.asarray([y for (x, y) in test_data]).reshape(-1, 1).astype('float32')
        return x_train, y_train, x_test, y_test

    def export(self):
        """ exports the conv net """
        def serving_input_receiver_fn():
            # The outer dimension (None) allows us to batch up inputs for
            # efficiency. However, it also means that if we want a prediction
            # for a single instance, we'll need to wrap it in an outer list.
            inputs = {"x": tf.placeholder(shape=[None, 900], dtype=tf.float32)}
            return tf.estimator.export.ServingInputReceiver(inputs, inputs)
        self.model.export_savedmodel(
            export_dir_base=self.model_dir,
            serving_input_receiver_fn=serving_input_receiver_fn)
def cnn_layout(features, reuse, is_training):
    with tf.variable_scope('cnn', reuse=reuse):
        # resize input to [batchsize, height, width, channel]
        x = tf.reshape(features['x'], shape=[-1, 30, 30, 1])
        # conv1, 32 filters, 5 kernel
        conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu, name='conv1')
        # pool1, 2 stride, 2 kernel
        pool1 = tf.layers.max_pooling2d(conv1, 2, 2, name='pool1')
        # conv2, 64 filters, 3 kernel
        conv2 = tf.layers.conv2d(pool1, 64, 3, activation=tf.nn.relu, name='conv2')
        # pool2, 2 stride, 2 kernel
        pool2 = tf.layers.max_pooling2d(conv2, 2, 2, name='pool2')
        # flatten pool2
        flatten = tf.contrib.layers.flatten(pool2)
        # fc1 with 1024 neurons
        fc1 = tf.layers.dense(flatten, 1024, name='fc1')
        # 75% dropout
        drop = tf.layers.dropout(fc1, rate=0.75, training=is_training, name='dropout')
        # output logits
        output = tf.layers.dense(drop, 1, name='output_logits')
        return output

def model_fn(features, labels, mode):
    # set up two networks, one for training and one for prediction, sharing weights
    logits_train = cnn_layout(features=features, reuse=False, is_training=True)
    logits_test = cnn_layout(features=features, reuse=True, is_training=False)
    # predictions
    probabilities = tf.sigmoid(logits_test, name='probabilities')
    predictions = tf.round(probabilities, name='predictions')
    export_outputs = tf.estimator.export.PredictOutput(outputs={'predictions': predictions, 'probabilities': probabilities})
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=predictions, export_outputs={'outputs': export_outputs})
    # define loss and optimizer
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_train, labels=labels), name='loss')
    optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE, name='optimizer')
    train = optimizer.minimize(loss, global_step=tf.train.get_global_step(), name='train')
    # accuracy for evaluation
    accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions, name='accuracy')
    # summaries for tensorboard
    tf.summary.scalar('loss', loss)
    # return training and evaluation spec
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train,
        eval_metric_ops={'accuracy': accuracy}
    )
training.py
from network import TFDotNet
from time import time
# settings
training_steps = 10000
mini_batch_size = 128
model_dir = 'neuralnet_data/02_networks/network01'
dataset_path = 'neuralnet_data/01_datasets/dataset.data'
# init dotnet
dotnet = TFDotNet(model_dir=model_dir)
# load dataset
print('loading dataset ...')
dataset = dotnet.load_dataset(dataset_path)
# split dataset
x_train, y_train, x_test, y_test = dotnet.split_dataset(dataset,0.1)
# train network
print('starting training ...')
t0 = time()
dotnet.train(x_train,y_train,steps=training_steps,batch_size=mini_batch_size)
print('Training took {}s'.format(time()-t0))
The problem here is that, when no Saver is specified (either directly or via the scaffold), CheckpointSaverHook will create a new Saver in its constructor. If its __init__ is not run in the same Graph as your model, it won't find any variables, so nothing will be saved (https://github.com/tensorflow/tensorflow/issues/13265).
Assuming you are using the tf.estimator framework, the Graph you want simply does not exist yet before the call to train.
You should be able to work around that by creating the saver inside your model_fn and passing it as a hook to the EstimatorSpec.
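A minimal sketch of that workaround (my illustration, assuming the model_fn shown in the question; loss, train and the checkpoint directory are the names used above, and the directory string is a placeholder):
def model_fn(features, labels, mode):
    # ... build the network, loss and train op exactly as in the question ...
    # Create the Saver here, i.e. in the same Graph as the model variables,
    # and hand it to a CheckpointSaverHook attached via training_hooks.
    saver = tf.train.Saver(max_to_keep=10)
    saver_hook = tf.train.CheckpointSaverHook(
        checkpoint_dir='neuralnet_data/02_networks/network01',  # same dir the Estimator uses
        save_steps=100,
        saver=saver)
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train,
        training_hooks=[saver_hook],
        eval_metric_ops={'accuracy': accuracy}
    )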
Here is my code. It works fine; the complete code is on my GitHub.
start_time = datetime.datetime.now()

saver_hook = tf.train.CheckpointSaverHook(
    checkpoint_dir=FLAGS.train_dir,
    save_steps=100,
)
config = tf.estimator.RunConfig()
config = config.replace(session_config=sess_config)
per_example_hook = ExamplesPerSecondHook(FLAGS.train_batch_size, every_n_steps=100)
hooks = [per_example_hook, saver_hook]
classifier = tf.estimator.Estimator(
    model_fn=model_fn_cnn,
    model_dir=FLAGS.train_dir,
    config=config,
)
classifier.train(input_fn=functools.partial(input_fn, subset="training"),
                 steps=FLAGS.train_steps,
                 hooks=hooks
                 )
train_time = datetime.datetime.now() - start_time
