tensorflow tensorboard hparams - python

import tensorflow as tf
from tensorboard.plugins.hparams import api as hp

# Load the model and data here
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([32, 64, 128, 256, 512]))
HP_DROPOUT = hp.HParam('dropout', hp.RealInterval(0.1, 0.9))
HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['Nadam', 'SGD', 'RMSprop', 'adam', 'Adagrad']))
HP_L2 = hp.HParam('l2 regularizer', hp.RealInterval(0.00001, 0.01))
HP_LeakyReLU = hp.HParam('alpha', hp.RealInterval(0.1, 0.9))
METRIC_ACCURACY = 'accuracy'

with tf.summary.create_file_writer('raw-img/log/hparam_tuning/').as_default():
    hp.hparams_config(
        hparams=[HP_NUM_UNITS, HP_DROPOUT, HP_OPTIMIZER, HP_L2, HP_LeakyReLU],
        metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
    )

def train_test_model(hparams):
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(hparams[HP_NUM_UNITS], kernel_regularizer=tf.keras.regularizers.l2(0.001)),
        tf.keras.layers.LeakyReLU(hparams[HP_LeakyReLU]),
        tf.keras.layers.Dropout(hparams[HP_DROPOUT]),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])
    model.compile(
        optimizer=hparams[HP_OPTIMIZER],
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'],
    )
    model.fit(x_train, y_train, epochs=2)
    _, accuracy = model.evaluate(x_test, y_test)
    return accuracy

def run(run_dir, hparams):
    with tf.summary.create_file_writer(run_dir).as_default():
        hp.hparams(hparams)  # record the values used in this trial
        accuracy = train_test_model(hparams)
        tf.summary.scalar(METRIC_ACCURACY, accuracy, step=2)

session_num = 0
for num_units in HP_NUM_UNITS.domain.values:
    for dropout_rate in (HP_DROPOUT.domain.min_value, HP_DROPOUT.domain.max_value):
        for l2 in (HP_L2.domain.min_value, HP_L2.domain.max_value):
            for alpha in (HP_LeakyReLU.domain.min_value, HP_LeakyReLU.domain.max_value):
                for optimizer in HP_OPTIMIZER.domain.values:
                    hparams = {
                        HP_NUM_UNITS: num_units,
                        HP_DROPOUT: dropout_rate,
                        HP_L2: l2,
                        HP_LeakyReLU: alpha,
                        HP_OPTIMIZER: optimizer,
                    }
                    run_name = "run-%d" % session_num
                    print('--- Starting trial: %s' % run_name)
                    print({h.name: hparams[h] for h in hparams})
                    run('raw-img/log/hparam_tuning/' + run_name, hparams)
                    session_num += 1
I have tried to use HParams in TensorFlow. I have set dropout, l2, and optimizer.
Now I need to set a value for learning_rate and test it.
What should I do to sweep learning_rate the same way as dropout and l2, and test it?
I have tried this:

model.compile(
    optimizer=hparams[HP_OPTIMIZER](lr=0.001),
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)

but it doesn't work. I want to try different values of learning_rate, just as I do for dropout and l2.

You want to build the optimizer you use in a separate variable instead of passing the name string directly to compile():

if hparams[HP_OPTIMIZER] == "SGD":
    optimizer = tf.keras.optimizers.SGD(learning_rate=float(hparams[HP_LR]))
elif hparams[HP_OPTIMIZER] == "adam":
    optimizer = tf.keras.optimizers.Adam(learning_rate=float(hparams[HP_LR]))
else:
    raise ValueError("unexpected optimizer name: %r" % hparams[HP_OPTIMIZER])

model.compile(
    optimizer=optimizer,
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)
I found the solution here.
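The answer above references HP_LR, which is never declared; here is a minimal sketch of how it could be defined and swept alongside the other hyperparameters (HP_LR and build_optimizer are assumed names, not from the original post):

# Assumed: declare the learning rate as its own HParam, mirroring HP_DROPOUT/HP_L2.
HP_LR = hp.HParam('learning_rate', hp.Discrete([1e-4, 1e-3, 1e-2]))

def build_optimizer(hparams):
    # Map the optimizer-name hparam to a concrete optimizer instance,
    # wiring in the learning-rate hparam.
    lr = float(hparams[HP_LR])
    if hparams[HP_OPTIMIZER] == "SGD":
        return tf.keras.optimizers.SGD(learning_rate=lr)
    elif hparams[HP_OPTIMIZER] == "adam":
        return tf.keras.optimizers.Adam(learning_rate=lr)
    else:
        # extend with the other names in HP_OPTIMIZER's domain as needed
        raise ValueError("unexpected optimizer name: %r" % hparams[HP_OPTIMIZER])

Using hp.Discrete for the learning rate means the grid loop can iterate HP_LR.domain.values directly, whereas a RealInterval only exposes its min_value and max_value endpoints. Either way, add HP_LR to the hparams_config list, add HP_LR: lr to the trial dictionary, and wrap one more "for lr in HP_LR.domain.values:" level around the grid search.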

Related

Dropout not reducing loss in pytorch but works fine with keras

I was comparing the loss of two simple MLP models, with and without dropout, on both the TF/Keras and PyTorch frameworks (on the Keras IMDB dataset). With PyTorch I am not getting the results I hoped for, and I was wondering what I am doing incorrectly.
# Keras - IMDB Dataset
model = Sequential()
model.add(Dense(16, activation="relu", input_shape=(10000,)))
model.add(Dropout(0.5))  # comment out this line for the no-dropout model
model.add(Dense(16, activation="relu"))
model.add(Dropout(0.5))  # comment out this line for the no-dropout model
model.add(Dense(1, activation="sigmoid"))

model.compile(
    optimizer="rmsprop",
    loss="binary_crossentropy",
    metrics=["accuracy"]
)

history = model.fit(
    X_train,
    y_train,
    epochs=20,
    batch_size=512,
    validation_data=(X_val, y_val)
)
The results I obtained in Keras (left figure: without dropout; right figure: with dropout):
# PyTorch - same IMDB dataset as in Keras
class MLP(nn.Module):
    def __init__(self, in_dims, l1, l2, out_dims):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(in_dims, l1)
        self.fc2 = nn.Linear(l1, l2)
        self.fc3 = nn.Linear(l2, out_dims)
        self.dropout = nn.Dropout(p=0.5)

    def forward(self, X):
        out = F.relu(self.fc1(X))
        out = self.dropout(out)  # comment out this line for the no-dropout model
        out = F.relu(self.fc2(out))
        out = self.dropout(out)  # comment out this line for the no-dropout model
        out = F.sigmoid(self.fc3(out))
        return out

model = MLP(10000, 16, 16, 1)
optimizer = optim.RMSprop(model.parameters(), lr=0.001)
criterion = nn.BCELoss()

min_val_loss = np.inf
losses = []
val_losses = []
accuracy = []
val_accuracy = []

for e in range(0, 20):
    running_loss = 0
    for i, (X_train, y_train) in enumerate(train_loader):
        yhat = model.forward(X_train)
        loss = criterion(yhat.flatten(), y_train)
        running_loss += loss.item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    losses.append(running_loss / (i + 1))  # note it's i+1 since i starts from 0

    model.eval()
    with torch.no_grad():
        running_val_loss = 0
        for i, (X_val, y_val) in enumerate(val_loader):
            yhat_val = model.forward(X_val)
            val_loss = criterion(yhat_val.flatten(), y_val)
            running_val_loss += val_loss.item()
        val_losses.append(running_val_loss / (i + 1))

    if val_loss < min_val_loss:
        best_params = model.state_dict()
        min_val_loss = val_loss

    print(f"epochs : {e}, train_loss : {loss}, val_loss : {val_loss}")
The figure on the left is the result of the no-dropout model, which is similar to the Keras model's. However, the one with dropout does not show the same behaviour.
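One detail worth checking in the training loop above (a hedged guess from the code shown, not part of the original post): model.eval() is called before validation, but the model is never switched back to training mode, so from the second epoch onward dropout stays disabled during the training pass as well. A minimal sketch of the fix:

for e in range(0, 20):
    model.train()  # re-enable dropout for the training pass
    # ... training steps as above ...
    model.eval()   # disable dropout for the validation pass
    # ... validation steps as above ...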

Validation and prediction Accuracy of VGG19 Model is not getting better

Hello, I am working on detecting diabetic retinopathy from images in the Kaggle dataset (https://www.kaggle.com/c/diabetic-retinopathy-detection) using VGG19. I have preprocessed my images with a Gaussian filter and resized them to 224 x 224 x 3, and then passed them to a VGG19 model with 5760 training images and 640 validation images. I am getting a training accuracy of 100%, but the validation accuracy does not get better than 60%, and the prediction accuracy after the model finished is 50%.
This is my model code:
for count in range(6):
    print("*************************************************************************************************")
    print("******************************************************Fold " + str(count))
    trainDirectory = "Folds/Fold" + str(count) + "-Train"
    testDirectory = "Folds/Fold" + str(count) + "-Test"

    trdata = ImageDataGenerator()
    traindata = trdata.flow_from_directory(class_mode='binary', batch_size=64,
                                           directory=trainDirectory, target_size=(224, 224))
    tsdata = ImageDataGenerator()
    testdata = tsdata.flow_from_directory(class_mode='binary', batch_size=64,
                                          directory=testDirectory, target_size=(224, 224))

    from keras.applications.vgg19 import VGG19
    vggmodel = VGG19(weights='imagenet', include_top=True, input_shape=(224, 224, 3))
    for layers in (vggmodel.layers)[:19]:
        layers.trainable = False

    X = vggmodel.layers[-2].output
    predictions = Dense(1, activation="sigmoid")(X)
    model_final = Model(vggmodel.input, predictions)

    opt = keras.optimizers.Adam(learning_rate=0.0001)
    model_final.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

    from keras.callbacks import ModelCheckpoint, EarlyStopping
    checkpoint = ModelCheckpoint("vgg19_1.h5", monitor='val_accuracy', verbose=1, save_best_only=True,
                                 save_weights_only=False, mode='auto', period=1)
    early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=50, verbose=1, mode='auto')

    h = model_final.fit_generator(generator=traindata, steps_per_epoch=int(5760 / 64),
                                  epochs=10, validation_data=testdata,
                                  validation_steps=int(640 / 64), callbacks=[checkpoint, early])
    modelHistory.append(h.history)

    model_final.save_weights("vgg19_1.h5")
    model_final.save('vgg19_1.h5')

    model = load_model('vgg19_1.h5')
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])

    predict_datagen = ImageDataGenerator()
    predict_generator = predict_datagen.flow_from_directory(
        "Validation",
        class_mode=None,
        target_size=(224, 224),
    )
    pred = model.predict_generator(predict_generator)
    y_pred = np.argmax(pred, axis=1)

    print('Confusion Matrix')
    print(confusion_matrix(predict_generator.classes, y_pred))
    print('Classification Report')
    target_names = ['0', '1']
    print(classification_report(predict_generator.classes, y_pred, target_names=target_names))

    l = len(y_pred)
    acc = sum([y_pred[i] == predict_generator.classes[i] for i in range(l)]) / l
    print(acc)
    predictedAccuracy.append(acc)
This is the accuracy I got:
This is the plot of the accuracy / loss:

How to get the training and validation accuracy of the model?

I am trying to grab the training and validation accuracy values, but it is not returning acc and val_acc values.
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='RMSprop',
              lr=0.001,
              metrices=['accuracy'])

# prepare the generators
train_gen = train_datagen.flow(training_images,
                               training_labels,
                               batch_size=64)

validation_gen = validation_datagen.flow(testing_images,
                                         testing_labels,
                                         batch_size=64)

# Train the Model
history = model.fit_generator(train_gen,
                              epochs=20,
                              validation_data=validation_gen)
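For reference, a minimal sketch of a compile call that does populate the accuracy metrics (assuming TF 2.x; the keyword is metrics, not metrices, and the learning rate belongs on the optimizer, not on compile):

model.compile(loss='sparse_categorical_crossentropy',
              optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.001),
              metrics=['accuracy'])

# After training, history.history holds 'accuracy' and 'val_accuracy'
# (older Keras versions used the keys 'acc' and 'val_acc').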

I am getting 0% stack test accuracy for ensemble neural network where base learners are CNN and BiLSTM

I am performing binary classification of reviews according to their polarity.
I have created an ensemble model in Keras, using a CNN and a BiLSTM as base models and a neural network as the meta-learner, with stacking. But I am getting a stacked-ensemble accuracy of 0%. What changes should I make to my code so that it runs correctly? Where am I going wrong?
text_input_layer = Input(shape=(length_trainshuffle,))
embedding_layer = Embedding(vocab_size, 100)(text_input_layer)
text_layer_cnn = Conv1D(128, 5, activation='relu')(embedding_layer)
text_layer_cnn = GlobalMaxPooling1D()(text_layer_cnn)
text_layer_cnn = Dropout(0.2)(text_layer_cnn)
text_layer_cnn = Dense(5, kernel_initializer='glorot_uniform', activation='tanh')(text_layer_cnn)
output_layer_cnn = Dense(1, kernel_initializer='glorot_uniform', activation='sigmoid')(text_layer_cnn)
model_cnn = Model(text_input_layer, output_layer_cnn)

optimizer = Adamax(lr=0.001, decay=0.0001)
model_cnn.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
filepath_cnn = "cnnmodel.best.hdf5"
checkpoint_cnn = ModelCheckpoint(filepath_cnn, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list_cnn = [checkpoint_cnn]

# Fit the model
model_cnn.fit(trainX, array(trainlabelshuffle), epochs=10, batch_size=80,
              validation_data=(validateX, array(validatelabelshuffle)),
              callbacks=callbacks_list_cnn, verbose=1)
model_cnn.save(filepath_cnn)
print('>Saved %s' % filepath_cnn)

loss_cnn, acc_cnn = model_cnn.evaluate(testX, array(testlabelshuffle), verbose=0)
print('Test Accuracy CNN: %f' % (acc_cnn * 100))
print('Loss CNN: %f' % (loss_cnn))

text_layer_bilstm = Bidirectional(CuDNNLSTM(256))(embedding_layer)
output_layer_bilstm = Dense(1, kernel_initializer='glorot_uniform', activation='sigmoid')(text_layer_bilstm)
model_bilstm = Model(text_input_layer, output_layer_bilstm)

optimizer_bilstm = Adamax(lr=0.001, decay=0.0001)
model_bilstm.compile(optimizer=optimizer_bilstm, loss='binary_crossentropy', metrics=['accuracy'])
filepath_bilstm = "bilstm_model.best.hdf5"
checkpoint_bilstm = ModelCheckpoint(filepath_bilstm, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list_bilstm = [checkpoint_bilstm]

# Fit the model
model_bilstm.fit(trainX, array(trainlabelshuffle), epochs=10, batch_size=80,
                 validation_data=(validateX, array(validatelabelshuffle)),
                 callbacks=callbacks_list_bilstm, verbose=1)
model_bilstm.save(filepath_bilstm)
print('>Saved %s' % filepath_bilstm)

loss_bilstm, acc_bilstm = model_bilstm.evaluate(testX, array(testlabelshuffle), verbose=0)
print('Test Accuracy bilstm: %f' % (acc_bilstm * 100))
print('Loss bilstm: %f' % (loss_bilstm))

all_models = list()
cnnmodel = load_model(filepath_cnn)
# add to list of members
all_models.append(cnnmodel)
print('>loaded %s' % filepath_cnn)
bilstmmodel = load_model(filepath_bilstm)
# add to list of members
all_models.append(bilstmmodel)
print('>loaded %s' % filepath_bilstm)

def define_stacked_model(all_models):
    # update all layers in all models to not be trainable
    for i in range(len(all_models)):
        model = all_models[i]
        for layer in model.layers:
            # make not trainable
            layer.trainable = False
            # rename to avoid 'unique layer name' issue
            layer.name = 'ensemble_' + str(i + 1) + '_' + layer.name
    # define multi-headed input
    ensemble_visible = [model.input for model in all_models]
    # concatenate merge output from each model
    ensemble_outputs = [model.output for model in all_models]
    merge = concatenate(ensemble_outputs)
    hidden = Dense(10, activation='relu')(merge)
    # hidden = Flatten()(hidden)
    output = Dense(2, activation='softmax')(hidden)
    model = Model(inputs=ensemble_visible, outputs=output)
    # plot graph of ensemble
    plot_model(model, show_shapes=True, to_file='model_graph.png')
    # compile
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

def fit_stacked_model(model, inputX, inputy):
    # prepare input data
    X = [inputX for _ in range(len(model.input))]
    # encode output data
    inputy_enc = to_categorical(inputy)
    # fit model
    model.fit(X, inputy_enc, epochs=10, verbose=1)

# make a prediction with a stacked model
def predict_stacked_model(model, inputX):
    # prepare input data
    X = [inputX for _ in range(len(model.input))]
    # make prediction
    return model.predict(X, verbose=0)

stacked_model = define_stacked_model(all_models)
stacked_model.summary()
# fit stacked model on the validation dataset
fit_stacked_model(stacked_model, validateX, array(validatelabelshuffle))
# stacked_model.fit(X=testX, y=array(testlabelshuffle), epochs=10, verbose=1)
# make predictions and evaluate
yhat = predict_stacked_model(stacked_model, testX)
yhat = argmax(yhat, axis=1)
acc = accuracy_score(array(testlabelshuffle), yhat)
print('Stacked Test Accuracy: %.3f' % acc)

Keras + TensorFlow Realtime training chart

I have the following code running inside a Jupyter notebook:
# Visualize training history
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
import numpy
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load pima indians dataset
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:,0:8]
Y = dataset[:,8]
# create model
model = Sequential()
model.add(Dense(12, input_dim=8, kernel_initializer='uniform', activation='relu'))
model.add(Dense(8, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model
history = model.fit(X, Y, validation_split=0.33, epochs=150, batch_size=10, verbose=0)
# list all data in history
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
The code collects the epoch history, then displays it after training finishes.
Q: How can I make the chart update while training, so I can see the changes in real time?
There is the livelossplot Python package for live training-loss plots in Jupyter Notebook for Keras (disclaimer: I am the author).

from livelossplot import PlotLossesKeras

model.fit(X_train, Y_train,
          epochs=10,
          validation_data=(X_test, Y_test),
          callbacks=[PlotLossesKeras()],
          verbose=0)

To see how it works, look at its source, especially this file: https://github.com/stared/livelossplot/blob/master/livelossplot/outputs/matplotlib_plot.py (it uses from IPython.display import clear_output and clear_output(wait=True)).
A fair disclaimer: it does interfere with Keras output.
Keras comes with a callback for TensorBoard.
You can easily add this behaviour to your model and then just run tensorboard on top of the logging data.
from keras.callbacks import TensorBoard

callbacks = [TensorBoard(log_dir='./logs')]
result = model.fit(X, Y, ..., callbacks=callbacks)
And then in your shell:
tensorboard --logdir=./logs
If you need it in your notebook, you can also write your own callback to get metrics while training:

class LogCallback(Callback):
    def on_epoch_end(self, epoch, logs=None):
        print(logs["accuracy"])  # the key is 'acc' in older Keras versions

This gets the training accuracy at the end of the current epoch and prints it. There is some good documentation around it on the official Keras site.
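Hooked into the fit call from the question above, that looks like this (a sketch, reusing LogCallback as defined):

history = model.fit(X, Y, validation_split=0.33, epochs=150, batch_size=10,
                    verbose=0, callbacks=[LogCallback()])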
This gives you an idea of the simplest approach.
[ Sample ]:
# https://stackoverflow.com/questions/71748896/how-to-plot-a-graph-of-training-time-and-batch-size-of-neural-network
import os
from os.path import exists
import matplotlib.pyplot as plt
import tensorflow as tf
import time
import h5py
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
os.environ['TF_GPU_ALLOCATOR'] = 'cuda_malloc_async'
print(os.getenv('TF_GPU_ALLOCATOR'))
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
epoch_1_time = [ ]
epoch_5_time = [ ]
epoch_10_time = [ ]
epoch_50_time = [ ]
epoch_100_time = [ ]
database_buffer = "F:\\models\\buffer\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
database_buffer_dir = os.path.dirname(database_buffer)
if not exists(database_buffer_dir) :
os.mkdir(database_buffer_dir)
print("Create directory: " + database_buffer_dir)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Functions
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# ...
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
# Create hdf5 file
hdf5_file = h5py.File(database_buffer, mode='w')
# Train images
hdf5_file['x_train'] = train_images
hdf5_file['y_train'] = train_labels
# Test images
hdf5_file['x_test'] = test_images
hdf5_file['y_test'] = test_labels
hdf5_file.close()
# Visualize dataset train sample
hdf5_file = h5py.File(database_buffer, mode='r')
# Load features
# x_train = hdf5_file['x_train'][0: 50000]
# x_test = hdf5_file['x_test'][0: 10000]
# y_train = hdf5_file['y_train'][0: 50000]
# y_test = hdf5_file['y_test'][0: 10000]
x_train = hdf5_file['x_train'][0: 100]
x_test = hdf5_file['x_test'][0: 100]
y_train = hdf5_file['y_train'][0: 100]
y_test = hdf5_file['y_test'][0: 100]
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( 32, 32, 3 )),
tf.keras.layers.Normalization(mean=3., variance=2.),
tf.keras.layers.Normalization(mean=4., variance=6.),
tf.keras.layers.Conv2DTranspose(2, 3, activation='relu', padding="same"),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(4 * 256),
tf.keras.layers.Reshape((4 * 256, 1)),
tf.keras.layers.LSTM(128, return_sequences=True, return_state=False),
tf.keras.layers.LSTM(128, name='LSTM256'),
tf.keras.layers.Dropout(0.2),
])
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(64, activation='relu', name='dense64'))
model.add(tf.keras.layers.Dense(7))
model.summary()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class custom_callback_5(tf.keras.callbacks.Callback):
global epoch_5
val_dir = os.path.join(log_dir, 'validation')
print('val_dir: ' + val_dir)
epoch_5 = 0
def on_epoch_end( self, epoch, logs={} ):
global epoch_5
time_counter = time.perf_counter()
epoch_1_time.append( epoch )
if epoch == 1 :
###
epoch_5 = time_counter
if epoch % 5 == 0 :
epoch_5 = time_counter
epoch_5_time.append( epoch_5 )
### updates ###
with file_writer.as_default():
tf.summary.scalar("epoch_5", epoch_5, step=epoch)
file_writer.flush()
custom_callback_5 = custom_callback_5()
class custom_callback_10(tf.keras.callbacks.Callback):
global epoch_10
epoch_10 = 0
def on_epoch_end( self, epoch, logs={} ):
global epoch_10
time_counter = time.perf_counter()
#epoch_1_time.append( epoch )
if epoch == 1 :
###
epoch_10 = time_counter
if epoch % 10 == 0 :
epoch_10 = time_counter
epoch_10_time.append( epoch_10 )
### updates ###
with file_writer.as_default():
tf.summary.scalar("epoch_10", epoch_10, step=epoch)
file_writer.flush()
custom_callback_10 = custom_callback_10()
class custom_callback_50(tf.keras.callbacks.Callback):
global epoch_50
epoch_50 = 0
def on_epoch_end( self, epoch, logs={} ):
global epoch_50
time_counter = time.perf_counter()
#epoch_1_time.append( epoch )
if epoch == 1 :
###
epoch_50 = time_counter
if epoch % 50 == 0 :
epoch_50 = time_counter
epoch_50_time.append( epoch_50 )
### updates ###
with file_writer.as_default():
tf.summary.scalar("epoch_50", epoch_50, step=epoch)
file_writer.flush()
custom_callback_50 = custom_callback_50()
class custom_callback_100(tf.keras.callbacks.Callback):
global epoch_100
epoch_100 = 0
def on_epoch_end( self, epoch, logs={} ):
global epoch_100
time_counter = time.perf_counter()
#epoch_1_time.append( epoch )
if epoch == 1 :
###
epoch_100 = time_counter
if epoch % 100 == 0 :
epoch_100 = time_counter
epoch_100_time.append( epoch_100 )
### updates ###
with file_writer.as_default():
tf.summary.scalar("epoch_100", epoch_100, step=epoch)
file_writer.flush()
custom_callback_100 = custom_callback_100()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam( learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam' )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.MeanSquaredLogarithmicError(reduction=tf.keras.losses.Reduction.AUTO, name='mean_squared_logarithmic_error')
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit(x_train, y_train, epochs=1000, batch_size=5 ,validation_data=(x_test, y_test), callbacks=[custom_callback_5])
history = model.fit(x_train, y_train, epochs=1000, batch_size=10 ,validation_data=(x_test, y_test), callbacks=[custom_callback_10])
history = model.fit(x_train, y_train, epochs=1000, batch_size=50 ,validation_data=(x_test, y_test), callbacks=[custom_callback_50])
history = model.fit(x_train, y_train, epochs=1000, batch_size=100 ,validation_data=(x_test, y_test), callbacks=[custom_callback_100])
plt.plot(epoch_1_time, epoch_5_time)
plt.plot(epoch_1_time, epoch_10_time)
plt.plot(epoch_1_time, epoch_50_time)
plt.plot(epoch_1_time, epoch_100_time)
plt.legend(["epoch_5_time", "epoch_10_time", "epoch_50_time", "epoch_100_time"])
plt.show()
plt.close()
input('...')
## tensorboard --inspect --logdir="F:\\models\\checkpoint\\test_tf_plot_graph\\"
## tensorboard --logdir="F:\\models\\checkpoint\\test_tf_plot_graph\\"
[ Output ]:
Event statistics for F:\\models\\checkpoint\\test_tf_plot_graph\validation:
audio -
graph -
histograms -
images -
scalars -
sessionlog:checkpoint -
sessionlog:start -
sessionlog:stop -
tensor
first_step 20
last_step 6
max_step 140
min_step 0
num_steps 14
outoforder_steps [(20, 0), (40, 1), (60, 2), (80, 3), (100, 4), (120, 5), (140, 6)]
======================================================================
...
