Keras + TensorFlow Realtime training chart - python

I have the following code running inside a Jupyter notebook:
# Visualize training history
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
import numpy
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load pima indians dataset
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:,0:8]
Y = dataset[:,8]
# create model
model = Sequential()
model.add(Dense(12, input_dim=8, kernel_initializer='uniform', activation='relu'))
model.add(Dense(8, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model
history = model.fit(X, Y, validation_split=0.33, epochs=150, batch_size=10, verbose=0)
# list all data in history
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
The code collects the history over the epochs, then displays the training progress once fitting has finished.
Q: How can I make the chart update while training is running, so I can see the changes in real time?

There is the livelossplot Python package for live training loss plots in Jupyter Notebook for Keras (disclaimer: I am the author).
from livelossplot import PlotLossesKeras

model.fit(X_train, Y_train,
          epochs=10,
          validation_data=(X_test, Y_test),
          callbacks=[PlotLossesKeras()],
          verbose=0)
To see how it works, look at its source, especially this file: https://github.com/stared/livelossplot/blob/master/livelossplot/outputs/matplotlib_plot.py (it boils down to from IPython.display import clear_output and clear_output(wait=True)).
A fair disclaimer: it does interfere with Keras output.

Keras comes with a callback for TensorBoard.
You can easily add this behaviour to your model and then just run tensorboard on top of the logging data.
from tensorflow.keras.callbacks import TensorBoard

callbacks = [TensorBoard(log_dir='./logs')]
result = model.fit(X, Y, ..., callbacks=callbacks)
And then on your shell:
tensorboard --logdir=./logs
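If you are working in a Jupyter or Colab notebook, you can also embed the TensorBoard UI directly in the notebook with the standard TensorBoard notebook extension (assuming the same ./logs directory as above):
%load_ext tensorboard
%tensorboard --logdir ./logs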
If you need it in your notebook, you can also write your own callback to get metrics while training:
from tensorflow.keras.callbacks import Callback

class LogCallback(Callback):
    def on_epoch_end(self, epoch, logs=None):
        print(logs["accuracy"])
This would get the training accuracy at the end of the current epoch and print it (note the metric key is "accuracy" in current Keras versions, "acc" in older ones). There is some good documentation around it on the official Keras site.
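If you prefer not to add a dependency, here is a minimal sketch of a hand-rolled live-plotting callback (my own illustration, not from the answers above) that uses the same clear_output(wait=True) trick as livelossplot; it assumes a Jupyter kernel with matplotlib available:

from IPython.display import clear_output
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import Callback

class LivePlot(Callback):
    def on_train_begin(self, logs=None):
        # accumulate one point per epoch
        self.losses, self.val_losses = [], []
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.losses.append(logs.get("loss"))
        self.val_losses.append(logs.get("val_loss"))
        clear_output(wait=True)  # redraw in place instead of stacking figures
        plt.plot(self.losses, label="train loss")
        plt.plot(self.val_losses, label="val loss")
        plt.xlabel("epoch")
        plt.legend(loc="upper right")
        plt.show()

# usage with the question's model:
# history = model.fit(X, Y, validation_split=0.33, epochs=150,
#                     batch_size=10, verbose=0, callbacks=[LivePlot()])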

This gives you an idea of a simple approach.
[ Sample ]:
# https://stackoverflow.com/questions/71748896/how-to-plot-a-graph-of-training-time-and-batch-size-of-neural-network
import os
from os.path import exists
import matplotlib.pyplot as plt
import tensorflow as tf
import time
import h5py
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
os.environ['TF_GPU_ALLOCATOR'] = 'cuda_malloc_async'
print(os.getenv('TF_GPU_ALLOCATOR'))
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
epoch_1_time = [ ]
epoch_5_time = [ ]
epoch_10_time = [ ]
epoch_50_time = [ ]
epoch_100_time = [ ]

database_buffer = "F:\\models\\buffer\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
database_buffer_dir = os.path.dirname(database_buffer)

if not exists(database_buffer_dir) :
    os.mkdir(database_buffer_dir)
    print("Create directory: " + database_buffer_dir)

# the callbacks below log scalars with tf.summary, so they need a log dir and writer
log_dir = database_buffer_dir
file_writer = tf.summary.create_file_writer(log_dir)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Functions
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# ...
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
# Create hdf5 file
hdf5_file = h5py.File(database_buffer, mode='w')
# Train images
hdf5_file['x_train'] = train_images
hdf5_file['y_train'] = train_labels
# Test images
hdf5_file['x_test'] = test_images
hdf5_file['y_test'] = test_labels
hdf5_file.close()
# Visualize dataset train sample
hdf5_file = h5py.File(database_buffer, mode='r')
# Load features
# x_train = hdf5_file['x_train'][0: 50000]
# x_test = hdf5_file['x_test'][0: 10000]
# y_train = hdf5_file['y_train'][0: 50000]
# y_test = hdf5_file['y_test'][0: 10000]
x_train = hdf5_file['x_train'][0: 100]
x_test = hdf5_file['x_test'][0: 100]
y_train = hdf5_file['y_train'][0: 100]
y_test = hdf5_file['y_test'][0: 100]
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape=( 32, 32, 3 )),
    tf.keras.layers.Normalization(mean=3., variance=2.),
    tf.keras.layers.Normalization(mean=4., variance=6.),
    tf.keras.layers.Conv2DTranspose(2, 3, activation='relu', padding="same"),
    tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid'),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(4 * 256),
    tf.keras.layers.Reshape((4 * 256, 1)),
    tf.keras.layers.LSTM(128, return_sequences=True, return_state=False),
    tf.keras.layers.LSTM(128, name='LSTM256'),
    tf.keras.layers.Dropout(0.2),
])
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(64, activation='relu', name='dense64'))
model.add(tf.keras.layers.Dense(7))
model.summary()
model.summary()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class custom_callback_5(tf.keras.callbacks.Callback):
    global epoch_5
    val_dir = os.path.join(log_dir, 'validation')
    print('val_dir: ' + val_dir)
    epoch_5 = 0
    def on_epoch_end( self, epoch, logs={} ):
        global epoch_5
        time_counter = time.perf_counter()
        epoch_1_time.append( epoch )
        if epoch == 1 :
            epoch_5 = time_counter
        if epoch % 5 == 0 :
            epoch_5 = time_counter
            epoch_5_time.append( epoch_5 )
        ### updates ###
        with file_writer.as_default():
            tf.summary.scalar("epoch_5", epoch_5, step=epoch)
            file_writer.flush()
custom_callback_5 = custom_callback_5()

class custom_callback_10(tf.keras.callbacks.Callback):
    global epoch_10
    epoch_10 = 0
    def on_epoch_end( self, epoch, logs={} ):
        global epoch_10
        time_counter = time.perf_counter()
        #epoch_1_time.append( epoch )
        if epoch == 1 :
            epoch_10 = time_counter
        if epoch % 10 == 0 :
            epoch_10 = time_counter
            epoch_10_time.append( epoch_10 )
        ### updates ###
        with file_writer.as_default():
            tf.summary.scalar("epoch_10", epoch_10, step=epoch)
            file_writer.flush()
custom_callback_10 = custom_callback_10()

class custom_callback_50(tf.keras.callbacks.Callback):
    global epoch_50
    epoch_50 = 0
    def on_epoch_end( self, epoch, logs={} ):
        global epoch_50
        time_counter = time.perf_counter()
        #epoch_1_time.append( epoch )
        if epoch == 1 :
            epoch_50 = time_counter
        if epoch % 50 == 0 :
            epoch_50 = time_counter
            epoch_50_time.append( epoch_50 )
        ### updates ###
        with file_writer.as_default():
            tf.summary.scalar("epoch_50", epoch_50, step=epoch)
            file_writer.flush()
custom_callback_50 = custom_callback_50()

class custom_callback_100(tf.keras.callbacks.Callback):
    global epoch_100
    epoch_100 = 0
    def on_epoch_end( self, epoch, logs={} ):
        global epoch_100
        time_counter = time.perf_counter()
        #epoch_1_time.append( epoch )
        if epoch == 1 :
            epoch_100 = time_counter
        if epoch % 100 == 0 :
            epoch_100 = time_counter
            epoch_100_time.append( epoch_100 )
        ### updates ###
        with file_writer.as_default():
            tf.summary.scalar("epoch_100", epoch_100, step=epoch)
            file_writer.flush()
custom_callback_100 = custom_callback_100()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam( learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam' )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.MeanSquaredLogarithmicError(reduction=tf.keras.losses.Reduction.AUTO, name='mean_squared_logarithmic_error')
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit(x_train, y_train, epochs=1000, batch_size=5 ,validation_data=(x_test, y_test), callbacks=[custom_callback_5])
history = model.fit(x_train, y_train, epochs=1000, batch_size=10 ,validation_data=(x_test, y_test), callbacks=[custom_callback_10])
history = model.fit(x_train, y_train, epochs=1000, batch_size=50 ,validation_data=(x_test, y_test), callbacks=[custom_callback_50])
history = model.fit(x_train, y_train, epochs=1000, batch_size=100 ,validation_data=(x_test, y_test), callbacks=[custom_callback_100])
plt.plot(epoch_1_time, epoch_5_time)
plt.plot(epoch_1_time, epoch_10_time)
plt.plot(epoch_1_time, epoch_50_time)
plt.plot(epoch_1_time, epoch_100_time)
plt.legend(["epoch_5_time", "epoch_10_time", "epoch_50_time", "epoch_100_time"])
plt.show()
plt.close()
input('...')
## tensorboard --inspect --logdir="F:\\models\\checkpoint\\test_tf_plot_graph\\"
## tensorboard --logdir="F:\\models\\checkpoint\\test_tf_plot_graph\\"
[ Output ]:
Event statistics for F:\models\checkpoint\test_tf_plot_graph\validation:
audio -
graph -
histograms -
images -
scalars -
sessionlog:checkpoint -
sessionlog:start -
sessionlog:stop -
tensor
   first_step        20
   last_step         6
   max_step          140
   min_step          0
   num_steps         14
   outoforder_steps  [(20, 0), (40, 1), (60, 2), (80, 3), (100, 4), (120, 5), (140, 6)]
======================================================================
...

Related

Accuracy and loss fluctuating in binary classification problem in deep learning

I'm currently working on a classification problem for stroke with a UNet. The task is based on the size of the lesion area (large = 1, small = 0). Note that the labels were actually produced by me (I will try to improve them), so they are not that accurate. After training for about 20 epochs, my accuracy hovered around 0.5 and my loss around 0.6, which basically says my model makes random choices. What should I do to make my model learn again?
Here's the UNet I'm using:
import keras_unet

def define_unet(n_filters=neuron,
                n_layers=4,
                dropout_rate=0.25):
    model_unet = keras_unet.models.custom_unet(input_shape=(img_size, img_size, 3),
                                               activation='relu',
                                               use_batch_norm=True,
                                               upsample_mode='deconv',
                                               dropout=dropout_rate,
                                               dropout_type='spatial',
                                               filters=n_filters,
                                               num_layers=n_layers,
                                               output_activation='linear')
    GAP = keras.layers.GlobalAveragePooling2D()(model_unet.output)
    outputs = keras.layers.Dense(1, activation='sigmoid')(GAP)
    model_unet = keras.Model(inputs=model_unet.input, outputs=outputs)
    # bce is just the binary crossentropy
    model_unet.compile(optimizer=adam, loss=bce_loss, metrics=['accuracy'])
    model_unet.summary()
    return model_unet
Here are the hyperparameters:
learning_rate = 0.0001
epochs = 20
dropout_rate = 0.2
batch_size = 16
kernel_size = 3
neuron = 8
adam = keras.optimizers.Adam(learning_rate=learning_rate)
My data set contains 1000 images split 80:20 between training and validation, and I'm using batch_size = 16.
Here's the plot for accuracy and loss:
I've tried a few learning rates and it didn't work :(
Thanks in advance for your help!!!
Any suggestions would be appreciated.
There are many variables, including the samples and the model, that affect the accuracy you can reach with binary cross-entropy as a single objective. To improve accuracy, first make sure you are using the correct measurement: the accuracy metric works correctly when you label with floats from 0 to 1; since this is binary cross-entropy, badly scaled label values can reflect negative results, because the loss comes from the squared difference against the label values.
The issue is not fluctuating loss and accuracy as such; you need to use the correct approach. Binary cross-entropy as a single objective converges quickly, but the turning points it returns are deterministic.
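For reference, a minimal sketch (my addition, using the standard Keras API, with model_unet and adam taken from the question's code) of pairing binary cross-entropy with an explicitly thresholded accuracy metric, so the metric agrees with float labels of 0.0 and 1.0:

import tensorflow as tf

# BinaryAccuracy thresholds the sigmoid output at 0.5 before comparing to labels
model_unet.compile(optimizer=adam,
                   loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),
                   metrics=[tf.keras.metrics.BinaryAccuracy(threshold=0.5)])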
Sample: you can apply the mean of all outputs, or simply use the critical points.
plt.figure(figsize=(5,2))
plt.title("Actors recognitions")
for i in range(len(list_file)):
    img = tf.keras.preprocessing.image.array_to_img(
        list_file[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])
    plt.subplot(5, 2, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file_actual[i])
    if predictions[0] > 0.51 :
        plt.xlabel(str(list_label_actual[1]) + " score: " + str(predictions[0]))
    else :
        plt.xlabel(str(list_label_actual[0]) + " score: " + str(predictions[0]))
Sample: you may try using vectors to improve the results.
import os
from os.path import exists
import tensorflow as tf
import tensorflow_io as tfio
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PATH = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Pikaploy', '*.tif')
PATH_2 = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Candidt Kibt', '*.tif')
files = tf.data.Dataset.list_files(PATH)
files_2 = tf.data.Dataset.list_files(PATH_2)
list_file = []
list_file_actual = []
list_label = []
list_label_actual = [ 'Pikaploy', 'Candidt Kibt' ]
for file in files.take(5):
    image = tf.io.read_file( file )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_file.append(image)
    # list_label.append([0, 0])
    list_label.append([0.0])
for file in files_2.take(5):
    image = tf.io.read_file( file )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_file.append(image)
    # list_label.append([1, 1])
    list_label.append([1.0])
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"
if not exists(checkpoint_dir) :
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
log_dir = checkpoint_dir
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# dataset = tf.data.Dataset.from_tensor_slices((tf.constant(tf.cast(list_file, dtype=tf.int64), shape=(10, 1, 32, 32, 4), dtype=tf.int64),tf.constant(list_label, shape=(10, 1, 2), dtype=tf.int64)))
dataset = tf.data.Dataset.from_tensor_slices((tf.constant(tf.cast(list_file, dtype=tf.int64), shape=(10, 1, 32, 32, 4), dtype=tf.int64),tf.constant(list_label, shape=(10, 1, 1), dtype=tf.float32)))
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape=( 32, 32, 4 )),
    tf.keras.layers.Normalization(mean=3., variance=2.),
    tf.keras.layers.Normalization(mean=4., variance=6.),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Reshape((128, 225)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(192, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# optimizer = tf.keras.optimizers.SGD(
#     learning_rate=0.001,
#     momentum=0.0,
#     nesterov=False,
#     name='SGD',
# )
optimizer = tf.keras.optimizers.Nadam(
    learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
    name='Nadam'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.BinaryCrossentropy(
    from_logits=False,
    label_smoothing=0.0,
    axis=-1,
    reduction=tf.keras.losses.Reduction.AUTO,
    name='binary_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class custom_callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        # if( logs['loss'] <= 0.2 ):
        #     self.model.stop_training = True
        if( logs['accuracy'] >= 0.95 ):
            self.model.stop_training = True
custom_callback = custom_callback()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path) :
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit( dataset, validation_data=dataset, batch_size=10, epochs=10000, callbacks=[custom_callback] )
model.save_weights(checkpoint_path)
plt.figure(figsize=(5,2))
plt.title("Actors recognitions")
for i in range(len(list_file)):
    img = tf.keras.preprocessing.image.array_to_img(
        list_file[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])
    plt.subplot(5, 2, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file_actual[i])
    if predictions[0] > 0.51 :
        plt.xlabel(str(list_label_actual[1]) + " score: " + str(predictions[0]))
    else :
        plt.xlabel(str(list_label_actual[0]) + " score: " + str(predictions[0]))
plt.show()
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
plt.show()
Output results:
Accuracy versus epochs:

Uncaught (in promise) Error: Unknown layer:

I'm developing a plant disease classification app using React and TensorFlow.
For this job, I have developed an AI model with a CNN and preprocessing layers.
The model works fine on Google Colab, but when I converted it to JSON form to deploy it in my app, it shows me this error in my browser's console:
Uncaught (in promise) Error: Unknown layer: RandomFlip. This may be due to one of the following reasons:
The layer is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code.
The custom layer is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().
this is my python code:
# -*- coding: utf-8 -*-
"""Copie de Untitled7.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1IFoiRxZ-FLnM-SIc6YfMtVooH2s-cMP4
"""
from google.colab import drive
drive.mount('/content/drive')
import zipfile
import os
zip_ref = zipfile.ZipFile('/content/drive/MyDrive/SoniaKarimDatasetPFA_Agriculture/archive.zip','r')
zip_ref.extractall('/content')
zip_ref.close()
len(os.listdir('/content'))
#importing necessary directories for analysis
import numpy as np
import tensorflow as tf
from tensorflow.keras import models, layers, callbacks
import matplotlib.pyplot as plt
!pip install split-folders
#Some additional parameters
#It is a good practice to declare these parameters
#outside of the functions also in a separate code block
directory = '/content/PlantVillage'
IMAGE_SIZE = 258
BATCH_SIZE = 32
CHANNELS = 3
EPOCHS = 3
import splitfolders
splitfolders.ratio(directory, output="output",
seed=1337, ratio=(.7,.3), group_prefix=None, move=False)
#Now, we gonna do the do! time to deal with
#our images!
#Let's call this tensorflow.keras function which will
#... you know!
dataset = tf.keras.preprocessing.image_dataset_from_directory(
    "/content/output/val",
    shuffle = True,
    image_size = (IMAGE_SIZE, IMAGE_SIZE),
    batch_size = BATCH_SIZE
)
#dataset = dataset.take(int(len(dataset)*0.35))
class_names = dataset.class_names
class_names
len(dataset)
plt.figure(figsize=(10,10))
for image_batch, label_batch in dataset.take(1):
    for i in range(12):
        ax = plt.subplot(3,4,i+1)
        plt.imshow(image_batch[i].numpy().astype('uint8'))
        plt.title(class_names[label_batch[i]])
        plt.axis('off')
        #print(image_batch[0].shape)
        #print(label_batch.numpy())
train_size = int(len(dataset)*0.8)
train_size
train_ds = dataset.take(train_size)
test_ds = dataset.skip(train_size)
validation_size = int(len(dataset)*0.1)
validation_ds = test_ds.take(validation_size)
test_ds = test_ds.skip(validation_size)
def get_dataset_partitions_tf(ds, train_split = 0.8,
                              val_split = 0.2,
                              test_split = 0.1,
                              shuffle = True,
                              shuffle_size = 10000):
    ds_size = len(ds)
    if shuffle:
        ds = ds.shuffle(shuffle_size, seed = 12)
    train_size = int(train_split * ds_size)
    val_size = int(val_split * ds_size)
    val_ds = ds.skip(train_size).take(val_size)
    train_ds = ds.take(train_size)
    test_ds = ds.skip(train_size).take(val_size)
    return train_ds, val_ds, test_ds
train_ds, val_ds, test_ds = get_dataset_partitions_tf(dataset)
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size = tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size = tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size = tf.data.AUTOTUNE)
data_augmentation = tf.keras.Sequential([
    layers.experimental.preprocessing.RandomFlip('horizontal_and_vertical'),
    layers.experimental.preprocessing.RandomRotation(0.2)
])
resize_and_rescale = tf.keras.Sequential([
    layers.experimental.preprocessing.Resizing(IMAGE_SIZE, IMAGE_SIZE),
    layers.experimental.preprocessing.Rescaling(1.0/255)
])
input_shape = (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, CHANNELS)
n_classes = 15
model = models.Sequential([
    resize_and_rescale,
    data_augmentation,
    layers.Conv2D(32, (3,3), input_shape = input_shape, activation='relu'),
    layers.MaxPooling2D((2,2)),
    layers.Conv2D(64, (3,3), activation='relu'),
    layers.MaxPooling2D((2,2)),
    layers.Conv2D(64, (3,3), activation='relu'),
    layers.MaxPooling2D((2,2)),
    layers.Conv2D(64, (3,3), activation='relu'),
    layers.MaxPooling2D((2,2)),
    layers.Conv2D(64, (3,3), activation='relu'),
    layers.MaxPooling2D((2,2)),
    layers.Conv2D(64, (3,3), activation='relu'),
    layers.MaxPooling2D((2,2)),
    layers.Flatten(),
    layers.Dense(64, activation = 'relu'),
    layers.Dense(n_classes, activation = 'softmax')
])
model.build(input_shape = input_shape )
model.summary()
from tensorflow.keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(
    min_delta = 0.001,
    patience = 10,
    restore_best_weights = True,
)
model.compile(
    optimizer = 'adam',
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits = False),
    metrics = ['accuracy']
)
from tensorflow.keras import callbacks
history = model.fit(
    train_ds,
    epochs = EPOCHS,
    batch_size = BATCH_SIZE,
    verbose = 1,
    callbacks = [early_stopping],
    validation_data = val_ds
)
score = model.evaluate(test_ds)
score
history.params
history.history.keys()
len(history.history['accuracy'])
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize = (8,8))
plt.subplot(1,2,1)
plt.plot(range(EPOCHS), acc, label = 'Training Accuracy')
plt.plot(range(EPOCHS), val_acc, label = 'Validation Accuracy')
plt.legend(loc = 'lower right')
plt.title('Training and validation Accuracy')
plt.subplot(1,2,2)
plt.plot(range(EPOCHS), loss, label = 'Training_Loss')
plt.plot(range(EPOCHS), val_loss, label = 'Validation Loss')
plt.legend(loc = 'lower right')
plt.title('Training and validation Loss')
for images_batch, labels_batch in test_ds.take(1):
    first_image = images_batch[0].numpy().astype('uint8')
    first_label = labels_batch[0]
    print('first image to predict')
    plt.imshow(first_image)
    print('actual label: ', class_names[first_label])
    batch_prediction = model.predict(images_batch)
    print(class_names[np.argmax(batch_prediction[0])])
def predict(model, img):
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    predicted_class = class_names[np.argmax(predictions[0])]
    confidence = round(100 * (np.max(predictions[0])), 2)
    return predicted_class, confidence
plt.figure(figsize = (15,15))
for images, labels in test_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3,3,i+1)
        plt.imshow(images[i].numpy().astype('uint8'))
        predicted_class, confidence = predict(model, images[i].numpy())
        actual_class = class_names[labels[i]]
        plt.title(f"actual: {actual_class}, \n Predicted : {predicted_class} \n Confidence: {confidence} ")
        plt.axis('off')
!pip install tensorflowjs
import tensorflowjs as tfjs
tfjs.converters.save_keras_model(model, "mode.h5")
#model.save("model1.h5")
#!pip install tensorflowjs
!tensorflowjs_converter --input_format keras '/content/mode.h5' '/content/sabetna-model'
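One common workaround (my suggestion, not part of the original post): RandomFlip and RandomRotation only matter during training, so you can export an inference-only model that skips the augmentation sub-model, assuming the remaining layers are all supported by TensorFlow.js:

# layers[0] is resize_and_rescale and layers[1] is data_augmentation in the
# Sequential model above; keep everything except the augmentation block
inference_model = tf.keras.Sequential([model.layers[0]] + model.layers[2:])
inference_model.build(input_shape = input_shape)
tfjs.converters.save_keras_model(inference_model, '/content/sabetna-model')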

Tensorflow accuracy score

I am creating a machine learning model in the form of a regression.
I started with XGBoost to get my first estimates. However, these were not convincing enough (using XGBRegressor I got only a 0.60 R2 score).
So I started looking for solutions with neural networks and ended up using TensorFlow. However, I am relatively new to this module and would like to know if there is an equivalent of xgboost.score.
The first snippet uses XGBoost; I am now working on the second one, with TensorFlow.
xgb = XGBRegressor(learning_rate = 0.30012, max_depth = 5, n_estimators = 180, subsample = 0.7, colsample_bylevel = 0.7, colsample_bytree = 0.7, min_child_weight = 4, reg_alpha = 10, reg_lambda = 10)
xgb.fit(X_train, y_train)
print("Score on train data : " + str(xgb.score(X_train, y_train)))
print("Score on validation data : " + str(xgb.score(X_val, y_val)))
The second one using TensorFlow:
tf.random.set_seed(123) # first we set the random seed
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation = tf.keras.activations.relu),
    tf.keras.layers.Dense(10),
    tf.keras.layers.Dense(1)
])
model.compile( loss = tf.keras.losses.mae, # mae stands for mean absolute error
               optimizer = tf.keras.optimizers.SGD(), # stochastic GD
               metrics = ['mae'])
model.fit( X_train, y_train, epochs = 100)
How do I evaluate my tensorflow model using R2 Score?
xgb.score returns the R2 score; you can implement this metric in TensorFlow from scratch:
def R_squared(y, y_pred):
    residual = tf.reduce_sum(tf.square(tf.subtract(y, y_pred)))
    total = tf.reduce_sum(tf.square(tf.subtract(y, tf.reduce_mean(y))))
    r2 = tf.subtract(1.0, tf.divide(residual, total))
    return r2
When compiling your model, pass this function in the metrics parameter so that it shows up when evaluating and training your model, like this:
model.compile( loss = tf.keras.losses.mae, # mae stands for mean absolute error
               optimizer = tf.keras.optimizers.SGD(), # stochastic GD
               metrics = ['mae', R_squared])
When you evaluate your model using model.evaluate, as below, the R2 score will appear:
model.evaluate(X_val, y_val)
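As a cross-check (my addition, assuming scikit-learn is available), sklearn's r2_score computes the same coefficient of determination that XGBRegressor.score reports:

from sklearn.metrics import r2_score

# compare the network's predictions against the validation targets
y_pred = model.predict(X_val)
print("R2 score:", r2_score(y_val, y_pred))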
You need to understand mean absolute error first; then you know what target value to expect from these functions. Also note that xgboost's score does not mean one single thing: for classifiers it is the mean accuracy, while for regressors it is the coefficient of determination, so refer to the documentation for the classifier or regressor you are actually using. You only need one consistent measure across the training and testing steps.
[ Conditions ]:
# [ Score ] : score – Mean accuracy of self.predict(X) wrt. y.
# https://xgboost.readthedocs.io/en/stable/python/python_api.html?highlight=xgboost.score#xgboost.XGBClassifier.score
# Return the mean accuracy on the given test data and labels.
# https://xgboost.readthedocs.io/en/stable/python/python_api.html?highlight=xgboost.score#xgboost.XGBRFRegressor.score
# Coefficients are defined only for linear learners
# Coefficients are only defined when the linear model is chosen as base learner (booster=gblinear).
# It is not defined for other base learner types, such as tree learners (booster=gbtree).
# https://xgboost.readthedocs.io/en/stable/search.html?q=xgboost.score&check_keywords=yes&area=default
# [py:method]: xgboost.XGBClassifier.score
# Return the mean accuracy on the given test data and labels. ...
# [py:method]: xgboost.XGBRFClassifier.score
# Return the mean accuracy on the given test data and labels. ...
# [py:method]: xgboost.XGBRFRegressor.score
# Return the coefficient of determination of the prediction. Notes The \(R^2\) score used when calling ...
[ Tensorflows expectation ]:
# https://stackoverflow.com/questions/72123313/tensorflow-accuracy-score
# Tensorflow accuracy score
[ Sample ]:
import os
from os.path import exists
import tensorflow as tf
import tensorflow_io as tfio
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PATH = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Pikaploy', '*.tif')
PATH_2 = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Candidt Kibt', '*.tif')
files = tf.data.Dataset.list_files(PATH)
files_2 = tf.data.Dataset.list_files(PATH_2)
list_file = []
list_file_actual = []
list_label = []
list_label_actual = [ 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt' ]
for file in files.take(5):
    image = tf.io.read_file( file )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_file.append(image)
    list_label.append(1)
for file in files_2.take(5):
    image = tf.io.read_file( file )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_file.append(image)
    list_label.append(9)
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"
if not exists(checkpoint_dir) :
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
log_dir = checkpoint_dir
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
dataset = tf.data.Dataset.from_tensor_slices((tf.constant(tf.cast(list_file, dtype=tf.int64), shape=(10, 1, 32, 32, 4), dtype=tf.int64),tf.constant(list_label, shape=(10, 1, 1), dtype=tf.int64)))
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class custom_callback(tf.keras.callbacks.Callback):
    def on_train_end(self, logs=None):
        print( "\ntrain mae: " + str( logs['mae'] ) )
    def on_test_end(self, logs=None):
        print( "\nevaluation mae: " + str( logs['mae'] ) )
custom_callback = custom_callback()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape=( 32, 32, 4 )),
    tf.keras.layers.Normalization(mean=3., variance=2.),
    tf.keras.layers.Normalization(mean=4., variance=6.),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Reshape((128, 225)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(192, activation='relu'),
    tf.keras.layers.Dense(10),
])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.SGD( learning_rate=0.01, momentum=0.1, )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.mae
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['mae'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path) :
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit( dataset, validation_data=(dataset), validation_steps=1, batch_size=100, epochs=50, callbacks=[custom_callback] )
model.save_weights(checkpoint_path)
# [ Score ] : score – Mean accuracy of self.predict(X) wrt. y.
# https://xgboost.readthedocs.io/en/stable/python/python_api.html?highlight=xgboost.score#xgboost.XGBClassifier.score
# Return the mean accuracy on the given test data and labels.
result = model.evaluate(
    dataset, batch_size=100, verbose=0, callbacks=[custom_callback]
)
plt.figure(figsize=(5,2))
plt.title("Actors recognitions")
for i in range(len(list_file)):
    img = tf.keras.preprocessing.image.array_to_img(
        list_file[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array, callbacks=[custom_callback], verbose=1)
    score = tf.nn.softmax(predictions[0])
    plt.subplot(5, 2, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file_actual[i])
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(list_label_actual[tf.math.argmax(score)]))
plt.show()
input('...')
[ Output ]: mae here is the mean absolute error value.
train mae: 3.8480560779571533
evaluation mae: 3.8665459156036377
[ Result ]:

tensorflow tensorboard hparams

import tensorflow as tf
from tensorboard.plugins.hparams import api as hp
####### load the model and data here
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([32,64,128,256, 512]))
HP_DROPOUT = hp.HParam('dropout', hp.RealInterval(0.1, 0.9))
HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['Nadam','SGD','RMSprop','adam','Adagrad']))
HP_L2 = hp.HParam('l2 regularizer', hp.RealInterval(.00001,.01))
HP_LeakyReLU=hp.HParam('alpha', hp.RealInterval(0.1, 0.9))
METRIC_ACCURACY = 'accuracy'
with tf.summary.create_file_writer('raw-img/log/hparam_tuning/').as_default():
    hp.hparams_config(
        hparams=[HP_NUM_UNITS, HP_DROPOUT, HP_OPTIMIZER, HP_L2, HP_LeakyReLU],
        metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
    )
def train_test_model(hparams):
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(hparams[HP_NUM_UNITS], kernel_regularizer=tf.keras.regularizers.l2(0.001)),
        tf.keras.layers.LeakyReLU(hparams[HP_LeakyReLU]),
        tf.keras.layers.Dropout(hparams[HP_DROPOUT]),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])
    model.compile(
        optimizer=hparams[HP_OPTIMIZER],
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'],
    )
    model.fit(x_train, y_train, epochs=2)
    _, accuracy = model.evaluate(x_test, y_test)
    return accuracy
def run(run_dir, hparams):
    with tf.summary.create_file_writer(run_dir).as_default():
        hp.hparams(hparams)  # record the values used in this trial
        accuracy = train_test_model(hparams)
        tf.summary.scalar(METRIC_ACCURACY, accuracy, step=2)
session_num = 0
for num_units in HP_NUM_UNITS.domain.values:
    for dropout_rate in (HP_DROPOUT.domain.min_value, HP_DROPOUT.domain.max_value):
        for l2 in (HP_L2.domain.min_value, HP_L2.domain.max_value):
            for alpha in (HP_LeakyReLU.domain.min_value, HP_LeakyReLU.domain.max_value):
                for optimizer in HP_OPTIMIZER.domain.values:
                    hparams = {
                        HP_NUM_UNITS: num_units,
                        HP_DROPOUT: dropout_rate,
                        HP_L2: l2,
                        HP_LeakyReLU: alpha,
                        HP_OPTIMIZER: optimizer,
                    }
                    run_name = "run-%d" % session_num
                    print('--- Starting trial: %s' % run_name)
                    print({h.name: hparams[h] for h in hparams})
                    run('raw-img/log/hparam_tuning/' + run_name, hparams)
                    session_num += 1
I have tried to use hparams in TF. I have set dropout, l2 and the optimizer.
I now need to set a value for learning_rate and test it.
What should I do to set learning_rate like dropout and l2 and test it?
I have tried to do this:
model.compile(
    optimizer=hparams[HP_OPTIMIZER](lr=0.001),
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)
but it doesn't work. I want to try different values of learning_rate, like (dropout, l2).
You want to separate the used optimizer into a separate variable:
if hparams[HP_OPTIMIZER] == "SGD":
optimizer = tf.keras.optimizers.SGD(learning_rate=float(hparams[HP_LR]))
elif hparams[HP_OPTIMIZER] == "adam":
optimizer = tf.keras.optimizers.Adam(learning_rate=float(hparams[HP_LR]))
else:
raise ValueError("unexpected optimizer name: %r" % hparams[HP_OPTIMIZER])
model.compile(
optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
)
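Note that HP_LR is not defined in the snippet above; a minimal sketch of the missing declaration (my assumption, matching the style of the other HParam definitions):

# hypothetical learning-rate hyperparameter; add it to hparams_config and
# iterate over it in the trial loop like the other HParams
HP_LR = hp.HParam('learning_rate', hp.Discrete([1e-4, 1e-3, 1e-2]))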
I found the solution here.

Validation and prediction Accuracy of VGG19 Model is not getting better

Hello, I am working on detecting diabetic retinopathy from images in the Kaggle dataset (https://www.kaggle.com/c/diabetic-retinopathy-detection) using VGG19. I have preprocessed my images with a Gaussian filter and resized them to 224 x 224 x 3, then passed them to a VGG19 model with 5760 training images and 640 validation images. I am getting a training accuracy of 100%, but the validation accuracy does not get better than 60%, and the prediction accuracy after the model finished is 50%.
This is my model code:
# imports and accumulators assumed by this snippet (not shown in the original post)
import numpy as np
import keras
from keras.layers import Dense
from keras.models import Model, load_model
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix, classification_report

modelHistory = []
predictedAccuracy = []

for count in range(6):
    print("*************************************************************************************************")
    print("******************************************************Fold " + str(count))
    trainDirectory = "Folds/Fold" + str(count) + "-Train"
    testDirectory = "Folds/Fold" + str(count) + "-Test"
    trdata = ImageDataGenerator()
    traindata = trdata.flow_from_directory(class_mode = 'binary', batch_size=64, directory= trainDirectory, target_size=(224, 224))
    tsdata = ImageDataGenerator()
    testdata = tsdata.flow_from_directory(class_mode = 'binary', batch_size=64, directory= testDirectory, target_size=(224, 224))

    from keras.applications.vgg19 import VGG19
    vggmodel = VGG19(weights='imagenet', include_top=True, input_shape=(224, 224, 3))
    for layers in (vggmodel.layers)[:19]:
        layers.trainable = False
    X = vggmodel.layers[-2].output
    predictions = Dense(1, activation="sigmoid")(X)
    model_final = Model(vggmodel.input, predictions)
    opt = keras.optimizers.Adam(learning_rate=0.0001)
    model_final.compile(loss="binary_crossentropy", optimizer=opt,
                        metrics=["accuracy"])

    from keras.callbacks import ModelCheckpoint, EarlyStopping
    checkpoint = ModelCheckpoint("vgg19_1.h5", monitor='val_accuracy', verbose=1, save_best_only=True,
                                 save_weights_only=False, mode='auto', period=1)
    early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=50, verbose=1, mode='auto')
    h = model_final.fit_generator(generator=traindata, steps_per_epoch= int(5760 / 64), epochs= 10, validation_data=testdata,
                                  validation_steps= int(640 / 64), callbacks=[checkpoint, early])
    modelHistory.append(h.history)

    model_final.save_weights("vgg19_1.h5")
    model_final.save('vgg19_1.h5')
    model = load_model('vgg19_1.h5')
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
    predict_datagen = ImageDataGenerator()
    predict_generator = predict_datagen.flow_from_directory(
        "Validation",
        class_mode= None,
        target_size=(224, 224),
    )
    pred = model.predict_generator(predict_generator)
    y_pred = np.argmax(pred, axis=1)
    print('Confusion Matrix')
    print(confusion_matrix(predict_generator.classes, y_pred))
    print('Classification Report')
    target_names = ['0', '1']
    print(classification_report(predict_generator.classes, y_pred, target_names=target_names))
    l = len(y_pred)
    acc = sum([y_pred[i]==predict_generator.classes[i] for i in range(l)])/l
    print(acc)
    predictedAccuracy.append(acc)
This is the accuracy that I got:
This is the plot of the accuracy / loss:
