I've written the following simple MLP network for the MNIST dataset.
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import callbacks
batch_size = 100
num_classes = 10
epochs = 20
tb = callbacks.TensorBoard(log_dir='/Users/shlomi.shwartz/tensorflow/notebooks/logs/minist',
                           histogram_freq=10, batch_size=32,
                           write_graph=True, write_grads=True, write_images=True,
                           embeddings_freq=10, embeddings_layer_names=None,
                           embeddings_metadata=None)
early_stop = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
                                     patience=3, verbose=1, mode='auto')
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Dense(200, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(60, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(30, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
history = model.fit(x_train, y_train,
                    callbacks=[tb, early_stop],
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
The model ran fine, and I could see the scalar info on TensorBoard. However, when I changed embeddings_freq to 10 to try to visualize the images (like seen here), I got the following error:
Traceback (most recent call last):
  File "/Users/shlomi.shwartz/IdeaProjects/TF/src/minist.py", line 65, in <module>
    validation_data=(x_test, y_test))
  File "/Users/shlomi.shwartz/tensorflow/lib/python3.6/site-packages/keras/models.py", line 870, in fit
    initial_epoch=initial_epoch)
  File "/Users/shlomi.shwartz/tensorflow/lib/python3.6/site-packages/keras/engine/training.py", line 1507, in fit
    initial_epoch=initial_epoch)
  File "/Users/shlomi.shwartz/tensorflow/lib/python3.6/site-packages/keras/engine/training.py", line 1117, in _fit_loop
    callbacks.set_model(callback_model)
  File "/Users/shlomi.shwartz/tensorflow/lib/python3.6/site-packages/keras/callbacks.py", line 52, in set_model
    callback.set_model(model)
  File "/Users/shlomi.shwartz/tensorflow/lib/python3.6/site-packages/keras/callbacks.py", line 719, in set_model
    self.saver = tf.train.Saver(list(embeddings.values()))
  File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/training/saver.py", line 1139, in __init__
    self.build()
  File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/training/saver.py", line 1161, in build
    raise ValueError("No variables to save")
ValueError: No variables to save
Q: What am I missing? Is that the right way of doing it in Keras?
Update: I understand there are some prerequisites for using the embedding projector; however, I haven't found a good tutorial for doing so in Keras. Any help would be appreciated.
What is called "embedding" here in callbacks.TensorBoard is, in a broad sense, any layer weight. According to Keras documentation:
embeddings_layer_names: a list of names of layers to keep eye on. If None or empty list all the embedding layer will be watched.
So by default, it's going to monitor the Embedding layers, but you don't really need an Embedding layer to use this visualization tool.
In your provided MLP example, what's missing is the embeddings_layer_names argument. You have to figure out which layers you're going to visualize. Suppose you want to visualize the weights (or kernel, in Keras terms) of all Dense layers; you can specify embeddings_layer_names like this:
model = Sequential()
model.add(Dense(200, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(60, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(30, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
embedding_layer_names = set(layer.name
                            for layer in model.layers
                            if layer.name.startswith('dense_'))

tb = callbacks.TensorBoard(log_dir='temp', histogram_freq=10, batch_size=32,
                           write_graph=True, write_grads=True, write_images=True,
                           embeddings_freq=10, embeddings_metadata=None,
                           embeddings_layer_names=embedding_layer_names)
model.compile(...)
model.fit(...)
Then, you can see something like this in TensorBoard:
You can see the relevant lines in Keras source if you want to figure out what's happening regarding embeddings_layer_names.
Edit:
So here's a quick-and-dirty solution for visualizing layer outputs. Since the original TensorBoard callback does not support this, implementing a new callback seems inevitable.
Since re-writing the entire TensorBoard callback here would take up a lot of page space, I'll just extend the original TensorBoard and write out the parts that are different (which is already quite lengthy). But to avoid duplicated computation and model saving, re-writing the TensorBoard callback would be the better and cleaner way.
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
from keras import backend as K
from keras.models import Model
from keras.callbacks import TensorBoard
class TensorResponseBoard(TensorBoard):
    def __init__(self, val_size, img_path, img_size, **kwargs):
        super(TensorResponseBoard, self).__init__(**kwargs)
        self.val_size = val_size
        self.img_path = img_path
        self.img_size = img_size

    def set_model(self, model):
        super(TensorResponseBoard, self).set_model(model)

        if self.embeddings_freq and self.embeddings_layer_names:
            embeddings = {}
            for layer_name in self.embeddings_layer_names:
                # initialize tensors which will later be used in `on_epoch_end()` to
                # store the response values by feeding the val data through the model
                layer = self.model.get_layer(layer_name)
                output_dim = layer.output.shape[-1]
                response_tensor = tf.Variable(tf.zeros([self.val_size, output_dim]),
                                              name=layer_name + '_response')
                embeddings[layer_name] = response_tensor

            self.embeddings = embeddings
            self.saver = tf.train.Saver(list(self.embeddings.values()))

            response_outputs = [self.model.get_layer(layer_name).output
                                for layer_name in self.embeddings_layer_names]
            self.response_model = Model(self.model.inputs, response_outputs)

            config = projector.ProjectorConfig()
            embeddings_metadata = {layer_name: self.embeddings_metadata
                                   for layer_name in embeddings.keys()}

            for layer_name, response_tensor in self.embeddings.items():
                embedding = config.embeddings.add()
                embedding.tensor_name = response_tensor.name

                # for coloring points by labels
                embedding.metadata_path = embeddings_metadata[layer_name]

                # for attaching images to the points
                embedding.sprite.image_path = self.img_path
                embedding.sprite.single_image_dim.extend(self.img_size)

            projector.visualize_embeddings(self.writer, config)

    def on_epoch_end(self, epoch, logs=None):
        super(TensorResponseBoard, self).on_epoch_end(epoch, logs)

        if self.embeddings_freq and self.embeddings_ckpt_path:
            if epoch % self.embeddings_freq == 0:
                # feeding the validation data through the model
                val_data = self.validation_data[0]
                response_values = self.response_model.predict(val_data)
                if len(self.embeddings_layer_names) == 1:
                    response_values = [response_values]

                # record the response at each layer we're monitoring
                response_tensors = []
                for layer_name in self.embeddings_layer_names:
                    response_tensors.append(self.embeddings[layer_name])
                K.batch_set_value(list(zip(response_tensors, response_values)))

                # finally, save all tensors holding the layer responses
                self.saver.save(self.sess, self.embeddings_ckpt_path, epoch)
To use it:
tb = TensorResponseBoard(log_dir=log_dir, histogram_freq=10, batch_size=10,
                         write_graph=True, write_grads=True, write_images=True,
                         embeddings_freq=10,
                         embeddings_layer_names=['dense_1'],
                         embeddings_metadata='metadata.tsv',
                         val_size=len(x_test), img_path='images.jpg', img_size=[28, 28])
Before launching TensorBoard, you'll need to save the labels and images to log_dir for visualization:
import os
import numpy as np
from PIL import Image

# arrange the 10000 MNIST test images into a 100x100 sprite grid
img_array = x_test.reshape(100, 100, 28, 28)
img_array_flat = np.concatenate([np.concatenate([x for x in row], axis=1) for row in img_array])
img = Image.fromarray(np.uint8(255 * (1. - img_array_flat)))
img.save(os.path.join(log_dir, 'images.jpg'))
np.savetxt(os.path.join(log_dir, 'metadata.tsv'), np.where(y_test)[1], fmt='%d')
Here's the result:
You need at least one Embedding layer in Keras. There was a good explanation of them on Cross Validated; it is not directly about Keras, but the concepts are roughly the same: What is an embedding layer in a neural network?
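For reference, a minimal sketch of an Embedding layer in Keras (the sizes below are toy values, purely illustrative):
from keras.models import Sequential
from keras.layers import Embedding

model = Sequential()
# maps integer token ids in [0, 1000) to dense 64-dimensional vectors
model.add(Embedding(input_dim=1000, output_dim=64, input_length=10))
# input: (batch, 10) integer ids -> output: (batch, 10, 64) float vectors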
So, I conclude that what you actually want (it's not completely clear from your post) is to visualize the predictions of your model, in a manner similar to this TensorBoard demo.
To start with, reproducing this is non-trivial even in TensorFlow, let alone Keras. The demo makes only brief, passing references to things like the metadata and sprite images that are necessary in order to obtain such visualizations.
Bottom line: although non-trivial, it is indeed possible to do it with Keras. You don't need the Keras callbacks; all you need is your model predictions, the necessary metadata & sprite image, and some pure TensorFlow code. So,
Step 1 - get your model predictions for the test set:
emb = model.predict(x_test) # 'emb' for embedding
Step 2a - build a metadata file with the real labels of the test set:
import os
import numpy as np

LOG_DIR = '/home/herc/SO/tmp'  # FULL PATH HERE!!!
metadata_file = os.path.join(LOG_DIR, 'metadata.tsv')
with open(metadata_file, 'w') as f:
    for i in range(len(y_test)):
        c = np.nonzero(y_test[i])[0][0]
        f.write('{}\n'.format(c))
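Equivalently, assuming y_test is one-hot encoded as above, the loop can be replaced with a single call:
np.savetxt(metadata_file, np.argmax(y_test, axis=1), fmt='%d')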
Step 2b - get the sprite image mnist_10k_sprite.png as provided by the TensorFlow guys here, and place it in your LOG_DIR
Step 3 - write some TensorFlow code:
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
embedding_var = tf.Variable(emb, name='final_layer_embedding')
sess = tf.Session()
sess.run(embedding_var.initializer)
summary_writer = tf.summary.FileWriter(LOG_DIR)
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
# Specify the metadata file:
embedding.metadata_path = os.path.join(LOG_DIR, 'metadata.tsv')
# Specify the sprite image:
embedding.sprite.image_path = os.path.join(LOG_DIR, 'mnist_10k_sprite.png')
embedding.sprite.single_image_dim.extend([28, 28]) # image size = 28x28
projector.visualize_embeddings(summary_writer, config)
saver = tf.train.Saver([embedding_var])
saver.save(sess, os.path.join(LOG_DIR, 'model2.ckpt'), 1)
Then, run TensorBoard in your LOG_DIR and select color by label; here is what you get:
Modifying this in order to get predictions for other layers is straightforward, although in this case the Keras Functional API may be a better choice.
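For instance, a minimal sketch of grabbing the responses of an intermediate layer instead of the final softmax (the layer name 'dense_3' is an assumption here; check model.summary() for the actual names in your model):
from keras.models import Model

# sub-model that stops at the intermediate layer of interest
intermediate_model = Model(inputs=model.input,
                           outputs=model.get_layer('dense_3').output)
emb = intermediate_model.predict(x_test)  # then continue with Steps 2 and 3 unchanged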
Related
I am working on a ResNet50 model to predict COVID/non-COVID presence in chest X-rays. However, my model currently only predicts class label 1... I have tried 3 different optimizers, 2 different loss functions, changing the learning rate multiple times between 1e-6 and 0.5, and changing the weights on the class labels...
Does anyone have any ideas what the issue could be? Why does it always predict class label 1?
Here is the code:
# import data
# train_ds = tf.keras.utils.image_dataset_from_directory(
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    DATASET_PATH + "Covid/",
    labels="inferred",
    batch_size=64,
    image_size=(256, 256),
    shuffle=True,
    seed=COVID_SEED,
    validation_split=0.2,
    subset="training",
)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    DATASET_PATH + "Covid/",
    labels="inferred",
    batch_size=64,
    image_size=(256, 256),
    shuffle=True,
    seed=COVID_SEED,
    validation_split=0.2,
    subset="validation",
)
# split data
train_X = list()
train_y = list()
test_X = list()
test_y = list()
for image_batch_train, labels_batch_train in train_ds:
    for index in range(0, len(image_batch_train)):
        train_X.append(image_batch_train[index])
        train_y.append(labels_batch_train[index])
for image_batch, labels_batch in val_ds:
    for index in range(0, len(image_batch)):
        test_X.append(image_batch[index])
        test_y.append(labels_batch[index])
Conv_Base = ResNet50(weights=None, input_shape=(256, 256, 3), classes=2)
# The Convolutional Base of the Pre-Trained Model will be added as a Layer in this Model
for layer in Conv_Base.layers[:-8]:
    layer.trainable = False
model = Sequential()
model.add(Conv_Base)
model.add(Flatten())
model.add(Dense(units = 1024, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(units = 1, activation = 'sigmoid'))
model.summary()
opt = Adadelta(learning_rate=0.3)
model.compile(optimizer = opt, loss = 'BinaryCrossentropy', metrics = ['accuracy'])
# try to add class weights to make it predict 0, since we currently only predict class label 1
class_weight = {0: 50.,
                1: 1.}
r=model.fit(x = train_ds, validation_data = val_ds, epochs = COVID_EPOCHS, class_weight=class_weight)
#print the class labels of prediction
predictions = model.predict(val_ds)
predictions = np.ndarray.flatten(predictions)
predictions = np.where(predictions < 0, 0, 1) # Convert to 0 and 1.
np.set_printoptions(threshold=np.inf)
print(predictions)
Well done! I'll leave an answer here as well because I think you need to do more besides normalization.
When the weights are None (see here), the ResNet weights are randomly initialized. You are using a large convolutional feature extractor (the first layers of a ResNet), but this extractor was not trained on anything. You may achieve decent performance because the Dense layer that follows it compensates for the random initialization, but chances are it's not what you're aiming for. Keep in mind that your ResNet weights are not trainable, so the feature extraction will never change.
The reason I suggested ImageNet weights is that you're working with images, so it's reasonable to assume your convolutional feature extractor needs to extract important image features such as colors, shapes, edges, etc. The fact that the ImageNet ResNet was trained on 1000 classes or so is irrelevant, because you chop it off before it reaches the output layer, which is where the class-number bottleneck occurs. I would pursue the weights='imagenet' option.
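A minimal sketch of that change, mirroring the head from your code (the layer sizes are just your current ones, not recommendations):
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout

# pretrained ImageNet features instead of a random, frozen extractor
conv_base = ResNet50(weights='imagenet', include_top=False,
                     input_shape=(256, 256, 3))
conv_base.trainable = False  # freeze the pretrained extractor for now

model = Sequential([
    conv_base,
    Flatten(),
    Dense(1024, activation='relu'),
    Dropout(0.5),
    Dense(1, activation='sigmoid'),
])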
I am using TensorFlow's flow_from_directory to collect a large image dataset and then train on it. I want to use Keras Tuner, but when I run
tuner.search(test_data_gen, epochs=50,
             validation_split=0.2, callbacks=[stop_early])
it throws the following error:
ValueError: `validation_split` is only supported for Tensors or NumPy arrays, found following types in the input: [<class 'tensorflow.python.keras.preprocessing.image.DirectoryIterator'>]
I don't know much about converting between data types in AI, so any help is truly appreciated.
Here is the rest of my code:
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import IPython.display as display
from PIL import Image, ImageSequence
import os
import pathlib
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import cv2
import datetime
import kerastuner as kt
tf.compat.v1.enable_eager_execution()
epochs = 50
steps_per_epoch = 10
batch_size = 20
IMG_HEIGHT = 200
IMG_WIDTH = 200
train_dir = "Data/Train"
test_dir = "Data/Val"
train_image_generator = ImageDataGenerator(rescale=1. / 255)
test_image_generator = ImageDataGenerator(rescale=1. / 255)
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
                                                           directory=train_dir,
                                                           shuffle=True,
                                                           target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                           class_mode='sparse')
test_data_gen = test_image_generator.flow_from_directory(batch_size=batch_size,
                                                         directory=test_dir,
                                                         shuffle=True,
                                                         target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                         class_mode='sparse')
def model_builder(hp):
    model = keras.Sequential()
    model.add(Conv2D(265, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)))
    model.add(MaxPooling2D())
    model.add(Conv2D(64, 3, padding='same', activation='relu'))
    model.add(MaxPooling2D())
    model.add(Conv2D(32, 3, padding='same', activation='relu'))
    model.add(MaxPooling2D())
    model.add(Flatten())
    model.add(keras.layers.Dense(256, activation="relu"))
    hp_units = hp.Int('units', min_value=32, max_value=512, step=32)
    model.add(keras.layers.Dense(hp_units, activation="relu"))
    model.add(keras.layers.Dense(80, activation="softmax"))
    hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])
    model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
                  loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['top_k_categorical_accuracy'])
    return model
tuner = kt.Hyperband(model_builder,
                     objective='val_accuracy',
                     max_epochs=30,
                     factor=3,
                     directory='Hypertuner_Dir',
                     project_name='AIOS')
stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
and start searching with the tuner:
tuner.search(train_data_gen, epochs=50, validation_split=0.2, callbacks=[stop_early])
# Get the optimal hyperparameters
best_hps=tuner.get_best_hyperparameters(num_trials=1)[0]
print(f"""
The hyperparameter search is complete. The optimal number of units in the first densely-connected
layer is {best_hps.get('units')} and the optimal learning rate for the optimizer
is {best_hps.get('learning_rate')}.
""")
model = tuner.hypermodel.build(best_hps)
model.summary()
tf.keras.utils.plot_model(model, to_file="model.png", show_shapes=True, show_layer_names=True, rankdir='TB')
checkpoint_path = "training/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                 save_weights_only=True,
                                                 verbose=1)
os.system("rm -r logs")
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
#history = model.fit(train_data_gen,steps_per_epoch=steps_per_epoch,epochs=epochs,validation_data=test_data_gen,validation_steps=10,callbacks=[cp_callback, tensorboard_callback])
history = model.fit(train_data_gen,steps_per_epoch=steps_per_epoch,epochs=epochs,validation_split=0.2,validation_steps=10,callbacks=[cp_callback, tensorboard_callback])
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
model.save('model.h5', include_optimizer=True)
test_loss, test_acc = model.evaluate(test_data_gen)
print("Tested Acc: ", test_acc)
print("Tested Acc: ", test_acc*100, "%")
val_acc_per_epoch = history.history['val_accuracy']
best_epoch = val_acc_per_epoch.index(max(val_acc_per_epoch)) + 1
print('Best epoch: %d' % (best_epoch,))
===================================EDIT====================================
According to the docs on validation_split:
validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. The model will set apart this fraction of the training data, will not train on it, and will evaluate the loss and any model metrics on this data at the end of each epoch. The validation data is selected from the last samples in the x and y data provided, before shuffling. This argument is not supported when x is a dataset, generator or keras.utils.Sequence instance.
Now, since you have a generator, try the following (reference):
tuner.search(train_data_gen,
             epochs=50,
             validation_data=test_data_gen,
             callbacks=[stop_early])
Also, ensure that each of your generators properly generates valid batches.
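For example, a quick sanity check (a sketch, reusing the names from your code) could pull a single batch from each generator and confirm the shapes are what the model expects:
# DirectoryIterator yields (images, labels) tuples
x_batch, y_batch = next(train_data_gen)
print(x_batch.shape, y_batch.shape)  # expect something like (20, 200, 200, 3) (20,)

x_val_batch, y_val_batch = next(test_data_gen)
print(x_val_batch.shape, y_val_batch.shape)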
Unfortunately, doing validation_split=0.2 does not work in this case, because this argument assumes that the data is a Tensor or a NumPy array. Since you have the data stored as a generator (which is a good idea), you can't simply split it.
You'll need to create a validation generator, just like you did with test_data_gen, and change validation_split=0.2 to validation_data=val_data_gen.
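If you don't have a separate validation directory, one option (a sketch, reusing the names from your code) is to let ImageDataGenerator carve a validation subset out of the training directory via its validation_split and subset arguments:
train_image_generator = ImageDataGenerator(rescale=1. / 255, validation_split=0.2)

train_data_gen = train_image_generator.flow_from_directory(directory=train_dir,
                                                           batch_size=batch_size,
                                                           shuffle=True,
                                                           target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                           class_mode='sparse',
                                                           subset='training')
val_data_gen = train_image_generator.flow_from_directory(directory=train_dir,
                                                         batch_size=batch_size,
                                                         shuffle=True,
                                                         target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                         class_mode='sparse',
                                                         subset='validation')

tuner.search(train_data_gen, epochs=50,
             validation_data=val_data_gen, callbacks=[stop_early])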
I have run the base model to a good accuracy, and now I want to load these weights and use them for a model with a few additional layers, and later for hyperparameter tuning.
First I construct this new model:
input_tensor = Input(shape=train_generator.image_shape)

base_model = applications.ResNet152(weights='imagenet', include_top=False, input_tensor=input_tensor)

for layer in base_model.layers[:]:
    layer.trainable = False

x = Flatten()(base_model.output)
x = Dense(1024, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01),
          kernel_initializer=tf.keras.initializers.HeNormal(),
          kernel_constraint=tf.keras.constraints.UnitNorm(axis=0))(x)
x = LeakyReLU()(x)
x = BatchNormalization()(x)
x = Dropout(rate=0.1)(x)
x = Dense(512, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01),
          kernel_initializer=tf.keras.initializers.HeNormal(),
          kernel_constraint=tf.keras.constraints.UnitNorm(axis=0))(x)
x = LeakyReLU()(x)
x = BatchNormalization()(x)

predictions = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
Then I compile it, because that is necessary at this stage: I have to run model.fit with dummy input before I load the weights. (I think; I have tried putting these code blocks in many different orders to make it work, but I have failed each time.)
opt = tfa.optimizers.LazyAdam(lr=0.000074)

model.compile(
    loss='sparse_categorical_crossentropy',
    optimizer=opt,
    metrics=['accuracy']
)
dummy_input = tf.random.uniform([32, 224, 224, 3])
dummy_label = tf.random.uniform([32,])
hist = model.fit(dummy_input, dummy_label)
Then I load the weights for the base model:
base_model.load_weights('/content/drive/MyDrive/MODELS_SAVED/model_RESNET152/model_weights2.h5', by_name=True)
Then I load the weights for the optimizer:
import pickle
with open("/content/drive/MyDrive/weight_values2optimizer.pkl", "rb") as f:
    weights = pickle.load(f)
opt = model.optimizer.set_weights(weights)
This results in the following error:
ValueError: You called `set_weights(weights)` on optimizer LazyAdam
with a weight list of length 1245,
but the optimizer was expecting 13 weights.
Provided weights: [63504, array([[[[ 0.00000000e+00, -5.74126025e-04...
Does anyone have ideas on how to solve this?
If you have a solution with Adam instead of LazyAdam, that is fine too. (I have no idea if that would make a difference.)
Edit:
I have tried many new things over the last couple of days, but nothing is working. Here is the entire code as it stands right now. It includes both the part where I am saving and the part where I am loading.
import tarfile
my_tar2 = tarfile.open('test.tgz')
my_tar2.extractall('test') # specify which folder to extract to
my_tar2.close()
import zipfile
with zipfile.ZipFile("/content/tot_train_bremoved2.zip", 'r') as zip_ref:
    zip_ref.extractall("/content/train/")
import pandas as pd
train_info = pd.read_csv("/content/drive/MyDrive/train_info.csv")
test_info = pd.read_csv("/content/drive/MyDrive/test_info.csv")
train_folder = "/content/train"
test_folder = "/content/test/test"
import tensorflow as tf
import tensorflow.keras as keras
from keras.layers import Input, Lambda, Dense, Flatten, BatchNormalization, Dropout, PReLU, GlobalAveragePooling2D, LeakyReLU, MaxPooling2D
from keras.models import Model
from tensorflow.keras.applications.resnet_v2 import ResNet152V2, preprocess_input
from keras import applications
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.losses import sparse_categorical_crossentropy
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping, TensorBoard
import tensorflow_addons as tfa
from sklearn.metrics import confusion_matrix
import numpy as np
import matplotlib.pyplot as plt
num_classes = 423
epochs = 20
batch_size = 32
img_height = 224
img_width = 224
IMAGE_SIZE = [img_height, img_width]
_train_generator = ImageDataGenerator(
    rotation_range=180,
    zoom_range=0.2,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.3,
    horizontal_flip=True,
    vertical_flip=True,
    preprocessing_function=preprocess_input)

_val_generator = ImageDataGenerator(
    preprocessing_function=preprocess_input)

train_generator = _train_generator.flow_from_dataframe(dataframe=train_info,
                                                       directory=train_folder, x_col="filename",
                                                       y_col="artist", seed=42,
                                                       batch_size=batch_size, shuffle=True,
                                                       class_mode="sparse", target_size=IMAGE_SIZE)

valid_generator = _val_generator.flow_from_dataframe(dataframe=test_info,
                                                     directory=test_folder, x_col="filename",
                                                     y_col="artist", seed=42,
                                                     batch_size=batch_size, shuffle=True,
                                                     class_mode="sparse", target_size=IMAGE_SIZE)
def get_uncompiled_model():
    input_tensor = Input(shape=train_generator.image_shape)
    base_model = applications.ResNet152(weights='imagenet', include_top=False, input_tensor=input_tensor)
    for layer in base_model.layers[:]:
        layer.trainable = True
    x = Flatten()(base_model.output)
    predictions = Dense(num_classes, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    return model

opt = keras.optimizers.Adam(lr=0.000074)

def get_compiled_model():
    model = get_uncompiled_model()
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=opt,
        metrics=['accuracy']
    )
    return model
earlyStopping = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='min')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, verbose=1, min_delta=1e-4, mode='min')
model = get_compiled_model()
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
model.fit(
    train_generator,
    validation_data=valid_generator,
    epochs=epochs,
    verbose=1,
    steps_per_epoch=len_train // batch_size,
    validation_steps=len_test // batch_size,
    callbacks=[earlyStopping, reduce_lr]
)
import keras.backend as K
import pickle
model.save_weights('/content/drive/MyDrive/MODELS_SAVED/model_RESNET152/model_weights5.h5')
symbolic_weights = getattr(model.optimizer, 'weights')
weight_values = K.batch_get_value(symbolic_weights)
with open('/content/drive/MyDrive/MODELS_SAVED/optimizer3.pkl', 'wb') as f:
    pickle.dump(weight_values, f)
# Here I am building the new model, and it's from here that I am having problems
input_tensor = Input(shape=train_generator.image_shape)

base_model = applications.ResNet152(weights='imagenet', include_top=False, input_tensor=input_tensor)

for layer in base_model.layers[:]:
    layer.trainable = False

x = Flatten()(base_model.output)
x = Dense(512, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01),
          kernel_initializer=tf.keras.initializers.HeNormal(),
          kernel_constraint=tf.keras.constraints.UnitNorm(axis=0))(x)
x = LeakyReLU()(x)
x = BatchNormalization()(x)

predictions = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)

model.compile(
    loss='sparse_categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)

base_model.load_weights('/content/drive/MyDrive/MODELS_SAVED/model_RESNET152/model_weights5.h5', by_name=True)

with open('/content/drive/MyDrive/MODELS_SAVED/optimizer3.pkl', 'rb') as f:
    weight_values = pickle.load(f)
model.optimizer.set_weights(weight_values)
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
epochs = 2
model.fit(
    train_generator,
    validation_data=valid_generator,
    epochs=epochs,
    steps_per_epoch=len_train // batch_size,
    validation_steps=len_test // batch_size,
    verbose=1,
    callbacks=[earlyStopping, reduce_lr]
)
Now I am getting the following error when running this code block (which, in the complete code above, is right before model.fit):
with open('/content/drive/MyDrive/MODELS_SAVED/optimizer3.pkl', 'rb') as f:
    weight_values = pickle.load(f)
model.optimizer.set_weights(weight_values)
ValueError: You called `set_weights(weights)` on optimizer Adam with a weight list of length 1245, but the optimizer was expecting 13 weights. Provided weights: [11907, array([[[[ 0.00000000e+00, -8.27514916e-04...
All I am trying to do is save the weights for the model and the optimizer, then build a new model where I add a few layers and load the weights for the base of the model as well as the weights for the optimizer.
The two models have different architectures, so the weights of one can't be loaded into the other, regardless of the fact that they inherit from the same base model. I think this is simply a case of fine-tuning a model (the saved model, in your case).
What you should do is change the way you create the new model: rather than loading the original ResNet model as the base model with include_top=False, try loading the saved model and implementing your own include_top. This can be done as:
for layer in saved_model.layers[:]:
    layer.trainable = False

x = Flatten()(saved_model.layers[-2].output)
Here the key thing is saved_model.layers[-2].output, which means the output of the second-to-last layer.
Hope this helps; if not, please clarify your doubts or let me know what I missed.
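Putting it together, a sketch under the assumption that you save the full model with model.save() rather than just the weights (the path 'full_model.h5' and the new head sizes are illustrative, not your actual values):
from keras.models import load_model, Model
from keras.layers import Dense

# hypothetical path: assumes the first model was saved with model.save(...)
saved_model = load_model('/content/drive/MyDrive/MODELS_SAVED/model_RESNET152/full_model.h5')

for layer in saved_model.layers[:]:
    layer.trainable = False

x = saved_model.layers[-2].output          # output of the second-to-last layer
x = Dense(512, activation='relu')(x)       # the new layers you want to add
predictions = Dense(num_classes, activation='softmax')(x)

model = Model(inputs=saved_model.input, outputs=predictions)
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])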
Please refer to the following notebook:
https://colab.research.google.com/drive/1j_zLqG1zUMi6UYPdc6gtmkJvHuawL4Sk?usp=sharing
{save,load}_weights on a model includes the weights of the optimizer. That would be the preferred way to initialise the optimizer weights.
You can copy the optimizer from one model to another.
The reason you are getting the error above is that the optimizer doesn't allocate its weights until training starts. If you really want to do it manually, just trigger model.fit() for 1 epoch on 1 datapoint and then set the optimizer weights manually.
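A sketch of that manual route (the dummy shapes match the dummy input from the question; weight_values is the pickled list from above):
import numpy as np

# one throwaway step on a single dummy sample so the optimizer builds its slot variables
dummy_x = np.random.uniform(size=(1, 224, 224, 3)).astype('float32')
dummy_y = np.zeros((1,), dtype='float32')
model.fit(dummy_x, dummy_y, epochs=1, verbose=0)

# now the optimizer's weight shapes exist, so set_weights can match them
model.optimizer.set_weights(weight_values)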
You can replace
base_model.load_weights('/content/drive/MyDrive/MODELS_SAVED/model_RESNET152/model_weights5.h5', by_name=True)
with open('/content/drive/MyDrive/MODELS_SAVED/optimizer3.pkl', 'rb') as f:
    weight_values = pickle.load(f)
model.optimizer.set_weights(weight_values)
with:
base_model.load_weights('/content/drive/MyDrive/MODELS_SAVED/model_RESNET152/model_weights5.h5', by_name=True)
model.optimizer = base_model.optimizer
After saving the first model's weights with model.save_weights('name.h5'), you should build a second model exactly like the first one; let's call it model2. Then load the weights you saved before into it with model2.load_weights('name.h5'). Use model.summary() to see the names and number of the first model's layers. For each layer, you can read its weights (and also its biases) into a variable with the get_weights() method. Here is an example:
x1 = model2.layers[1].get_weights()
Here, I put the weights and biases of the first layer (which in my model was a convolutional layer) into the variable x1.
x1[0] is a list of the weights of layer #1.
x1[1] is a list of the biases of layer #1.
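The counterpart of get_weights() is set_weights(), so the same pair can be copied into a matching layer of another model, for example:
x1 = model2.layers[1].get_weights()  # [kernel, bias] of layer #1
# model3 is a hypothetical model whose layer #1 has the same shape
model3.layers[1].set_weights(x1)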
This is the error message I got:
Traceback (most recent call last):
  File "/home/xxx/Documents/program/test.py", line 27, in <module>
    model.load_weights('models/model.h5')
  File "/home/xxx/Documents/program/venv/lib/python3.6/site-packages/tensorflow/python/keras/engine/network.py", line 1391, in load_weights
    saving.load_weights_from_hdf5_group(f, self.layers)
  File "/home/xxx/Documents/program/venv/lib/python3.6/site-packages/tensorflow/python/keras/engine/saving.py", line 732, in load_weights_from_hdf5_group
    ' layers.')
ValueError: You are trying to load a weight file containing 2 layers into a model with 0 layers.
Here is a minimal example that produces the error:
from tensorflow import keras
from data import get_data
X_train, y_train, X_val, y_val = get_data() # get some train and val data
model = keras.Sequential()
model.add(keras.layers.Dense(64, activation='relu'))
model.add(keras.layers.Dense(7, activation='softmax'))
model.compile(
    optimizer=keras.optimizers.Adam(1e-4),
    loss='categorical_crossentropy',
    metrics=['accuracy']
)
model.fit(
    x=X_train,
    y=y_train,
    batch_size=500,
    epochs=200,
    verbose=2,
    validation_data=(X_val, y_val)
)
model.save_weights('models/model.h5')
model.load_weights('models/model.h5')
Directly running this does not produce the error. However, when I run the program a second time with the training part commented out (from line 10 to 25), trying only to load the weights, I get this error.
I am using Tensorflow 1.9.0 and the built-in Keras.
As mentioned above, there seems to be a bug in Keras sequential mode: https://github.com/keras-team/keras/issues/10417.
However, you can get around this by using the Keras Functional API (you'll also find the Functional API much more useful when you're building trickier RNN models with complicated I/O and tensor concatenations).
The disadvantage of saving your neural network with the model.save_weights() method is that you have to invoke the model architecture before you load the .h5 weights into the NN. If you instead save the whole model (both parameters AND architecture), you'll find that it's much easier to load the trained model into a Python object. You can achieve this by using the model.save() method.
### TRAINING CODE
import tensorflow as tf
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
# some data
iris = load_iris()
X, y = iris.data, iris.target
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
y_train_oh = tf.keras.utils.to_categorical(y_train)
y_val_oh = tf.keras.utils.to_categorical(y_val)
# Keras Functional API
x = tf.keras.Input(shape=(4,))
dense = tf.keras.layers.Dense(64, activation='relu')(x)
dense = tf.keras.layers.Dense(3, activation='softmax')(dense)
model = tf.keras.Model(inputs=x, outputs=dense)
model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# training
model.fit(X_train, y_train_oh, 16, epochs=20, validation_data=(X_val, y_val_oh))
# save weights
model.save_weights('models/model_weights.h5')
# save weights AND architecture
model.save('models/model.h5')
### TESTING CODE
# Model loading using .h5 weights file
import tensorflow as tf
x = tf.keras.Input(shape=(4,))
dense = tf.keras.layers.Dense(64, activation='relu')(x)
dense = tf.keras.layers.Dense(3, activation='softmax')(dense)
model2 = tf.keras.Model(inputs=x, outputs=dense)
model2.load_weights('models/model_weights.h5')
# Model loading using .h5 model file
import tensorflow as tf
model3 = tf.keras.models.load_model('models/model.h5') # simpler API, but bigger filesize
I am trying to train a deep neural network using transfer learning in Keras with TensorFlow. There are different ways to do that. If your data is small, you can afford to compute the features using the pre-trained model for the entire dataset, and then use those features to train and test a small network; this is good because you don't need to compute those features for each batch at each epoch. However, if the data is large, it will be impossible to compute the features for the entire dataset, so we use ImageDataGenerator, flow_from_directory, and fit_generator. In that case the features are computed from scratch for each batch at each epoch, which makes things much slower. I was assuming that both approaches produce similar results in terms of accuracy and loss. The problem is that I took a small dataset, tried both approaches, and got completely different results. I would appreciate it if someone could tell me whether something is wrong in the provided code and/or why I am getting different results.
Approach for a large dataset:
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Dense
from keras.models import Model
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
train_generator = datagen.flow_from_directory('data/train',
                                              class_mode='categorical',
                                              batch_size=64, ...)
valid_generator = datagen.flow_from_directory('data/valid',
                                              class_mode='categorical',
                                              batch_size=64, ...)
base_model = InceptionV3(weights='imagenet', include_top=False)
x = base_model.output
x = Conv2D(filters=128, kernel_size=(2, 2))(x)
x = MaxPooling2D()(x)
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(2, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
for layer in base_model.layers:
    layer.trainable = False
model.compile(optimizer='rmsprop', loss='categorical_crossentropy',...)
model.fit_generator(generator=train_generator,
                    steps_per_epoch=len(train_generator),
                    validation_data=valid_generator,
                    validation_steps=len(valid_generator),
                    ...)
Approach for a small dataset:
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Dense
from keras.utils import np_utils
base_model = InceptionV3(weights='imagenet', include_top=False)
train_features = base_model.predict(preprocess_input(train_data))
valid_features = base_model.predict(preprocess_input(valid_data))
model = Sequential()
model.add(Conv2D(filters=128, kernel_size=(2, 2),
                 input_shape=(train_features.shape[1],
                              train_features.shape[2],
                              train_features.shape[3])))
model.add(MaxPooling2D())
model.add(GlobalAveragePooling2D())
model.add(Dense(1024, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', ...)
model.fit(train_features, np_utils.to_categorical(y_train, 2),
          validation_data=(valid_features, np_utils.to_categorical(y_valid, 2)),
          batch_size=64, ...)