Uncaught (in promise) Error: Unknown layer: - python

I'm developing a plant disease classification app using React and TensorFlow.
For that, I built an AI model with a CNN and preprocessing layers.
The model works fine on Google Colab, but when I converted it to JSON format to deploy it in my app, my browser's console shows this error:
Uncaught (in promise) Error: Unknown layer: RandomFlip. This may be due to one of the following reasons:
1. The layer is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code.
2. The custom layer is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().
This is my Python code:
# -*- coding: utf-8 -*-
"""Copie de Untitled7.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1IFoiRxZ-FLnM-SIc6YfMtVooH2s-cMP4
"""
from google.colab import drive
drive.mount('/content/drive')
import zipfile
import os
zip_ref = zipfile.ZipFile('/content/drive/MyDrive/SoniaKarimDatasetPFA_Agriculture/archive.zip','r')
zip_ref.extractall('/content')
zip_ref.close()
len(os.listdir('/content'))
# import the libraries needed for analysis
import numpy as np
import tensorflow as tf
from tensorflow.keras import models, layers, callbacks
import matplotlib.pyplot as plt
!pip install split-folders
# Some additional parameters
# It is good practice to declare these parameters
# in a separate code block, outside of any function
directory = '/content/PlantVillage'
IMAGE_SIZE = 258
BATCH_SIZE = 32
CHANNELS = 3
EPOCHS = 3
import splitfolders
splitfolders.ratio(directory, output="output",
                   seed=1337, ratio=(.7, .3), group_prefix=None, move=False)
# Time to deal with our images!
# Load them as a labeled tf.data.Dataset with this tf.keras helper:
dataset = tf.keras.preprocessing.image_dataset_from_directory(
    "/content/output/val",
    shuffle=True,
    image_size=(IMAGE_SIZE, IMAGE_SIZE),
    batch_size=BATCH_SIZE
)
#dataset = dataset.take(int(len(dataset)*0.35))
class_names = dataset.class_names
class_names
len(dataset)
plt.figure(figsize=(10, 10))
for image_batch, label_batch in dataset.take(1):
    for i in range(12):
        ax = plt.subplot(3, 4, i + 1)
        plt.imshow(image_batch[i].numpy().astype('uint8'))
        plt.title(class_names[label_batch[i]])
        plt.axis('off')
# print(image_batch[0].shape)
# print(label_batch.numpy())
train_size = int(len(dataset)*0.8)
train_size
train_ds = dataset.take(train_size)
test_ds = dataset.skip(train_size)
validation_size = int(len(dataset)*0.1)
validation_ds = test_ds.take(validation_size)
test_ds = test_ds.skip(validation_size)
def get_dataset_partitions_tf(ds, train_split=0.8,
                              val_split=0.1,  # was 0.2, which leaves no room for the test split
                              test_split=0.1,
                              shuffle=True,
                              shuffle_size=10000):
    ds_size = len(ds)
    if shuffle:
        ds = ds.shuffle(shuffle_size, seed=12)
    train_size = int(train_split * ds_size)
    val_size = int(val_split * ds_size)
    train_ds = ds.take(train_size)
    val_ds = ds.skip(train_size).take(val_size)
    test_ds = ds.skip(train_size).skip(val_size)  # the remainder; the original re-took the validation batches
    return train_ds, val_ds, test_ds
train_ds, val_ds, test_ds = get_dataset_partitions_tf(dataset)
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size = tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size = tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size = tf.data.AUTOTUNE)
data_augmentation = tf.keras.Sequential([
    layers.experimental.preprocessing.RandomFlip('horizontal_and_vertical'),
    layers.experimental.preprocessing.RandomRotation(0.2)
])
resize_and_rescale = tf.keras.Sequential([
    layers.experimental.preprocessing.Resizing(IMAGE_SIZE, IMAGE_SIZE),
    layers.experimental.preprocessing.Rescaling(1.0/255)
])
input_shape = (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, CHANNELS)
n_classes = 15
model = models.Sequential([
    resize_and_rescale,
    data_augmentation,
    layers.Conv2D(32, (3, 3), input_shape=input_shape, activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(n_classes, activation='softmax')
])
model.build(input_shape = input_shape )
model.summary()
from tensorflow.keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(
    min_delta=0.001,
    patience=10,
    restore_best_weights=True,
)
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=['accuracy']
)
history = model.fit(
    train_ds,
    epochs=EPOCHS,
    batch_size=BATCH_SIZE,
    verbose=1,
    callbacks=[early_stopping],
    validation_data=val_ds
)
score = model.evaluate(test_ds)
score
history.params
history.history.keys()
len(history.history['accuracy'])
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize = (8,8))
plt.subplot(1,2,1)
plt.plot(range(EPOCHS), acc, label = 'Training Accuracy')
plt.plot(range(EPOCHS), val_acc, label = 'Validation Accuracy')
plt.legend(loc = 'lower right')
plt.title('Training and validation Accuracy')
plt.subplot(1,2,2)
plt.plot(range(EPOCHS), loss, label = 'Training Loss')
plt.plot(range(EPOCHS), val_loss, label = 'Validation Loss')
plt.legend(loc = 'lower right')
plt.title('Training and validation Loss')
for images_batch, labels_batch in test_ds.take(1):
    first_image = images_batch[0].numpy().astype('uint8')  # was image_batch, a leftover name from an earlier cell
    first_label = labels_batch[0]
    print('first image to predict')
    plt.imshow(first_image)
    print('actual label: ', class_names[first_label])
    batch_prediction = model.predict(images_batch)
    print(class_names[np.argmax(batch_prediction[0])])
def predict(model, img):
    img_array = tf.keras.preprocessing.image.img_to_array(img)  # was images[i], which is not in scope here
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    predicted_class = class_names[np.argmax(predictions[0])]
    confidence = round(100 * np.max(predictions[0]), 2)
    return predicted_class, confidence
plt.figure(figsize=(15, 15))
for images, labels in test_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype('uint8'))
        predicted_class, confidence = predict(model, images[i].numpy())
        actual_class = class_names[labels[i]]
        plt.title(f"actual: {actual_class}, \n Predicted: {predicted_class} \n Confidence: {confidence} ")
        plt.axis('off')
!pip install tensorflowjs
import tensorflowjs as tfjs
tfjs.converters.save_keras_model(model, "mode.h5")
#model.save("model1.h5")
#!pip install tensorflowjs
!tensorflowjs_converter --input_format keras '/content/mode.h5' '/content/sabetna-model'
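A possible workaround (my sketch, not part of the original post): TensorFlow.js does not implement the Keras RandomFlip/RandomRotation preprocessing layers, and they are inactive at inference time anyway, so you can export an inference-only copy of the model that skips the data_augmentation block before converting. Whether Resizing/Rescaling are supported depends on your tfjs version; if not, drop them as well and resize/rescale in JavaScript instead:
# Sketch: rebuild the trained model without the augmentation block.
# The layer objects (and their trained weights) are reused, not copied.
inference_model = tf.keras.Sequential(
    [layer for layer in model.layers if layer is not data_augmentation]
)
inference_model.build(input_shape=(None, IMAGE_SIZE, IMAGE_SIZE, CHANNELS))
# save_keras_model writes the TF.js Layers format directly,
# so no separate tensorflowjs_converter call is needed afterwards.
tfjs.converters.save_keras_model(inference_model, '/content/sabetna-model')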

Related

ValueError: Shapes (None, 1) and (None, 101) are incompatible

The output Dense layer has 101 units, but this error pops up. The problem could be in how train_dataset and test_dataset are built, or in how the model is compiled. Help me out with this!
Also, if I want to sample the dataset and then assign the sample to training and testing, how do I do that? I'm new to TensorFlow and Python and the syntax is confusing.
BATCH_SIZE = 32
IMG_SIZE = (224, 224)
directory = "/Food/Food 101/images"
train_dataset = image_dataset_from_directory(directory,
                                             shuffle=True,
                                             batch_size=BATCH_SIZE,
                                             label_mode="int",
                                             image_size=IMG_SIZE,
                                             validation_split=0.2,
                                             subset='training',
                                             seed=42)
validation_dataset = image_dataset_from_directory(directory,
                                                  shuffle=True,
                                                  batch_size=BATCH_SIZE,
                                                  label_mode="int",
                                                  image_size=IMG_SIZE,
                                                  validation_split=0.2,
                                                  subset='validation',
                                                  seed=42)
class_names = train_dataset.class_names
plt.figure(figsize=(10, 10))
for images, labels in train_dataset.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)
def data_augmenter():
    data_augmentation = tf.keras.Sequential()
    data_augmentation.add(tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal"))
    data_augmentation.add(tf.keras.layers.experimental.preprocessing.RandomRotation(0.2))
    return data_augmentation
data_augmentation = data_augmenter()
for image, _ in train_dataset.take(1):
    plt.figure(figsize=(10, 10))
    first_image = image[0]
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        augmented_image = data_augmentation(tf.expand_dims(first_image, 0))
        plt.imshow(augmented_image[0] / 255)
        plt.axis('off')
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
IMG_SHAPE = IMG_SIZE + (3,)
IMG_SHAPE
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
                                               include_top=True,
                                               weights='imagenet')
base_model.summary()
nb_layers = len(base_model.layers)
print(base_model.layers[nb_layers - 2].name)
print(base_model.layers[nb_layers - 1].name)
image_batch, label_batch = next(iter(train_dataset))
feature_batch = base_model(image_batch)
print(feature_batch.shape)
label_batch
base_model.trainable = False
image_var = tf.Variable(image_batch)
pred = base_model(image_var)
tf.keras.applications.mobilenet_v2.decode_predictions(pred.numpy(), top=2)
image_shape=IMG_SIZE
def FC_model(image_shape=IMG_SIZE, data_augmentation=data_augmenter()):
    input_shape = image_shape + (3,)
    base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
                                                   include_top=False,
                                                   weights='imagenet')
    base_model.trainable = False
    inputs = tf.keras.Input(shape=image_shape + (3,))
    x = data_augmentation(inputs)
    x = preprocess_input(x)
    x = base_model(x, training=False)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = tf.keras.layers.Dropout(0.2)(x)
    prediction_layer = tf.keras.layers.Dense(101)
    outputs = prediction_layer(x)
    model = tf.keras.Model(inputs, outputs)
    return model
model2 = FC_model(IMG_SIZE, data_augmentation)
model2.summary()
base_learning_rate = 0.001
model2.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
               loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
               metrics=['accuracy'])
initial_epochs = 10
history = model2.fit(train_dataset, validation_data=validation_dataset, epochs=initial_epochs)
The problem is in how you are loading the data:
train_dataset = image_dataset_from_directory(directory,
                                             shuffle=True,
                                             batch_size=BATCH_SIZE,
                                             label_mode="int",
                                             image_size=IMG_SIZE,
                                             validation_split=0.2,
                                             subset='training',
                                             seed=42)
In particular, label_mode="int" means that your target variable is encoded as an integer (i.e., 1 if cat, 2 if dog, 3 if tree). You want to change it to label_mode="categorical", which one-hot encodes the labels into vectors of length 101. Note that one-hot labels pair with the CategoricalCrossentropy loss rather than the SparseCategoricalCrossentropy used in the question, so the compile step has to change accordingly.
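A minimal sketch combining the fix with the matching loss (the loss change is implied by the one-hot label format, though not stated in the original answer):
train_dataset = image_dataset_from_directory(directory,
                                             shuffle=True,
                                             batch_size=BATCH_SIZE,
                                             label_mode="categorical",  # one-hot labels, shape (None, 101)
                                             image_size=IMG_SIZE,
                                             validation_split=0.2,
                                             subset='training',
                                             seed=42)
model2.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
               loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
               metrics=['accuracy'])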

Evaluate U-Net by layer

I come from a medical background and am a newbie in this machine learning field. I am trying to train a U-Net model using Keras and TensorFlow for image segmentation. However, my loss values are all NaN and the prediction is all black.
I would like to check the U-Net layer by layer, but I don't know how to feed the data or where to start. What I mean by checking each layer is: feed my images to the first layer and see its output, then move on to the second layer, and so on until the last layer. I just want to see how the output is produced at each layer and find where the NaN values start. I'd really appreciate your help.
This is my code.
import os
import matplotlib.pyplot as plt
import tensorflow as tf
from keras_preprocessing.image import ImageDataGenerator
from tensorflow import keras
#Constants
SEED = 42
BATCH_SIZE_TRAIN = 16
BATCH_SIZE_TEST = 16
IMAGE_HEIGHT = 512
IMAGE_WIDTH = 512
IMG_SIZE = (IMAGE_HEIGHT, IMAGE_WIDTH)
data_dir = 'data'
data_dir_train = os.path.join(data_dir, 'training')
data_dir_train_image = os.path.join(data_dir_train, 'img')
data_dir_train_mask = os.path.join(data_dir_train, 'mask')
data_dir_test = os.path.join(data_dir, 'test')
data_dir_test_image = os.path.join(data_dir_test, 'img')
data_dir_test_mask = os.path.join(data_dir_test, 'mask')
NUM_TRAIN = 1413
NUM_TEST = 210
NUM_OF_EPOCHS = 10
def create_segmentation_generator_train(img_path, mask_path, BATCH_SIZE):
    data_gen_args = dict(rescale=1./255)
    img_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(**data_gen_args)  # was *data_gen_args, which unpacks the dict incorrectly
    img_generator = img_datagen.flow_from_directory(img_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    mask_generator = mask_datagen.flow_from_directory(mask_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    return zip(img_generator, mask_generator)

def create_segmentation_generator_test(img_path, mask_path, BATCH_SIZE):
    data_gen_args = dict(rescale=1./255)
    img_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(**data_gen_args)
    img_generator = img_datagen.flow_from_directory(img_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    mask_generator = mask_datagen.flow_from_directory(mask_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    return zip(img_generator, mask_generator)
def display(display_list):
    plt.figure(figsize=(15, 15))
    title = ['Input Image', 'True Mask', 'Predicted Mask']
    for i in range(len(display_list)):
        plt.subplot(1, len(display_list), i + 1)
        plt.title(title[i])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]), cmap='gray')
    plt.show()

def show_dataset(datagen, num=1):
    for i in range(0, num):
        image, mask = next(datagen)
        display([image[0], mask[0]])
def unet(n_levels, initial_features=32, n_blocks=2, kernel_size=3, pooling_size=2, in_channels=1, out_channels=1):
    # n_blocks = how many convs in each level
    inputs = keras.layers.Input(shape=(IMAGE_HEIGHT, IMAGE_WIDTH, in_channels))
    x = inputs
    convpars = dict(kernel_size=kernel_size, activation='relu', padding='same')
    # downstream
    skips = {}
    for level in range(n_levels):
        for _ in range(n_blocks):
            x = keras.layers.Conv2D(initial_features * 2 ** level, **convpars)(x)
        if level < n_levels - 1:
            skips[level] = x
            x = keras.layers.MaxPool2D(pooling_size)(x)
    # upstream
    for level in reversed(range(n_levels - 1)):
        x = keras.layers.Conv2DTranspose(initial_features * 2 ** level, strides=pooling_size, **convpars)(x)
        x = keras.layers.Concatenate()([x, skips[level]])
        for _ in range(n_blocks):
            x = keras.layers.Conv2D(initial_features * 2 ** level, **convpars)(x)
    # output
    activation = 'sigmoid' if out_channels == 1 else 'softmax'
    x = keras.layers.Conv2D(out_channels, kernel_size=1, activation=activation, padding='same')(x)  # was hardcoded 'sigmoid', ignoring the variable above
    return keras.Model(inputs=[inputs], outputs=[x], name=f'UNET-L{n_levels}-F{initial_features}')
EPOCH_STEP_TRAIN = NUM_TRAIN // BATCH_SIZE_TRAIN
EPOCH_STEP_TEST = NUM_TEST // BATCH_SIZE_TEST  # was BATCH_SIZE_TRAIN
# the generators used below were never created in the original post:
train_generator = create_segmentation_generator_train(data_dir_train_image, data_dir_train_mask, BATCH_SIZE_TRAIN)
test_generator = create_segmentation_generator_test(data_dir_test_image, data_dir_test_mask, BATCH_SIZE_TEST)
model = unet(4)
model.compile(optimizer="adam", loss='binary_crossentropy', metrics=['accuracy'])
model.fit_generator(generator=train_generator, steps_per_epoch=EPOCH_STEP_TRAIN, validation_data=test_generator, validation_steps=EPOCH_STEP_TEST, epochs=NUM_OF_EPOCHS)
def show_prediction(datagen, num=1):
    for i in range(0, num):
        image, mask = next(datagen)
        pred_mask = model.predict(image)[0] > 0.5
        display([image[0], mask[0], pred_mask])

show_prediction(test_generator, 2)
To investigate your model layer by layer, see this example of how to show the model summary and how to save the model:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# create the input
inputs = keras.Input(shape=(1,))
# create a layer
dense = layers.Dense(64, activation="relu")
x = dense(inputs)
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10)(x)
# assemble the model
model = keras.Model(inputs=inputs, outputs=outputs, name="Spesiaali")
# inspect it
model.summary()
# save it
model.save("./model_to_be_investigated_by_someone_else_to_help_you")
...this makes it possible for you to see the whole model structure for "debugging your AI". If you do not find the solution yourself, add the last line of the example to your own code, put the resulting folder e.g. on GitHub, and ask someone else to look at the structure of your model to help you solve the problem.
(The answer referred to a screenshot here: the blue drawing showed the output of model.summary() and the red line the output shape of the first dense layer.)
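Since the goal is really to find where the NaNs first appear, here is a small sketch (my addition; it assumes `model` is the compiled U-Net and `image` is one batch drawn from the generator) that prints, per layer, whether its output contains NaNs:
import numpy as np
# a model that returns every intermediate activation of the original model
extractor = keras.Model(inputs=model.inputs,
                        outputs=[layer.output for layer in model.layers])
activations = extractor.predict(image)
for layer, act in zip(model.layers, activations):
    print(layer.name, 'contains NaN:', np.isnan(act).any())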

Failed to train with tf.keras.applications.MobileNetV2

Environment:
TF 2.0
Python 3.5
Ubuntu 16.04
Problem:
I'm trying to use the pre-trained MobileNetV2, but the accuracy doesn't increase:
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
                                               include_top=False,
                                               weights='imagenet')
The script is copied from the TensorFlow 2.0 transfer-learning tutorial (https://www.tensorflow.org/tutorials/images/transfer_learning?hl=zh-cn).
The only change I made is the dataset fed into the network. The original code does binary classification between dogs and cats, and everything works. However, the accuracy never increases when using multi-class datasets like "mnist" or "tf_flowers". Please note that I used the correct loss function and metrics.
Naive model results vs. Keras MobileNetV2 results: (screenshots omitted)
Here is the code:
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Flatten, Conv2D, GlobalAveragePooling2D
from tensorflow.keras import Model
keras = tf.keras
import tensorflow_datasets as tfds
# tfds.disable_progress_bar()
IMG_SIZE = 224
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
def format_example(image, label):
    if image.shape[-1] == 1:
        image = tf.concat([image, image, image], 2)
    image = tf.cast(image, tf.float32)
    image = (image / 127.5) - 1
    image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
    return image, label
## ---- functional model ---- ##
class TinyModel():
    def __init__(self, num_classes, hiddens=32, input_shape=IMG_SHAPE):
        self.num_classes = num_classes
        self.input_shape = input_shape
        self.hiddens = hiddens

    def build(self):
        inputs = Input(shape=self.input_shape)
        x = Conv2D(16, 3, activation="relu", strides=2)(inputs)
        x = Conv2D(32, 3, activation="relu", strides=2)(x)
        x = Conv2D(32, 3, activation="relu", strides=2)(x)
        x = Conv2D(16, 3, activation="relu")(x)
        x = Flatten()(x)
        x = Dense(self.hiddens, activation="relu")(x)
        outputs = Dense(self.num_classes, activation="softmax")(x)
        model = Model(inputs=inputs, outputs=outputs, name='my_model')
        return model
def assemble_model(num_classes, model_name='MobileNetV2'):
    base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
                                                   weights='imagenet',
                                                   include_top=False)
    model = tf.keras.Sequential([
        base_model,
        GlobalAveragePooling2D(),
        Dense(num_classes, activation='softmax')
    ])
    model.trainable = True
    return model
## ---- dataset preparation -----##
SPLIT_WEIGHTS = (8, 1, 1)
splits = tfds.Split.TRAIN.subsplit(weighted=SPLIT_WEIGHTS)
(raw_train, raw_validation, raw_test), metadata = tfds.load(
    'tf_flowers', split=list(splits),
    with_info=True, as_supervised=True)
get_label_name = metadata.features['label'].int2str
train = raw_train.map(format_example)
validation = raw_validation.map(format_example)
test = raw_test.map(format_example)
BATCH_SIZE = 32
SHUFFLE_BUFFER_SIZE = 1000
train_ds = train.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE)
validation_ds = validation.batch(BATCH_SIZE)
test_ds = test.batch(BATCH_SIZE)
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
## ----- model config ---- ##
# Create an instance of the model
model = TinyModel(num_classes=5).build() # model 1
# model = assemble_model(num_classes=5) # model 2
model.summary()
## ----- training config -----##
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
## ----- training loop ----- ##
@tf.function
def train_step(images, labels):
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(labels, predictions)

@tf.function
def test_step(images, labels):
    predictions = model(images)
    t_loss = loss_object(labels, predictions)
    test_loss(t_loss)
    test_accuracy(labels, predictions)
EPOCHS = 5
for epoch in range(EPOCHS):
    # Reset the metrics at the start of the next epoch
    train_loss.reset_states()
    train_accuracy.reset_states()
    test_loss.reset_states()
    test_accuracy.reset_states()
    for images, labels in train_ds:
        train_step(images, labels)
    for test_images, test_labels in test_ds:
        test_step(test_images, test_labels)
    template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
    print(template.format(epoch + 1,
                          train_loss.result(),
                          train_accuracy.result() * 100,
                          test_loss.result(),
                          test_accuracy.result() * 100))
---------------------- SOLVED -----------------------
Solution: add the argument training=True when calling the keras.applications model in a custom training loop. For example:
model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE, weights="imagenet", include_top=False)
pred = model(inputs, training=True)
The cause is likely the BatchNorm layers. Models with BN layers work well in the Keras training loop, model.fit(), with nothing to take care of. However, they cannot learn anything in a custom training loop if you forget to set training=True when calling model().
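Applied to the question's custom loop, the fix is that single argument (sketch of the corrected train_step):
@tf.function
def train_step(images, labels):
    with tf.GradientTape() as tape:
        # training=True makes BatchNorm use batch statistics and update its
        # moving averages, which model.fit() would otherwise handle for you
        predictions = model(images, training=True)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(labels, predictions)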
The problem is that you set all your parameters to be non-trainable; check the model summary and you will see the trainable parameter count is (close to) zero. Change this line (or just delete it):
base_model.trainable = False
to
base_model.trainable = True
and everything will work fine.

ValueError: Error when checking target: expected flatten_1 to have shape (2048,) but got array with shape (2,)

I'm trying to run this code, and I have this error:
ValueError: Error when checking target: expected flatten_4 to have shape (2048,) but got array with shape (2,)
# imports required by the code below (not shown in the original post)
import os
from keras.models import Sequential, Model
from keras.layers import Dense
from keras.applications.resnet50 import ResNet50
from keras import optimizers
from keras.optimizers import SGD

NUM_CLASSES = 2
CHANNELS = 3
IMAGE_RESIZE = 224
RESNET50_POOLING_AVERAGE = 'avg'
DENSE_LAYER_ACTIVATION = 'softmax'
OBJECTIVE_FUNCTION = 'categorical_crossentropy'
NUM_EPOCHS = 10
EARLY_STOP_PATIENCE = 3
STEPS_PER_EPOCH_TRAINING = 10
STEPS_PER_EPOCH_VALIDATION = 10
BATCH_SIZE_TRAINING = 100
BATCH_SIZE_VALIDATION = 100
BATCH_SIZE_TESTING = 1
resnet_weights_path = '../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
model = Sequential()
train_data_dir = "C:\\Users\\Desktop\\RESNET"
model = ResNet50(include_top=True, weights='imagenet')
model.layers.pop()
model = Model(inputs=model.input, outputs=model.layers[-1].output)
model.summary()
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.01, momentum=0.9), metrics= ['binary_accuracy'])
data_dir = "C:\\Users\\Desktop\\RESNET"
batch_size = 32
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
image_size = IMAGE_RESIZE
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
def append_ext(fn):
    return fn + ".jpg"
from os import listdir
from os.path import isfile, join
dir_path = os.path.dirname(os.path.realpath(__file__))
train_dir_path = os.path.join(dir_path, 'data')
onlyfiles = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
data_labels = [0, 1]
t = []
maxi = 25145
LieOffset = 15799
i = 0
while i < maxi:  # build the label list
    if i <= LieOffset:
        t.append(data_labels[0])  # 'Lie' (the original referenced an undefined `label` dict)
    else:
        t.append(data_labels[1])  # 'Truth'
    i = i + 1
train_datagenerator = ImageDataGenerator(rescale=1./255,
                                         shear_range=0.2,
                                         zoom_range=0.2,
                                         horizontal_flip=True,
                                         validation_split=0.2)
train_generator = train_datagenerator.flow_from_directory(
    train_data_dir,
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_TRAINING,
    class_mode='categorical', shuffle=False, subset='training')
validation_generator = train_datagenerator.flow_from_directory(
    train_data_dir,  # same directory as the training data
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_TRAINING,
    class_mode='categorical', shuffle=False, subset='validation')
(BATCH_SIZE_TRAINING, len(train_generator), BATCH_SIZE_VALIDATION, len(validation_generator))
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint
cb_early_stopper = EarlyStopping(monitor = 'val_loss', patience = EARLY_STOP_PATIENCE)
cb_checkpointer = ModelCheckpoint(filepath = '../working/best.hdf5', monitor = 'val_loss', save_best_only = True, mode = 'auto')
from sklearn.model_selection import ParameterGrid  # sklearn.grid_search was removed; ParameterGrid now lives in model_selection
param_grid = {'epochs': [5, 10, 15], 'steps_per_epoch': [10, 20, 50]}
grid = ParameterGrid(param_grid)
# keep the parameters with the best val_loss as the final model
for params in grid:
    print(params)
    fit_history = model.fit_generator(
        train_generator,
        steps_per_epoch=STEPS_PER_EPOCH_TRAINING,
        epochs=NUM_EPOCHS,
        validation_data=validation_generator,
        validation_steps=STEPS_PER_EPOCH_VALIDATION,
        callbacks=[cb_checkpointer, cb_early_stopper])
model.load_weights("../working/best.hdf5")
The error suggests that your model's output layer should have 2 nodes, whereas it has 2048, because you are using the output of the avg_pool layer of the ResNet50 model as your model output. You can add a Dense layer with 2 nodes on top of the avg_pool layer to solve the problem.
model = ResNet50(include_top=True, weights='imagenet')
print(model.summary())
x = model.get_layer('avg_pool').output
predictions = Dense(2, activation='sigmoid')(x)
model = Model(inputs=model.input, outputs=predictions)
print(model.summary())
As I'm not quite sure what type of problem you are solving, I assumed multilabel (2-class) classification, since your data label shape is (2,).
However, if you are solving a binary classification problem, you need to change your labels so that each is either 1 or 0: change class_mode='categorical' to class_mode='binary' in both train_generator and validation_generator. In that case the model output layer should have 1 node:
predictions = Dense(1, activation='sigmoid')(x)
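Putting the binary variant together (a sketch assembled from the pieces above; `optimizers` as imported in the question's code):
model = ResNet50(include_top=True, weights='imagenet')
x = model.get_layer('avg_pool').output
predictions = Dense(1, activation='sigmoid')(x)  # single node for binary labels
model = Model(inputs=model.input, outputs=predictions)
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.SGD(lr=0.01, momentum=0.9),
              metrics=['binary_accuracy'])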

Keras + TensorFlow Realtime training chart

I have the following code running inside a Jupyter notebook:
# Visualize training history
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
import numpy
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load pima indians dataset
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:,0:8]
Y = dataset[:,8]
# create model
model = Sequential()
model.add(Dense(12, input_dim=8, kernel_initializer='uniform', activation='relu'))
model.add(Dense(8, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model
history = model.fit(X, Y, validation_split=0.33, epochs=150, batch_size=10, verbose=0)
# list all data in history
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
The code collects the epoch history, then displays it once training has finished.
Q: How can I make the chart update during training, so I can see the changes in real time?
There is the livelossplot Python package for live training-loss plots in Jupyter Notebook for Keras (disclaimer: I am the author).
from livelossplot import PlotLossesKeras

model.fit(X_train, Y_train,
          epochs=10,
          validation_data=(X_test, Y_test),
          callbacks=[PlotLossesKeras()],
          verbose=0)
To see how it works, look at its source, especially this file: https://github.com/stared/livelossplot/blob/master/livelossplot/outputs/matplotlib_plot.py (it uses from IPython.display import clear_output and clear_output(wait=True)).
A fair disclaimer: it does interfere with Keras output.
Keras comes with a callback for TensorBoard.
You can easily add this behaviour to your model and then run TensorBoard on top of the logging data.
callbacks = [TensorBoard(log_dir='./logs')]
result = model.fit(X, Y, ..., callbacks=callbacks)
And then in your shell:
tensorboard --logdir=./logs
If you need it in your notebook, you can also write your own callback to get metrics while training:
from tensorflow.keras.callbacks import Callback

class LogCallback(Callback):
    def on_epoch_end(self, epoch, logs=None):
        print(logs["accuracy"])  # the key is 'accuracy' ('acc' in older Keras), not 'train_accuracy'

This would get the training accuracy at the end of the current epoch and print it. There's some good documentation around callbacks on the official Keras site.
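If you'd rather stay inside the notebook without extra packages, a minimal live-updating chart can be built the same way livelossplot does it, with clear_output (my sketch; it assumes 'loss' and 'val_loss' are present in logs, i.e., fit() is called with validation data):
import matplotlib.pyplot as plt
from IPython.display import clear_output
from tensorflow.keras.callbacks import Callback

class LivePlot(Callback):
    def on_train_begin(self, logs=None):
        self.losses, self.val_losses = [], []
    def on_epoch_end(self, epoch, logs=None):
        self.losses.append(logs['loss'])
        self.val_losses.append(logs.get('val_loss'))
        clear_output(wait=True)  # redraw the figure in place each epoch
        plt.plot(self.losses, label='train loss')
        plt.plot(self.val_losses, label='val loss')
        plt.xlabel('epoch')
        plt.legend(loc='upper right')
        plt.show()

# usage: model.fit(X, Y, validation_split=0.33, epochs=150, callbacks=[LivePlot()], verbose=0)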
This gives you an idea of the simplest approach.
[ Sample ]:
# https://stackoverflow.com/questions/71748896/how-to-plot-a-graph-of-training-time-and-batch-size-of-neural-network
import os
from os.path import exists
import matplotlib.pyplot as plt
import tensorflow as tf
import time
import h5py
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
os.environ['TF_GPU_ALLOCATOR'] = 'cuda_malloc_async'
print(os.getenv('TF_GPU_ALLOCATOR'))
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
epoch_1_time = [ ]
epoch_5_time = [ ]
epoch_10_time = [ ]
epoch_50_time = [ ]
epoch_100_time = [ ]
database_buffer = "F:\\models\\buffer\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
database_buffer_dir = os.path.dirname(database_buffer)
if not exists(database_buffer_dir):
    os.mkdir(database_buffer_dir)
    print("Create directory: " + database_buffer_dir)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Functions
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# ...
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
# Create hdf5 file
hdf5_file = h5py.File(database_buffer, mode='w')
# Train images
hdf5_file['x_train'] = train_images
hdf5_file['y_train'] = train_labels
# Test images
hdf5_file['x_test'] = test_images
hdf5_file['y_test'] = test_labels
hdf5_file.close()
# Visualize dataset train sample
hdf5_file = h5py.File(database_buffer, mode='r')
# Load features
# x_train = hdf5_file['x_train'][0: 50000]
# x_test = hdf5_file['x_test'][0: 10000]
# y_train = hdf5_file['y_train'][0: 50000]
# y_test = hdf5_file['y_test'][0: 10000]
x_train = hdf5_file['x_train'][0: 100]
x_test = hdf5_file['x_test'][0: 100]
y_train = hdf5_file['y_train'][0: 100]
y_test = hdf5_file['y_test'][0: 100]
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape=(32, 32, 3)),
    tf.keras.layers.Normalization(mean=3., variance=2.),
    tf.keras.layers.Normalization(mean=4., variance=6.),
    tf.keras.layers.Conv2DTranspose(2, 3, activation='relu', padding="same"),
    tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid'),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(4 * 256),
    tf.keras.layers.Reshape((4 * 256, 1)),
    tf.keras.layers.LSTM(128, return_sequences=True, return_state=False),
    tf.keras.layers.LSTM(128, name='LSTM256'),
    tf.keras.layers.Dropout(0.2),
])
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(64, activation='relu', name='dense64'))
model.add(tf.keras.layers.Dense(7))
model.summary()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# `log_dir` and `file_writer` are used below but were never defined in the
# original answer; something like this is needed (path taken from the
# tensorboard commands at the end):
log_dir = "F:\\models\\checkpoint\\test_tf_plot_graph"
file_writer = tf.summary.create_file_writer(log_dir)

class custom_callback_5(tf.keras.callbacks.Callback):
    global epoch_5
    val_dir = os.path.join(log_dir, 'validation')
    print('val_dir: ' + val_dir)
    epoch_5 = 0
    def on_epoch_end(self, epoch, logs={}):
        global epoch_5
        time_counter = time.perf_counter()
        epoch_1_time.append(epoch)
        if epoch == 1:
            epoch_5 = time_counter
        if epoch % 5 == 0:
            epoch_5 = time_counter
            epoch_5_time.append(epoch_5)
        # updates
        with file_writer.as_default():
            tf.summary.scalar("epoch_5", epoch_5, step=epoch)
            file_writer.flush()
custom_callback_5 = custom_callback_5()
class custom_callback_10(tf.keras.callbacks.Callback):
    global epoch_10
    epoch_10 = 0
    def on_epoch_end(self, epoch, logs={}):
        global epoch_10
        time_counter = time.perf_counter()
        # epoch_1_time.append(epoch)
        if epoch == 1:
            epoch_10 = time_counter
        if epoch % 10 == 0:
            epoch_10 = time_counter
            epoch_10_time.append(epoch_10)
        # updates
        with file_writer.as_default():
            tf.summary.scalar("epoch_10", epoch_10, step=epoch)
            file_writer.flush()
custom_callback_10 = custom_callback_10()
class custom_callback_50(tf.keras.callbacks.Callback):
    global epoch_50
    epoch_50 = 0
    def on_epoch_end(self, epoch, logs={}):
        global epoch_50
        time_counter = time.perf_counter()
        # epoch_1_time.append(epoch)
        if epoch == 1:
            epoch_50 = time_counter
        if epoch % 50 == 0:
            epoch_50 = time_counter
            epoch_50_time.append(epoch_50)
        # updates
        with file_writer.as_default():
            tf.summary.scalar("epoch_50", epoch_50, step=epoch)
            file_writer.flush()
custom_callback_50 = custom_callback_50()
class custom_callback_100(tf.keras.callbacks.Callback):
    global epoch_100
    epoch_100 = 0
    def on_epoch_end(self, epoch, logs={}):
        global epoch_100
        time_counter = time.perf_counter()
        # epoch_1_time.append(epoch)
        if epoch == 1:
            epoch_100 = time_counter
        if epoch % 100 == 0:
            epoch_100 = time_counter
            epoch_100_time.append(epoch_100)
        # updates
        with file_writer.as_default():
            tf.summary.scalar("epoch_100", epoch_100, step=epoch)
            file_writer.flush()
custom_callback_100 = custom_callback_100()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam( learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam' )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.MeanSquaredLogarithmicError(reduction=tf.keras.losses.Reduction.AUTO, name='mean_squared_logarithmic_error')
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit(x_train, y_train, epochs=1000, batch_size=5 ,validation_data=(x_test, y_test), callbacks=[custom_callback_5])
history = model.fit(x_train, y_train, epochs=1000, batch_size=10 ,validation_data=(x_test, y_test), callbacks=[custom_callback_10])
history = model.fit(x_train, y_train, epochs=1000, batch_size=50 ,validation_data=(x_test, y_test), callbacks=[custom_callback_50])
history = model.fit(x_train, y_train, epochs=1000, batch_size=100 ,validation_data=(x_test, y_test), callbacks=[custom_callback_100])
plt.plot(epoch_1_time, epoch_5_time)
plt.plot(epoch_1_time, epoch_10_time)
plt.plot(epoch_1_time, epoch_50_time)
plt.plot(epoch_1_time, epoch_100_time)
plt.legend(["epoch_5_time", "epoch_10_time", "epoch_50_time", "epoch_100_time"])
plt.show()
plt.close()
input('...')
## tensorboard --inspect --logdir="F:\\models\\checkpoint\\test_tf_plot_graph\\"
## tensorboard --logdir="F:\\models\\checkpoint\\test_tf_plot_graph\\"
[ Output ]:
Event statistics for F:\\models\\checkpoint\\test_tf_plot_graph\validation:
audio -
graph -
histograms -
images -
scalars -
sessionlog:checkpoint -
sessionlog:start -
sessionlog:stop -
tensor
first_step 20
last_step 6
max_step 140
min_step 0
num_steps 14
outoforder_steps [(20, 0), (40, 1), (60, 2), (80, 3), (100, 4), (120, 5), (140, 6)]
======================================================================
...
