Saving model on Tensorflow 2.7.0 with data augmentation layer - python

I am getting an error when trying to save a model that contains data augmentation layers with Tensorflow version 2.7.0.
Here is the data augmentation code:
input_shape_rgb = (img_height, img_width, 3)
data_augmentation_rgb = tf.keras.Sequential(
    [
        layers.RandomFlip("horizontal"),
        layers.RandomFlip("vertical"),
        layers.RandomRotation(0.5),
        layers.RandomZoom(0.5),
        layers.RandomContrast(0.5),
        RandomColorDistortion(name='random_contrast_brightness/none'),
    ]
)
Now I build my model like this:
# Build the model
input_shape = (img_height, img_width, 3)
model = Sequential([
    layers.Input(input_shape),
    data_augmentation_rgb,
    layers.Rescaling(1./255),
    layers.Conv2D(16, kernel_size, padding=padding, activation='relu', strides=1,
                  data_format='channels_last'),
    layers.MaxPooling2D(),
    layers.BatchNormalization(),
    layers.Conv2D(32, kernel_size, padding=padding, activation='relu'),  # best 4
    layers.MaxPooling2D(),
    layers.BatchNormalization(),
    layers.Conv2D(64, kernel_size, padding=padding, activation='relu'),  # best 3
    layers.MaxPooling2D(),
    layers.BatchNormalization(),
    layers.Conv2D(128, kernel_size, padding=padding, activation='relu'),  # best 3
    layers.MaxPooling2D(),
    layers.BatchNormalization(),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),  # best 1
    layers.Dropout(0.1),
    layers.Dense(128, activation='relu'),  # best 1
    layers.Dropout(0.1),
    layers.Dense(64, activation='relu'),  # best 1
    layers.Dropout(0.1),
    layers.Dense(num_classes, activation='softmax')
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=metrics)
model.summary()
Then, after training is done, I just call:
model.save("./")
And I'm getting this error:
---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
<ipython-input-84-87d3f09f8bee> in <module>()
----> 1 model.save("./")

/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
     65   except Exception as e:  # pylint: disable=broad-except
     66     filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67     raise e.with_traceback(filtered_tb) from None
     68   finally:
     69     del filtered_tb

/usr/local/lib/python3.7/dist-packages/tensorflow/python/saved_model/function_serialization.py in serialize_concrete_function(concrete_function, node_ids, coder)
     66   except KeyError:
     67     raise KeyError(
---> 68       f"Failed to add concrete function '{concrete_function.name}' to object-"
     69       f"based SavedModel as it captures tensor {capture!r} which is unsupported"
     70       " or not reachable from root. "

KeyError: "Failed to add concrete function 'b'__inference_sequential_46_layer_call_fn_662953'' to object-based SavedModel as it captures tensor <tf.Tensor: shape=(), dtype=resource, value=<Resource Tensor>> which is unsupported or not reachable from root. One reason could be that a stateful object or a variable that the function depends on is not assigned to an attribute of the serialized trackable object (see SaveTest.test_captures_unreachable_variable)."
I investigated the cause of this error by changing the architecture of my model, and found that it comes from the data_augmentation layer. The RandomFlip, RandomRotation, and other preprocessing layers have moved from layers.experimental.preprocessing.RandomFlip to layers.RandomFlip, but the error appears either way.

This seems to be a bug in Tensorflow 2.7 when using model.save combined with the parameter save_format="tf", which is set by default. The layers RandomFlip, RandomRotation, RandomZoom, and RandomContrast are causing the problem, since they are not serializable. Interestingly, the Rescaling layer can be saved without any problems. A workaround is to simply save your model in the older Keras H5 format with model.save("test.h5", save_format='h5'):
import tensorflow as tf
import numpy as np

class RandomColorDistortion(tf.keras.layers.Layer):
    def __init__(self, contrast_range=[0.5, 1.5],
                 brightness_delta=[-0.2, 0.2], **kwargs):
        super(RandomColorDistortion, self).__init__(**kwargs)
        self.contrast_range = contrast_range
        self.brightness_delta = brightness_delta

    def call(self, images, training=None):
        # Apply the random distortion only during training.
        if not training:
            return images
        contrast = np.random.uniform(
            self.contrast_range[0], self.contrast_range[1])
        brightness = np.random.uniform(
            self.brightness_delta[0], self.brightness_delta[1])
        images = tf.image.adjust_contrast(images, contrast)
        images = tf.image.adjust_brightness(images, brightness)
        images = tf.clip_by_value(images, 0, 1)
        return images

    def get_config(self):
        # Serialize the constructor arguments so the layer can be re-created on load.
        config = super(RandomColorDistortion, self).get_config()
        config.update({"contrast_range": self.contrast_range,
                       "brightness_delta": self.brightness_delta})
        return config
input_shape_rgb = (256, 256, 3)
data_augmentation_rgb = tf.keras.Sequential(
    [
        tf.keras.layers.RandomFlip("horizontal"),
        tf.keras.layers.RandomFlip("vertical"),
        tf.keras.layers.RandomRotation(0.5),
        tf.keras.layers.RandomZoom(0.5),
        tf.keras.layers.RandomContrast(0.5),
        RandomColorDistortion(name='random_contrast_brightness/none'),
    ]
)

input_shape = (256, 256, 3)
padding = 'same'
kernel_size = 3
model = tf.keras.Sequential([
    tf.keras.layers.Input(input_shape),
    data_augmentation_rgb,
    tf.keras.layers.Rescaling(1./255),
    tf.keras.layers.Conv2D(16, kernel_size, padding=padding, activation='relu', strides=1,
                           data_format='channels_last'),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Conv2D(32, kernel_size, padding=padding, activation='relu'),  # best 4
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Conv2D(64, kernel_size, padding=padding, activation='relu'),  # best 3
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Conv2D(128, kernel_size, padding=padding, activation='relu'),  # best 3
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),  # best 1
    tf.keras.layers.Dropout(0.1),
    tf.keras.layers.Dense(128, activation='relu'),  # best 1
    tf.keras.layers.Dropout(0.1),
    tf.keras.layers.Dense(64, activation='relu'),  # best 1
    tf.keras.layers.Dropout(0.1),
    tf.keras.layers.Dense(5, activation='softmax')
])
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.summary()
model.save("test.h5", save_format='h5')
Loading your model with your custom layer would then look like this:
model = tf.keras.models.load_model('test.h5', custom_objects={'RandomColorDistortion': RandomColorDistortion})
where RandomColorDistortion is the name of your custom layer.
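To sanity-check the round trip, you can push a dummy batch through the reloaded model (a minimal sketch, assuming the 256x256x3 input and 5 classes from the example above):
import numpy as np

dummy_batch = np.random.rand(2, 256, 256, 3).astype('float32')
preds = model.predict(dummy_batch)
print(preds.shape)  # expected: (2, 5)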

You can also downgrade Keras and Tensorflow to version 2.6.

Related

Implementing DCGAN in Keras but it is not training properly

I am trying to implement the DCGAN presented in this article. Here are my generator and discriminator:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Conv2D, Conv2DTranspose, BatchNormalization,
                                     LeakyReLU, ReLU, Dropout, Flatten, Dense, Reshape)

ki = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)

def discriminator_model():
    discriminator = Sequential([
        Conv2D(64, (3, 3), strides=(2, 2), padding='same', kernel_initializer=ki,
               input_shape=[64, 64, 3]),  # no BatchNormalization in this layer
        LeakyReLU(alpha=0.2),
        Dropout(0.4),
        Conv2D(64, (3, 3), strides=(2, 2), padding='same', kernel_initializer=ki),
        BatchNormalization(),
        LeakyReLU(alpha=0.2),
        Dropout(0.4),
        Flatten(),
        Dense(1, activation='sigmoid', kernel_initializer=ki)
    ])
    return discriminator
===========================================
noise_shape = 100

def generator_model():
    generator = Sequential([
        Dense(4*4*512, input_shape=[noise_shape]),
        Reshape([4, 4, 512]),
        Conv2DTranspose(256, kernel_size=4, strides=2, padding="same", kernel_initializer=ki),
        BatchNormalization(),
        ReLU(),
        Conv2DTranspose(128, kernel_size=4, strides=2, padding="same", kernel_initializer=ki),
        BatchNormalization(),
        ReLU(),
        Conv2DTranspose(64, kernel_size=4, strides=2, padding="same", kernel_initializer=ki),
        BatchNormalization(),
        ReLU(),
        Conv2DTranspose(3, kernel_size=4, strides=2, padding="same", kernel_initializer=ki,
                        activation='tanh')  # 3 filters, also no BatchNormalization in this layer
    ])
    return generator
Here I have combined these two to build the DCGAN:
generator = generator_model()
discriminator = discriminator_model()
DCGAN = Sequential([generator, discriminator])

opt = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
discriminator.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
discriminator.trainable = False
DCGAN.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
Then I prepared my batches and tried to train my model. Here is the code:
epochs = 500
batch_size = 128
loss_from_discriminator_model = []
loss_from_generator_model = []
acc_dis = []
acc_gen = []

with tf.device('/gpu:0'):
    for epoch in range(epochs):
        for i in range(images.shape[0] // batch_size):
            # Training Discriminator
            noise = np.random.uniform(-1, 1, size=[batch_size, noise_shape])
            gen_image = generator.predict_on_batch(noise)  # Generating fake images
            train_dataset = images[i*batch_size:(i+1)*batch_size]
            train_labels_real = np.ones(shape=(batch_size, 1))  # Real image labels
            discriminator.trainable = True
            d_loss_real, d_acc_real = discriminator.train_on_batch(train_dataset, train_labels_real)  # Training on real images
            train_labels_fake = np.zeros(shape=(batch_size, 1))
            d_loss_fake, d_acc_fake = discriminator.train_on_batch(gen_image, train_labels_fake)  # Training on fake images

            # Training Generator
            noise = np.random.uniform(-1, 1, size=[batch_size, noise_shape])
            train_label_fake_for_gen_training = np.ones(shape=(batch_size, 1))
            discriminator.trainable = False
            g_loss, g_acc = DCGAN.train_on_batch(noise, train_label_fake_for_gen_training)

            loss_from_discriminator_model.append(d_loss_real + d_loss_fake)
            loss_from_generator_model.append(g_loss)
            acc_dis.append((d_acc_real + d_acc_fake) / 2)
            acc_gen.append(g_acc)
The problem is that my model doesn't seem to learn anything; the accuracy and loss values don't seem rational. Here is a plot of the generator and discriminator loss values during training.
Thanks in advance.

Dimension error: encoder input is different from decoder output

I am new to autoencoders and I cannot understand why I am getting this error.
import tensorflow as tf
from tensorflow.keras import layers, Model
from tensorflow.keras.initializers import HeNormal

class Autoencoder(Model):
    def __init__(self, name="autoencoder"):
        super(Autoencoder, self).__init__()
        self.encoder_input = tf.keras.Input(shape=(128, 219, 1))
        self.encoder = layers.Conv2D(16, (3, 3), activation='relu', padding='same', strides=2,
                                     kernel_initializer=HeNormal())(self.encoder_input)
        self.encoder_output = layers.Conv2D(8, (3, 3), activation='relu', padding='same', strides=2,
                                            kernel_initializer=HeNormal())(self.encoder)
        self.encoder_model = tf.keras.Model(self.encoder_input, self.encoder_output)
        # ------------------------------------------
        # The autoencoder has a decoder, which we can make using normal layers.
        # Here we use the Functional API, which requires 3 things (input, model and output).
        # ------------------------------------------
        self.decoder_input = tf.keras.Input(shape=(32, 55, 8))
        self.decoder = layers.Conv2DTranspose(8, kernel_size=3, strides=2, activation='relu',
                                              padding='same', kernel_initializer=HeNormal())(self.decoder_input)
        self.decoder_second = layers.Conv2DTranspose(16, kernel_size=3, strides=2, activation='relu',
                                                     padding='same', kernel_initializer=HeNormal())(self.decoder)
        self.decoder_output = layers.Conv2D(1, kernel_size=(3, 3), activation='sigmoid',
                                            padding='same', kernel_initializer=HeNormal())(self.decoder_second)
        self.decoder_model = tf.keras.Model(self.decoder_input, self.decoder_output)

    # The forward pass
    def call(self, x):
        # Encode the inputs x using the encoder model defined above.
        # In the previous notebook we created the encoder and decoder as Sequential models,
        # but since we used the Functional API here, we call the Model objects created above.
        encoded = self.encoder_model(x)
        # Decode the encoded tensor using the decoder model defined above.
        decoded = self.decoder_model(encoded)
        return decoded

autoencoder = Autoencoder()
I defined the encoder part and then the decoder part, but after calling fit I am getting:
ValueError: Dimensions must be equal, but are 220 and 219 for '{{node mean_squared_error/SquaredDifference}} = SquaredDifference[T=DT_FLOAT](autoencoder_3/model_7/conv2d_15/Sigmoid, IteratorGetNext:1)' with input shapes: [?,128,220,1], [?,128,219,1].
Am I doing something wrong? The input image is of shape (128, 219, 1).
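The 220 vs. 219 in the error can be traced by following the width through the layers (a sketch, assuming the 'same' padding and stride 2 used above): a stride-2 convolution with 'same' padding rounds up, while a stride-2 transposed convolution doubles exactly, so an odd width of 219 comes back as 220.
import math

width = 219
after_conv1 = math.ceil(width / 2)        # 110 (encoder conv, stride 2, 'same')
after_conv2 = math.ceil(after_conv1 / 2)  # 55
after_deconv1 = after_conv2 * 2           # 110 (decoder transposed conv, stride 2, 'same')
after_deconv2 = after_deconv1 * 2         # 220 -- one more than the input's 219
print(after_conv2, after_deconv2)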

What should I change input_shape to?

I'm going to train on data with a 3D tensor input.
My model is:
from keras.models import Sequential
from keras.layers import Dense, InputLayer
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Model

class AnomalyDetector(Model):
    def __init__(self):
        super(AnomalyDetector, self).__init__()
        self.encoder = tf.keras.Sequential([
            tf.keras.layers.Dense(32, input_shape=(36, 501), activation="relu"),
            tf.keras.layers.Dense(16, activation="relu"),
            tf.keras.layers.Dense(8, activation="relu")
        ])
        self.decoder = tf.keras.Sequential([
            tf.keras.layers.Dense(16, input_shape=(36, 501), activation="relu"),
            tf.keras.layers.Dense(32, activation="relu"),
            tf.keras.layers.Dense(140, activation="sigmoid")
        ])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

model = AnomalyDetector()
The shape of the 3D tensor is (1500, 36, 501).
model.compile(optimizer='adam', loss='mae', metrics=['accuracy'])
model.fit(train_X, train_y, epochs=100, batch_size = 512, validation_data=(vali_X, vali_y), shuffle=True)
and the error is:
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Input In [155], in <cell line: 12>()
      9 print(e)
     11 model.compile(optimizer='adam', loss='mae', metrics=['accuracy'])
---> 12 model.fit(train_X, train_y, epochs=100, batch_size = 512, validation_data=(vali_X, vali_y), shuffle=True)
...
ValueError: Input 0 of layer "sequential_58" is incompatible with the layer: expected shape=(None, 36, 501), found shape=(None, 36, 8)

Call arguments received by layer "anomaly_detector_30" (type AnomalyDetector):
  • x=tf.Tensor(shape=(None, 36, 501), dtype=float32)
I already changed the encoder's last Dense layer from 8 to 501, but another error occurred.
How should I change this?
Dense doesn't take 3D input; it takes 1D input, for example:
input_shape = ( , 40) — here 40 is the number of columns, and I left a space because at the start your model doesn't know the length of the data; that's why model.summary() shows
input_shape = (None, 40).
Is your input an image? If yes, then Dense doesn't work on images; use Conv2D or Conv3D depending on your input data.
If I am wrong and the Dense layer does take 3D input, correct me and please provide a link to where you read about it. Thanks.
Your problem is in the input of your decoder. The decoder is set up to receive tensors of size (36, 501), but it gets the output of the encoder, which is a tensor of size (36, 8). If you change your decoder input, your code works:
class AnomalyDetector(tf.keras.Model):
    def __init__(self):
        super(AnomalyDetector, self).__init__()
        self.encoder = tf.keras.Sequential([
            tf.keras.layers.Dense(32, input_shape=(36, 501), activation="relu"),
            tf.keras.layers.Dense(16, activation="relu"),
            tf.keras.layers.Dense(8, activation="relu")
        ])
        self.decoder = tf.keras.Sequential([
            tf.keras.layers.Dense(16, input_shape=(36, 8), activation="relu"),
            tf.keras.layers.Dense(32, activation="relu"),
            tf.keras.layers.Dense(140, activation="sigmoid")
        ])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded
But be careful: your encoder only embeds the second (Y) dimension of your image. I think you should flatten the image for your first layer, which means something like this:
class AnomalyDetector(tf.keras.Model):
    def __init__(self):
        super(AnomalyDetector, self).__init__()
        self.encoder = tf.keras.Sequential([
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(32, activation="relu"),
            tf.keras.layers.Dense(16, activation="relu"),
            tf.keras.layers.Dense(8, activation="relu")
        ])
        self.decoder = tf.keras.Sequential([
            tf.keras.layers.Dense(16, activation="relu"),
            tf.keras.layers.Dense(32, activation="relu"),
            tf.keras.layers.Dense(140, activation="sigmoid")
        ])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded
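A quick shape check for the flattened variant (a minimal sketch, assuming random data shaped like the (1500, 36, 501) tensor from the question):
import numpy as np
import tensorflow as tf

model = AnomalyDetector()
x = np.random.rand(4, 36, 501).astype("float32")
print(model(x).shape)  # (4, 140): Flatten collapses (36, 501) before the Dense stack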

How to print a confusion matrix for a Keras multi-output model?

I have one question: I want to print a confusion matrix.
My model uses the functional API of Keras, with
model = Model(inputs=[data_input], outputs=[output_1, output_2])
where output_1 has 9 classes and output_2 has 5 classes.
My multi-classification model:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (Input, Conv1D, MaxPooling1D, LSTM, GRU,
                                     Dense, Activation)

data_input = Input(shape=(trainX.shape[1], trainX.shape[2]))
Conv1 = Conv1D(filters=50, kernel_size=4, padding='valid', activation='relu', strides=1)(data_input)
Conv1 = MaxPooling1D(pool_size=2)(Conv1)
Conv2 = Conv1D(filters=50, kernel_size=4, padding='valid', activation='relu', strides=1)(Conv1)
Conv2 = MaxPooling1D(pool_size=2)(Conv2)
Conv3 = Conv1D(filters=50, kernel_size=4, padding='valid', activation='relu', strides=1)(Conv2)
Conv3 = MaxPooling1D(pool_size=2)(Conv3)

Classification1 = LSTM(128, input_shape=(47, 50), return_sequences=False)(Conv3)
Classification2 = GRU(128, input_shape=(47, 50), return_sequences=False)(Conv3)

activity = Dense(9)(Classification1)
activity = Activation('softmax')(activity)
speed = Dense(5)(Classification2)
speed = Activation('softmax')(speed)

model = Model(inputs=[data_input], outputs=[activity, speed])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())

history = model.fit(trainX, {'activation_1': trainY_Activity, 'activation_2': trainY_Speed},
                    validation_data=(testX, {'activation_1': testY_Activity, 'activation_2': testY_Speed}),
                    epochs=epochs, batch_size=batch_size, verbose=1, shuffle=False)
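One way to get per-output confusion matrices (a sketch, assuming the one-hot test labels testY_Activity and testY_Speed from the fit call above): model.predict on a multi-output model returns one array per output, so each matrix can be computed with scikit-learn after an argmax.
import numpy as np
from sklearn.metrics import confusion_matrix

# model.predict returns a list: one prediction array per output.
pred_activity, pred_speed = model.predict(testX)

cm_activity = confusion_matrix(np.argmax(testY_Activity, axis=1),
                               np.argmax(pred_activity, axis=1))
cm_speed = confusion_matrix(np.argmax(testY_Speed, axis=1),
                            np.argmax(pred_speed, axis=1))
print(cm_activity)  # 9 x 9
print(cm_speed)     # 5 x 5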

Python Keras Tensorflow Embedding Layer Indices[i,j] = k is not in [0,max_features]

I am trying to do author identification. My train_vecs_w2v.shape = (15663, 400) and y_train.shape = (15663, 3), which holds 3 one-hot encoded labels.
Now the problem is that I am getting an error in the Embedding layer: Indices[0,X] = -1 is not in [0, 15663). How do I solve this? Is it my code or Keras/Tensorflow?
from keras.models import Sequential
from keras.layers import Embedding, Convolution1D, MaxPooling1D, Flatten, Dense

print('Building Model')
n = 19579
max_features = 15663
max_length = 400
EMBEDDING_DIM = 100

model7 = Sequential()
model7.add(Embedding(len(train_vecs_w2v), EMBEDDING_DIM, input_length=max_length, dtype='float32',
                     trainable=True, weights=None, embeddings_initializer='uniform',
                     embeddings_regularizer=None, activity_regularizer=None,
                     embeddings_constraint=None))
print(model7.output_shape)
model7.add(Convolution1D(filters=128, kernel_size=3, strides=1, activation='relu',
                         use_bias=False, border_mode='same'))
print(model7.output_shape)
model7.add(MaxPooling1D(pool_size=3))
print(model7.output_shape)
model7.add(Convolution1D(filters=64, kernel_size=5, strides=1, activation='relu', border_mode='same'))
print(model7.output_shape)
model7.add(MaxPooling1D(pool_size=5))
print(model7.output_shape)
model7.add(Flatten())  # model.output_shape == (None, 64 * output length of the conv layer)
print(model7.output_shape)
model7.add(Dense(output_dim=64, activation='relu'))  # input_shape = (batch_size, input_dim)
print(model7.output_shape)
model7.add(Dense(output_dim=32, activation='relu'))
print(model7.output_shape)
model7.add(Dense(output_dim=3, activation='softmax'))

model7.compile(optimizer='adam',
               loss='categorical_crossentropy',
               metrics=['categorical_accuracy'])
model7.fit(train_vecs_w2v, y_train_vec, epochs=50, batch_size=32, verbose=2)
The error I am getting:
InvalidArgumentError (see above for traceback): indices[0,1] = -1 is not in [0, 15663)
[[Node: embedding_1/Gather = Gather[Tindices=DT_INT32, Tparams=DT_FLOAT, validate_indices=true, _device="/job:localhost/replica:0/task:0/device:CPU:0"](embedding_1/embeddings/read, embedding_1/Cast)]]
I think the problem here is with the word vector count. It should be:
len(train_vecs_w2v) + 1
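Applied to the model above, the suggested change would look like this (a sketch; only the first argument of the Embedding layer changes):
model7.add(Embedding(len(train_vecs_w2v) + 1,  # one extra row keeps the largest index in range
                     EMBEDDING_DIM, input_length=max_length))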
