How to change input shape of the model with lambda layer - python

Let's suppose I have specified MobileNet from the Keras models this way:
base_model = MobileNetV2(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(12, activation='softmax')(x)
# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(loss='categorical_crossentropy', optimizer=Adam(),
              metrics=['accuracy'])
But I would like to add a custom layer to preprocess the input image, this way:
def myFunc(x):
    return K.reshape(x / 255, (-1, 224, 224, 3))
new_model = Sequential()
new_model.add(Lambda(myFunc, input_shape=(224, 224, 3), output_shape=(224, 224, 3)))
new_model.add(model)
new_model.compile(loss='categorical_crossentropy', optimizer=Adam(),
                  metrics=['accuracy'])
new_model.summary()
It works pretty well, but now I need its input shape to be (224, 224, 3) instead of (None, 224, 224, 3). How can I do that?

In order to expand the dimension of your tensor, you can use
import tensorflow.keras.backend as K
# adds a new dimension to a tensor
K.expand_dims(tensor, 0)
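For instance, a quick check (a sketch assuming TF 2.x):
import tensorflow as tf
import tensorflow.keras.backend as K

t = tf.zeros((224, 224, 3))       # a single image without a batch axis
print(K.expand_dims(t, 0).shape)  # (1, 224, 224, 3)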
However, I do not see why you would need it, just as @meonwongac mentioned.
If you still want to use a Lambda layer instead of resizing / applying other operations to the images with skimage, OpenCV, or another library, one way of using the Lambda layer is the following:
import tensorflow as tf
from tensorflow.keras.layers import Input, Lambda

input_ = Input(shape=(None, None, 3))
# note: in TF 2.x, tf.image.resize_images was renamed to tf.image.resize
next_layer = Lambda(lambda image: tf.image.resize_images(image, (128, 128)))(input_)
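For completeness, here is a minimal sketch of putting such a Lambda preprocessing layer in front of the base model (an illustration only, assuming TF 2.x, where tf.image.resize replaces tf.image.resize_images; model is the MobileNetV2-based model built in the question):
import tensorflow as tf
from tensorflow.keras.layers import Input, Lambda
from tensorflow.keras.models import Model

# accept images of any spatial size with three channels
raw_input = Input(shape=(None, None, 3))
# rescale pixel values and resize to the shape the base model expects
x = Lambda(lambda image: tf.image.resize(image / 255.0, (224, 224)))(raw_input)
wrapped_model = Model(inputs=raw_input, outputs=model(x))
wrapped_model.summary()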

Related

keras sequential tensor as argument

I am trying to reproduce the cat-or-dog image classification problem using TensorFlow and transfer learning (an Xception model pretrained on ImageNet). The code is:
base_model = keras.applications.Xception(
    weights='imagenet',
    # image shape = 128x128x3
    input_shape=(128, 128, 3),
    include_top=False)

# freeze layers
base_model.trainable = False

inputs = keras.Input(shape=(128, 128, 3))
x = data_augmentation(inputs)
x = tf.keras.applications.xception.preprocess_input(x)
x = base_model(x, training=False)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(128, activation='relu')(x)
outputs = keras.layers.Dense(1, activation='sigmoid')(x)
model = keras.Model(inputs, outputs)
I am now trying to make use of models.Sequential. So far my code looks like this:
theModel = models.Sequential([
    tf.keras.Input(shape=(128, 128, 3)),
    tf.keras.applications.xception.preprocess_input(),  # <-- how to pass the tensor as argument?
    base_model,
    Flatten(),
    Dense(128, activation='relu'),
    Dense(1, activation='sigmoid')
])
My question: is there a way to make use of models.Sequential, defining everything as I've done, but passing the tensor as an argument like in the first code snippet?
Thanks in advance,
metc
You cannot use tf.keras.applications.xception.preprocess_input() inside the Sequential model. You have to call it outside the model, and you can pass its output to the Sequential model by assigning it to the tensor argument of the input layer.
x = tf.random.uniform(shape=(1, 128, 128, 3))
x = tf.keras.applications.xception.preprocess_input(x)
theModel = tf.keras.models.Sequential([
    tf.keras.Input(tensor=x),
    base_model,
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
For more details, please refer to this gist. Thank you!
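As an aside, another option (not from the original answer; a sketch assuming TF 2.x, with base_model being the frozen Xception model from the question) is to wrap preprocess_input in a Lambda layer so the preprocessing lives inside the Sequential model itself:
import tensorflow as tf

theModel = tf.keras.models.Sequential([
    tf.keras.Input(shape=(128, 128, 3)),
    # preprocess_input is a tensor-in/tensor-out function, so it can be wrapped
    tf.keras.layers.Lambda(tf.keras.applications.xception.preprocess_input),
    base_model,
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])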

how to solve input tensor error in adopting pre-trained keras models

I am trying to adopt a pre-trained Keras model as follows, but it requires the input to be a tensor. Can anyone help solve this?
from keras.applications.vgg19 import VGG19
inputs = layers.Input(shape = (32,32,4))
vgg_model = VGG19(weights='imagenet', include_top=False)
vgg_model.trainable = False
x = tensorflow.keras.layers.Flatten(name='flatten')(vgg_model)
x = tensorflow.keras.layers.Dense(512, activation='relu', name='fc1')(x)
x = tensorflow.keras.layers.Dense(512, activation='relu', name='fc2')(x)
x = tensorflow.keras.layers.Dense(1,name='predictions')(x)
new_model = tensorflow.keras.models.Model(inputs=inputs, outputs=x)
new_model.compile(optimizer='adam', loss='mean_squared_error',
                  metrics=['mae'])
error:
TypeError: Inputs to a layer should be tensors. Got: <keras.engine.functional.Functional object at 0x000001F48267B588>
If you want to use the VGG19 as your base model, you will have to use its output as the input to your custom model:
import tensorflow as tf
from keras.applications.vgg19 import VGG19
vgg_model = VGG19(weights='imagenet', include_top=False, input_shape=(32, 32, 3))
vgg_model.trainable = False
x = vgg_model.output
x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x)
x = tf.keras.layers.Dense(512, activation='relu', name='fc2')(x)
x = tf.keras.layers.Dense(1, name='predictions')(x)
new_model = tf.keras.Model(inputs=vgg_model.input, outputs=x)
new_model.compile(optimizer='adam', loss='mean_squared_error',
                  metrics=['mae'])
new_model(tf.random.normal((1, 32, 32, 3)))
Note that I removed your Flatten layer: with a 32x32 input, the output of vgg_model is already reduced to 1x1 spatially, so the Dense layers can be applied to it directly.
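Alternatively, if you do want a flat (batch_size, features) tensor, a short sketch (an assumption, not part of the original answer) is to let VGG19 pool for you:
vgg_model = VGG19(weights='imagenet', include_top=False,
                  input_shape=(32, 32, 3), pooling='avg')
x = vgg_model.output  # shape (None, 512), already flat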

Input Shape in Keras Autoencoder

I'm trying to train an autoencoder with the following code:
encoder_input = keras.layers.Input(shape=(x_Train.shape[1]), name='img')
encoder_out = keras.layers.Dense(1, activation = "relu")(encoder_input)
encoder = keras.Model(encoder_input, encoder_out, name="encoder")
decoder_input = keras.layers.Dense(602896, activation = "relu")(encoder_out)
decoder_output = keras.layers.Reshape((769, 28, 28))(decoder_input)
opt = keras.optimizers.RMSprop(learning_rate=1e-3)
autoencoder = keras.Model(encoder_input, decoder_output, name = "autoencoder")
autoencoder.summary()
autoencoder.compile(opt, loss='mse')
autoencoder.fit(x_Train, x_Train, epochs=10, batch_size=64, validation_split = 0.1)
However, it returns the error:
"tensorflow:Model was constructed with shape (None, 28) for input KerasTensor(type_spec=TensorSpec(shape=(None, 28), dtype=tf.float32, name='img'), name='img', description="created by layer 'img'"), but it was called on an input with incompatible shape (None, 28, 28)."
I don't know how to deal with that or how to resize my input. My x_Train is an array of shape [769, 28, 28].
Could someone help me handle the error?
Thanks
The input shape for your autoencoder is a little weird: your training data has a shape of 28x28, with 769 as your batch size, so the fix should be like this:
encoder_input = keras.layers.Input(shape=(28, 28), name='img')
encoder_out = keras.layers.Dense(1, activation="relu")(encoder_input)
# For your decoder, you need to change it a bit as well
decoder_input = keras.layers.Dense(784, activation="sigmoid")(encoder_out)  # flatten until 28x28 = 784
decoder_output = keras.layers.Reshape((28, 28))(decoder_input)  # from there reshape back to 28x28
The problem (apart from the wrong shape in the input layer, which has to be shape=(28, 28), and the output layer, which has to be (28, 28), as in Edwin Cheong's answer) is that you forgot a Flatten layer after your input layer. This leads to the incompatible shape.
Adapted the answer from above:
encoder_input = keras.layers.Input(shape=(28, 28), name='img')
flattened = keras.layers.Flatten()(encoder_input)
encoder_out = keras.layers.Dense(1, activation="relu")(flattened)
decoder_input = keras.layers.Dense(784, activation="sigmoid")(encoder_out)
decoder_output = keras.layers.Reshape((28, 28))(decoder_input)
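To complete the picture, the fixed graph can then be wired up and trained just as in the question (a sketch reusing the question's own hyperparameters):
autoencoder = keras.Model(encoder_input, decoder_output, name="autoencoder")
autoencoder.compile(keras.optimizers.RMSprop(learning_rate=1e-3), loss='mse')
# x_Train has shape (769, 28, 28), which the model now accepts directly
autoencoder.fit(x_Train, x_Train, epochs=10, batch_size=64, validation_split=0.1)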

Transfer learning, wrong dense layer's shape

I am trying to apply transfer learning to my ANN for image classification.
I found an example of it, and I would like to personalize the network.
Here are the main blocks of code:
model = VGG19(weights='imagenet',
              include_top=False,
              input_shape=(224, 224, 3))
batch_size = 16

for layer in model.layers[:5]:
    layer.trainable = False

x = model.output
x = Flatten()(x)
x = Dense(1024, activation="relu")(x)
x = Dense(1024, activation="relu")(x)
predictions = Dense(16, activation="sigmoid")(x)
model_final = Model(input=model.input, output=predictions)

model_final.fit_generator(
    train_generator,
    samples_per_epoch=nb_train_samples,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples,
    callbacks=[checkpoint, early])
When I run the code above I get this error:
ValueError: Error when checking target: expected dense_3 to have shape (16,) but got array with shape (1,).
I suppose the problem is the order of the dimensions in the dense layer. I have tried transposing it, but I get the same error.
Maybe this simple example can help:
import numpy as np
test = np.array([1,2,3])
print(test.shape) # (3,)
test = test[np.newaxis]
print(test.shape) # (1, 3)
Try applying np.newaxis to your train_generator output.
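Given that the error says the target should have shape (16,) but has shape (1,), another likely fix (an assumption, since the generator code is not shown) is to one-hot encode the integer labels so each target becomes a 16-dimensional vector:
from keras.utils import to_categorical

# hypothetical integer labels y of shape (n,) -> one-hot targets of shape (n, 16)
y_one_hot = to_categorical(y, num_classes=16)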

Keras - All layer names should be unique

I combined two VGG nets in Keras to perform a classification task. When I run the program, it shows an error:
RuntimeError: The name "predictions" is used 2 times in the model. All layer names should be unique.
I am confused because I only use the prediction layer once in my code:
from keras.layers import Dense
import keras
from keras.models import Model

model1 = keras.applications.vgg16.VGG16(include_top=True, weights='imagenet',
                                        input_tensor=None, input_shape=None,
                                        pooling=None,
                                        classes=1000)
model1.layers.pop()

model2 = keras.applications.vgg16.VGG16(include_top=True, weights='imagenet',
                                        input_tensor=None, input_shape=None,
                                        pooling=None,
                                        classes=1000)
model2.layers.pop()
for layer in model2.layers:
    layer.name = layer.name + str("two")

model1.summary()
model2.summary()

featureLayer1 = model1.output
featureLayer2 = model2.output
combineFeatureLayer = keras.layers.concatenate([featureLayer1, featureLayer2])
prediction = Dense(1, activation='sigmoid', name='main_output')(combineFeatureLayer)
model = Model(inputs=[model1.input, model2.input], outputs=prediction)
model.summary()
Thanks to @putonspectacles for the help; I followed his instructions and found something interesting. If you use model2.layers.pop() and combine the last layers of the two models with keras.layers.concatenate([model1.output, model2.output]), you will find that the popped layers are still shown by model.summary(), even though they no longer exist in the structure. So instead, you can use keras.layers.concatenate([model1.layers[-1].output, model2.layers[-1].output]). It looks tricky, but it works. I think it is a problem of synchronization between the log and the structure.
First, based on the code you posted, you have no layers with the name attribute 'predictions', so this error has nothing to do with your Dense layer prediction, i.e.:
prediction = Dense(1, activation='sigmoid',
                   name='main_output')(combineFeatureLayer)
The VGG16 model has a Dense layer with name predictions. In particular this line:
x = Dense(classes, activation='softmax', name='predictions')(x)
And since you're using two of these models you have layers with duplicate names.
What you could do is rename the layer in the second model to something other than predictions, maybe predictions_1, like so:
model2 = keras.applications.vgg16.VGG16(include_top=True, weights='imagenet',
                                        input_tensor=None, input_shape=None,
                                        pooling=None,
                                        classes=1000)
# now change the name of the layer in place.
model2.get_layer(name='predictions').name = 'predictions_1'
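A quick way to confirm the clash is gone (a hypothetical check, not part of the original answer):
names = [layer.name for layer in model1.layers + model2.layers]
assert len(names) == len(set(names)), "duplicate layer names remain"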
You can change a layer's name in Keras; just don't use 'tensorflow.python.keras'.
Here is my sample code:
from keras.layers import Dense, concatenate
from keras.applications import vgg16

num_classes = 10

model = vgg16.VGG16(include_top=False, weights='imagenet', input_tensor=None, input_shape=(64, 64, 3), pooling='avg')
inp = model.input
out = model.output

model2 = vgg16.VGG16(include_top=False, weights='imagenet', input_tensor=None, input_shape=(64, 64, 3), pooling='avg')
for layer in model2.layers:
    layer.name = layer.name + str("_2")
inp2 = model2.input
out2 = model2.output

merged = concatenate([out, out2])
merged = Dense(1024, activation='relu')(merged)
merged = Dense(num_classes, activation='softmax')(merged)
model_fusion = Model([inp, inp2], merged)
model_fusion.summary()
Example:
# Network for affine transform estimation
affine_transform_estimator = MobileNet(
    input_tensor=None,
    input_shape=(config.IMAGE_H // 2, config.IMAGE_W // 2, config.N_CHANNELS),
    alpha=1.0,
    depth_multiplier=1,
    include_top=False,
    weights='imagenet'
)
affine_transform_estimator.name = 'affine_transform_estimator'
for layer in affine_transform_estimator.layers:
    layer.name = layer.name + str("_1")

# Network for landmarks regression
landmarks_regressor = MobileNet(
    input_tensor=None,
    input_shape=(config.IMAGE_H // 2, config.IMAGE_W // 2, config.N_CHANNELS),
    alpha=1.0,
    depth_multiplier=1,
    include_top=False,
    weights='imagenet'
)
landmarks_regressor.name = 'landmarks_regressor'
for layer in landmarks_regressor.layers:
    layer.name = layer.name + str("_2")

input_image = Input(shape=(config.IMAGE_H, config.IMAGE_W, config.N_CHANNELS))
downsampled_image = MaxPooling2D(pool_size=(2, 2))(input_image)
x1 = affine_transform_estimator(downsampled_image)
x2 = landmarks_regressor(downsampled_image)
x3 = add([x1, x2])

model = Model(inputs=input_image, outputs=x3)
optimizer = Adadelta()
model.compile(optimizer=optimizer, loss=mae_loss_masked)
