keras sequential tensor as argument - python

I am trying to reproduce the cat-vs-dog image classification problem using TensorFlow and transfer learning (an Xception model pretrained on ImageNet). The code is:
base_model = keras.applications.Xception(
    weights='imagenet',
    # image shape = 128x128x3
    input_shape=(128, 128, 3),
    include_top=False)

# freeze layers
base_model.trainable = False

inputs = keras.Input(shape=(128, 128, 3))
x = data_augmentation(inputs)
x = tf.keras.applications.xception.preprocess_input(x)
x = base_model(x, training=False)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(128, activation='relu')(x)
outputs = keras.layers.Dense(1, activation='sigmoid')(x)
model = keras.Model(inputs, outputs)
I am now trying to make use of models.Sequential. So far my code looks like this:
theModel = models.Sequential([
    tf.keras.Input(shape=(128, 128, 3)),
    tf.keras.applications.xception.preprocess_input(),  # <-------- how to pass tensor as argument?
    base_model,
    Flatten(),
    Dense(128, activation='relu'),
    Dense(1, activation='sigmoid')
])
My question: is there a way to use models.Sequential, defining everything as I've done above, but passing the tensor as an argument like in the first code snippet?
Thanks in advance,
metc

You cannot call tf.keras.applications.xception.preprocess_input() inside a Sequential model. You have to apply it outside the model and pass its output to the Sequential model by assigning it to the tensor argument of the input layer.
x = tf.random.uniform(shape=(1, 128, 128, 3))
x = tf.keras.applications.xception.preprocess_input(x)

theModel = tf.keras.models.Sequential([
    tf.keras.Input(tensor=x),
    base_model,
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
For more details, please refer to this gist. Thank you!
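If you do want the preprocessing step to live inside the Sequential model itself, one alternative sketch (not part of the answer above, and assuming the same base_model as in the question) is to wrap preprocess_input in a Lambda layer:
import tensorflow as tf

# Sketch: wrap the preprocessing function in a Lambda layer so it runs
# inside the Sequential model rather than being applied beforehand.
theModel = tf.keras.models.Sequential([
    tf.keras.Input(shape=(128, 128, 3)),
    tf.keras.layers.Lambda(tf.keras.applications.xception.preprocess_input),
    base_model,
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])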

Related

Keras, using two pre-trained autoencoder models

I'm looking to use two pre-trained autoencoder models and build a model on top with binary output. This is my code:
autoencoder = load_model("autoencoder.h5")

model1 = autoencoder
model2 = autoencoder

model1_out = model1.get_layer(index=7).output
model2_out = model2.get_layer(index=7).output

x = tf.keras.layers.concatenate([model1_out, model2_out])
x = Dense(400, activation='softmax')(x)
x = Dense(200, activation='softmax')(x)
x = Dense(100, activation='softmax')(x)
x = Dense(1, activation='sigmoid')(x)

model = tf.keras.Model(inputs=[model1.input, model2.input], outputs=x)
model.compile(optimizer=Adam(learning_rate=0.001), loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit([X_train, X_train], y_train)
I get the following error:
ValueError: The list of inputs passed to the model contains the same input multiple times.
All inputs should only appear once.
Received inputs=
[<KerasTensor: shape=(None, 768) dtype=float32 (created by layer 'input_1')>,
<KerasTensor: shape=(None, 768) dtype=float32 (created by layer 'input_1')>]
Can somebody tell me what I am doing wrong?
Thanks!
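As a side note, the error happens because model1 and model2 are the same object, so model1.input and model2.input point to the same 'input_1' tensor. A possible workaround (my own sketch, not from this thread) is to load the saved autoencoder twice and rename the layers of the second copy so each branch has its own input:
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Dense

# Load two independent copies so each branch owns a distinct input tensor.
model1 = load_model("autoencoder.h5")
model2 = load_model("autoencoder.h5")

# Rename the second copy's layers to avoid duplicate layer names in the merged model.
for layer in model2.layers:
    layer._name = layer.name + "_b"

model1_out = model1.get_layer(index=7).output
model2_out = model2.get_layer(index=7).output

x = tf.keras.layers.concatenate([model1_out, model2_out])
x = Dense(1, activation='sigmoid')(x)
model = tf.keras.Model(inputs=[model1.input, model2.input], outputs=x)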

how to solve input tensor error in adopting pre-trained keras models

I am trying to adopt a pre-trained Keras model as follows, but it requires the input to be a tensor. Can anyone help to solve it?
from keras.applications.vgg19 import VGG19

inputs = layers.Input(shape=(32, 32, 4))
vgg_model = VGG19(weights='imagenet', include_top=False)
vgg_model.trainable = False

x = tensorflow.keras.layers.Flatten(name='flatten')(vgg_model)
x = tensorflow.keras.layers.Dense(512, activation='relu', name='fc1')(x)
x = tensorflow.keras.layers.Dense(512, activation='relu', name='fc2')(x)
x = tensorflow.keras.layers.Dense(1, name='predictions')(x)

new_model = tensorflow.keras.models.Model(inputs=inputs, outputs=x)
new_model.compile(optimizer='adam', loss='mean_squared_error',
                  metrics=['mae'])
error:
TypeError: Inputs to a layer should be tensors. Got: <keras.engine.functional.Functional object at 0x000001F48267B588>
If you want to use the VGG19 as your base model, you will have to use its output as the input to your custom model:
import tensorflow as tf
from keras.applications.vgg19 import VGG19

vgg_model = VGG19(weights='imagenet', include_top=False, input_shape=(32, 32, 3))
vgg_model.trainable = False

x = vgg_model.output
x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x)
x = tf.keras.layers.Dense(512, activation='relu', name='fc2')(x)
x = tf.keras.layers.Dense(1, name='predictions')(x)

new_model = tf.keras.Model(inputs=vgg_model.input, outputs=x)
new_model.compile(optimizer='adam', loss='mean_squared_error',
                  metrics=['mae'])
new_model(tf.random.normal((1, 32, 32, 3)))
Note that I removed your Flatten layer, since the output from the vgg_model already has the shape (batch_size, features).
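One caveat worth checking (my own note, not from the answer): with include_top=False and no pooling argument, the VGG19 output for a 32x32 input has shape (batch_size, 1, 1, 512), so the prediction head above ends with shape (batch_size, 1, 1, 1). If you want a flat (batch_size, 1) output to match 1-D targets, a small variant is to flatten first:
x = vgg_model.output
x = tf.keras.layers.Flatten()(x)  # (batch_size, 512) for 32x32 inputs
x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x)
x = tf.keras.layers.Dense(512, activation='relu', name='fc2')(x)
x = tf.keras.layers.Dense(1, name='predictions')(x)
flat_model = tf.keras.Model(inputs=vgg_model.input, outputs=x)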

Extract subnetwork from Keras Sequential model

I trained a very simple autoencoder network similar to this example:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

model = keras.Sequential([
    layers.Dense(128, activation="relu"),
    layers.Dense(64, activation="relu"),
    layers.Dense(32, activation="relu"),
    layers.Dense(16, activation="relu"),
    layers.Dense(8, activation="relu", name="latent_space"),
    layers.Dense(16, activation="relu"),
    layers.Dense(32, activation="relu", name="decode_32"),
    layers.Dense(64, activation="relu"),
    layers.Dense(128, activation="sigmoid"),
])

model.compile(...)
model.fit(...)

# Extract subnetwork here after training
I would like to know if it is possible to feed data to the latent_space layer such that I can afterwards extract the activations from layer decode_32? Ideally I would like to crop a subnetwork after training with the latent_space layer as the input and the decode_32 layer as the output layer. Is that possible?
Does this answer fit your question?
def extract_layers(main_model, starting_layer_ix, ending_layer_ix):
    # create an empty model
    new_model = Sequential()
    for ix in range(starting_layer_ix, ending_layer_ix + 1):
        curr_layer = main_model.get_layer(index=ix)
        # copy this layer over to the new model
        new_model.add(curr_layer)
    return new_model
If you prefer selecting your subnetwork by the names of the first and last layers, the get_layer method also accepts a layer name, but an easier solution is to retrieve the indexes of the layers to select using the layer.name attribute.
That way, you just have to modify the previous function by adding
layer_names = [layer.name for layer in main_model.layers]
starting_layer_ix = layer_names.index(starting_layer_name)
ending_layer_ix = layer_names.index(ending_layer_name)
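Putting the two snippets together, a name-based variant might look like this (my own assembly, assuming the Sequential model defined in the question):
from tensorflow.keras import Sequential

def extract_layers_by_name(main_model, starting_layer_name, ending_layer_name):
    # look up the slice boundaries by layer name
    layer_names = [layer.name for layer in main_model.layers]
    starting_layer_ix = layer_names.index(starting_layer_name)
    ending_layer_ix = layer_names.index(ending_layer_name)
    # copy that slice of layers (with their trained weights) into a new model
    new_model = Sequential()
    for ix in range(starting_layer_ix, ending_layer_ix + 1):
        new_model.add(main_model.get_layer(index=ix))
    return new_model

# e.g. the decoder slice from 'latent_space' up to 'decode_32'
sub_model = extract_layers_by_name(model, "latent_space", "decode_32")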

TensorFlow: How do I make a convolutional layer for tabular (1-D) features?

Using TensorFlow in Python, I am making a neural network that has a 1 dimensional array as input. I would like to add a convolutional layer to the network, but can't seem to get it to work.
My training data looks something like this:
n_samples = 20
length_feature = 10
features = np.random.random((n_samples, length_feature))
labels = np.array([1 if sum(e)>5 else 0 for e in features])
If I make a neural network like this one
model = keras.Sequential([
    keras.layers.Dense(10, activation='relu', input_shape=(length_feature,)),
    keras.layers.Dense(2, activation='softmax')
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

history = model.fit(features, labels, batch_size=5, validation_split=0.2, epochs=10)
and this works just fine. But if I add a convolutional layer like this
model = keras.Sequential([
    keras.layers.Dense(10, activation='relu', input_shape=(length_feature,)),
    keras.layers.Conv1D(kernel_size=3, filters=2),
    keras.layers.Dense(2, activation='softmax')
])
then I get the error
ValueError: Input 0 of layer conv1d_4 is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: [None, 10]
How can I add a convolutional layer to my neural network?
Conv1D expects a 3D input (batch_size, width, channels), but the Dense layer produces a 2D output. Simply change your model to the following:
model = keras.Sequential([
    keras.layers.Dense(10, activation='relu', input_shape=(length_feature,)),
    keras.layers.Lambda(lambda x: K.expand_dims(x, axis=-1)),
    keras.layers.Conv1D(kernel_size=3, filters=2),
    keras.layers.Dense(2, activation='softmax')
])
Where K is either keras.backend or tf.keras.backend depending on which one you used to get layers.
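A fully assembled version of that answer might look like the sketch below (my own combination, using tf.keras.backend for K and the random data from the question; I also added a Flatten before the final Dense so the softmax output has shape (batch, 2) and matches the sparse labels):
import numpy as np
from tensorflow import keras
import tensorflow.keras.backend as K

n_samples = 20
length_feature = 10
features = np.random.random((n_samples, length_feature))
labels = np.array([1 if sum(e) > 5 else 0 for e in features])

model = keras.Sequential([
    keras.layers.Dense(10, activation='relu', input_shape=(length_feature,)),
    # add a channel axis so Conv1D receives (batch, width, channels)
    keras.layers.Lambda(lambda x: K.expand_dims(x, axis=-1)),
    keras.layers.Conv1D(kernel_size=3, filters=2),
    keras.layers.Flatten(),
    keras.layers.Dense(2, activation='softmax')
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(features, labels, batch_size=5, validation_split=0.2, epochs=10)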

How to change input shape of the model with lambda layer

Let's suppose I have specified MobileNet from Keras models this way:
base_model = MobileNetV2(weights='imagenet', include_top=False, input_shape=(224, 224, 3))

# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(12, activation='softmax')(x)

# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(loss='categorical_crossentropy', optimizer=Adam(),
              metrics=['accuracy'])
But I would like to add a custom layer to preprocess the input image, like this:
def myFunc(x):
    return K.reshape(x / 255, (-1, 224, 224, 3))

new_model = Sequential()
new_model.add(Lambda(myFunc, input_shape=(224, 224, 3), output_shape=(224, 224, 3)))
new_model.add(model)
new_model.compile(loss='categorical_crossentropy', optimizer=Adam(),
                  metrics=['accuracy'])
new_model.summary()
It works pretty well, but now I need its input shape to be (224, 224, 3) instead of (None, 224, 224, 3). How can I do that?
In order to expand the dimension of your tensor, you can use
import tensorflow.keras.backend as K
# adds a new dimension to a tensor
K.expand_dims(tensor, 0)
However, I do not see why you would need it, just like #meonwongac mentioned.
If you still want to use a Lambda layer instead of resizing / applying other operations on images with skimage/OpenCV/another library, one way of using the Lambda layer is the following:
import tensorflow as tf

input_ = Input(shape=(None, None, 3))
next_layer = Lambda(lambda image: tf.image.resize_images(image, (128, 128)))(input_)
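For the narrower question of feeding a single (224, 224, 3) image to a model whose input shape is (None, 224, 224, 3), the usual pattern (a sketch of my own, assuming new_model from the question) is to add the batch axis at prediction time:
import numpy as np

# a single image with shape (224, 224, 3); here just random data
img = np.random.random((224, 224, 3)).astype("float32")

# add a leading batch axis -> (1, 224, 224, 3) before calling the model
pred = new_model.predict(np.expand_dims(img, axis=0))
print(pred.shape)  # (1, 12)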
