Connecting split dense layers in neural networks - Keras - Python

I've seen that in Keras I can use tf.split to split layers. My problem is that I don't understand how to make the connections between the "forked paths" that each sub-network must take.
Here is an image of an example I'm trying to build. It is basically an input layer that splits into 2 sub-networks and then reunites in a layer before the output.
Diagram (the thin black lines between the white polygons represent the weight connection matrices):

After googling with better search terms (such as "merge neural networks"), I found that the Keras functional API is the answer.
Helpful links:
https://machinelearningmastery.com/keras-functional-api-deep-learning/
https://www.educative.io/answers/how-to-merge-two-different-models-in-keras
Example code from first link:
# Shared Input Layer
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
# input layer
visible = Input(shape=(64,64,1))
# first feature extractor
conv1 = Conv2D(32, kernel_size=4, activation='relu')(visible)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
flat1 = Flatten()(pool1)
# second feature extractor
conv2 = Conv2D(16, kernel_size=8, activation='relu')(visible)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
flat2 = Flatten()(pool2)
# merge feature extractors
merge = concatenate([flat1, flat2])
# interpretation layer
hidden1 = Dense(10, activation='relu')(merge)
# prediction output
output = Dense(1, activation='sigmoid')(hidden1)
model = Model(inputs=visible, outputs=output)
# summarize layers
print(model.summary())
# plot graph
plot_model(model, to_file='shared_input_layer.png')
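For the exact topology in the diagram above (dense branches instead of convolutional ones), a minimal sketch might look like the following. The layer widths and input size are assumptions for illustration, not values from the original diagram:
# A minimal sketch of the split-and-merge topology from the diagram.
# Layer widths (8, 4) and input size (16) are assumed for illustration.
from keras.models import Model
from keras.layers import Input, Dense, concatenate
inputs = Input(shape=(16,))
# first branch of the split
branch_a = Dense(8, activation='relu')(inputs)
branch_a = Dense(4, activation='relu')(branch_a)
# second branch of the split
branch_b = Dense(8, activation='relu')(inputs)
branch_b = Dense(4, activation='relu')(branch_b)
# reunite the two branches before the output
merged = concatenate([branch_a, branch_b])
outputs = Dense(1, activation='sigmoid')(merged)
model = Model(inputs=inputs, outputs=outputs)
model.summary()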

Related

How to replace a Conv2D layer in keras with multiple sequential Conv2D layers?

I am trying to take a particular convolutional layer of VGG19 and replace it with 3 different convolutional layers, such that the first of the 3 layers matches the input shape expected from the preceding layer and the last produces exactly the same output shape as the original convolutional layer.
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg19 import preprocess_input
from tensorflow.keras.models import Model
import numpy as np
model_old = VGG19(weights='imagenet')
I have named the VGG model 'model_old'.
Here is the function that builds the 3 conv layers:
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten
def multiple_conv_layer(layer_id):
    # 'a' was undefined in the original snippet; presumably it refers to
    # the layer being replaced, so it is looked up from model_old here
    a = model_old.layers[layer_id]
    model = Sequential()
    model.add(Conv2D(3, kernel_size=1, input_shape=(28, 28, 3), strides=(1, 1),
                     padding='same', dilation_rate=(1, 1), activation='relu', use_bias=False))
    model.add(Conv2D(8, kernel_size=3, activation='relu', use_bias=False))
    model.add(Conv2D(64, kernel_size=1, strides=(1, 1),
                     padding='same', dilation_rate=a.dilation_rate, use_bias=False))
    return model
When I tried model_old.layers[1] = multiple_conv_layer(1), the layer was not actually replaced by the new Sequential model. Any leads on how to do this?
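Assigning into model.layers does not rewire the underlying graph. One common workaround is to rebuild the model by replaying its layers and substituting the replacement block at the target index. A minimal sketch, assuming a linear layer chain (true for VGG19) and that the replacement block already produces a shape compatible with the next layer:
# Rebuild the model's graph, swapping in `replacement` at `target_id`.
# Sketch only: assumes a linear chain of layers, as in VGG19.
from tensorflow.keras.models import Model

def replace_layer(model_old, target_id, replacement):
    x = model_old.layers[0].output  # tensor coming out of the input layer
    for i, layer in enumerate(model_old.layers[1:], start=1):
        if i == target_id:
            x = replacement(x)  # a Sequential block is callable like a layer
        else:
            x = layer(x)        # re-invoke the existing layer with its weights
    return Model(inputs=model_old.input, outputs=x)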

Adding layers to RESNET50 in order to build a JOIN CNN Model

This is my code for joining the ResNet50 model with my own model (which I want to train on my dataset). I want to freeze the layers of the ResNet50 model (see trainable = False in the code).
Here I'm importing the ResNet50 model:
```
import tensorflow.keras
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
resnet50_imagnet_model = tensorflow.keras.applications.resnet.ResNet50(weights="imagenet",
                                                                       include_top=False,
                                                                       input_shape=(150, 150, 3),
                                                                       pooling='max')
```
Here I create my model:
```
# freeze feature layers and rebuild model
for l in resnet50_imagnet_model.layers:
    l.trainable = False

# construction of the model
model5 = [
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(12, activation='softmax')
]

# joining the two models
model_using_pre_trained_resnet50 = tf.keras.Sequential(resnet50_imagnet_model.layers + model5)
```
The last line doesn't work and I get this error:
Input 0 of layer conv2_block1_3_conv is incompatible with the layer: expected axis -1 of input shape to have value 64 but received input with shape [None, 38, 38, 256]
Thanks for the help.
You can use Keras' functional API instead, as below. (Unpacking resnet50_imagnet_model.layers into a Sequential model fails because ResNet50 contains residual skip connections, so its layers do not form a linear chain.)
from tensorflow.keras.applications.resnet50 import ResNet50
import tensorflow as tf
resnet50_imagenet_model = ResNet50(include_top=False, weights='imagenet', input_shape=(150, 150, 3))
#Flatten output layer of Resnet
flattened = tf.keras.layers.Flatten()(resnet50_imagenet_model.output)
#Fully connected layer 1
fc1 = tf.keras.layers.Dense(128, activation='relu', name="AddedDense1")(flattened)
#Fully connected layer, output layer
fc2 = tf.keras.layers.Dense(12, activation='softmax', name="AddedDense2")(fc1)
model = tf.keras.models.Model(inputs=resnet50_imagenet_model.input, outputs=fc2)
Also refer to this question.
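Alternatively, the whole frozen base model can be treated as a single layer inside Sequential, which avoids unpacking its layers. A minimal sketch, assuming the same shapes as above:
# Wrap the entire ResNet50 model as one "layer" instead of unpacking
# its layers; this preserves the internal skip connections.
from tensorflow.keras.applications.resnet50 import ResNet50
import tensorflow as tf
base = ResNet50(include_top=False, weights='imagenet', input_shape=(150, 150, 3))
base.trainable = False  # freeze the whole backbone
model = tf.keras.Sequential([
    base,
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(12, activation='softmax')
])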

ValueError: Graph disconnected: cannot obtain value for tensor Tensor...The following previous layers were accessed without issue: []

I have been trying to create a multi-input model using Keras, but got errors. The idea is to combine the text and corresponding topics to make predictions for sentiments. Here's the code:
import numpy as np
from keras.utils import to_categorical, plot_model
text = np.random.randint(5000, size=(442702, 200), dtype='int32')
topic = np.random.randint(2, size=(442702, 227), dtype='int32')
sentiment = to_categorical(np.random.randint(5, size=442702), dtype='int32')
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Activation, Embedding, Flatten, GlobalMaxPool1D, Dropout, Conv1D, concatenate
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from keras.losses import binary_crossentropy
from keras.optimizers import Adam
text_input = Input(shape=(200,), dtype='int32', name='text')
text_encoded = Embedding(input_dim=5000, output_dim=20, input_length=200)(text_input)
text_encoded = Dropout(0.1)(text_encoded)
text_encoded = Conv1D(300, 3, padding='valid', activation='relu', strides=1)(text_encoded)
text_encoded = GlobalMaxPool1D()(text_encoded)
topic_input = Input(shape=(227,), dtype='int32', name='topic')
concatenated = concatenate([text_encoded, topic_input])
sentiment = Dense(5, activation='softmax')(concatenated)
model = Model(inputs=[text_encoded, topic_input], outputs=sentiment)
# summarize layers
print(model.summary())
# plot graph
plot_model(model)
However, this gives me the below error:
TypeError: Tensors in list passed to 'values' of 'ConcatV2' Op have types [float32, int32] that don't all match.
Now if I change dtype of topic_input from 'int32' to 'float32', I got a different error:
ValueError: Graph disconnected: cannot obtain value for tensor Tensor("text_37:0", shape=(?, 200), dtype=int32) at layer "text". The following previous layers were accessed without issue: []
On the other hand, part of the model works just fine with the sequential API.
model = Sequential()
model.add(Embedding(5000, 20, input_length=200))
model.add(Dropout(0.1))
model.add(Conv1D(300, 3, padding='valid', activation='relu', strides=1))
model.add(GlobalMaxPool1D())
model.add(Dense(227))
model.add(Activation('sigmoid'))
print(model.summary())
Any pointers are highly appreciated.
There are a few issues with your Keras functional API implementation:
1. You should use the Concatenate layer, as Concatenate(axis=-1)([text_encoded, topic_input]).
2. In the concatenate layer you are trying to combine an int32 tensor and a float32 tensor, which is not allowed. What you should do is from keras.backend import cast and concatenated = Concatenate(axis=-1)([text_encoded, cast(topic_input, 'float32')]).
3. You have variable conflicts: there are two sentiment variables, one pointing to the to_categorical output and the other to the output of the final Dense layer.
4. Your model inputs cannot be intermediate tensors like text_encoded. They should come from Input layers.
To help with your implementation, here's a working version of your code (I am not sure if this is exactly what you wanted though) in TF 1.13.
import numpy as np
from keras.utils import to_categorical
text = np.random.randint(5000, size=(442702, 200), dtype='int32')
topic = np.random.randint(2, size=(442702, 227), dtype='int32')
sentiment1 = to_categorical(np.random.randint(5, size=442702), dtype='int32')
from keras.models import Sequential
from keras.layers import Input, Dense, Activation, Embedding, Flatten, GlobalMaxPool1D, Dropout, Conv1D, Concatenate, Lambda
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from keras.losses import binary_crossentropy
from keras.optimizers import Adam
from keras.backend import cast
from keras.models import Model
text_input = Input(shape=(200,), dtype='int32', name='text')
text_encoded = Embedding(input_dim=5000, output_dim=20, input_length=200)(text_input)
text_encoded = Dropout(0.1)(text_encoded)
text_encoded = Conv1D(300, 3, padding='valid', activation='relu', strides=1)(text_encoded)
text_encoded = GlobalMaxPool1D()(text_encoded)
topic_input = Input(shape=(227,), dtype='int32', name='topic')
topic_float = Lambda(lambda x:cast(x, 'float32'), name='Floatconverter')(topic_input)
concatenated = Concatenate(axis=-1)([text_encoded, topic_float])
sentiment = Dense(5, activation='softmax')(concatenated)
model = Model(inputs=[text_input, topic_input], outputs=sentiment)
# summarize layers
print(model.summary())
Hope these help.

How to pass sequence of image through Conv2D in Keras?

I have a sequence of 5 images that I want to pass through a CNN sequentially. A single input will have size (5, width, height, channels), and I want to pass each image in the sequence, in order, to a 2D CNN, concatenate all 5 outputs at some layer, and then feed them to an LSTM. My model looks something like this:
from keras.models import Model
from keras.layers import Dense, Input, LSTM, Flatten, Conv2D, MaxPooling2D
# Feed images in sequential order here
inputs = Input(shape=(128, 128, 3))
x = Conv2D(16, 3, activation='relu')(inputs)
x = MaxPooling2D((2, 2))(x)
...
# Concatenate sequence outputs here
x = LSTM(8)(x)
x = Flatten()(x)
outputs = Dense(5, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=outputs)
Eventually I want to concatenate all 5 outputs together at some point in the network and feed them to an LSTM but I am having trouble figuring out how to feed sequence of images in order to a 2D convolutional layer. I have looked into 3D convolutional layers and the ConvLSTM2D layer but I want to figure out how I can do it this way instead.
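One common way to do this (not shown in the original question) is Keras' TimeDistributed wrapper, which applies the same Conv2D to each of the 5 frames and keeps the time axis intact for the LSTM. A minimal sketch, with filter and unit counts chosen arbitrarily:
# Apply the same 2D CNN to each frame of the (5, 128, 128, 3) sequence,
# then feed the per-frame feature vectors to an LSTM.
from keras.models import Model
from keras.layers import Dense, Input, LSTM, Flatten, Conv2D, MaxPooling2D, TimeDistributed
inputs = Input(shape=(5, 128, 128, 3))  # (time, height, width, channels)
x = TimeDistributed(Conv2D(16, 3, activation='relu'))(inputs)
x = TimeDistributed(MaxPooling2D((2, 2)))(x)
x = TimeDistributed(Flatten())(x)  # -> (batch, 5, features)
x = LSTM(8)(x)                     # consumes the 5 time steps
outputs = Dense(5, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=outputs)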

Convolutional networks: passing hidden layer weights as input to other model

We built a small CNN using Keras and TensorFlow.
We used Keras's functional API for that.
We're interested in passing the last convolutional layer's weights (the one before the fully connected layers) as an input to another CNN.
For simplicity, I suggest the following simplified code to discuss:
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
visible = Input(shape=(64,64,1))
conv1 = Conv2D(32, kernel_size=4, activation='relu')(visible)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(16, kernel_size=4, activation='relu')(pool1)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
hidden1 = Dense(10, activation='relu')(pool2)
output = Dense(1, activation='sigmoid')(hidden1)
model = Model(inputs=visible, outputs=output)
model.compile(optimizer='Adam',
              loss=['sparse_categorical_crossentropy', None],
              metrics=['accuracy'])
model.fit(train_dataset,
          train_labels,
          epochs=400,
          batch_size=512,
          validation_data=(valid_dataset, valid_labels),
          verbose=1,
          callbacks=[early_stop])
# summarize layers
print(model.summary())
# plot graph
plot_model(model, to_file='convolutional_neural_network.png')
Question is: how can I pass the pool2 layer as an input to some other simple model using Keras, so that it trains simultaneously with the first model described above?
One possible way would be to extend your model so that everything is contained in a single model that ends with 2 branches. The functional API in Keras allows you to define connections between layers however you want, and also provides the infrastructure for having multiple outputs and loss functions.
For example:
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
visible = Input(shape=(64,64,1))
conv1 = Conv2D(32, kernel_size=4, activation='relu')(visible)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(16, kernel_size=4, activation='relu')(pool1)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
#add your second model here
X = FirstLayer()(pool2) #replace with your actual network layer
# ...
output2 = YourSecondOutput()(X)
hidden1 = Dense(10, activation='relu')(pool2)
output = Dense(1, activation='sigmoid')(hidden1)
model = Model(inputs=visible, outputs=[output, output2]) #list of outputs
model.compile(optimizer='Adam',
              loss=['sparse_categorical_crossentropy', None],
              metrics=['accuracy'])
model.fit(train_dataset,
          train_labels,
          epochs=400,
          batch_size=512,
          validation_data=(valid_dataset, valid_labels),
          verbose=1,
          callbacks=[early_stop])
# summarize layers
print(model.summary())
# plot graph
plot_model(model, to_file='convolutional_neural_network.png')
Then you'll just need to update your inputs to fit so that you have labels for each output. You can find more info in the Keras documentation on multi-input and multi-output models.
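For instance, with two outputs, fit would receive one label array per output. A sketch: output2_labels is a hypothetical second label set, and the second loss ('mse' here) is a placeholder for whatever suits the second branch:
# Sketch: training with two outputs needs one loss and one label array
# per output. 'output2_labels' and 'mse' are placeholders.
model.compile(optimizer='Adam',
              loss=['sparse_categorical_crossentropy', 'mse'],
              metrics=['accuracy'])
model.fit(train_dataset,
          [train_labels, output2_labels],  # one label array per output
          epochs=400,
          batch_size=512)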
