Below is the code I'm trying to run to create a neural network model that predicts damping ratios and added mass coefficients. The model has 3 inputs (float numbers) and 4 outputs (float numbers).
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.optimizers import SGD
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Concatenate
from sklearn.model_selection import train_test_split
from Reasearch_project_Code import *
"""Create model"""
Input1 = Input(shape=(1,), name="Input1")
Input2 = Input(shape=(1,), name="Input2")
Input3 = Input(shape=(1,), name="Input3")
Inputs = Concatenate(axis=-1,name="Inputs")([Input1,Input2,Input3])
hidden1 = Dense(100, activation='relu',name="Hidden1")(Inputs)
hidden2 = Dense(100, activation='relu',name="Hidden2")(hidden1)
# Output layer
output1 = Dense(1, activation='linear',name="Output_1")(hidden2)
output2 = Dense(1, activation='linear',name="Output_2")(hidden2)
output3 = Dense(1, activation='linear',name="Output_3")(hidden2)
output4 = Dense(1, activation='linear',name="Output_4")(hidden2)
# output
model = Model(inputs=[Input1,Input2,Input3], outputs=[output1, output2, output3, output4])
# summarize layers
print(model.summary())
opt = SGD(0.001)
model.compile(loss='mean_squared_error', optimizer=opt)
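# note: with four output layers and a single loss string, Keras applies
# mean_squared_error to each output and minimizes the sum of the four losses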
"""Load Data"""
dpath="C:\\Users\\jules\\OneDrive - University of Southampton\\Documents\\Research Project\\5- Codes"
R=RP(dpath)
data="Data/Test3/Test3.csv"
labelss=np.genfromtxt(data,skip_header=1,usecols=(1,2,3),delimiter=",")
sampless=np.genfromtxt(data,skip_header=1,usecols=(4,5,6,7),delimiter=",")
"""scaled data"""
scaler=MinMaxScaler(feature_range=(0,1))
samples = scaler.fit_transform(sampless)
labels = scaler.fit_transform(labelss)
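# note: reusing one MinMaxScaler refits it on labelss, so it can no longer
# inverse_transform the samples afterwards; two separate scaler instances are safer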
"""split data"""
labels_train, labels_test, samples_train, samples_test = train_test_split(labels, samples, train_size=0.9, random_state=42)
print(f"labels_train:{labels_train.shape}")
print(f"labels_test:{labels_test.shape}")
print(f"samples_train:{samples_train.shape}")
print(f"samples_test:{samples_test.shape}")
history = model.fit(labels_train, samples_train, validation_data=(labels_test, samples_test), epochs=200,batch_size=20, verbose=1)
Shape of the data:
labels_train:(6753, 3)
labels_test:(751, 3)
samples_train:(6753, 4)
samples_test:(751, 4)
Model summary:
Model: "functional_115"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
Input1 (InputLayer) [(None, 1)] 0
__________________________________________________________________________________________________
Input2 (InputLayer) [(None, 1)] 0
__________________________________________________________________________________________________
Input3 (InputLayer) [(None, 1)] 0
__________________________________________________________________________________________________
Inputs (Concatenate) (None, 3) 0 Input1[0][0]
Input2[0][0]
Input3[0][0]
__________________________________________________________________________________________________
Hidden1 (Dense) (None, 100) 400 Inputs[0][0]
__________________________________________________________________________________________________
Hidden2 (Dense) (None, 100) 10100 Hidden1[0][0]
__________________________________________________________________________________________________
Output_1 (Dense) (None, 1) 101 Hidden2[0][0]
__________________________________________________________________________________________________
Output_2 (Dense) (None, 1) 101 Hidden2[0][0]
__________________________________________________________________________________________________
Output_3 (Dense) (None, 1) 101 Hidden2[0][0]
__________________________________________________________________________________________________
Output_4 (Dense) (None, 1) 101 Hidden2[0][0]
==================================================================================================
Total params: 10,904
Trainable params: 10,904
Non-trainable params: 0
__________________________________________________________________________________________________
Assertion error:
AssertionError: in user code:
C:\Users\jules\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:806 train_function *
return step_function(self, iterator)
C:\Users\jules\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:796 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
C:\Users\jules\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1211 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
C:\Users\jules\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2585 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
C:\Users\jules\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2945 _call_for_each_replica
return fn(*args, **kwargs)
C:\Users\jules\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:789 run_step **
outputs = model.train_step(data)
C:\Users\jules\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:747 train_step
y_pred = self(x, training=True)
C:\Users\jules\anaconda3\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:985 __call__
outputs = call_fn(inputs, *args, **kwargs)
C:\Users\jules\anaconda3\lib\site-packages\tensorflow\python\keras\engine\functional.py:385 call
return self._run_internal_graph(
C:\Users\jules\anaconda3\lib\site-packages\tensorflow\python\keras\engine\functional.py:517 _run_internal_graph
assert x_id in tensor_dict, 'Could not compute output ' + str(x)
AssertionError: Could not compute output Tensor("Output_1/BiasAdd_57:0", shape=(None, 1), dtype=float32)
I'm a beginner and I don't understand where the error comes from, because my inputs and outputs seem to have the correct dimensions.
Previously, instead of using 3 input layers, I used a single one with a shape of 3:
Inputs = Input(shape=(3,), name="Inputs")
But this gave me terrible predictions (negative R²).
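(For reference, with three separate Input layers, model.fit expects a list of three arrays, one per input, and with four output layers a list of four target arrays. A minimal sketch of such a call, reusing the variable names above and keeping each column 2-D:)
history = model.fit(
    [labels_train[:, 0:1], labels_train[:, 1:2], labels_train[:, 2:3]],
    [samples_train[:, 0:1], samples_train[:, 1:2], samples_train[:, 2:3], samples_train[:, 3:4]],
    validation_data=(
        [labels_test[:, 0:1], labels_test[:, 1:2], labels_test[:, 2:3]],
        [samples_test[:, 0:1], samples_test[:, 1:2], samples_test[:, 2:3], samples_test[:, 3:4]]),
    epochs=200, batch_size=20, verbose=1)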
Related
I am working on customizing a layer to use in my model.
The core part is the call function:
class Custom_Layer(Layer):
    # some code
    def call(self, inputs, **kwargs):
        kernel = mul(self.base, self.diag_start - self.diag_end)
        outputs = matmul(a=inputs, b=kernel)
        if self.use_bias:
            outputs = tf.nn.bias_add(outputs, self.bias)
        if self.activation is not None:
            outputs = self.activation(outputs)
        return outputs
    # some code
and it is used in a simple model.
inputs = tf.keras.layers.Input(shape=(784,),dtype='float32')
layer1 = Custom_Layer(2000, **Custom_layer_config, activation='tanh')(inputs)
layer2 = Custom_Layer(200, **Custom_layer_config, activation='tanh')(layer1)
output_lay = Custom_Layer(10, **Custom_layer_config, activation='softmax')(layer2)
model = tf.keras.models.Model(inputs=inputs, outputs=output_lay)
opt = tf.keras.optimizers.Adamax(learning_rate=0.02)
model.compile(optimizer=opt,
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
It is supposed to print like this:
Model: "functional_13"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_8 (InputLayer) [(None, 784)] 0
_________________________________________________________________
CustomLayer_18 (Custom_Layer) (None, 2000) 1570784
_________________________________________________________________
CustomLayer_19 (Custom_Layer) (None, 200) 402200
_________________________________________________________________
CustomLayer_20 (Custom_Layer) (None, 10) 2210
=================================================================
Total params: 1,975,194
Trainable params: 5,194
Non-trainable params: 1,970,000
_________________________________________________________________
But it prints this:
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, 784)] 0
tf.linalg.matmul_3 (TFOpLam (None, 2000) 0
bda)
tf.math.tanh_2 (TFOpLambda) (None, 2000) 0
tf.linalg.matmul_4 (TFOpLam (None, 200) 0
bda)
tf.math.tanh_3 (TFOpLambda) (None, 200) 0
tf.linalg.matmul_5 (TFOpLam (None, 10) 0
bda)
tf.compat.v1.nn.softmax_1 ( (None, 10) 0
TFOpLambda)
=================================================================
Total params: 0
Trainable params: 0
Non-trainable params: 0
The first summary is what I got from the author's repository, and the second summary is from my run of the same code without changing anything.
The code is not complex, but it is strange that there are no parameters at all.
My question is: what is wrong here?
Try making it an inherited class, as in this example.
Sample: custom LSTM class
import tensorflow as tf
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Class / Definition
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class MyLSTMLayer( tf.keras.layers.LSTM ):
    def __init__(self, units, return_sequences, return_state):
        super(MyLSTMLayer, self).__init__( units, return_sequences=True, return_state=False )
        self.num_units = units

    def build(self, input_shape):
        self.kernel = self.add_weight("kernel",
                                      shape=[int(input_shape[-1]), self.num_units])

    def call(self, inputs):
        return tf.matmul(inputs, self.kernel)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
start = 3
limit = 12
delta = 3
sample = tf.range( start, limit, delta )
sample = tf.cast( sample, dtype=tf.float32 )
sample = tf.constant( sample, shape=( 1, 1, 3 ) )
layer = MyLSTMLayer( 3, True, False )
model = tf.keras.Sequential([
    tf.keras.Input(shape=(1, 3)),
    layer,
])
model.summary()
print( sample )
print( model.predict(sample) )
Output:
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
my_lstm_layer (MyLSTMLayer) (None, 1, 3) 9
=================================================================
Total params: 9
Trainable params: 9
Non-trainable params: 0
_________________________________________________________________
tf.Tensor([[[3. 6. 9.]]], shape=(1, 1, 3), dtype=float32)
1/1 [==============================] - 1s 575ms/step
[[[-2.8894916 -2.146874 13.688236 ]]]
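The point the sample illustrates is that weights created with add_weight (usually in build) are tracked by Keras and counted in model.summary(), while tensors created ad hoc in call only show up as TFOpLambda layers with zero parameters. A minimal sketch of the question's layer rewritten that way — the class body, shapes, and the non-trainable base are assumptions, not the author's actual code:
import tensorflow as tf

class TrackedLayer(tf.keras.layers.Layer):
    def __init__(self, units, activation=None, use_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.activation = tf.keras.activations.get(activation)
        self.use_bias = use_bias

    def build(self, input_shape):
        # add_weight registers the variables with the layer, so they appear
        # in the parameter counts of model.summary()
        dim = int(input_shape[-1])
        self.base = self.add_weight("base", shape=(dim, self.units), trainable=False)
        self.diag_start = self.add_weight("diag_start", shape=(dim, self.units))
        self.diag_end = self.add_weight("diag_end", shape=(dim, self.units))
        if self.use_bias:
            self.bias = self.add_weight("bias", shape=(self.units,))

    def call(self, inputs):
        # same computation as the question's call, but with tracked weights
        kernel = self.base * (self.diag_start - self.diag_end)
        outputs = tf.matmul(inputs, kernel)
        if self.use_bias:
            outputs = tf.nn.bias_add(outputs, self.bias)
        if self.activation is not None:
            outputs = self.activation(outputs)
        return outputs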
I wrote the following model function:
from tensorflow.keras.layers import Dense, LSTM, Dropout, Input, BatchNormalization
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
import tensorflow_addons as tfa
import tensorflow as tf
def get_model(num_features, output_size, output_bias=None):
    output_bias = tf.keras.initializers.Constant(output_bias)
    opt = Adam(learning_rate=0.0008)
    inputs = Input(shape=[None, num_features], dtype=tf.float32, ragged=True)
    layers = LSTM(32, activation='tanh')(
        inputs.to_tensor(), mask=tf.sequence_mask(inputs.row_lengths()))
    layers = BatchNormalization()(layers)
    layers = Dropout(0.05)(layers)
    layers = Dense(32, activation='relu')(layers)
    layers = BatchNormalization()(layers)
    layers = Dropout(0.05)(layers)
    layers = Dense(32, activation='relu')(layers)
    layers = BatchNormalization()(layers)
    layers = Dropout(0.05)(layers)
    layers = Dense(output_size, activation='sigmoid',
                   bias_initializer=output_bias)(layers)
    model = Model(inputs, layers)
    model.compile(loss=tf.keras.losses.BinaryCrossentropy(), optimizer=opt,
                  metrics=[tfa.metrics.F1Score(num_classes=2)])
    model.summary()
    return model
Here is the model summary:
Model: "model_5"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_6 (InputLayer) [(None, None, 11)] 0
__________________________________________________________________________________________________
input.row_lengths_5 (InstanceMe (None,) 0 input_6[0][0]
__________________________________________________________________________________________________
input.to_tensor_5 (InstanceMeth (None, None, 11) 0 input_6[0][0]
__________________________________________________________________________________________________
tf.sequence_mask_5 (TFOpLambda) (None, None) 0 input.row_lengths_5[0][0]
__________________________________________________________________________________________________
lstm_5 (LSTM) (None, 32) 5632 input.to_tensor_5[0][0]
tf.sequence_mask_5[0][0]
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 32) 128 lstm_5[0][0]
__________________________________________________________________________________________________
dropout_15 (Dropout) (None, 32) 0 batch_normalization_15[0][0]
__________________________________________________________________________________________________
dense_15 (Dense) (None, 32) 1056 dropout_15[0][0]
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 32) 128 dense_15[0][0]
__________________________________________________________________________________________________
dropout_16 (Dropout) (None, 32) 0 batch_normalization_16[0][0]
__________________________________________________________________________________________________
dense_16 (Dense) (None, 32) 1056 dropout_16[0][0]
__________________________________________________________________________________________________
batch_normalization_17 (BatchNo (None, 32) 128 dense_16[0][0]
__________________________________________________________________________________________________
dropout_17 (Dropout) (None, 32) 0 batch_normalization_17[0][0]
__________________________________________________________________________________________________
dense_17 (Dense) (None, 1) 33 dropout_17[0][0]
==================================================================================================
Total params: 8,161
Trainable params: 7,969
Non-trainable params: 192
__________________________________________________________________________________________________
And here are the shapes of my data:
print(train_x.shape,train_y.shape)
print(val_x.shape,val_y.shape)
(52499, None, 11) (52499,)
(17500, None, 11) (17500,)
When trying to fit my model, I get the following error:
model.fit(train_x, train_y, epochs=300, batch_size=500, validation_data=(val_x, val_y))
ValueError: Dimension 0 in both shapes must be equal, but are 2 and 1. Shapes are [2] and [1].
I can't understand what is wrong with the shapes.
Your model seems fine. The problem is that you are running into an open issue with the tfa.metrics.F1Score. For your binary case, you will have to change the parameters of the F1Score to tfa.metrics.F1Score(num_classes=1, threshold=0.5). Here is a complete working example:
from tensorflow.keras.layers import Dense, LSTM, Dropout, Input, BatchNormalization
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
import tensorflow_addons as tfa
import tensorflow as tf
def get_model(num_features, output_size, output_bias=0.001):
    output_bias = tf.keras.initializers.Constant(output_bias)
    opt = Adam(learning_rate=0.0008)
    inputs = Input(shape=[None, num_features], dtype=tf.float32, ragged=True)
    layers = LSTM(32, activation='tanh')(
        inputs.to_tensor(), mask=tf.sequence_mask(inputs.row_lengths()))
    layers = BatchNormalization()(layers)
    layers = Dropout(0.05)(layers)
    layers = Dense(32, activation='relu')(layers)
    layers = BatchNormalization()(layers)
    layers = Dropout(0.05)(layers)
    layers = Dense(32, activation='relu')(layers)
    layers = BatchNormalization()(layers)
    layers = Dropout(0.05)(layers)
    layers = Dense(output_size, activation='sigmoid',
                   bias_initializer=output_bias)(layers)
    model = Model(inputs, layers)
    model.compile(loss=tf.keras.losses.BinaryCrossentropy(), optimizer=opt,
                  metrics=[tfa.metrics.F1Score(num_classes=1, threshold=0.5)])
    model.summary()
    return model
model = get_model(11, 1)
rt = tf.RaggedTensor.from_row_splits(values=tf.ones([5, 11], tf.int32),
row_splits=[0, 2, 5])
model.fit(rt, tf.random.uniform((2,1), maxval=2), epochs=300, batch_size=2, verbose=2)
Alternatively, you can define your own F1 score metric and set it as a metric in your model. See this post for more information.
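If you go the custom-metric route, here is a minimal sketch of a batch-wise binary F1 metric (the name f1_metric and the 0.5 threshold are assumptions, not part of tensorflow_addons):
import tensorflow as tf

def f1_metric(y_true, y_pred):
    # threshold the sigmoid outputs, then compute precision and recall per batch
    y_true = tf.cast(y_true, tf.float32)
    y_pred = tf.cast(y_pred > 0.5, tf.float32)
    tp = tf.reduce_sum(y_true * y_pred)
    precision = tp / (tf.reduce_sum(y_pred) + tf.keras.backend.epsilon())
    recall = tp / (tf.reduce_sum(y_true) + tf.keras.backend.epsilon())
    return 2 * precision * recall / (precision + recall + tf.keras.backend.epsilon())
Pass it via metrics=[f1_metric] in model.compile instead of the tfa.metrics.F1Score instance.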
I'm trying to train a DC-CNN model for text classification on a given dataset.
What am I doing wrong here?
Code for Model:
def define_model(length, vocab_size):
    # channel 1
    inputs1 = Input(shape=(length,))
    embedding1 = Embedding(vocab_size, 100)(inputs1)
    conv1 = Conv1D(filters=32, kernel_size=4, activation='relu')(embedding1)
    drop1 = Dropout(0.5)(conv1)
    pool1 = MaxPooling1D(pool_size=1)(drop1)
    flat1 = Flatten()(pool1)
    # channel 2
    inputs2 = Input(shape=(length,))
    embedding2 = Embedding(vocab_size, 100)(inputs2)
    conv2 = Conv1D(filters=32, kernel_size=6, activation='relu')(embedding2)
    drop2 = Dropout(0.5)(conv2)
    pool2 = MaxPooling1D(pool_size=1)(drop2)
    flat2 = Flatten()(pool2)
    merged = concatenate([flat1, flat2])
    # interpretation
    dense1 = Dense(10, activation='relu')(merged)
    outputs = Dense(1, activation='sigmoid')(dense1)
    model = Model(inputs=[inputs1, inputs2], outputs=outputs)
    # compile
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    # summarize
    print(model.summary())
    return model
model = define_model(length, vocab_size)
model.fit([trainX], array(trainLabels), epochs=10, batch_size=16)
I am getting this error:
AssertionError: in user code:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:806 train_function *
return step_function(self, iterator)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:796 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:1211 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:2585 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:2945 _call_for_each_replica
return fn(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:789 run_step **
outputs = model.train_step(data)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:747 train_step
y_pred = self(x, training=True)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py:985 __call__
outputs = call_fn(inputs, *args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/functional.py:386 call
inputs, training=training, mask=mask)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/functional.py:517 _run_internal_graph
assert x_id in tensor_dict, 'Could not compute output ' + str(x)
AssertionError: Could not compute output Tensor("dense_17/Sigmoid:0", shape=(None, 1), dtype=float32)
I have tried to reshape the inputs "trainX" and "trainLabels" using the code below, but I got the same error:
trainX=np.reshape(trainX,(40, 50))
trainLabels=np.reshape(trainLabels,(40, 1))
This is the summary of the model:
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_17 (InputLayer) [(None, 20)] 0
__________________________________________________________________________________________________
input_18 (InputLayer) [(None, 20)] 0
__________________________________________________________________________________________________
embedding_16 (Embedding) (None, 20, 100) 541100 input_17[0][0]
__________________________________________________________________________________________________
embedding_17 (Embedding) (None, 20, 100) 541100 input_18[0][0]
__________________________________________________________________________________________________
conv1d_16 (Conv1D) (None, 17, 32) 12832 embedding_16[0][0]
__________________________________________________________________________________________________
conv1d_17 (Conv1D) (None, 15, 32) 19232 embedding_17[0][0]
__________________________________________________________________________________________________
dropout_16 (Dropout) (None, 17, 32) 0 conv1d_16[0][0]
__________________________________________________________________________________________________
dropout_17 (Dropout) (None, 15, 32) 0 conv1d_17[0][0]
__________________________________________________________________________________________________
max_pooling1d_16 (MaxPooling1D) (None, 17, 32) 0 dropout_16[0][0]
__________________________________________________________________________________________________
max_pooling1d_17 (MaxPooling1D) (None, 15, 32) 0 dropout_17[0][0]
__________________________________________________________________________________________________
flatten_16 (Flatten) (None, 544) 0 max_pooling1d_16[0][0]
__________________________________________________________________________________________________
flatten_17 (Flatten) (None, 480) 0 max_pooling1d_17[0][0]
__________________________________________________________________________________________________
concatenate_8 (Concatenate) (None, 1024) 0 flatten_16[0][0]
flatten_17[0][0]
__________________________________________________________________________________________________
dense_16 (Dense) (None, 10) 10250 concatenate_8[0][0]
__________________________________________________________________________________________________
dense_17 (Dense) (None, 1) 11 dense_16[0][0]
==================================================================================================
Total params: 1,124,525
Trainable params: 1,124,525
Non-trainable params: 0
How can I fix this error, please?
Since your Keras model has 2 inputs, you have to split your trainX into 2 different arrays (or a tuple of 2 arrays); you cannot give a single array as input.
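A minimal sketch of the corrected call, assuming both channels are meant to see the same token sequences (the usual multichannel CNN setup):
model.fit([trainX, trainX], array(trainLabels), epochs=10, batch_size=16)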
I have a subclass of tf.keras.Model; the code is as follows:
import tensorflow as tf
class Mymodel(tf.keras.Model):
    def __init__(self, classes, backbone_model, *args, **kwargs):
        super(Mymodel, self).__init__(self, args, kwargs)
        self.backbone = backbone_model
        self.classify_layer = tf.keras.layers.Dense(classes, activation='sigmoid')

    def call(self, inputs):
        x = self.backbone(inputs)
        x = self.classify_layer(x)
        return x
inputs = tf.keras.Input(shape=(224, 224, 3))
model = Mymodel(inputs=inputs, classes=61,
backbone_model=tf.keras.applications.MobileNet())
model.build(input_shape=(20, 224, 224, 3))
model.summary()
The result is:
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
mobilenet_1.00_224 (Model) (None, 1000) 4253864
_________________________________________________________________
dense (Dense) multiple 61061
=================================================================
Total params: 4,314,925
Trainable params: 4,293,037
Non-trainable params: 21,888
_________________________________________________________________
But I want to see all the layers of MobileNet, so I tried to extract all the layers of MobileNet and put them in the model:
import tensorflow as tf
class Mymodel(tf.keras.Model):
    def __init__(self, classes, backbone_model, *args, **kwargs):
        super(Mymodel, self).__init__(self, args, kwargs)
        self.backbone = backbone_model
        self.classify_layer = tf.keras.layers.Dense(classes, activation='sigmoid')

    def my_process_layers(self, inputs):
        layers = self.backbone.layers
        tmp_x = inputs
        for i in range(1, len(layers)):
            tmp_x = layers[i](tmp_x)
        return tmp_x

    def call(self, inputs):
        x = self.my_process_layers(inputs)
        x = self.classify_layer(x)
        return x
inputs = tf.keras.Input(shape=(224, 224, 3))
model = Mymodel(inputs=inputs, classes=61,
backbone_model=tf.keras.applications.MobileNet())
model.build(input_shape=(20, 224, 224, 3))
model.summary()
Then the result did not change:
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
mobilenet_1.00_224 (Model) (None, 1000) 4253864
_________________________________________________________________
dense (Dense) multiple 61061
=================================================================
Total params: 4,314,925
Trainable params: 4,293,037
Non-trainable params: 21,888
_________________________________________________________________
Then I tried to extract one layer and insert it into the model:
import tensorflow as tf
class Mymodel(tf.keras.Model):
    def __init__(self, classes, backbone_model, *args, **kwargs):
        super(Mymodel, self).__init__(self, args, kwargs)
        self.backbone = backbone_model
        self.classify_layer = tf.keras.layers.Dense(classes, activation='sigmoid')

    def call(self, inputs):
        x = self.backbone.layers[1](inputs)
        x = self.classify_layer(x)
        return x
inputs = tf.keras.Input(shape=(224, 224, 3))
model = Mymodel(inputs=inputs, classes=61,
backbone_model=tf.keras.applications.MobileNet())
model.build(input_shape=(20, 224, 224, 3))
model.summary()
It did not change either. I am so confused.
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
mobilenet_1.00_224 (Model) (None, 1000) 4253864
_________________________________________________________________
dense (Dense) multiple 244
=================================================================
Total params: 4,254,108
Trainable params: 4,232,220
Non-trainable params: 21,888
_________________________________________________________________
But I find that the parameter count of the dense layer changed; I don't know what happened.
@Ioannis's answer is perfectly fine, but unfortunately it drops the Keras 'Model Subclassing' structure that is present in the question. If, just like me, you want to keep this model subclassing and still show all layers in the summary, you can branch down into all the individual layers of the more complex model using a for loop:
class MyMobileNet(tf.keras.Sequential):
    def __init__(self, input_shape=(224, 224, 3), classes=61):
        super(MyMobileNet, self).__init__()
        self.backbone_model = [layer for layer in
                               tf.keras.applications.MobileNet(input_shape, include_top=False, pooling='avg').layers]
        self.classificator = tf.keras.layers.Dense(classes, activation='sigmoid', name='classificator')

    def call(self, inputs):
        x = inputs
        for layer in self.backbone_model:
            x = layer(x)
        x = self.classificator(x)
        return x
model = MyMobileNet()
After this we can directly build the model and call the summary:
model.build(input_shape=(None, 224, 224, 3))
model.summary()
Model: "my_mobile_net"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv1_pad (ZeroPadding2D) (None, 225, 225, 3) 0
_________________________________________________________________
conv1 (Conv2D) (None, 112, 112, 32) 864
_________________________________________________________________
conv1_bn (BatchNormalization (None, 112, 112, 32) 128
_________________________________________________________________
....
....
conv_pw_13 (Conv2D) (None, 7, 7, 1024) 1048576
_________________________________________________________________
conv_pw_13_bn (BatchNormaliz (None, 7, 7, 1024) 4096
_________________________________________________________________
conv_pw_13_relu (ReLU) (None, 7, 7, 1024) 0
_________________________________________________________________
global_average_pooling2d_13 (None, 1024) 0
_________________________________________________________________
classificator (Dense) multiple 62525
=================================================================
Total params: 3,291,389
Trainable params: 3,269,501
Non-trainable params: 21,888
_________________________________________________________________
In order to be able to view the backbone's layers, you'll have to construct your new model using backbone.input and backbone.output:
from tensorflow.keras.models import Model
def Mymodel(backbone_model, classes):
    backbone = backbone_model
    x = backbone.output
    x = tf.keras.layers.Dense(classes, activation='sigmoid')(x)
    model = Model(inputs=backbone.input, outputs=x)
    return model
input_shape = (224, 224, 3)
model = Mymodel(backbone_model=tf.keras.applications.MobileNet(input_shape=input_shape, include_top=False, pooling='avg'),
classes=61)
model.summary()
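With include_top=False and pooling='avg', the backbone's output is already a flat (None, 1024) feature vector (see the global average pooling line in the earlier summary), so the Dense classifier can be attached directly to backbone.output.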
There is an expand_nested argument in the summary method:
model.summary(expand_nested=True)
Alternatively, you can print a separate summary for each nested sub-model (plain layers have no summary method, so guard for that):
for layer in model.layers:
    if isinstance(layer, tf.keras.Model):
        layer.summary()
Hi, I am trying to do multi-class classification using an embedding layer and stacking Conv1D with a bidirectional LSTM. Here is my script:
embed_dim = 100
lstm_out = 128
max_features = 5000
model8 = Sequential()
model8.add(Embedding(max_features, embed_dim, input_length = X.shape[1]))
model8.add(Dropout(0.2))
model8.add(Conv1D(filters=100, kernel_size=3, padding='same', activation='relu'))
model8.add(MaxPooling1D(pool_size=2))
model8.add(Bidirectional(LSTM(lstm_out)))
model8.add(Dense(124,activation='softmax'))
model8.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
print model8.summary()
I got the error message below:
TypeErrorTraceback (most recent call last)
<ipython-input-51-6c831fc4581f> in <module>()
9 model8.add(Embedding(max_features, embed_dim))
10 model8.add(Dropout(0.2))
---> 11 model8.add(Conv1D(filters=100, kernel_size=3, padding='same', activation='relu'))
12 model8.add(MaxPooling1D(pool_size=2))
13 model8.add(Bidirectional(LSTM(lstm_out)))
/jupyter/local/lib/python2.7/site-packages/tensorflow/python/training/checkpointable/base.pyc in _method_wrapper(self, *args, **kwargs)
362 self._setattr_tracking = False # pylint: disable=protected-access
363 try:
--> 364 method(self, *args, **kwargs)
365 finally:
366 self._setattr_tracking = previous_value # pylint: disable=protected-access
/jupyter/local/lib/python2.7/site-packages/tensorflow/python/keras/engine/sequential.pyc in add(self, layer)
128 raise TypeError('The added layer must be '
129 'an instance of class Layer. '
--> 130 'Found: ' + str(layer))
131 self.built = False
132 if not self._layers:
TypeError: The added layer must be an instance of class Layer. Found: <keras.layers.convolutional.Conv1D object at 0x7f62907f8590>
What did I do wrong? Thanks!
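The usual cause of this error is mixing layers from the standalone keras package with a model from tensorflow.keras (the traceback shows a tensorflow.python.keras Sequential rejecting a keras.layers Conv1D); importing everything from the same package fixes it: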
from keras.layers import Dense, Embedding, Dropout, LSTM
from keras.models import Sequential
from keras.layers import Bidirectional
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
embed_dim = 100
lstm_out = 128
max_features = 5000
model8 = Sequential()
model8.add(Embedding(max_features, embed_dim, input_length = X.shape[1]))
model8.add(Dropout(0.2))
model8.add(Conv1D(filters=100, kernel_size=3, padding='same', activation='relu'))
model8.add(MaxPooling1D(pool_size=2))
model8.add(Bidirectional(LSTM(lstm_out)))
model8.add(Dense(124,activation='softmax'))
model8.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model8.summary())
This prints the model summary without any error:
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_8 (Embedding) (None, 100, 100) 500000
_________________________________________________________________
dropout_5 (Dropout) (None, 100, 100) 0
_________________________________________________________________
conv1d_3 (Conv1D) (None, 100, 100) 30100
_________________________________________________________________
max_pooling1d_3 (MaxPooling1 (None, 50, 100) 0
_________________________________________________________________
bidirectional_7 (Bidirection (None, 256) 234496
_________________________________________________________________
dense_7 (Dense) (None, 124) 31868
=================================================================
Total params: 796,464
Trainable params: 796,464
Non-trainable params: 0
_________________________________________________________________
None