model.summary() - AttributeError: 'Tensor' object has no attribute 'summary'

These are my imports:
import tensorflow as tf
import keras
from keras.models import Sequential, Model
from keras.layers import Conv2D, Flatten, MaxPooling2D, Dense, Input, Reshape, Concatenate, GlobalAveragePooling2D, BatchNormalization, Dropout, Activation, GlobalMaxPooling2D
from keras.utils import Sequence
I defined this model:
def create_ST_layer(input_shape = (64, 128, 3)):
    input_img = Input(shape=input_shape)
    model = Conv2D(48, kernel_size=(5, 5), input_shape = input_shape, strides = (1, 1), activation = "relu")(input_img)
    model = MaxPooling2D(pool_size=(2, 2), strides = (2, 2))(model)
    model = Conv2D(32, kernel_size=(5, 5), strides = (1, 1), activation = "relu")(model)
    model = MaxPooling2D(pool_size=(2, 2), strides = (2, 2))(model)
    model = Dense(50, activation = "relu")(model)
    model = Dense(6)(model)
    return model
And created the model by:
model = create_ST_layer()
When I now try to get the summary of the model:
model.summary()
I get the following error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-7-5f15418b3570> in <module>()
----> 1 model.summary()
AttributeError: 'Tensor' object has no attribute 'summary'
Is there something wrong with my imports?
Thanks a lot!

I tested this on tensorflow 2.2.0 on Google Colab.
I would change a couple of things to start with. With the newer TensorFlow versions, rather than importing keras directly you should import tensorflow.keras.
So your code would look like this for imports:
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, Flatten, MaxPooling2D, Dense, Input, Reshape, Concatenate, GlobalAveragePooling2D, BatchNormalization, Dropout, Activation, GlobalMaxPooling2D
from tensorflow.keras.utils import Sequence
You also need to call the following line to group the layers into an object with training and inference features (see the Model docs: https://www.tensorflow.org/api_docs/python/tf/keras/Model).
So your complete code would look something like this:
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, Flatten, MaxPooling2D, Dense, Input, Reshape, Concatenate, GlobalAveragePooling2D, BatchNormalization, Dropout, Activation, GlobalMaxPooling2D
from tensorflow.keras.utils import Sequence
def create_ST_layer(input_shape = (64, 128, 3)):
    input_img = Input(shape=input_shape)
    model = Conv2D(48, kernel_size=(5, 5), input_shape = input_shape, strides = (1, 1), activation = "relu")(input_img)
    model = MaxPooling2D(pool_size=(2, 2), strides = (2, 2))(model)
    model = Conv2D(32, kernel_size=(5, 5), strides = (1, 1), activation = "relu")(model)
    model = MaxPooling2D(pool_size=(2, 2), strides = (2, 2))(model)
    model = Dense(50, activation = "relu")(model)
    model = Dense(6)(model)
    model = tf.keras.Model(inputs=input_img, outputs=model)
    return model
model = create_ST_layer()
model.summary()
I get the following output with your model:
[screenshot: model.summary() output]

Just adding layers one by one like this creates TensorFlow graph tensors, not a model. If you want to create a Keras model, you should either:
add layers to a Sequential model using model.add() (see the sketch below), or
create a Keras model from the graph you built, using keras.models.Model().
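For the 1st method, a minimal Sequential sketch of the same architecture might look like this (my own illustration, not from the original answer):
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense

# Each model.add() registers the layer on the Sequential model directly
model = Sequential()
model.add(Conv2D(48, kernel_size=(5, 5), strides=(1, 1), activation="relu", input_shape=(64, 128, 3)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dense(50, activation="relu"))
model.add(Dense(6))
model.summary()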
Using the 2nd method, you could do:
def create_ST_layer(input_shape = (64, 128, 3)):
    input_img = Input(shape=input_shape)
    model = Conv2D(48, kernel_size=(5, 5), input_shape = input_shape, strides = (1, 1), activation = "relu")(input_img)
    model = MaxPooling2D(pool_size=(2, 2), strides = (2, 2))(model)
    model = Conv2D(32, kernel_size=(5, 5), strides = (1, 1), activation = "relu")(model)
    model = MaxPooling2D(pool_size=(2, 2), strides = (2, 2))(model)
    model = Dense(50, activation = "relu")(model)
    model = Dense(6)(model)
    myModel = Model(input_img, model)
    return myModel
model = create_ST_layer()
model.summary()

Related

'BatchNormalization' is not defined

Trying to train a Robust CNN model which is defined as follows:
from keras.datasets import cifar10
from keras.utils import np_utils
from keras import metrics
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, LSTM, merge
from keras.layers import BatchNormalization
from keras import metrics
from keras.losses import categorical_crossentropy
from keras.optimizers import SGD
import pickle
import matplotlib.pyplot as plt
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras import layers
from keras.callbacks import EarlyStopping
def Robust_CNN():
    model = Sequential()
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same', init='glorot_uniform', input_shape=(2,128,1)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(1, 2), padding='valid', data_format=None))
    model.add(layers.Dropout(.3))
    model.add(Conv2D(128, (3, 3), activation='relu', init='glorot_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(1, 2), padding='valid', data_format=None))
    model.add(layers.Dropout(.3))
    model.add(Conv2D(64, (3, 3), activation='relu', init='glorot_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(1, 2), padding='valid', data_format=None))
    model.add(layers.Dropout(.3))
    model.add(Conv2D(64, (3, 3), activation='relu', init='glorot_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(1, 2), padding='valid', data_format=None))
    model.add(layers.Dropout(.3))
    model.add(Flatten())
    model.add(Dense(128, activation='relu', init='he_normal'))
    model.add(BatchNormalization())
    model.add(Dense(11, activation='softmax', init='he_normal'))
    return model
However, when trying to do so I receive a NameError saying that name 'BatchNormalization' is not defined. The complete error message is as follows:
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-11-8084d29438f8> in <module>
55 # >>>>>>>>>>>>>>>>>>>>> choose a model by un-commenting only one of the three <<<<<<<<<<<<<<<<<<<<<<<<<<<
56 #xx_shape = (2,128,1)
---> 57 models = Robust_CNN()
58 #models = CLDNN()
59 #models = resnet(xx_shape)
~\AppData\Local\Programs\Python\Python37\Scripts\FYP\Optimizing-Modulation-Classification-with-Deep-Learning-master\Optimizing-Modulation-Classification-with-Deep-Learning-master\Robust_CNN Model\model.py in Robust_CNN()
19 def Robust_CNN():
20
---> 21 model = Sequential()
22 model.add(Conv2D(256, (3, 3), activation='relu', padding='same', init='glorot_uniform', input_shape=(2,128,1)))
23 model.add(BatchNormalization())
NameError: name 'BatchNormalization' is not defined
I can't seem to figure out why this happens even though I've already imported BatchNormalization.
First import BatchNormalization from tensorflow.keras.layers, then run your code:
from tensorflow.keras.layers import BatchNormalization
Add this to your code:
from tensorflow.keras.layers import BatchNormalization
# import BatchNormalization
from keras.layers.normalization import BatchNormalization
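A quick sanity check of the fix (my own minimal sketch, assuming TensorFlow 2.x; note that the keras.layers.normalization path above only resolves on older standalone Keras releases):
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.models import Sequential

# If the import resolves, building a one-layer model should succeed
model = Sequential()
model.add(BatchNormalization(input_shape=(2, 128, 1)))
model.summary()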

The validation loss won't improve at the beginning with MobileNetV2 in Keras

I used two different models for my case.
The case is classification of different types of surface defects.
The input shape is (200, 200, 1), and there are 6 classes.
The number of training samples is 1440 (240 per class), and the number of validation samples is 360 (60 per class).
The training process goes very well with the first model. Both training loss and validation loss drop very quickly.
After that, I wanted to use MobileNetV2 from Keras to compare the training results. The training loss and accuracy with MobileNetV2 improve, but the validation accuracy is stuck at 0.1667 (and the loss is bumpy).
I wonder what causes this result. Can I call this situation 'overfitting'? Or is this model just too deep for my case?
First model:
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization
import parameter
input_shape = (parameter.IMAGE_SIZE_Y, parameter.IMAGE_SIZE_X, parameter.channel)
def MyModel():
    model = Sequential()
    model.add(Conv2D(16, (3, 3), input_shape = input_shape, padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2)))
    model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2)))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2)))
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2)))
    model.add(Flatten())
    model.add(Dense(256, activation = 'relu'))
    model.add(Dropout(0.5))
    model.add(Dense(6, activation = 'softmax'))
    model.summary()
    return model
Second model:
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization, GlobalAveragePooling2D
from keras.optimizers import Adam
from keras.applications import MobileNetV2
import parameter
def MyMobileNetV2():
    input_shape = (parameter.IMAGE_SIZE_X, parameter.IMAGE_SIZE_Y, parameter.channel)
    model = MobileNetV2(input_shape = input_shape,
                        include_top = False,
                        weights = 'imagenet')
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = BatchNormalization()(x)
    x = Dense(1280, activation='relu')(x)
    x = BatchNormalization()(x)
    predictions = Dense(6, activation='softmax', kernel_initializer='random_uniform', bias_initializer='zeros')(x)
    model = Model(inputs = model.input, outputs = predictions)
    optimizer = Adam(lr=0.01)
    loss = "categorical_crossentropy"
    for layer in model.layers:
        layer.trainable = True
    model.compile(optimizer=optimizer,
                  loss=loss,
                  metrics=["accuracy"])
    model.summary()
    for i, layer in enumerate(model.layers):
        print(i, layer.name, layer.trainable)
    return model

Error using Colab GPU, while none with CPU

I'm trying some code in Google Colab.
Using CPU it works fine, but when I switch to GPU it shows errors.
Self-contained code:
import numpy as np
import tensorflow as tf
import keras
from keras.layers import Input, BatchNormalization, Activation
from keras.layers import ZeroPadding2D, MaxPooling2D, Dense
from keras.layers import Reshape, Add, Dropout
from keras.layers import Conv2D
from keras.layers import Conv3DTranspose, Conv2DTranspose
from keras.initializers import VarianceScaling
from keras.models import Model
from keras.regularizers import l2
from keras.optimizers import SGD
import sys
# hyperparameters
BATCH_NORM_MOMENTUM = 0.1
BATCH_NORM_EPS = 1e-5
KERNEL_REGULARIZER = 0.0001
batchSize = 4
sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
def step1(input_shape = (3, 256, 256)):
    step = 'step1_'
    X_input = Input(input_shape, name = step + 'input')
    X = Conv2D(64, (7, 7), strides = (2, 2), padding='same', data_format = 'channels_first', kernel_initializer="he_normal", kernel_regularizer=l2(KERNEL_REGULARIZER), name = step+'b1_conv_a')(X_input)
    X = BatchNormalization(axis = 1, momentum=BATCH_NORM_MOMENTUM, epsilon = BATCH_NORM_EPS, name = step+'b1_bn_a')(X)
    X = Activation('relu', name = step+'b1_act_a')(X)
    X = MaxPooling2D((3, 3), strides=(2, 2), data_format='channels_first', padding='same', name = step + 'b1_maxpool2d_a')(X)
    print(X.shape)
    model = Model(inputs = X_input, outputs = X, name='step1')
    return model
step1Model = step1((3,256,256))
Error:
ValueError: Shape must be rank 1 but is rank 0 for 'step1_b1_bn_a/cond/Reshape_4' (op: 'Reshape') with input shapes: [1,64,1,1], [].
Why is there this difference between using CPU and GPU?
This probably has to do with tensorflow and tensorflow-gpu packages on CPU and GPU kernels respectively.
You can bypass it by removing axis=1 from the BatchNormalization layer.
change:
X = BatchNormalization(axis = 1, momentum=BATCH_NORM_MOMENTUM, epsilon = BATCH_NORM_EPS, name = step+'b1_bn_a')(X)
to:
X = BatchNormalization(momentum=BATCH_NORM_MOMENTUM, epsilon = BATCH_NORM_EPS, name = step+'b1_bn_a')(X)
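One caveat worth adding (my own note, not part of the original answer): with data_format='channels_first' the channel axis is 1, so omitting axis=1 makes BatchNormalization fall back to its default of axis=-1 and normalize over the last (width) axis instead of the channels. The model will build, but the normalization statistics are no longer per-channel.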

Importing weights from csv file: Layer weight shape (672, 7) not compatible with provided weight shape (1, 1, 672, 7)

I'm writing a deep learning network in Keras that I previously tested in Matlab. To avoid redoing all the training, I exported the weights and biases of the final layer from Matlab as .csv files, and I want to use them in my network - so my network would just compute the test score from the given weights, instead of doing the whole training.
Code:
import os
os.environ['KERAS_BACKEND'] = 'tensorflow'
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Input
from keras.layers import Convolution2D, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
#from keras import backend as K
from keras.preprocessing import image
print("\n")
print("\n")
trained_weights = []
trained_bias = []
trained_weights = np.loadtxt('Weights.csv', delimiter=';')
trained_bias = np.loadtxt('Bias.csv', delimiter=';')
network_weights = np.array([[trained_weights]])
network_bias = np.array([[trained_bias]])
network_outputs = np.array([['a','c','d','e','f','g','h']])
# Load test image
img = load_img('note_a.png')
note = image.img_to_array(img)
#note = np.expand_dims(note, axis=0)
note = np.reshape(note, [1, 50, 30, 3])
# Model architecture
model = Sequential()
# Layer 1
model.add(Convolution2D(12, (6, 6), batch_size=1, input_shape=(50, 30, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
# Layer 2
model.add(Convolution2D(24, (6, 6), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
# Layer 3
model.add(Convolution2D(48, (6, 6), activation='relu'))
model.add(Flatten())
layer2=Dense(7, weights=[network_weights, network_bias], activation='softmax')
model.add(layer2)
model.summary()
print("\n")
print("\n")
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics = ['accuracy'])
#model.fit((note,network_outputs), batch_size=32, nb_epoch=10, verbose=1)
#score = model.evaluate(note, network_outputs)
I was trying to use
model.set_weights([network_outputs])
but it seems to assign weights only to the first layer of the network, so I just assigned weights to the final layer instead. The result is the following error:
ValueError: Layer weight shape (672, 7) not compatible with provided weight shape (1, 1, 672, 7)
And this is quite confusing to me. How do I end up with 4 dimensions by doing weights=[network_weights, network_bias]? Is it because network_weights has dimensions [1, 672] and network_bias [1, 7], which makes [1, 1, 672, 7]?
And how can I properly resize this weights parameter?
np.array([[trained_weights]]) creates an array out of your data surrounded by 2 extra dimensions, so your final shape is (1, 1, x, y). The same goes for your trained_bias. Passing [network_weights, network_bias] then surrounds your 2 arrays with yet another dimension, which obviously does not match.
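A quick demonstration of where the extra dimensions come from (my own sketch, assuming the Matlab export parses to a (672, 7) weight matrix):
import numpy as np

w = np.ones((672, 7))         # stand-in for trained_weights
print(np.array([[w]]).shape)  # (1, 1, 672, 7) - two extra dimensions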
I think you just need to clean all the unnecessary surroundings by using something like:
weights=[trained_weights, trained_bias]
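In context, the final layer would then be built as (hedged sketch, assuming Weights.csv parses to shape (672, 7) and Bias.csv to shape (7,)):
layer2 = Dense(7, weights=[trained_weights, trained_bias], activation='softmax')
model.add(layer2)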

Is it possible to set keras layer output?

I need to modify the output of the last feature map of my second convolution layer.
Or add an array to my conv layer output, if that's possible.
Below is the Python script I created and an example of the desired change in output.
Thank you for your help!
import numpy as np
from keras import backend as K
from keras import optimizers
from keras.models import Sequential
from keras.layers import Conv2D, Dense

num = 18
m = 11
n = 50
k = 3
np.random.seed(100)
features = np.random.rand(num, m, n, k)
Model:
input_shape=features.shape[1:]
model = Sequential()
model.add(Conv2D(2, kernel_size=(1, 3), strides=(1,1),activation='relu',input_shape=input_shape))
model.add(Conv2D(21, kernel_size=(1, 48), strides=(1,1),padding="valid",activation='relu'))
model.add(Conv2D(1, kernel_size=(1, 1), strides=(1, 1),activation='relu',padding="valid"))
model.add(Dense(1, activation='softmax'))
Adam = optimizers.Adam(lr=0.00003, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(loss='mse',optimizer=Adam)
get_1st_layer_output = K.function([model.layers[0].input],
[model.layers[1].output])
layer_output = get_1st_layer_output([features])
Setting the DESIRED layer_output values (I need to do this at every propagation step):
for i in range(0, 11):
    layer_output[0][0][i][0][20] = 0.1
    print(layer_output[0][0][i][0][20])
I think I would use a concatenation with a constant tensor in that case. Unfortunately, I can't quite get it to work, but I'll share my work anyway to hopefully help you on your way.
import numpy as np
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Concatenate
from keras import optimizers
num=18
m=11
n=50
k=3
np.random.seed(100)
features = np.random.rand(num, m, n, k)
custom_tensor = K.constant(0.1, shape=(11, 48, 1))
input_shape = features.shape[1:]
input = keras.Input(shape=input_shape)
print(K.ndim(input))
layer0 = Conv2D(2, kernel_size=(1, 3), strides=(1,1),activation='relu')(input)
layer0_added = Concatenate(axis=-1)([layer0, custom_tensor])
layer1 = Conv2D(20, kernel_size=(1, 48), strides=(1,1),padding="valid",activation='relu')(layer0_added)
layer2 = Conv2D(1, kernel_size=(1, 1), strides=(1, 1),activation='relu',padding="valid")(layer1)
layer3 = Dense(1, activation='softmax')(layer2)
model = keras.models.Model(input, layer3)
Adam = optimizers.Adam(lr=0.00003, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(loss='mse', optimizer=Adam)
It produced an error:
ValueError: `Concatenate` layer requires inputs with matching shapes except for the concat axis. Got inputs shapes: [(None, 11, 48, 2), (11, 48, 1)]
But hopefully this helps you along anyway.
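For what it's worth, one way past that shape mismatch (my own untested sketch, not from the original answer) is to build the constant inside a Lambda layer from the incoming tensor, so it automatically carries the batch dimension:
from keras.layers import Lambda

# A 0.1-filled tensor shaped like one channel of layer0: (None, 11, 48, 1)
custom_channel = Lambda(lambda x: 0.1 * K.ones_like(x[:, :, :, :1]))(layer0)
layer0_added = Concatenate(axis=-1)([layer0, custom_channel])
Both inputs to Concatenate then share the leading batch axis, so the shapes (None, 11, 48, 2) and (None, 11, 48, 1) are compatible.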
