I am trying to train a Robust CNN model, which is defined as follows:
from keras.datasets import cifar10
from keras.utils import np_utils
from keras import metrics
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, LSTM, merge
from keras.layers import BatchNormalization
from keras import metrics
from keras.losses import categorical_crossentropy
from keras.optimizers import SGD
import pickle
import matplotlib.pyplot as plt
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras import layers
from keras.callbacks import EarlyStopping
def Robust_CNN():
    model = Sequential()
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same', init='glorot_uniform', input_shape=(2,128,1)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(1, 2), padding='valid', data_format=None))
    model.add(layers.Dropout(.3))
    model.add(Conv2D(128, (3, 3), activation='relu', init='glorot_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(1, 2), padding='valid', data_format=None))
    model.add(layers.Dropout(.3))
    model.add(Conv2D(64, (3, 3), activation='relu', init='glorot_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(1, 2), padding='valid', data_format=None))
    model.add(layers.Dropout(.3))
    model.add(Conv2D(64, (3, 3), activation='relu', init='glorot_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(1, 2), padding='valid', data_format=None))
    model.add(layers.Dropout(.3))
    model.add(Flatten())
    model.add(Dense(128, activation='relu', init='he_normal'))
    model.add(BatchNormalization())
    model.add(Dense(11, activation='softmax', init='he_normal'))
    return model
However, when I try to do so I receive a NameError saying that the name 'BatchNormalization' is not defined. The complete error message is as follows:
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-11-8084d29438f8> in <module>
55 # >>>>>>>>>>>>>>>>>>>>> choose a model by un-commenting only one of the three <<<<<<<<<<<<<<<<<<<<<<<<<<<
56 #xx_shape = (2,128,1)
---> 57 models = Robust_CNN()
58 #models = CLDNN()
59 #models = resnet(xx_shape)
~\AppData\Local\Programs\Python\Python37\Scripts\FYP\Optimizing-Modulation-Classification-with-Deep-Learning-master\Optimizing-Modulation-Classification-with-Deep-Learning-master\Robust_CNN Model\model.py in Robust_CNN()
19 def Robust_CNN():
20
---> 21 model = Sequential()
22 model.add(Conv2D(256, (3, 3), activation='relu', padding='same', init='glorot_uniform', input_shape=(2,128,1)))
23 model.add(BatchNormalization())
NameError: name 'BatchNormalization' is not defined
I can't figure out why this happens even though I've already imported BatchNormalization.
First import BatchNormalization from tensorflow.keras.layers, then run your code:
from tensorflow.keras.layers import BatchNormalization
Add this to your code:
from tensorflow.keras.layers import BatchNormalization
# import BatchNormalization
from keras.layers.normalization import BatchNormalization
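For reference, here is a minimal sketch of the same imports consolidated under tf.keras (assuming TensorFlow 2.x, which bundles its own Keras); the body of Robust_CNN() itself stays unchanged:
# consolidated tf.keras imports (sketch)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout, BatchNormalization
from tensorflow.keras.optimizers import SGD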
Related
I am getting a TypeError because the added layer, BatchNormalization, is not an instance of the Layer class. I'm unsure why; I've tried to import the layers correctly in several different ways.
My imports are currently:
import copy
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization,Dense, Conv2D, Flatten, Reshape
from tensorflow.keras.layers import Activation
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Input
I use the imports in the following section of code:
model = Sequential()
model.add(Input(shape=(9, 9, 1)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization)
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization)
model.add(Conv2D(128, kernel_size=(1, 1), activation='relu', padding='same'))
model.add(Flatten())
model.add(Dense(81 * 9))
model.add(Reshape((-1, 9)))
model.add(Activation('softmax'))
adam = Adam(lr=.001)
model.compile(loss='sparse_categorical_crossentropy', optimizer=adam)
model.fit(x_train, y_train, batch_size=32, epochs=2)
The error I am getting is:
File "**/train.py", line 24, in <module>
x_train, x_test, y_train, y_test = get_data('sudoku.csv')
File "**/data_preprocess.py", line 124, in get_data
model.add(BatchNormalization)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/tensorflow/python/training/tracking/base.py", line 457, in _method_wrapper
result = method(self, *args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/tensorflow/python/keras/engine/sequential.py", line 180, in add
raise TypeError('The added layer must be '
TypeError: The added layer must be an instance of class Layer. Found: <class 'tensorflow.python.keras.layers.normalization_v2.BatchNormalization'>
I also tried the following but am getting the same error.
Could the error be related to something else in the project, apart from the imports?
You are almost there. BatchNormalization is a class, so you need to instantiate it by adding ():
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
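As a quick sanity check (a small sketch using tf.keras), you can see the difference between the class object and an instance of it:
from tensorflow.keras.layers import BatchNormalization, Layer
print(isinstance(BatchNormalization, Layer))    # False: this is the class object itself
print(isinstance(BatchNormalization(), Layer))  # True: calling it creates a Layer instance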
I have an import problem when executing my code:
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
2021-10-06 22:27:14.064885: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'cudart64_110.dll'; dlerror: cudart64_110.dll not found
2021-10-06 22:27:14.064974: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
Traceback (most recent call last):
File "C:\Data\breast-cancer-classification\train_model.py", line 10, in <module>
from cancernet.cancernet import CancerNet
File "C:\Data\breast-cancer-classification\cancernet\cancernet.py", line 2, in <module>
from keras.layers.normalization import BatchNormalization
ImportError: cannot import name 'BatchNormalization' from 'keras.layers.normalization' (C:\Users\Catalin\AppData\Local\Programs\Python\Python39\lib\site-packages\keras\layers\normalization\__init__.py)
Keras version: 2.6.0
Tensorflow: 2.6.0
Python version: 3.9.7
The libraries were installed with:
pip install numpy opencv-python pillow tensorflow keras imutils scikit-learn matplotlib
Do you have any ideas?
You should import BatchNormalization in the following way:
from tensorflow.keras.layers import BatchNormalization
You're using outdated imports for tf.keras. Layers can now be imported directly from tensorflow.keras.layers:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
BatchNormalization, SeparableConv2D, MaxPooling2D, Activation, Flatten, Dropout, Dense
)
from tensorflow.keras import backend as K
class CancerNet:
    @staticmethod
    def build(width, height, depth, classes):
        model = Sequential()
        shape = (height, width, depth)
        channelDim = -1
        if K.image_data_format() == "channels_first":
            shape = (depth, height, width)
            channelDim = 1

        model.add(SeparableConv2D(32, (3, 3), padding="same", input_shape=shape))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=channelDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(SeparableConv2D(64, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=channelDim))
        model.add(SeparableConv2D(64, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=channelDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(SeparableConv2D(128, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=channelDim))
        model.add(SeparableConv2D(128, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=channelDim))
        model.add(SeparableConv2D(128, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=channelDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(256))
        model.add(Activation("relu"))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(classes))
        model.add(Activation("softmax"))
        return model
model = CancerNet()
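Since build is a @staticmethod, the model is then obtained by calling it directly. A hypothetical usage (the 48x48x3 input size and 2 classes are illustrative values only, not taken from the original post):
model = CancerNet.build(width=48, height=48, depth=3, classes=2)  # example dimensions
model.summary()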
Importing libraries and models:
from __future__ import print_function
import keras
from keras.datasets import mnist
from tensorflow.keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
#from tensorflow.keras.layers import backend as k
batch_size = 128
num_classes = 10
epochs = 12
Below is the code I wrote:
model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), strides=(1,1), activation="relu", input_shape=(28, 28, 1) ))
model.add(Conv2D(32, kernel_size=(3,3), strides=(1,1), activation="relu"))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2) ))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
Below is the TypeError I am facing and cannot resolve:
TypeError Traceback (most recent call last)
<ipython-input-6-6c99a01e13d4> in <module>
7 model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2) ))
8
----> 9 model.add(Dropout(0.5))
10 model.add(Flatten())
TypeError: The added layer must be an instance of class Layer. Found: <keras.layers.core.Dropout object at 0x000001622999A5F8>
Now, how should I solve this type of error? Need help.
Use either keras or tensorflow.keras; don't use both of them.
from __future__ import print_function
from tensorflow import keras
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras import backend as k
batch_size = 128
num_classes = 10
epochs = 12
model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), strides=(1,1), activation="relu", input_shape=(28, 28, 1) ))
model.add(Conv2D(32, kernel_size=(3,3), strides=(1,1), activation="relu"))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2) ))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
The problem is that you created your model using tensorflow.keras but are trying to add layers from the standalone keras package.
TensorFlow ships its own version of Keras, so use only one of them.
Your code runs after fixing your import statements.
Code:
from __future__ import print_function
from tensorflow import keras
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
#from tensorflow.keras.layers import backend as k
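If you want to verify where a given layer class actually comes from (a small diagnostic sketch; the exact module names depend on your TensorFlow/Keras versions), you can inspect its __module__ attribute. Mixing layers from the two packages in one Sequential model is what triggers the "added layer must be an instance of class Layer" error:
from tensorflow.keras.layers import Dropout as TfDropout
from keras.layers import Dropout as StandaloneDropout  # standalone Keras, if installed
print(TfDropout(0.5).__module__)          # module of the tf.keras layer class
print(StandaloneDropout(0.5).__module__)  # module of the standalone keras layer class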
I used two different models for my case.
The task is classification of different types of surface defects.
The input shape is (200, 200, 1), and there are 6 classes.
The number of training samples is 1440 (240 per class), and the number of validation samples is 360 (60 per class).
Training goes very well with the first model: both training loss and validation loss drop very quickly.
After that, I wanted to use MobileNetV2 from Keras to compare the training results. The training loss and accuracy with MobileNetV2 improve, but the validation accuracy is stuck at 0.1667 (and the validation loss is bumpy).
I wonder what causes this result. Can I call this situation 'over-fitting'? Or is this model just too deep for my case?
First model:
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization
import parameter
input_shape = (parameter.IMAGE_SIZE_Y, parameter.IMAGE_SIZE_X, parameter.channel)
def MyModel():
    model = Sequential()
    model.add(Conv2D(16, (3, 3), input_shape = input_shape, padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2)))
    model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2)))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2)))
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2)))
    model.add(Flatten())
    model.add(Dense(256, activation = 'relu'))
    model.add(Dropout(0.5))
    model.add(Dense(6, activation = 'softmax'))
    model.summary()
    return model
Second model:
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization, GlobalAveragePooling2D
from keras.optimizers import Adam
from keras.applications import MobileNetV2
import parameter
def MyMobileNetV2():
    input_shape = (parameter.IMAGE_SIZE_X, parameter.IMAGE_SIZE_Y, parameter.channel)
    model = MobileNetV2(input_shape = input_shape,
                        include_top = False,
                        weights = 'imagenet')

    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = BatchNormalization()(x)
    x = Dense(1280, activation='relu')(x)
    x = BatchNormalization()(x)
    predictions = Dense(6, activation='softmax', kernel_initializer='random_uniform', bias_initializer='zeros')(x)

    model = Model(inputs = model.input, outputs = predictions)

    optimizer = Adam(lr=0.01)
    loss = "categorical_crossentropy"

    for layer in model.layers:
        layer.trainable = True

    model.compile(optimizer=optimizer,
                  loss=loss,
                  metrics=["accuracy"])

    model.summary()
    for i, layer in enumerate(model.layers):
        print(i, layer.name, layer.trainable)

    return model
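Not a diagnosis of this particular run, but one common adjustment when fine-tuning a pretrained backbone on a small dataset is to freeze the MobileNetV2 base at first and use a much smaller learning rate than 0.01. A sketch of that variant (the 1e-4 learning rate and the frozen-base choice are illustrative assumptions, not from the original post):
def MyMobileNetV2_frozen_base():
    input_shape = (parameter.IMAGE_SIZE_X, parameter.IMAGE_SIZE_Y, parameter.channel)
    base = MobileNetV2(input_shape=input_shape, include_top=False, weights='imagenet')
    base.trainable = False  # train only the new classification head first

    x = GlobalAveragePooling2D()(base.output)
    x = BatchNormalization()(x)
    x = Dense(1280, activation='relu')(x)
    x = BatchNormalization()(x)
    predictions = Dense(6, activation='softmax')(x)

    model = Model(inputs=base.input, outputs=predictions)
    model.compile(optimizer=Adam(lr=1e-4),  # illustrative value, much smaller than 0.01
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model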
I have implemented a CNN referring to the AlexNet architecture (https://www.mydatahack.com/building-alexnet-with-keras/) using the Keras library in Python, but I am getting a syntax error at model.add(BatchNormalization()). I am trying to implement the architecture given in the image file (CNN architecture and table description).
The following Python code is what I am using:
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
import numpy as np
import cv2
import os
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from keras.layers.normalization import BatchNormalization
batch_size = 4
num_classes = 123
epochs = 80
model = Sequential()
model.add(Conv2D(filters=96, input_shape=(88, 128, 1), kernel_size=(18, 18), strides=1, activation='relu', padding='valid'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=2)
model.add(BatchNormalization())
model.add(Conv2D(filters=256, kernel_size=(45, 45), strides=1, activation='relu', padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=2)
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(1024))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
How do I resolve this problem?
You're missing a ) at the end of both of your MaxPooling2D lines. Change each strides=2) to strides=2)).
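Because the opening parenthesis of each MaxPooling2D call is never closed, Python cannot report the problem until it reaches the next statement, which is why the SyntaxError appears to point at model.add(BatchNormalization()). The corrected pooling lines look like this:
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(3, 3), strides=2))
model.add(BatchNormalization())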