I am trying to use Keras Tuner for hyperparameter optimisation:
import keras
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from kerastuner import HyperModel
from kerastuner.tuners import Hyperband
input_shape = (1, 28, 28)
num_classes = 10
# Define hypermodel class
class CNNHyperModel(HyperModel):
    def __init__(self, input_shape, num_classes):
        self.input_shape = input_shape
        self.num_classes = num_classes

    def build(self, hp):
        model = Sequential()
        model.add(Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=self.input_shape))
        model.add(Conv2D(64, (3, 3), activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(128, activation="relu"))
        model.add(Dropout(0.5))
        model.add(Dense(self.num_classes, activation="softmax"))
        model.compile(
            loss=keras.losses.categorical_crossentropy,
            optimizer=keras.optimizers.Adadelta(),
            metrics=["accuracy"],
        )
        return model
# Instantiate
hypermodel = CNNHyperModel(input_shape=input_shape, num_classes=num_classes)
# Create tuner
HYPERBAND_MAX_EPOCHS = 40
EXECUTION_PER_TRIAL = 2
SEED = 1

# Use the Hyperband tuner imported above (RandomSearch was never imported,
# and max_epochs is a Hyperband argument)
tuner = Hyperband(
    hypermodel,
    max_epochs=HYPERBAND_MAX_EPOCHS,
    objective='val_accuracy',
    seed=SEED,
    executions_per_trial=EXECUTION_PER_TRIAL,
    directory='hyperband',
    project_name='mnist'
)
I get
AttributeError: module 'tensorflow._api.v1.keras.metrics' has no attribute 'Metric'
with both TensorFlow 1.13 and 2.0 installed via conda. Adding from tensorflow.python.keras.metrics import Metric, as suggested in this answer, does not change anything.
The latest version of TensorFlow (2.6.0) has the tf.keras.metrics.Metric API. You can import it as:

from tensorflow.keras.metrics import Metric
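As a quick check that your environment is recent enough, a minimal sketch (assuming a TensorFlow >= 2.6 installation):

import tensorflow as tf
from tensorflow.keras.metrics import Metric

print(tf.__version__)  # expect 2.6.0 or later
print(Metric)          # the class imports without an AttributeError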
Related
I am trying to train my model using Keras and TensorFlow. Here is the code where I'm getting the error:
def build_model():
    # define the model, use pre-trained weights for image_net
    base_model = InceptionV3(input_shape=(resized_height, resized_width, num_channel), weights='imagenet', include_top=False, pooling='avg')
    x = base_model.output
    # x = Dense(100, activation='relu')(x)
    # predictions = Dense(6, activation='sigmoid', name='final_classifier')(x)
    # model = Model(inputs=base_model.input, outputs=predictions)
    model = Sequential()
    # model.add(LSTM(1024, return_sequences=False, kernel_initializer='he_normal', dropout=0.15, recurrent_dropout=0.15, implementation=2))
    model = Sequential()
    model.add(Dense(1024, activation='relu', input_shape=(51200,)))(x)
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))(x)
    model.add(Dropout(0.5))
    model.add(Dense(256, activation='relu'))(x)
    model.add(Dropout(0.5))
    model.add(Dense(128, activation='relu'))(x)
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))(x)
    model.add(Dropout(0.5))
    model.add(Dense(6, activation='softmax', name='final_classifier'))(x)
    return model
Build and Run the Model
model = build_model()
model_checkpoint = ModelCheckpoint(weight_file, monitor='val_loss', save_weights_only=False, save_best_only=True)
num_workers = 2
model.compile(optimizer=Adam(lr=initial_lr), loss='categorical_crossentropy', metrics=['accuracy'])
callbacks = [model_checkpoint, reduce_lr_on_plateau, tensor_board]
labels = labels_all
partition = partition_dict
model.fit_generator(generator=DataGenSequence(labels, partition['train'], current_state='train'),
                    steps_per_epoch=100,
                    epochs=200,
                    verbose=1,
                    workers=num_workers,
                    callbacks=callbacks,
                    shuffle=False,
                    # max_queue_size=32,
                    validation_data=DataGenSequence(labels, partition['valid'], current_state='validation'),
                    validation_steps=5
                    )
ERROR
Note: I've been stuck on this error and can't solve it. Thanks in advance to anyone who tries to solve it and shares an answer in the comments.
Please change the code to:
def build_model():
    # define the model, use pre-trained weights for image_net
    base_model = InceptionV3(input_shape=(resized_height, resized_width, num_channel), weights='imagenet', include_top=False, pooling='avg')
    # x = base_model.output
    # x = Dense(100, activation='relu')(x)
    # predictions = Dense(6, activation='sigmoid', name='final_classifier')(x)
    # model = Model(inputs=base_model.input, outputs=predictions)
    # model = Sequential()
    # model.add(LSTM(1024, return_sequences=False, kernel_initializer='he_normal', dropout=0.15, recurrent_dropout=0.15, implementation=2))
    model = Sequential()
    model.add(base_model)
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(6, activation='softmax', name='final_classifier'))
    return model
This is because you cannot call x on a Sequential object's .add(): Sequential().add() does not have a return value, or, speaking in Python terms, it returns None, which is an object of type NoneType. So when you write Sequential().add()(x), you are calling the method .add() of the class Sequential and then trying to call its return value. This does not work, since the return value is not a function but None.
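A minimal standalone demonstration of why that pattern fails (the layer sizes here are arbitrary, chosen just for illustration):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

model = Sequential()
result = model.add(Dense(8, input_shape=(4,)))  # .add() mutates the model and returns None
print(result)  # None
# model.add(Dense(8))(x)  # would raise TypeError: 'NoneType' object is not callable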
I am training a Keras model and it's throwing an error. I replaced Convolution2D with Conv2D, but that doesn't work.
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-99-e85c5751f266> in <module>()
26 model.compile(loss='mse', optimizer=optimizer)
27 return model
---> 28 model = nvidia_model()
29 print(model.summary())
5 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/utils/generic_utils.py in validate_kwargs(kwargs, allowed_kwargs, error_message)
776 for kwarg in kwargs:
777 if kwarg not in allowed_kwargs:
--> 778 raise TypeError(error_message, kwarg)
779
780
TypeError: ('Keyword argument not understood:', 'subsample')
Modified code (I am using Keras 2.2.4 currently):
Defining the nvidia model:
def nvidia_model():
    model = Sequential()
    model.add(Conv2D(24, 5, 5, strides=(2, 2), input_shape=(66, 200, 3), activation='elu'))
    model.add(Conv2D(36, 5, 5, strides=(2, 2), activation='elu'))
    model.add(Conv2D(48, 5, 5, strides=(2, 2), activation='elu'))
    model.add(Conv2D(64, 3, 3, activation='elu'))
    model.add(Conv2D(64, 3, 3, activation='elu'))
    # model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(100, activation='elu'))
    # model.add(Dropout(0.5))
    model.add(Dense(50, activation='elu'))
    # model.add(Dropout(0.5))
    model.add(Dense(10, activation='elu'))
    # model.add(Dropout(0.5))
    model.add(Dense(1))
    optimizer = Adam(lr=1e-3)
    model.compile(loss='mse', optimizer=optimizer)
    return model

model = nvidia_model()
print(model.summary())
The error explicitly says that subsample is not understood. Replace "subsample" with "strides"; in recent versions of Keras the argument is called that way, and the kernel size is passed as a tuple.
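For instance, the old Keras 1 style call maps to the current API like this (a minimal sketch using the standalone keras package, with the layer values taken from the code above):

from keras.models import Sequential
from keras.layers import Conv2D

model = Sequential()
# Keras 1 style, no longer understood in Keras 2.2.4:
# model.add(Convolution2D(24, 5, 5, subsample=(2, 2), input_shape=(66, 200, 3)))
# Keras 2 replacement: kernel size as a tuple, subsample renamed to strides
model.add(Conv2D(24, (5, 5), strides=(2, 2), input_shape=(66, 200, 3), activation='elu'))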
Try this method:
def nvidia_model():
    model = Sequential()
    model.add(Conv2D(24, (5, 5), strides=(2, 2), input_shape=(66, 200, 3), activation='elu'))
    model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='elu'))
    model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='elu'))
    model.add(Conv2D(64, (3, 3), activation='elu'))
    model.add(Conv2D(64, (3, 3), activation='elu'))
    # model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(100, activation='elu'))
    # model.add(Dropout(0.5))
    model.add(Dense(50, activation='elu'))
    # model.add(Dropout(0.5))
    model.add(Dense(10, activation='elu'))
    # model.add(Dropout(0.5))
    model.add(Dense(1))
    optimizer = Adam(lr=1e-3)
    model.compile(loss='mse', optimizer=optimizer)
    return model

model = nvidia_model()
print(model.summary())
Can anyone suggest how to improve the model?
A plain sklearn LinearRegression() predicts temperature with an error of about 1, while the error of the model built manually in TensorFlow won't drop below 5.5, no matter the activation function, the number of layers, or the number of epochs.
The data was both standardized and shifted into positive values.
def createModelG(inputShape, dropout, initW):
    model = Sequential()
    model.add(Dense(4096,
                    kernel_regularizer=keras.regularizers.l2(0.001),
                    activation='elu',
                    kernel_initializer=initW,
                    input_dim=inputShape
                    ))
    model.add(Dropout(dropout))
    # for i in range(3):
    #     model.add(Dense(512, activation='relu'))
    #     model.add(Dropout(dropout))
    model.add(Dense(1024,
                    kernel_regularizer=keras.regularizers.l2(0.001),
                    activation='elu'
                    ))
    model.add(Dropout(dropout))
    model.add(Dense(1))
    model.compile(
        loss='mae',
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.0000005),
        metrics=['mse', 'mae']
    )
    return model

startModelTest = crossValdation(createModelG, trainDataXS, 0.01, 'truncated_normal', 'VancouverT', PrintDot())
modelTest = startModelTest[1]
hist = startModelTest[2]
startModelTest[0]
   loss     mse      mae      val_loss  val_mse  val_mae
0  22.6255  737.889  21.3214  7.32549   55.3201  6.02149
1  21.6446  677.313  20.3387  7.83092   64.0345  6.5251
2  21.1013  646.857  19.7952  7.00224   49.6842  5.69622
3  22.3446  712.008  21.0386  8.07596   68.7968  6.77008
4  24.2565  874.824  22.9531  7.71605   65.3973  6.41274
0  ---      ---      ---      ---       ---      ---
0  22.3945
Link to the full code and results of my Keras model and the off-the-shelf sklearn models:
https://www.kaggle.com/alihanurumov/weather-prediction-network
def createModelG(inputShape):
    model = Sequential()
    model.add(Dense(4096, input_dim=inputShape,
                    kernel_initializer=initializers.glorot_uniform(seed=1),
                    kernel_regularizer=keras.regularizers.l2(0.01), activation="relu"))
    model.add(Dense(2048,
                    kernel_initializer=initializers.glorot_uniform(seed=1), activation="relu"))
    model.add(Dense(2048,
                    kernel_initializer=initializers.glorot_uniform(seed=1), activation="relu"))
    model.add(Dense(1024,
                    kernel_initializer=initializers.glorot_uniform(seed=1), activation="relu"))
    model.add(Dense(1024,
                    kernel_initializer=initializers.glorot_uniform(seed=1), activation="relu"))
    model.add(layers.Dropout(0.05))
    model.add(Dense(1))
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.000001)
    model.compile(loss='mse', optimizer=optimizer, metrics=["mse", "mae"])
    return model
I am trying to change the unit count of the last layer according to the dataset. This is an abstraction of my code, but it's not working:
class cnn_model:
    num_classes = 1

    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(256, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Dense(num_classes, activation='softmax'))

    @staticmethod
    def train_two():
        cnn_mod = cnn_model
        cnn_mod.num_classes = 2
        model = cnn_mod.model

    @staticmethod
    def train_three():
        cnn_mod = cnn_model
        cnn_mod.num_classes = 3
        model = cnn_mod.model
Just pass the number of classes as an argument when you instantiate a new CNNModel:
import tensorflow as tf
from tensorflow.keras import layers

class CNNModel:
    def __init__(self, num_classes=2):
        self.num_classes = num_classes
        self.model = tf.keras.models.Sequential()
        self.model.add(layers.Conv2D(128, (3, 3), activation='relu'))
        self.model.add(layers.MaxPooling2D((2, 2)))
        self.model.add(layers.Conv2D(256, (3, 3), activation='relu'))
        self.model.add(layers.MaxPooling2D((2, 2)))
        self.model.add(layers.Dense(self.num_classes, activation='softmax'))

cnnmodel = CNNModel(num_classes=3)
kerasmodel = cnnmodel.model
print(cnnmodel.num_classes)  # 3
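This way each instance owns its own model and output width, instead of mutating shared class attributes the way the original cnn_model does.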
I would also suggest reading the naming-conventions and indentation sections of PEP 8.
def _build_network(self, vocab_size, maxlen, embedding_dimension=256, hidden_units=256, trainable=False):
    print('Build model...')
    model = Sequential()
    print('Reached here')
    model.add(Embedding(vocab_size, embedding_dimension, input_length=maxlen, embeddings_initializer='glorot_normal'))
    print('embedding done')
    model.add(Convolution1D(hidden_units, 3, kernel_initializer='he_normal', padding='valid', activation='sigmoid',
                            input_shape=(1, maxlen)))
    # model.add(MaxPooling1D(pool_size=3))
    model.add(Convolution1D(hidden_units, 3, kernel_initializer='he_normal', padding='valid', activation='sigmoid',
                            input_shape=(1, maxlen - 2)))
    print('conv1dcomplete')
    # model.add(MaxPooling1D(pool_size=3))
    # model.add(Dropout(0.25))
    model.add(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5, return_sequences=True))
    # print('Reached here')
    model.add(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5))
    model.add(Dense(hidden_units, kernel_initializer='he_normal', activation='sigmoid'))
    model.add(Dense(2))
    model.add(Activation('softmax'))
    adam = Adam(lr=0.0001)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    print('No of parameter:', model.count_params())
    print(model.summary())
    return model
I am using Anaconda Spyder with Python 3.6 and the latest versions of TensorFlow and Keras, running on Windows 7 64-bit. Everything before the line where LSTM is added gets printed, but on reaching that line Python suddenly crashes.