How to reduce the error of a TensorFlow model? - python

Can anyone suggest how to improve the model?
A plain sklearn LinearRegression() predicts temperature with an error of about 1, while the error of the model built manually in TensorFlow won't drop below 5.5, no matter the activation function, the number of layers, or the number of epochs.
The data was both standardized and transformed into positive values.
def createModelG(inputShape, dropout, initW):
    model = Sequential()
    model.add(Dense(4096,
                    kernel_regularizer=keras.regularizers.l2(0.001),
                    activation='elu',
                    kernel_initializer=initW,
                    input_dim=inputShape))
    model.add(Dropout(dropout))
    # for i in range(3):
    #     model.add(Dense(512, activation='relu'))
    #     model.add(Dropout(dropout))
    model.add(Dense(1024,
                    kernel_regularizer=keras.regularizers.l2(0.001),
                    activation='elu'))
    model.add(Dropout(dropout))
    model.add(Dense(1))
    model.compile(
        loss='mae',
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.0000005),
        metrics=['mse', 'mae']
    )
    return model
startModelTest = crossValdation(createModelG, trainDataXS, 0.01, 'truncated_normal', 'VancouverT', PrintDot())
modelTest = startModelTest[1]
hist = startModelTest[2]
startModelTest[0]
   loss      mse       mae       val_loss  val_mse   val_mae
0  22.6255   737.889   21.3214   7.32549   55.3201   6.02149
1  21.6446   677.313   20.3387   7.83092   64.0345   6.5251
2  21.1013   646.857   19.7952   7.00224   49.6842   5.69622
3  22.3446   712.008   21.0386   8.07596   68.7968   6.77008
4  24.2565   874.824   22.9531   7.71605   65.3973   6.41274
0  ---       ---       ---       ---       ---       ---
0  22.3945
Link to the full code and the results of my Keras model and the ready-made sklearn models:
https://www.kaggle.com/alihanurumov/weather-prediction-network

def createModelG(inputShape):
    model = Sequential()
    model.add(Dense(4096, input_dim=inputShape,
                    kernel_initializer=initializers.glorot_uniform(seed=1),
                    kernel_regularizer=keras.regularizers.l2(0.01), activation="relu"))
    model.add(Dense(2048,
                    kernel_initializer=initializers.glorot_uniform(seed=1), activation="relu"))
    model.add(Dense(2048,
                    kernel_initializer=initializers.glorot_uniform(seed=1), activation="relu"))
    model.add(Dense(1024,
                    kernel_initializer=initializers.glorot_uniform(seed=1), activation="relu"))
    model.add(Dense(1024,
                    kernel_initializer=initializers.glorot_uniform(seed=1), activation="relu"))
    model.add(layers.Dropout(0.05))
    model.add(Dense(1))
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.000001)
    model.compile(loss='mse', optimizer=optimizer, metrics=["mse", "mae"])
    return model
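For comparison, here is a minimal baseline sketch (not part of the original post): both posted models use Adam learning rates of 5e-7 and 1e-6, which are often too small for the loss to move in a reasonable number of epochs, so a much smaller network with a conventional learning rate may be worth trying first. The function name and layer sizes below are illustrative.

# A minimal baseline sketch (illustrative, not the asker's code): a smaller
# network with a standard Adam learning rate for tabular regression.
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Dropout

def create_baseline_model(input_dim):
    model = Sequential([
        Dense(128, activation='relu', input_dim=input_dim),
        Dropout(0.1),
        Dense(64, activation='relu'),
        Dense(1)  # single linear output for temperature
    ])
    model.compile(loss='mae',
                  optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
                  metrics=['mse', 'mae'])
    return model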

Related

Implementing DCGAN in Keras but it is not training properly

I am trying to implement the DCGAN presented in this article. Here are my Generator and Discriminator:
ki = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)

def discriminator_model():
    discriminator = Sequential([
        Conv2D(64, (3, 3), strides=(2, 2), padding='same', kernel_initializer=ki,
               input_shape=[64, 64, 3]),  # no BatchNormalization in this layer
        LeakyReLU(alpha=0.2),
        Dropout(0.4),
        Conv2D(64, (3, 3), strides=(2, 2), padding='same', kernel_initializer=ki),
        BatchNormalization(),
        LeakyReLU(alpha=0.2),
        Dropout(0.4),
        Flatten(),
        Dense(1, activation='sigmoid', kernel_initializer=ki)
    ])
    return discriminator
===========================================
noise_shape = 100

def generator_model():
    generator = Sequential([
        Dense(4*4*512, input_shape=[noise_shape]),
        Reshape([4, 4, 512]),
        Conv2DTranspose(256, kernel_size=4, strides=2, padding="same", kernel_initializer=ki),
        BatchNormalization(),
        ReLU(),
        Conv2DTranspose(128, kernel_size=4, strides=2, padding="same", kernel_initializer=ki),
        BatchNormalization(),
        ReLU(),
        Conv2DTranspose(64, kernel_size=4, strides=2, padding="same", kernel_initializer=ki),
        BatchNormalization(),
        ReLU(),
        Conv2DTranspose(3, kernel_size=4, strides=2, padding="same", kernel_initializer=ki,
                        activation='tanh')  # 3 filters, also no BatchNormalization in this layer
    ])
    return generator
Here I have combined these two to build the DCGAN:
DCGAN = Sequential([generator,discriminator])
opt = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
discriminator.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
discriminator.trainable = False
DCGAN.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
Then I prepared my batches and tried to train my model. Here is the code:
epochs = 500
batch_size = 128
loss_from_discriminator_model = []
loss_from_generator_model = []
acc_dis = []
acc_gen = []

with tf.device('/gpu:0'):
    for epoch in range(epochs):
        for i in range(images.shape[0] // batch_size):
            # Training the discriminator
            noise = np.random.uniform(-1, 1, size=[batch_size, noise_shape])
            gen_image = generator.predict_on_batch(noise)  # generating fake images
            train_dataset = images[i*batch_size:(i+1)*batch_size]
            train_labels_real = np.ones(shape=(batch_size, 1))  # real image labels
            discriminator.trainable = True
            d_loss_real, d_acc_real = discriminator.train_on_batch(train_dataset, train_labels_real)  # training on real images
            train_labels_fake = np.zeros(shape=(batch_size, 1))
            d_loss_fake, d_acc_fake = discriminator.train_on_batch(gen_image, train_labels_fake)  # training on fake images

            # Training the generator
            noise = np.random.uniform(-1, 1, size=[batch_size, noise_shape])
            train_label_fake_for_gen_training = np.ones(shape=(batch_size, 1))
            discriminator.trainable = False
            g_loss, g_acc = DCGAN.train_on_batch(noise, train_label_fake_for_gen_training)

            loss_from_discriminator_model.append(d_loss_real + d_loss_fake)
            loss_from_generator_model.append(g_loss)
            acc_dis.append((d_acc_real + d_acc_fake) / 2)
            acc_gen.append(g_acc)
The problem is that my model doesn't seem to learn anything; the accuracy and loss values don't look rational. Here is a plot of the generator and discriminator loss values during training.
Thanks in advance.
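One detail worth noting (an observation, not a confirmed fix): in tf.keras the value of trainable is captured when compile() is called, so toggling discriminator.trainable inside the training loop without recompiling does not change what the already-compiled models update. The sketch below shows the usual compile order that relies on this, reusing the asker's builder functions and hyperparameters; the separate optimizer instances are an assumption.

# Sketch of the usual Keras GAN compile order (assumes generator_model() and
# discriminator_model() from the question). `trainable` is frozen at compile
# time, so the in-loop toggles in the original code have no effect on training.
import tensorflow as tf
from tensorflow.keras import Sequential

generator = generator_model()
discriminator = discriminator_model()

# Compile the discriminator while it is trainable.
discriminator.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5),
                      loss='binary_crossentropy', metrics=['accuracy'])

# Freeze it, then compile the combined model so DCGAN only updates the generator.
discriminator.trainable = False
DCGAN = Sequential([generator, discriminator])
DCGAN.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5),
              loss='binary_crossentropy', metrics=['accuracy'])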

How to apply Dropout in GridSearchCV

I use the following code to tune the hyperparameters (hidden layers, hidden neurons, batch size, optimizer) of an ANN.
## Part 2 - Tuning the ANN
import time
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense

def build_regressor(hidden_nodes, hidden_layers, optimizer):
    regressor = Sequential()
    regressor.add(Dense(units=hidden_nodes, kernel_initializer='uniform', activation='relu', input_dim=7))
    for layer_size in range(hidden_layers):
        regressor.add(Dense(hidden_nodes, kernel_initializer='uniform', activation='relu'))
    regressor.add(Dense(units=1, kernel_initializer='uniform', activation='linear'))
    regressor.compile(optimizer=optimizer, loss='mse', metrics=['mse'])
    return regressor

regressor = KerasRegressor(build_fn=build_regressor, epochs=100)

# Create a dictionary of tuning parameters
parameters = {'hidden_nodes': list(range(2, 101)), 'hidden_layers': [4, 5, 6, 7],
              'batch_size': [25, 32], 'optimizer': ['adam', 'nadam', 'RMSprop', 'adamax']}
grid_search = GridSearchCV(estimator=regressor, param_grid=parameters,
                           scoring='neg_mean_squared_error', cv=10, n_jobs=4)

start = time.time()
grid_search = grid_search.fit(X_train, y_train)
end = time.time()
elapsed = (end - start) / 3600
Now I want to add a dropout layer after each of the hidden layers, like this:
regressor1 = Sequential()
regressor1.add(Dense(units = 41, kernel_initializer = 'uniform', activation = 'relu', input_dim = 7))
regressor1.add(Dropout(0.1))
regressor1.add(Dense(units = 41, kernel_initializer = 'uniform', activation = 'relu'))
regressor1.add(Dropout(0.1))
regressor1.add(Dense(units = 41, kernel_initializer = 'uniform', activation = 'relu'))
regressor1.add(Dropout(0.1))
regressor1.add(Dense(units = 41, kernel_initializer = 'uniform', activation = 'relu'))
regressor1.add(Dropout(0.1))
regressor1.add(Dense(units = 41, kernel_initializer = 'uniform', activation = 'relu'))
regressor1.add(Dropout(0.1))
regressor1.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'linear'))
regressor1.compile(optimizer = 'nadam', loss = 'mse', metrics = ['mse'])
history = regressor1.fit(X_train, y_train, batch_size = 25, epochs = 500, validation_data = (X_test, y_test), callbacks = [EarlyStopping(patience = 10)])
Is there a way to tune the number of dropout layers (the same as the number of hidden layers) and the dropout rates together with my current code?
Thank you so much in advance.
Here's your solution:
https://machinelearningmastery.com/grid-search-hyperparameters-deep-learning-models-python-keras/
Tip: I would suggest looking for similar models created by other people and playing with the dropout values they used; this will save you a lot of time.
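Following the pattern described in that article, here is a rough sketch of exposing the dropout rate as an argument of build_regressor so GridSearchCV can tune it together with the other hyperparameters; KerasRegressor forwards any param_grid keys that match build_fn arguments to the builder. The grid values below are illustrative, not the asker's full ranges.

# A sketch of tuning the dropout rate alongside the other hyperparameters
# (assumes the same X_train and y_train as in the question; grid values are illustrative).
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import GridSearchCV

def build_regressor(hidden_nodes, hidden_layers, dropout_rate, optimizer):
    regressor = Sequential()
    regressor.add(Dense(units=hidden_nodes, kernel_initializer='uniform',
                        activation='relu', input_dim=7))
    regressor.add(Dropout(dropout_rate))
    for _ in range(hidden_layers):
        regressor.add(Dense(hidden_nodes, kernel_initializer='uniform', activation='relu'))
        regressor.add(Dropout(dropout_rate))  # one dropout layer per hidden layer
    regressor.add(Dense(units=1, kernel_initializer='uniform', activation='linear'))
    regressor.compile(optimizer=optimizer, loss='mse', metrics=['mse'])
    return regressor

regressor = KerasRegressor(build_fn=build_regressor, epochs=100)
parameters = {'hidden_nodes': [32, 41, 64],
              'hidden_layers': [4, 5, 6, 7],
              'dropout_rate': [0.0, 0.1, 0.2],   # tuned together with the rest
              'batch_size': [25, 32],
              'optimizer': ['adam', 'nadam', 'RMSprop', 'adamax']}
grid_search = GridSearchCV(estimator=regressor, param_grid=parameters,
                           scoring='neg_mean_squared_error', cv=10, n_jobs=4)
# grid_search = grid_search.fit(X_train, y_train)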

TypeError: “NoneType” object is not callable in Google Colab

I am trying to train my model using Keras and TensorFlow.
Here is the code where I'm getting the error:
def build_model():
    # define the model, use pre-trained weights for image_net
    base_model = InceptionV3(input_shape=(resized_height, resized_width, num_channel),
                             weights='imagenet', include_top=False, pooling='avg')
    x = base_model.output
    # x = Dense(100, activation='relu')(x)
    # predictions = Dense(6, activation='sigmoid', name='final_classifier')(x)
    # model = Model(inputs=base_model.input, outputs=predictions)
    model = Sequential()
    # model.add(LSTM(1024, return_sequences=False, kernel_initializer='he_normal', dropout=0.15, recurrent_dropout=0.15, implementation=2))
    model = Sequential()
    model.add(Dense(1024, activation='relu', input_shape=(51200,)))(x)
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))(x)
    model.add(Dropout(0.5))
    model.add(Dense(256, activation='relu'))(x)
    model.add(Dropout(0.5))
    model.add(Dense(128, activation='relu'))(x)
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))(x)
    model.add(Dropout(0.5))
    model.add(Dense(6, activation='softmax', name='final_classifier'))(x)
    return model
Build and Run the Model
model = build_model()
model_checkpoint = ModelCheckpoint(weight_file, monitor='val_loss', save_weights_only=False, save_best_only=True)
num_workers = 2
model.compile(optimizer=Adam(lr=initial_lr), loss='categorical_crossentropy', metrics=['accuracy'])
callbacks = [model_checkpoint, reduce_lr_on_plateau, tensor_board]
labels = labels_all
partition = partition_dict
model.fit_generator(generator=DataGenSequence(labels, partition['train'], current_state='train'),
                    steps_per_epoch=100,
                    epochs=200,
                    verbose=1,
                    workers=num_workers,
                    callbacks=callbacks,
                    shuffle=False,
                    # max_queue_size=32,
                    validation_data=DataGenSequence(labels, partition['valid'], current_state='validation'),
                    validation_steps=5)
ERROR
Note: I am stuck on this error and can't solve it. Thanks in advance to anyone who tries to solve it and shares the answer in a comment.
Please change the code to:
def build_model():
    # define the model, use pre-trained weights for image_net
    base_model = InceptionV3(input_shape=(resized_height, resized_width, num_channel),
                             weights='imagenet', include_top=False, pooling='avg')
    # x = base_model.output
    # x = Dense(100, activation='relu')(x)
    # predictions = Dense(6, activation='sigmoid', name='final_classifier')(x)
    # model = Model(inputs=base_model.input, outputs=predictions)
    # model.add(LSTM(1024, return_sequences=False, kernel_initializer='he_normal', dropout=0.15, recurrent_dropout=0.15, implementation=2))
    model = Sequential()
    model.add(base_model)
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(6, activation='softmax', name='final_classifier'))
    return model
This is because you cannot pass x to a Sequential object.
Sequential().add() does not have a return value, or, speaking in Python terms, it returns None, which is an object of type NoneType. So when you call Sequential().add()(x), you are calling the method .add() of the Sequential class and then trying to call its return value. This does not work, since the return value is not a function but None.
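A tiny standalone illustration of that explanation (not the asker's code):

# Sequential.add() mutates the model in place and returns None, so chaining a
# call onto it reproduces the "'NoneType' object is not callable" error.
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

model = Sequential()
result = model.add(Dense(8, activation='relu', input_shape=(4,)))
print(result)   # prints: None
# result(some_tensor)  # would raise TypeError: 'NoneType' object is not callable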

Code is not executing but is not showing any errors either

I am developing a machine learning algorithm and my code looks like this:
The directories of the images
train_dir = '../input/train_images'
train_labels = pd.read_csv('../input/train.csv')
train_labels['diagnosis'] = train_labels['diagnosis'].astype(str)
train_labels["id_code"]=train_labels["id_code"].apply(lambda x:x+".png")
test_dir = '../input/test_images'
test_labels = '../input/test.csv'
Preprocessing
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_dataframe(
    train_labels[:],
    directory="../input/train_images",
    x_col='id_code', y_col='diagnosis',
    target_size=(150, 150),
    color_mode='rgb',
    class_mode='categorical',
    batch_size=32,
    shuffle=True)
The model
def get_model():
    model = models.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(5, activation='softmax'))
    # Compile the model
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adam(),
                  metrics=['acc'])
    return model
Training of the model using k-fold cross-validation
k = 4
num_validation_samples = len(train_generator) // k
np.random.shuffle(train_generator)
validation_scores = []
for fold in range(k):
    print('processing fold #:', fold)
    validation_data = train_generator[num_validation_samples * fold: num_validation_samples * (fold + 1)]
    validation_targets = train_labels[num_validation_samples * fold: num_validation_samples * (fold + 1)]
    training_data = np.concatenate([train_generator[:num_validation_samples * fold],
                                    train_generator[num_validation_samples * (fold + 1):]], axis=0)
    training_targets = np.concatenate([train_labels[:num_validation_samples * fold],
                                       train_labels[num_validation_samples * (fold + 1):]], axis=0)
    model = get_model()
    # Run the model
    model.fit_generator(
        training_data,
        training_targets,
        steps_per_epoch=30,
        epochs=30,
        batch_size=20,
        verbose=0)
    # Validate the model
    val_loss, val_acc = model.evaluate(validation_data, validation_targets, verbose=0)
    validation_scores.append(val_loss)
Every part of this code works except the model-training part. It shows the execution symbol, as if it were running, but it never finishes. I have waited for hours, but nothing happens. I do not understand why. I would appreciate any help.
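For reference, here is a hedged sketch (an assumption about the intended workflow, not a confirmed diagnosis of the hang) of how k-fold cross-validation is commonly structured with flow_from_dataframe: the labels DataFrame is split per fold and a fresh generator is built for each split, rather than shuffling and slicing the generator object itself. It reuses train_labels and get_model from the code above.

# Sketch: k-fold CV by splitting the labels DataFrame and building one
# generator per fold (reuses train_labels and get_model from the question).
from sklearn.model_selection import KFold
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1./255)
kf = KFold(n_splits=4, shuffle=True, random_state=42)
validation_scores = []

for fold, (train_idx, val_idx) in enumerate(kf.split(train_labels)):
    print('processing fold #:', fold)
    train_gen = datagen.flow_from_dataframe(
        train_labels.iloc[train_idx], directory="../input/train_images",
        x_col='id_code', y_col='diagnosis', target_size=(150, 150),
        class_mode='categorical', batch_size=32, shuffle=True)
    val_gen = datagen.flow_from_dataframe(
        train_labels.iloc[val_idx], directory="../input/train_images",
        x_col='id_code', y_col='diagnosis', target_size=(150, 150),
        class_mode='categorical', batch_size=32, shuffle=False)
    model = get_model()
    model.fit_generator(train_gen, steps_per_epoch=30, epochs=30, verbose=1,
                        validation_data=val_gen, validation_steps=len(val_gen))
    val_loss, val_acc = model.evaluate_generator(val_gen, steps=len(val_gen))
    validation_scores.append(val_loss)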

how to print confusion matrix of keras Multi output?

I have one question: I want to print a confusion matrix.
My model is built with the Keras functional API:
model = Model(inputs=[data_input], outputs=[output_1, output_2])
where output_1 has 9 classes and output_2 has 5 classes.
My multi-classification model:
data_input = Input(shape=(trainX.shape[1], trainX.shape[2]))
Conv1 = Conv1D(filters=50, kernel_size=4, padding='valid', activation='relu', strides=1)(data_input)
Conv1 = MaxPooling1D(pool_size=2)(Conv1)
Conv2 = Conv1D(filters=50, kernel_size=4, padding='valid', activation='relu', strides=1)(Conv1)
Conv2 = MaxPooling1D(pool_size=2)(Conv2)
Conv3 = Conv1D(filters=50, kernel_size=4, padding='valid', activation='relu', strides=1)(Conv2)
Conv3 = MaxPooling1D(pool_size=2)(Conv3)
Classification1 = LSTM(128, input_shape=(47, 50), return_sequences=False)(Conv3)
Classification2 = GRU(128, input_shape=(47, 50), return_sequences=False)(Conv3)
activity = Dense(9)(Classification1)
activity = Activation('softmax')(activity)
speed = Dense(5)(Classification2)
speed = Activation('softmax')(speed)
model = Model(inputs=[data_input], outputs=[activity, speed])
model.compile(loss= 'categorical_crossentropy' , optimizer='adam', metrics=[ 'accuracy' ])
print(model.summary())
history = model.fit(trainX, {'activation_1': trainY_Activity, 'activation_2': trainY_Speed},
                    validation_data=(testX, {'activation_1': testY_Activity, 'activation_2': testY_Speed}),
                    epochs=epochs, batch_size=batch_size, verbose=1, shuffle=False)
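A minimal sketch of one common approach (assuming the asker's model, testX, testY_Activity and testY_Speed): model.predict on a two-output model returns a list of two arrays, so a confusion matrix can be computed per output head after converting the softmax/one-hot vectors back to class indices.

# Sketch: one confusion matrix per output head of the multi-output model.
import numpy as np
from sklearn.metrics import confusion_matrix

pred_activity, pred_speed = model.predict(testX)   # one prediction array per output

cm_activity = confusion_matrix(np.argmax(testY_Activity, axis=1),
                               np.argmax(pred_activity, axis=1))   # 9x9 matrix
cm_speed = confusion_matrix(np.argmax(testY_Speed, axis=1),
                            np.argmax(pred_speed, axis=1))         # 5x5 matrix

print(cm_activity)
print(cm_speed)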
