Code is not executing but is not showing any errors either - Python

I am developing a machine learning algorithm and my code looks like this:
The directories of the images
import pandas as pd

train_dir = '../input/train_images'
train_labels = pd.read_csv('../input/train.csv')
train_labels['diagnosis'] = train_labels['diagnosis'].astype(str)
train_labels["id_code"] = train_labels["id_code"].apply(lambda x: x + ".png")
test_dir = '../input/test_images'
test_labels = '../input/test.csv'
Preprocessing
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_dataframe(
    train_labels[:],
    directory="../input/train_images",
    x_col='id_code',
    y_col='diagnosis',
    target_size=(150, 150),
    color_mode='rgb',
    class_mode='categorical',
    batch_size=32,
    shuffle=True,
)
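A quick way to sanity-check what the generator yields (a minimal sketch; the expected shapes are inferred from the settings above):

x_batch, y_batch = next(train_generator)
print(x_batch.shape)  # expected: (32, 150, 150, 3) -- one batch of rescaled RGB images
print(y_batch.shape)  # expected: (32, 5) -- one-hot labels, one column per diagnosis class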
The model
from keras import models, layers, optimizers

def get_model():
    model = models.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(5, activation='softmax'))

    # Compile the model
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adam(),
                  metrics=['acc'])
    return model
Training the model using k-fold cross-validation
import numpy as np

k = 4
num_validation_samples = len(train_generator) // k
np.random.shuffle(train_generator)
validation_scores = []
for fold in range(k):
    print('processing fold #:', fold)
    validation_data = train_generator[num_validation_samples * fold : num_validation_samples * (fold + 1)]
    validation_targets = train_labels[num_validation_samples * fold : num_validation_samples * (fold + 1)]
    training_data = np.concatenate([train_generator[:num_validation_samples * fold],
                                    train_generator[num_validation_samples * (fold + 1):]], axis=0)
    training_targets = np.concatenate([train_labels[:num_validation_samples * fold],
                                       train_labels[num_validation_samples * (fold + 1):]], axis=0)
    model = get_model()
    # Run the model
    model.fit_generator(
        training_data,
        training_targets,
        steps_per_epoch=30,
        epochs=30,
        batch_size=20,
        verbose=0)
    # Validate the model
    val_loss, val_acc = model.evaluate(validation_data, validation_targets, verbose=0)
    validation_scores.append(val_loss)
Every part of this code works except the model-training part. The notebook shows the execution symbol, as if the cell is running, but it never finishes. I have waited for hours and nothing happens. I do not understand why. I would appreciate any help.
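Note that a Keras generator is not a NumPy array: shuffling or slicing it does not behave the way it does on arrays (each index access loads a full batch of images from disk), which may explain the apparent hang. A minimal sketch of driving the k-fold split from the labels dataframe instead, building a fresh generator per fold (an illustration of the idea, assuming a Keras version whose fit accepts generators):

from sklearn.model_selection import KFold

kf = KFold(n_splits=4, shuffle=True, random_state=42)
for fold, (train_idx, val_idx) in enumerate(kf.split(train_labels)):
    print('processing fold #:', fold)
    # Fresh generators per fold, built from dataframe slices
    # rather than by slicing the generator itself
    fold_train_gen = train_datagen.flow_from_dataframe(
        train_labels.iloc[train_idx], directory=train_dir,
        x_col='id_code', y_col='diagnosis', target_size=(150, 150),
        class_mode='categorical', batch_size=32)
    fold_val_gen = train_datagen.flow_from_dataframe(
        train_labels.iloc[val_idx], directory=train_dir,
        x_col='id_code', y_col='diagnosis', target_size=(150, 150),
        class_mode='categorical', batch_size=32, shuffle=False)
    model = get_model()
    model.fit(fold_train_gen, validation_data=fold_val_gen, epochs=30, verbose=1)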

Related

Implementing DCGAN in Keras but it is not training properly

I am trying to implement the DCGAN presented in this article. Here are my generator and discriminator:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Conv2D, Conv2DTranspose, Dense, Reshape, Flatten,
                                     Dropout, BatchNormalization, LeakyReLU, ReLU)

ki = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)

def discriminator_model():
    discriminator = Sequential([
        Conv2D(64, (3, 3), strides=(2, 2), padding='same', kernel_initializer=ki,
               input_shape=[64, 64, 3]),  # no BatchNormalization in this layer
        LeakyReLU(alpha=0.2),
        Dropout(0.4),
        Conv2D(64, (3, 3), strides=(2, 2), padding='same', kernel_initializer=ki),
        BatchNormalization(),
        LeakyReLU(alpha=0.2),
        Dropout(0.4),
        Flatten(),
        Dense(1, activation='sigmoid', kernel_initializer=ki)
    ])
    return discriminator
===========================================
noise_shape = 100

def generator_model():
    generator = Sequential([
        Dense(4*4*512, input_shape=[noise_shape]),
        Reshape([4, 4, 512]),
        Conv2DTranspose(256, kernel_size=4, strides=2, padding="same", kernel_initializer=ki),
        BatchNormalization(),
        ReLU(),
        Conv2DTranspose(128, kernel_size=4, strides=2, padding="same", kernel_initializer=ki),
        BatchNormalization(),
        ReLU(),
        Conv2DTranspose(64, kernel_size=4, strides=2, padding="same", kernel_initializer=ki),
        BatchNormalization(),
        ReLU(),
        Conv2DTranspose(3, kernel_size=4, strides=2, padding="same", kernel_initializer=ki,
                        activation='tanh')  # 3 filters, also no BatchNormalization in this layer
    ])
    return generator
Here I have combined these two to build the DCGAN:
# Build the two networks (instantiation implied by the training loop below)
generator = generator_model()
discriminator = discriminator_model()

DCGAN = Sequential([generator, discriminator])
opt = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
discriminator.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
discriminator.trainable = False
DCGAN.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
Then I prepared my batches and tried to train my model. Here is the code:
epochs = 500
batch_size = 128
loss_from_discriminator_model = []
loss_from_generator_model = []
acc_dis = []
acc_gen = []

with tf.device('/gpu:0'):
    for epoch in range(epochs):
        for i in range(images.shape[0] // batch_size):
            # Training the discriminator
            noise = np.random.uniform(-1, 1, size=[batch_size, noise_shape])
            gen_image = generator.predict_on_batch(noise)  # generate fake images
            train_dataset = images[i*batch_size:(i+1)*batch_size]
            train_labels_real = np.ones(shape=(batch_size, 1))  # real-image labels
            discriminator.trainable = True
            d_loss_real, d_acc_real = discriminator.train_on_batch(train_dataset, train_labels_real)  # train on real images
            train_labels_fake = np.zeros(shape=(batch_size, 1))
            d_loss_fake, d_acc_fake = discriminator.train_on_batch(gen_image, train_labels_fake)  # train on fake images

            # Training the generator
            noise = np.random.uniform(-1, 1, size=[batch_size, noise_shape])
            train_label_fake_for_gen_training = np.ones(shape=(batch_size, 1))
            discriminator.trainable = False
            g_loss, g_acc = DCGAN.train_on_batch(noise, train_label_fake_for_gen_training)

            loss_from_discriminator_model.append(d_loss_real + d_loss_fake)
            loss_from_generator_model.append(g_loss)
            acc_dis.append((d_acc_real + d_acc_fake) / 2)
            acc_gen.append(g_acc)
The problem is that my model doesn't seem to learn anything; the accuracy and loss values don't seem rational. Here is a plot of the generator and discriminator loss values during training.
Thanks in advance.

How to reduce the error of a TensorFlow model?

Can anyone suggest how to improve the model?
The standard sklearn LinearRegression() predicts temperature with an error of about 1, while the error of the model built manually in TensorFlow won't drop below 5.5, no matter the activation function, the number of layers, or the number of epochs.
The data was standardized and also shifted into positive values.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout

def createModelG(inputShape, dropout, initW):
    model = Sequential()
    model.add(Dense(4096,
                    kernel_regularizer=keras.regularizers.l2(0.001),
                    activation='elu',
                    kernel_initializer=initW,
                    input_dim=inputShape))
    model.add(Dropout(dropout))
    # for i in range(3):
    #     model.add(Dense(512, activation='relu'))
    #     model.add(Dropout(dropout))
    model.add(Dense(1024,
                    kernel_regularizer=keras.regularizers.l2(0.001),
                    activation='elu'))
    model.add(Dropout(dropout))
    model.add(Dense(1))
    model.compile(
        loss='mae',
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.0000005),
        metrics=['mse', 'mae']
    )
    return model
startModelTest = crossValdation(createModelG, trainDataXS, 0.01, 'truncated_normal', 'VancouverT', PrintDot())
modelTest = startModelTest[1]
hist = startModelTest[2]
startModelTest[0]
     loss     mse      mae      val_loss  val_mse  val_mae
0    22.6255  737.889  21.3214  7.32549   55.3201  6.02149
1    21.6446  677.313  20.3387  7.83092   64.0345  6.5251
2    21.1013  646.857  19.7952  7.00224   49.6842  5.69622
3    22.3446  712.008  21.0386  8.07596   68.7968  6.77008
4    24.2565  874.824  22.9531  7.71605   65.3973  6.41274
0    ---      ---      ---      ---       ---      ---
0    22.3945
Link to all the code and the results of my Keras model and the ready-made sklearn models:
https://www.kaggle.com/alihanurumov/weather-prediction-network
from tensorflow.keras import initializers

def createModelG(inputShape):
    model = Sequential()
    model.add(Dense(4096, input_dim=inputShape,
                    kernel_initializer=initializers.glorot_uniform(seed=1),
                    kernel_regularizer=keras.regularizers.l2(0.01), activation="relu"))
    model.add(Dense(2048,
                    kernel_initializer=initializers.glorot_uniform(seed=1), activation="relu"))
    model.add(Dense(2048,
                    kernel_initializer=initializers.glorot_uniform(seed=1), activation="relu"))
    model.add(Dense(1024,
                    kernel_initializer=initializers.glorot_uniform(seed=1), activation="relu"))
    model.add(Dense(1024,
                    kernel_initializer=initializers.glorot_uniform(seed=1), activation="relu"))
    model.add(Dropout(0.05))
    model.add(Dense(1))
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.000001)
    model.compile(loss='mse', optimizer=optimizer, metrics=["mse", "mae"])
    return model
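A useful baseline to pin down where the gap comes from: a single Dense unit with no activation is exactly linear regression, so a Keras model of that form should in principle approach sklearn's LinearRegression error on the same standardized features. A minimal sketch (trainDataYS is a hypothetical name for the temperature targets):

# One linear unit, no hidden layers: plain linear regression fit by gradient descent
linear = Sequential([Dense(1, input_dim=trainDataXS.shape[1])])
linear.compile(loss='mae',
               optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
               metrics=['mae'])
linear.fit(trainDataXS, trainDataYS, epochs=200, validation_split=0.2, verbose=0)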

How can I improve my accuracy on audio data classification?

I need to classify whether a sound is of a hammer or not.
The dataset contains 8000 .wav audio files (approx. 1 s each) for training and about 2000 for testing, with 2 classes for classification (0 if the sound is not of a hammer, 1 if it is). I tried extracting features (MFCCs with padding) and building my network, but when I fit the model with the extracted data I cannot get over 70.5% accuracy.
What can I change, or what else can I consider, to increase my accuracy? Also, if my approach is flawed, please tell me.
Here is the data extraction part:
import os
import numpy as np
import pandas as pd
import librosa as lb

def get_traing_features(row):
    file_name = os.path.join(os.path.abspath(path_train), str(row[0]))
    audio_train, sfreq_train = lb.load(file_name, res_type='kaiser_fast')
    mfcc = lb.feature.mfcc(y=audio_train, sr=sfreq_train, n_mfcc=40)
    pad_width = max_pad_len - mfcc.shape[1]
    mfccs = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='constant')
    feature = mfccs
    label = row[1]
    return pd.Series([feature, label])

def get_validation(row):
    file_name = os.path.join(os.path.abspath(path_validation), str(row[0]))
    audio_val, sfreq_val = lb.load(file_name, res_type='kaiser_fast')
    mfcc = lb.feature.mfcc(y=audio_val, sr=sfreq_val, n_mfcc=40)
    pad_width = max_pad_len - mfcc.shape[1]
    mfccs = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='constant')
    feature = mfccs
    label = row[1]
    return pd.Series([feature, label])
from tensorflow.keras.utils import to_categorical

train_data = train_data.apply(get_traing_features, axis=1)
train_data.columns = ['feature', 'label']

x = np.array(train_data.feature.tolist())
y = np.array(train_data.label.tolist())
y_binary = to_categorical(y)
x = x.reshape(x.shape[0], numb_rows, numb_cols, num_channels)

test_data = validation.apply(get_validation, axis=1)
test_data.columns = ['feature', 'label']
x_val = np.array(test_data.feature.tolist())
y_val = np.array(test_data.label.tolist())
y_binary_val = to_categorical(y_val)
x_val = x_val.reshape(x_val.shape[0], numb_rows, numb_cols, num_channels)
And here is the CNN:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Conv2D, MaxPooling2D, Dropout, Dense,
                                     GlobalAveragePooling2D)
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.metrics import accuracy_score

numb_rows = 40
numb_cols = 174
num_channels = 1

model = Sequential()
model.add(Conv2D(filters=16, kernel_size=2,
                 input_shape=(numb_rows, numb_cols, num_channels), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=32, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=64, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=128, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(GlobalAveragePooling2D())
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')

checkpointer = ModelCheckpoint(filepath='./saved_models/weights.best.basic_cnn.hdf5',
                               verbose=1, save_best_only=True)
model.fit(x, y_binary, batch_size=10, epochs=200, callbacks=[checkpointer], verbose=1)

y_pred = model.predict_classes(x_val)
acc = accuracy_score(y_val, y_pred)
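One detail worth noting: ModelCheckpoint with save_best_only=True monitors val_loss by default, but the fit call above receives no validation data, so there is no validation metric to compare against. A minimal sketch of wiring in the held-out set already built above:

model.fit(x, y_binary,
          batch_size=10, epochs=200,
          validation_data=(x_val, y_binary_val),
          callbacks=[checkpointer], verbose=1)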

KERAS TUNER: object of type 'HyperParameters' has no len()

Here is my code trying to use Keras Tuner:
datagen = ImageDataGenerator(
    rescale=1.0/255.0,
    zoom_range=[-2, 2],
    width_shift_range=[-25, 25],
    height_shift_range=[-25, 25],
    rotation_range=40,
    shear_range=40,
    horizontal_flip=True,
    vertical_flip=True,
    brightness_range=[0.98, 1.05],
    featurewise_center=True,
    samplewise_center=True,
    # channel_shift_range=1.5,
    # featurewise_center=True,
    # featurewise_std_normalization=True,
    validation_split=0.10)

mean, std = auxfunctions.getMeanStdClassification()
datagen.mean = mean
datagen.std = std

numClasses = 5
width = 240   # diabetic retinopathy 120 120, drRafael 40 40, 96 96
height = 240
input_shape = (width, height, 3)
train_generator = datagen.flow_from_dataframe(
    dataframe=trainLabels,
    directory='./resized_train_cropped',
    x_col="image",
    y_col="level",
    target_size=(240, 240),
    batch_size=16,
    class_mode='categorical',
    color_mode='rgb',  # whether to remove this or not
    subset='training')

validation_generator = datagen.flow_from_dataframe(
    dataframe=trainLabels,
    directory='./resized_train_cropped',
    x_col="image",
    y_col="level",
    target_size=(240, 240),
    batch_size=16,
    class_mode='categorical',
    color_mode='rgb',
    subset='validation')
#----------------------------------------------------------------------------------------
def createBaseNetwork(input_shape):
    weight_decay = 1e-4
    L2_norm = regularizers.l2(weight_decay)

    input = Input(shape=input_shape)
    print(input)
    x = Conv2D(96, (9, 9), activation='relu', name='conv1', kernel_regularizer=L2_norm)(input)
    x = MaxPooling2D((3, 3), name='pool1')(x)
    x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
    x = Conv2D(384, (5, 5), activation='relu', name='conv2', kernel_regularizer=L2_norm)(x)
    x = MaxPooling2D((3, 3), name='pool2')(x)
    x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
    x = Conv2D(384, (3, 3), activation='relu', name='conv3')(x)
    x = Conv2D(384, (3, 3), activation='relu', name='conv4')(x)
    x = Conv2D(256, (3, 3), activation='relu', name='conv5')(x)
    x = MaxPooling2D((3, 3), name='pool3')(x)
    x = Flatten()(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    return Model(input, x)
# ---------------------------------------------------------------------------------
hp = HyperParameters()
baseNetwork = createBaseNetwork(input_shape)
# baseNetwork.load_weights('./ModelWeights2.h5', by_name=True)
for l in baseNetwork.layers:
    l.trainable = True

input_a = Input(shape=input_shape, name='input1')
outLayers = baseNetwork(input_a)
outLayers = Dense(2048, activation='relu', name='fc3')(outLayers)
outLayers = Dropout(0.2)(outLayers)
outLayers = Dense(1024, activation='relu', name='fc4')(outLayers)
outLayers = Dropout(0.2)(outLayers)
outLayers = Dense(hp.Int('input_units', min_value=32, max_value=512), activation='relu', name='fc5')(outLayers)
classifier = Dense(numClasses, activation='softmax', name='predictions')(outLayers)

model = Model(input_a, classifier)
model.summary()

tuner = RandomSearch(
    model,
    objective='val_accuracy',
    max_trials=1,
    executions_per_trial=1,
    directory='./logtunner'
)
tuner.search(
    train_generator,
    validation_data=validation_generator,
    epochs=1,
)
For now I am just trying to use it on the last Dense layer; as you can see, I just want to estimate a good number of neurons with this:
hp.Int('input_units', min_value=32, max_value=512)
But I get an error like this:
ValueError: TypeError: object of type 'HyperParameters' has no len()
I don't know how to solve it; I have spent hours watching videos and tutorials, but I have no idea what is happening.
I also noticed another error message:
This function does not handle the case of the path where all inputs are not already EagerTensors
But I have no idea about that either.
I was having more or less the same error.
If you pay attention to the Keras Tuner tutorial on the TensorFlow website (https://www.tensorflow.org/tutorials/keras/keras_tuner) or on the Keras website, you see the following:
tuner = kt.Hyperband(model_builder,
                     objective='val_accuracy',
                     max_epochs=10,
                     factor=3,
                     directory='my_dir',
                     project_name='intro_to_kt')
The first input to the tuner is the function model_builder, which is declared earlier as:
def model_builder(hp):
    model = keras.Sequential()
    model.add(keras.layers.Flatten(input_shape=(28, 28)))

    # Tune the number of units in the first Dense layer
    # Choose an optimal value between 32 and 512
    hp_units = hp.Int('units', min_value=32, max_value=512, step=32)
    model.add(keras.layers.Dense(units=hp_units, activation='relu'))
    model.add(keras.layers.Dense(10))

    # Tune the learning rate for the optimizer
    # Choose an optimal value from 0.01, 0.001, or 0.0001
    hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])

    model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
                  loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    return model
So all you need to do is reorganize your code to follow the same structure: encapsulate the Keras model and the Keras Tuner HyperParameters object inside a function.
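Applied to the code in the question, a minimal sketch of such a builder function (assuming the imports, createBaseNetwork, input_shape, numClasses, and the generators defined above):

def build_model(hp):
    baseNetwork = createBaseNetwork(input_shape)
    input_a = Input(shape=input_shape, name='input1')
    x = baseNetwork(input_a)
    x = Dense(2048, activation='relu', name='fc3')(x)
    x = Dropout(0.2)(x)
    x = Dense(1024, activation='relu', name='fc4')(x)
    x = Dropout(0.2)(x)
    # The tuned hyperparameter lives inside the builder, where the tuner can drive it
    x = Dense(hp.Int('input_units', min_value=32, max_value=512, step=32),
              activation='relu', name='fc5')(x)
    outputs = Dense(numClasses, activation='softmax', name='predictions')(x)
    model = Model(input_a, outputs)
    # The builder must return a compiled model
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model

tuner = RandomSearch(build_model,
                     objective='val_accuracy',
                     max_trials=1,
                     executions_per_trial=1,
                     directory='./logtunner')
tuner.search(train_generator, validation_data=validation_generator, epochs=1)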
Cheers.

how to print confusion matrix of keras Multi output?

I have a question: I want to print a confusion matrix for my model, which is built with the Keras functional API:
model = Model(inputs=[data_input], outputs=[output_1, output_2])
output_1 = 9 classes
output_2 = 5 classes
My multi-output classification model:
from tensorflow.keras.layers import (Input, Conv1D, MaxPooling1D, LSTM, GRU,
                                     Dense, Activation)
from tensorflow.keras.models import Model

data_input = Input(shape=(trainX.shape[1], trainX.shape[2]))
Conv1 = Conv1D(filters=50, kernel_size=4, padding='valid', activation='relu', strides=1)(data_input)
Conv1 = MaxPooling1D(pool_size=2)(Conv1)
Conv2 = Conv1D(filters=50, kernel_size=4, padding='valid', activation='relu', strides=1)(Conv1)
Conv2 = MaxPooling1D(pool_size=2)(Conv2)
Conv3 = Conv1D(filters=50, kernel_size=4, padding='valid', activation='relu', strides=1)(Conv2)
Conv3 = MaxPooling1D(pool_size=2)(Conv3)

Classification1 = LSTM(128, input_shape=(47, 50), return_sequences=False)(Conv3)
Classification2 = GRU(128, input_shape=(47, 50), return_sequences=False)(Conv3)

activity = Dense(9)(Classification1)
activity = Activation('softmax')(activity)
speed = Dense(5)(Classification2)
speed = Activation('softmax')(speed)

model = Model(inputs=[data_input], outputs=[activity, speed])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())

history = model.fit(trainX, {'activation_1': trainY_Activity, 'activation_2': trainY_Speed},
                    validation_data=(testX, {'activation_1': testY_Activity, 'activation_2': testY_Speed}),
                    epochs=epochs, batch_size=batch_size, verbose=1, shuffle=False)
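A common way to get one confusion matrix per output for a model like this (a minimal sketch using sklearn; it assumes the one-hot test labels passed to fit above):

import numpy as np
from sklearn.metrics import confusion_matrix

# model.predict returns one array per output: (activity, speed)
pred_activity, pred_speed = model.predict(testX)

cm_activity = confusion_matrix(np.argmax(testY_Activity, axis=1),
                               np.argmax(pred_activity, axis=1))  # 9x9
cm_speed = confusion_matrix(np.argmax(testY_Speed, axis=1),
                            np.argmax(pred_speed, axis=1))        # 5x5
print(cm_activity)
print(cm_speed)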
