I'm trying to learn how machine learning works, and I started with a simple stock price prediction program. I've spent days trying to narrow down the issue, but my research and progress have come to a halt. My problem is that the accuracy does not increase, and the val_accuracy does not change either. I've cut my dataset down to a smaller set to observe the behavior, and of course it still doesn't change.
I have tried switching the loss and the activation, and I've tried a number of changes to how I prepare the data, but I don't understand what is going on. This is on a single stock ticker for now (I'm eventually trying to use one model for the top 100 stocks).
My layers/model:
def createModel(X_train):
    '''
    Build and compile the LSTM model.
    '''
    # Model
    model = Sequential()
    model.add(LSTM(512, activation = 'relu', return_sequences = True, input_shape = (X_train.shape[1:])))
    model.add(Dropout(0.3))
    model.add(LSTM(512, activation = 'relu', return_sequences = True))
    model.add(Dropout(0.3))
    model.add(LSTM(256, activation = 'relu', return_sequences = True))
    model.add(Dropout(0.3))
    model.add(LSTM(128, activation = 'relu', return_sequences = False))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation = 'sigmoid'))
    # print(model.summary())
    # opt = tf.keras.optimizers.Adam(learning_rate = 0.01)
    model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
    return model
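Note: the cuDNN warnings that appear in the output further down come from activation = 'relu'. Keras LSTM layers only take the fast cuDNN path on GPU when they keep the default activations; this is unrelated to the stuck accuracy, just the reason for the fallback warnings. A minimal sketch of a cuDNN-eligible layer:

import tensorflow as tf

# Default activation='tanh' and recurrent_activation='sigmoid' keep the
# layer eligible for the fast cuDNN kernel on GPU; 'relu' forces the
# slower generic kernel and triggers the warnings seen in the log.
layer = tf.keras.layers.LSTM(512, return_sequences=True)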
I have the following code snippets to read in and prepare the data:
if filename.endswith('.csv'):
    data = pd.read_csv(filename)
    # Clean up file name to extract ticker
    filename = filename.replace('.csv', '')
    data = data.drop(['Dividends', 'Stock Splits'], axis = 1)
    data['Date'] = list(map(convertDateToNumber, data['Date']))
    data.set_index('Date', inplace = True)
    # Shift for a new column to do calculations on, then drop the shifted column after
    data['Per Change'] = data['Open'].shift(1)
    data['Percent Change'] = list(map(calculatePercentChange, data['Open'], data['Close']))
    data['Class'] = list(map(classify, data['Open'], data['Close']))
    # Drop the unnecessary headers now...
    data = data.drop('Per Change', axis = 1)
    data.fillna(method = "ffill", inplace = True)
    data.dropna(inplace = True)

    trainingData = int(len(data) * 0.75)
    training_data = data.head(trainingData).values.tolist()
    training_data = scaler.fit_transform(training_data)
    testingData = int(len(data) * 0.25)
    testing_data = data.tail(testingData).values.tolist()
    testing_data = scaler.fit_transform(testing_data)

    X_train = []
    y_train = []
    for i in range(training_data.shape[0]):
        X_train.append(training_data[i])
        y_train.append(training_data[i, 2])
    X_train, y_train = np.array(X_train), np.array(y_train)
    X_train = X_train.reshape(X_train.shape[0], 1, X_train.shape[1])
    # y_train = y_train.reshape(y_train.shape[0], 1)

    # Test Data
    X_test = []
    y_test = []
    for i in range(testing_data.shape[0]):
        X_test.append(testing_data[i])
        y_test.append(testing_data[i, 2])
    X_test, y_test = np.array(X_test), np.array(y_test)
    X_test = X_test.reshape(X_test.shape[0], 1, X_test.shape[1])
    # y_test = y_test.reshape(y_test.shape[0], 1)

    # Create the model
    model = createModel(X_train)

    # Evaluate the model
    print('')
    loss, acc = model.evaluate(X_test, y_test)
    print("\n---------- Untrained model, accuracy: {:5.2f}% ----------\n".format(100 * acc))

    if os.path.isdir(modelPath.replace('data model.h5', '')):
        try:
            model = tf.keras.models.load_model(modelPath, compile = True)
            # Re-evaluate the model
            loss, acc = model.evaluate(X_test, y_test)
            print("\n---------- Restored model, accuracy: {:5.2f}% ----------\n".format(100 * acc))
        except:
            pass

    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir = log_dir, histogram_freq = 1)
    model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath = checkpointPath, save_weights_only = True, monitor = 'accuracy', mode = 'max', save_freq = 5)

    # fit() trains the model
    model.fit(X_train, y_train, validation_data = (X_test, y_test), shuffle = True, epochs = 50, batch_size = 500, callbacks = [tensorboard_callback, model_checkpoint_callback])
    model.save(modelPath)

    # Predict and plot the results
    y_pred = model.predict(X_test)
    scale = 1 / scaler.scale_[0]
    y_test = y_test * scale
    y_pred = y_pred * scale
    plt.plot(y_test, color = 'blue', label = '{} Real Stock Price'.format(filename + ' ' + companyNameToTicker[filename]))
    plt.plot(y_pred, color = 'red', label = '{} Predicted Stock Price'.format(filename + ' ' + companyNameToTicker[filename]))
    plt.title('{} Stock Price Prediction'.format(filename + ' ' + companyNameToTicker[filename]))
    plt.xlabel('Time')
    plt.ylabel('{} Stock Prediction'.format(filename + ' ' + companyNameToTicker[filename]))
    plt.legend()
    # plt.ion()
    # plt.pause(0.05)
    plt.show()
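A small aside on the ModelCheckpoint above: an integer save_freq counts batches, not epochs, so save_freq = 5 saves every 5 batches; this is also the likely source of the "on_train_batch_end is slow" warnings in the log below. A sketch of the epoch-based alternative (same arguments otherwise):

model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath = checkpointPath,
    save_weights_only = True,
    monitor = 'accuracy',
    mode = 'max',
    save_freq = 'epoch')  # save once per epoch instead of every 5 batches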
Here is the output the above code produces...
1/1 [==============================] - ETA: 0s - loss: 0.5488 - accuracy: 0.0476WARNING:tensorflow:Method (on_train_batch_end) is slow compared to the batch update (10.187773). Check your callbacks.
1/1 [==============================] - 1s 822ms/step - loss: 0.5488 - accuracy: 0.0476 - val_loss: 0.4729 - val_accuracy: 0.1429
Epoch 3/50
1/1 [==============================] - 1s 517ms/step - loss: 0.5472 - accuracy: 0.0476 - val_loss: 0.4725 - val_accuracy: 0.1429
Epoch 4/50
1/1 [==============================] - 0s 485ms/step - loss: 0.5476 - accuracy: 0.0476 - val_loss: 0.4723 - val_accuracy: 0.1429
Epoch 5/50
1/1 [==============================] - ETA: 0s - loss: 0.5484 - accuracy: 0.0476WARNING:tensorflow:Method (on_train_batch_end) is slow compared to the batch update (0.438490). Check your callbacks.
1/1 [==============================] - 1s 507ms/step - loss: 0.5484 - accuracy: 0.0476 - val_loss: 0.4725 - val_accuracy: 0.1429
Epoch 6/50
1/1 [==============================] - 1s 527ms/step - loss: 0.5476 - accuracy: 0.0476 - val_loss: 0.4732 - val_accuracy: 0.1429
Epoch 7/50
1/1 [==============================] - 0s 413ms/step - loss: 0.5481 - accuracy: 0.0476 - val_loss: 0.4738 - val_accuracy: 0.1429
Epoch 8/50
1/1 [==============================] - 0s 491ms/step - loss: 0.5475 - accuracy: 0.0476 - val_loss: 0.4743 - val_accuracy: 0.1429
Epoch 9/50
1/1 [==============================] - 0s 408ms/step - loss: 0.5479 - accuracy: 0.0476 - val_loss: 0.4748 - val_accuracy: 0.1429
Epoch 10/50
1/1 [==============================] - ETA: 0s - loss: 0.5478 - accuracy: 0.0476WARNING:tensorflow:Method (on_train_batch_end) is slow compared to the batch update (0.434556). Check your callbacks.
1/1 [==============================] - 0s 482ms/step - loss: 0.5478 - accuracy: 0.0476 - val_loss: 0.4751 - val_accuracy: 0.1429
Epoch 11/50
1/1 [==============================] - 1s 535ms/step - loss: 0.5475 - accuracy: 0.0476 - val_loss: 0.4754 - val_accuracy: 0.1429
Epoch 12/50
1/1 [==============================] - 0s 408ms/step - loss: 0.5485 - accuracy: 0.0476 - val_loss: 0.4758 - val_accuracy: 0.1429
Epoch 13/50
1/1 [==============================] - 0s 392ms/step - loss: 0.5487 - accuracy: 0.0476 - val_loss: 0.4764 - val_accuracy: 0.1429
Epoch 14/50
1/1 [==============================] - 0s 460ms/step - loss: 0.5488 - accuracy: 0.0476 - val_loss: 0.4768 - val_accuracy: 0.1429
Epoch 15/50
1/1 [==============================] - ETA: 0s - loss: 0.5486 - accuracy: 0.0476WARNING:tensorflow:Method (on_train_batch_end) is slow compared to the batch update (0.388608). Check your callbacks.
1/1 [==============================] - 0s 397ms/step - loss: 0.5486 - accuracy: 0.0476 - val_loss: 0.4770 - val_accuracy: 0.1429
Epoch 16/50
1/1 [==============================] - 1s 573ms/step - loss: 0.5475 - accuracy: 0.0476 - val_loss: 0.4770 - val_accuracy: 0.1429
Epoch 17/50
1/1 [==============================] - 0s 456ms/step - loss: 0.5479 - accuracy: 0.0476 - val_loss: 0.4766 - val_accuracy: 0.1429
Epoch 18/50
1/1 [==============================] - 0s 392ms/step - loss: 0.5476 - accuracy: 0.0476 - val_loss: 0.4763 - val_accuracy: 0.1429
Epoch 19/50
1/1 [==============================] - 0s 404ms/step - loss: 0.5479 - accuracy: 0.0476 - val_loss: 0.4760 - val_accuracy: 0.1429
Epoch 20/50
1/1 [==============================] - ETA: 0s - loss: 0.5479 - accuracy: 0.0476WARNING:tensorflow:Method (on_train_batch_end) is slow compared to the batch update (0.362628). Check your callbacks.
1/1 [==============================] - 0s 452ms/step - loss: 0.5479 - accuracy: 0.0476 - val_loss: 0.4758 - val_accuracy: 0.1429
Epoch 21/50
1/1 [==============================] - 0s 473ms/step - loss: 0.5476 - accuracy: 0.0476 - val_loss: 0.4753 - val_accuracy: 0.1429
Epoch 22/50
1/1 [==============================] - 0s 428ms/step - loss: 0.5496 - accuracy: 0.0476 - val_loss: 0.4744 - val_accuracy: 0.1429
Epoch 23/50
1/1 [==============================] - 1s 584ms/step - loss: 0.5475 - accuracy: 0.0476 - val_loss: 0.4741 - val_accuracy: 0.1429
Epoch 24/50
1/1 [==============================] - 0s 446ms/step - loss: 0.5478 - accuracy: 0.0476 - val_loss: 0.4743 - val_accuracy: 0.1429
Epoch 25/50
1/1 [==============================] - ETA: 0s - loss: 0.5476 - accuracy: 0.0476WARNING:tensorflow:Method (on_train_batch_end) is slow compared to the batch update (0.530422). Check your callbacks.
1/1 [==============================] - 1s 646ms/step - loss: 0.5476 - accuracy: 0.0476 - val_loss: 0.4746 - val_accuracy: 0.1429
Epoch 26/50
1/1 [==============================] - 1s 506ms/step - loss: 0.5487 - accuracy: 0.0476 - val_loss: 0.4756 - val_accuracy: 0.1429
Epoch 27/50
1/1 [==============================] - 0s 413ms/step - loss: 0.5482 - accuracy: 0.0476 - val_loss: 0.4765 - val_accuracy: 0.1429
Epoch 28/50
1/1 [==============================] - 0s 382ms/step - loss: 0.5481 - accuracy: 0.0476 - val_loss: 0.4772 - val_accuracy: 0.1429
Epoch 29/50
1/1 [==============================] - 0s 421ms/step - loss: 0.5487 - accuracy: 0.0476 - val_loss: 0.4774 - val_accuracy: 0.1429
Epoch 30/50
1/1 [==============================] - ETA: 0s - loss: 0.5483 - accuracy: 0.0476WARNING:tensorflow:Method (on_train_batch_end) is slow compared to the batch update (0.657228). Check your callbacks.
1/1 [==============================] - 1s 955ms/step - loss: 0.5483 - accuracy: 0.0476 - val_loss: 0.4782 - val_accuracy: 0.1429
Epoch 31/50
1/1 [==============================] - 1s 634ms/step - loss: 0.5475 - accuracy: 0.0476 - val_loss: 0.4792 - val_accuracy: 0.1429
Epoch 32/50
1/1 [==============================] - 0s 364ms/step - loss: 0.5479 - accuracy: 0.0476 - val_loss: 0.4800 - val_accuracy: 0.1429
Epoch 33/50
1/1 [==============================] - 0s 404ms/step - loss: 0.5478 - accuracy: 0.0476 - val_loss: 0.4808 - val_accuracy: 0.1429
Epoch 34/50
1/1 [==============================] - 0s 381ms/step - loss: 0.5477 - accuracy: 0.0476 - val_loss: 0.4812 - val_accuracy: 0.1429
Epoch 35/50
1/1 [==============================] - ETA: 0s - loss: 0.5476 - accuracy: 0.0476WARNING:tensorflow:Method (on_train_batch_end) is slow compared to the batch update (0.342873). Check your callbacks.
1/1 [==============================] - 1s 524ms/step - loss: 0.5476 - accuracy: 0.0476 - val_loss: 0.4810 - val_accuracy: 0.1429
Epoch 36/50
1/1 [==============================] - 0s 442ms/step - loss: 0.5485 - accuracy: 0.0476 - val_loss: 0.4808 - val_accuracy: 0.1429
Epoch 37/50
1/1 [==============================] - 1s 514ms/step - loss: 0.5493 - accuracy: 0.0476 - val_loss: 0.4805 - val_accuracy: 0.1429
Epoch 38/50
1/1 [==============================] - 1s 630ms/step - loss: 0.5503 - accuracy: 0.0476 - val_loss: 0.4806 - val_accuracy: 0.1429
Epoch 39/50
1/1 [==============================] - ETA: 0s - loss: 0.5478 - accuracy: 0.0476WARNING:tensorflow:Method (on_train_batch_end) is slow compared to the batch update (0.724169). Check your callbacks.
1/1 [==============================] - 1s 1s/step - loss: 0.5478 - accuracy: 0.0476 - val_loss: 0.4812 - val_accuracy: 0.1429
Epoch 40/50
1/1 [==============================] - ETA: 0s - loss: 0.5475 - accuracy: 0.0476WARNING:tensorflow:Method (on_train_batch_end) is slow compared to the batch update (0.356633). Check your callbacks.
1/1 [==============================] - 0s 400ms/step - loss: 0.5475 - accuracy: 0.0476 - val_loss: 0.4813 - val_accuracy: 0.1429
Epoch 41/50
1/1 [==============================] - 1s 625ms/step - loss: 0.5479 - accuracy: 0.0476 - val_loss: 0.4814 - val_accuracy: 0.1429
Epoch 42/50
1/1 [==============================] - 1s 671ms/step - loss: 0.5481 - accuracy: 0.0476 - val_loss: 0.4810 - val_accuracy: 0.1429
Epoch 43/50
1/1 [==============================] - 1s 527ms/step - loss: 0.5482 - accuracy: 0.0476 - val_loss: 0.4803 - val_accuracy: 0.1429
Epoch 44/50
1/1 [==============================] - 1s 688ms/step - loss: 0.5479 - accuracy: 0.0476 - val_loss: 0.4797 - val_accuracy: 0.1429
Epoch 45/50
1/1 [==============================] - ETA: 0s - loss: 0.5475 - accuracy: 0.0476WARNING:tensorflow:Method (on_train_batch_end) is slow compared to the batch update (0.479657). Check your callbacks.
1/1 [==============================] - 1s 505ms/step - loss: 0.5475 - accuracy: 0.0476 - val_loss: 0.4789 - val_accuracy: 0.1429
Epoch 46/50
1/1 [==============================] - 1s 637ms/step - loss: 0.5479 - accuracy: 0.0476 - val_loss: 0.4776 - val_accuracy: 0.1429
Epoch 47/50
1/1 [==============================] - 0s 383ms/step - loss: 0.5490 - accuracy: 0.0476 - val_loss: 0.4772 - val_accuracy: 0.1429
Epoch 48/50
1/1 [==============================] - 0s 420ms/step - loss: 0.5486 - accuracy: 0.0476 - val_loss: 0.4769 - val_accuracy: 0.1429
Epoch 49/50
1/1 [==============================] - 0s 428ms/step - loss: 0.5478 - accuracy: 0.0476 - val_loss: 0.4769 - val_accuracy: 0.1429
Epoch 50/50
1/1 [==============================] - ETA: 0s - loss: 0.5482 - accuracy: 0.0476WARNING:tensorflow:Method (on_train_batch_end) is slow compared to the batch update (0.509478). Check your callbacks.
1/1 [==============================] - 0s 417ms/step - loss: 0.5482 - accuracy: 0.0476 - val_loss: 0.4772 - val_accuracy: 0.1429
1 Physical GPUs, 1 Logical GPU
WARNING:tensorflow:Layer lstm_16 will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU
WARNING:tensorflow:Layer lstm_17 will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU
WARNING:tensorflow:Layer lstm_18 will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU
WARNING:tensorflow:Layer lstm_19 will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU
1/1 [==============================] - 0s 67ms/step - loss: 0.6931 - accuracy: 0.0714
---------- Untrained model, accuracy: 7.14% ----------
WARNING:tensorflow:Layer lstm will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU
WARNING:tensorflow:Layer lstm_1 will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU
WARNING:tensorflow:Layer lstm_2 will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU
WARNING:tensorflow:Layer lstm_3 will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU
1/1 [==============================] - 0s 3ms/step - loss: 0.4772 - accuracy: 0.1429
---------- Restored model, accuracy: 14.29% ----------
Shapes of datasets:
X_train: (9538, 1, 7)
y_train: (9538,)
X_test: (3179, 1, 7)
y_test: (3179,)
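Worth checking before anything else: with a sigmoid output and binary_crossentropy, Keras expects y to hold 0/1 class labels, but y_train above is column 2 of the scaled feature matrix, i.e. a continuous price value, so the binary accuracy metric has almost nothing to match. A minimal sanity check; the last two lines are a sketch that assumes the binary 'Class' column is the intended target:

import numpy as np

# If this prints continuous scaled values rather than {0, 1}, the
# 'accuracy' metric is effectively meaningless and will barely move.
print(np.unique(y_train)[:10])

# Hypothetical fix: take labels from the unscaled DataFrame instead of
# from the scaled feature matrix (assumes 'Class' is the real target).
y_train = data['Class'].values[:trainingData]
y_test = data['Class'].values[-testingData:]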
Related
I'm doing a species classification task from Kaggle (https://www.kaggle.com/competitions/yum-or-yuck-butterfly-mimics-2022/overview). I decided to use transfer learning to tackle this problem, since there aren't that many images. The model is as follows:
inputs = tf.keras.layers.Input(shape=(224, 224, 3))
base_model = tf.keras.applications.resnet50.ResNet50(
    input_shape=(224, 224, 3),
    include_top=False,
    weights="imagenet")

for layer in base_model.layers:
    layer.trainable = False

x = base_model(inputs, training=False)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.3)(x)
x = tf.keras.layers.Dense(1024, activation="relu")(x)
x = tf.keras.layers.Dropout(0.3)(x)
x = tf.keras.layers.Dense(512, activation="relu")(x)
x = tf.keras.layers.Dropout(0.3)(x)
x = tf.keras.layers.Dense(64, activation="relu")(x)
output = tf.keras.layers.Dense(6, activation="softmax")(x)
model = tf.keras.Model(inputs=inputs, outputs=output)
As per the guidelines for doing transfer learning (https://keras.io/guides/transfer_learning/), I'm freezing the ResNet layers and running the base model in inference mode (training=False). However, the results show that the model is not learning properly; convergence doesn't look possible even after nearly 200 epochs:
model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    loss="categorical_crossentropy",
    metrics="accuracy",
)

stop_early = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss',
    min_delta=0.0001,
    patience=20,
    restore_best_weights=True
)

history = model.fit(train_generator,
                    validation_data=val_generator,
                    epochs=200,
                    callbacks=[stop_early])
22/22 [==============================] - 19s 442ms/step - loss: 1.9317 - accuracy: 0.1794 - val_loss: 1.8272 - val_accuracy: 0.1618
Epoch 2/200
22/22 [==============================] - 9s 398ms/step - loss: 1.8250 - accuracy: 0.1882 - val_loss: 1.7681 - val_accuracy: 0.2197
Epoch 3/200
22/22 [==============================] - 9s 402ms/step - loss: 1.7927 - accuracy: 0.2294 - val_loss: 1.7612 - val_accuracy: 0.2139
Epoch 4/200
22/22 [==============================] - 9s 424ms/step - loss: 1.7930 - accuracy: 0.2000 - val_loss: 1.7640 - val_accuracy: 0.2139
Epoch 5/200
22/22 [==============================] - 9s 391ms/step - loss: 1.7872 - accuracy: 0.2132 - val_loss: 1.7489 - val_accuracy: 0.3121
Epoch 6/200
22/22 [==============================] - 9s 389ms/step - loss: 1.7700 - accuracy: 0.2574 - val_loss: 1.7378 - val_accuracy: 0.2543
Epoch 7/200
22/22 [==============================] - 9s 396ms/step - loss: 1.7676 - accuracy: 0.2353 - val_loss: 1.7229 - val_accuracy: 0.3064
Epoch 8/200
22/22 [==============================] - 9s 427ms/step - loss: 1.7721 - accuracy: 0.2353 - val_loss: 1.7225 - val_accuracy: 0.2948
Epoch 9/200
22/22 [==============================] - 9s 399ms/step - loss: 1.7522 - accuracy: 0.2588 - val_loss: 1.7267 - val_accuracy: 0.2948
Epoch 10/200
22/22 [==============================] - 9s 395ms/step - loss: 1.7434 - accuracy: 0.2735 - val_loss: 1.7151 - val_accuracy: 0.2948
Epoch 11/200
22/22 [==============================] - 9s 391ms/step - loss: 1.7500 - accuracy: 0.2632 - val_loss: 1.7083 - val_accuracy: 0.3064
Epoch 12/200
22/22 [==============================] - 9s 425ms/step - loss: 1.7307 - accuracy: 0.2721 - val_loss: 1.6899 - val_accuracy: 0.3179
Epoch 13/200
22/22 [==============================] - 9s 407ms/step - loss: 1.7439 - accuracy: 0.2794 - val_loss: 1.7045 - val_accuracy: 0.2948
Epoch 14/200
22/22 [==============================] - 9s 404ms/step - loss: 1.7376 - accuracy: 0.2706 - val_loss: 1.7118 - val_accuracy: 0.2659
Epoch 15/200
22/22 [==============================] - 9s 419ms/step - loss: 1.7588 - accuracy: 0.2647 - val_loss: 1.6684 - val_accuracy: 0.3237
Epoch 16/200
22/22 [==============================] - 9s 394ms/step - loss: 1.7289 - accuracy: 0.2824 - val_loss: 1.6733 - val_accuracy: 0.3064
Epoch 17/200
22/22 [==============================] - 9s 387ms/step - loss: 1.7184 - accuracy: 0.2809 - val_loss: 1.7185 - val_accuracy: 0.2659
Epoch 18/200
22/22 [==============================] - 9s 408ms/step - loss: 1.7242 - accuracy: 0.2765 - val_loss: 1.6961 - val_accuracy: 0.2717
Epoch 19/200
22/22 [==============================] - 9s 424ms/step - loss: 1.7218 - accuracy: 0.2853 - val_loss: 1.6757 - val_accuracy: 0.3006
Epoch 20/200
22/22 [==============================] - 9s 396ms/step - loss: 1.7248 - accuracy: 0.2882 - val_loss: 1.6716 - val_accuracy: 0.3064
Epoch 21/200
22/22 [==============================] - 9s 401ms/step - loss: 1.7134 - accuracy: 0.2838 - val_loss: 1.6666 - val_accuracy: 0.2948
Epoch 22/200
22/22 [==============================] - 9s 393ms/step - loss: 1.7140 - accuracy: 0.2941 - val_loss: 1.6427 - val_accuracy: 0.3064
I need to unfreeze the layers and turn inference mode off in order for the model to learn. I tested the same scenario with EfficientNet and the same thing happened. Finally, I also tried Xception, and freezing the layers and running in inference mode was fine. So they seem to behave differently, even though they all have BatchNorm layers.
I don't understand what is going on here. Why would I need to turn inference mode off? Does anyone have a clue about this?
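One thing worth ruling out (an assumption on my part, since the generators aren't shown): tf.keras.applications.resnet50.ResNet50 was trained on images transformed by resnet50.preprocess_input (channel-swapped and ImageNet-mean-subtracted), and its frozen BatchNorm layers carry moving statistics for that input distribution. If the generators feed raw [0, 255] or [0, 1] pixels, the frozen backbone sees badly mis-scaled inputs, which looks exactly like "not learning until I unfreeze". Xception expects a different preprocessing (scaling to [-1, 1]), so it can react differently to the same pipeline. A sketch of wiring the matching preprocessing into the model:

# Hypothetical fix: apply the backbone's own preprocessing inside the
# model so the frozen BatchNorm statistics match what they saw during
# ImageNet training.
inputs = tf.keras.layers.Input(shape=(224, 224, 3))
x = tf.keras.applications.resnet50.preprocess_input(inputs)
x = base_model(x, training=False)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
# ... rest of the head as above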
EDIT:
Results from ResNet50 and from Xception (the attached training plots are omitted here).
I am trying to create a custom loss function, but as soon as I try to create a copy of the y_pred (model predictions) tensor, the loss function stops working.
This function works:
def custom_loss(y_true, y_pred):
    y_true = tf.cast(y_true, dtype=y_pred.dtype)
    loss = binary_crossentropy(y_true, y_pred)
    return loss
The output is
Epoch 1/10
26/26 [==============================] - 5s 169ms/step - loss: 56.1577 - accuracy: 0.7867 - val_loss: 14.7032 - val_accuracy: 0.9185
Epoch 2/10
26/26 [==============================] - 4s 159ms/step - loss: 18.6890 - accuracy: 0.8762 - val_loss: 9.4140 - val_accuracy: 0.9185
Epoch 3/10
26/26 [==============================] - 4s 158ms/step - loss: 13.7425 - accuracy: 0.8437 - val_loss: 7.7499 - val_accuracy: 0.9185
Epoch 4/10
26/26 [==============================] - 4s 159ms/step - loss: 10.5267 - accuracy: 0.8510 - val_loss: 6.1037 - val_accuracy: 0.9185
Epoch 5/10
26/26 [==============================] - 4s 160ms/step - loss: 7.5695 - accuracy: 0.8544 - val_loss: 3.9937 - val_accuracy: 0.9185
Epoch 6/10
26/26 [==============================] - 4s 159ms/step - loss: 5.1320 - accuracy: 0.8538 - val_loss: 2.6940 - val_accuracy: 0.9185
Epoch 7/10
26/26 [==============================] - 4s 160ms/step - loss: 3.3265 - accuracy: 0.8557 - val_loss: 1.6613 - val_accuracy: 0.9185
Epoch 8/10
26/26 [==============================] - 4s 160ms/step - loss: 2.1421 - accuracy: 0.8538 - val_loss: 1.0443 - val_accuracy: 0.9185
Epoch 9/10
26/26 [==============================] - 4s 160ms/step - loss: 1.3384 - accuracy: 0.8601 - val_loss: 0.5159 - val_accuracy: 0.9184
Epoch 10/10
26/26 [==============================] - 4s 173ms/step - loss: 0.6041 - accuracy: 0.8895 - val_loss: 0.3164 - val_accuracy: 0.9185
testing
**********Testing model**********
training AUC : 0.6204090733263475
testing AUC: 0.6196677312833667
But this one does not work:
def custom_loss(y_true, y_pred):
    y_true = tf.cast(y_true, dtype=y_pred.dtype)
    y_p = tf.identity(y_pred)
    loss = binary_crossentropy(y_true, y_p)
    return loss
I am getting this output
Epoch 1/10
26/26 [==============================] - 11s 179ms/step - loss: 1.3587 - accuracy: 0.9106 - val_loss: 1.2569 - val_accuracy: 0.9185
Epoch 2/10
26/26 [==============================] - 4s 159ms/step - loss: 1.2572 - accuracy: 0.9185 - val_loss: 1.2569 - val_accuracy: 0.9185
Epoch 3/10
26/26 [==============================] - 4s 158ms/step - loss: 1.2572 - accuracy: 0.9185 - val_loss: 1.2569 - val_accuracy: 0.9185
Epoch 4/10
26/26 [==============================] - 4s 158ms/step - loss: 1.2572 - accuracy: 0.9185 - val_loss: 1.2569 - val_accuracy: 0.9185
Epoch 5/10
26/26 [==============================] - 4s 158ms/step - loss: 1.2572 - accuracy: 0.9185 - val_loss: 1.2569 - val_accuracy: 0.9185
Epoch 6/10
26/26 [==============================] - 4s 158ms/step - loss: 1.2572 - accuracy: 0.9185 - val_loss: 1.2569 - val_accuracy: 0.9185
Epoch 7/10
26/26 [==============================] - 4s 159ms/step - loss: 1.2572 - accuracy: 0.9185 - val_loss: 1.2569 - val_accuracy: 0.9185
Epoch 8/10
26/26 [==============================] - 4s 159ms/step - loss: 1.2572 - accuracy: 0.9185 - val_loss: 1.2569 - val_accuracy: 0.9185
Epoch 9/10
26/26 [==============================] - 4s 160ms/step - loss: 1.2572 - accuracy: 0.9185 - val_loss: 1.2569 - val_accuracy: 0.9185
Epoch 10/10
26/26 [==============================] - 4s 159ms/step - loss: 1.2572 - accuracy: 0.9185 - val_loss: 1.2569 - val_accuracy: 0.9185
testing
**********Testing model**********
training AUC : 0.5
testing AUC : 0.5
Is there a problem with tf.identity() that is causing the issue?
Or is there another way to copy tensors that I should be using?
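For what it's worth, tf.identity is differentiable and gradients flow through it, which a minimal check outside of any model can confirm (a standalone sketch, unrelated to the training code above):

import tensorflow as tf

x = tf.Variable([0.3, 0.7])
with tf.GradientTape() as tape:
    y = tf.identity(x)           # the copy under suspicion
    loss = tf.reduce_sum(y * y)

# Prints [0.6 1.4], not None: the copy does not block backpropagation,
# so the stalled training likely has another cause.
print(tape.gradient(loss, x))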
I am trying to run an autoencoder for dimensionality reduction on a Fraud Detection dataset (https://www.kaggle.com/kartik2112/fraud-detection?select=fraudTest.csv) and am receiving very high loss values for each iteration. Below is the autoencoder code.
nb_epoch = 100
batch_size = 128
input_dim = X_train.shape[1]
encoding_dim = 14
hidden_dim = int(encoding_dim / 2)
learning_rate = 1e-7

input_layer = Input(shape=(input_dim, ))
encoder = Dense(encoding_dim, activation="tanh",
                activity_regularizer=regularizers.l1(learning_rate))(input_layer)
encoder = Dense(hidden_dim, activation="relu")(encoder)
decoder = Dense(hidden_dim, activation='tanh')(encoder)
decoder = Dense(input_dim, activation='relu')(decoder)
autoencoder = Model(inputs=input_layer, outputs=decoder)

autoencoder.compile(metrics=['accuracy'],
                    loss='mean_squared_error',
                    optimizer='adam')

cp = ModelCheckpoint(filepath="autoencoder_fraud.h5",
                     save_best_only=True,
                     verbose=0)
tb = TensorBoard(log_dir='./logs',
                 histogram_freq=0,
                 write_graph=True,
                 write_images=True)

history = autoencoder.fit(X_train, X_train,
                          epochs=nb_epoch,
                          batch_size=batch_size,
                          shuffle=True,
                          validation_data=(X_test, X_test),
                          verbose=1,
                          callbacks=[cp, tb]).history
Here is a snippet of the loss values:
Epoch 1/100
10131/10131 [==============================] - 32s 3ms/step - loss: 52445827358.6230 - accuracy: 0.3389 - val_loss: 9625651200.0000 - val_accuracy: 0.5083
Epoch 2/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52393605025.8066 - accuracy: 0.5083 - val_loss: 9621398528.0000 - val_accuracy: 0.5083
Epoch 3/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52486496629.1354 - accuracy: 0.5082 - val_loss: 9617147904.0000 - val_accuracy: 0.5083
Epoch 4/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52514002255.9432 - accuracy: 0.5070 - val_loss: 9612887040.0000 - val_accuracy: 0.5083
Epoch 5/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52436489238.6388 - accuracy: 0.5076 - val_loss: 9608664064.0000 - val_accuracy: 0.5083
Epoch 6/100
10131/10131 [==============================] - 31s 3ms/step - loss: 52430005774.7556 - accuracy: 0.5081 - val_loss: 9604417536.0000 - val_accuracy: 0.5083
Epoch 7/100
10131/10131 [==============================] - 31s 3ms/step - loss: 52474495714.5898 - accuracy: 0.5079 - val_loss: 9600195584.0000 - val_accuracy: 0.5083
Epoch 8/100
10131/10131 [==============================] - 31s 3ms/step - loss: 52423052560.0695 - accuracy: 0.5076 - val_loss: 9595947008.0000 - val_accuracy: 0.5083
Epoch 9/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52442358260.0742 - accuracy: 0.5072 - val_loss: 9591708672.0000 - val_accuracy: 0.5083
Epoch 10/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52402494704.5369 - accuracy: 0.5089 - val_loss: 9587487744.0000 - val_accuracy: 0.5083
Epoch 11/100
10131/10131 [==============================] - 31s 3ms/step - loss: 52396583628.3553 - accuracy: 0.5081 - val_loss: 9583238144.0000 - val_accuracy: 0.5083
Epoch 12/100
10131/10131 [==============================] - 31s 3ms/step - loss: 52349824708.2700 - accuracy: 0.5076 - val_loss: 9579020288.0000 - val_accuracy: 0.5083
Epoch 13/100
10131/10131 [==============================] - 31s 3ms/step - loss: 52332072133.6850 - accuracy: 0.5083 - val_loss: 9574786048.0000 - val_accuracy: 0.5083
Epoch 14/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52353680011.6731 - accuracy: 0.5086 - val_loss: 9570555904.0000 - val_accuracy: 0.5083
Epoch 15/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52347432594.5456 - accuracy: 0.5088 - val_loss: 9566344192.0000 - val_accuracy: 0.5083
Epoch 16/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52327825554.3435 - accuracy: 0.5076 - val_loss: 9562103808.0000 - val_accuracy: 0.5083
Epoch 17/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52347251610.1255 - accuracy: 0.5080 - val_loss: 9557892096.0000 - val_accuracy: 0.5083
Epoch 18/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52292632667.3636 - accuracy: 0.5079 - val_loss: 9553654784.0000 - val_accuracy: 0.5083
Epoch 19/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52354135093.7671 - accuracy: 0.5083 - val_loss: 9549425664.0000 - val_accuracy: 0.5083
Epoch 20/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52295668148.2006 - accuracy: 0.5086 - val_loss: 9545219072.0000 - val_accuracy: 0.5083
Epoch 21/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52314219115.3320 - accuracy: 0.5079 - val_loss: 9540980736.0000 - val_accuracy: 0.5083
Epoch 22/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52328022934.0829 - accuracy: 0.5079 - val_loss: 9536788480.0000 - val_accuracy: 0.5083
Epoch 23/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52268139834.5172 - accuracy: 0.5074 - val_loss: 9532554240.0000 - val_accuracy: 0.5083
Epoch 24/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52308370726.3040 - accuracy: 0.5077 - val_loss: 9528341504.0000 - val_accuracy: 0.5083
Epoch 25/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52224468101.4070 - accuracy: 0.5081 - val_loss: 9524126720.0000 - val_accuracy: 0.5083
Epoch 26/100
10131/10131 [==============================] - 30s 3ms/step - loss: 52200100823.1694 - accuracy: 0.5080 - val_loss: 9519915008.0000 - val_accuracy: 0.5083
Any advice/solution will be highly appreciated. Thank you
I have scaled the numerical data using StandardScaler and encoded the categorical data using LabelEncoder.
First of all, check which numerical data you scaled.
I think you wrongly scaled cc_num, because cc_num is a categorical column.
Fixing this should solve your problem with the high loss, but it doesn't mean your model will be good.
You should first take a careful look at the features and try to find useful relationships between the label and the features (data preprocessing/featurization).
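A sketch of that suggestion (df and the column list are assumptions based on the dataset's Kaggle page, not code from the question):

from sklearn.preprocessing import StandardScaler

# Treat cc_num as an identifier, not a numeric quantity: drop it here
# (or label-encode it along with the other categorical columns).
df = df.drop(columns=['cc_num'])

# Scale only the genuinely numeric columns (names assumed from the dataset).
numeric_cols = ['amt', 'lat', 'long', 'city_pop', 'merch_lat', 'merch_long']
df[numeric_cols] = StandardScaler().fit_transform(df[numeric_cols])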
Let me say up front that I am not at all familiar with neural networks, and this is the first time I have tried to develop one.
The problem is to predict a week of pollution levels based on the previous month of data.
The raw data has 15 features (a screenshot of the starting data is omitted here).
The value to be predicted is 'gas', for a total of 168 hours, i.e. the hours in the following week.
MinMaxScaler(feature_range=(0, 1)) is applied to the data, and then the data is split into train and test sets. Since only one year of hourly measurements is available, the data is resampled into series of 672 hourly samples, each starting at midnight of a day of the year. From roughly 8000 hourly readings, this yields about 600 series of 672 samples each.
The 'date' column is removed from the initial data; the shapes of train_x and train_y are given below.
In train_x[0] there are 672 hourly readings for the first 4 weeks of the data set, consisting of all the features, including 'gas'.
In train_y[0], on the other hand, there are 168 hourly readings for the following week, which begins where the month in train_x[0] ends (screenshots of train_x[0], where column 0 is 'gas', and of train_y[0], with only the gas column, are omitted here).
TRAIN X SHAPE = (631, 672, 14)
TRAIN Y SHAPE = (631, 168, 1)
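A sketch of the windowing just described, assuming the input is a 2-D array of hourly rows with 'gas' in column 0; the to_supervised name matches the code below, while the 24-hour step (one new window per midnight) is an assumption:

import numpy as np

def to_supervised(values, n_input=672, n_out=168, step=24):
    # values: (n_hours, n_features) array of hourly readings, 'gas' in column 0
    X, y = [], []
    for start in range(0, len(values) - n_input - n_out + 1, step):
        X.append(values[start:start + n_input])                         # 4 weeks, all features
        y.append(values[start + n_input:start + n_input + n_out, 0:1])  # next week's gas only
    return np.array(X), np.array(y)  # shapes on the order of (600, 672, 14) and (600, 168, 1)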
After organizing the data in this way (if it's wrong, please let me know), I built the neural network as follows:
train_x, train_y = to_supervised(train, n_input)
train_x = train_x.astype(float)
train_y = train_y.astype(float)

# define parameters
verbose, epochs, batch_size = 1, 200, 50
n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1]

# define model
model = Sequential()
opt = optimizers.RMSprop(learning_rate=1e-3)
model.add(layers.GRU(14, activation='relu', input_shape=(n_timesteps, n_features),
                     return_sequences=False, stateful=False))
model.add(layers.Dense(1, activation='relu'))
#model.add(layers.Dense(14, activation='linear'))
model.add(layers.Dense(n_outputs, activation='sigmoid'))
model.summary()
model.compile(loss='mse', optimizer=opt, metrics=['accuracy'])

train_y = np.concatenate(train_y).reshape(len(train_y), 168)

callback_early_stopping = EarlyStopping(monitor='val_loss',
                                        patience=5, verbose=1)
callback_tensorboard = TensorBoard(log_dir='./23_logs/',
                                   histogram_freq=0,
                                   write_graph=False)
callback_reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                       factor=0.1,
                                       min_lr=1e-4,
                                       patience=0,
                                       verbose=1)
callbacks = [callback_early_stopping,
             callback_tensorboard,
             callback_reduce_lr]

history = model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size,
                    verbose=verbose, shuffle=False,
                    validation_split=0.2, callbacks=callbacks)
When I fit the network I get:
11/11 [==============================] - 5s 305ms/step - loss: 0.1625 - accuracy: 0.0207 - val_loss: 0.1905 - val_accuracy: 0.0157
Epoch 2/200
11/11 [==============================] - 2s 179ms/step - loss: 0.1594 - accuracy: 0.0037 - val_loss: 0.1879 - val_accuracy: 0.0157
Epoch 3/200
11/11 [==============================] - 2s 169ms/step - loss: 0.1571 - accuracy: 0.0040 - val_loss: 0.1855 - val_accuracy: 0.0079
Epoch 4/200
11/11 [==============================] - 2s 165ms/step - loss: 0.1550 - accuracy: 0.0092 - val_loss: 0.1832 - val_accuracy: 0.0079
Epoch 5/200
11/11 [==============================] - 2s 162ms/step - loss: 0.1529 - accuracy: 0.0102 - val_loss: 0.1809 - val_accuracy: 0.0079
Epoch 6/200
11/11 [==============================] - 2s 160ms/step - loss: 0.1508 - accuracy: 0.0085 - val_loss: 0.1786 - val_accuracy: 0.0079
Epoch 7/200
11/11 [==============================] - 2s 160ms/step - loss: 0.1487 - accuracy: 0.0023 - val_loss: 0.1763 - val_accuracy: 0.0079
Epoch 8/200
11/11 [==============================] - 2s 158ms/step - loss: 0.1467 - accuracy: 0.0023 - val_loss: 0.1740 - val_accuracy: 0.0079
Epoch 9/200
11/11 [==============================] - 2s 159ms/step - loss: 0.1446 - accuracy: 0.0034 - val_loss: 0.1718 - val_accuracy: 0.0000e+00
Epoch 10/200
11/11 [==============================] - 2s 160ms/step - loss: 0.1426 - accuracy: 0.0034 - val_loss: 0.1695 - val_accuracy: 0.0000e+00
Epoch 11/200
11/11 [==============================] - 2s 162ms/step - loss: 0.1406 - accuracy: 0.0034 - val_loss: 0.1673 - val_accuracy: 0.0000e+00
Epoch 12/200
11/11 [==============================] - 2s 159ms/step - loss: 0.1387 - accuracy: 0.0034 - val_loss: 0.1651 - val_accuracy: 0.0000e+00
Epoch 13/200
11/11 [==============================] - 2s 159ms/step - loss: 0.1367 - accuracy: 0.0052 - val_loss: 0.1629 - val_accuracy: 0.0000e+00
Epoch 14/200
11/11 [==============================] - 2s 159ms/step - loss: 0.1348 - accuracy: 0.0052 - val_loss: 0.1608 - val_accuracy: 0.0000e+00
Epoch 15/200
11/11 [==============================] - 2s 161ms/step - loss: 0.1328 - accuracy: 0.0052 - val_loss: 0.1586 - val_accuracy: 0.0000e+00
Epoch 16/200
11/11 [==============================] - 2s 162ms/step - loss: 0.1309 - accuracy: 0.0052 - val_loss: 0.1565 - val_accuracy: 0.0000e+00
Epoch 17/200
11/11 [==============================] - 2s 171ms/step - loss: 0.1290 - accuracy: 0.0052 - val_loss: 0.1544 - val_accuracy: 0.0000e+00
Epoch 18/200
11/11 [==============================] - 2s 174ms/step - loss: 0.1271 - accuracy: 0.0052 - val_loss: 0.1523 - val_accuracy: 0.0000e+00
Epoch 19/200
11/11 [==============================] - 2s 161ms/step - loss: 0.1253 - accuracy: 0.0052 - val_loss: 0.1502 - val_accuracy: 0.0000e+00
Epoch 20/200
11/11 [==============================] - 2s 161ms/step - loss: 0.1234 - accuracy: 0.0052 - val_loss: 0.1482 - val_accuracy: 0.0000e+00
Epoch 21/200
11/11 [==============================] - 2s 159ms/step - loss: 0.1216 - accuracy: 0.0052 - val_loss: 0.1461 - val_accuracy: 0.0000e+00
Epoch 22/200
11/11 [==============================] - 2s 164ms/step - loss: 0.1198 - accuracy: 0.0052 - val_loss: 0.1441 - val_accuracy: 0.0000e+00
Epoch 23/200
11/11 [==============================] - 2s 164ms/step - loss: 0.1180 - accuracy: 0.0052 - val_loss: 0.1421 - val_accuracy: 0.0000e+00
Epoch 24/200
11/11 [==============================] - 2s 163ms/step - loss: 0.1162 - accuracy: 0.0052 - val_loss: 0.1401 - val_accuracy: 0.0000e+00
Epoch 25/200
11/11 [==============================] - 2s 167ms/step - loss: 0.1145 - accuracy: 0.0052 - val_loss: 0.1381 - val_accuracy: 0.0000e+00
Epoch 26/200
11/11 [==============================] - 2s 188ms/step - loss: 0.1127 - accuracy: 0.0052 - val_loss: 0.1361 - val_accuracy: 0.0000e+00
Epoch 27/200
11/11 [==============================] - 2s 169ms/step - loss: 0.1110 - accuracy: 0.0052 - val_loss: 0.1342 - val_accuracy: 0.0000e+00
Epoch 28/200
11/11 [==============================] - 2s 189ms/step - loss: 0.1093 - accuracy: 0.0052 - val_loss: 0.1323 - val_accuracy: 0.0000e+00
Epoch 29/200
11/11 [==============================] - 2s 183ms/step - loss: 0.1076 - accuracy: 0.0079 - val_loss: 0.1304 - val_accuracy: 0.0000e+00
Epoch 30/200
11/11 [==============================] - 2s 172ms/step - loss: 0.1059 - accuracy: 0.0079 - val_loss: 0.1285 - val_accuracy: 0.0000e+00
Epoch 31/200
11/11 [==============================] - 2s 164ms/step - loss: 0.1042 - accuracy: 0.0079 - val_loss: 0.1266 - val_accuracy: 0.0000e+00
Epoch 32/200
The accuracy always remains very low, and sometimes (as in this case) val_accuracy drops to 0 and never changes, while loss and val_loss decrease but do not converge well. I realize I am certainly doing many things wrong, but I cannot figure out how to fix them. I have of course tried other hyperparameters, and also other networks such as LSTM, but I didn't get satisfactory results.
How can I improve the model so that the accuracy is at least decent? Any advice is welcome, thank you very much!
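One thing stands out before any tuning: the target here is continuous, and when the loss is 'mse', Keras resolves the 'accuracy' metric to exact prediction/label equality, so it sits near zero on a regression task no matter how good the model is. A sketch of a regression-appropriate compile call, reusing the model and opt defined above:

# Judge a regression model by error magnitude, not classification accuracy.
model.compile(loss='mse', optimizer=opt, metrics=['mae'])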
I am building a DNN with Keras to classify between background and signal events (HEP). Nevertheless, the loss and the accuracy are not changing.
I already tried changing the parameters on the optimizer, normalizing the data, changing the number of layers, neurons, epochs, initializing the weights, etc.
Here's the model:
epochs = 20
num_features = 2
num_classes = 2
batch_size = 32

# model
print("\n Building model...")
model = Sequential()
model.add(Dropout(0.2))
model.add(Dense(128, input_shape=(2,), activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation=tf.nn.softmax))

print("\n Compiling model...")
opt = adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0,
           amsgrad=False)

# compile model
model.compile(
    loss='sparse_categorical_crossentropy',
    optimizer=opt,
    metrics=['accuracy'])

print("\n Fitting model...")
history = model.fit(x_train, y_train, epochs=epochs,
                    batch_size=batch_size, validation_data=(x_test, y_test))
I'm expecting a change in the loss but it won't decrease from 0.69-ish.
The epochs report:
Building model...
Compiling model...
Fitting model...
Train on 18400 samples, validate on 4600 samples
Epoch 1/20
18400/18400 [==============================] - 1s 71us/step - loss: 0.6939 - acc: 0.4965 - val_loss: 0.6933 - val_acc: 0.5000
Epoch 2/20
18400/18400 [==============================] - 1s 60us/step - loss: 0.6935 - acc: 0.5045 - val_loss: 0.6933 - val_acc: 0.5000
Epoch 3/20
18400/18400 [==============================] - 1s 69us/step - loss: 0.6937 - acc: 0.4993 - val_loss: 0.6934 - val_acc: 0.5000
Epoch 4/20
18400/18400 [==============================] - 1s 65us/step - loss: 0.6939 - acc: 0.4984 - val_loss: 0.6932 - val_acc: 0.5000
Epoch 5/20
18400/18400 [==============================] - 1s 58us/step - loss: 0.6936 - acc: 0.5000 - val_loss: 0.6936 - val_acc: 0.5000
Epoch 6/20
18400/18400 [==============================] - 1s 57us/step - loss: 0.6937 - acc: 0.4913 - val_loss: 0.6932 - val_acc: 0.5000
Epoch 7/20
18400/18400 [==============================] - 1s 58us/step - loss: 0.6935 - acc: 0.5008 - val_loss: 0.6932 - val_acc: 0.5000
Epoch 8/20
18400/18400 [==============================] - 1s 63us/step - loss: 0.6936 - acc: 0.5013 - val_loss: 0.6936 - val_acc: 0.5000
Epoch 9/20
18400/18400 [==============================] - 1s 67us/step - loss: 0.6936 - acc: 0.4924 - val_loss: 0.6932 - val_acc: 0.5000
Epoch 10/20
18400/18400 [==============================] - 1s 61us/step - loss: 0.6933 - acc: 0.5067 - val_loss: 0.6934 - val_acc: 0.5000
Epoch 11/20
18400/18400 [==============================] - 1s 64us/step - loss: 0.6938 - acc: 0.4972 - val_loss: 0.6931 - val_acc: 0.5000
Epoch 12/20
18400/18400 [==============================] - 1s 64us/step - loss: 0.6936 - acc: 0.4991 - val_loss: 0.6934 - val_acc: 0.5000
Epoch 13/20
18400/18400 [==============================] - 1s 70us/step - loss: 0.6937 - acc: 0.4960 - val_loss: 0.6935 - val_acc: 0.5000
Epoch 14/20
18400/18400 [==============================] - 1s 63us/step - loss: 0.6935 - acc: 0.4992 - val_loss: 0.6932 - val_acc: 0.5000
Epoch 15/20
18400/18400 [==============================] - 1s 61us/step - loss: 0.6937 - acc: 0.4940 - val_loss: 0.6931 - val_acc: 0.5000
Epoch 16/20
18400/18400 [==============================] - 1s 68us/step - loss: 0.6933 - acc: 0.5067 - val_loss: 0.6936 - val_acc: 0.5000
Epoch 17/20
18400/18400 [==============================] - 1s 58us/step - loss: 0.6938 - acc: 0.4997 - val_loss: 0.6935 - val_acc: 0.5000
Epoch 18/20
18400/18400 [==============================] - 1s 56us/step - loss: 0.6936 - acc: 0.4972 - val_loss: 0.6941 - val_acc: 0.5000
Epoch 19/20
18400/18400 [==============================] - 1s 57us/step - loss: 0.6934 - acc: 0.5061 - val_loss: 0.6954 - val_acc: 0.5000
Epoch 20/20
18400/18400 [==============================] - 1s 58us/step - loss: 0.6936 - acc: 0.5037 - val_loss: 0.6939 - val_acc: 0.5000
Update: my data preparation contains this:
np.random.shuffle(x_train)
np.random.shuffle(y_train)
np.random.shuffle(x_test)
np.random.shuffle(y_test)
And I'm thinking it's changing the class of each data point, because the shuffles are done separately.
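That suspicion is almost certainly right: four independent shuffles destroy the pairing between each sample and its label, which leaves nothing learnable and pins accuracy at chance (0.50 for two balanced classes). A sketch of shuffling features and labels in unison instead:

import numpy as np

# One permutation per split, applied to x and y together, keeps each
# sample paired with its own label.
train_idx = np.random.permutation(len(x_train))
x_train, y_train = x_train[train_idx], y_train[train_idx]

test_idx = np.random.permutation(len(x_test))
x_test, y_test = x_test[test_idx], y_test[test_idx]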