Really bad training/test accuracy of a neural network in Keras - Python

I'm building a neural network for the "Default of credit card clients" dataset from http://archive.ics.uci.edu/ml/datasets/default+of+credit+card+clients.
But the accuracy of my models is pretty bad, worse than if I predicted all zeros. I already did some research, oversampled to balance the classes, and changed the optimizer, because Adam was not improving the accuracy.
What else could I do?
import pandas
import numpy
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense
import keras
from imblearn.over_sampling import SMOTE
seed = 8
numpy.random.seed(seed)
base = pandas.read_csv('base_nao_trabalhada.csv')
train, test = train_test_split(base, test_size = 0.2)
train=train.values
test=test.values
X_train = train[:,1:23]
Y_train = train[:,24]
X_test = test[:,1:23]
Y_test = test[:,24]
sm = SMOTE(kind='regular')
X_resampled, Y_resampled = sm.fit_sample(X_train, Y_train)
# Model Creation
model = Sequential()
model.add(Dense(40, input_dim=22, init='uniform', activation='relu'))
model.add(Dense(4, init='uniform', activation='relu'))
model.add(Dense(1, init='uniform', activation='sigmoid'))
#activation='relu'
opt = keras.optimizers.SGD(lr=0.000001)
# Compile model
model.compile(loss='binary_crossentropy', optimizer=opt , metrics=['accuracy'])
#loss=binary_crossentropy
#optimizer='adam'
# creating .fit
model.fit(X_resampled, Y_resampled, nb_epoch=10000, batch_size=30)
# evaluate the model
scores = model.evaluate(X_test, Y_test)
print()
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))

Related

I need consistent results for MSE between runs when training a Neural Network

import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
from bayes_opt import BayesianOptimization
# load data from UCI Machine Learning Repository
df = pd.read_csv(r'C:\Test_set_Yacht.csv')
df1 = pd.read_csv(r'C:\Train_set_Yacht.csv')
df2 = pd.read_csv(r'C:\Yacht_hydro.csv')
X = df2.drop("residuary_resistance", axis=1)
Y = df2["residuary_resistance"]
# training features and target (from the train CSV)
X_train = df1.drop("residuary_resistance", axis=1)
y_train = df1["residuary_resistance"]
# test features and target (from the test CSV)
X_test = df.drop("residuary_resistance", axis=1)
y_test = df["residuary_resistance"]
# scale data using StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
def objective_model_1(hidden_units, learning_rate):
    # returns negative MSE because BayesianOptimization maximizes the objective
    model = Sequential()
    model.add(Dense(hidden_units, input_dim=X.shape[1], activation="relu"))
    model.add(Dense(hidden_units, activation="relu"))
    model.add(Dense(1, activation="linear"))
    model.compile(loss="mse", optimizer=Adam(learning_rate=learning_rate))
    model.fit(X_train_scaled, y_train, epochs=100, verbose=0)
    y_pred = model.predict(X_test_scaled)
    return -mean_squared_error(y_test, y_pred)

pbounds_model_1 = {
    "hidden_units": (32, 128),
    "learning_rate": (1e-5, 1e-1),
}

bo_model_1 = BayesianOptimization(
    f=objective_model_1,
    pbounds=pbounds_model_1,
    random_state=42,
)
bo_model_1.maximize(init_points=10, n_iter=90)
def objective_model_2(hidden_units, learning_rate):
    model = Sequential()
    model.add(Dense(hidden_units, input_shape=X_train_scaled.shape[1:], activation="relu"))
    model.add(Dense(hidden_units, activation="relu"))
    model.add(Dense(hidden_units, activation="relu"))
    model.add(Dense(hidden_units, activation="relu"))
    model.add(Dense(1, activation="linear"))
    model.compile(loss="mse", optimizer=Adam(learning_rate=learning_rate))
    model.fit(X_train_scaled, y_train, epochs=100, verbose=0)
    y_pred = model.predict(X_test_scaled)
    return -mean_squared_error(y_test, y_pred)

pbounds_model_2 = {
    "hidden_units": (32, 128),
    "learning_rate": (1e-5, 1e-1),
}

bo_model_2 = BayesianOptimization(
    f=objective_model_2,
    pbounds=pbounds_model_2,
    random_state=42,
)
bo_model_2.maximize(init_points=10, n_iter=90)
# get the best hyperparameters for each model
best_params_model_1 = bo_model_1.max["params"]
best_params_model_2 = bo_model_2.max["params"]
# train and evaluate model 1 with best hyperparameters
model_1 = Sequential()
model_1.add(Dense(32, input_dim=X.shape[1], activation="relu"))
model_1.add(Dense(32, activation="relu"))
model_1.add(Dense(1, activation="linear"))
model_1.compile(loss="mse", optimizer=Adam(learning_rate=best_params_model_1["learning_rate"]))
model_1.fit(X_train_scaled, y_train, epochs=100, verbose=0)
y_pred_1 = model_1.predict(X_test_scaled)
mse_1 = mean_squared_error(y_test, y_pred_1)
print("Model 1 MSE on test set:", mse_1)
# train and evaluate model 2 with best hyperparameters
model_2 = Sequential()
model_2.add(Dense(64, input_dim=X.shape[1], activation="relu"))
model_2.add(Dense(64, activation="relu"))
model_2.add(Dense(64, activation="relu"))
model_2.add(Dense(64, activation="relu"))
model_2.add(Dense(1, activation="linear"))
model_2.compile(loss="mse", optimizer=Adam(learning_rate=best_params_model_2["learning_rate"]))
model_2.fit(X_train_scaled, y_train, epochs=100, verbose=0)
y_pred_2 = model_2.predict(X_test_scaled)
mse_2 = mean_squared_error(y_test, y_pred_2)
print("Model 2 MSE on test set:", mse_2)
In the code above I implement Bayesian optimization for hyperparameter tuning of two different neural networks, using the dataset from https://archive.ics.uci.edu/ml/datasets/yacht+hydrodynamics. After running this, I recreate those two networks in a Jupyter Notebook code block and train them with the best hyperparameters already determined by the Bayesian optimizer. I need to get the same MSE every time I run the code; this is why I split the data beforehand, to ensure the same results.
The inner workings of the TensorFlow library are non-deterministic, so you must set a random seed in order to get reproducible results. In practice you just need to add this line at the start of your code:
tf.random.set_seed(0)
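If other libraries in the pipeline also draw random numbers, it can help to seed all of them; a minimal sketch (the calls beyond tf.random.set_seed are additions, and the set_random_seed / enable_op_determinism helpers only exist in newer TensorFlow releases):
import random
import numpy as np
import tensorflow as tf

random.seed(0)         # Python's built-in RNG
np.random.seed(0)      # NumPy (also used by scikit-learn)
tf.random.set_seed(0)  # TensorFlow weight initialization, shuffling, dropout
# On TF >= 2.7 the three calls above can be replaced by:
# tf.keras.utils.set_random_seed(0)
# On TF >= 2.8, GPU kernels can additionally be forced to be deterministic:
# tf.config.experimental.enable_op_determinism()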

CNN doesn't improve its performance

Here is the problem: I have a 2200x39 dataset, I know... very small. 38 columns are the features (texture and statistics) and the last column is the output class, which can be 0 or 1. My dataset is balanced (1100 "1" and 1100 "0").
I'm trying to improve my performance, which is stuck at 0.69 loss and 0.49 accuracy. I tried adding a layer, adding neurons, and different parameters. Nothing: the loss and accuracy values change only slightly.
So, first of all, I import all the stuff I need
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, BatchNormalization, Conv1D
from tensorflow.keras.optimizers import SGD
import matplotlib.pyplot as plt
Then I prepare my data and split it into an 80% training set and a 20% validation set
# fix a seed for reproducing same results if we wish to train and evaluate our network more than once
seed = 9
np.random.seed(seed)
# load dataset
dataset = np.loadtxt('tr_set.csv', delimiter=',', skiprows=1)
# Show the first 10 rows
print(dataset[1:10])
# Delete the first column with the patient index
dataset = dataset[:,1:42]
# Split into input (features) and output variables
X = dataset[:,2:40]
Y = dataset[:,40]
# Counting elements in class 0 and in class 1
count_0 = 0
count_1 = 0
for i in Y:
    if i == 0:
        count_0 = count_0 + 1
    if i == 1:
        count_1 = count_1 + 1
print("Number of elements in 0 class:", count_0)
print("Number of elements in 1 class:", count_1)
# The dataset is balanced
# Split into training set(80%) and validation set (20%)
(X_train, X_val, Y_train, Y_val) = train_test_split(X, Y, test_size=0.2, random_state=seed)
And here is my model, after I reshape X_train and X_val because I am using Conv1D.
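The reshape itself is not shown in the post; given the model's input_shape=(1, 38), it is presumably something like this (an assumption):
# Assumed reshape (not shown in the post): Conv1D layers expect 3-D input of
# shape (samples, timesteps, channels); input_shape=(1, 38) means 1 timestep
# with 38 channels.
X_train = X_train.reshape(-1, 1, 38)
X_val = X_val.reshape(-1, 1, 38)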
# Create the model
opt = SGD(lr=0.00001)
model = Sequential()
model.add(Dense(1024, activation='relu', kernel_initializer='random_uniform', input_shape=(1,38)))
model.add(BatchNormalization()) # It is used to normalize the input layer by adjusting and scaling the activations.
model.add(Dense(512, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.summary()
model.add(Conv1D(64, 3, padding="same", activation="relu"))
# model.add(MaxPooling1D(2))
model.summary()
model.add(Dense(1, activation='sigmoid'))
model.summary()
# compile the model
model.compile(loss='binary_crossentropy', optimizer= opt, metrics=['accuracy'])
# fit the model
history = model.fit(X_train, Y_train, validation_data=(X_val, Y_val), epochs=15, batch_size=10)
# w_data = model.get_weights()
What is wrong? I deleted the max-pooling because I had problems with dimensions (something like subtracting 2 from 1).
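For what it's worth, that dimension error is consistent with the layout above: with input_shape=(1, 38) the sequence axis has length 1, so MaxPooling1D(2) has nothing to pool. A sketch of the alternative layout (an assumption, not from the post) that treats the 38 features as the sequence axis:
from tensorflow.keras.layers import MaxPooling1D
# Alternative layout (assumption): 38 timesteps with 1 channel each, so that
# Conv1D slides over the features and a pool size of 2 fits (38 -> 19).
X_train = X_train.reshape(-1, 38, 1)
X_val = X_val.reshape(-1, 38, 1)
# the first layer would then declare input_shape=(38, 1) instead of (1, 38)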

How can I evaluate a StratifiedKFold model?

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
x_train = dataset[0:700,:-1]
y_train = dataset[0:700,-1]
x_test = dataset[700:,:-1]
y_test = dataset[700:,-1]
def create_model():
    model = Sequential()
    model.add(Dense(12, input_dim=8, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
model = KerasClassifier(build_fn=create_model, epochs=100, batch_size=64)
skf = StratifiedKFold(n_splits=3, shuffle=True, random_state=seed)
scores = cross_val_score(model, x_train, y_train, cv=skf)
predictions = cross_val_predict(model, x_test, y_test, cv=skf)
I want to train on [x_train], [y_train] with StratifiedKFold
and evaluate on [x_test], [y_test].
How can I do that?
I tried cross_val_predict, but I think it is not appropriate.
To split between train and test in a stratified way you can use:
from sklearn.model_selection import train_test_split
dataset_train, dataset_test = train_test_split(dataset,
                                               stratify=dataset[:,-1],
                                               test_size=0.2)
# split both datasets into X, y
Check:
https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
Stratified Train/Test-split in scikit-learn
skf = StratifiedKFold(n_splits=3, shuffle=True, random_state=seed)
accuracy=[]
for train in skf.split(x_train, y_train):
    model = Sequential()
    model.add(Dense(12, input_dim=8, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
How about this one? It works, but I don't know if it is correct.
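For reference, a fold loop usually also unpacks the train/validation indices and fits inside the loop; a minimal sketch of what a complete version could look like (an illustration, not from the thread):
skf = StratifiedKFold(n_splits=3, shuffle=True, random_state=seed)
fold_accuracy = []
for train_idx, val_idx in skf.split(x_train, y_train):
    model = create_model()  # fresh weights for every fold
    model.fit(x_train[train_idx], y_train[train_idx],
              epochs=100, batch_size=64, verbose=0)
    _, acc = model.evaluate(x_train[val_idx], y_train[val_idx], verbose=0)
    fold_accuracy.append(acc)
print("CV accuracy: %.3f (+/- %.3f)" % (np.mean(fold_accuracy), np.std(fold_accuracy)))

# finally, retrain on all of x_train and evaluate once on the held-out test set
final_model = create_model()
final_model.fit(x_train, y_train, epochs=100, batch_size=64, verbose=0)
print("Test accuracy:", final_model.evaluate(x_test, y_test, verbose=0)[1])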

my LSTM model accuracy is only around 0.5

I am trying to do positive/negative classification of Korean movie reviews.
The data are pre-processed, and the preprocessed data is shared via Google Drive.
The model is configured by referring to the example LSTM model.
When executed, the accuracy is only around 0.5.
Help me......
This URL is the train/test dataset:
https://drive.google.com/file/d/1rnf84idV9pj72Ammu5VQ2_rNyGZIa3pU/view?usp=sharing
train = 70000
test = about 33000
from keras.models import Sequential
from keras.layers import Dense, Embedding,Dropout
from keras.layers import LSTM
max_features = 20000
maxlen = 300
batch_size = 32
import numpy as np
x_train = np.load('file_path')
y_train = np.load('file_path')
x_test = np.load('file_path')
y_test = np.load('file_path')
print('x_train shape:', x_train.shape)
print('x_test shape: ', x_test.shape)
print('model generate...')
model = Sequential()
model.add(Embedding(max_features, 128))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
try:
    print('Train...')
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=1,
              validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test,
                                batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)
except Exception as e:
    print(e)
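One thing that stands out: maxlen = 300 is defined but never used. If the saved arrays are lists of word indices of varying length rather than already-padded matrices, a padding step like this (an assumption about the data format) would normally come before fit:
from keras.preprocessing import sequence
# Assumption: x_train / x_test contain variable-length sequences of word indices.
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape after padding:', x_train.shape)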

How to make Keras Neural Net outperforming Logistic Regression on Iris data

I am comparing a Keras neural net with simple logistic regression from scikit-learn on the Iris data. I expect that the Keras NN will perform better, as suggested by this post.
But why, when mimicking the code there, is the Keras NN result lower than logistic regression?
import seaborn as sns
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LogisticRegressionCV
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.utils import np_utils
# Prepare data
iris = sns.load_dataset("iris")
X = iris.values[:, 0:4]
y = iris.values[:, 4]
# Make test and train set
train_X, test_X, train_y, test_y = train_test_split(X, y, train_size=0.5, random_state=0)
################################
# Evaluate Logistic Regression
################################
lr = LogisticRegressionCV()
lr.fit(train_X, train_y)
pred_y = lr.predict(test_X)
print("Test fraction correct (LR-Accuracy) = {:.2f}".format(lr.score(test_X, test_y)))
################################
# Evaluate Keras Neural Network
################################
# Make ONE-HOT
def one_hot_encode_object_array(arr):
    '''One hot encode a numpy array of objects (e.g. strings)'''
    uniques, ids = np.unique(arr, return_inverse=True)
    return np_utils.to_categorical(ids, len(uniques))
train_y_ohe = one_hot_encode_object_array(train_y)
test_y_ohe = one_hot_encode_object_array(test_y)
model = Sequential()
model.add(Dense(16, input_shape=(4,)))
model.add(Activation('sigmoid'))
model.add(Dense(3))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
# Actual modelling
model.fit(train_X, train_y_ohe, verbose=0, batch_size=1)
score, accuracy = model.evaluate(test_X, test_y_ohe, batch_size=16, verbose=0)
print("Test fraction correct (NN-Score) = {:.2f}".format(score))
print("Test fraction correct (NN-Accuracy) = {:.2f}".format(accuracy))
I'm using this version of Keras
In [2]: keras.__version__
Out[2]: '1.0.1'
The result shows:
Test fraction correct (LR-Accuracy) = 0.83
Test fraction correct (NN-Score) = 0.75
Test fraction correct (NN-Accuracy) = 0.60
According to that post, the accuracy of Keras should be 0.99. What went wrong?
The default number of epochs was reduced from 100 in Keras version 0 to 10 in Keras version 1, just released this month (April 2016). Try:
model.fit(train_X, train_y_ohe, verbose=0, batch_size=1, nb_epoch=100)
Your neural network is quite simple. Try creating a deeper neural network by adding more neurons and layers. Also, it's important to scale your features. Try the glorot_uniform initializer. Last but not least, increase the number of epochs and check whether the loss decreases with each epoch.
So here you go:
# extra imports needed for the layers below (Keras 1.x module paths)
from keras.layers.core import Dropout
from keras.layers.advanced_activations import PReLU
from keras.layers.normalization import BatchNormalization

model = Sequential()
model.add(Dense(input_dim=4, output_dim=512, init='glorot_uniform'))
model.add(PReLU(input_shape=(512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(input_dim=512, output_dim=512, init='glorot_uniform'))
model.add(PReLU(input_shape=(512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(input_dim=512, output_dim=512, init='glorot_uniform'))
model.add(PReLU(input_shape=(512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(input_dim=512, output_dim=512, init='glorot_uniform'))
model.add(PReLU(input_shape=(512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(input_dim=512, output_dim=512, init='glorot_uniform'))
model.add(PReLU(input_shape=(512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(input_dim=512, output_dim=3, init='glorot_uniform'))
model.add(Activation('softmax'))
This reaches around 0.97 accuracy by the 120th epoch.
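The answer mentions scaling the features but does not show it; a minimal sketch of the usual approach (an addition for illustration, not part of the original answer):
from sklearn.preprocessing import StandardScaler
# iris.values is an object array (the species column is a string), so cast to float
scaler = StandardScaler()
train_X = scaler.fit_transform(train_X.astype(float))
test_X = scaler.transform(test_X.astype(float))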
