How can I test real-time predictions in neural network? [closed] - python

Closed. This question needs to be more focused. It is not currently accepting answers.
Closed 3 years ago.
As I'm new here, let me ask a question that is probably fairly common. I wrote an MLP neural network model using deep learning. I'm using a standard dataset which I downloaded. From a statistical point of view, the accuracy and F1-score of my model look wonderful. Now I need to test this program with real-time data, and I would be glad to hear your suggestions on how I can perform real-time predictions with a neural network.
import pandas as pd
from pandas import DataFrame
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, roc_curve, auc
from sklearn.neural_network import MLPClassifier
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
########################################################################################
db = pd.read_csv(r"C:\Users\cert 3\Desktop\Vasou\proposal\code\StackOverFlow\UDP-Flood-CSV.csv")
X = db.iloc[:, 0:4]
y = db.iloc[:, 4]
m, n = X.shape
MG = X
X = preprocessing.scale(X)  # note: for real-time use, fit a scaler on the training data and reuse it on new inputs
encoder = LabelEncoder()
encoder.fit(y)
encoded_y = encoder.transform(y)
y = to_categorical(encoded_y)  # one-hot encode the class labels
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
########################################################################################
mlp = MLPClassifier()
parameter_space = {'hidden_layer_sizes': [(8, 12, 4), (5, 5, 2), (4, 4, 4)],
                   'activation': ['tanh', 'relu'],
                   'solver': ['sgd', 'adam'],
                   'alpha': [0.001, 0.01, 0.05, 0.1],
                   'learning_rate': ['constant', 'adaptive'],
                   'max_iter': [20, 50, 100]}
clf = GridSearchCV(mlp, parameter_space, n_jobs=-1, cv=3, return_train_score=True)
clf.fit(X_train, y_train)
print('Best parameters found:\n', clf.best_params_, clf.best_score_)
#######################################################################################
cvr = clf.cv_results_
df = DataFrame(cvr)
scores = df['mean_test_score']
h = df['param_hidden_layer_sizes']
alpha = df['param_alpha']
optim = df['param_solver']
l_rate = df['param_learning_rate']
activ = df['param_activation']
itr = df['param_max_iter']
dh = DataFrame({'Scores': scores, 'Iteration': itr, 'Hidden_Layers': h, 'alpha': alpha,
                'Solver': optim, 'Learning_Rate': l_rate, 'Activation': activ})
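# To quickly inspect the best configurations from the grid search
# (an illustrative line, not in the original post):
# print(dh.sort_values('Scores', ascending=False).head())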
########################################################################################
model = Sequential()
model.add(Dense(8, input_dim=n, kernel_initializer='uniform', activation='tanh'))
model.add(Dense(12, activation='tanh'))
model.add(Dense(4, activation='tanh'))
model.add(Dense(2, activation='sigmoid'))  # with categorical_crossentropy, a softmax output is the more conventional choice
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
########################################################################################
hist = model.fit(X_train, y_train, batch_size = 10, epochs = 100, validation_split=0.5)
scoress = model.evaluate(X, y, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], scoress[1]*100))
print(hist.history)
# save model and architecture to single file
model.save("model.h5")
model.save_weights("model_weight.h5")
print("saved model to disk")
# Plot training & validation accuracy values
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.title('Training vs Test accuracy, DA')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Training acc', 'Validation acc'], loc='best')
#plt.show()
#plt.figure()
plt.savefig('Accuracy.png', dpi=300, bbox_inches='tight')
plt.close()  # close the current figure before drawing the loss plot
# Plot training & validation loss values
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Training vs Test Loss, DA')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Training loss', 'Validation loss'], loc='best')
#plt.show()
#plt.figure()
plt.savefig('Loss.png', dpi=300, bbox_inches='tight')
plt.close()
##########################################################################################
y_score = model.predict(X_test)
org = np.zeros((y_test.shape[0]))
prd = np.zeros((y_score.shape[0]))
def decode(datum):
    return np.argmax(datum)
for i in range(y_score.shape[0]):
    prd[i] = decode(y_score[i])
for j in range(y_test.shape[0]):
    org[j] = decode(y_test[j])
cm_mlp = confusion_matrix(org, prd)
print("Confusion matrix of the MLP:", "\n", cm_mlp)
f = open("output.txt", "a")
print('Accuracy Score : ' + str(accuracy_score(org,prd)), file=f)
f.close()
##########################################################################################
def generate_results(y_test, y_score):
    fpr, tpr, _ = roc_curve(y_test, y_score)
    roc_auc = auc(fpr, tpr)
    plt.figure()
    plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic curve')
    #plt.show()
    plt.savefig('False and True comparison.png', dpi=300, bbox_inches='tight')
    print('AUC: %f' % roc_auc)
    print('Generating results')
generate_results(y_test[:, 0], y_score[:, 0])
And this is my Python code.

If you want to run this code on demand, you can use shell input redirection (the < operator), for example: python script.py < your_streamer.
You must also choose a special character as an end-of-packet marker so you can tell when a whole packet has been captured.
In Python, input() is a good choice, with \n as the separator.
script.py
import numpy as np
from keras.models import load_model

model = load_model("model.h5")  # reload the model saved by the training script
while True:
    X = np.array(input().split(','), dtype=float)
    y = model.predict(np.array([X]))
    print(X, y)
file.txt as the streamer:
0.218,0.7451,0.7451,0.574
0.215,0.8854,0.7451,0.745
0.275,0.5744,0.7451,0.574
0.751,0.5744,0.2150,0.885
...
...
...
$ python script.py < file.txt
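One caveat with the sketch above: the training script scales X with preprocessing.scale, so live inputs should pass through the same transformation before predict is called. A minimal sketch, assuming a StandardScaler was fitted on the training data and persisted alongside the model (the scaler.pkl filename is illustrative):
import pickle
import numpy as np
from keras.models import load_model
from sklearn.preprocessing import StandardScaler

# at training time: scaler = StandardScaler().fit(X_raw)
#                   pickle.dump(scaler, open("scaler.pkl", "wb"))
model = load_model("model.h5")
scaler = pickle.load(open("scaler.pkl", "rb"))
while True:
    x = np.array(input().split(','), dtype=float)
    x = scaler.transform(x.reshape(1, -1))  # apply the same scaling as training
    print(model.predict(x))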

Related

What's the difference between confusion-matrix accuracy and test accuracy?

from sklearn.model_selection import train_test_split
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau

x_train, x_test, y_train, y_test = train_test_split(img_array, img_labels,
                                                    shuffle=True, stratify=img_labels,
                                                    test_size=0.1, random_state=42)
checkpoint = ModelCheckpoint('model.h5', monitor='val_loss', mode='min', save_best_only=True, verbose=1)
# the original snippet had a stray `restore_best_weights=True)`, which is an EarlyStopping
# argument, so an EarlyStopping callback is reconstructed here; the patience value is assumed
early_stop = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, verbose=1, min_delta=0.0001)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
hist = model.fit(x_train, y_train, epochs=50, batch_size=64, callbacks=[checkpoint, early_stop, reduce_lr], validation_data=(x_test, y_test))
plt.subplot(1, 2, 1)  # loss panel
plt.ylabel('Loss', fontsize=16)
plt.plot(hist.history['loss'], color='b', label='Training Loss')
plt.plot(hist.history['val_loss'], color='r', label='Validation Loss')
plt.legend(loc='upper right')
plt.subplot(1, 2, 2)  # accuracy panel
plt.ylabel('Accuracy', fontsize=16)
plt.plot(hist.history['accuracy'], color='b', label='Training Accuracy')
plt.plot(hist.history['val_accuracy'], color='r', label='Validation Accuracy')
plt.legend(loc='lower right')
plt.show()
Output
y_pred=model.predict(x_test)
y_pred=np.argmax(y_pred, axis=1)
y_test=np.argmax(y_test, axis=1)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)  # renamed to avoid shadowing the sklearn function
cm
# Finding the FP, FN, TP, TN values
FP = cm.sum(axis=0) - np.diag(cm)
FN = cm.sum(axis=1) - np.diag(cm)
TP = np.diag(cm)
TN = cm.sum() - (FP + FN + TP)
FP = FP.astype(float)
FN = FN.astype(float)
TP = TP.astype(float)
TN = TN.astype(float)
# Sensitivity, hit rate, recall, or true positive rate
TPR = TP/(TP+FN)
# Specificity or true negative rate
TNR = TN/(TN+FP)
# Precision or positive predictive value
PPV = TP/(TP+FP)
# Negative predictive value
NPV = TN/(TN+FN)
# Fall out or false positive rate
FPR = FP/(FP+TN)
# False negative rate
FNR = FN/(TP+FN)
# False discovery rate
FDR = FP/(TP+FP)
# Overall accuracy
ACC = (TP+TN)/(TP+FP+FN+TN)
ACC*100
Output: array([87.21092226, 98.74616885, 85.81777654, 89.69072165, 80.46809696, 93.56366676, 83.75592087])
While deep learning reports an accuracy of around 60%, the per-class accuracies computed from the confusion matrix average around 80%-85%. What is the difference between these accuracies? Do the two have different meanings?
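A likely source of the gap: Keras reports overall multiclass accuracy (the fraction of all samples classified correctly), while the ACC formula above is computed one-vs-rest per class, where the many true negatives of the other classes inflate each value. A small illustrative sketch with a made-up 3-class confusion matrix:
import numpy as np
cm = np.array([[50,  5,  5],
               [10, 20, 10],
               [10, 10, 30]])
overall = np.diag(cm).sum() / cm.sum()        # multiclass accuracy, what Keras reports (~0.67 here)
TP = np.diag(cm)
FP = cm.sum(axis=0) - TP
FN = cm.sum(axis=1) - TP
TN = cm.sum() - (TP + FP + FN)
per_class = (TP + TN) / (TP + FP + FN + TN)   # one-vs-rest accuracy per class (~0.77-0.80 here)
print(overall, per_class)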

How to optimize a model using the functional api of Keras

I am trying to build a model using the functional API of Keras.
Here is the entire model that I have made. I am not sure if it is correct, and I would be very happy if someone could take a look at it for a moment.
I have first split the data into train and test datasets.
from sklearn.model_selection import train_test_split
X1_train, X1_test, X2_train, X2_test, y_train, y_test = train_test_split(X1_scaled, X2_scaled, end_y, test_size=0.2)
[i.shape for i in (X1_train, X1_test, X2_train, X2_test, y_train, y_test)]
Here is the part, where I start to build the model
from tensorflow.keras import layers, Model, utils
# Build the model
input1 = layers.Input((10, 6))
input2 = layers.Input((10, 2, 5))
x1 = layers.Flatten()(input1)
x2 = layers.Flatten()(input2)
concat = layers.concatenate([x1, x2])
# Add hidden and dropout layers
hidden1 = layers.Dense(64, activation='relu')(concat)
hid1_out = layers.Dropout(0.5)(hidden1)
hidden2 = layers.Dense(32, activation='relu')(hid1_out)
hid2_out = layers.Dropout(0.5)(hidden2)
output = layers.Dense(1, activation='sigmoid')(hid2_out)
model = Model(inputs=[input1, input2], outputs=output)
# summarize layers
print(model.summary())
# compile the model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# fit the keras model on the dataset
history = model.fit([X1_train, X2_train], y_train, epochs=200, batch_size=5, verbose=0, validation_data=([X1_test, X2_test], y_test))
# evaluate the keras model
_, train_accuracy = model.evaluate([X1_train, X2_train], y_train, verbose=0)
_, test_accuracy = model.evaluate([X1_test, X2_test], y_test, verbose=0)
print('Accuracy NN: %.2f' % (train_accuracy*100))
print('Accuracy NN: %.2f' % (test_accuracy*100))
A problem occurs here: no plot is shown.
# Plots
from matplotlib import pyplot
pyplot.subplot(211)
pyplot.title('Loss')
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
# plot accuracy
pyplot.subplot(212)
pyplot.title('Accuracy')
pyplot.plot(history.history['accuracy'], label='train')
pyplot.plot(history.history['val_accuracy'], label='test')
pyplot.legend()
pyplot.show()
Could someone give me any hints on how to fix it?
Thank you for giving me some of your time.
Below is the code for a function that will produce two plots side by side. The first plot
shows the training loss and validation loss versus epochs. The second plot shows training accuracy and validation accuracy versus epochs. It also places a dot on the first plot at the epoch with the lowest validation loss and a dot on the second plot at the epoch with the highest validation accuracy.
def tr_plot(history):
    # Plot the training and validation data
    tacc = history.history['accuracy']
    tloss = history.history['loss']
    vacc = history.history['val_accuracy']
    vloss = history.history['val_loss']
    Epoch_count = len(tacc)
    Epochs = []
    for i in range(Epoch_count):
        Epochs.append(i + 1)
    index_loss = np.argmin(vloss)   # this is the epoch with the lowest validation loss
    val_lowest = vloss[index_loss]  # lowest validation loss value
    index_acc = np.argmax(vacc)     # this is the epoch with the highest validation accuracy
    acc_highest = vacc[index_acc]   # this is the highest validation accuracy value
    plt.style.use('fivethirtyeight')
    sc_label = 'best epoch= ' + str(index_loss + 1)
    vc_label = 'best epoch= ' + str(index_acc + 1)
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 8))
    axes[0].plot(Epochs, tloss, 'r', label='Training loss')
    axes[0].plot(Epochs, vloss, 'g', label='Validation loss')
    axes[0].scatter(index_loss + 1, val_lowest, s=150, c='blue', label=sc_label)
    axes[0].set_title('Training and Validation Loss')
    axes[0].set_xlabel('Epochs')
    axes[0].set_ylabel('Loss')
    axes[0].legend()
    axes[1].plot(Epochs, tacc, 'r', label='Training Accuracy')
    axes[1].plot(Epochs, vacc, 'g', label='Validation Accuracy')
    axes[1].scatter(index_acc + 1, acc_highest, s=150, c='blue', label=vc_label)
    axes[1].set_title('Training and Validation Accuracy')
    axes[1].set_xlabel('Epochs')
    axes[1].set_ylabel('Accuracy')
    axes[1].legend()
    plt.tight_layout()
    plt.show()
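A minimal usage sketch, assuming `history` is the object returned by `model.fit` with validation data (the function also relies on `np` and `plt` being imported):
tr_plot(history)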
The resulting plot looks like this

How to find the ROC curve and AUC score of CNN model (keras)

I am new to deep learning. I am trying to generate a ROC curve for the following code. I am using Keras. There are 10 classes, and the images are RGB images of size 100*100*3.
I went through [This link][1]. My problem is the same, but I could not find the true labels. I am new to this field, so please help me.
I also looked at [This for true label][2].
The code snippet of my program is:
target_size = (100, 100, 3)
train_generator = train_datagen.flow_from_directory('path',
                                                    target_size=target_size[:-1],
                                                    batch_size=16,
                                                    class_mode='categorical',
                                                    subset='training',
                                                    seed=random_seed)
valid_generator = ...
test_generator = ...
n_classes = len(set(train_generator.classes))
input_layer = keras.layers.Input(shape=target_size)
conv2d_1 = keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same',
                               activation='relu',
                               kernel_initializer='he_normal')(input_layer)
batchnorm_1 = keras.layers.BatchNormalization()(conv2d_1)
maxpool1 = keras.layers.MaxPool2D(pool_size=(2, 2))(batchnorm_1)
conv2d_2 = keras.layers.Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same',
                               activation='relu',
                               kernel_initializer='he_normal')(maxpool1)
batchnorm_2 = keras.layers.BatchNormalization()(conv2d_2)
maxpool2 = keras.layers.MaxPool2D(pool_size=(2, 2))(batchnorm_2)
flatten = keras.layers.Flatten()(maxpool2)
dense_1 = keras.layers.Dense(256, activation='relu')(flatten)
dense_2 = keras.layers.Dense(n_classes, activation='softmax')(dense_1)
model = keras.models.Model(input_layer, dense_2)  # dense_2 is the output layer (dense_3 was undefined)
model.compile(optimizer=keras.optimizers.Adam(0.001),
              loss='categorical_crossentropy',
              metrics=['acc'])
model.summary()
model.fit_generator(generator=train_generator, validation_data=valid_generator,
                    epochs=200)
score = model.evaluate_generator(test_generator)
print(score)
Now please help me in getting AUC score and ROC Curve.
[1]: How to find the ROC curve and AUC score of this CNN model (keras)
[2]: Getting true labels for keras predictions
Add this code. Hope it works.
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics

x, y = test_generator.next()  # note: this takes a single batch from the generator
prediction = model.predict(x)
predict_label1 = np.argmax(prediction, axis=-1)
true_label1 = np.argmax(y, axis=-1)
y = np.array(true_label1)
scores = np.array(predict_label1)
# ROC of class 9 vs. the rest, computed from hard label predictions rather than probabilities
fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=9)
roc_auc = metrics.auc(fpr, tpr)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
         lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic (ROC)')
plt.legend(loc="lower right")
plt.show()
The score function does not provide ROC and AUC scores by default; we have to calculate them separately. You can check the following code snippet to calculate the ROC and AUC scores and plot their values.
from sklearn.metrics import roc_curve
y_pred_keras = model.predict(X_test).ravel()
fpr_keras, tpr_keras, thresholds_keras = roc_curve(y_test, y_pred_keras)
from sklearn.metrics import auc
auc_keras = auc(fpr_keras, tpr_keras)
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_keras, tpr_keras, label='Keras (area = {:.3f})'.format(auc_keras))
# plt.plot(fpr_rf, tpr_rf, label='RF (area = {:.3f})'.format(auc_rf))  # assumes a random-forest baseline (fpr_rf/tpr_rf/auc_rf) was computed elsewhere
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
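Both snippets above treat the task as binary. For the 10-class model in the question, one common approach (an addition, not from the answers above) is a one-vs-rest ROC curve per class on the softmax probabilities; here `y_true` is assumed to hold the integer labels and `y_prob` the model's predicted probabilities:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_curve, auc

n_classes = 10
y_true_bin = label_binarize(y_true, classes=list(range(n_classes)))  # shape: (n_samples, n_classes)
plt.figure()
for c in range(n_classes):
    fpr, tpr, _ = roc_curve(y_true_bin[:, c], y_prob[:, c])  # one-vs-rest for class c
    plt.plot(fpr, tpr, label='class %d (AUC = %.2f)' % (c, auc(fpr, tpr)))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.legend(loc='best')
plt.show()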

How to plot the ROC curve for ANN for 10 fold Cross validation in Keras using Python?

I was trying to plot the ROC curves for all 10 experiments of a 10-fold cross-validation for an ANN in Keras. I have been stuck on this for a week and cannot find a solution. Could anyone help with this? I have tried the code from the following scikit-learn example (https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc_crossval.html) and wanted to use the scikit-learn wrapper for the Keras model, but it shows errors. My code in Python:
## Creating NN in Keras
# Load libraries
import numpy as np
from keras import models
from keras import layers
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_classification
# Set random seed
np.random.seed(7)
#Create Function That Constructs Neural Network
# Create function returning a compiled network
def create_network():
    # Start neural network
    network = models.Sequential()
    # Add fully connected layer with a ReLU activation function
    network.add(layers.Dense(units=25, activation='relu', input_shape=(X.shape[1],)))
    # Add fully connected layer with a ReLU activation function
    network.add(layers.Dense(units=X.shape[1], activation='relu'))
    # Add fully connected layer with a sigmoid activation function
    network.add(layers.Dense(units=1, activation='sigmoid'))
    # Compile neural network
    network.compile(loss='binary_crossentropy',  # Cross-entropy
                    optimizer='adam',            # Adam optimizer
                    metrics=['accuracy'])        # Accuracy performance metric
    # Return compiled network
    return network
###
#Wrap Function In KerasClassifier
# Wrap Keras model so it can be used by scikit-learn
neural_network = KerasClassifier(build_fn=create_network,
                                 epochs=150,
                                 batch_size=10,
                                 verbose=0)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import auc
from sklearn.metrics import plot_roc_curve
from sklearn.model_selection import StratifiedKFold
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# #############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=10)
classifier = neural_network
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
fig, ax = plt.subplots()
for i, (train, test) in enumerate(cv.split(X, y)):
    classifier.fit(X[train], y[train])
    viz = plot_roc_curve(classifier, X[test], y[test],
                         name='ROC fold {}'.format(i),
                         alpha=0.3, lw=1, ax=ax)
    interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)
    interp_tpr[0] = 0.0
    tprs.append(interp_tpr)
    aucs.append(viz.roc_auc)
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
        label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color='b',
        label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
        lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
                label=r'$\pm$ 1 std. dev.')
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
       title="Receiver operating characteristic example")
ax.legend(loc="lower right")
plt.show()
It shows the following error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-29-f10078491154> in <module>()
40 viz = plot_roc_curve(classifier, X[test], y[test],
41 name='ROC fold {}'.format(i),
---> 42 alpha=0.3, lw=1, ax=ax)
43 interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)
44 interp_tpr[0] = 0.0
/usr/local/lib/python3.6/dist-packages/sklearn/metrics/_plot/roc_curve.py in plot_roc_curve(estimator, X, y, sample_weight, drop_intermediate, response_method, name, ax, **kwargs)
170 )
171 if not is_classifier(estimator):
--> 172 raise ValueError(classification_error)
173
174 prediction_method = _check_classifer_response_method(estimator,
ValueError: KerasClassifier should be a binary classifier
I had the same question. I found this link very informative:
https://www.kaggle.com/kanncaa1/roc-curve-with-k-fold-cv. I have modified it for my case as below:
seed = 7
np.random.seed(seed)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
i = 1
fig, ax = plt.subplots()
kfold = StratifiedKFold(n_splits=3, shuffle=True, random_state=seed)
# for i, (train, test) in enumerate(cv.split(X_13 , target)):
for train, test in kfold.split(X_train, y_train):
    # create model
    model = Sequential()
    model.add(Dense(100, input_dim=X_train.shape[1], activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Dense(80, activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))
    # compile model
    sgd = SGD(lr=0.1, momentum=0.8)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    model.fit(X_train[train], y_train[train], epochs=100, batch_size=15, verbose=0)
    # evaluate the model
    y_pred_keras = model.predict_proba(X_train[test]).ravel()
    fpr, tpr, thresholds = roc_curve(y_train[test], y_pred_keras)
    tprs.append(interp(mean_fpr, fpr, tpr))
    roc_auc = auc(fpr, tpr)
    aucs.append(roc_auc)
    plt.plot(fpr, tpr, lw=2, alpha=0.3, label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
    i = i + 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='black')
mean_tpr = np.mean(tprs, axis=0)
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, color='blue',
         label=r'Mean ROC (AUC = %0.2f )' % (mean_auc), lw=2, alpha=1)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC')
plt.legend(loc="lower right")
plt.show()
Hope it could help!
I have just answered what seems to be a copy of this post (apart from variable names) here.
I am not sure whether this is an exact duplicate, because the question comes from a different account, but it seems like it. Here is a copy of my answer in case one of these is closed as a duplicate.
This is an implementation detail that is (probably) missing in this wrapper library.
Sklearn simply checks whether an attribute called _estimator_type is present on the estimator and is set to the string value "classifier". You can see that by looking into sklearn's source code on GitHub:
def is_classifier(estimator):
    """Return True if the given estimator is (probably) a classifier.

    Parameters
    ----------
    estimator : object
        Estimator object to test.

    Returns
    -------
    out : bool
        True if estimator is a classifier and False otherwise.
    """
    return getattr(estimator, "_estimator_type", None) == "classifier"
All you need to do is to add this attribute to your classifier object manually.
classifier = KerasClassifier(build_fn=create_network,
                             epochs=10,
                             batch_size=100,
                             verbose=2)
classifier._estimator_type = "classifier"
I have tested it and it works.

Getting zero precision, recall and f1-score in binary classification?

I am working on a classification task in which I have to classify between two classes.
But I always get these results, i.e. precision, recall and F1-score for the second class are 0.
I have tried tuning the parameters of my classifier, but there is still no improvement.
Here is the code (Python) and the results. Thanks in advance!
import itertools
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split  # sklearn.cross_validation is deprecated
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, roc_curve, auc
from sklearn.neural_network import MLPClassifier
#Code for Plotting Confusion Matrix
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Code for live plots of loss and training score
import keras
from IPython.display import clear_output
class PlotLearning(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
        self.i = 0
        self.x = []
        self.losses = []
        self.val_losses = []
        self.acc = []
        self.val_acc = []
        self.fig = plt.figure()
        self.logs = []

    def on_epoch_end(self, epoch, logs={}):
        self.logs.append(logs)
        self.x.append(self.i)
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
        self.acc.append(logs.get('acc'))
        self.val_acc.append(logs.get('val_acc'))
        self.i += 1
        f, (ax1, ax2) = plt.subplots(1, 2, sharex=True)
        clear_output(wait=True)
        ax1.set_yscale('log')
        ax1.plot(self.x, self.losses, label="loss")
        ax1.plot(self.x, self.val_losses, label="val_loss")
        ax1.legend()
        ax2.plot(self.x, self.acc, label="accuracy")
        ax2.plot(self.x, self.val_acc, label="validation accuracy")
        ax2.legend()
        plt.show()

plot = PlotLearning()
#Loading Data
#____________Loading Traning
dataset = pd.read_csv('DOS2012.csv')
#dataset = dataset.astype('float32')
#scaler = MinMaxScaler(feature_range=(0, 1))
#dataset = scaler.fit_transform(dataset)
#dataset = pd.DataFrame(dataset)
x_train = dataset.iloc[: , 0:11].values
x_train = pd.DataFrame(x_train)
y_train = dataset.iloc[: , 11:12].values
y_train = pd.DataFrame(y_train)
look_back = 11
#
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, random_state=0, test_size = 0.2)
#PrePorcessing
x_train = np.array(x_train)
x_test = np.array(x_test)
x_train = np.reshape(x_train, (x_train.shape[0], 1, x_train.shape[1]))
x_test = np.reshape(x_test, (x_test.shape[0], 1, x_test.shape[1]))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(11, input_shape=(1, look_back)))
model.add(Dense(8,activation='relu'))
model.add(Dense(3,activation='relu'))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
score = model.fit(x_train, y_train, validation_split=0.2, epochs=40, batch_size=32, verbose=1, callbacks=[plot])  # a validation split is needed so val_loss/val_acc exist for the plots below
#************
#Ploting Traning Score
#************
print(score.history.keys())
plt.plot(score.history['acc'])
plt.plot(score.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
#Plot summarize history for loss
plt.plot(score.history['loss'])
plt.plot(score.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# make predictions
predictions = model.predict(x_test)
#Accuracy
print ("Accuracy is ", accuracy_score(y_test,np.round(predictions))*100)
# Compute confusion matrix Test
cnf_matrix = confusion_matrix(y_test, np.round(predictions))
np.set_printoptions(precision=2)
print(cnf_matrix)
# Plot non-normalized confusion matrix
classes = ['A','B']
#*******************************
#Ploting Confusion Matrix
#*******************************
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=classes,
title='Confusion matrix, Test')
plt.show()
#Printing table values
print(classification_report(y_test,np.round(predictions)))
#***************************************************************
#********* ROC CURVE ******************************************
#***************************************************************
from sklearn.metrics import roc_curve, auc
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, predictions)
roc_auc = auc(false_positive_rate, true_positive_rate)
plt.title('ROC LSTM')
plt.plot(false_positive_rate, true_positive_rate, 'blue', label='AUC = %0.2f'% roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'m--')
plt.xlim([0,1])
plt.ylim([0,1.1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
Result:

Confusion matrix (rows = actual, columns = predicted):

          A      B
   A  33511      0
   B    765      0

Classification report:

             precision    recall  f1-score   support

          0       0.98      1.00      0.99     33511
          1       0.00      0.00      0.00       765

avg / total       0.96      0.98      0.97     34276
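The confusion matrix shows the model predicting only class A, consistent with the severe class imbalance (33511 vs. 765 samples). One common mitigation, sketched here as an assumption-laden example rather than a verified fix for this dataset, is to weight the minority class more heavily during training:
# A minimal sketch: class weights inversely proportional to class frequency.
# The counts below are the support values from the report; in practice derive
# them from y_train, e.g. with np.bincount.
n_a, n_b = 33511, 765
total = n_a + n_b
class_weight = {0: total / (2.0 * n_a), 1: total / (2.0 * n_b)}
score = model.fit(x_train, y_train, epochs=40, batch_size=32,
                  class_weight=class_weight, verbose=1)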
