Trying to understand and implement the GridSearch method for Keras regression. Here is my simple, reproducible regression application.
import pandas as pd
import numpy as np
import sklearn
from sklearn.model_selection import train_test_split
from sklearn import metrics
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
df = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/concrete/slump/slump_test.data")
df.drop(['No','FLOW(cm)','Compressive Strength (28-day)(Mpa)'],1,inplace=True)
# Convert a Pandas dataframe to the x,y inputs that TensorFlow needs
def to_xy(df, target):
    result = []
    for x in df.columns:
        if x != target:
            result.append(x)
    # find out the type of the target column. Is it really this hard? :(
    target_type = df[target].dtypes
    target_type = target_type[0] if hasattr(target_type, '__iter__') else target_type
    # Encode to int for classification, float otherwise. TensorFlow likes 32 bits.
    if target_type in (np.int64, np.int32):
        # Classification
        dummies = pd.get_dummies(df[target])
        return df.as_matrix(result).astype(np.float32), dummies.as_matrix().astype(np.float32)
    else:
        # Regression
        return df.as_matrix(result).astype(np.float32), df.as_matrix([target]).astype(np.float32)
x,y = to_xy(df,'SLUMP(cm)')
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=42)
#Create Model
model = Sequential()
model.add(Dense(128, input_dim=x.shape[1], activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, mode='auto')
checkpointer = ModelCheckpoint(filepath="best_weights.hdf5",save_best_only=True) # save best model
model.fit(x_train,y_train,callbacks=[monitor,checkpointer],verbose=0,epochs=1000)
#model.fit(x_train,y_train,validation_data=(x_test,y_test),callbacks=[monitor,checkpointer],verbose=0,epochs=1000)
pred = model.predict(x_test)
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print("(RMSE): {}".format(score))
If you run the code, you can see that the loss values are not too large.
And here is my reproducible GridSearch implementation. First of all, I simply searched the web, found a GridSearch example for KerasClassifier, and then tried to adapt it for KerasRegressor. I am not sure whether my revision is correct. Even assuming the general concept is correct, there must be a problem in this code, because the reported loss does not make sense: the loss function is MSE, but the output is negative. Unfortunately, I could not figure out where I am going wrong.
from keras.wrappers.scikit_learn import KerasRegressor
import pandas as pd
import numpy as np
import sklearn
from sklearn.model_selection import train_test_split
from sklearn import metrics
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import GridSearchCV
df = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/concrete/slump/slump_test.data")
df.drop(['No','FLOW(cm)','Compressive Strength (28-day)(Mpa)'],1,inplace=True)
#Convert a Pandas dataframe to the x,y inputs that TensorFlow needs
def to_xy(df, target):
    result = []
    for x in df.columns:
        if x != target:
            result.append(x)
    # find out the type of the target column. Is it really this hard? :(
    target_type = df[target].dtypes
    target_type = target_type[0] if hasattr(target_type, '__iter__') else target_type
    # Encode to int for classification, float otherwise. TensorFlow likes 32 bits.
    if target_type in (np.int64, np.int32):
        # Classification
        dummies = pd.get_dummies(df[target])
        return df.as_matrix(result).astype(np.float32), dummies.as_matrix().astype(np.float32)
    else:
        # Regression
        return df.as_matrix(result).astype(np.float32), df.as_matrix([target]).astype(np.float32)
x,y = to_xy(df,'SLUMP(cm)')
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=42)
def create_model(optimizer='adam'):
    # create model
    model = Sequential()
    model.add(Dense(128, input_dim=x.shape[1], activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['mse'])
    return model
model = KerasRegressor(build_fn=create_model, epochs=100, batch_size=10, verbose=0)
optimizer = ['SGD', 'RMSprop', 'Adagrad']
param_grid = dict(optimizer=optimizer)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(x_train, y_train)
#summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))
I have tested your code, and I see that you are not passing a scoring function to GridSearchCV, so according to the scikit-learn documentation:
If None, the estimator’s default scorer (if available) is used.
So it seems to be using 'neg_mean_absolute_error' (or one of these regression scoring functions) by default to score the models.
That is probably why it reports the best model as:
-75.820078 using {'optimizer': 'Adagrad'}
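To make the sign convention explicit, you can pass a scorer yourself and convert the best score back to an RMSE. A minimal sketch, reusing the model and param_grid defined above (the cv value is just an example):
grid = GridSearchCV(estimator=model, param_grid=param_grid,
                    scoring='neg_mean_squared_error', cv=3, n_jobs=1)
grid_result = grid.fit(x_train, y_train)
# scikit-learn always maximizes scores, so MSE is reported as a negative number;
# negate it and take the square root to recover an RMSE on the original scale
best_rmse = np.sqrt(-grid_result.best_score_)
print("Best RMSE: {} using {}".format(best_rmse, grid_result.best_params_))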
I have:
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import minmax_scale
from sklearn.model_selection import GridSearchCV, KFold, train_test_split
# Extract the main and side frequency factors concentractions and catalyst concentrations from the data
main_freq_factors = np.array(FreqFac['Main frequency factors concentraction (l/(mol*s))'])
side_freq_factors = np.array(FreqFac['Side frequency factors concentraction (1/s)'])
catalyst_conc = np.array(FreqFac.iloc[:,1].values)
# Scale the data using the min-max scaler
main_freq_factors_scaled = minmax_scale(main_freq_factors)
side_freq_factors_scaled = minmax_scale(side_freq_factors)
catalyst_conc_scaled = minmax_scale(catalyst_conc)
# Combine the scaled data into a single array
X = catalyst_conc_scaled
y = np.column_stack((main_freq_factors_scaled, side_freq_factors_scaled))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# Define the model
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(32, activation='relu', input_shape=(1,)))
model.add(tf.keras.layers.Dense(1, activation='linear'))
# Compile the model with the mean squared error loss function and Adam optimization algorithm
model.compile(loss='mean_squared_error', optimizer='adam')
# Define the grid of hyperparameters to search
param_grid = {
    'epochs': [50, 100, 150],
    'batch_size': [32, 64, 128]
}
# Create a K-fold cross-validation generator
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
# Create a grid search object using the model, param_grid, and kfold
grid_search = GridSearchCV(model, param_grid, cv=kfold, scoring='neg_mean_squared_error', return_train_score=True)
# Fit the grid search object to the data
grid_search.fit(catalyst_conc.reshape(-1, 1), main_freq_factors)
# Print the results of the grid search
print(grid_search.best_params_)
But I get the error:
Cannot clone object '<keras.engine.sequential.Sequential object at 0x7eff87f72580>' (type <class 'keras.engine.sequential.Sequential'>): it does not seem to be a scikit-learn estimator as it does not implement a 'get_params' method.
What am I doing wrong?
The error comes from scikit's clone which is used by GridSearchCV (through BaseSearchCV) to create a separate copy of the estimator for each parameter configuration during the grid search.
You could try using a wrapper for the Scikit-Learn API:
from keras.wrappers.scikit_learn import KerasRegressor
# ...
def build_fn():
    # Define the model
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(32, activation='relu', input_shape=(1,)))
    model.add(tf.keras.layers.Dense(1, activation='linear'))
    # Compile the model with the mean squared error loss function and Adam optimization algorithm
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
model = KerasRegressor(build_fn)
# ...
Another option is to pip install scikeras and replace the import above with:
from scikeras.wrappers import KerasRegressor
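With either wrapper in place, the rest of the grid search should work, because the wrapper implements get_params/set_params. A minimal sketch, reusing kfold, catalyst_conc and main_freq_factors from the question (epochs and batch_size are routed through the wrapper to fit):
model = KerasRegressor(build_fn, verbose=0)
param_grid = {
    'epochs': [50, 100, 150],
    'batch_size': [32, 64, 128]
}
grid_search = GridSearchCV(model, param_grid, cv=kfold,
                           scoring='neg_mean_squared_error',
                           return_train_score=True)
grid_search.fit(catalyst_conc.reshape(-1, 1), main_freq_factors)
print(grid_search.best_params_)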
I am trying to create a stacking ensemble using scikit-learn that contains a Keras model wrapped using KerasClassifier.
Here's an example of how my code looks using the iris dataset:
# import libraries
import pandas
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
from keras.layers import Dropout, Flatten, Dense
from keras.utils import np_utils
from keras import optimizers
from sklearn.model_selection import cross_val_score, RepeatedStratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import StackingClassifier
from numpy import mean
from numpy import std
# import data
dataframe = pandas.read_csv("iris.csv", header=None)
dataset = dataframe.values
X = dataset[:,0:4].astype(float)
Y = dataset[:,4]
# create and wrap neural network
def create_model():
    model = Sequential()
    model.add(Flatten(input_shape=X.shape[1:]))
    model.add(Dense(150, activation=tf.keras.layers.LeakyReLU(alpha=0.3)))
    model.add(Dropout(0.9))
    model.add(Dense(50, activation=tf.keras.layers.LeakyReLU(alpha=0.3)))
    model.add(Dropout(0.9))
    model.add(Dense(3, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adam(lr=2e-3),
                  metrics=['acc'])
    return model
model_nn = KerasClassifier(build_fn=create_model, epochs=50, batch_size=5, verbose=0)
model_nn._estimator_type = "classifier"
# create stack
def stacking():
    level0 = list()
    level0.append(('lr', LogisticRegression(max_iter=500000, C=.00041, solver='newton-cg', multi_class='ovr')))
    level0.append(('nn', model_nn))
    level0.append(('svm', SVC(C=1.0, gamma='scale', tol=.001, probability=True)))
    level1 = LogisticRegression()
    model = StackingClassifier(estimators=level0, final_estimator=level1, cv=5)
    return model

# evaluate model score
def evaluate_model(model, X, y):
    cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
    scores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1, error_score='raise')
    return scores
scores = evaluate_model(stacking(), X, Y)
print('%.3f (%.3f)' % (mean(scores), std(scores)))
and I get this error:
ValueError: The estimator KerasClassifier should be a classifier.
I found some posts where other users had this issue, but they were able to fix it using the model_nn._estimator_type = "classifier" line. Unfortunately, that isn't solving the issue for me. I'm really new to all of this, so any advice is appreciated. :)
KerasClassifier has been migrated from keras.wrappers.scikit_learn to scikeras.wrappers.
You need to use the code below to access KerasClassifier:
!pip install scikeras
from scikeras.wrappers import KerasClassifier
Please check this link for more details.
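For the stacking code above, that means replacing the wrapper construction with something like the following. This is a minimal sketch reusing create_model from the question; depending on your scikeras version, you may also need to one-hot encode Y yourself or pass loss='categorical_crossentropy' to the wrapper:
from scikeras.wrappers import KerasClassifier

# scikeras implements the full scikit-learn estimator interface, so
# StackingClassifier recognizes the wrapped network as a classifier and the
# manual model_nn._estimator_type = "classifier" line is no longer needed
model_nn = KerasClassifier(model=create_model, epochs=50, batch_size=5, verbose=0)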
I'm getting a numpy shape error when I use the predict function of a Keras estimator. I build, evaluate, and then retrain the model using the following code:
import pandas as pd
import sqlalchemy as sqla
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
# Connect to to the DB and retrieve the iris table
con = sqla.create_engine('postgresql://tristan:sebens#db:5432/tristan')
con.connect()
table_name = "iris"
schema = "public"
iris = pd.read_sql_table(table_name, con, schema=schema)
iris.head()
iris_ds = iris.values # Convert the table to a numpy array
X = iris_ds[:, 0:4].astype(float) # Slice the descriptive features into a numpy array
Y = iris_ds[:, 4] # Slice the labels away as their own numpy array
# The labels are encoded as strings, so we need to encode them
# as numbers that can be output by an ANN
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = to_categorical(encoded_Y)
# define baseline model
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(8, input_dim=4, activation='relu'))
    model.add(Dense(3, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
seed = 7
# Train the model:
# First we define the model as a classifier. This will affect the process used to train it
estimator = KerasClassifier(build_fn=baseline_model, epochs=200, batch_size=5, verbose=0)
# Honestly not totally sure what this is, but it has to do with splitting the training/evaluation data in
# a way that gives us a more realistic metric of the model's accuracy
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
# Now that we have our classifier and our data pipeline defined, we can begin the training process
results = cross_val_score(estimator, X, dummy_y, cv=kfold)
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
# If we like our accuracy, then we can train the model for real
# Evaluating the model actually evaluates a clone of the model, so now we need to train the model again
estimator.fit(X, dummy_y)
And this is where the trouble is. I try to make a test prediction:
# Let's make a test prediction with our model
x = X[0]
estimator.predict(x)
And I get an input shape error:
ValueError: Error when checking input: expected dense_21_input to have shape (4,) but got array with shape (1,)
I'm at a loss. How can the input have the wrong shape if it's literally a member of the training dataset?
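For what it's worth, Keras expects a batch of samples (a 2-D array), while X[0] is a single 1-D row of shape (4,), so each of its four scalars gets treated as a separate sample of shape (1,). A minimal sketch of the usual workaround, assuming the estimator from the code above:
# add a batch dimension: shape (1, 4) instead of (4,)
x = X[0].reshape(1, -1)
estimator.predict(x)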
I have the following code, using Keras Scikit-Learn Wrapper:
from keras.models import Sequential
from sklearn import datasets
from keras.layers import Dense
from sklearn.model_selection import train_test_split
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn import preprocessing
import pickle
import numpy as np
import json
def classifier(X, y):
    """
    Description of classifier
    """
    NOF_ROW, NOF_COL = X.shape

    def create_model():
        # create model
        model = Sequential()
        model.add(Dense(12, input_dim=NOF_COL, init='uniform', activation='relu'))
        model.add(Dense(6, init='uniform', activation='relu'))
        model.add(Dense(1, init='uniform', activation='sigmoid'))
        # Compile model
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        return model

    # evaluate using 10-fold cross validation
    seed = 7
    np.random.seed(seed)
    model = KerasClassifier(build_fn=create_model, nb_epoch=150, batch_size=10, verbose=0)
    return model
def main():
    """
    Description of main
    """
    iris = datasets.load_iris()
    X, y = iris.data, iris.target
    X = preprocessing.scale(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
    model_tt = classifier(X_train, y_train)
    model_tt.fit(X_train, y_train)

    #--------------------------------------------------
    # This fails
    #--------------------------------------------------
    filename = 'finalized_model.sav'
    pickle.dump(model_tt, open(filename, 'wb'))

    # load the model from disk
    loaded_model = pickle.load(open(filename, 'rb'))
    result = loaded_model.score(X_test, y_test)
    print(result)

    #--------------------------------------------------
    # This also fails
    #--------------------------------------------------
    # from keras.models import load_model
    # model_tt.save('test_model.h5')

    #--------------------------------------------------
    # This works OK
    #--------------------------------------------------
    # print model_tt.score(X_test, y_test)
    # print model_tt.predict_proba(X_test)
    # print model_tt.predict(X_test)

    # Output of predict_proba
    # 2nd column is the probability that the prediction is 1
    # this value is used as the final score, which can be used
    # with other methods for comparison
    # [[ 0.25311464  0.74688536]
    #  [ 0.84401423  0.15598579]
    #  [ 0.96047372  0.03952631]
    #  ...,
    #  [ 0.25518912  0.74481088]
    #  [ 0.91467732  0.08532269]
    #  [ 0.25473493  0.74526507]]

    # Output of predict
    # [[1]
    #  [0]
    #  [0]
    #  ...,
    #  [1]
    #  [0]
    #  [1]]

if __name__ == '__main__':
    main()
As stated in the code, it fails at this line:
pickle.dump(model_tt, open(filename, 'wb'))
With this error:
pickle.PicklingError: Can't pickle <function create_model at 0x101c09320>: it's not found as __main__.create_model
How can I get around it?
Edit 1 : Original answer about saving model
With HDF5 :
# saving model
json_model = model_tt.model.to_json()
open('model_architecture.json', 'w').write(json_model)
# saving weights
model_tt.model.save_weights('model_weights.h5', overwrite=True)
# loading model
from keras.models import model_from_json
model = model_from_json(open('model_architecture.json').read())
model.load_weights('model_weights.h5')
# dont forget to compile your model
model.compile(loss='binary_crossentropy', optimizer='adam')
Edit 2 : full code example with iris dataset
# Train model and make predictions
import numpy
import pandas
from keras.models import Sequential, model_from_json
from keras.layers import Dense
from keras.utils import np_utils
from sklearn import datasets
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load dataset
iris = datasets.load_iris()
X, Y, labels = iris.data, iris.target, iris.target_names
X = preprocessing.scale(X)
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
# convert integers to dummy variables (i.e. one hot encoded)
y = np_utils.to_categorical(encoded_Y)
def build_model():
    # create model
    model = Sequential()
    model.add(Dense(4, input_dim=4, init='normal', activation='relu'))
    model.add(Dense(3, init='normal', activation='sigmoid'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

def save_model(model):
    # saving model
    json_model = model.to_json()
    open('model_architecture.json', 'w').write(json_model)
    # saving weights
    model.save_weights('model_weights.h5', overwrite=True)

def load_model():
    # loading model
    model = model_from_json(open('model_architecture.json').read())
    model.load_weights('model_weights.h5')
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.3, random_state=seed)
# build
model = build_model()
model.fit(X_train, Y_train, nb_epoch=200, batch_size=5, verbose=0)
# save
save_model(model)
# load
model = load_model()
# predictions
predictions = model.predict_classes(X_test, verbose=0)
print(predictions)
# reverse encoding
for pred in predictions:
    print(labels[pred])
Please note that I used Keras only, not the wrapper; it only adds some complexity to something simple. Also, the code is deliberately not factored, so you can see the whole picture.
Also, you said you want to output 1 or 0. That is not possible with this dataset, because it has 3 output dimensions and classes (Iris-setosa, Iris-versicolor, Iris-virginica). If you had only 2 classes, then your output dimension and classes would be 0 or 1 using a sigmoid output function.
Just adding to gaarv's answer - If you don't require the separation between the model structure (model.to_json()) and the weights (model.save_weights()), you can use one of the following:
Use the built-in keras.models.save_model and keras.models.load_model, which store everything together in an HDF5 file.
Use pickle to serialize the Model object (or any class that contains references to it) into file/network/whatever..
Unfortunately, Keras doesn't support pickle by default. You can use
my patchy solution that adds this missing feature. Working code is
here: http://zachmoshe.com/2017/04/03/pickling-keras-models.html
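A minimal sketch of the first option, using the model_tt wrapper from the question (the underlying Keras model is available on its .model attribute after fitting):
from keras.models import save_model, load_model

# writes architecture, weights, training config and optimizer state to one HDF5 file
save_model(model_tt.model, 'full_model.h5')
restored = load_model('full_model.h5')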
Another great alternative is to use callbacks when you fit your model. Specifically the ModelCheckpoint callback, like this:
from keras.callbacks import ModelCheckpoint
#Create instance of ModelCheckpoint
chk = ModelCheckpoint("myModel.h5", monitor='val_loss', save_best_only=False)
#add that callback to the list of callbacks to pass
callbacks_list = [chk]
#create your model
model_tt = KerasClassifier(build_fn=create_model, nb_epoch=150, batch_size=10)
#fit your model with your data. Pass the callback(s) here
model_tt.fit(X_train,y_train, callbacks=callbacks_list)
This will save the model after each training epoch to the myModel.h5 file. This provides great benefits, as you are able to stop your training whenever you like (for example, when you see it has started to overfit) and still retain the previous training.
Note that this saves both the structure and the weights in the same HDF5 file (as shown by Zach), so you can then load your model using keras.models.load_model.
If you want to save only your weights separately, you can then use the save_weights_only=True argument when instantiating your ModelCheckpoint, enabling you to load your model as explained by Gaarv. Extracting from the docs:
save_weights_only: if True, then only the model's weights will be saved (model.save_weights(filepath)), else the full model is saved (model.save(filepath)).
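A minimal sketch of that variant, assuming the same create_model builder and model_tt wrapper from the code above:
from keras.callbacks import ModelCheckpoint

# save only the weights, every epoch
chk = ModelCheckpoint("myWeights.h5", monitor='val_loss', save_weights_only=True)
model_tt.fit(X_train, y_train, callbacks=[chk])

# to restore, rebuild the same architecture and load the weights into it
model = create_model()
model.load_weights("myWeights.h5")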
The accepted answer is too complicated. You can fully save and restore every aspect of your model in a .h5 file. Straight from the Keras FAQ:
You can use model.save(filepath) to save a Keras model into a single
HDF5 file which will contain:
the architecture of the model, allowing to re-create the model
the weights of the model
the training configuration (loss, optimizer)
the state of the optimizer, allowing to resume training exactly where you left off.
You can then use keras.models.load_model(filepath) to reinstantiate your model. load_model will also take care of compiling the model using the saved training configuration (unless the model was never compiled in the first place).
And the corresponding code:
from keras.models import load_model
model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'
del model # deletes the existing model
# returns a compiled model identical to the previous one
model = load_model('my_model.h5')
In case your Keras wrapper model is inside a scikit-learn pipeline, you can save the steps of the pipeline separately.
import joblib
from sklearn.pipeline import Pipeline
from tensorflow import keras
# pass the create_cnn_model function into wrapper
cnn_model = keras.wrappers.scikit_learn.KerasClassifier(build_fn=create_cnn_model)
# create pipeline
cnn_model_pipeline_estimator = Pipeline([
    ('preprocessing_pipeline', pipeline_estimator),
    ('clf', cnn_model)
])

# train model
final_model = cnn_model_pipeline_estimator.fit(
    X, y, clf__batch_size=32, clf__epochs=15)
# collect the preprocessing pipeline & model separately
pipeline_estimator = final_model.named_steps['preprocessing_pipeline']
clf = final_model.named_steps['clf']

# store pipeline and model separately
joblib.dump(pipeline_estimator, open('path/to/pipeline.pkl', 'wb'))
clf.model.save('path/to/model.h5')

# load pipeline and model
pipeline_estimator = joblib.load('path/to/pipeline.pkl')
model = keras.models.load_model('path/to/model.h5')
new_example = [[...]]
# transform new data with pipeline & use model for prediction
transformed_data = pipeline_estimator.transform(new_example)
prediction = model.predict(transformed_data)
I get this error :
sum() got an unexpected keyword argument 'out'
when I run this code:
import pandas as pd, numpy as np
import keras
from keras.layers.core import Dense, Activation
from keras.models import Sequential
def AUC(y_true, y_pred):
    not_y_pred = np.logical_not(y_pred)
    y_int1 = y_true * y_pred
    y_int0 = np.logical_not(y_true) * not_y_pred
    TP = np.sum(y_pred * y_int1)
    FP = np.sum(y_pred) - TP
    TN = np.sum(not_y_pred * y_int0)
    FN = np.sum(not_y_pred) - TN
    TPR = np.float(TP) / (TP + FN)
    FPR = np.float(FP) / (FP + TN)
    return (1 + TPR - FPR) / 2
# Input datasets
train_df = pd.DataFrame(np.random.rand(91,1000))
train_df.iloc[:,-2]=(train_df.iloc[:,-2]>0.8)*1
model = Sequential()
model.add(Dense(output_dim=60, input_dim=91, init="glorot_uniform"))
model.add(Activation("sigmoid"))
model.add(Dense(output_dim=1, input_dim=60, init="glorot_uniform"))
model.add(Activation("sigmoid"))
model.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=[AUC])
train_df.iloc[:,-1]=np.ones(train_df.shape[0]) #bias
X=train_df.iloc[:,:-1].values
Y=train_df.iloc[:,-1].values
print X.shape,Y.shape
model.fit(X, Y, batch_size=50,show_accuracy = False, verbose = 1)
Is it possible to implement a custom metric aside from doing a loop on batches and editing the source code?
Here I'm answering the OP's topic question rather than their exact problem. I'm doing this because the question shows up at the top when I google the topic.
You can implement a custom metric in two ways.
As mentioned in the Keras documentation:
import keras.backend as K
def mean_pred(y_true, y_pred):
    return K.mean(y_pred)

model.compile(optimizer='sgd',
              loss='binary_crossentropy',
              metrics=['accuracy', mean_pred])
But here you have to remember, as mentioned in Marcin Możejko's answer, that y_true and y_pred are tensors. So in order to calculate the metric correctly you need to use keras.backend functionality. Please look at this SO question for details: How to calculate F1 Macro in Keras?
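For instance, a tensor-only precision metric for binary classification (the classic formulation that used to ship with older Keras versions; every operation is a backend op, so it runs inside the graph):
import keras.backend as K

def precision(y_true, y_pred):
    # both counts are computed with backend ops on tensors, not NumPy arrays
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())

model.compile(optimizer='sgd', loss='binary_crossentropy',
              metrics=['accuracy', precision])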
Or you can implement it in a hacky way, as mentioned in a Keras GitHub issue. For that you need to use the callbacks argument of model.fit.
import keras as keras
import numpy as np
from keras.optimizers import SGD
from sklearn.metrics import roc_auc_score
model = keras.models.Sequential()
# ...
sgd = SGD(lr=0.001, momentum=0.9)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
class Metrics(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
        self._data = []

    def on_epoch_end(self, batch, logs={}):
        X_val, y_val = self.validation_data[0], self.validation_data[1]
        y_predict = np.asarray(model.predict(X_val))
        y_val = np.argmax(y_val, axis=1)
        y_predict = np.argmax(y_predict, axis=1)
        self._data.append({
            'val_rocauc': roc_auc_score(y_val, y_predict),
        })
        return

    def get_data(self):
        return self._data
metrics = Metrics()
history = model.fit(X_train, y_train, epochs=100, validation_data=(X_val, y_val), callbacks=[metrics])
metrics.get_data()
The problem is that y_pred and y_true are not NumPy arrays but either Theano or TensorFlow tensors. That's why you got this error.
You can define your own custom metrics, but you have to remember that their arguments are those tensors, not NumPy arrays.
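If you are on the TensorFlow backend, a simpler route for AUC specifically is the built-in streaming metric available in newer TensorFlow versions. A sketch, assuming tf.keras and a probability output such as sigmoid or softmax:
import tensorflow as tf

# tf.keras.metrics.AUC accumulates the ROC AUC over batches during training
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=[tf.keras.metrics.AUC(name='auc')])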
You can pass a model.predict() into your AUC metric function (this will iterate over batches, so you might be better off using model.predict_on_batch()). Assuming you have something like a softmax layer as output (something that outputs probabilities), you can then use that together with sklearn.metrics to get the AUC.
from sklearn.metrics import roc_curve, auc
from here
def sklearnAUC(test_labels, test_prediction):
    n_classes = 2
    # Compute ROC curve and ROC area for each class
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        # (actual labels, predicted probabilities)
        fpr[i], tpr[i], _ = roc_curve(test_labels[:, i], test_prediction[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    return round(roc_auc[0], 3), round(roc_auc[1], 3)
Now make your metric:
# gives a numpy array like so [ [0.3,0.7] , [0.2,0.8] ....]
Y_pred = model.predict_on_batch ( X_test )
# Y_test looks something like [ [0,1] , [1,0] .... ]
# auc1 and auc2 should be equal
auc1 , auc2 = sklearnAUC( Y_test , Y_pred )