Related
Similar SO questions:
Python Catboost: Multiclass F1 score custom metric
Catboost tutorials
https://catboost.ai/docs/concepts/python-usages-examples.html#user-defined-loss-function
Question
In this question I have a binary classification problem. After modelling, we get the test predictions y_pred, and we already have the true test labels y_true.
I would like to get the custom evaluation metric defined by following equation:
profit = 400 * truePositive - 200 * falseNegative - 100 * falsePositive
Also, since higher profit is better, I would like to maximize the function instead of minimizing it.
How do I get this eval_metric in CatBoost?
Using sklearn
def get_profit(y_true, y_pred):
    tn, fp, fn, tp = sklearn.metrics.confusion_matrix(y_true, y_pred).ravel()
    loss = 400 * tp - 200 * fn - 100 * fp
    return loss

scoring = sklearn.metrics.make_scorer(get_profit, greater_is_better=True)
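(For reference, a scorer built this way is typically consumed through the scoring argument of the cross-validation utilities; a minimal sketch, where clf, X and y are placeholders for your own estimator and data:)

from sklearn.model_selection import cross_val_score
# clf is any scikit-learn classifier; "scoring" is the scorer defined above
profit_per_fold = cross_val_score(clf, X, y, scoring=scoring, cv=5)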
Using catboost
class ProfitMetric(object):
    def get_final_error(self, error, weight):
        return error / (weight + 1e-38)

    def is_max_optimal(self):
        return True

    def evaluate(self, approxes, target, weight):
        assert len(approxes) == 1
        assert len(target) == len(approxes[0])

        approx = approxes[0]
        error_sum = 0.0
        weight_sum = 0.0

        # ** I don't know what goes here **

        return error_sum, weight_sum
Question
How do I complete the custom eval metric in CatBoost?
UPDATE
My update so far
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split
def get_profit(y_true, y_pred):
    tn, fp, fn, tp = sklearn.metrics.confusion_matrix(y_true, y_pred).ravel()
    profit = 400 * tp - 200 * fn - 100 * fp
    return profit


class ProfitMetric:
    def is_max_optimal(self):
        return True  # greater is better

    def evaluate(self, approxes, target, weight):
        assert len(approxes) == 1
        assert len(target) == len(approxes[0])

        approx = approxes[0]
        y_pred = np.rint(approx)
        y_true = np.array(target).astype(int)
        output_weight = 1  # weight is not used
        score = get_profit(y_true, y_pred)
        return score, output_weight

    def get_final_error(self, error, weight):
        return error
df = sns.load_dataset('titanic')
X = df[['survived','pclass','age','sibsp','fare']]
y = X.pop('survived')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=100)
model = CatBoostClassifier(metric_period=50,
                           n_estimators=200,
                           eval_metric=ProfitMetric()
                           )

model.fit(X, y, eval_set=(X_test, y_test))  # this fails
The main difference from yours is:
@staticmethod
def get_profit(y_true, y_pred):
    # approxes arrive as raw log-odds, so convert to probabilities and threshold at 0.5
    y_pred = (expit(y_pred) > 0.5).astype(int)
    y_true = y_true.astype(int)
    # print("ACCURACY:", (y_pred == y_true).mean())
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    loss = 400 * tp - 200 * fn - 100 * fp
    return loss
It's not obvious from the example you linked what the predictions are, but after inspecting it turns out CatBoost treats predictions internally as raw log-odds (hat tip @Ben). So, to properly use confusion_matrix, you need to make sure both y_true and y_pred are integer class labels: apply the sigmoid to the log-odds and threshold at 0.5. This is done via:
y_pred = (scipy.special.expit(y_pred) > 0.5).astype(int)
y_true = y_true.astype(int)
So the full working code is:
import numpy as np
import seaborn as sns
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from scipy.special import expit
df = sns.load_dataset('titanic')
X = df[['survived','pclass','age','sibsp','fare']]
y = X.pop('survived')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=100)
class ProfitMetric:

    @staticmethod
    def get_profit(y_true, y_pred):
        # raw log-odds -> probabilities -> class labels
        y_pred = (expit(y_pred) > 0.5).astype(int)
        y_true = y_true.astype(int)
        # print("ACCURACY:", (y_pred == y_true).mean())
        tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
        loss = 400 * tp - 200 * fn - 100 * fp
        return loss

    def is_max_optimal(self):
        return True  # greater is better

    def evaluate(self, approxes, target, weight):
        assert len(approxes) == 1
        assert len(target) == len(approxes[0])
        y_true = np.array(target).astype(int)
        approx = approxes[0]
        score = self.get_profit(y_true, approx)
        return score, 1

    def get_final_error(self, error, weight):
        return error
model = CatBoostClassifier(metric_period=50,
                           n_estimators=200,
                           eval_metric=ProfitMetric()
                           )

model.fit(X_train, y_train, eval_set=(X_test, y_test))
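If per-object weights ever need to be respected, evaluate can accumulate weighted contributions per object instead of returning a fixed weight of 1, following the (error_sum, weight_sum) protocol described in the CatBoost docs linked above. A minimal sketch of that pattern (my own variant, assuming binary targets and raw log-odds in approxes[0]):

class WeightedProfitMetric:
    def is_max_optimal(self):
        return True

    def evaluate(self, approxes, target, weight):
        assert len(approxes) == 1
        approx = approxes[0]
        error_sum, weight_sum = 0.0, 0.0
        for i in range(len(approx)):
            w = 1.0 if weight is None else weight[i]
            pred = 1 if approx[i] > 0 else 0   # raw log-odds > 0 <=> probability > 0.5
            true = int(target[i])
            if pred == 1 and true == 1:
                error_sum += 400 * w           # true positive
            elif pred == 0 and true == 1:
                error_sum -= 200 * w           # false negative
            elif pred == 1 and true == 0:
                error_sum -= 100 * w           # false positive
            weight_sum += w
        return error_sum, weight_sum

    def get_final_error(self, error, weight):
        return error  # profit is a total, not an average, so do not divide by weight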
By way of example, I implemented a very simple metric.
It counts the number of times y_pred != y_true in a multi-class classifier.
class CountErrors:
    '''Count of wrong predictions'''

    def is_max_optimal(self):
        return False  # lower is better

    def evaluate(self, approxes, target, weight):
        y_pred = np.array(approxes).argmax(0)
        y_true = np.array(target)
        return sum(y_pred != y_true), 1

    def get_final_error(self, error, weight):
        return error
You can see it used if you run this code:
import numpy as np
import pandas as pd
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split
class CountErrors:
    '''Count number of wrong predictions'''

    def is_max_optimal(self):
        return False  # lower is better

    def evaluate(self, approxes, target, weight):
        y_pred = np.array(approxes).argmax(0)
        y_true = np.array(target)
        return sum(y_pred != y_true), 1

    def get_final_error(self, error, weight):
        return error
df = pd.read_csv('https://raw.githubusercontent.com/mkleinbort/resource-datasets/master/abalone/abalone.csv')
y = df['sex']
X = df.drop(columns=['sex'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=12)
model = CatBoostClassifier(metric_period=50, n_estimators=200, eval_metric=CountErrors())

model.fit(X_train, y_train, eval_set=(X_test, y_test))
Hope you can adapt this to your use-case.
I am working on KNN without using any library. The problem is that the labels are numeric:
label = [1.5171, 1.7999, 2.4493, 2.8622, 2.9961, 3.6356, 3.7742, 5.8069, 7.1357, etc.]
and each label occurs only once.
I want to predict the label for new data, but how should I choose the winning label if each one has only a single value?
prediction = max(set(label_neighbors), key=label_neighbors.count)
I'm guessing that you want to learn the mechanics of KNN, right? See the sample code below. This should do what you want.
import numpy as np
import scipy.spatial
from collections import Counter
# loading the Iris-Flower dataset from Sklearn
from sklearn import datasets
from sklearn.model_selection import train_test_split
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state = 42, test_size = 0.2)
class KNN:
    def __init__(self, k):
        self.k = k

    def fit(self, X, y):
        self.X_train = X
        self.y_train = y

    def distance(self, X1, X2):
        # Euclidean distance between two points
        return scipy.spatial.distance.euclidean(X1, X2)

    def predict(self, X_test):
        final_output = []
        for i in range(len(X_test)):
            d = []
            votes = []
            for j in range(len(self.X_train)):
                dist = self.distance(self.X_train[j], X_test[i])
                d.append([dist, j])
            d.sort()
            d = d[0:self.k]
            for dist, j in d:
                votes.append(self.y_train[j])
            ans = Counter(votes).most_common(1)[0][0]
            final_output.append(ans)
        return final_output

    def score(self, X_test, y_test):
        predictions = self.predict(X_test)
        return (np.array(predictions) == y_test).sum() / len(y_test)
clf = KNN(3)
clf.fit(X_train, y_train)

prediction = clf.predict(X_test)
for i in prediction:
    print(i, end=' ')

print(prediction == y_test)          # elementwise comparison with the true labels
print(clf.score(X_test, y_test))
# Result:
# 1.0
Well, look at that! We got 100%! Not bad, not bad at all!!
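Since the labels in the original question are continuous values with a single example each, a natural variant is to average the k nearest neighbours' labels instead of voting (i.e. KNN regression). A hedged sketch of a drop-in replacement for the predict method in the class above (same neighbour search, different aggregation):

def predict_numeric(self, X_test):
    # average the k nearest labels instead of taking a majority vote
    final_output = []
    for i in range(len(X_test)):
        d = sorted(
            (scipy.spatial.distance.euclidean(self.X_train[j], X_test[i]), j)
            for j in range(len(self.X_train))
        )[:self.k]
        final_output.append(np.mean([self.y_train[j] for _, j in d]))
    return final_output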
Reference:
https://medium.com/analytics-vidhya/implementing-k-nearest-neighbours-knn-without-using-scikit-learn-3905b4decc3c
I have data with differing weights for each sample. In my application, it is important that these weights are accounted for in estimating the model and comparing alternative models.
I'm using sklearn to estimate models and to compare alternative hyperparameter choices. But this unit test shows that GridSearchCV does not apply sample_weights to estimate scores.
Is there a way to have sklearn use sample_weight to score the models?
Unit test:
from __future__ import division

import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss, make_scorer
from sklearn.model_selection import GridSearchCV, RepeatedKFold
def grid_cv(X_in, y_in, w_in, cv, max_features_grid, use_weighting):
    out_results = dict()

    for k in max_features_grid:
        clf = RandomForestClassifier(n_estimators=256,
                                     criterion="entropy",
                                     warm_start=False,
                                     n_jobs=-1,
                                     random_state=RANDOM_STATE,
                                     max_features=k)
        for train_ndx, test_ndx in cv.split(X=X_in, y=y_in):
            X_train = X_in[train_ndx, :]
            y_train = y_in[train_ndx]
            w_train = w_in[train_ndx]
            y_test = y_in[test_ndx]

            clf.fit(X=X_train, y=y_train, sample_weight=w_train)
            y_hat = clf.predict_proba(X=X_in[test_ndx, :])
            if use_weighting:
                w_test = w_in[test_ndx]
                w_i_sum = w_test.sum()
                score = w_i_sum / w_in.sum() * log_loss(y_true=y_test, y_pred=y_hat, sample_weight=w_test)
            else:
                score = log_loss(y_true=y_test, y_pred=y_hat)

            results = out_results.get(k, [])
            results.append(score)
            out_results.update({k: results})

    for k, v in out_results.items():
        if use_weighting:
            mean_score = sum(v)
        else:
            mean_score = np.mean(v)
        out_results.update({k: mean_score})

    best_score = min(out_results.values())
    best_param = min(out_results, key=out_results.get)
    return best_score, best_param
if __name__ == "__main__":
    RANDOM_STATE = 1337
    X, y = load_iris(return_X_y=True)
    sample_weight = np.array([1 + 100 * (i % 25) for i in range(len(X))])
    # sample_weight = np.array([1 for _ in range(len(X))])

    inner_cv = RepeatedKFold(n_splits=3, n_repeats=1, random_state=RANDOM_STATE)
    outer_cv = RepeatedKFold(n_splits=3, n_repeats=1, random_state=RANDOM_STATE)

    rfc = RandomForestClassifier(n_estimators=256,
                                 criterion="entropy",
                                 warm_start=False,
                                 n_jobs=-1,
                                 random_state=RANDOM_STATE)
    search_params = {"max_features": [1, 2, 3, 4]}

    fit_params = {"sample_weight": sample_weight}
    my_scorer = make_scorer(log_loss,
                            greater_is_better=False,
                            needs_proba=True,
                            needs_threshold=False)

    grid_clf = GridSearchCV(estimator=rfc,
                            scoring=my_scorer,
                            cv=inner_cv,
                            param_grid=search_params,
                            refit=True,
                            return_train_score=False,
                            iid=False)  # in this usage, the results are the same for `iid=True` and `iid=False`
    grid_clf.fit(X, y, **fit_params)
    print("This is the best out-of-sample score using GridSearchCV: %.6f." % -grid_clf.best_score_)

    msg = """This is the best out-of-sample score %s weighting using grid_cv: %.6f."""

    score_with_weights, param_with_weights = grid_cv(X_in=X,
                                                     y_in=y,
                                                     w_in=sample_weight,
                                                     cv=inner_cv,
                                                     max_features_grid=search_params.get("max_features"),
                                                     use_weighting=True)
    print(msg % ("WITH", score_with_weights))

    score_without_weights, param_without_weights = grid_cv(X_in=X,
                                                           y_in=y,
                                                           w_in=sample_weight,
                                                           cv=inner_cv,
                                                           max_features_grid=search_params.get("max_features"),
                                                           use_weighting=False)
    print(msg % ("WITHOUT", score_without_weights))
Which produces output:
This is the best out-of-sample score using GridSearchCV: 0.135692.
This is the best out-of-sample score WITH weighting using grid_cv: 0.099367.
This is the best out-of-sample score WITHOUT weighting using grid_cv: 0.135692.
Explanation: Since manually computing the loss without weighting produces the same scoring as GridSearchCV, we know that the sample weights are not being used.
GridSearchCV takes a scoring argument, which can be a callable. You can see the details of how to change the scoring function, and how to pass your own scoring function, in the scikit-learn documentation on scoring. Here's the relevant piece of code from that page for the sake of completeness:
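(The original hyperlink is lost; the snippet below is a close paraphrase of the make_scorer example from that documentation page, so the exact wording may differ from your scikit-learn version.)

from sklearn.svm import LinearSVC
from sklearn.metrics import fbeta_score, make_scorer
from sklearn.model_selection import GridSearchCV

# wrap an arbitrary metric (here F-beta with beta=2) as a scorer
ftwo_scorer = make_scorer(fbeta_score, beta=2)
grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
                    scoring=ftwo_scorer, cv=5)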
EDIT: fit_params is passed only to the fit functions, not to the score functions. If there are parameters that are supposed to reach the scorer, they should be passed to make_scorer. But that still doesn't solve the issue here, since it would mean the whole sample_weight vector is passed to log_loss, whereas only the part corresponding to y_test at the time of calculating the loss should be passed.
sklearn does NOT support such a thing, but you can hack your way around it using a pandas.DataFrame. The good news is that sklearn understands a DataFrame and keeps it that way, which means you can exploit the index of a DataFrame, as you can see in the code below:
# more code

X, y = load_iris(return_X_y=True)
index = ['r%d' % x for x in range(len(y))]
y_frame = pd.DataFrame(y, index=index)
sample_weight = np.array([1 + 100 * (i % 25) for i in range(len(X))])
sample_weight_frame = pd.DataFrame(sample_weight, index=index)

# more code

def score_f(y_true, y_pred, sample_weight):
    return log_loss(y_true.values, y_pred,
                    sample_weight=sample_weight.loc[y_true.index.values].values.reshape(-1),
                    normalize=True)

score_params = {"sample_weight": sample_weight_frame}
my_scorer = make_scorer(score_f,
                        greater_is_better=False,
                        needs_proba=True,
                        needs_threshold=False,
                        **score_params)

grid_clf = GridSearchCV(estimator=rfc,
                        scoring=my_scorer,
                        cv=inner_cv,
                        param_grid=search_params,
                        refit=True,
                        return_train_score=False,
                        iid=False)  # in this usage, the results are the same for `iid=True` and `iid=False`
grid_clf.fit(X, y_frame)

# more code
As you see, the score_f uses the index of y_true to find which parts of sample_weight to use. For the sake of completeness, here's the whole code:
from __future__ import division
import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
from sklearn.model_selection import GridSearchCV, RepeatedKFold
from sklearn.metrics import make_scorer
import pandas as pd
def grid_cv(X_in, y_in, w_in, cv, max_features_grid, use_weighting):
    out_results = dict()

    for k in max_features_grid:
        clf = RandomForestClassifier(n_estimators=256,
                                     criterion="entropy",
                                     warm_start=False,
                                     n_jobs=1,
                                     random_state=RANDOM_STATE,
                                     max_features=k)
        for train_ndx, test_ndx in cv.split(X=X_in, y=y_in):
            X_train = X_in[train_ndx, :]
            y_train = y_in[train_ndx]
            w_train = w_in[train_ndx]
            y_test = y_in[test_ndx]

            clf.fit(X=X_train, y=y_train, sample_weight=w_train)
            y_hat = clf.predict_proba(X=X_in[test_ndx, :])
            if use_weighting:
                w_test = w_in[test_ndx]
                w_i_sum = w_test.sum()
                score = w_i_sum / w_in.sum() * log_loss(y_true=y_test, y_pred=y_hat, sample_weight=w_test)
            else:
                score = log_loss(y_true=y_test, y_pred=y_hat)

            results = out_results.get(k, [])
            results.append(score)
            out_results.update({k: results})

    for k, v in out_results.items():
        if use_weighting:
            mean_score = sum(v)
        else:
            mean_score = np.mean(v)
        out_results.update({k: mean_score})

    best_score = min(out_results.values())
    best_param = min(out_results, key=out_results.get)
    return best_score, best_param
# if __name__ == "__main__":
if True:
    RANDOM_STATE = 1337
    X, y = load_iris(return_X_y=True)
    index = ['r%d' % x for x in range(len(y))]
    y_frame = pd.DataFrame(y, index=index)
    sample_weight = np.array([1 + 100 * (i % 25) for i in range(len(X))])
    sample_weight_frame = pd.DataFrame(sample_weight, index=index)
    # sample_weight = np.array([1 for _ in range(len(X))])

    inner_cv = RepeatedKFold(n_splits=3, n_repeats=1, random_state=RANDOM_STATE)
    outer_cv = RepeatedKFold(n_splits=3, n_repeats=1, random_state=RANDOM_STATE)

    rfc = RandomForestClassifier(n_estimators=256,
                                 criterion="entropy",
                                 warm_start=False,
                                 n_jobs=1,
                                 random_state=RANDOM_STATE)
    search_params = {"max_features": [1, 2, 3, 4]}

    def score_f(y_true, y_pred, sample_weight):
        return log_loss(y_true.values, y_pred,
                        sample_weight=sample_weight.loc[y_true.index.values].values.reshape(-1),
                        normalize=True)

    score_params = {"sample_weight": sample_weight_frame}
    my_scorer = make_scorer(score_f,
                            greater_is_better=False,
                            needs_proba=True,
                            needs_threshold=False,
                            **score_params)

    grid_clf = GridSearchCV(estimator=rfc,
                            scoring=my_scorer,
                            cv=inner_cv,
                            param_grid=search_params,
                            refit=True,
                            return_train_score=False,
                            iid=False)  # in this usage, the results are the same for `iid=True` and `iid=False`
    grid_clf.fit(X, y_frame)
    print("This is the best out-of-sample score using GridSearchCV: %.6f." % -grid_clf.best_score_)

    msg = """This is the best out-of-sample score %s weighting using grid_cv: %.6f."""

    score_with_weights, param_with_weights = grid_cv(X_in=X,
                                                     y_in=y,
                                                     w_in=sample_weight,
                                                     cv=inner_cv,
                                                     max_features_grid=search_params.get("max_features"),
                                                     use_weighting=True)
    print(msg % ("WITH", score_with_weights))

    score_without_weights, param_without_weights = grid_cv(X_in=X,
                                                           y_in=y,
                                                           w_in=sample_weight,
                                                           cv=inner_cv,
                                                           max_features_grid=search_params.get("max_features"),
                                                           use_weighting=False)
    print(msg % ("WITHOUT", score_without_weights))
The output of the code is then:
This is the best out-of-sample score using GridSearchCV: 0.095439.
This is the best out-of-sample score WITH weighting using grid_cv: 0.099367.
This is the best out-of-sample score WITHOUT weighting using grid_cv: 0.135692.
EDIT 2: as the comment below says:
the difference in my score and the sklearn score using this solution
originates in the way that I was computing a weighted average of
scores. If you omit the weighted average portion of the code, the two
outputs match to machine precision.
Currently in sklearn, GridSearchCV (and any class inheriting from BaseSearchCV) only allows sample_weight in **fit_params and does not use it in scoring, which is not correct, since CV picks the "best estimator" via an unweighted score. Note that when you call grid.fit(X, y, sample_weight=w), the sample weights are used only in fit, not in scoring.
There are two ways to solve this problem:
Handy method: add the weights as the first column of X, then write a customized scoring function that reads them back out and a transformer that strips that column before the model sees it.
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
import numpy as np

# customized scorer: reads the weights back out of the first column of X
def weight_remover_scorer(estimator, X, y):
    y_pred = estimator.predict(X)
    w = X[:, 0]
    return your_scorer(y, y_pred, sample_weight=w)  # your_scorer = any metric accepting sample_weight

# customized transformer: drops the weight column before the model is fit
class WeightRemover(TransformerMixin, BaseEstimator):
    def fit(self, X, y=None, **fit_params):
        return self

    def transform(self, X, y=None, **fit_params):
        return X[:, 1:]

# in your main function (model, params_grid, train_w, test_w, X, X_test, y assumed already defined)
if __name__ == '__main__':
    pipe = Pipeline([('remove_weight', WeightRemover()), ('model', model)])
    params_grid = {'model__' + k: v for k, v in params_grid.items()}
    X = np.c_[train_w, X]
    X_test = np.c_[test_w, X_test]
    grid = GridSearchCV(pipe, params_grid, cv=5, scoring=weight_remover_scorer)
    grid.fit(X, y)
Add the feature to the sklearn classes themselves (i.e. wait for an upgrade): add a sample_weight parameter to BaseSearchCV (default None) and index it per fold the same way fit_params is handled via fit_params = _check_fit_params(X, fit_params).
Just pointing out that there is an ongoing effort to support this important feature: https://github.com/scikit-learn/scikit-learn/pull/13432
But it seems that because of backward compatibility issues and the desire to tackle the more general problem of passing arbitrary sample related information it is taking a bit too long. The last attempt seems to be: https://github.com/scikit-learn/scikit-learn/pull/16079
Here is a good review of the issue: http://deaktator.github.io/2019/03/10/the-error-in-the-comparator/
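For what it's worth, newer scikit-learn releases expose this capability through metadata routing. The sketch below follows the routing documentation's weighted-scoring pattern and assumes scikit-learn >= 1.4 with routing enabled; parameter names are worth double-checking against your installed version:

import sklearn
import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss, make_scorer
from sklearn.model_selection import GridSearchCV

sklearn.set_config(enable_metadata_routing=True)

X, y = load_iris(return_X_y=True)
w = np.array([1 + 100 * (i % 25) for i in range(len(X))])

# request that sample_weight be routed to both fit and the scorer
rfc = RandomForestClassifier(random_state=0).set_fit_request(sample_weight=True)
scorer = make_scorer(log_loss, greater_is_better=False,
                     response_method="predict_proba").set_score_request(sample_weight=True)

grid = GridSearchCV(rfc, {"max_features": [1, 2, 3, 4]}, scoring=scorer, cv=3)
grid.fit(X, y, sample_weight=w)   # weights now reach the scoring step as well
print(-grid.best_score_)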
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import matplotlib.pyplot as plt
r = pd.read_csv("vitalsign_test.csv")
clm_list = []
for column in r.columns:
    clm_list.append(column)
X = r[clm_list[1:len(clm_list)-1]].values
y = r[clm_list[len(clm_list)-1]].values
X_train, X_test, y_train, y_test = train_test_split (X,y, test_size = 0.3, random_state=4)
k_range = range(1,25)
scores = []
for k in k_range:
    clf = KNeighborsClassifier(n_neighbors=k)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
scores.append(metrics.accuracy_score(y_test, y_pred))

plt.plot(k_range, scores)
plt.xlabel('value of k for clf')
plt.ylabel('testing accuracy')
The response that I am getting is:
ValueError: x and y must have same first dimension
My feature and response shapes are:
y.shape
Out[60]: (500,)
X.shape
Out[61]: (500, 6)
It has nothing to do with your X and y; it is about the x and y arguments to plot, since your scores list has one element while k_range has 24. The error comes from incorrect indentation:
for k in k_range:
    clf = KNeighborsClassifier(n_neighbors=k)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
scores.append(metrics.accuracy_score(y_test, y_pred))
should be
for k in k_range:
    clf = KNeighborsClassifier(n_neighbors=k)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    scores.append(metrics.accuracy_score(y_test, y_pred))
Using python and scikit-learn, I'd like to do a grid search. But some of my models end up being empty. How can I make the grid search function ignore those models?
I guess I could have a scoring function which returns 0 if the model is empty, but I'm not sure how.
predictor = sklearn.svm.LinearSVC(penalty='l1', dual=False, class_weight='auto')
param_dist = {'C': pow(2.0, np.arange(-10, 11))}
learner = sklearn.grid_search.GridSearchCV(estimator=predictor,
                                           param_grid=param_dist,
                                           n_jobs=self.n_jobs, cv=5,
                                           verbose=0)
learner.fit(X, y)
My data is such that this learner object will choose a C corresponding to an empty model. Any idea how I can make sure the model is not empty?
EDIT: by an "empty model" I mean a model that has selected 0 features to use. Especially with an L1-regularized model, this can easily happen: if the C in the SVM is small enough, the optimization problem will find the zero vector as the optimal solution for the coefficients. Therefore predictor.coef_ will be a vector of 0s.
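(To illustrate the point, here is a small, hypothetical reproduction: at a sufficiently small C, an L1-regularized LinearSVC will typically zero out every coefficient.)

import numpy as np
from sklearn.svm import LinearSVC
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
clf = LinearSVC(penalty='l1', dual=False, C=1e-4).fit(X, y)
print(clf.coef_)                 # likely all zeros at this C
print(np.all(clf.coef_ == 0))    # True means an "empty model"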
Try to implement a custom scorer, something similar to:
import numpy as np

def scorer_(estimator, X, y):
    # Your criterion here
    if np.allclose(estimator.coef_, np.zeros_like(estimator.coef_)):
        return 0
    else:
        return estimator.score(X, y)

learner = sklearn.grid_search.GridSearchCV(...,
                                           scoring=scorer_)
I don't think there is such a built-in function; it's easy, however, to make a custom gridsearcher:
import itertools
import operator

import numpy as np

from sklearn.cross_validation import KFold
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import make_classification
from sklearn import metrics
from sklearn import svm
def model_eval(X, y, model, cv):
    scores = []
    for train_idx, test_idx in cv:
        X_train, y_train = X[train_idx], y[train_idx]
        X_test, y_test = X[test_idx], y[test_idx]
        model.fit(X_train, y_train)

        nonzero_coefs = len(np.nonzero(model.coef_)[0])  # check for nonzero coefs
        if nonzero_coefs == 0:  # if they're all zero, don't evaluate any further; move to next hyperparameter combo
            return 0

        predictions = model.predict(X_test)
        score = metrics.accuracy_score(y_test, predictions)
        scores.append(score)
    return np.array(scores).mean()
X, y = make_classification(n_samples=1000,
                           n_features=10,
                           n_informative=3,
                           n_redundant=0,
                           n_repeated=0,
                           n_classes=2,
                           random_state=0,
                           shuffle=False)

C = pow(2.0, np.arange(-20, 11))
penalty = {'l1', 'l2'}
parameter_grid = itertools.product(C, penalty)

kf = KFold(X.shape[0], n_folds=5)  # use the same folds to evaluate each hyperparameter combo

hyperparameter_scores = {}
for C, penalty in parameter_grid:
    model = svm.LinearSVC(dual=False, C=C, penalty=penalty)
    result = model_eval(X, y, model, kf)
    hyperparameter_scores[(C, penalty)] = result

sorted_scores = sorted(hyperparameter_scores.items(), key=operator.itemgetter(1))
best_parameters, best_score = sorted_scores[-1]
print(best_parameters)
print(best_score)
print best_score