Is there a way to get all computed coefficients in a GridSearchCV? - python

I am trying different ML models, all using a pipeline which includes a transformer and an algorithm, 'nested' in a GridSearchCV to find the best hyperparameters.
When running Ridge, Lasso and ElasticNet regressions, I would like to store all the computed coefficients, not only the best_estimator_ coefficients, in order to plot them according to the alpha's path.
In other words, when GridSearchCV changes the alpha parameter and fits a new model, I would like to store the resulting coefficients so I can plot them against the alpha values.
You can take a look at this official scikit-learn example for a beautiful illustration.
This is my code:
from sklearn.linear_model import Ridge
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_absolute_error, mean_squared_error
import numpy as np
import time

start = time.time()

# Cross-validated - Ridge Regression
model_ridge = make_pipeline(transformer, Ridge())  # my transformer is already defined
alphas = np.logspace(-5, 5, num=50)
params = {'ridge__alpha': alphas}

grid = GridSearchCV(model_ridge, param_grid=params, cv=10)
grid.fit(X_train, y_train)

regressor = grid.estimator.named_steps['ridge'].coef_  # when I add this line, it returns an error

stop = time.time()
training_time = stop - start

y_pred = grid.predict(X_test)

Ridge_Regression_results = {'Algorithm': 'Ridge Regression',
                            'R²': grid.score(X_train, y_train),
                            'MAE': mean_absolute_error(y_test, y_pred),
                            'RMSE': np.sqrt(mean_squared_error(y_test, y_pred)),
                            'Training time (sec)': training_time}
In this topic: return coefficients from Pipeline object in sklearn, the author was advised to use the named_steps attribute of the pipeline.
But in my case, when I try to use it, it returns the following error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_18260/3310195105.py in <module>
13
14 grid.fit(X_train, y_train)
---> 15 regressor = grid.estimator.named_steps['ridge'].coef_
16
17
AttributeError: 'Ridge' object has no attribute 'coef_'
I don't understand why this is happening.
For this to work, my guess is that this storing should happen during the GridSearchCV loop, but I can't figure out how to do this.

You can get the coefficients by making them "scores," although it isn't very semantically correct.
import pandas as pd

def myscores(estimator, X, y):
    r2 = estimator.score(X, y)
    coefs = estimator.named_steps["ridge"].coef_
    ret_dict = {
        f'a_{i}': coef for i, coef in enumerate(coefs)
    }
    ret_dict['r2'] = r2
    return ret_dict

grid = GridSearchCV(
    model_ridge,
    param_grid=params,
    scoring=myscores,
    refit='r2'
)
grid.fit(X_train, y_train)

print(pd.DataFrame(grid.cv_results_))
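If the goal is to reproduce the coefficients-versus-alpha plot from the question, the per-coefficient "scores" end up in cv_results_ as mean_test_a_0, mean_test_a_1, ... columns (GridSearchCV's mean_test_<metric> naming). A minimal sketch, assuming the grid above has been fitted and that your scikit-learn version accepts a dict-returning scoring callable:
import matplotlib.pyplot as plt
import pandas as pd

# Assumes `grid` (defined with the myscores scorer above) has already been fit
results = pd.DataFrame(grid.cv_results_)

alphas = results['param_ridge__alpha'].astype(float)
# One column per coefficient, following the 'a_{i}' keys returned by myscores
coef_cols = [c for c in results.columns if c.startswith('mean_test_a_')]

plt.figure()
for col in coef_cols:
    plt.plot(alphas, results[col])  # one line per coefficient
plt.xscale('log')
plt.xlabel('alpha')
plt.ylabel('mean cross-validated coefficient')
plt.title('Ridge coefficients along the alpha path')
plt.show()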

Related

Sklearn fbeta_score default average parameter not working with multiclass target variable

I want to use the F2 score to evaluate my model, but there is an error.
from sklearn.metrics import fbeta_score, make_scorer
ftwo_scorer = make_scorer(fbeta_score, beta=2) # beta=2
As you can see, I use fbeta_score and make_scorer to define my own metric. But when I apply this metric during fitting, there is an error.
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
svc = SVC()
parameters = {'C': [100, 1000]}
# 'gamma': [1e-5, 1e-4, 1e-3]
clf = GridSearchCV(svc, parameters, scoring=ftwo_scorer, cv=5)
clf.fit(Xtrain, Ytrain)
Yval_pred = clf.predict(Xval)
Error message:
ValueError Traceback (most recent call last)
<ipython-input-600-ebf059724dd4> in <module>
4 #'gamma':[1e-5,1e-4,1e-3]}
5 clf = GridSearchCV(svc, parameters, scoring=ftwo_scorer,cv=5)
----> 6 clf.fit(Xtrain, Ytrain)
7 Yval_pred = clf.predict(Xval)
……
ValueError: Target is multiclass but average='binary'. Please choose
another average setting, one of [None, 'micro', 'macro', 'weighted'].
Can you help me?
This is happening because your target variable Y probably has more than two classes. By default, fbeta_score uses average='binary', which only works for binary targets.
So, as the error suggests, pass a different average value from the options [None, 'micro', 'macro', 'weighted'] to the make_scorer function, i.e.
from sklearn.metrics import fbeta_score, make_scorer
ftwo_scorer = make_scorer(fbeta_score, beta=2, average='weighted')
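For completeness, a self-contained sketch of the same fix, using the iris dataset as a stand-in for Xtrain/Ytrain (which aren't shown in the question):
from sklearn import datasets
from sklearn.metrics import fbeta_score, make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

# Multiclass toy data standing in for Xtrain / Ytrain
X, y = datasets.load_iris(return_X_y=True)

# average='weighted' (or 'micro'/'macro'/None) is required for multiclass targets
ftwo_scorer = make_scorer(fbeta_score, beta=2, average='weighted')

clf = GridSearchCV(SVC(), {'C': [100, 1000]}, scoring=ftwo_scorer, cv=5)
clf.fit(X, y)
print(clf.best_params_, clf.best_score_)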

Ridge Regression Grid Search with Pipeline

I am trying to optimize hyperparameters for ridge regression, and also add polynomial features. The pipeline looks okay, but I get an error when I try GridSearchCV. Here:
# Importing the Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error
from collections import Counter
from IPython.core.display import display, HTML
sns.set_style('darkgrid')
# Data Preprocessing
from sklearn.datasets import load_boston
boston_dataset = load_boston()
dataset = pd.DataFrame(boston_dataset.data, columns = boston_dataset.feature_names)
dataset['MEDV'] = boston_dataset.target
# X and y Variables
X = dataset.iloc[:, 0:13].values
y = dataset.iloc[:, 13].values.reshape(-1,1)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 25)
# Building the Model ------------------------------------------------------------------------
# Fitting regressior to the Training set
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
steps = [
    ('scalar', StandardScaler()),
    ('poly', PolynomialFeatures(degree=2)),
    ('model', Ridge())
]
ridge_pipe = Pipeline(steps)
ridge_pipe.fit(X_train, y_train)
# Predicting the Test set results
y_pred = ridge_pipe.predict(X_test)
# Applying k-Fold Cross Validation
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator = ridge_pipe, X = X_train, y = y_train, cv = 10)
accuracies.mean()
#accuracies.std()
# Applying Grid Search to find the best model and the best parameters
from sklearn.model_selection import GridSearchCV
parameters = [ {'alpha': np.arange(0, 0.2, 0.01) } ]
grid_search = GridSearchCV(estimator = ridge_pipe,
                           param_grid = parameters,
                           scoring = 'accuracy',
                           cv = 10,
                           n_jobs = -1)
grid_search = grid_search.fit(X_train, y_train) # <-- GETTING ERROR IN HERE
Error:
ValueError: Invalid parameter ridge for estimator
What should I do, or is there a better way to use ridge regression with a pipeline? I would appreciate some sources about grid search, because I am a newbie on this.
There are two problems in your code. First, since you are using a pipeline, you need to specify in the params list which part of the pipeline the params belong to. See the official documentation for more information:
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters. For this,
it enables setting parameters of the various steps using their names
and the parameter name separated by a ‘__’, as in the example below
In this case, since alpha is going to be used with the ridge regression and you have used the string model in the Pipeline definition, you need to rename the key alpha to model__alpha:
steps = [
    ('scalar', StandardScaler()),
    ('poly', PolynomialFeatures(degree=2)),
    ('model', Ridge())  # <------ Whatever string you assign here will be used later
]

# Since you have named it 'model', you need to change the key to 'model__alpha'
parameters = [ {'model__alpha': np.arange(0, 0.2, 0.01)} ]
Next, you need to understand that this is a regression dataset. You should not use accuracy here; instead, use a regression-based scoring function such as mean_squared_error (there are other regression metrics you can use as well). Something like this:
from sklearn.metrics import mean_squared_error, make_scorer

# greater_is_better=False because GridSearchCV maximizes the score,
# and a lower mean squared error is better
scoring_func = make_scorer(mean_squared_error, greater_is_better=False)

grid_search = GridSearchCV(estimator = ridge_pipe,
                           param_grid = parameters,
                           scoring = scoring_func,  # <--- use the scoring func defined above
                           cv = 10,
                           n_jobs = -1)
Here is a link to a Google colab notebook with working code.
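As an aside, scikit-learn also ships named regression scorers such as 'neg_mean_squared_error' (negated so that a higher score is still better), which avoids building a scorer by hand. A minimal sketch with the pipeline and data from the question:
# Alternative: use the built-in negated-MSE scorer by name
grid_search = GridSearchCV(estimator=ridge_pipe,
                           param_grid={'model__alpha': np.arange(0, 0.2, 0.01)},
                           scoring='neg_mean_squared_error',
                           cv=10,
                           n_jobs=-1)
grid_search.fit(X_train, y_train)
print(grid_search.best_params_)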
For the GridSearchCV parameters, the parameter name has to be prefixed with the pipeline step name plus two underscores, so with the step named 'model' above it should be 'model__alpha' (note the 2 underscores) instead of just 'alpha'.

How to get the prediction probabilities using cross validation in scikit-learn

I am using RandomForestClassifier as follows, with cross-validation, for a binary classification task (class labels are 0 and 1).
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score
clf=RandomForestClassifier(random_state = 42, class_weight="balanced")
k_fold = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
accuracy = cross_val_score(clf, X, y, cv=k_fold, scoring = 'accuracy')
print("Accuracy: " + str(round(100*accuracy.mean(), 2)) + "%")
f1 = cross_val_score(clf, X, y, cv=k_fold, scoring = 'f1_weighted')
print("F Measure: " + str(round(100*f1.mean(), 2)) + "%")
Now I want to order my data using the prediction probabilities of class 1 from the cross-validation results. For that I tried the following two ways.
pred = clf.predict_proba(X)[:,1]
print(pred)
probs = clf.predict_proba(X)
best_n = np.argsort(probs, axis=1)[:,-6:]
I get the following error
NotFittedError: This RandomForestClassifier instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method.
in both cases.
I am just wondering where I am going wrong.
I am happy to provide more details if needed.
In case you want to use the CV models for unseen data points, use the following approach.
from sklearn import datasets
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_validate
iris = datasets.load_iris()
X = iris.data
y = iris.target
clf = RandomForestClassifier(n_estimators=10, random_state = 42, class_weight="balanced")
cv_results = cross_validate(clf, X, y, cv=3, return_estimator=True)
clf_fold_0 = cv_results['estimator'][0]
clf_fold_0.predict_proba([iris.data[133]])
# array([[0. , 0.5, 0.5]])
Have a look at the documentation; it specifies that the probability is calculated from the mean results of the trees.
In your case, you first need to call the fit() method to generate the trees in the model. Once you fit the model on the training data, you can call the predict_proba() method.
This is also specified in the error.
# Fit model
model = RandomForestClassifier(...)
model.fit(X_train, Y_train)
# Probabilty
model.predict_proba(X)[:,1]
I solved my problem using the following code:
from sklearn.model_selection import cross_val_predict
import numpy as np

proba = cross_val_predict(clf, X, y, cv=k_fold, method='predict_proba')
print(proba[:, 1])
print(np.argsort(proba[:, 1]))
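If the goal is then to order the rows by the class-1 probability, a small follow-up sketch (assuming proba comes from the cross_val_predict call above; with labels 0 and 1, the second column corresponds to class 1):
import numpy as np
import pandas as pd

# Rank samples from most to least likely to belong to class 1
order = np.argsort(proba[:, 1])[::-1]
ranked = pd.DataFrame({'row_index': order, 'class_1_proba': proba[order, 1]})
print(ranked.head())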

GridSearchCV gives ValueError: continuous is not supported for DecisionTreeRegressor

I'm learning ML and doing a task on Boston house price prediction. I have the following code:
from sklearn.metrics import fbeta_score, make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeRegressor

def fit_model(X, y):
    """ Tunes a decision tree regressor model using GridSearchCV on the input data X
        and target labels y and returns this optimal model. """
    # Create a decision tree regressor object
    regressor = DecisionTreeRegressor()
    # Set up the parameters we wish to tune
    parameters = {'max_depth': (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)}
    # Make an appropriate scoring function
    scoring_function = make_scorer(fbeta_score, beta=2)
    # Make the GridSearchCV object
    reg = GridSearchCV(regressor, param_grid=parameters, scoring=scoring_function)
    print(reg)
    # Fit the learner to the data to obtain the optimal model with tuned parameters
    reg.fit(X, y)
    # Return the optimal model
    return reg.best_estimator_

reg = fit_model(housing_features, housing_prices)
This gives me ValueError: continuous is not supported on the reg.fit(X, y) line, and I don't understand why. What is the reason for this? What am I missing here?
That's because of the line:
scoring_function = make_scorer(fbeta_score, beta=2)
This sets the scoring metric to the F-beta score, which is for classification tasks!
You are doing regression here, as seen in:
regressor = DecisionTreeRegressor()
From the docs
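One way to adapt fit_model, as a sketch, is to swap in a regression metric; mean_squared_error is used here with greater_is_better=False, since GridSearchCV maximizes the score:
from sklearn.metrics import make_scorer, mean_squared_error
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeRegressor

def fit_model(X, y):
    regressor = DecisionTreeRegressor()
    parameters = {'max_depth': range(1, 11)}
    # A regression metric, negated internally because GridSearchCV maximizes scores
    scoring_function = make_scorer(mean_squared_error, greater_is_better=False)
    reg = GridSearchCV(regressor, param_grid=parameters, scoring=scoring_function)
    reg.fit(X, y)
    return reg.best_estimator_

# reg = fit_model(housing_features, housing_prices)  # as in the question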

Recursive feature elimination on Random Forest using scikit-learn

I'm trying to perform recursive feature elimination using scikit-learn and a random forest classifier, with OOB ROC as the method of scoring each subset created during the recursive process.
However, when I try to use the RFECV method, I get an error saying AttributeError: 'RandomForestClassifier' object has no attribute 'coef_'
Random Forests don't have coefficients per se, but they do have rankings by Gini score. So, I'm wondering how to get around this problem.
Please note that I want to use a method that will explicitly tell me what features from my pandas DataFrame were selected in the optimal grouping as I am using recursive feature selection to try to minimize the amount of data I will input into the final classifier.
Here's some example code:
from sklearn import datasets
import pandas as pd
from pandas import Series
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFECV
iris = datasets.load_iris()
x=pd.DataFrame(iris.data, columns=['var1','var2','var3', 'var4'])
y=pd.Series(iris.target, name='target')
rf = RandomForestClassifier(n_estimators=500, min_samples_leaf=5, n_jobs=-1)
rfecv = RFECV(estimator=rf, step=1, cv=10, scoring='ROC', verbose=2)
selector=rfecv.fit(x, y)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/bbalin/anaconda/lib/python2.7/site-packages/sklearn/feature_selection/rfe.py", line 336, in fit
ranking_ = rfe.fit(X_train, y_train).ranking_
File "/Users/bbalin/anaconda/lib/python2.7/site-packages/sklearn/feature_selection/rfe.py", line 148, in fit
if estimator.coef_.ndim > 1:
AttributeError: 'RandomForestClassifier' object has no attribute 'coef_'
Here's what I've done to adapt RandomForestClassifier to work with RFECV:
class RandomForestClassifierWithCoef(RandomForestClassifier):
    def fit(self, *args, **kwargs):
        super(RandomForestClassifierWithCoef, self).fit(*args, **kwargs)
        self.coef_ = self.feature_importances_
Just using this class does the trick if you use 'accuracy' or 'f1' score. For 'roc_auc', RFECV complains that multiclass format is not supported. Changing it to two-class classification with the code below, the 'roc_auc' scoring works. (Using Python 3.4.1 and scikit-learn 0.15.1)
y=(pd.Series(iris.target, name='target')==2).astype(int)
Plugging into your code:
from sklearn import datasets
import pandas as pd
from pandas import Series
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFECV
class RandomForestClassifierWithCoef(RandomForestClassifier):
    def fit(self, *args, **kwargs):
        super(RandomForestClassifierWithCoef, self).fit(*args, **kwargs)
        self.coef_ = self.feature_importances_
iris = datasets.load_iris()
x=pd.DataFrame(iris.data, columns=['var1','var2','var3', 'var4'])
y=(pd.Series(iris.target, name='target')==2).astype(int)
rf = RandomForestClassifierWithCoef(n_estimators=500, min_samples_leaf=5, n_jobs=-1)
rfecv = RFECV(estimator=rf, step=1, cv=2, scoring='roc_auc', verbose=2)
selector=rfecv.fit(x, y)
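To see which DataFrame columns were actually kept (which is what the question asks for), the fitted selector exposes a boolean mask and an integer ranking; a short follow-up sketch, assuming the rfecv fit above:
# Boolean mask of selected features, aligned with the columns of x
selected_columns = x.columns[rfecv.support_]
print(selected_columns.tolist())

# Rank 1 means "selected"; higher numbers were eliminated earlier
print(dict(zip(x.columns, rfecv.ranking_)))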
This is my code, I've tidied it up a bit to make it relevant to your task:
features_to_use = fea_cols  # this is a list of features
# empty dataframe
trim_5_df = DataFrame(columns=features_to_use)
run = 1

# this will remove the 5 worst features determined by their feature importance computed by the RF classifier
while len(features_to_use) > 6:
    print('number of features: %d' % (len(features_to_use)))
    # build the classifier
    clf = RandomForestClassifier(n_estimators=1000, random_state=0, n_jobs=-1)
    # train the classifier
    clf.fit(train[features_to_use], train['OpenStatusMod'].values)
    print('classifier score: %f\n' % clf.score(train[features_to_use], train['OpenStatusMod'].values))
    # predict the class and print the classification report, f1 micro, f1 macro score
    pred = clf.predict(test[features_to_use])
    print(classification_report(test['OpenStatusMod'].values, pred, target_names=status_labels))
    print('micro score: ')
    print(metrics.precision_recall_fscore_support(test['OpenStatusMod'].values, pred, average='micro'))
    print('macro score:\n')
    print(metrics.precision_recall_fscore_support(test['OpenStatusMod'].values, pred, average='macro'))
    # predict the class probabilities
    probs = clf.predict_proba(test[features_to_use])
    # rescale the priors
    new_probs = kf.cap_and_update_priors(priors, probs, private_priors, 0.001)
    # calculate logloss with the rescaled probabilities
    print('log loss: %f\n' % log_loss(test['OpenStatusMod'].values, new_probs))

    row = {}
    if hasattr(clf, "feature_importances_"):
        # sort the features by importance
        sorted_idx = np.argsort(clf.feature_importances_)
        # reverse the order so it is descending
        sorted_idx = sorted_idx[::-1]
        # add to dataframe
        row['num_features'] = len(features_to_use)
        row['features_used'] = ','.join(features_to_use)
        # trim the worst 5
        sorted_idx = sorted_idx[:-5]
        # swap the features list with the trimmed features
        temp = features_to_use
        features_to_use = []
        for feat in sorted_idx:
            features_to_use.append(temp[feat])
        # add the logloss performance
        row['logloss'] = [log_loss(test['OpenStatusMod'].values, new_probs)]
    print('')
    # add the row to the dataframe
    trim_5_df = trim_5_df.append(DataFrame(row))
    run += 1
So what I'm doing here is: I have a list of features I want to train on and then predict against; using the feature importances, I trim the worst 5 and repeat. During each run I add a row recording the prediction performance so that I can do some analysis later.
The original code was much bigger (I had different classifiers and datasets I was analysing), but I hope you get the picture from the above. The thing I noticed was that, for random forest, the number of features I removed on each run affected the performance, so trimming 1, 3 and 5 features at a time resulted in different sets of best features.
I found that using a GradientBoostingClassifier was more predictable and repeatable, in the sense that the final set of best features agreed whether I trimmed 1 feature at a time or 3 or 5.
I hope I'm not teaching you to suck eggs here, you probably know more than me, but my approach to ablative analysis was to use a fast classifier to get a rough idea of the best sets of features, then use a better-performing classifier, then start hyperparameter tuning, again doing coarse-grained comparisons first and fine-grained ones once I got a feel for what the best params were.
I submitted a request to add coef_ so RandomForestClassifier may be used with RFECV. However, the change had already been made. This change will be in version 0.17.
https://github.com/scikit-learn/scikit-learn/issues/4945
You can pull the latest dev build if you want to use it now.
Here's what I ginned up. It's a pretty simple solution, and relies on a custom accuracy metric (called weightedAccuracy) since I'm classifying a highly unbalanced dataset. But, it should be easily made more extensible if desired.
from sklearn import datasets
import pandas
from sklearn.ensemble import RandomForestClassifier
from sklearn import cross_validation
from sklearn.metrics import confusion_matrix
def get_enhanced_confusion_matrix(actuals, predictions, labels):
    """Enhances confusion_matrix by adding sensitivity and specificity metrics"""
    cm = confusion_matrix(actuals, predictions, labels=labels)
    sensitivity = float(cm[1][1]) / float(cm[1][0] + cm[1][1])
    specificity = float(cm[0][0]) / float(cm[0][0] + cm[0][1])
    weightedAccuracy = (sensitivity * 0.9) + (specificity * 0.1)
    return cm, sensitivity, specificity, weightedAccuracy
iris = datasets.load_iris()
x=pandas.DataFrame(iris.data, columns=['var1','var2','var3', 'var4'])
y=pandas.Series(iris.target, name='target')
response, _ = pandas.factorize(y)
xTrain, xTest, yTrain, yTest = cross_validation.train_test_split(x, response, test_size = .25, random_state = 36583)
print "building the first forest"
rf = RandomForestClassifier(n_estimators = 500, min_samples_split = 2, n_jobs = -1, verbose = 1)
rf.fit(xTrain, yTrain)
importances = pandas.DataFrame({'name': x.columns, 'imp': rf.feature_importances_})
importances = importances.sort(['imp'], ascending=False).reset_index(drop=True)
cm, sensitivity, specificity, weightedAccuracy = get_enhanced_confusion_matrix(yTest, rf.predict(xTest), [0, 1])
numFeatures = len(x.columns)
rfeMatrix = pandas.DataFrame({'numFeatures': [numFeatures],
                              'weightedAccuracy': [weightedAccuracy],
                              'sensitivity': [sensitivity],
                              'specificity': [specificity]})
print("running RFE on %d features" % numFeatures)
for i in range(1, numFeatures, 1):
    varsUsed = importances['name'][0:i]
    print("now using %d of %s features" % (len(varsUsed), numFeatures))
    xTrain, xTest, yTrain, yTest = cross_validation.train_test_split(x[varsUsed], response, test_size=.25)
    rf = RandomForestClassifier(n_estimators=500, min_samples_split=2,
                                n_jobs=-1, verbose=1)
    rf.fit(xTrain, yTrain)
    cm, sensitivity, specificity, weightedAccuracy = get_enhanced_confusion_matrix(yTest, rf.predict(xTest), [0, 1])
    print("\n" + str(cm))
    print('the sensitivity is %d percent' % (sensitivity * 100))
    print('the specificity is %d percent' % (specificity * 100))
    print('the weighted accuracy is %d percent' % (weightedAccuracy * 100))
    rfeMatrix = rfeMatrix.append(
        pandas.DataFrame({'numFeatures': [len(varsUsed)],
                          'weightedAccuracy': [weightedAccuracy],
                          'sensitivity': [sensitivity],
                          'specificity': [specificity]}), ignore_index=True)
print("\n" + str(rfeMatrix))
maxAccuracy = rfeMatrix.weightedAccuracy.max()
maxAccuracyFeatures = min(rfeMatrix.numFeatures[rfeMatrix.weightedAccuracy == maxAccuracy])
featuresUsed = importances['name'][0:maxAccuracyFeatures].tolist()
print("the final features used are %s" % featuresUsed)
