Grid search ValueError: Invalid parameter classifier for estimator - python

I'm trying to use a random forest with grid search, but this error shows up:
ValueError: Invalid parameter classifier for estimator Pipeline(steps=[('tfidf_vectorizer', TfidfVectorizer()),
('rf_classifier', RandomForestClassifier())]).
Check the list of available parameters with `estimator.get_params().keys()`.
import numpy as np # linear algebra
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn import pipeline,ensemble,preprocessing,feature_extraction,metrics
train=pd.read_json('cleaned_data1')
#split dataset into X , Y
X=train.iloc[:,0]
Y=train.iloc[:,2]
estimators = pipeline.Pipeline([
    ('tfidf_vectorizer', feature_extraction.text.TfidfVectorizer(lowercase=True)),
    ('rf_classifier', ensemble.RandomForestClassifier())
])
print(estimators.get_params().keys())
params = {"classifier__max_depth": [3, None],
          "classifier__max_features": [1, 3, 10],
          "classifier__min_samples_split": [1, 3, 10],
          "classifier__min_samples_leaf": [1, 3, 10],
          # "bootstrap": [True, False],
          "classifier__criterion": ["gini", "entropy"]}
X_train,X_test,y_train,y_test=train_test_split(X,Y, test_size=0.2)
rf_classifier=GridSearchCV(estimators,params, cv=10 , n_jobs=-1 ,scoring='accuracy',iid=True)
rf_classifier.fit(X_train,y_train)
y_pred=rf_classifier.predict(X_test)
metrics.confusion_matrix(y_test,y_pred)
print(metrics.accuracy_score(y_test,y_pred))
I've also tried these params:
param_grid = {
    'n_estimators': [200, 500],
    'max_features': ['auto', 'sqrt', 'log2'],
    'max_depth': [4, 5, 6, 7, 8],
    'criterion': ['gini', 'entropy']
}
but I still get the same error.

Please ensure that when you reference a step of the pipeline in the parameter grid, you use the same name you gave that step when building the pipeline.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
# Define a pipeline to search for the best combination of PCA truncation
# and classifier regularization.
pca = PCA()
# set the tolerance to a large value to make the example faster
logistic = LogisticRegression(max_iter=10000, tol=0.1)
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
X_digits, y_digits = datasets.load_digits(return_X_y=True)
# Parameters of pipelines can be set using ‘__’ separated parameter names:
param_grid = {
    'pca__n_components': [5, 15, 30, 45, 64],
    'logistic__C': np.logspace(-4, 4, 4),
}
search = GridSearchCV(pipe, param_grid, n_jobs=-1)
search.fit(X_digits, y_digits)
print("Best parameter (CV score=%0.3f):" % search.best_score_)
print(search.best_params_)
In this example, the LogisticRegression model is referenced as 'logistic', so its parameters are prefixed with logistic__. Also, on a side note, please note that for RandomForestClassifier a value of min_samples_split = 1 is not possible and will result in an error.
This is from the sklearn documentation.

You have called the random forest step 'rf_classifier' within the pipeline, so either rename that step to 'classifier', which should solve the issue, or keep the step name and change the "classifier__" prefix in the params to "rf_classifier__" so the keys match. The params look for a step named 'classifier' in the pipeline so they can apply themselves; at the moment nothing is named this, and therefore the error is thrown.
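For example, a minimal sketch of the second option, keeping the step name 'rf_classifier' from the question's pipeline (the grid values are just the ones from the question, with min_samples_split raised to the minimum allowed value of 2):
params = {"rf_classifier__max_depth": [3, None],
          "rf_classifier__max_features": [1, 3, 10],
          "rf_classifier__min_samples_split": [2, 3, 10],  # 2 is the smallest valid value
          "rf_classifier__min_samples_leaf": [1, 3, 10],
          "rf_classifier__criterion": ["gini", "entropy"]}
rf_classifier = GridSearchCV(estimators, params, cv=10, n_jobs=-1, scoring='accuracy')
Every key should now appear in estimators.get_params().keys(), which is exactly the check the error message suggests.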

Related

How to go around the error "Invalid parameter estimator for estimator Pipeline" while using GridSearchCV with SVR?

I'm trying to GridSearch the best hyperparameters with this code:
search = GridSearchCV(
    make_pipeline(RobustScaler(),
                  SVR()#,
                  #cv=kf
                  #refit=True
                  ),
    param_grid = {
        'estimator__svr__kernel': ('linear', 'rbf', 'poly')#,
        #'estimator__svr__C': [10, 20]
        #'estimator__svr__gamma': [1e-5, 3e-4],
        #'estimator__svr__epsilon': [0.001, 0.002, 0.006, 0.008]#,
        #'cv': [10]
    },
    refit=True)
search.fit(train, target)
I get this error :
ValueError: Invalid parameter estimator for estimator Pipeline(steps=[('robustscaler', RobustScaler()), ('svr', SVR())]). Check the list of available parameters with estimator.get_params().keys()
The error doesn't pinpoint any particular entry in the parameter grid. Moreover, estimator.get_params().keys() lists the parameters that I used:
dict_keys(['cv', 'error_score', 'estimator__memory', 'estimator__steps', 'estimator__verbose', 'estimator__robustscaler', 'estimator__svr', 'estimator__robustscaler__copy', 'estimator__robustscaler__quantile_range', 'estimator__robustscaler__unit_variance', 'estimator__robustscaler__with_centering', 'estimator__robustscaler__with_scaling', 'estimator__svr__C', 'estimator__svr__cache_size', 'estimator__svr__coef0', 'estimator__svr__degree', 'estimator__svr__epsilon', 'estimator__svr__gamma', 'estimator__svr__kernel', 'estimator__svr__max_iter', 'estimator__svr__shrinking', 'estimator__svr__tol', 'estimator__svr__verbose', 'estimator', 'n_jobs', 'param_grid', 'pre_dispatch', 'refit', 'return_train_score', 'scoring', 'verbose'])
No combination of param_grid seems to work.
I think that you should use square brackets instead of parentheses for the estimator__svr__kernel:
'estimator__svr__kernel': ['linear', 'rbf','poly']
EDIT:
I was able to run your script against the iris dataset by using svr__kernel instead of estimator__svr__kernel in the parameter grid:
from sklearn.preprocessing import RobustScaler
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVR
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
search = GridSearchCV(
    make_pipeline(RobustScaler(),
                  SVR()#,
                  #cv=kf
                  #refit=True
                  ),
    param_grid = {'svr__kernel': ('linear', 'rbf', 'poly')},
    refit=True)
search.fit(X, y)
This returns:
GridSearchCV(estimator=Pipeline(steps=[('robustscaler', RobustScaler()),
('svr', SVR())]),
param_grid={'svr__kernel': ('linear', 'rbf', 'poly')})
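The reason the estimator__ prefix showed up in your get_params() output is that it was called on the GridSearchCV object, which nests the pipeline under its own estimator parameter. The keys in param_grid must match the parameters of the pipeline itself, which you can list directly (a small check, assuming the same pipeline as above):
pipe = make_pipeline(RobustScaler(), SVR())
print(pipe.get_params().keys())
# contains 'svr__kernel', 'svr__C', ... with no 'estimator__' prefix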

Why are Pipelines used as part of GridsearchCV and not the other way around?

Though I understand the potential benefits, especially in combination with GridSearchCV, I wonder why it is always used like this (or at least as far as I understand it):
Pipeline steps are set for each classifier (with 'passthrough' for the clf step). Then, GridSearchCV equips the pipeline with multiple parameters and classifiers.
I am not sure if this is true, but from my point of view, it seems as if this causes the steps before the classifier to run multiple times, even if they are always used with the same parameters.
This leads me to the question of why it is not used the other way around, or whether that would even be possible.
Here is a picture of the situation in my head with example configuration:
First let's create a dataset
from sklearn.datasets import make_classification
from sklearn import svm
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
# generate some data to play with
X, y = make_classification(n_informative=5, n_redundant=0, random_state=42)
Now the usual way of working with a grid_search is to try different parameters for all steps.
As an example let's use PCA and SVC.
pipe = Pipeline(steps=[('pca', PCA()), ('svm', svm.SVC())])
# Parameters of pipelines can be set using ‘__’ separated parameter names:
param_grid = {
    'pca__n_components': [5, 15, 30, 45, 64],
    'svm__C': [1, 5, 10],
}
gs = GridSearchCV(pipe, param_grid, n_jobs=-1)
gs.fit(X, y)
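After fitting, the jointly best combination across both steps can be inspected (a small usage note, not part of the original example):
print(gs.best_params_)  # e.g. {'pca__n_components': ..., 'svm__C': ...}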
However, if you want, you can apply the earlier steps to the data up front and only perform the grid search on the classifier:
pca = PCA()
X_pca = pca.fit_transform(X)  # PCA is unsupervised; fit_transform returns only the transformed X
parameters = {'C': [1, 5, 10]}
svc = svm.SVC()
gs = GridSearchCV(svc, parameters)
gs.fit(X_pca, y)
The problem with this second approach is that you can't test interactions between the parameters of different steps (for example, the best C may depend on the number of PCA components), and the PCA is fit on the full dataset outside the cross-validation loop.
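As a side note on the concern about repeated preprocessing: scikit-learn pipelines accept a memory argument that caches fitted transformers, so a transformer with the same parameters and data is not refit for every downstream parameter combination. A minimal sketch, reusing the param_grid from above (the cache directory name is arbitrary):
from tempfile import mkdtemp
from shutil import rmtree
cachedir = mkdtemp()
pipe_cached = Pipeline(steps=[('pca', PCA()), ('svm', svm.SVC())], memory=cachedir)
gs = GridSearchCV(pipe_cached, param_grid, n_jobs=-1)
gs.fit(X, y)
rmtree(cachedir)  # clean up the transformer cache when done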

How to perform cross-validation of a random-forest model in scikit-learn?

I need to perform leave-one-out cross-validation of an RF model.
I successfully built a model with high predictive ability.
Now I need to perform a LOO test prior to publication.
Here is my code:
import pandas as pd
import numpy as np
import seaborn as sns
%matplotlib inline
import matplotlib.pyplot as plt
FC_data = pd.read_excel('C:\\Users\\Dre\\Desktop\\My Papers\\Furocoumarins_paper_2018\\Furocoumarins_NEW1.xlsx', index_col=0)
FC_data.head()
# Create correlation matrix
corr_matrix = FC_data.corr().abs()
# Select upper triangle of correlation matrix
upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(np.bool))
# Find index of feature columns with correlation greater than 0.95
to_drop = [column for column in upper.columns if any(upper[column] > 0.95)]
# Drop features
FC_data1 = FC_data.drop(FC_data[to_drop], axis=1)
y = FC_data1.LogFiT
X = FC_data1.drop(['LogFiT', 'LogS'], axis=1)
X_train = X.drop(["3-Acetoisopseudopsoralen", "3-Carbethoxypsoralen", "4,4'-Dimethylangelicin",
"4,7,4'-Trimethylallopsoralen", "Psoralen"], axis=0)
X_train.head(21)
y_train = y.drop(["3-Acetoisopseudopsoralen", "3-Carbethoxypsoralen", "4,4'-Dimethylangelicin",
"4,7,4'-Trimethylallopsoralen", "Psoralen"], axis=0)
y_train.head(21)
X_test = X.loc[["3-Acetoisopseudopsoralen", "3-Carbethoxypsoralen", "4,4'-Dimethylangelicin",
"4,7,4'-Trimethylallopsoralen", "Psoralen"]]
X_test.head(5)
y_test = y.loc[["3-Acetoisopseudopsoralen", "3-Carbethoxypsoralen", "4,4'-Dimethylangelicin",
"4,7,4'-Trimethylallopsoralen", "Psoralen"]]
y_test.head(5)
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
randomforest = RandomForestRegressor(n_jobs=-1)
selector = SelectFromModel(randomforest)
features_important = selector.fit_transform(X_train, y_train)
model = randomforest.fit(features_important, y_train)
from sklearn.model_selection import GridSearchCV
clf_rf = RandomForestRegressor()
parameters = {"n_estimators":[1, 2, 3, 4, 5, 7, 10, 15, 20, 30, 40, 50, 100], "max_depth":[1, 2, 3, 4, 5, 7, 10, 15, 20, 30, 40, 50, 100]}
grid_search_cv_clf = GridSearchCV(clf_rf, parameters, cv=5)
grid_search_cv_clf.fit(features_important, y_train)
from sklearn.metrics import r2_score
y_pred = grid_search_cv_clf.predict(features_important)
r2_score(y_train, y_pred)
grid_search_cv_clf.best_params_
best_clf = grid_search_cv_clf.best_estimator_
X_test_filtered = X_test.iloc[:,selector.get_support()]
best_clf.score(X_test_filtered, y_test)
feature_importances = best_clf.feature_importances_
feature_importances_df = pd.DataFrame({'features': X_test_filtered.columns.values,
'feature_importances':feature_importances})
importances = feature_importances_df.sort_values('feature_importances', ascending=False)
importances.head(25)
Now I need the q2 value.
Finally, I wrote this code and got a reasonably high score of 0.9071543776303185.
from sklearn.model_selection import LeaveOneOut
parameters = {"n_estimators":[4], "max_depth":[20]}
loo_clf = GridSearchCV(best_clf, parameters, cv=LeaveOneOut())
loo_clf.fit(features_important, y_train)
loo_clf.score(features_important, y_train)
I'm not sure whether this is q2 or not. What do you think?
I also decided to obtain the 5-fold cross-validation score. However, it gives ridiculous values, for example -36.58997717, 0.76801832, -1.59900448, 0.1834304, -2.38256389, with a mean of -7.924019361863889.
from sklearn.model_selection import cross_val_score
cvs=cross_val_score(best_clf, features_important, y_train)
mean_cross_val_score = cvs.mean()
mean_cross_val_score
Is there a way to fix this?
You should not run the hyper-parameter search before doing the model evaluation. Instead, you should nest the two cross-validations; otherwise you are leaking information. To learn more about this, look at the following example from the scikit-learn documentation: https://scikit-learn.org/stable/auto_examples/model_selection/plot_nested_cross_validation_iris.html#sphx-glr-auto-examples-model-selection-plot-nested-cross-validation-iris-py
Therefore, in your particular use-case, you should combine GridSearchCV, SelectFromModel, and cross_val_score:
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
X, y = make_regression(n_samples=100)
feature_selector = SelectFromModel(
    RandomForestRegressor(n_jobs=-1), threshold="mean"
)
pipe = make_pipeline(
    feature_selector, RandomForestRegressor(n_jobs=-1)
)
param_grid = {
    # define the grid of the random-forest for the feature selection
    "selectfrommodel__estimator__n_estimators": [10, 20],
    "selectfrommodel__estimator__max_depth": [3, 5],
    # define the grid of the random-forest for the prediction
    "randomforestregressor__n_estimators": [10, 20],
    "randomforestregressor__max_depth": [5, 8],
}
grid_search = GridSearchCV(pipe, param_grid=param_grid, n_jobs=-1, cv=3)
# Nested cross-validation: the inner grid search is re-run inside each outer fold.
# You can use LOO in this way. Be aware that this is not good practice;
# it leads to large variance when evaluating your model.
# scores = cross_val_score(grid_search, X, y, cv=LeaveOneOut(), error_score='raise')
scores = cross_val_score(grid_search, X, y, cv=2, error_score='raise')
scores.mean()
You need to specify the scoring and the cv arguments.
Use this:
from sklearn.model_selection import cross_val_score
mycv = LeaveOneOut()
cvs=cross_val_score(best_clf, features_important, y_train, scoring='r2',cv = mycv)
mean_cross_val_score = cvs.mean()
print(mean_cross_val_score)
This will return the mean cross-validated R2 score using LOOCV.
For more scoring options see here: https://scikit-learn.org/stable/modules/model_evaluation.html#common-cases-predefined-values
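As a side note (not from the original answers): because R2 is undefined on a single held-out sample, an alternative way to get a LOO-based q2 is to pool the leave-one-out predictions first and score them once; a minimal sketch, assuming the same best_clf, features_important and y_train as above:
from sklearn.model_selection import cross_val_predict, LeaveOneOut
from sklearn.metrics import r2_score
loo_pred = cross_val_predict(best_clf, features_important, y_train, cv=LeaveOneOut())
print(r2_score(y_train, loo_pred))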

List of parameters in sklearn randomizedSearchCV like GridSearchCV?

I have a problem where I'd like to test multiple models that don't all have the same named parameters. How would you use a list of parameters for a pipeline in RandomizedSearchCV like you can use in this example with GridSearchCV?
Example from:
https://scikit-learn.org/stable/auto_examples/compose/plot_compare_reduction.html
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.decomposition import PCA, NMF
from sklearn.feature_selection import SelectKBest, chi2
pipe = Pipeline([
    # the reduce_dim stage is populated by the param_grid
    ('reduce_dim', None),
    ('classify', LinearSVC())
])
N_FEATURES_OPTIONS = [2, 4, 8]
C_OPTIONS = [1, 10, 100, 1000]
param_grid = [
    {
        'reduce_dim': [PCA(iterated_power=7), NMF()],
        'reduce_dim__n_components': N_FEATURES_OPTIONS,
        'classify__C': C_OPTIONS
    },
    {
        'reduce_dim': [SelectKBest(chi2)],
        'reduce_dim__k': N_FEATURES_OPTIONS,
        'classify__C': C_OPTIONS
    },
]
grid = GridSearchCV(pipe, cv=3, n_jobs=2, param_grid=param_grid)
digits = load_digits()
grid.fit(digits.data, digits.target)
This is an old issue that has since been resolved (I'm not sure starting from which scikit-learn version).
You can now pass a list of dictionaries for RandomizedSearchCV in the param_distributions parameter. Your example code would become:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import RandomizedSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.decomposition import PCA, NMF
from sklearn.feature_selection import SelectKBest, chi2
pipe = Pipeline([
    # the reduce_dim stage is populated by the param_grid
    ('reduce_dim', None),
    ('classify', LinearSVC())
])
N_FEATURES_OPTIONS = [2, 4, 8]
C_OPTIONS = [1, 10, 100, 1000]
param_grid = [
    {
        'reduce_dim': [PCA(iterated_power=7), NMF()],
        'reduce_dim__n_components': N_FEATURES_OPTIONS,
        'classify__C': C_OPTIONS
    },
    {
        'reduce_dim': [SelectKBest(chi2)],
        'reduce_dim__k': N_FEATURES_OPTIONS,
        'classify__C': C_OPTIONS
    },
]
grid = RandomizedSearchCV(pipe, cv=3, n_jobs=2, param_distributions=param_grid)
digits = load_digits()
grid.fit(digits.data, digits.target)
I'm using sklearn version 0.23.1.
I have found a workaround that relies on duck typing and doesn't get too much in the way.
It relies on passing complete estimators as parameters to the pipeline. We first sample the kind of model, and then its parameters. For that we define two classes that can be sampled :
from sklearn.model_selection import ParameterSampler
class EstimatorSampler:
    """
    Class that holds a model and its parameters distribution.
    When sampled, the parameters are first sampled and set to the model,
    which is returned.

    # Arguments
    ===========
    model : sklearn.base.BaseEstimator
    param_distributions : dict
        Input to ParameterSampler

    # Returns
    =========
    sampled : sklearn.base.BaseEstimator
    """
    def __init__(self, model, param_distributions):
        self.model = model
        self.param_distributions = param_distributions

    def rvs(self, random_state=None):
        sampled_params = next(iter(
            ParameterSampler(self.param_distributions,
                             n_iter=1,
                             random_state=random_state)))
        return self.model.set_params(**sampled_params)


class ListSampler:
    """
    List container that, when sampled, returns one of its items,
    with probabilities defined by `probs`.

    # Arguments
    ===========
    items : 1-D array-like
    probs : 1-D array-like of floats
        If not None, it should be the same length as `items`
        and sum to 1.

    # Returns
    =========
    sampled item
    """
    def __init__(self, items, probs=None):
        self.items = items
        self.probs = probs

    def rvs(self, random_state=None):
        item = np.random.choice(self.items, p=self.probs)
        if hasattr(item, 'rvs'):
            return item.rvs(random_state=random_state)
        return item
And the rest of the code is defined below.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import RandomizedSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.decomposition import PCA, NMF
from sklearn.feature_selection import SelectKBest, chi2
pipe = Pipeline([
    # the reduce_dim stage is populated by the param_grid
    ('reduce_dim', None),
    ('classify', None)
])
N_FEATURES_OPTIONS = [2, 4, 8]
dim_reducers = ListSampler([EstimatorSampler(est, {'n_components': N_FEATURES_OPTIONS})
                            for est in [PCA(iterated_power=7), NMF()]] +
                           [EstimatorSampler(SelectKBest(chi2), {'k': N_FEATURES_OPTIONS})])
C_OPTIONS = [1, 10, 100, 1000]
classifiers = EstimatorSampler(LinearSVC(), {'C': C_OPTIONS})
param_dist = {
    'reduce_dim': dim_reducers,
    'classify': classifiers
}
grid = RandomizedSearchCV(pipe, cv=3, n_jobs=2, scoring='accuracy', param_distributions=param_dist)
digits = load_digits()
grid.fit(digits.data, digits.target)
Hyperopt supports hyperparameter tuning across multiple estimators; check this wiki for more details (section 2.2, "A Search Space Example: scikit-learn").
Check out this post if you want to use sklearn's GridSearch to do that. It suggests an implementation of an EstimatorSelectionHelper estimator which can run different estimators, each with its own grid of parameters.

Pass a scoring function from sklearn.metrics to GridSearchCV

GridSearchCV's documentation states that I can pass a scoring function.
scoring : string, callable or None, default=None
I would like to use a native accuracy_score as a scoring function.
So here is my attempt. Imports and some data:
import numpy as np
from sklearn.cross_validation import KFold, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn import neighbors
X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
Y = np.array([0, 1, 0, 0, 0, 1])
Now when I use just k-fold cross-validation without my scoring function, everything works as intended:
parameters = {
    'n_neighbors': [2, 3, 4],
    'weights': ['uniform', 'distance'],
    'p': [1, 2, 3]
}
model = neighbors.KNeighborsClassifier()
k_fold = KFold(len(Y), n_folds=6, shuffle=True, random_state=0)
clf = GridSearchCV(model, parameters, cv=k_fold) # TODO will change
clf.fit(X, Y)
print clf.best_score_
But when I change the line to
clf = GridSearchCV(model, parameters, cv=k_fold, scoring=accuracy_score) # or accuracy_score()
I get the error: ValueError: Cannot have number of folds n_folds=10 greater than the number of samples: 6, which in my opinion does not represent the real problem.
In my opinion the problem is that accuracy_score does not follow the signature scorer(estimator, X, y) that is described in the documentation.
So how can I fix this problem?
It will work if you change scoring=accuracy_score to scoring='accuracy' (see the documentation for the full list of scorers you can use by name in this way).
In theory, you should be able to pass custom scoring functions like you're trying, but my guess is that you're right and accuracy_score doesn't have the right API.
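If you do want to pass the metric function itself, it can be wrapped with make_scorer so that it matches the scorer(estimator, X, y) signature; a short sketch (not from the original answer), reusing the model, parameters and k_fold defined in the question:
from sklearn.metrics import accuracy_score, make_scorer
clf = GridSearchCV(model, parameters, cv=k_fold, scoring=make_scorer(accuracy_score))
clf.fit(X, Y)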
Here is an example of using Weighted Kappa as the scoring metric for GridSearchCV with a simple Random Forest model. The key learning for me was to pass the scorer-related parameters through the make_scorer function.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import cohen_kappa_score, make_scorer
kappa_scorer = make_scorer(cohen_kappa_score, weights="quadratic")
# Create the parameter grid based on the results of random search
param_grid = {
    'bootstrap': [True],
    'max_features': range(2, 10),  # try features from 2 to 10
    'min_samples_leaf': [3, 4, 5],
    'n_estimators': [100, 300, 500],
    'max_depth': [5]
}
# Create a base model
random_forest = RandomForestClassifier(class_weight="balanced_subsample", random_state=1)
# Instantiate the grid search model
grid_search = GridSearchCV(estimator=random_forest, param_grid=param_grid,
                           cv=5, n_jobs=-1, verbose=2, scoring=kappa_scorer)  # search for the best model using the kappa scorer
# Fit the grid search to the data
grid_search.fit(final_tr, yTrain)
