ValueError: The estimator KerasClassifier should be a classifier

I am trying to create a stacking ensemble using scikit-learn that contains a Keras model wrapped using KerasClassifier.
Here's an example of how my code looks using the iris dataset:
# import libraries
import pandas
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
from keras.layers import Dropout, Flatten, Dense
from keras.utils import np_utils
from keras import optimizers
from sklearn.model_selection import cross_val_score, RepeatedStratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import StackingClassifier
from numpy import mean
from numpy import std
# import data
dataframe = pandas.read_csv("iris.csv", header=None)
dataset = dataframe.values
X = dataset[:,0:4].astype(float)
Y = dataset[:,4]
# create and wrap neural network
def create_model():
    model = Sequential()
    model.add(Flatten(input_shape=X.shape[1:]))
    model.add(Dense(150, activation=tf.keras.layers.LeakyReLU(alpha=0.3)))
    model.add(Dropout(0.9))
    model.add(Dense(50, activation=tf.keras.layers.LeakyReLU(alpha=0.3)))
    model.add(Dropout(0.9))
    model.add(Dense(3, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adam(lr=2e-3),
                  metrics=['acc'])
    return model
model_nn = KerasClassifier(build_fn=create_model, epochs=50, batch_size=5, verbose=0)
model_nn._estimator_type = "classifier"
# create stack
def stacking():
    level0 = list()
    level0.append(('lr', LogisticRegression(max_iter=500000, C=0.00041, solver='newton-cg', multi_class='ovr')))
    level0.append(('nn', model_nn))
    level0.append(('svm', SVC(C=1.0, gamma='scale', tol=0.001, probability=True)))
    level1 = LogisticRegression()
    model = StackingClassifier(estimators=level0, final_estimator=level1, cv=5)
    return model
# evaluate model score
def evaluate_model(model, X, y):
    cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
    scores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1, error_score='raise')
    return scores
scores = evaluate_model(stacking(), X, Y)
print('%.3f (%.3f)' % (mean(scores), std(scores)))
and I get this error:
ValueError: The estimator KerasClassifier should be a classifier.
I found some posts where other users had this issue, but they were able to fix it using the model_nn._estimator_type = "classifier" line. Unfortunately, that isn't solving the issue for me. I'm really new to all of this, so any advice is appreciated. :)

KerasClassifier has been migrated from keras.wrappers.scikit_learn to scikeras.wrappers.
You need to use the code below to access KerasClassifier:
!pip install scikeras
from scikeras.wrappers import KerasClassifier
Please check the SciKeras documentation for more details.
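For reference, here is a minimal sketch of how the wrapper from the question could be rebuilt on SciKeras (assuming the same create_model function as above; SciKeras takes the builder via model= rather than build_fn=). Since SciKeras implements scikit-learn's estimator interface, the _estimator_type workaround should no longer be necessary:
from scikeras.wrappers import KerasClassifier

# SciKeras passes the builder via `model=`; `build_fn=` is deprecated there
model_nn = KerasClassifier(model=create_model, epochs=50, batch_size=5, verbose=0)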

Related

WARNING:tensorflow:AutoGraph could not transform <function Model.make_train_function.<locals>.train_function>. Cause: unsupported operand type(s) for -: 'NoneType' and 'int'

While I was working through a basic example of a dropout layer, I got stuck here.
Not only here: moving further, I got a similar error in another model where I used KerasClassifier.
To reproduce this, you can use the sonar dataset.
# Baseline Model on the Sonar Dataset
from pandas import read_csv
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
# load dataset
dataframe = read_csv("/content/sonar.csv", header=None)
dataset = dataframe.values
# split into input (X) and output (Y) variables
X = dataset[:,0:60].astype(float)
Y = dataset[:,60]
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
# baseline
def create_baseline():
    # create model
    model = Sequential()
    model.add(Dense(40, input_shape=(60,), activation='relu'))
    model.add(Dense(30, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # Compile model
    sgd = SGD(learning_rate=0.01, momentum=0.8)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model
estimators = []
estimators.append(('standardize', StandardScaler()))
estimators.append(('mlp', KerasClassifier(model=create_baseline, epochs=300, batch_size=16, verbose=0)))
pipeline = Pipeline(estimators)
kfold = StratifiedKFold(n_splits=10, shuffle=True)
results = cross_val_score(pipeline, X, encoded_Y, cv=kfold)
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
I'm getting this error:
WARNING:tensorflow:AutoGraph could not transform <function Model.make_train_function.<locals>.train_function at 0x2ab077a60> and will run it as-is.
Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output.
Cause: unsupported operand type(s) for -: 'NoneType' and 'int'
I'm currently using Mac M1 and working in mini-conda environment.
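Not from the original thread, but one observation: the message itself says TensorFlow "will run it as-is", so AutoGraph falling back does not by itself stop training. If the log noise needs to be suppressed, one option is to lower AutoGraph's verbosity:
import tensorflow as tf

# Lower AutoGraph's log verbosity; the affected function still runs,
# just without graph conversion.
tf.autograph.set_verbosity(0)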

The first argument to `Layer.call` must always be passed

import os
from pylab import rcParams
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns; sns.set()
from numpy import *
from scipy import stats
from pandas.plotting import scatter_matrix
import sklearn
import warnings
from imblearn.over_sampling import SMOTE
import tensorflow as tf
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import GridSearchCV
from imblearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
data = pd.read_excel(r'Attrition Data Exercise.xlsx')
X = data.iloc[:, 3:-1].values
y = data.iloc[:, -1].values
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
ct = ColumnTransformer(transformers=
    [('one_encoder', OneHotEncoder(), [2, 5, 11, 13, 28]),
     ('ord_encoder', OrdinalEncoder(), [0])],
    remainder='passthrough')
X = np.array(ct.fit_transform(X))
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
ann = tf.keras.models.Sequential()
ann.add(tf.keras.layers.Dropout(rate=0.3))
ann.add(tf.keras.layers.Dense(units=6, activation='relu', kernel_regularizer='l1', bias_regularizer='l2'))
ann.add(tf.keras.layers.Dropout(rate=0.3))
ann.add(tf.keras.layers.Dense(units=3, activation='relu', kernel_regularizer='l1', bias_regularizer='l2'))
ann.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))
opt = tf.keras.optimizers.Adam(
    learning_rate=0.001,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-08)
ann.compile(optimizer = opt, loss = 'binary_crossentropy', metrics = ['accuracy', tf.keras.metrics.Recall()])
The above code runs successfully. The error occurs when I run the code below in a cell.
pipe = Pipeline([('smt', SMOTE()), ('model', KerasClassifier(build_fn=ann, verbose=0, epochs=170))])
weights = np.linspace(0.5, 0.5, 1)
gsc = GridSearchCV(
    estimator=pipe,
    param_grid={
        'smt__sampling_strategy': weights
    },
    scoring='f1',
    cv=4)
grid_result = gsc.fit(X_train, y_train)
The code above results in the following error:
ValueError: The first argument to `Layer.call` must always be passed
Any idea what I might be doing wrong or what can be improved?
I tried replacing KerasClassifier with KerasRegressor too, just to see if anything changed, but nothing did. What essentially is going wrong?
I'm trying to use the Pipeline class from imblearn together with GridSearchCV to find the best parameters for classifying an imbalanced dataset. I want to resample only the training set and leave the validation set untouched, which imblearn's Pipeline seems to do. However, I'm getting an error while implementing the accepted solution.
@danr got it correct. Many thanks to him. I was getting the same error when using KerasClassifier with sklearn's cross_val_score. Adding the lambda after build_fn solved the problem: build_fn expects a callable that returns a compiled model, so passing an already-built model instance (like ann above) makes the wrapper call the model itself with no inputs, which raises the Layer.call error. I had a function create_model that created a Keras Sequential model. Corrected code that runs smoothly (tensorflow 2.4.1):
from sklearn.model_selection import cross_val_score
# Create a KerasClassifier using best params determined using RandomizedSearchCV above
model = KerasClassifier(build_fn = lambda: create_model(learning_rate = 0.01, activation = 'tanh'), epochs = 50, batch_size = 32, verbose = 0)
# Calculate the accuracy score for each fold
kfolds = cross_val_score(model, X, y, cv = 3)
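For completeness, here is a sketch of the kind of create_model this assumes. The architecture below is hypothetical; only the (learning_rate, activation) signature used by the lambda matters:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam

def create_model(learning_rate=0.01, activation='tanh'):
    # hypothetical architecture; any builder returning a compiled model works
    model = Sequential()
    model.add(Dense(16, input_dim=X.shape[1], activation=activation))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer=Adam(learning_rate=learning_rate),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model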

I am doing LSTM classification for DDoS detection and get an error while performing the evaluation

This is the code that I have used. I do not really know where I went wrong, as I am new to such things. As an overview of what I am trying to do: I am trying to classify whether there is an attack or not. I used 'Y' as my label, which contains 11 classes.
import pandas as pd
data=pd.read_csv('/content/gdrive/MyDrive/data /Ecobee_Thermostat.csv')
#imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import pandas as pd
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
#label encoder
data['Y'].unique()
from sklearn import preprocessing
label_encoder = preprocessing.LabelEncoder()
data['Y']=label_encoder.fit_transform(data['Y'])
data['Y'].unique()
#ex
data.head(10)
data.isnull().sum()
data.shape
for col in data.columns:
    print(col)
x = data.iloc[:, 0:115]
x.shape
Y=data['Y']
Y
#scaling
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(x)
X= scaler.transform(x)
#split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=44, shuffle =True)
#reshape
x_train = np.reshape(x_train, (x_train.shape[0],1,X.shape[1]))
x_test = np.reshape(x_test, (x_test.shape[0],1,X.shape[1]))
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import LSTM
tf.keras.backend.clear_session()
from tensorflow import keras
from tensorflow.keras import layers
from keras import metrics
from sklearn.metrics import classification_report
opt = keras.optimizers.Adamax(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,)
model = Sequential()
model.add(LSTM(200, input_shape=(1,115),activation="relu",return_sequences=True))
model.add(Dropout(0.2))
model.add(Dense(11, activation='softmax'))
from keras.optimizers import SGD
model.compile(loss = 'sparse_categorical_crossentropy', optimizer = opt, metrics = ['accuracy'])
model.summary()
history = model.fit(x_train, y_train, epochs=5, batch_size=2000,
                    validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test)
print(acc)
print(score)
y_pred = model.predict(x_test)
y_predtrain = model.predict(x_train)  # <-- here is where I get the error
from sklearn.metrics import confusion_matrix
confusiontr = confusion_matrix(y_train, y_pred)
confusionts = confusion_matrix(y_test, y_pred.round())
print('Confusion Matrix\n')
print(confusionts)
sn.heatmap(confusiontr)
When I run it, it goes very well until the evaluation; the output is as follows:
0.8152588605880737
0.36209338903427124
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-14-61331f357bd6> in <module>()
5 y_predtrain = model.predict(x_train)
6 from sklearn.metrics import confusion_matrix
----> 7 confusiontr = confusion_matrix(y_train, y_pred)
8 confusionts = confusion_matrix(y_test, y_pred.round())
9 print('Confusion Matrix\n')
2 frames
/usr/local/lib/python3.7/dist-packages/sklearn/utils/validation.py in check_consistent_length(*arrays)
210 if len(uniques) > 1:
211 raise ValueError("Found input variables with inconsistent numbers of"
--> 212 " samples: %r" % [int(l) for l in lengths])
213
214
ValueError: Found input variables with inconsistent numbers of samples: [560036, 275840]
I think the problem comes from the following lines:
y_pred = model.predict(x_test)
from sklearn.metrics import confusion_matrix
confusiontr = confusion_matrix(y_train, y_pred)
Your y_pred comes from x_test, while your y_train comes from x_train. That is, your train and test sets have different numbers of samples, hence what you call y_train and y_pred on the last line above won't match in length either.
Thus, simply use y_predtrain instead for confusiontr:
confusiontr = confusion_matrix(y_train, y_predtrain)
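One caveat beyond the original answer: model.predict here returns softmax probabilities with an extra timestep axis (the LSTM uses return_sequences=True, so the output shape is (n, 1, 11)), while y_train holds integer labels. Both matrices therefore likely also need a squeeze and an argmax, as in this sketch:
import numpy as np
from sklearn.metrics import confusion_matrix

# collapse the timestep axis, then pick the most probable class per sample
train_labels = np.argmax(np.squeeze(y_predtrain), axis=-1)
test_labels = np.argmax(np.squeeze(y_pred), axis=-1)
confusiontr = confusion_matrix(y_train, train_labels)
confusionts = confusion_matrix(y_test, test_labels)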

Keras estimator predict says that input is misshapen

I'm getting a numpy shape error when I use the predict function of a Keras estimator. I build, evaluate, and then retrain the model using the following code:
import pandas as pd
import sqlalchemy as sqla
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
# Connect to to the DB and retrieve the iris table
con = sqla.create_engine('postgresql://tristan:sebens@db:5432/tristan')
con.connect()
table_name = "iris"
schema = "public"
iris = pd.read_sql_table(table_name, con, schema=schema)
iris.head()
iris_ds = iris.values # Convert the table to a numpy array
X = iris_ds[:, 0:4].astype(float) # Slice the descriptive features into a numpy array
Y = iris_ds[:, 4] # Slice the labels away as their own numpy array
# The labels are encoded as strings, so we need to encode them
# as numbers that can be output by an ANN
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = to_categorical(encoded_Y)
# define baseline model
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(8, input_dim=4, activation='relu'))
    model.add(Dense(3, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
seed = 7
# Train the model:
# First we define the model as a classifier. This will affect the process used to train it
estimator = KerasClassifier(build_fn=baseline_model, epochs=200, batch_size=5, verbose=0)
# Honestly not totally sure what this is, but it has to do with splitting the training/evaluation data in
# a way that gives us a more realistic metric of the model's accuracy
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
# Now that we have our classifier and our data pipeline defined, we can begin the training process
results = cross_val_score(estimator, X, dummy_y, cv=kfold)
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
# If we like our accuracy, then we can train the model for real
# Evaluating the model actually evaluates a clone of the model, so now we need to train the model again
estimator.fit(X, dummy_y)
And this is where the trouble is. I try to make a test prediction:
# Let's make a test prediction with our model
x = X[0]
estimator.predict(x)
And I get an input shape error:
ValueError: Error when checking input: expected dense_21_input to have shape (4,) but got array with shape (1,)
I'm at a loss. How can the input have the wrong shape if it's literally a member of the training dataset?
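A likely explanation (not from the original post): Keras models predict on batches, so even a single sample needs a leading batch dimension. X[0] has shape (4,), but predict expects shape (n_samples, 4), so the wrapper sees four one-feature samples. A minimal sketch of the fix:
# keep the batch axis: shape (1, 4) instead of (4,)
x = X[0].reshape(1, -1)   # equivalently X[0:1]
print(estimator.predict(x))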

GridSearch implementation for Keras Regression

Trying to understand and implement the GridSearch method for Keras regression. Here is my simple reproducible regression application.
import pandas as pd
import numpy as np
import sklearn
from sklearn.model_selection import train_test_split
from sklearn import metrics
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
df = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/concrete/slump/slump_test.data")
df.drop(['No','FLOW(cm)','Compressive Strength (28-day)(Mpa)'],1,inplace=True)
# Convert a Pandas dataframe to the x,y inputs that TensorFlow needs
def to_xy(df, target):
    result = []
    for x in df.columns:
        if x != target:
            result.append(x)
    # find out the type of the target column. Is it really this hard? :(
    target_type = df[target].dtypes
    target_type = target_type[0] if hasattr(target_type, '__iter__') else target_type
    # Encode to int for classification, float otherwise. TensorFlow likes 32 bits.
    if target_type in (np.int64, np.int32):
        # Classification
        dummies = pd.get_dummies(df[target])
        return df.as_matrix(result).astype(np.float32), dummies.as_matrix().astype(np.float32)
    else:
        # Regression
        return df.as_matrix(result).astype(np.float32), df.as_matrix([target]).astype(np.float32)
x,y = to_xy(df,'SLUMP(cm)')
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.25, random_state=42)
#Create Model
model = Sequential()
model.add(Dense(128, input_dim=x.shape[1], activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, mode='auto')
checkpointer = ModelCheckpoint(filepath="best_weights.hdf5",save_best_only=True) # save best model
model.fit(x_train,y_train,callbacks=[monitor,checkpointer],verbose=0,epochs=1000)
#model.fit(x_train,y_train,validation_data=(x_test,y_test),callbacks=[monitor,checkpointer],verbose=0,epochs=1000)
pred = model.predict(x_test)
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print("(RMSE): {}".format(score))
If you run the code, you can see the loss is not too large.
And here is my reproducible GridSearch implementation. First, I simply searched the web and found a GridSearch application for KerasClassifier, then tried to revise it for KerasRegressor. I am not sure if my revision is correct. Assuming the general concept is correct, there must be a problem in this code, because the loss function does not make sense: the loss is MSE, but the output is negative, and unfortunately I could not figure out where I am going wrong.
from keras.wrappers.scikit_learn import KerasRegressor
import pandas as pd
import numpy as np
import sklearn
from sklearn.model_selection import train_test_split
from sklearn import metrics
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import GridSearchCV
df = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/concrete/slump/slump_test.data")
df.drop(['No','FLOW(cm)','Compressive Strength (28-day)(Mpa)'],1,inplace=True)
#Convert a Pandas dataframe to the x,y inputs that TensorFlow needs
def to_xy(df, target):
    result = []
    for x in df.columns:
        if x != target:
            result.append(x)
    # find out the type of the target column. Is it really this hard? :(
    target_type = df[target].dtypes
    target_type = target_type[0] if hasattr(target_type, '__iter__') else target_type
    # Encode to int for classification, float otherwise. TensorFlow likes 32 bits.
    if target_type in (np.int64, np.int32):
        # Classification
        dummies = pd.get_dummies(df[target])
        return df.as_matrix(result).astype(np.float32), dummies.as_matrix().astype(np.float32)
    else:
        # Regression
        return df.as_matrix(result).astype(np.float32), df.as_matrix([target]).astype(np.float32)
x,y = to_xy(df,'SLUMP(cm)')
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.25, random_state=42)
def create_model(optimizer='adam'):
    # create model
    model = Sequential()
    model.add(Dense(128, input_dim=x.shape[1], activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['mse'])
    return model
model = KerasRegressor(build_fn=create_model, epochs=100, batch_size=10, verbose=0)
optimizer = ['SGD', 'RMSprop', 'Adagrad']
param_grid = dict(optimizer=optimizer)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(x_train, y_train)
#summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))
I have tested your code, and I see that you are not using a scoring function in GridSearchCV, so according to the scikit-learn documentation:
If None, the estimator's default scorer (if available) is used.
It seems it is using 'neg_mean_absolute_error' (or one of the other negated regression scorers) by default for scoring the models.
That is probably why it says that the best model is:
-75.820078 using {'optimizer': 'Adagrad'}
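If you would rather see the score on a familiar scale, one option (a sketch, not part of the original answer) is to pass a scorer explicitly and undo the sign flip yourself, since scikit-learn always maximizes scores:
grid = GridSearchCV(estimator=model, param_grid=param_grid,
                    scoring='neg_mean_squared_error', n_jobs=1)
grid_result = grid.fit(x_train, y_train)
# neg_mean_squared_error is negated MSE; flip the sign and take the root
print("Best RMSE: %f using %s" % (np.sqrt(-grid_result.best_score_),
                                  grid_result.best_params_))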
