LightGBM Regression in Python: categorical values error

I am trying to fit a LightGBM Regressor in Python and it gives me an error. Basically, I have a dataset where all the predictors are categorical and my target variable is continuous. Since all my X variables are categorical, I converted them to numeric form using label encoding.
After that, I passed my categorical variables to LGBMRegressor so that the algorithm would handle them accordingly.
# lightgbm for regression
import numpy as np
import lightgbm as lgb
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing

df = pd.read_csv("TrainModelling.csv")
df.drop(df.columns[0], axis=1, inplace=True)  # Remove index column
y = df["Target"]
X = df.drop("Target", axis=1)

le = preprocessing.LabelEncoder()
X = X.apply(le.fit_transform)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

hyper_params = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': 'regression',
    'metric': ['l2', 'auc'],
    'learning_rate': 0.005,
    'feature_fraction': 0.9,
    'bagging_fraction': 0.7,
    'bagging_freq': 10,
    'verbose': 0,
    "max_depth": 8,
    "num_leaves": 128,
    "max_bin": 512,
    "num_iterations": 100000,
    "n_estimators": 1000
}

cat_feature_list = np.where(X.dtypes != float)[0]
gbm = lgb.LGBMRegressor(**hyper_params, categorical_feature=cat_feature_list)
gbm.fit(X_train, y_train,
        eval_set=[(X_test, y_test)],
        eval_metric='l1',
        early_stopping_rounds=1000)
The error:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()

This line is problematic:
cat_feature_list = np.where(X.dtypes != float)[0]
(I wish you had shared the whole traceback of the error; it would have saved time.)
X.dtypes != float gives a pandas Series of booleans, and when its truthiness is later evaluated you get this error. To get the names of the categorical columns as a list:
cat_feature_list = X.select_dtypes("object").columns.tolist()
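For completeness, here is a minimal, self-contained sketch (toy data and hypothetical column names, not from the question) of how that list of names can then be used: converting the string columns to pandas' category dtype lets LightGBM handle them natively, so no label encoding is needed.
import numpy as np
import pandas as pd
import lightgbm as lgb

# Toy frame standing in for TrainModelling.csv: string predictors, numeric target.
df = pd.DataFrame({
    "colour": ["red", "blue", "green", "red"] * 25,
    "size": ["S", "M", "L", "M"] * 25,
    "Target": np.random.rand(100),
})
X, y = df.drop("Target", axis=1), df["Target"]

cat_feature_list = X.select_dtypes("object").columns.tolist()
for col in cat_feature_list:
    X[col] = X[col].astype("category")  # category dtype is detected by LightGBM

gbm = lgb.LGBMRegressor(objective="regression", n_estimators=200)
gbm.fit(X, y)  # no categorical_feature argument needed with category dtype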

Related

ValueError: too many values to unpack (expected 3), Machine learning

I am learning machine learning from the book Artificial-Intelligence-with-Python-Second-Edition. I ran into this error:
ValueError: too many values to unpack (expected 3)
Here is the code from the book:
print("\nGrid scores for the parameter grid:")
for params, avg_score, _ in classifier.grid_scores_:  # from sklearn import grid_search
    print(params, '-->', round(avg_score, 3))
(The code for the tutorial was taken from GitHub: Artificial-Intelligence-with-Python-Second-Edition/Chapter06/run_grid_search.py)
from sklearn import grid_search - this module no longer exists, so I need to switch from grid_scores_ to cv_results_.
But when I use the cv_results_ attribute, I get this error:
ValueError: too many values to unpack (expected 3)
I have tried different variants and re-read all the help on this topic, but I cannot find a solution yet.
My full code:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import train_test_split
from utilities import visualize_classifier

# Load input data
input_file = 'data_random_forests.txt'
data = np.loadtxt(input_file, delimiter=',')
X, y = data[:, :-1], data[:, -1]

# Separate input data into three classes based on labels
class_0 = np.array(X[y==0])
class_1 = np.array(X[y==1])
class_2 = np.array(X[y==2])

# Split the data into training and testing datasets
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=5)

# Define the parameter grid
parameter_grid = [{'n_estimators': [100], 'max_depth': [2, 4, 7, 12, 16]},
                  {'max_depth': [4], 'n_estimators': [25, 50, 100, 250]}]

metrics = ['precision_weighted', 'recall_weighted']

for metric in metrics:
    print("\n##### Searching optimal parameters for", metric)
    classifier = GridSearchCV(
        ExtraTreesClassifier(random_state=0),
        parameter_grid, cv=5, scoring=metric)
    classifier.fit(X_train, y_train)

    print("\nGrid scores for the parameter grid:")
    for params, avg_score, _ in classifier.cv_results_:
        print(params, '-->', round(avg_score, 3))

    print("\nBest parameters:", classifier.best_params_)

    y_pred = classifier.predict(X_test)
    print("\nPerformance report:\n")
    print(classification_report(y_test, y_pred))
GridSearchCV.cv_results_ is a dictionary of numpy ndarrays (source). You are trying to unpack one dictionary into 3 variables (params, avg_score and _). The old loop worked because grid_search's grid_scores_ attribute returned a list of 3-element tuples, whereas the current GridSearchCV.cv_results_ returns a single dictionary.
It is very straightforward to convert that dictionary into a pandas DataFrame:
import pandas as pd
df = pd.DataFrame(classifier.cv_results_)
You are interested in printing only the parameters and the scores, so select the columns whose names contain 'param' or 'score':
df_columns_to_print = [column for column in df.columns if 'param' in column or 'score' in column]
print(df[df_columns_to_print])
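If you only want the book's original printout, a minimal equivalent (assuming the fitted classifier from the code above) is to zip the relevant entries of cv_results_:
# 'params' is a list of parameter dicts, 'mean_test_score' holds the matching scores.
for params, avg_score in zip(classifier.cv_results_['params'],
                             classifier.cv_results_['mean_test_score']):
    print(params, '-->', round(avg_score, 3))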

ValueError: Input contains NaN, infinity or a value too large for dtype('float32'). randomforest run

# fill NAs with -999
X = X_train.fillna(-999)
y = y_train.fillna(-999)

import lightgbm as lgb
import xgboost as xgb
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score

NFOLDS = 8
folds = KFold(n_splits=NFOLDS)

#====================================
xgb_submission = sample_submission.copy()
xgb_submission['isFraud'] = 0

for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):
    X_train_, X_valid = X.iloc[train_index], X.iloc[valid_index]
    y_train_, y_valid = y.iloc[train_index], y.iloc[valid_index]
    #xgbclf.fit(X_train_, y_train_)
    rf_clf1 = RandomForestClassifier(n_estimators=300, max_depth=10, min_samples_leaf=8,
                                     min_samples_split=8, random_state=0)
    rf_clf1.fit(X_train, y_train_)
    pred = rf_clf1.predict(X_test)
    print(pred)
I checked whether X or y has any NaN, but they don't. Still, it gives the error ValueError: Input contains NaN, infinity or a value too large for dtype('float32').
> print(type(X), type(y))
> <class 'pandas.core.frame.DataFrame'> <class 'pandas.core.series.Series'>
When does this error actually appear - while assigning the X_train_ and X_valid values, or while fitting the data to the RandomForest algorithm?
I also see from the code that you first define the X_train_ dataframe:
X_train_, X_valid = X.iloc[train_index], X.iloc[valid_index]
whereas you fit the rf_clf1 object on a different dataset (namely X_train):
rf_clf1.fit(X_train, y_train_)
So the missing _ in the variable name might be the cause here as well.
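A minimal sketch of the corrected fold body, reusing the variable names from the question; the replace() line is an extra assumption to cover the "infinity" part of the error message:
import numpy as np

# Before the KFold loop: replace infinities as well as NaNs (float32 cannot hold inf).
X = X.replace([np.inf, -np.inf], np.nan).fillna(-999)

# Inside the loop: fit on the fold's training slice and score its validation slice.
rf_clf1 = RandomForestClassifier(n_estimators=300, max_depth=10, min_samples_leaf=8,
                                 min_samples_split=8, random_state=0)
rf_clf1.fit(X_train_, y_train_)        # note the trailing underscore
pred = rf_clf1.predict(X_valid)
print(roc_auc_score(y_valid, pred))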

python LightGBM text classification with Tfidf

I'm trying to introduce LightGBM for text multiclass classification.
There are 2 columns in the pandas dataframe, 'category' and 'contents', set as follows.
Dataframe:
   contents             category
1  this is example1...  A
2  this is example2...  B
3  this is example3...  C
*The actual data frame consists of approx. 600 rows and 2 columns.
I'm trying to classify the text into 3 categories as follows.
Codes:
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
stopwords1 = set(stopwords.words('english'))
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
import lightgbm as lgbm
from lightgbm import LGBMClassifier, LGBMRegressor

#--main code--#
X_train, X_test, Y_train, Y_test = train_test_split(df['contents'], df['category'], random_state=0, test_size=0.3, shuffle=True)

count_vect = CountVectorizer(ngram_range=(1, 2), stop_words=stopwords1)
X_train_counts = count_vect.fit_transform(X_train)
tfidf_transformer = TfidfTransformer(use_idf=True, smooth_idf=True, norm='l2', sublinear_tf=True)
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)

lgbm_train = lgbm.Dataset(X_train_tfidf, Y_train)
lgbm_eval = lgbm.Dataset(count_vect.transform(X_test), Y_test, reference=lgbm_train)

params = {
    'boosting_type': 'gbdt',
    'objective': 'multiclass',
    'learning_rate': 0.02,
    'num_class': 3,
    'early_stopping': 100,
    'num_iteration': 2000,
    'num_leaves': 31,
    'is_enable_sparse': 'true',
    'tree_learner': 'data',
    'max_depth': 4,
    'n_estimators': 50
}

clf_gbm = lgbm.train(params, lgbm_train, valid_sets=lgbm_eval)
predicted_LGBM = clf_gbm.predict(count_vect.transform(X_test))
print(accuracy_score(Y_test, predicted_LGBM))
Then I got this error:
ValueError: could not convert string to float: 'b'
I also converted the 'category' column from ['a', 'b', 'c'] to int as [0, 1, 2], but then got this error:
TypeError: Expected np.float32 or np.float64, met type(int64).
What's wrong with my code?
Any advice / suggestions will be greatly appreciated.
Thanks in advance.
I managed to deal with this issue. It is very simple, but noted here for reference.
Since LightGBM expects float32/64 input, 'category' should be a number rather than a str, and the input data should be converted to float32/64 using .astype().
Change 1:
I added the following 4 lines after X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts):
X_train_tfidf = X_train_tfidf.astype('float32')
X_test_counts = X_test_counts.astype('float32')
Y_train = Y_train.astype('float32')
Y_test = Y_test.astype('float32')
Change 2:
I just converted the 'category' column from [A, B, C, ...] to [0.0, 1.0, 2.0, ...].
Maybe just setting the attribute TfidfVectorizer(dtype=np.float32) works in this case.
And putting the vectorized data into LGBMClassifier would be much simpler.
Update
Using TfidfVectorizer is much simpler:
tfidf_vec = TfidfVectorizer(dtype=np.float32, sublinear_tf=True, use_idf=True, smooth_idf=True)
X_data_tfidf = tfidf_vec.fit_transform(df['contents'])
X_train_tfidf = tfidf_vec.transform(X_train)
X_test_tfidf = tfidf_vec.transform(X_test)
clf_LGBM = lgbm.LGBMClassifier(objective='multiclass', verbose=-1, learning_rate=0.5, max_depth=20, num_leaves=50, n_estimators=120, max_bin=2000,)
clf_LGBM.fit(X_train_tfidf, Y_train, verbose=-1)
predicted_LGBM = clf_LGBM.predict(X_test_tfidf)
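If you would rather keep the original string labels than cast them to floats, a hedged alternative (variable names here are illustrative and follow the Update snippet) is to encode them with LabelEncoder and decode the predictions afterwards:
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score

le = LabelEncoder()
y_encoded = le.fit_transform(df['category'])        # 'A'/'B'/'C' -> 0/1/2
# split, vectorize and fit clf_LGBM exactly as in the Update, but on y_encoded
predicted_LGBM = clf_LGBM.predict(X_test_tfidf)     # integer class labels
print(accuracy_score(Y_test, predicted_LGBM))
print(le.inverse_transform(predicted_LGBM[:5]))     # map back to the category names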

SKLearn Naive Bayes: add feature after tfidf vectorization

So I have been tasked with training a model on phone call transcripts. The following code does this. A little background info:
- x is a list of strings, each ith element is an entire transcript
- y is a list of booleans, stating whether the outcome of a call was positive or negative.
The following code works, but here is my issue: I want to include call duration as a feature to train on. I assume that after the TF-IDF transformer vectorizes the transcripts, I would just concatenate the call duration feature to the TF-IDF output, right? Maybe this is easier than I think, but I have the transcripts and the durations in the pandas data frame you see at the beginning of the code. So if I have that data frame column (a numpy array) of durations, what do I need to do to add that feature to my model?
Additional Questions:
Am I missing a fundamental assumption about Naive Bayes model that limits me to vectorized strings?
At which step in my pipeline do I add the new feature?
Can this even be done in a pipeline or do I have to break it apart to do something like this?
Code:
import numpy as np
import pandas as pd
import random
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import cross_val_score
from sklearn.feature_selection import SelectPercentile
from sklearn.metrics import roc_auc_score
from sklearn.feature_selection import chi2

def main():
    filename = 'QA_training.pkl'
    splitRatio = 0.67
    dataframe = loadData(filename)
    x, y = getTrainingData(dataframe)
    print len(x), len(y)

    x_train, x_test = splitDataset(x, splitRatio)
    y_train, y_test = splitDataset(y, splitRatio)
    #x_train = np.asarray(x_train)

    percentiles = [10, 15, 20, 25, 30, 35, 40, 45, 50]

    MNNB_pipe = Pipeline([('vec', CountVectorizer()),
                          ('tfidf', TfidfTransformer()),
                          ('select', SelectPercentile(score_func=chi2)),
                          ('clf', MultinomialNB())])
    MNNB_param_grid = {
        #'vec__max_features': (10, 25, 50, 100, 250, 500, 1000, 2500, 5000, 10000),
        'tfidf__use_idf': (True, False),
        'tfidf__sublinear_tf': (True, False),
        'vec__binary': (True, False),
        'tfidf__norm': ('l1', 'l2'),
        'clf__alpha': (1, 0.1, 0.01, 0.001, 0.0001, 0.00001),
        'select__percentile': percentiles
    }
    MNNB_search = GridSearchCV(MNNB_pipe, param_grid=MNNB_param_grid, cv=10, scoring='roc_auc', n_jobs=-1, verbose=1)
    MNNB_search = MNNB_search.fit(x_train, y_train)
    MNNB_search_best_cv = cross_val_score(MNNB_search.best_estimator_, x_train, y_train, cv=10, scoring='roc_auc', n_jobs=-1, verbose=10)

    SGDC_pipe = Pipeline([('vec', CountVectorizer()),
                          ('tfidf', TfidfTransformer()),
                          ('select', SelectPercentile(score_func=chi2)),
                          ('clf', SGDClassifier())])
    SGDC_param_grid = {
        #'vec__max_features': [10, 25, 50, 100, 250, 500, 1000, 2500, 5000, 10000],
        'tfidf__use_idf': [True, False],
        'tfidf__sublinear_tf': [True, False],
        'vec__binary': [True, False],
        'tfidf__norm': ['l1', 'l2'],
        'clf__loss': ['modified_huber', 'log'],
        'clf__penalty': ['l1', 'l2'],
        'clf__alpha': [1e-3],
        'clf__n_iter': [5, 10],
        'clf__random_state': [42],
        'select__percentile': percentiles
    }
    SGDC_search = GridSearchCV(SGDC_pipe, param_grid=SGDC_param_grid, cv=10, scoring='roc_auc', n_jobs=-1, verbose=1)
    SGDC_search = SGDC_search.fit(x_train, y_train)
    SGDC_search_best_cv = cross_val_score(SGDC_search.best_estimator_, x_train, y_train, cv=10, scoring='roc_auc', n_jobs=-1, verbose=10)

    # pre_SGDC = SGDC_clf.predict(x_test)
    # print (np.mean(pre_SGDC == y_test))

    mydata = [{'model': MNNB_search.best_estimator_.named_steps['clf'],
               'features': MNNB_search.best_estimator_.named_steps['select'],
               'mean_cv_scores': MNNB_search_best_cv.mean()},
              #{'model': GNB_search.best_estimator_.named_steps['classifier'],
              # 'features': GNB_search.best_estimator_.named_steps['select'],
              # 'mean_cv_scores': GNB_search_best_cv.mean()},
              {'model': SGDC_search.best_estimator_.named_steps['clf'],
               'features': SGDC_search.best_estimator_.named_steps['select'],
               'mean_cv_scores': SGDC_search_best_cv.mean()}]
    model_results_df = pd.DataFrame(mydata)
    model_results_df.to_csv("best_model_results.csv")
As far as I'm aware, sklearn pipelines are API driven -- There's no real magic that happens in the pipeline itself. So, from that perspective, you should be able to create your own wrapper around TfidfVectorizer that does what you want it to do. For example, let's assume that you have a DataFrame that looks like this:
df = pd.DataFrame({'text': ['foo text', 'bar text'], 'duration': [1, 2]})
you could probably implement your transform as follows:
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

class MyVectorizer(object):
    def __init__(self, tfidf_kwargs=None):
        # use an empty dict when no kwargs are given (unpacking None would fail)
        self._tfidf = TfidfVectorizer(**(tfidf_kwargs or {}))

    def fit(self, X, y=None):
        self._tfidf.fit(X['text'], y)
        return self

    def fit_transform(self, X, y=None):
        self.fit(X)
        return self.transform(X, copy=False)

    def transform(self, X, copy=True):
        result = self._tfidf.transform(X['text'], copy=copy)
        # result is a sparse matrix. I'm not sure of a clean way
        # to add a column to a sparse matrix. If you have the
        # memory, you can use a dense matrix instead...
        return np.column_stack((result, X['duration']))
And then I think you should be all set to use this instead of the original tfidf vectorizer.
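If memory is a concern, one option (a sketch, not part of the original answer) is to keep everything sparse with scipy.sparse.hstack instead of densifying via np.column_stack:
import pandas as pd
from scipy import sparse
from sklearn.feature_extraction.text import TfidfVectorizer

df = pd.DataFrame({'text': ['foo text', 'bar text'], 'duration': [1, 2]})

tfidf = TfidfVectorizer()
text_features = tfidf.fit_transform(df['text'])                # sparse (n_docs, vocab)
duration_col = sparse.csr_matrix(df[['duration']].to_numpy())  # sparse (n_docs, 1)

# Stack the duration column next to the tf-idf features, staying sparse throughout.
X_combined = sparse.hstack([text_features, duration_col]).tocsr()
Note that MultinomialNB expects non-negative features, so a raw duration column is acceptable, but you may want to scale it so it does not dominate the tf-idf weights.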

Using explicit (predefined) validation set for grid search with sklearn

I have a dataset, which has previously been split into 3 sets: train, validation and test. These sets have to be used as given in order to compare the performance across different algorithms.
I would now like to optimize the parameters of my SVM using the validation set. However, I cannot find how to input the validation set explicitly into sklearn.grid_search.GridSearchCV(). Below is some code I've previously used for doing K-fold cross-validation on the training set. However, for this problem I need to use the validation set as given. How can I do that?
from sklearn import svm, cross_validation
from sklearn.grid_search import GridSearchCV

# (some code left out to simplify things)

skf = cross_validation.StratifiedKFold(y_train, n_folds=5, shuffle=True)
clf = GridSearchCV(svm.SVC(tol=0.005, cache_size=6000,
                           class_weight=penalty_weights),
                   param_grid=tuned_parameters,
                   n_jobs=2,
                   pre_dispatch="n_jobs",
                   cv=skf,
                   scoring=scorer)
clf.fit(X_train, y_train)
Use PredefinedSplit
ps = PredefinedSplit(test_fold=your_test_fold)
then set cv=ps in GridSearchCV
test_fold : array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1 indicates that the corresponding sample is not part of any test set folds, but will instead always be put into the training fold.
Also see here.
When using a validation set, set test_fold to 0 for all samples that are part of the validation set, and to -1 for all other samples, as in the sketch below.
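A minimal sketch of that rule (the array and variable names are illustrative; the answers below give fuller, copy-pasteable versions):
import numpy as np
from sklearn.model_selection import PredefinedSplit

# -1 = always kept in the training fold, 0 = member of test fold 0 (the validation set)
test_fold = np.concatenate([np.full(len(X_train), -1),
                            np.zeros(len(X_val), dtype=int)])
ps = PredefinedSplit(test_fold=test_fold)
# GridSearchCV(..., cv=ps) is then fitted on the concatenated train+val data.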
Consider using the hypopt Python package (pip install hypopt), of which I am an author. It is a professional package created specifically for parameter optimization with a validation set. It works with any scikit-learn model out of the box and can be used with TensorFlow, PyTorch, Caffe2, etc. as well.
# Code from https://github.com/cgnorthcutt/hypopt
# Assuming you already have train, test, val sets and a model.
from sklearn.svm import SVR
from hypopt import GridSearch

param_grid = [
    {'C': [1, 10, 100], 'kernel': ['linear']},
    {'C': [1, 10, 100], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},
]

# Grid-search all parameter combinations using a validation set.
opt = GridSearch(model=SVR(), param_grid=param_grid)
opt.fit(X_train, y_train, X_val, y_val)
print('Test Score for Optimized Parameters:', opt.score(X_test, y_test))
EDIT: I (think I) received -1's on this response because I'm suggesting a package that I authored. This is unfortunate, given that the package was created specifically to solve this type of problem.
# Import libraries
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.model_selection import PredefinedSplit

# Split data into train and validation sets
X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.8, stratify=y, random_state=2020)

# Create a list where train data indices are -1 and validation data indices are 0
split_index = [-1 if x in X_train.index else 0 for x in X.index]

# Use the list to create PredefinedSplit
pds = PredefinedSplit(test_fold=split_index)

# Use PredefinedSplit in GridSearchCV
clf = GridSearchCV(estimator=estimator,
                   cv=pds,
                   param_grid=param_grid)

# Fit with all data
clf.fit(X, y)
To add to @Vinubalan's answer: when the train-valid-test split is not done with scikit-learn's train_test_split() function, i.e. the dataframes have already been split manually beforehand and scaled/normalized to prevent leakage from the training data, the numpy arrays can be concatenated as follows.
import numpy as np
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)

from sklearn.model_selection import PredefinedSplit, GridSearchCV

split_index = [-1] * len(X_train) + [0] * len(X_val)
X = np.concatenate((X_train, X_val), axis=0)
y = np.concatenate((y_train, y_val), axis=0)
pds = PredefinedSplit(test_fold=split_index)

clf = GridSearchCV(estimator=estimator,
                   cv=pds,
                   param_grid=param_grid)

# Fit with all data
clf.fit(X, y)
I wanted to provide some reproducible code that creates a validation split using the last 20% of observations.
from sklearn import datasets
from sklearn.model_selection import PredefinedSplit, GridSearchCV
from sklearn.ensemble import GradientBoostingRegressor

# load data
df_train = datasets.fetch_california_housing(as_frame=True).data
y = datasets.fetch_california_housing().target

param_grid = {"max_depth": [5, 6],
              'learning_rate': [0.03, 0.06],
              'subsample': [.5, .75]}

model = GradientBoostingRegressor()

# Create a single validation split
val_prop = .2
n_val_rows = round(len(df_train) * val_prop)
val_starting_index = len(df_train) - n_val_rows
cv = PredefinedSplit([-1 if i < val_starting_index else 0 for i in df_train.index])

# Use PredefinedSplit in GridSearchCV
results = GridSearchCV(estimator=model,
                       cv=cv,
                       param_grid=param_grid,
                       verbose=True,
                       n_jobs=-1)

# Fit with all data
results.fit(df_train, y)
results.best_params_
The cv argument of the SearchCV classes (Grid or Randomized) can also simply be an iterable of (train, validation) index pairs, i.e. cv=((train_idcs, val_idcs),).
Note that the data on which the search estimator is fitted should be the combined train+val set, and the specified indices will be used by sklearn to separate the two internally. Additionally, when working with dataframes, the specified indices should be accessible as ilocs, so reset the indices (without dropping them if they will be required later).
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import (
    train_test_split,
    RandomizedSearchCV,
)

data = load_iris(as_frame=True)["frame"]

# These indices will serve as the explicit, predefined split
train_idcs, val_idcs = train_test_split(
    data.index,
    random_state=42,
    stratify=data.target,
)

param_grid = dict(
    n_estimators=[50, 100, 150, 200],
    max_samples=[0.85, 0.9, 0.95, 1],
    max_depth=[3, 5, 7, 10],
    max_features=["sqrt", "log2", 0.85, 0.9, 0.95, 1],
)

search_clf = RandomizedSearchCV(
    estimator=RandomForestClassifier(),
    param_distributions=param_grid,
    n_iter=50,
    cv=((train_idcs, val_idcs),),  # explicit predefined split in terms of indices
    random_state=42,
)

# X is the first 4 columns, i.e. the sepal and petal widths and lengths,
# and y is the 5th column, i.e. the target column
search_clf.fit(X=data.iloc[:, :4], y=data.target)
Also, be mindful of whether you want to refit on the whole data (train+val) or only on the training data, and retrain the classifier with the best-found parameters accordingly; see the sketch below.
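For the refit consideration in particular, one possible pattern (a sketch, under the assumption of single-metric scoring, where best_params_ is still populated when refit=False) is to disable the automatic refit, which would otherwise retrain on train+val, and refit manually on the training indices only:
search_clf = RandomizedSearchCV(
    estimator=RandomForestClassifier(),
    param_distributions=param_grid,
    n_iter=50,
    cv=((train_idcs, val_idcs),),
    refit=False,                      # skip the automatic refit on train+val
    random_state=42,
)
search_clf.fit(X=data.iloc[:, :4], y=data.target)

# Retrain on the training split only, with the best-found parameters.
best_clf = RandomForestClassifier(**search_clf.best_params_, random_state=42)
best_clf.fit(data.loc[train_idcs, data.columns[:4]], data.target.loc[train_idcs])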
