Improve precision of my predictive technique in Python

I am using the following Python code to predict an output from some input values with decision trees, using either entropy or the Gini index. My input data is in this file: https://drive.google.com/file/d/1C8GZ2wiqFUW3WuYxyc0G3axgkM1Uwsb6/view?usp=sharing
The first column "gold" in the file contains the output that I am trying to predict (either T or N). The remaining columns represent 0/1 data that I can use to predict the first column. I am using a 30% test set and a 70% training set. I get the same precision/recall using either entropy or the Gini index: a precision of 0.80 for T and a recall of 0.54 for T. I would like to increase the precision of T, and I am fine if the recall for T goes down; I am willing to accept this tradeoff. I do not care about the precision/recall of the N predictions; improving the precision of T is all I care about. I guess increasing the precision means that we should abstain from making predictions in situations where we are not certain. How do I do that?
# Run this program on your local python
# interpreter, provided you have installed
# the required libraries.
# Importing the required packages
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.ensemble import ExtraTreesClassifier
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from io import StringIO  # sklearn.externals.six is no longer available in recent scikit-learn
from IPython.display import Image
from sklearn.tree import export_graphviz
from sklearn import tree
import collections
import pydotplus
# Function importing Dataset
column_count =0
def importdata():
balance_data = pd.read_csv( 'data1extended.txt', sep= ',')
row_count, column_count = balance_data.shape
# Printing the dataset shape
print ("Dataset Length: ", len(balance_data))
print ("Dataset Shape: ", balance_data.shape)
print("Number of columns ", column_count)
# Printing the dataset observations
print ("Dataset: ",balance_data.head())
return balance_data, column_count
def columns(balance_data):
row_count, column_count = balance_data.shape
return column_count
# Function to split the dataset
def splitdataset(balance_data, column_count):
# Separating the target variable
X = balance_data.values[:, 1:column_count]
Y = balance_data.values[:, 0]
# Splitting the dataset into train and test
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size = 0.3, random_state = 100)
return X, Y, X_train, X_test, y_train, y_test
# Function to perform training with giniIndex.
def train_using_gini(X_train, X_test, y_train):
# Creating the classifier object
clf_gini = DecisionTreeClassifier(criterion = "gini",
random_state = 100,max_depth=3, min_samples_leaf=5)
# Performing training
clf_gini.fit(X_train, y_train)
return clf_gini
# Function to perform training with entropy.
def train_using_entropy(X_train, X_test, y_train):
# Decision tree with entropy
clf_entropy = DecisionTreeClassifier(
criterion = "entropy", random_state = 100,
max_depth = 3, min_samples_leaf = 5)
# Performing training
clf_entropy.fit(X_train, y_train)
return clf_entropy
# Function to make predictions
def prediction(X_test, clf_object):
# Prediction on the test set with the given classifier
y_pred = clf_object.predict(X_test)
print("Predicted values:")
print(y_pred)
return y_pred
# Function to calculate accuracy
def cal_accuracy(y_test, y_pred):
print("Confusion Matrix: ",
confusion_matrix(y_test, y_pred))
print ("Accuracy : ",
accuracy_score(y_test,y_pred)*100)
print("Report : ",
classification_report(y_test, y_pred))
#Univariate selection
def selection(column_count, data):
# data = pd.read_csv("data1extended.txt")
X = data.iloc[:,1:column_count] #independent columns
y = data.iloc[:,0] #target column i.e. gold (T or N)
#apply SelectKBest class to extract the top 5 best features
bestfeatures = SelectKBest(score_func=chi2, k=5)
fit = bestfeatures.fit(X,y)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(X.columns)
df=pd.DataFrame(data, columns=X.columns)
#concat two dataframes for better visualization
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
print(featureScores.nlargest(5,'Score')) #print 5 best features
return X,y,data,df
#Feature importance
def feature(X,y):
model = ExtraTreesClassifier()
model.fit(X,y)
print(model.feature_importances_) #use inbuilt class feature_importances of tree based classifiers
#plot graph of feature importances for better visualization
feat_importances = pd.Series(model.feature_importances_, index=X.columns)
feat_importances.nlargest(5).plot(kind='barh')
plt.show()
#Correlation Matrix
def correlation(data, column_count):
corrmat = data.corr()
top_corr_features = corrmat.index
plt.figure(figsize=(column_count,column_count))
#plot heat map
g=sns.heatmap(data[top_corr_features].corr(),annot=True,cmap="RdYlGn")
def generate_decision_tree(X,y):
clf = DecisionTreeClassifier(random_state=0)
data_feature_names = ['callersAtLeast1T','CalleesAtLeast1T','callersAllT','calleesAllT','CallersAtLeast1N','CalleesAtLeast1N','CallersAllN','CalleesAllN','childrenAtLeast1T','parentsAtLeast1T','childrenAtLeast1N','parentsAtLeast1N','childrenAllT','parentsAllT','childrenAllN','ParentsAllN','ParametersatLeast1T','FieldMethodsAtLeast1T','ReturnTypeAtLeast1T','ParametersAtLeast1N','FieldMethodsAtLeast1N','ReturnTypeN','ParametersAllT','FieldMethodsAllT','ParametersAllN','FieldMethodsAllN']
#generate model
model = clf.fit(X, y)
# Create DOT data
dot_data = tree.export_graphviz(clf, out_file=None,
feature_names=data_feature_names,
class_names=y)
# Draw graph
graph = pydotplus.graph_from_dot_data(dot_data)
# Show graph
Image(graph.create_png())
# Create PDF
graph.write_pdf("tree.pdf")
# Create PNG
graph.write_png("tree.png")
# Driver code
def main():
# Building Phase
data,column_count = importdata()
X, Y, X_train, X_test, y_train, y_test = splitdataset(data, column_count)
clf_gini = train_using_gini(X_train, X_test, y_train)
clf_entropy = train_using_entropy(X_train, X_test, y_train)
# Operational Phase
print("Results Using Gini Index:")
# Prediction using gini
y_pred_gini = prediction(X_test, clf_gini)
cal_accuracy(y_test, y_pred_gini)
print("Results Using Entropy:")
# Prediction using entropy
y_pred_entropy = prediction(X_test, clf_entropy)
cal_accuracy(y_test, y_pred_entropy)
#COMMENTED OUT THE 4 FOLLOWING LINES DUE TO MEMORY ERROR
#X,y,dataheaders,df=selection(column_count,data)
#generate_decision_tree(X,y)
#feature(X,y)
#correlation(dataheaders,column_count)
# Calling main function
if __name__=="__main__":
main()

I would suggest using Pipeline to build a data pipeline and GridSearchCV to find the best possible hyper-parameters and classifier for the pipe.
A basic example:
from sklearn.tree import DecisionTreeClassifier
from sklearn.feature_selection import SelectKBest, chi2, f_classif
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
import numpy as np

pipe = Pipeline([('kbest', SelectKBest(chi2, k=5)),
                 ('clf', DecisionTreeClassifier())])

pipe_params = {'kbest__k': range(1, 10, 1),
               'kbest__score_func': [f_classif, chi2],
               'clf__max_depth': np.arange(1, 30),
               'clf__min_samples_leaf': [1, 2, 4, 5, 10, 20, 30, 40, 80, 100]}

grid_search = GridSearchCV(pipe, pipe_params, n_jobs=-1,
                           scoring='accuracy', cv=10)
grid_search.fit(X_train, y_train)
This will iterate over every hyper-parameter combination in pipe_params and choose the best pipeline based on cross-validated accuracy.
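Since the goal here is the precision of the T class rather than overall accuracy, you can also score the grid search on that metric directly. A minimal sketch, assuming the labels are the strings 'T' and 'N' as in the question:
from sklearn.metrics import make_scorer, precision_score

# rank candidate pipelines by the precision of the 'T' class only
precision_T = make_scorer(precision_score, pos_label='T')

grid_search = GridSearchCV(pipe, pipe_params, n_jobs=-1,
                           scoring=precision_T, cv=10)
grid_search.fit(X_train, y_train)
print(grid_search.best_params_, grid_search.best_score_)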

Related

TypeError: Singleton array -1.2180335273374168 cannot be considered a valid collection

# Import libraries:
## Basic libs:
import pandas as pd
import numpy as np
import warnings
## Data Visualization:
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler # Scale Numeric Data
from sklearn.preprocessing import OneHotEncoder # Encoding Categorical Data
from sklearn.model_selection import train_test_split # Set Training and Testing Data
from sklearn import preprocessing
from sklearn import utils
from sklearn.datasets import load_iris
# Configure libraries:
warnings.filterwarnings('ignore')
plt.rcParams['figure.figsize'] = (10, 10)
plt.style.use('seaborn')
# Load dataset:
df_bank = pd.read_csv("'...'.csv")
# Drop last row by selecting all rows except last row
df_bank = df_bank.iloc[:-1 , :]
df_bank.pop('Post Message')
df_bank.pop('Posted')
# print('Shape of dataframe:', df_bank.shape)
print(df_bank.head())
# Handle missing data:
df_bank = df_bank.fillna("Unknown") # Imputation for Categorical Data
# Scale Numeric Data:
df_bank_ready = df_bank.copy() # Copying original dataframe
scaler = StandardScaler()
num_cols = ['Impressions']
df_bank_ready[num_cols] = scaler.fit_transform(df_bank[num_cols])
# Encoding Categorical Data:
encoder = OneHotEncoder(sparse=False)
cat_cols = ['Type','Weather','Weekend']
df_encoded = pd.DataFrame(encoder.fit_transform(df_bank_ready[cat_cols])) # Encode Categorical Data
df_encoded.columns = encoder.get_feature_names(cat_cols)
df_bank_ready = df_bank_ready.drop(cat_cols ,axis=1) # Replace Categotical Data with Encoded Data
df_bank_ready = pd.concat([df_encoded, df_bank_ready], axis=1)
df_bank_ready = df_bank_ready.astype(float) # Encode target value
# Split Dataset for Training and Testing:
feature = df_bank_ready.drop('Impressions', axis=1) # Select Features
print("feat:")
print(feature)
target = df_bank_ready['Impressions'] # Select Target
print("target:")
print(target)
# Set Training and Testing Data:
X_train, X_test, y_train, y_test = train_test_split(feature , target,
shuffle = True,
test_size=0.2,
random_state=1)
# Modelling:
def evaluate_model(model, x_test, y_test):
from sklearn import metrics
# Predict Test Data:
y_pred = model.predict(x_test)
# convert the continuous values of the response variable to categorical value:
lab = preprocessing.LabelEncoder()
y_pred = lab.fit_transform(y_pred)
y_test = lab.fit_transform(y_test)
prec = metrics.precision_score(y_test, y_pred,
pos_label='positive',
average='micro')
# rec = metrics.recall_score(y_test, y_pred)
rec = metrics.recall_score(y_test, y_pred,
average='micro',
pos_label='positive')
f1 = metrics.f1_score(y_test, y_pred,
average='micro',
pos_label='positive')
kappa = metrics.cohen_kappa_score(y_test, y_pred)
# Calculate area under curve (AUC):
y_pred_proba = model.predict(x_test)[1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba,
pos_label='positive')
auc = metrics.roc_auc_score(y_test, y_pred_proba)
# Display confusion matrix:
cm = metrics.confusion_matrix(y_test, y_pred)
return {
# 'acc': acc,
'prec': prec, 'rec': rec
#, 'f1': f1, 'kappa': kappa,'fpr': fpr, 'tpr': tpr, 'auc': auc, 'cm': cm
}
# Building Model:
from sklearn import tree
from sklearn.tree import DecisionTreeRegressor
# Building Decision Tree model:
dtc = DecisionTreeRegressor(random_state=0)
dtc.fit(X_train, y_train)
# Evaluate Model:
dtc_eval = evaluate_model(dtc, X_test, y_test)
# Print result:
print('Precision:', dtc_eval['prec'])
print('Recall:', dtc_eval['rec'])
I am trying to build a supervised machine learning classifier to predict whether the impressions of a post will be greater than 1,000, based on the type of post, weekday or weekend, and weather.
When attempting to implement the classifier and test it in Python to calculate precision and recall, I keep running into various errors, including the one in the title.
Thank you.
Tutorial site reference:
Building Classification Model with Python by Rafi Atha
https://medium.com/analytics-vidhya/building-classification-model-with-python-9bdfc13faa4b
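The error in the title is usually raised when a single scalar is passed to a metric that expects an array (here, model.predict(x_test)[1] is one regression prediction, which roc_curve cannot treat as a collection). Since the stated goal is to predict whether impressions exceed 1,000, one option is to binarize the target and train a classifier instead of DecisionTreeRegressor. A minimal, hedged sketch, assuming df_bank still holds the raw 'Impressions' column and df_bank_ready the encoded features prepared above:
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import precision_score, recall_score

# binary target: did the post get more than 1,000 impressions?
target = (df_bank['Impressions'] > 1000).astype(int)
feature = df_bank_ready.drop('Impressions', axis=1)

X_train, X_test, y_train, y_test = train_test_split(
    feature, target, shuffle=True, test_size=0.2, random_state=1)

clf = DecisionTreeClassifier(random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)

# pos_label=1 is the ">1,000 impressions" class
print('Precision:', precision_score(y_test, y_pred, pos_label=1))
print('Recall:', recall_score(y_test, y_pred, pos_label=1))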

Cross validation decision tree

After writing a decision tree function, I decided to check how accurate the tree is, and to confirm that at least the first split stays the same if I build other trees from the same data.
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import os
from sklearn import tree
from sklearn import preprocessing
import sys
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
.....
def decision_tree(data_set: pd.DataFrame, val_1: str, val_2: str):
#Encoder -- > fit doesn't accept strings
feature_cols = data_set.columns[0:-1]
X = data_set[feature_cols] # Independent variables
y = data_set.Mut #class
y = y.to_list()
le = preprocessing.LabelBinarizer()
y = le.fit_transform(y)
# Split data set into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1) # 75%
# Create Decision Tree classifer object
clf = DecisionTreeClassifier(max_depth= 4, criterion= 'entropy')
# Train Decision Tree Classifer
clf.fit(X_train, y_train)
# Predict the response for test dataset
y_pred = clf.predict(X_test)
#Perform cross validation
for i in range(2, 8):
plt.figure(figsize=(14, 7))
# Perform Kfold cross validation
#cv = ShuffleSplit(test_size=0.25, random_state=0)
kf = KFold(n_splits=5,shuffle= True)
scores = cross_val_score(estimator=clf, X=X, y=y, n_jobs=4, cv=kf)
print("%0.2f accuracy with a standard deviation of %0.2f" % (scores.mean(), scores.std()))
tree.plot_tree(clf,filled = True,feature_names=feature_cols,class_names=[val_1,val_2])
plt.show()
decision_tree(car_rep_sep_20, 'Categorial', 'Non categorial')
In the loop, I tried to recreate the tree with the split values using KFold. The accuracy changes (around 90%) but the tree stays the same; where is my mistake?
cross_val_score clones the estimator in order to fit-and-score on the various folds, so the clf object remains the same as when you fit it before the loop, and so the plotted tree is that one rather than any of the cross-validated ones.
To get what you're after, I think you can use cross_validate with option return_estimator=True. You also shouldn't need the loop, if your cv object has the number of splits desired:
kf = KFold(n_splits=5, shuffle=True)
cv_results = cross_validate(
estimator=clf,
X=X,
y=y,
n_jobs=4,
cv=kf,
return_estimator=True,
)
print("%0.2f accuracy with a standard deviation of %0.2f" % (
cv_results['test_score'].mean(),
cv_results['test_score'].std(),
))
for est in cv_results['estimator']:
tree.plot_tree(est, filled=True, feature_names=feature_cols, class_names=[val_1, val_2])
plt.show();
Alternatively, loop manually over the folds (or other cv iteration), fitting the model and plotting its tree in the loop.
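A minimal sketch of that manual loop, assuming clf, X, y, feature_cols, val_1 and val_2 from the function above:
from sklearn.base import clone
from sklearn.model_selection import KFold
from sklearn import tree
import matplotlib.pyplot as plt

kf = KFold(n_splits=5, shuffle=True)
for fold, (train_idx, test_idx) in enumerate(kf.split(X), start=1):
    fold_clf = clone(clf)                          # fresh, unfitted copy per fold
    fold_clf.fit(X.iloc[train_idx], y[train_idx])
    print("Fold %d accuracy: %0.2f" % (fold, fold_clf.score(X.iloc[test_idx], y[test_idx])))
    plt.figure(figsize=(14, 7))
    tree.plot_tree(fold_clf, filled=True,
                   feature_names=feature_cols,
                   class_names=[val_1, val_2])
    plt.show()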

KNN model, accuracy(clf.score) returns 0

I am working on a simple KNN model with 3 nearest neighbours to predict a weight.
However, the accuracy is 0.0 and I don't know why.
The code does give me a weight prediction of 58/59.
This is the reproducible code:
import numpy as np
from sklearn import preprocessing, neighbors
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn.metrics import accuracy_score
#Create df
data = {"ID":[i for i in range(1,11)],
"Height":[5,5.11,5.6,5.9,4.8,5.8,5.3,5.8,5.5,5.6],
"Age":[45,26,30,34,40,36,19,28,23,32],
"Weight": [77,47,55,59,72,60,40,60,45,58]
}
df = pd.DataFrame(data, columns = [x for x in data.keys()])
print("This is the original df:")
print(df)
#Feature Engineering
df.drop(["ID"], 1, inplace = True)
X = np.array(df.drop(["Weight"],1))
y = np.array(df["Weight"])
#define training and testing
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size =0.2)
#Build clf with n =3
clf = neighbors.KNeighborsClassifier(n_neighbors=3)
clf.fit(X_train, y_train)
#accuracy
accuracy = clf.score(X_test, y_test)
print("\n accuracy = ", accuracy)
#Prediction on 11th
ans = np.array([5.5,38])
ans = ans.reshape(1,-1)
prediction = clf.predict(ans)
print("\nThis is the ans: ", prediction)
You are classifying Weight which is a continuous (not a discrete) variable. This should be a regression rather than a classification. Try KNeighborsRegressor.
To evaluate your result, use metrics for regression such as R2 score.
If your score is low, that can mean different things: training set too small, test set too different from training set, regression model not adequate...
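A minimal sketch of that change, reusing the train/test split from the question:
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import r2_score

reg = KNeighborsRegressor(n_neighbors=3)
reg.fit(X_train, y_train)

# R2 score on the held-out data (with only 2 test rows this will be very noisy)
print("R2 =", r2_score(y_test, reg.predict(X_test)))

# predicted weight for the 11th person (height 5.5, age 38)
print("Predicted weight:", reg.predict([[5.5, 38]]))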

How to improve the ML model in order to improve accuracy

I am writing a Python script for sentiment analysis. I preprocessed the text, vectorized the categorical features, and split the dataset, then used a LogisticRegression model and got 84% accuracy.
When I load a new dataset and try to deploy the created model, I get 51.84% accuracy.
code:
import pandas as pd
import numpy as np
import re
import string
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer,CountVectorizer,TfidfTransformer
from sklearn.model_selection import train_test_split
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
# ML Libraries
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
stop_words = set(stopwords.words('english'))
import joblib
def load_dataset(filename, cols):
dataset = pd.read_csv(filename, encoding='latin-1')
dataset.columns = cols
return dataset
dataset = load_dataset("F:\AIenv\sentiment_analysis\input_2_balanced.csv", ["id","label","date","text"])
dataset.head()
dataset['clean_text'] = dataset['text'].apply(processTweet)
# create doc2vec vector columns
from gensim.test.utils import common_texts
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
documents = [TaggedDocument(doc, [i]) for i, doc in enumerate(dataset["clean_text"].apply(lambda x: x.split(" ")))]
# train a Doc2Vec model with our text data
model = Doc2Vec(documents, vector_size=5, window=2, min_count=1, workers=4)
# transform each document into a vector data
doc2vec_df = dataset["clean_text"].apply(lambda x: model.infer_vector(x.split(" "))).apply(pd.Series)
doc2vec_df.columns = ["doc2vec_vector_" + str(x) for x in doc2vec_df.columns]
dataset = pd.concat([dataset, doc2vec_df], axis=1)
# add tf-idfs columns
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(min_df = 10)
tfidf_result = tfidf.fit_transform(dataset["clean_text"]).toarray()
tfidf_df = pd.DataFrame(tfidf_result, columns = tfidf.get_feature_names())
tfidf_df.columns = ["word_" + str(x) for x in tfidf_df.columns]
tfidf_df.index = dataset.index
dataset = pd.concat([dataset, tfidf_df], axis=1)
x = dataset.iloc[:,3]
y = dataset.iloc[:,1]
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size = 0.20, random_state = 42)
from sklearn.pipeline import Pipeline
# create pipeline
pipeline = Pipeline([
('bow', CountVectorizer(strip_accents='ascii',
stop_words=['english'],
lowercase=True)),
('tfidf', TfidfTransformer()),
('classifier', LogisticRegression(C=15.075475376884423,penalty="l2")),
])
# Parameter grid settings for LogisticRegression
parameters = {'bow__ngram_range': [(1, 1), (1, 2)],
'tfidf__use_idf': (True, False),
}
grid = GridSearchCV(pipeline, cv=10, param_grid=parameters, verbose=1,n_jobs=-1)
grid.fit(X_train,y_train)
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
#get predictions from best model above
y_preds = grid.predict(X_test)
cm = confusion_matrix(y_test, y_preds)
print("accuracy score: ",accuracy_score(y_test,y_preds))
print("\n")
print("confusion matrix: \n",cm)
print("\n")
print(classification_report(y_test,y_preds))
joblib.dump(grid,"F:\\AIenv\\sentiment_analysis\\RF_jupyter.pkl")
RF_Model = joblib.load("F:\\AIenv\\sentiment_analysis\\RF_jupyter.pkl")
test_twtr_preds = RF_Model.predict(test_twtr["clean_text"])
I have conducted survey research on the performance of different classifiers in sentiment analysis.
For a specific Twitter dataset, I ran models such as Logistic Regression, Naïve Bayes, Support Vector Machine, k-nearest neighbours (KNN), and Decision Tree.
On that dataset, Logistic Regression and Naïve Bayes performed best in all tests, followed by SVM, then Decision Tree; KNN scored lowest. Overall, Logistic Regression and Naïve Bayes perform comparatively better for sentiment analysis and prediction.
Sentiment classifier results (accuracy score, RMSE):
LR   78.3541    1.053619
NB   76.764706  1.064738
SVM  73.5835    1.074752
DT   69.2941    1.145234
KNN  62.9476    1.376589
Feature extraction is very critical in these cases.
This may help you:
# Importing essentials
import pandas as pd
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
import time
df = pd.read_csv('FilePath', header=0)
X = df['content']
y = df['sentiment']
def lrSentimentAnalysis(n):
# Using CountVectorizer to convert text into tokens/features
vect = CountVectorizer(ngram_range=(1, 1))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, test_size=n)
# Using training data to transform text into counts of features for each message
vect.fit(X_train)
X_train_dtm = vect.transform(X_train)
X_test_dtm = vect.transform(X_test)
# dual = [True, False]
max_iter = [100, 110, 120, 130, 140, 150]
C = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5]
solvers = ['newton-cg', 'lbfgs', 'liblinear']
param_grid = dict(max_iter=max_iter, C=C, solver=solvers)
LR1 = LogisticRegression(penalty='l2', multi_class='auto')
grid = GridSearchCV(estimator=LR1, param_grid=param_grid, cv=10, n_jobs=-1)
grid_result = grid.fit(X_train_dtm, y_train)
# Summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
y_pred = grid_result.predict(X_test_dtm)
print ('Accuracy Score: ', metrics.accuracy_score(y_test, y_pred) * 100, '%')
# print('Confusion Matrix: ',metrics.confusion_matrix(y_test,y_pred))
# print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
# print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print ('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
return [n, metrics.accuracy_score(y_test, y_pred) * 100, grid_result.best_estimator_.get_params()['max_iter'],
grid_result.best_estimator_.get_params()['C'], grid_result.best_estimator_.get_params()['solver']]
def drawConfusionMatrix(accList):
# Using CountVectorizer to convert text into tokens/features
vect = CountVectorizer(ngram_range=(1, 1))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, test_size=accList[0])
# Using training data to transform text into counts of features for each message
vect.fit(X_train)
X_train_dtm = vect.transform(X_train)
X_test_dtm = vect.transform(X_test)
# Accuracy using Logistic Regression Model
LR = LogisticRegression(penalty='l2', max_iter=accList[2], C=accList[3], solver=accList[4])
LR.fit(X_train_dtm, y_train)
y_pred = LR.predict(X_test_dtm)
# creating a heatmap for confusion matrix
data = metrics.confusion_matrix(y_test, y_pred)
df_cm = pd.DataFrame(data, columns=np.unique(y_test), index=np.unique(y_test))
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
plt.figure(figsize=(10, 7))
sns.set(font_scale=1.4) # for label size
sns.heatmap(df_cm, cmap="Blues", annot=True, annot_kws={"size": 16}) # font size
fig0 = plt.gcf()
fig0.show()
fig0.savefig('FilePath', dpi=100)
def findModelWithBestAccuracy(accList):
accuracyList = []
for item in accList:
accuracyList.append(item[1])
N = accuracyList.index(max(accuracyList))
print('Best Model:', accList[N])
return accList[N]
accList = []
print('Logistic Regression')
print('grid search method for hyperparameter tuning (accuracy by cross validation)')
for i in range(2, 7):
n = i / 10.0
print ("\nsplit ", i - 1, ": n=", n)
accList.append(lrSentimentAnalysis(n))
drawConfusionMatrix(findModelWithBestAccuracy(accList))
Preprocessing is a vital part of building a well-performing classifier. When you have such a large discrepancy between training and test set performance, it is likely that some error has occurred in your preprocessing (of your test set).
A classifier is also available without any programming; the insight classifiers web service, for example, shows how sentiments can be classified from keywords in mails, and you can try a free build first.
Your new data can be very different from the first dataset you used to train and test your model. Preprocessing techniques and statistical analysis will help you characterise your data and compare different datasets. Poor performance on new data can be observed for various reasons, including:
your initial dataset is not statistically representative of the broader data (for example, your dataset is a corner case)
overfitting: you over-train your model so that it incorporates specificities (noise) of the training data
different preprocessing methods between the two datasets (see the sketch after this list)
an unbalanced training dataset; ML techniques work best with a balanced dataset (equal occurrence of the different classes in the training set)
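On the preprocessing point: whatever cleaning and vectorization was fitted on the training data has to be applied unchanged to the new dataset; refitting a vectorizer on new data produces a different feature space and hurts accuracy. A minimal sketch, assuming the pipeline saved in the question and its processTweet cleaning function (the new file name is just a placeholder):
import joblib
import pandas as pd

# load the pipeline exactly as it was fitted on the training data
model = joblib.load("F:\\AIenv\\sentiment_analysis\\RF_jupyter.pkl")

new_data = pd.read_csv("new_dataset.csv")                       # placeholder path
new_data['clean_text'] = new_data['text'].apply(processTweet)   # same cleaning as training

# only predict/transform on the new data; never call fit or fit_transform here
new_preds = model.predict(new_data['clean_text'])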

Python: the evaluation metric values differ largely between cross_validate and train_test_split

I wrote a program that uses Support Vector Regression (SVR) to predict. First, I split the dataset into a training set and a test set, with a test ratio of 20% (case 1); second, I used cross-validation, splitting the dataset into 5 folds (case 2). However, when I evaluate the two methods with the same metrics (R2, MAE, MSE), the results are quite different.
The program is as follows:
dataset = pd.read_csv('Dataset/allGlassStraightThroughTube.csv')
tube_par = dataset.iloc[:, 3:8].values
tube_eff = dataset.iloc[:, -1:].values
# # form train dataset , test dataset
tube_par_X_train, tube_par_X_test, tube_eff_Y_train, tube_eff_Y_test = train_test_split(tube_par, tube_eff, random_state=33, test_size=0.2)
# normalize the data
sc_X = StandardScaler()
sc_Y = StandardScaler()
sc_tube_par_X_train = sc_X.fit_transform(tube_par_X_train)
sc_tube_par_X_test = sc_X.transform(tube_par_X_test)
sc_tube_eff_Y_train = sc_Y.fit_transform(tube_eff_Y_train)
sc_tube_eff_Y_test = sc_Y.transform(tube_eff_Y_test)
# fit rbf SVR to the sc_tube_par_X dataset
support_vector_regressor = SVR(kernel='rbf')
support_vector_regressor.fit(sc_tube_par_X_train, sc_tube_eff_Y_train)
#
# # predict new result according to the sc_tube_par_X Dataset
pre_sc_tube_eff_Y_test = support_vector_regressor.predict(sc_tube_par_X_test)
pre_tube_eff_Y_test = sc_Y.inverse_transform(pre_sc_tube_eff_Y_test)
# calculate the predict quality
print('R2-score value rbf SVR')
print(r2_score(sc_Y.inverse_transform(sc_tube_eff_Y_test), sc_Y.inverse_transform(pre_sc_tube_eff_Y_test)))
print('The mean squared error of rbf SVR is')
print(mean_squared_error(sc_Y.inverse_transform(sc_tube_eff_Y_test), sc_Y.inverse_transform(pre_sc_tube_eff_Y_test)))
print('The mean absolute error of rbf SVR is')
print(mean_absolute_error(sc_Y.inverse_transform(sc_tube_eff_Y_test), sc_Y.inverse_transform(pre_sc_tube_eff_Y_test)))
# normalize
sc_tube_par_X = sc_X.fit_transform(tube_par)
sc_tube_eff_Y = sc_Y.fit_transform(tube_eff)
scoring = ['r2','neg_mean_squared_error', 'neg_mean_absolute_error']
rbf_svr_regressor = SVR(kernel='rbf')
scores = cross_validate(rbf_svr_regressor, sc_tube_par_X, sc_tube_eff_Y, cv=5, scoring=scoring, return_train_score=False)
In case 1, the evaluation metric output is:
R2-score value rbf SVR
0.6486074476528559
The mean squared error of rbf SVR is
0.00013501023459497165
The mean absolute error of rbf SVR is
0.007196636233830076
In case 2, the evaluation metric output is:
R2-score
0.2621779727614816
test_neg_mean_squared_error
-0.6497292887710239
test_neg_mean_absolute_error
-0.5629408849740231
The difference between case 1 and case 2 is big. Could you please tell me the reason and how to correct it?
Bin.
I have prepared a little example to see how the results change using cross-validation. I recommend trying to split the data without a seed and seeing how the results change.
You will see that the cross-validation results stay almost constant, independently of the data split.
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split,cross_validate
#from sklearn.cross_validation import train_test_split
from sklearn.svm import SVR
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error
import matplotlib.pyplot as plt
def print_metrics(real_y,predicted_y):
# calculate the predict quality
print('R2-score value {:>8.4f}'.format(r2_score(real_y, predicted_y)))
print('Mean squared error is {:>8.4f}'.format(mean_squared_error(real_y, predicted_y)))
print('Mean absolute error is {:>8.4f}\n\n'.format(mean_absolute_error(real_y, predicted_y)))
def show_plot(real_y,predicted_y):
fig,ax = plt.subplots()
ax.scatter(real_y,predicted_y,edgecolors=(0,0,0))
ax.plot([real_y.min(),real_y.max()],[real_y.min(),real_y.max()],"k--",lw=4)
ax.set_xlabel("Measured")
ax.set_ylabel("Predicted")
plt.show()
# dataset load
boston = datasets.load_boston()
#dataset info
# print(boston.keys())
# print(boston.DESCR)
# print(boston.data.shape)
# print(boston.feature_names)
# numpy_arrays
X = boston.data
Y = boston.target
# # form train dataset , test dataset
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
#X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=33, test_size=0.2)
#X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=5, test_size=0.2)
# fit scalers
sc_X = StandardScaler().fit(X_train)
# standarizes X (train and test)
X_train = sc_X.transform(X_train)
X_test = sc_X.transform(X_test)
######################################################################
############################### SVR ##################################
######################################################################
support_vector_regressor = SVR(kernel='rbf')
support_vector_regressor.fit(X_train, Y_train)
predicted_Y = support_vector_regressor.predict(X_test)
print_metrics(Y_test, predicted_Y)
show_plot(Y_test, predicted_Y)
######################################################################
########################### LINEAR REGRESSOR #########################
######################################################################
lin_model = LinearRegression()
lin_model.fit(X_train, Y_train)
predicted_Y = lin_model.predict(X_test)
print_metrics(Y_test, predicted_Y)
show_plot(Y_test, predicted_Y)
######################################################################
######################### SVR + CROSS VALIDATION #####################
######################################################################
sc = StandardScaler().fit(X)
standarized_X = sc.transform(X)
scoring = ['r2','neg_mean_squared_error', 'neg_mean_absolute_error']
rbf_svr_regressor = SVR(kernel='rbf')
scores = cross_validate(rbf_svr_regressor, standarized_X, Y, cv=10, scoring=scoring, return_train_score=False)
print(scores["test_r2"].mean())
print(-1*(scores["test_neg_mean_squared_error"].mean()))
print(-1*(scores["test_neg_mean_absolute_error"].mean()))
