Undesired column added in csv - python

I have this code:
import numpy as np
import pandas as pd
from sklearn import tree
train_url = "http://s3.amazonaws.com/assets.datacamp.com/course/Kaggle/train.csv"
train = pd.read_csv(train_url)
train["Sex"][train["Sex"] == "male"] = 0
train["Sex"][train["Sex"] == "female"] = 1
train["Embarked"] = train["Embarked"].fillna("S")
train["Age"] = train["Age"].fillna(train["Age"].median())
train["Embarked"][train["Embarked"] == "S"] = 0
train["Embarked"][train["Embarked"] == "C"] = 1
train["Embarked"][train["Embarked"] == "Q"] = 2
target = train["Survived"].values
features_one = train[["Pclass", "Sex", "Age", "Fare"]].values
my_tree_one = tree.DecisionTreeClassifier()
my_tree_one = my_tree_one.fit(features_one, target)
test_url = "http://s3.amazonaws.com/assets.datacamp.com/course/Kaggle/test.csv"
test = pd.read_csv(test_url)
test.Fare[152] = test["Fare"].median()
test["Sex"][test["Sex"] == "male"] = 0
test["Sex"][test["Sex"] == "female"] = 1
test["Embarked"] = test["Embarked"].fillna("S")
test["Age"] = test["Age"].fillna(test["Age"].median())
test["Embarked"][test["Embarked"] == "S"] = 0
test["Embarked"][test["Embarked"] == "C"] = 1
test["Embarked"][test["Embarked"] == "Q"] = 2
test_features = test[["Pclass", "Sex", "Age", "Fare"]].values
my_prediction = my_tree_one.predict(test_features)
PassengerId = np.array(test["PassengerId"]).astype(int)
my_solution = pd.DataFrame(my_prediction, PassengerId)
my_solution.to_csv("5.csv", index_label = ["PassangerId", "Survived"])
As you can see, I only want to save a CSV with two columns, but when I look at the file 5.csv another column called 0 has been added. Does anybody know why?

You're seeing this behaviour because you're adding two index_labels when there is only one index.
You can instead name your one column as such:
my_solution.columns = ['Survived']
And then label your index like so:
my_solution.to_csv("5.csv", index_label=["PassengerId"])
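Alternatively, a minimal sketch (reusing my_prediction and PassengerId from the question) that builds both columns explicitly and skips the index entirely:
# build the frame with two explicit columns instead of pushing PassengerId into the index
my_solution = pd.DataFrame({"PassengerId": PassengerId, "Survived": my_prediction})
my_solution.to_csv("5.csv", index=False)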

Try this slightly optimized solution:
import pandas as pd
from sklearn import tree

train_url = "http://s3.amazonaws.com/assets.datacamp.com/course/Kaggle/train.csv"
cols = ["Pclass", "Sex", "Age", "Fare"]
mappings = {
    'Sex': {'male': 0, 'female': 1},
}

def cleanup(df, mappings=mappings):
    # map non-numeric columns
    for c in mappings.keys():
        df[c] = df[c].map(mappings[c])
    # replace NaN's with the column average
    for c in df.columns[df.isnull().any()]:
        df[c].fillna(df[c].mean(), inplace=True)
    return df

# parse the train data set
train = cleanup(pd.read_csv(train_url, usecols=cols + ['Survived']))
my_tree_one = tree.DecisionTreeClassifier()
my_tree_one.fit(train.drop('Survived', axis=1), train['Survived'])

# parse the test data set
test_url = "http://s3.amazonaws.com/assets.datacamp.com/course/Kaggle/test.csv"
test = pd.read_csv(test_url, usecols=cols + ['PassengerId'])
result = test.pop('PassengerId').to_frame('PassengerId')
test = cleanup(test)
result['Survived'] = my_tree_one.predict(test)
result.to_csv("5.csv", index=False)

Related

Getting financial data into data frames for multiple tickers

Here is the code:
tickers = ['AMZN','AAPL','MSFT','DIS','GOOG']
# Created individual dataframes for each category of data and tickers
BS0=yfs.get_balance_sheet(tickers[0])
IS0=yfs.get_income_statement(tickers[0])
CF0=yfs.get_cash_flow(tickers[0])
BS0.columns = ['Period0','Period1','Period2','Period3']
IS0.columns = ['Period0','Period1','Period2','Period3']
CF0.columns = ['Period0','Period1','Period2','Period3']
BS0.columns.name = tickers[0]
IS0.columns.name = tickers[0]
CF0.columns.name = tickers[0]
BS1=yfs.get_balance_sheet(tickers[1])
IS1=yfs.get_income_statement(tickers[1])
CF1=yfs.get_cash_flow(tickers[1])
BS1.columns = ['Period0','Period1','Period2','Period3']
IS1.columns = ['Period0','Period1','Period2','Period3']
CF1.columns = ['Period0','Period1','Period2','Period3']
BS1.columns.name = tickers[1]
IS1.columns.name = tickers[1]
CF1.columns.name = tickers[1]
BS2=yfs.get_balance_sheet(tickers[2])
IS2=yfs.get_income_statement(tickers[2])
CF2=yfs.get_cash_flow(tickers[2])
BS2.columns = ['Period0','Period1','Period2','Period3']
IS2.columns = ['Period0','Period1','Period2','Period3']
CF2.columns = ['Period0','Period1','Period2','Period3']
BS2.columns.name = tickers[2]
IS2.columns.name = tickers[2]
CF2.columns.name = tickers[2]
BS3=yfs.get_balance_sheet(tickers[3])
IS3=yfs.get_income_statement(tickers[3])
CF3=yfs.get_cash_flow(tickers[3])
BS3.columns = ['Period0','Period1','Period2','Period3']
IS3.columns = ['Period0','Period1','Period2','Period3']
CF3.columns = ['Period0','Period1','Period2','Period3']
BS3.columns.name = tickers[3]
IS3.columns.name = tickers[3]
CF3.columns.name = tickers[3]
BS4=yfs.get_balance_sheet(tickers[4])
IS4=yfs.get_income_statement(tickers[4])
CF4=yfs.get_cash_flow(tickers[4])
BS4.columns = ['Period0','Period1','Period2','Period3']
IS4.columns = ['Period0','Period1','Period2','Period3']
CF4.columns = ['Period0','Period1','Period2','Period3']
BS4.columns.name = tickers[4]
IS4.columns.name = tickers[4]
CF4.columns.name = tickers[4]
I have tried writing this with for ticker in tickers logic and then converting to a data frame with pandas, but that gives me one huge data frame with all the information packed into individual cells instead of columns, and I have no idea how to spread it out in a way that makes sense for referencing.
Maybe there is a way to do this, or simply a way to create a loop that saves the different data frames as the code above does, but with fewer lines.
Thanks in advance.
You can try maintaining a dictionary of dataframes:
import pandas as pd
from yahoo_fin import stock_info as yfs  # assuming yfs is yahoo_fin's stock_info, as in the question

tickers = ['AMZN','AAPL','MSFT','DIS','GOOG']
column_names = ['Period0','Period1','Period2','Period3']
ticker_dfs = {}
for index, ticker in enumerate(tickers):
    bs_index = 'BS' + str(index)
    is_index = 'IS' + str(index)
    cf_index = 'CF' + str(index)
    ticker_dfs[bs_index] = yfs.get_balance_sheet(ticker)
    ticker_dfs[bs_index].columns = column_names
    ticker_dfs[is_index] = yfs.get_income_statement(ticker)
    ticker_dfs[is_index].columns = column_names
    ticker_dfs[cf_index] = yfs.get_cash_flow(ticker)
    ticker_dfs[cf_index].columns = column_names
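Each frame can then be looked up by its key. For example, a hypothetical usage of the dictionary built above:
aapl_income = ticker_dfs['IS' + str(tickers.index('AAPL'))]  # income statement for AAPL
print(aapl_income.head())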

Writing a for loop that appends data to a unique DF and continues the loop in the range specified

This code scrapes only one match in the range; I need the code to loop through each id in the range, scrape the specified data, add it to one df, and move on until the range is finished.
For some reason the code stops after one iteration and does not continue as desired.
import requests
from bs4 import BeautifulSoup
import json
import pandas as pd

colnames = ["x", "y", "xg", "team"]
df = pd.DataFrame(index=colnames)

for id in range(16376, 16379):
    understat = f'https://understat.com/match/{id}'
    res = requests.get(understat)
    # parsing the webpage, use .content
    soup = BeautifulSoup(res.content, "lxml")
    scripts = soup.find_all('script')
    # get only shots data, and strip data so we only have json data
    strings = scripts[1].string
    index_start = strings.index("('") + 2
    index_end = strings.index("')")
    json_data = strings[index_start:index_end]
    json_data = json_data.encode('utf8').decode('unicode_escape')
    data1 = json.loads(json_data)
    x = []
    y = []
    xg = []
    team = []
    data_home = data1["h"]
    data_away = data1["a"]
    for index in range(len(data_home)):
        for key in data_home[index]:
            if key == "X":
                x.append(data_home[index][key])
            if key == "Y":
                y.append(data_home[index][key])
            if key == "xG":
                xg.append(data_home[index][key])
            if key == "h_team":
                team.append(data_home[index][key])
    df_h = (x, y, xg, team)
    for index in range(len(data_away)):
        for key in data_away[index]:
            if key == "X":
                x.append(data_away[index][key])
            if key == "Y":
                y.append(data_away[index][key])
            if key == "xG":
                xg.append(data_away[index][key])
            if key == "a_team":
                team.append(data_away[index][key])
    df_a = (x, y, xg, team)
    continue

# create the df
colnames = ["x", "y", "xg", "team"]
df = pd.DataFrame([x, y, xg, team], index=colnames)
df = df.T
I think the problem is that you override the lists x, y, xg, team and use only the last iteration to create the df.
If your end goal is to create one dataframe containing the data from all of the iterations, you should try these steps.
Steps:
Create a list before the first loop to keep all the dataframes: list_of_dfs = []
Create df at the end of each iteration, simply by moving your df creation into the loop (one indentation).
Append the created dataframe to the list of dataframes, just after the df creation: list_of_dfs.append(df)
After the loop ends, concat all the dataframes from the list into one dataframe. You can use: final_df = pd.concat(list_of_dfs, axis=0)
Full code should go like this:
import requests
from bs4 import BeautifulSoup
import json
import pandas as pd

colnames = ["x", "y", "xg", "team"]
df = pd.DataFrame(index=colnames)
list_of_dfs = []

for id in range(16376, 16379):
    understat = f'https://understat.com/match/{id}'
    res = requests.get(understat)
    # parsing the webpage, use .content
    soup = BeautifulSoup(res.content, "lxml")
    scripts = soup.find_all('script')
    # get only shots data, and strip data so we only have json data
    strings = scripts[1].string
    index_start = strings.index("('") + 2
    index_end = strings.index("')")
    json_data = strings[index_start:index_end]
    json_data = json_data.encode('utf8').decode('unicode_escape')
    data1 = json.loads(json_data)
    x = []
    y = []
    xg = []
    team = []
    data_home = data1["h"]
    data_away = data1["a"]
    for index in range(len(data_home)):
        for key in data_home[index]:
            if key == "X":
                x.append(data_home[index][key])
            if key == "Y":
                y.append(data_home[index][key])
            if key == "xG":
                xg.append(data_home[index][key])
            if key == "h_team":
                team.append(data_home[index][key])
    df_h = (x, y, xg, team)
    for index in range(len(data_away)):
        for key in data_away[index]:
            if key == "X":
                x.append(data_away[index][key])
            if key == "Y":
                y.append(data_away[index][key])
            if key == "xG":
                xg.append(data_away[index][key])
            if key == "a_team":
                team.append(data_away[index][key])
    df_a = (x, y, xg, team)
    # create the df inside the loop and keep it
    colnames = ["x", "y", "xg", "team"]
    df = pd.DataFrame([x, y, xg, team], index=colnames)
    df = df.T
    list_of_dfs.append(df)

final_df = pd.concat(list_of_dfs, axis=0)
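A small, self-contained illustration of the collect-then-concat pattern described in the steps above (hypothetical data, not from the scrape):
import pandas as pd

list_of_dfs = []
for match_id in [16376, 16377]:
    # stand-in for the frame built from one scraped match
    df = pd.DataFrame({"x": [0.1, 0.2], "y": [0.3, 0.4], "xg": [0.05, 0.07], "team": ["Home", "Away"]})
    df["match_id"] = match_id  # optional: remember which match each row came from
    list_of_dfs.append(df)

final_df = pd.concat(list_of_dfs, ignore_index=True)  # one continuous index across matches
print(final_df)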

How to create an efficient sliding window generator for timeseries event log with caseIDs

The solution I have now works, but the downside is that it takes over 1 hour to fit a simple model. Most of the training time is lost in the Python generator, in the code called WindowGenerator.gen.
If we postfix (pad at the end of) all the data, I would think it would hurt the model's performance, since we will then have zeros for most of the data.
I have an event log with data of approximately the shape (600 000, 500).
The first feature is the group/caseID and the three last are labels. Each case has on average 6 events, but it deviates from 4 all the way to 100. In TensorFlow v1 there was a generator that seemed like what I wanted: tf.contrib.training.batch_sequences_with_states
I want to load batches that get postfixed; my solution was to create an empty 3D array in the desired shape defined by (batchsize, max case length, num_features).
The idea is to create a sliding window, where we are guessing the target columns by an offset/shift.
Example:
batch_1 = pd.DataFrame([[1,1,0.1,11],
[1,2,0.2,12],
[1,3,0.3,13],
[1,4,0.4,14],
[1,4,0.4,14],
[2,5,0.5,15],
[2,6,0.6,16],
[2,7,0.7,17],
[3,8,0.8,18],
[3,9,0.9,19],
[3,10,0.7,20]], columns=["id", "x1","x2", "target"])
# we want to be on the form before sliding:
[ [[1,1,0.1,11],
   [1,2,0.2,12],
   [1,3,0.3,13],
   [1,4,0.4,14],
   [1,4,0.4,14]],

  [[2,5,0.5,15],
   [2,6,0.6,16],
   [2,7,0.7,17],
   [2,7,0.7,17],
   [2,0,0,0],
   [2,0,0,0]],

  [[3,8,0.8,18],
   [3,9,0.9,19],
   [3,10,0.7,20],
   [3,0,0,0],
   [3,0,0,0]] ]
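A hedged sketch of the postfix-padding step itself, using tf.keras's pad_sequences on the batch_1 example above (the padded timesteps come out as all-zero rows, which is a slight simplification of the example):
import tensorflow as tf

sequences = [group.to_numpy("float64") for _, group in batch_1.groupby("id", sort=False)]
padded = tf.keras.preprocessing.sequence.pad_sequences(
    sequences, padding="post", dtype="float64", value=0.0)
print(padded.shape)  # (number of cases, longest case length, number of features)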
The splitting of data:
unique_caseids = eventvise_training_data.caseid.unique()
np.random.shuffle(unique_caseids)
n = len(unique_caseids)
train_ids = unique_caseids[0:int(n*0.7)]
val_ids = unique_caseids[int(n*0.7):int(n*0.9)]
test_ids = unique_caseids[int(n*0.9):]
train_df = eventvise_training_data[eventvise_training_data.caseid.isin(train_ids)]
val_df = eventvise_training_data[eventvise_training_data.caseid.isin(val_ids)]
test_df = eventvise_training_data[eventvise_training_data.caseid.isin(test_ids)]
The WindowGenerator object
class WindowGenerator(object):
    def __init__(self, input_width, label_width, shift,
                 train_df=train_df, val_df=val_df, test_df=test_df, id_column=0,
                 label_columns=None, batchsize=32, target_neg_ind=-3, unique_ids=None):
        # Store the raw data.
        self.train_df = train_df
        self.val_df = val_df
        self.test_df = test_df
        self.batchsize = batchsize
        self.id_column = id_column
        self.id_name = id_column
        self.unique_ids = unique_ids
        self.target_neg_ind = target_neg_ind
        self.generated_train_counter = 0
        self.generated_val_counter = 0
        self.generated_test_counter = 0
        if self.unique_ids is None:
            if type(id_column) == int:
                self.unique_train_ids = train_df[train_df.columns[id_column]].unique()
                self.unique_val_ids = val_df[val_df.columns[id_column]].unique()
                self.unique_test_ids = test_df[test_df.columns[id_column]].unique()
                self.id_name = train_df.columns[id_column]
            elif type(id_column) == str:
                self.unique_train_ids = train_df[id_column].unique()
                self.unique_val_ids = val_df[id_column].unique()
                self.unique_test_ids = test_df[id_column].unique()
        # need the length of unique ids
        self.num_unique_train = len(self.unique_train_ids)
        self.num_unique_val = len(self.unique_val_ids)
        self.num_unique_test = len(self.unique_test_ids)
        # Work out the label column indices.
        self.label_columns = label_columns
        if label_columns is not None:
            self.label_columns_indices = {name: i for i, name in
                                          enumerate(label_columns)}
        self.column_indices = {name: i for i, name in
                               enumerate(train_df.columns)}
        # Work out the window parameters.
        self.input_width = input_width
        self.label_width = label_width
        self.shift = shift
        self.total_window_size = input_width + shift
        self.input_slice = slice(0, input_width)
        self.input_indices = np.arange(self.total_window_size)[self.input_slice]
        self.label_start = self.total_window_size - self.label_width
        self.labels_slice = slice(self.label_start, None)
        self.label_indices = np.arange(self.total_window_size)[self.labels_slice]
        self.label_name = list(self.label_columns_indices.keys())

    def split_window(self, data, index_start_of_target=-3, type_of_data="train", seq_sliding=0, features=None):
        seq_sliding = 0
        if features is None:
            features = self.generate_split_by_id(data, type_of_data=type_of_data)
        if features is None:
            return None, None, None, True
        max_sliding = features.shape[1] - self.total_window_size
        if seq_sliding > max_sliding:
            return inputs, labels, features, False
        if seq_sliding < 1:
            input_slice = self.input_slice
            output_slice = self.labels_slice
        elif seq_sliding >= 1:
            input_slice = slice(0 + seq_sliding, self.input_width + seq_sliding)
            output_slice = slice(0 + seq_sliding, None)
        inputs = features[:, input_slice, :index_start_of_target]
        labels = features[:, output_slice, :]
        if self.label_columns is not None:
            labels = tf.stack(  # -1 since we have removed the id columns
                [labels[:, seq_sliding + self.label_start:seq_sliding + self.total_window_size, self.column_indices[name] - 1] for name in self.label_columns],
                axis=-1)
        # Slicing doesn't preserve static shape information, so set the shapes
        # manually.
        inputs.set_shape([None, self.input_width, None])
        labels.set_shape([None, self.label_width, len(self.label_columns)])
        return inputs, labels, features, False

    def generate_split_by_id(self, data, type_of_data):
        # to get the appropriate data for continuing on generating new batches
        counter, num_unique, unique_ids = self.get_data_info(type_of_data=type_of_data)
        start = counter
        end = counter + self.batchsize
        id_num = []
        if end > num_unique:  # to stop the batch collection before we run out of caseIDs
            end = num_unique
            print("§§Finished training on all the data -- resetting counter§§")
            flag = 1
            counter = 0
            self.set_data_info(type_of_data=type_of_data, counter=counter)
            return
        for num in range(start, end):
            id_num.append(unique_ids[num])
            counter += 1
        self.set_data_info(type_of_data=type_of_data, counter=counter)
        stacking_blocks = []
        max_timesteps = 0
        for ids in id_num[:]:
            temp = data[data[self.id_name] == ids].drop(columns=[self.id_name]).to_numpy("float64")
            if temp.shape[0] > max_timesteps:
                max_timesteps = temp.shape[0]
            stacking_blocks.append(temp)
        # will create a postfix 3d-tensor
        fill_array = np.zeros((len(id_num), max_timesteps, temp.shape[1]))
        for sample_idx, sample in enumerate(stacking_blocks):
            for time_step_idx, time_step in enumerate(sample):
                fill_array[sample_idx, time_step_idx] = time_step
        return tf.stack(fill_array)

    def gen(self, data, type_of_data):
        while 1:
            # reset the sliding
            sliding = 0
            features = None
            while 1:
                input_data, output_data, features, stop_flag = self.split_window(
                    data, index_start_of_target=self.target_neg_ind,
                    type_of_data=type_of_data, seq_sliding=sliding)
                sliding += 1
                # break when we run out of batches
                if input_data is None:
                    break
                yield input_data, output_data
            if stop_flag:
                break

    def get_data_info(self, type_of_data=None):
        if type_of_data == "train":
            counter = self.generated_train_counter
            num_unique = self.num_unique_train
            unique_ids = self.unique_train_ids
        elif type_of_data == "val":
            counter = self.generated_val_counter
            num_unique = self.num_unique_val
            unique_ids = self.unique_val_ids
        elif type_of_data == "test":
            counter = self.generated_test_counter
            num_unique = self.num_unique_test
            unique_ids = self.unique_test_ids
        return counter, num_unique, unique_ids

    def set_data_info(self, type_of_data=None, counter=0):
        if type_of_data == "train":
            self.generated_train_counter = counter
        elif type_of_data == "val":
            self.generated_val_counter = counter
        elif type_of_data == "test":
            self.generated_test_counter = counter

Is there a function to add WOE, calculated on Training data, to the whole data set? (python)

I am working on some Python code to predict the default rate of loans handed out by a bank.
I have calculated the WOE and information value (IV) on the training set
(using the following code: https://github.com/Sundar0989/WOE-and-IV/blob/master/WOE_IV.ipynb?fbclid=IwAR1MvEfyGsdyTre0uPJC5WRl91dfue_t0vH5qJezwm2mAg6sjHZJg9MyDYo).
We have also settled on 2 high-cardinality variables. We don't know, however, how to add these WOE scores to the whole set. How do we tackle this problem? How can we go further and use WOE to predict the target variable?
code:
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy, pylab
Reading the data received from the bank, feature selection part 1, and splitting the whole set (Training) into a training set (indices_traintrain), a validation set (indices_val) and a test set (indices_test): a 70/30 split between training+validation and test, then a 70/30 split between training and validation.
Training = pd.read_excel('/Users/enjo/Documents/Master/DM/Data_DSC2019_STUDENTS/DSC2019_Training.xlsx', na_values=np.nan)
Status = Training.iloc[:,-1]
Data = Training.iloc[:,0:45]
Data_missing = Data.isna()
Data_missing = Data_missing.sum()
print(Data_missing/len(Data))
"""
drop variables with more than 80% missing
"""
Drop = ['FREE_CASH_FLOW_AMT',
'A2_MTHS_FIRST_PCX_COREPROF_CNT', 'A2_MONTHS_IN_BELGIUM_CNT', 'A2_MTHS_SNC_FIRST_COREPROF_CNT', 'MONTHS_SINCE_LAST_REFUSAL_CNT']
DroppedTraining = Training.copy()
for element in Drop:
    DroppedTraining.drop(element, axis=1, inplace=True)
import numpy as np
from sklearn import datasets
from sklearn import svm
from sklearn import preprocessing
Data_preprocessed=[] #contains preprocessed data
from Preprocessing_continuous import Preprocessing_continuous #import function for preprocessing
from Preprocessing_discrete import Preprocessing_discrete #import function for preprocessing
from sklearn.model_selection import train_test_split
indices=np.arange(26962)
indices_train, indices_test = train_test_split(indices, test_size=0.3, random_state=0)
indices_traintrain, indices_val = train_test_split(indices_train, test_size=0.3, random_state=0)
Training['target']= Training['Label_Default'].apply(lambda x:1 if x=='Y' else 0)
Highcardinalityset=[]
Highcardinalityset = Training[['Type',
'INDUSTRY_CD_3',
'INDUSTRY_CD_4',
'Managing_Sales_Office_Nbr',
'Postal_Code_L',
'Product_Desc',
'CREDIT_TYPE_CD',
'ACCOUNT_PURPOSE_CD',
'A2_MARITAL_STATUS_CD',
'FINANCIAL_PRODUCT_TYPE_CD',
'A2_EMPLOYMENT_STATUS_CD',
'A2_RESIDENT_STATUS_CD',
'target']]
Highcardinalityset = Highcardinalityset.iloc[indices_traintrain]
Function found on GitHub:
import pandas as pd
import numpy as np
import pandas.core.algorithms as algos
from pandas import Series
import scipy.stats.stats as stats
import re
import traceback
import string
max_bin = 20
force_bin = 3
# define a binning function
def mono_bin(Y, X, n=max_bin):
    df1 = pd.DataFrame({"X": X, "Y": Y})
    justmiss = df1[['X','Y']][df1.X.isnull()]
    notmiss = df1[['X','Y']][df1.X.notnull()]
    r = 0
    while np.abs(r) < 1:
        try:
            d1 = pd.DataFrame({"X": notmiss.X, "Y": notmiss.Y, "Bucket": pd.qcut(notmiss.X, n)})
            d2 = d1.groupby('Bucket', as_index=True)
            r, p = stats.spearmanr(d2.mean().X, d2.mean().Y)
            n = n - 1
        except Exception as e:
            n = n - 1
    if len(d2) == 1:
        n = force_bin
        bins = algos.quantile(notmiss.X, np.linspace(0, 1, n))
        if len(np.unique(bins)) == 2:
            bins = np.insert(bins, 0, 1)
            bins[1] = bins[1]-(bins[1]/2)
        d1 = pd.DataFrame({"X": notmiss.X, "Y": notmiss.Y, "Bucket": pd.cut(notmiss.X, np.unique(bins),include_lowest=True)})
        d2 = d1.groupby('Bucket', as_index=True)
    d3 = pd.DataFrame({},index=[])
    d3["MIN_VALUE"] = d2.min().X
    d3["MAX_VALUE"] = d2.max().X
    d3["COUNT"] = d2.count().Y
    d3["EVENT"] = d2.sum().Y
    d3["NONEVENT"] = d2.count().Y - d2.sum().Y
    d3 = d3.reset_index(drop=True)
    if len(justmiss.index) > 0:
        d4 = pd.DataFrame({'MIN_VALUE':np.nan},index=[0])
        d4["MAX_VALUE"] = np.nan
        d4["COUNT"] = justmiss.count().Y
        d4["EVENT"] = justmiss.sum().Y
        d4["NONEVENT"] = justmiss.count().Y - justmiss.sum().Y
        d3 = d3.append(d4,ignore_index=True)
    d3["EVENT_RATE"] = d3.EVENT/d3.COUNT
    d3["NON_EVENT_RATE"] = d3.NONEVENT/d3.COUNT
    d3["DIST_EVENT"] = d3.EVENT/d3.sum().EVENT
    d3["DIST_NON_EVENT"] = d3.NONEVENT/d3.sum().NONEVENT
    d3["WOE"] = np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT)
    d3["IV"] = (d3.DIST_EVENT-d3.DIST_NON_EVENT)*np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT)
    d3["VAR_NAME"] = "VAR"
    d3 = d3[['VAR_NAME','MIN_VALUE', 'MAX_VALUE', 'COUNT', 'EVENT', 'EVENT_RATE', 'NONEVENT', 'NON_EVENT_RATE', 'DIST_EVENT','DIST_NON_EVENT','WOE', 'IV']]
    d3 = d3.replace([np.inf, -np.inf], 0)
    d3.IV = d3.IV.sum()
    return(d3)
def char_bin(Y, X):
    df1 = pd.DataFrame({"X": X, "Y": Y})
    justmiss = df1[['X','Y']][df1.X.isnull()]
    notmiss = df1[['X','Y']][df1.X.notnull()]
    df2 = notmiss.groupby('X',as_index=True)
    d3 = pd.DataFrame({},index=[])
    d3["COUNT"] = df2.count().Y
    d3["MIN_VALUE"] = df2.sum().Y.index
    d3["MAX_VALUE"] = d3["MIN_VALUE"]
    d3["EVENT"] = df2.sum().Y
    d3["NONEVENT"] = df2.count().Y - df2.sum().Y
    if len(justmiss.index) > 0:
        d4 = pd.DataFrame({'MIN_VALUE':np.nan},index=[0])
        d4["MAX_VALUE"] = np.nan
        d4["COUNT"] = justmiss.count().Y
        d4["EVENT"] = justmiss.sum().Y
        d4["NONEVENT"] = justmiss.count().Y - justmiss.sum().Y
        d3 = d3.append(d4,ignore_index=True)
    d3["EVENT_RATE"] = d3.EVENT/d3.COUNT
    d3["NON_EVENT_RATE"] = d3.NONEVENT/d3.COUNT
    d3["DIST_EVENT"] = d3.EVENT/d3.sum().EVENT
    d3["DIST_NON_EVENT"] = d3.NONEVENT/d3.sum().NONEVENT
    d3["WOE"] = np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT)
    d3["IV"] = (d3.DIST_EVENT-d3.DIST_NON_EVENT)*np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT)
    d3["VAR_NAME"] = "VAR"
    d3 = d3[['VAR_NAME','MIN_VALUE', 'MAX_VALUE', 'COUNT', 'EVENT', 'EVENT_RATE', 'NONEVENT', 'NON_EVENT_RATE', 'DIST_EVENT','DIST_NON_EVENT','WOE', 'IV']]
    d3 = d3.replace([np.inf, -np.inf], 0)
    d3.IV = d3.IV.sum()
    d3 = d3.reset_index(drop=True)
    return(d3)
def data_vars(df1, target):
    stack = traceback.extract_stack()
    filename, lineno, function_name, code = stack[-2]
    vars_name = re.compile(r'\((.*?)\).*$').search(code).groups()[0]
    final = (re.findall(r"[\w']+", vars_name))[-1]
    x = df1.dtypes.index
    count = -1
    for i in x:
        if i.upper() not in (final.upper()):
            if np.issubdtype(df1[i], np.number) and len(Series.unique(df1[i])) > 2:
                conv = mono_bin(target, df1[i])
                conv["VAR_NAME"] = i
                count = count + 1
            else:
                conv = char_bin(target, df1[i])
                conv["VAR_NAME"] = i
                count = count + 1
            if count == 0:
                iv_df = conv
            else:
                iv_df = iv_df.append(conv,ignore_index=True)
    iv = pd.DataFrame({'IV':iv_df.groupby('VAR_NAME').IV.max()})
    iv = iv.reset_index()
    return(iv_df,iv)
final_iv, IV = data_vars(Highcardinalityset,Highcardinalityset.target)
final_iv
IV.sort_values('IV')
IV.to_csv('test.csv')
transform_vars_list = Highcardinalityset.columns.difference(['target'])
transform_prefix = 'new_' # leave this value blank if you need replace the original column values
transform_vars_list
for var in transform_vars_list:
    small_df = final_iv[final_iv['VAR_NAME'] == var]
    transform_dict = dict(zip(small_df.MAX_VALUE.astype(str), small_df.WOE.astype(str)))
    replace_cmd = ''
    replace_cmd1 = ''
    for i in sorted(transform_dict.items()):
        replace_cmd = replace_cmd + str(i[1]) + str(' if x <= ') + str(i[0]) + ' else '
        replace_cmd1 = replace_cmd1 + str(i[1]) + str(' if x == "') + str(i[0]) + '" else '
    replace_cmd = replace_cmd + '0'
    replace_cmd1 = replace_cmd1 + '0'
    if replace_cmd != '0':
        try:
            Highcardinalityset[transform_prefix + var] = Highcardinalityset[var].apply(lambda x: eval(replace_cmd))
        except:
            Highcardinalityset[transform_prefix + var] = Highcardinalityset[var].apply(lambda x: eval(replace_cmd1))
Highcardinalityset['Postal_Code_L'].value_counts()
Highcardinalityset['new_Postal_Code_L'].value_counts()
Highcardinalityset['Managing_Sales_Office_Nbr'].value_counts()
Highcardinalityset['new_Managing_Sales_Office_Nbr'].value_counts()
Nice to see a high WOE: that postal code is interesting, with a high risk of default!
Highcardinalityset.to_excel("Highcardinalitysettraintrain.xlsx")
TrainingWOE = DroppedTraining[['Managing_Sales_Office_Nbr', "Postal_Code_L"]]
TrainingWOE["Postal_Code_L_WOE"]=Highcardinalityset[["new_Postal_Code_L"]]
TrainingWOE["Managing_Sales_Office_Nbr_WOE"]=Highcardinalityset[["new_Managing_Sales_Office_Nbr"]]
drop variables that are not relevant because of low IV value
Drop = ["ACCOUNT_PURPOSE_CD", "A2_MARITAL_STATUS_CD", "A2_EMPLOYMENT_STATUS_CD", "A2_RESIDENT_STATUS_CD",
"INDUSTRY_CD_3", "INDUSTRY_CD_4","Type"]
DroppedTrainingAfterIVcalc = DroppedTraining.copy()
for element in Drop:
    DroppedTrainingAfterIVcalc.drop(element, axis=1, inplace=True)
Preprocess the remaining variables: 44 - 5 (because of too many missing values) - 7 (because of low IV) + 1 (target variable added).
Thanks for asking this question. Here is the code to do the required transformation, which is shown in the notebook as well.
transform_vars_list = df.columns.difference(['target'])
transform_prefix = 'new_'  # leave this value blank if you need to replace the original column values

# apply transformations
for var in transform_vars_list:
    small_df = final_iv[final_iv['VAR_NAME'] == var]
    transform_dict = dict(zip(small_df.MAX_VALUE, small_df.WOE))
    replace_cmd = ''
    replace_cmd1 = ''
    for i in sorted(transform_dict.items()):
        replace_cmd = replace_cmd + str(i[1]) + str(' if x <= ') + str(i[0]) + ' else '
        replace_cmd1 = replace_cmd1 + str(i[1]) + str(' if x == "') + str(i[0]) + '" else '
    replace_cmd = replace_cmd + '0'
    replace_cmd1 = replace_cmd1 + '0'
    if replace_cmd != '0':
        try:
            df[transform_prefix + var] = df[var].apply(lambda x: eval(replace_cmd))
        except:
            df[transform_prefix + var] = df[var].apply(lambda x: eval(replace_cmd1))
In addition, there is a package Xverse which does the same. Please refer to it here - https://github.com/Sundar0989/XuniVerse
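To carry the WOE values fitted on the training split over to the other splits, one option is a plain category-to-WOE mapping built from final_iv and applied with pandas map. A minimal sketch for the categorical (high-cardinality) variables, assuming the names from the question; Validationset and apply_train_woe are hypothetical names, and unseen categories fall back to 0 here:
def apply_train_woe(df, var, final_iv, prefix='new_'):
    # category -> WOE, as estimated on the training split
    small_df = final_iv[final_iv['VAR_NAME'] == var]
    woe_map = dict(zip(small_df.MAX_VALUE, small_df.WOE))
    df[prefix + var] = df[var].map(woe_map).fillna(0)
    return df

# e.g. reuse the training-set mapping on the validation rows
Validationset = Training.iloc[indices_val]
Validationset = apply_train_woe(Validationset, 'Postal_Code_L', final_iv)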

ID3 Algorithm in Python

I am trying to plot a decision tree using ID3 in Python. I am really new to Python and couldn't understand the implementation of the following code. I need to know how I can apply this code to my data.
from math import log
import operator

def entropy(data):
    entries = len(data)
    labels = {}
    for feat in data:
        label = feat[-1]
        if label not in labels.keys():
            labels[label] = 0
        labels[label] += 1
    entropy = 0.0
    for key in labels:
        probability = float(labels[key]) / entries
        entropy -= probability * log(probability, 2)
    return entropy

def split(data, axis, val):
    newData = []
    for feat in data:
        if feat[axis] == val:
            reducedFeat = feat[:axis]
            reducedFeat.extend(feat[axis + 1:])
            newData.append(reducedFeat)
    return newData

def choose(data):
    features = len(data[0]) - 1
    baseEntropy = entropy(data)
    bestInfoGain = 0.0
    bestFeat = -1
    for i in range(features):
        featList = [ex[i] for ex in data]
        uniqueVals = set(featList)
        newEntropy = 0.0
        for value in uniqueVals:
            newData = split(data, i, value)
            probability = len(newData) / float(len(data))
            newEntropy += probability * entropy(newData)
        infoGain = baseEntropy - newEntropy
        if (infoGain > bestInfoGain):
            bestInfoGain = infoGain
            bestFeat = i
    return bestFeat

def majority(classList):
    classCount = {}
    for vote in classList:
        if vote not in classCount.keys():
            classCount[vote] = 0
        classCount[vote] += 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]

def tree(data, labels):
    classList = [ex[-1] for ex in data]
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    if len(data[0]) == 1:
        return majority(classList)
    bestFeat = choose(data)
    bestFeatLabel = labels[bestFeat]
    theTree = {bestFeatLabel: {}}
    del(labels[bestFeat])
    featValues = [ex[bestFeat] for ex in data]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        subLabels = labels[:]
        theTree[bestFeatLabel][value] = tree(split(data, bestFeat, value), subLabels)
    return theTree
So what I did after this is the following:
infile=open("SData.csv","r")
data=infile.read()
tree(data)
The error I got is "1 argument is missing", which refers to the labels argument I have to define, and this is where I don't know what to put. I tried the variable for which I want to make the decision tree, but it doesn't work:
tree(data,MinTemp)
Here I get the error "MinTemp is not defined".
Please help me out and let me know what I should do to get a look at the tree.
The following is part of the data, and I want to generate a tree for MinTemp:
MinTemp,Rainfall,Tempat9,RHat9,CAat9,WSat9
high,no,mild,normal,overcast,weak
high,no,mild,normal,cloudy,weak
high,no,mild,normal,cloudy,mild
high,yes,mild,high,cloudy,weak
high,yes,mild,high,cloudy,mild
medium,yes,mild,high,cloudy,mild
high,no,mild,high,overcast,weak
high,no,mild,normal,sunny,weak
high,no,hot,normal,sunny,weak
high,no,hot,normal,overcast,weak
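Not a definitive answer, but a minimal sketch of how the tree function above could be called: read the file into a list of rows and move the MinTemp column (the target) to the last position, since tree() treats the last value of each row as the class label and expects labels to be the names of the remaining columns:
import csv

with open("SData.csv") as infile:
    rows = list(csv.reader(infile))

header, records = rows[0], rows[1:]
target = "MinTemp"
target_idx = header.index(target)

# move the target column to the end of every row
data = [row[:target_idx] + row[target_idx + 1:] + [row[target_idx]] for row in records]
labels = [name for name in header if name != target]  # feature names only

decision_tree = tree(data, labels)
print(decision_tree)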
