Applying function on pandas column using information from another column - python

I have a dataframe that contains a bunch of people's text descriptions. In addition, I have 4 descriptions a, b, c, d. For each person's text description, I want to compare it to each of the 4 descriptions using cosine similarity and store these scores in 4 new columns of the same dataframe: a, b, c, d.
How can I do this in a pandas way, without using for loops? I was thinking of using the apply function, but I don't know how to reference the 'text' column as well as the 4 descriptions a, b, c, d inside the apply function.
Thank you very much for any help!!
What I have tried:
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
person_one = [' '.join(['table','car','mouse'])]
person_two = [' '.join(['computer','card','can','mouse'])]
person_three = [' '.join(['chair','table','whiteboard','window','button'])]
person_four = [' '.join(['queen','king','joker','phone'])]
description_a = [' '.join(['table','yellow','car','king'])]
description_b = [' '.join(['bottle','whiteboard','queen'])]
description_c = [' '.join(['chair','car','car','phone'])]
description_d = [' '.join(['joker','blue','earphone','king'])]
mystuff = [('person 1',person_one),
('person 2',person_two),
('person 3',person_three),
('person 4',person_four)
]
labels = ['person','text']
df = pd.DataFrame.from_records(mystuff,columns = labels)
df = df.reindex(columns = ['person','text','a','b','c','d'])
def trying(cell, jd):
    vectorizer = CountVectorizer(analyzer='word', max_features=5000).fit(jd)
    jd_vector = vectorizer.transform(jd)
    person_vector = vectorizer.transform(cell['text'])
    score = cosine_similarity(jd_vector, person_vector)
    return score
df['a'] = df['a'].apply(trying(description_a))
df['b'] = df['b'].apply(trying(description_b))
df['c'] = df['c'].apply(trying(description_c))
df['d'] = df['d'].apply(trying(description_d))
This gives me an error:
df['a'] = df['a'].apply(trying(description_a))
TypeError: trying() missing 1 required positional argument: 'jd'
The output should look something like this:
     person                                         text    a    b    c    d
0  person 1                          [table, car, mouse]  0.3  0.2  0.5  0.7
1  person 2                 [computer, card, can, mouse]  0.2  0.1  0.9  0.7
2  person 3  [chair, table, whiteboard, window, button]   0.3  0.5  0.1  0.4
3  person 4                  [queen, king, joker, phone]  0.2  0.4  0.3  0.5

I can't post a comment yet, but to solve the error:
df['a'] = df['a'].apply(trying(description_a))
TypeError: trying() missing 1 required positional argument: 'jd'
You need to pass the parameter like this:
df['a'] = df['a'].apply(trying, args=(description_a,))
The first argument will be the column value in your case, and the remaining arguments are then taken in order from the args tuple (note the trailing comma, which makes it a tuple).
Hope this helps.
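Note this only resolves the TypeError. Since trying reads cell['text'], it needs the whole row rather than the value of column 'a', which means a row-wise apply (axis=1). A minimal sketch of the calling pattern, assuming the names from the question (the returned score may still need .item() to become a scalar, as the next answer does):
df['a'] = df.apply(trying, args=(description_a,), axis=1)
# or equivalently, with a lambda that makes the extra argument explicit:
df['a'] = df.apply(lambda row: trying(row, description_a), axis=1)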

How about this:
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
person_one = ['table','car','mouse']
person_two = ['computer','card','can','mouse']
person_three = ['chair','table','whiteboard','window','button']
person_four = ['queen','king','joker','phone']
description_a = ['table','yellow','car','king']
description_b = ['bottle','whiteboard','queen']
description_c = ['chair','car','car','phone']
description_d = ['joker','blue','earphone','king']
descriptors = {
    'a': description_a,
    'b': description_b,
    'c': description_c,
    'd': description_d
}
mystuff = [('person 1',person_one),
('person 2',person_two),
('person 3',person_three),
('person 4',person_four)
]
labels = ['person','text']
df = pd.DataFrame.from_records(mystuff,columns = labels)
vocabulary_data =[
person_one,
person_two,
person_three,
person_four,
description_a,
description_b,
description_c,
description_d,
]
data = [set(sentence) for sentence in vocabulary_data]
vocabulary = set.union(*data)
cv = CountVectorizer(vocabulary=vocabulary)
def similarity(row, desc):
    a = cosine_similarity(cv.fit_transform(row['text']).sum(axis=0),
                          cv.fit_transform(desc).sum(axis=0))
    return a.item()

for key, description in descriptors.items():
    df[key] = df.apply(lambda x: similarity(x, description), axis=1)
I used one for loop, but only for filling different descriptions. The main "computation" is done by apply.
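For completeness, a fully vectorized sketch (an addition, reusing the names defined above): it joins each word list into a string, builds both matrices once, and computes all sixteen similarities in a single call:
person_texts = df['text'].str.join(' ')               # word lists -> strings
desc_texts = [' '.join(d) for d in descriptors.values()]
cv2 = CountVectorizer().fit(list(person_texts) + desc_texts)
scores = cosine_similarity(cv2.transform(person_texts),
                           cv2.transform(desc_texts))  # shape (4, 4)
# one score column per description, no explicit loop
df = pd.concat([df, pd.DataFrame(scores, columns=list(descriptors), index=df.index)], axis=1)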

Related

pandas str.contains match exact substring not working with regex boundary

I have two dataframes and am trying to find a way to match an exact substring from one dataframe in the other.
First DataFrame:
import pandas as pd
import numpy as np
random_data = {'Place Name':['TS~HOT_MD~h_PB~progra_VV~gogl', 'FM~uiosv_PB~emo_SZ~1x1_TG~bhv'],
'Site':['DV360', 'Adikteev']}
dataframe = pd.DataFrame(random_data)
print(dataframe)
Second DataFrame
test_data = {'code name': ['PB', 'PB', 'PB'],
'Actual':['programmatic me', 'emoteev', 'programmatic-mechanics'],
'code':['progra', 'emo', 'prog']}
test_dataframe = pd.DataFrame(test_data)
Approach
for k, l, m in zip(test_dataframe.iloc[:, 0], test_dataframe.iloc[:, 1], test_dataframe.iloc[:, 2]):
    dataframe['Site'] = np.select([dataframe['Place Name'].str.contains(r'\b{}~{}\b'.format(k, m), regex=False)],
                                  [l], default=dataframe['Site'])
The current output is as below, though I am expecting to match the exact substring, which is not working with the code above.
Current Output:
Place Name                     Site
TS~HOT_MD~h_PB~progra_VV~gogl  programmatic-mechanics
FM~uiosv_PB~emo_SZ~1x1_TG~bhv  emoteev
Expected Output:
Place Name                     Site
TS~HOT_MD~h_PB~progra_VV~gogl  programmatic me
FM~uiosv_PB~emo_SZ~1x1_TG~bhv  emoteev
Data
import pandas as pd
import numpy as np
random_data = {'Place Name':['TS~HOT_MD~h_PB~progra_VV~gogl',
'FM~uiosv_PB~emo_SZ~1x1_TG~bhv'], 'Site':['DV360', 'Adikteev']}
dataframe = pd.DataFrame(random_data)
test_data = {'code name': ['PB', 'PB', 'PB'], 'Actual':['programmatic me', 'emoteev', 'programmatic-mechanics'],
'code':['progra', 'emo', 'prog']}
test_dataframe = pd.DataFrame(test_data)
Map the test_dataframe code and Actual columns into a dictionary as keys and values respectively:
keys=test_dataframe['code'].values.tolist()
dicto=dict(zip(test_dataframe.code, test_dataframe.Actual))
dicto
Join the keys with | so the regex can match any of the phrases:
k = '|'.join(r"{}".format(x) for x in dicto.keys())
k
Extract the substring of the dataframe matching any of the phrases in k and map it through the dictionary:
dataframe['Site'] = dataframe['Place Name'].str.extract('('+ k + ')', expand=False).map(dicto)
dataframe
Output:
Place Name                     Site
TS~HOT_MD~h_PB~progra_VV~gogl  programmatic me
FM~uiosv_PB~emo_SZ~1x1_TG~bhv  emoteev
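One caveat, added here rather than in the original answer: regex alternation takes the first branch that matches, so if a shorter code can be a prefix of a longer one (as with 'prog' and 'progra'), sort the keys longest-first before joining:
k = '|'.join(sorted(dicto.keys(), key=len, reverse=True))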
Not the most elegant solution, but this does the trick.
Set up data
import pandas as pd
import numpy as np
random_data = {'Place Name':['TS~HOT_MD~h_PB~progra_VV~gogl',
'FM~uiosv_PB~emo_SZ~1x1_TG~bhv'], 'Site':['DV360', 'Adikteev']}
dataframe = pd.DataFrame(random_data)
test_data = {'code name': ['PB', 'PB', 'PB'], 'Actual':['programmatic me', 'emoteev', 'programmatic-mechanics'],
'code':['progra', 'emo', 'prog']}
test_dataframe = pd.DataFrame(test_data)
Solution
Create a column in test_dataframe with the substring to match:
test_dataframe['match_str'] = test_dataframe['code name'] + '~' + test_dataframe.code
print(test_dataframe)
  code name                  Actual    code  match_str
0        PB         programmatic me  progra  PB~progra
1        PB                 emoteev     emo     PB~emo
2        PB  programmatic-mechanics    prog    PB~prog
Define a function to apply to test_dataframe:
def match_string(row, dataframe):
    ind = row.name
    try:
        if row[-1] in dataframe.loc[ind, 'Place Name']:
            return row[1]
        else:
            return dataframe.loc[ind, 'Site']
    except KeyError:
        # More rows in test_dataframe than there are in dataframe
        pass

# Apply match_string and assign back to dataframe
dataframe['Site'] = test_dataframe.apply(match_string, args=(dataframe,), axis=1)
Output:
                      Place Name             Site
0  TS~HOT_MD~h_PB~progra_VV~gogl  programmatic me
1  FM~uiosv_PB~emo_SZ~1x1_TG~bhv          emoteev

Python - Encoding Genomic Data in dataframe

Hi, I'm trying to encode a genome, stored as a string inside a dataframe read from a CSV.
Right now I'm looking to split each string in the dataframe under the column 'Genome' into a list of its base pairs, i.e. from ('acgt...') to ('a','c','g','t',...), then convert each base pair into a float (0.25, 0.50, 0.75, 1.00) respectively.
I thought I was looking for a split function to split each string into characters, but none seem to work on the data in the dataframe, even when converted to a string using .tostring.
Here's my most recent code:
import re
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
def string_to_array(my_string):
    my_string = my_string.lower()
    my_string = re.sub('[^acgt]', 'z', my_string)
    my_array = np.array(list(my_string))
    return my_array
label_encoder = LabelEncoder()
label_encoder.fit(np.array(['a','g','c','t','z']))
def ordinal_encoder(my_array):
    integer_encoded = label_encoder.transform(my_array)
    float_encoded = integer_encoded.astype(float)
    float_encoded[float_encoded == 0] = 0.25  # A
    float_encoded[float_encoded == 1] = 0.50  # C
    float_encoded[float_encoded == 2] = 0.75  # G
    float_encoded[float_encoded == 3] = 1.00  # T
    float_encoded[float_encoded == 4] = 0.00  # anything else, z
    return float_encoded
dfpath = 'C:\\Users\\CAAVR\\Desktop\\Ison.csv'
dataframe = pd.read_csv(dfpath)
df = ordinal_encoder(string_to_array(dataframe[['Genome']].values.tostring()))
print(df)
I've tried writing my own functions, but I don't really understand how they work. Everything I try suggests the data can't be processed while it's in a numpy array, and nothing I do transforms it to another type.
Thanks for the tips!
Edit: here is the printout of the dataframe:
Antibiotic ... Genome
0 isoniazid ... ccctgacacatcacggcgcctgaccgacgagcagaagatccagctc...
1 isoniazid ... gggggtgctggcggggccggcgccgataaccccaccggcatcggcg...
2 isoniazid ... aatcacaccccgcgcgattgctagcatcctcggacacactgcacgc...
3 isoniazid ... gttgttgttgccgagattcgcaatgcccaggttgttgttgccgaga...
4 isoniazid ... ttgaccgatgaccccggttcaggcttcaccacagtgtggaacgcgg...
There are 5 columns, 'Genome' being the 5th. I don't know why 1. .head() will not work and 2. print() doesn't give me all the columns...
I don't think LabelEncoder is what you want. This is a simple transformation, so I recommend doing it directly. Start with a lookup table for your base pair mapping:
lookup = {
    'a': 0.25,
    'c': 0.50,
    'g': 0.75,
    't': 1.00
    # anything else ('z'): 0.00
}
Then apply the lookup to the values of the 'Genome' column. The values attribute returns the result as an ndarray.
dataframe['Genome'].apply(lambda bps: pd.Series([lookup[bp] if bp in lookup else 0.0 for bp in bps.lower()])).values
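A quick usage sketch on a toy frame (the frame here is illustrative; 'Genome' matches the question's column name). Genomes of unequal length come back NaN-padded, since each row's pd.Series aligns on position:
import pandas as pd
toy = pd.DataFrame({'Genome': ['acgt', 'ggtcca']})
encoded = toy['Genome'].apply(
    lambda bps: pd.Series([lookup.get(bp, 0.0) for bp in bps.lower()])
).values
print(encoded)  # 2 x 6 array; the first row ends with two NaNs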

String matching between 2 dataframes

Learning Python here, and any help on this is much appreciated.
My problem scenario is: there are 2 dataframes, A and B, each containing a list of names in a Name column plus a Flag column.
ExDF = pd.DataFrame({'Name' : ['Smith','John, Alex','Peter Lin','Carl Marx','Abhraham Moray','Calvin Klein'], 'Flag':['False','False','False','False','False','False']})
SnDF = pd.DataFrame({'Name' : ['Adam K ','John Smith','Peter Lin','Carl Josh','Abhraham Moray','Tim Klein'], 'Flag':['False','False','False','False','False','False']})
The initial value of Flag is False.
Point 1: I need to flip the names in both dataframes, i.e. Adam Smith to Smith Adam, and save the flipped names in a new column in both dataframes.
- This part is done.
Point 2: Then both the original and flipped names of dataframe A should be checked against the original and flipped names of dataframe B. If a match is found, the Flag column in both dataframes should be updated to True.
I wrote code, but it checks the dataframes row by row, like A[0] to B[0], A[1] to B[1], whereas I need to check A[0] against all the records of dataframe B.
Please help me with this!
The code I tried is below:
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
ExDF_swap = ExDF["Swap"] = ExDF["Name"].apply(lambda x: " ".join(reversed(x.split())))
SnDF_swap = SnDF["Swap"] = SnDF["Name"].apply(lambda x: " ".join(reversed(x.split())))
ExDF_swap = pd.DataFrame(ExDF_swap)
SnDF_swap = pd.DataFrame(SnDF_swap)
vect = CountVectorizer()
X = vect.fit_transform(ExDF_swap.Name)
Y = vect.transform(SnDF_swap.Name)
res = np.ravel(np.any((X.dot(Y.T) > 1).todense(), axis=1))
pd.DataFrame(X.toarray(), columns=vect.get_feature_names())
pd.DataFrame(Y.toarray(), columns=vect.get_feature_names())
ExDF["Flag"] = np.ravel(np.any((X.dot(Y.T) > 1).todense(), axis=1))
SnDF["Flag"] = np.ravel(np.any((X.dot(Y.T) > 1).todense(), axis=1))
You could try pandas' isin():
import pandas as pd
ExDF = pd.DataFrame({'Name' : ['Smith','John, Alex','Peter Lin','Carl Marx','Abhraham Moray','Calvin Klein'], 'Flag':['False','False','False','False','False','False']})
SnDF = pd.DataFrame({'Name' : ['Adam K ','John Smith','Peter Lin','Carl Josh','Abhraham Moray','Tim Klein'], 'Flag':['False','False','False','False','False','False']})
print(ExDF)
print(SnDF)
ExDF["Swap"] = ExDF["Name"].apply(lambda x: " ".join(reversed(x.split())))
SnDF["Swap"] = SnDF["Name"].apply(lambda x: " ".join(reversed(x.split())))
print(ExDF)
print(SnDF)
ExDF['Flag'] = ExDF.Name.isin(SnDF.Name)
SnDF['Flag'] = SnDF.Name.isin(ExDF.Name)
print(ExDF)
print(SnDF)
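The isin() check above compares only the original names. To also honour the flipped names from Point 2 (an addition, not part of the original answer), the same idea extends by pooling both columns:
# a row flags True if its name or flipped name appears among the
# other frame's names or flipped names
ex_all = pd.concat([ExDF['Name'], ExDF['Swap']])
sn_all = pd.concat([SnDF['Name'], SnDF['Swap']])
ExDF['Flag'] = ExDF['Name'].isin(sn_all) | ExDF['Swap'].isin(sn_all)
SnDF['Flag'] = SnDF['Name'].isin(ex_all) | SnDF['Swap'].isin(ex_all)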

linearmodels panelOLS: Regression output with stars

I'm using the linearmodels package to estimate a Panel-OLS. As an example see:
import numpy as np
from statsmodels.datasets import grunfeld
data = grunfeld.load_pandas().data
data.year = data.year.astype(np.int64)
# MultiIndex, entity - time
data = data.set_index(['firm','year'])
from linearmodels import PanelOLS
mod = PanelOLS(data.invest, data[['value','capital']], entity_effects=True)
res = mod.fit(cov_type='clustered', cluster_entity=True)
I want to export the regression output to a .tex file. Is there a convenient way of formatting the output with significance stars and without other information like the CIs? The question has been asked in the context of a standard OLS here, but that approach does not apply to a 'PanelEffectsResults' object, since I get the following error:
'PanelEffectsResults' object has no attribute 'bse'
Thanks in advance.
A bit late, but here is what I use. In the example below I combine two fixed-effects regressions whose results are stored in fe_res_VS and fe_res_CVS:
pd.set_option('display.precision', 4)
pd.options.display.float_format = '{:,.4f}'.format
Reg_Output_FAmount= pd.DataFrame()
#1)
Table1 = pd.DataFrame(fe_res_VS.params)
Table1['id'] = np.arange(len(Table1))#create numerical index for pd.DataFrame
Table1 = Table1.reset_index().set_index(keys = 'id')#set numercial index as new index
Table1 = Table1.rename(columns={"index":"parameter", "parameter":"coefficient 1"})
P1 = pd.DataFrame(fe_res_VS.pvalues)
P1['id'] = np.arange(len(P1))#create numerical index for pd.DataFrame
P1 = P1.reset_index().set_index(keys = 'id')#set numercial index as new index
P1 = P1.rename(columns={"index":"parameter"})
Table1 = pd.merge(Table1, P1, on='parameter')
Table1['significance 1'] = np.where(Table1['pvalue'] <= 0.01, '***',
                                    np.where(Table1['pvalue'] <= 0.05, '**',
                                             np.where(Table1['pvalue'] <= 0.1, '*', '')))
Table1.rename(columns={"pvalue": "pvalue 1"}, inplace=True)
SE1 = pd.DataFrame(fe_res_VS.std_errors)
SE1['id'] = np.arange(len(SE1))#create numerical index for pd.DataFrame
SE1 = SE1.reset_index().set_index(keys = 'id')#set numercial index as new index
SE1 = SE1.rename(columns={"index":"parameter", "std_error":"coefficient 1"})
SE1['parameter'] = SE1['parameter'].astype(str) + '_SE'
SE1['significance 1'] = ''
SE1 = SE1.round(4)
SE1['coefficient 1'] = '(' + SE1['coefficient 1'].astype(str) + ')'
Table1 = Table1.append(SE1)
Table1 = Table1.sort_values('parameter')
Table1.replace(np.nan,'', inplace=True)
del P1
del SE1
#2)
Table2 = pd.DataFrame(fe_res_CVS.params)
Table2['id'] = np.arange(len(Table2))#create numerical index for pd.DataFrame
Table2 = Table2.reset_index().set_index(keys = 'id')#set numercial index as new index
Table2 = Table2.rename(columns={"index":"parameter", "parameter":"coefficient 2"})
P2 = pd.DataFrame(fe_res_CVS.pvalues)
P2['id'] = np.arange(len(P2))#create numerical index for pd.DataFrame
P2 = P2.reset_index().set_index(keys = 'id')#set numercial index as new index
P2 = P2.rename(columns={"index":"parameter"})
Table2 = pd.merge(Table2, P2, on='parameter')
Table2['significance 2'] = np.where(Table2['pvalue'] <= 0.01, '***',
                                    np.where(Table2['pvalue'] <= 0.05, '**',
                                             np.where(Table2['pvalue'] <= 0.1, '*', '')))
Table2.rename(columns={"pvalue": "pvalue 2"}, inplace=True)
SE2 = pd.DataFrame(fe_res_CVS.std_errors)
SE2['id'] = np.arange(len(SE2))#create numerical index for pd.DataFrame
SE2 = SE2.reset_index().set_index(keys = 'id')#set numercial index as new index
SE2 = SE2.rename(columns={"index":"parameter", "std_error":"coefficient 2"})
SE2['parameter'] = SE2['parameter'].astype(str) + '_SE'
SE2['significance 2'] = ''
SE2 = SE2.round(4)
SE2['coefficient 2'] = '(' + SE2['coefficient 2'].astype(str) + ')'
Table2 = Table2.append(SE2)
Table2 = Table2.sort_values('parameter')
Table2.replace(np.nan,'', inplace=True)
del P2
del SE2
#Merging Tables and adding Stats
Reg_Output_FAmount= pd.merge(Table1, Table2, on='parameter', how='outer')
Reg_Output_FAmount = Reg_Output_FAmount.append(pd.DataFrame(np.array([["observ.", fe_res_VS.nobs, '', fe_res_CVS.nobs, '']]), columns=['parameter', 'pvalue 1', 'significance 1', 'pvalue 2', 'significance 2']), ignore_index=True)
Reg_Output_FAmount = Reg_Output_FAmount.append(pd.DataFrame(np.array([["Rsquared", "{:.4f}".format(fe_res_VS.rsquared), '', "{:.4f}".format(fe_res_CVS.rsquared), '']]), columns=['parameter', 'pvalue 1', 'significance 1', 'pvalue 2', 'significance 2']), ignore_index=True)
Reg_Output_FAmount= Reg_Output_FAmount.append(pd.DataFrame(np.array([["Model type", fe_res_VS.name, '', fe_res_CVS.name, '']]), columns=['parameter', 'pvalue 1', 'significance 1', 'pvalue 2', 'significance 2']), ignore_index=True)
Reg_Output_FAmount = Reg_Output_FAmount.append(pd.DataFrame(np.array([["DV", fe_res_VS.model.dependent.vars[0], '', fe_res_CVS.model.dependent.vars[0], '']]), columns=['parameter', 'pvalue 1', 'significance 1', 'pvalue 2', 'significance 2']), ignore_index=True)
Reg_Output_FAmount.fillna('', inplace=True)
resulting in a nice regression output looking like this:
   parameter   coefficient 1  pvalue 1  significance 1  coefficient 2  pvalue 2  significance 2
0  IV          0.0676         0.2269                    0.0732         0.1835
1  IV_SE       (0.0559)                                 (0.055)
2  Control     0.3406         0.0125    **              0.3482         0.0118    **
3  Control_SE  (0.1363)                                 (0.1383)
4  const       0.2772         0.0000    ***             0.2769         0.0000    ***
5  const_SE    (0.012)                                  (0.012)
6  observ.     99003                                    99003
7  Rsquared    0.12                                     0.14
8  Model type  PanelOLS                                 PanelOLS
9  DV          FAmount                                  FAmount
Have been struggling with the same problem for a few days. Very excited to share with my peers a very easy way to do it: include the significance stars and remove the CIs.
Here it is:
Step 1: install the linearmodels package.
Step 2: import the compare function from linearmodels.panel:
from linearmodels.panel import compare
Step 3: use compare and specify its arguments as you want. For instance, stars=True gives you significance stars. Very convenient!
compare({'model_A_name': model_A_results, 'model_B_name': model_B_results}, stars=True)
This small function saved my life! Enjoy it.
One more thing: please note that the stars are based on the p-value of the coefficient, where 1, 2 and 3 stars correspond to p-values of 10%, 5% and 1%, respectively. I am not sure whether there is a way to customize the star thresholds, e.g. 1, 2 and 3 stars for p-values of 5%, 1% and 0.1%.
The credit goes to the fantastic package developer and maintainer. Thank you all! Please see the file and get more information at:
~/opt/anaconda3/lib/python3.7/site-packages/linearmodels/panel/results.py
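Tying this back to the question's grunfeld example, a minimal sketch (the model name 'FE' is illustrative):
import numpy as np
from statsmodels.datasets import grunfeld
from linearmodels import PanelOLS
from linearmodels.panel import compare

data = grunfeld.load_pandas().data
data.year = data.year.astype(np.int64)
data = data.set_index(['firm', 'year'])  # MultiIndex: entity - time
res = PanelOLS(data.invest, data[['value', 'capital']],
               entity_effects=True).fit(cov_type='clustered',
                                        cluster_entity=True)
print(compare({'FE': res}, stars=True))  # starred coefficient table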

label-encoder encoding missing values

I am using the label encoder to convert categorical data into numeric values.
How does LabelEncoder handle missing values?
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import numpy as np
a = pd.DataFrame(['A','B','C',np.nan,'D','A'])
le = LabelEncoder()
le.fit_transform(a)
Output:
array([1, 2, 3, 0, 4, 1])
For the above example, label encoder changed NaN values to a category. How would I know which category represents missing values?
Don't use LabelEncoder with missing values. I don't know which version of scikit-learn you're using, but in 0.17.1 your code raises TypeError: unorderable types: str() > float().
As you can see in the source it uses numpy.unique against the data to encode, which raises TypeError if missing values are found. If you want to encode missing values, first change its type to a string:
a[pd.isnull(a)] = 'NaN'
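A minimal end-to-end sketch of this approach (using a Series for simplicity; the sentinel's code can be recovered with transform afterwards):
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder

a = pd.Series(['A', 'B', 'C', np.nan, 'D', 'A'])
a[pd.isnull(a)] = 'NaN'           # replace missing values with a sentinel string
le = LabelEncoder()
codes = le.fit_transform(a)       # array([0, 1, 2, 4, 3, 0]) — 'NaN' sorts after 'D'
missing_code = le.transform(['NaN'])[0]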
You can also use a mask to restore the missing values from the original data frame after labelling:
df = pd.DataFrame({'A': ['x', np.nan, 'z'], 'B': [1, 6, 9], 'C': [2, 1, np.nan]})

     A  B    C
0    x  1  2.0
1  NaN  6  1.0
2    z  9  NaN

original = df
mask = df.isnull()

       A      B      C
0  False  False  False
1   True  False  False
2  False  False   True

df = df.astype(str).apply(LabelEncoder().fit_transform)
df.where(~mask, original)

     A  B    C
0  1.0  0  1.0
1  NaN  1  0.0
2  2.0  2  NaN
Here is a little computational hack I did for my own work:
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import numpy as np
a = pd.DataFrame(['A','B','C',np.nan,'D','A'])
le = LabelEncoder()
### fit with the desired col, col in position 0 for this example
fit_by = pd.Series([i for i in a.iloc[:,0].unique() if type(i) == str])
le.fit(fit_by)
### Set transformed col leaving np.NaN as they are
a["transformed"] = fit_by.apply(lambda x: le.transform([x])[0] if type(x) == str else x)
This is my solution, because I was not pleased with the solutions posted here. I needed a LabelEncoder that keeps my missing values as NaN so that I can use an Imputer afterwards, so I have written my own LabelEncoder class. It works with DataFrames.
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.preprocessing import LabelEncoder
class LabelEncoderByCol(BaseEstimator, TransformerMixin):
    def __init__(self, col):
        # List of column names in the DataFrame that should be encoded
        self.col = col
        # Dictionary storing a LabelEncoder for each column
        self.le_dic = {}
        for el in self.col:
            self.le_dic[el] = LabelEncoder()

    def fit(self, x, y=None):
        # Fill missing values with the string 'NaN'
        x[self.col] = x[self.col].fillna('NaN')
        for el in self.col:
            # Only use the values that are not 'NaN' to fit the encoder
            a = x[el][x[el] != 'NaN']
            self.le_dic[el].fit(a)
        return self

    def transform(self, x, y=None):
        # Fill missing values with the string 'NaN'
        x[self.col] = x[self.col].fillna('NaN')
        for el in self.col:
            # Only use the values that are not 'NaN' for the encoder
            a = x[el][x[el] != 'NaN']
            # Store an ndarray of the current column
            b = x[el].to_numpy()
            # Replace the elements in the ndarray that are not 'NaN'
            # using the transformer
            b[b != 'NaN'] = self.le_dic[el].transform(a)
            # Overwrite the column in the DataFrame
            x[el] = b
        # Return the transformed DataFrame
        return x
You can pass in a DataFrame, not only a 1-dim Series, and with col you can choose the columns that should be encoded.
I would like to hear some feedback.
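A hedged usage sketch (the frame and column names here are illustrative):
import numpy as np
import pandas as pd

df = pd.DataFrame({'A': ['x', np.nan, 'z'], 'B': ['u', 'v', np.nan]})
enc = LabelEncoderByCol(col=['A', 'B'])
out = enc.fit(df).transform(df)  # missing cells come back as the string 'NaN'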
I want to share my solution with you. I created a module that takes a mixed dataset and converts it from categorical to numerical and back. The module is also available on my GitHub, well organized and with an example.
Please upvote if you like my solution.
Thanks, Idan
import numpy as np
import pandas as pd

class label_encoder_contain_missing_values:
    def __init__(self):
        pass

    def categorical_to_numeric(self, dataset):
        self.dataset = dataset
        self.summary = None
        self.table_encoder = {}
        for index in self.dataset.columns:
            if self.dataset[index].dtypes == 'object':
                column_data_frame = pd.Series(self.dataset[index], name='column').to_frame()
                unique_values = pd.Series(self.dataset[index].unique())
                i = 0
                label_encoder = pd.DataFrame({'value_name': [], 'Encode': []})
                while i <= len(unique_values) - 1:
                    if unique_values.isnull()[i] == True:
                        label_encoder = label_encoder.append({'value_name': unique_values[i], 'Encode': np.nan}, ignore_index=True)  # np.nan = -1
                    else:
                        label_encoder = label_encoder.append({'value_name': unique_values[i], 'Encode': i}, ignore_index=True)
                    i += 1
                output = pd.merge(left=column_data_frame, right=label_encoder, how='left', left_on='column', right_on='value_name')
                self.summary = output[['column', 'Encode']].drop_duplicates().reset_index(drop=True)
                self.dataset[index] = output.Encode
                self.table_encoder.update({index: self.summary})
            else:
                pass
        # ---- Show encode table ----- #
        print('''\nLabel encoding completed successfully.\n
Next steps: \n
1. To view table_encoder, execute the following: \n
for index in table_encoder:
    print(f'\\n{index} \\n', table_encoder[index])
2. For the inverse, execute the following: \n
df = label_encoder_contain_missing_values().inverse_numeric_to_categorical(table_encoder, df) ''')
        return self.table_encoder, self.dataset

    def inverse_numeric_to_categorical(self, table_encoder, df):
        dataset = df.copy()
        for column in table_encoder.keys():
            df_column = df[column].to_frame()
            output = pd.merge(left=df_column, right=table_encoder[column], how='left', left_on=column, right_on='Encode')  # .rename(columns={'column_x': 'encode', 'column_y': 'category'})
            df[column] = output.column
        print('\nInverse label encoding, from numerical back to categorical, completed successfully.\n')
        return df
Execute to convert from categorical to numeric:
table_encoder, df = label_encoder_contain_missing_values().categorical_to_numeric(df)
Execute to convert from numeric back to categorical:
df = label_encoder_contain_missing_values().inverse_numeric_to_categorical(table_encoder, df)
An easy way is this; here is an example with the Titanic dataset:
from sklearn.preprocessing import LabelEncoder

LABEL_COL = ["Sex", "Embarked"]

def label(df):
    _df = df.copy()
    le = LabelEncoder()
    for col in LABEL_COL:
        # Indices of the rows that are not NaN
        idx = ~_df[col].isna()
        _df.loc[idx, col] = le.fit(_df.loc[idx, col]).transform(_df.loc[idx, col])
    return _df
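A hedged usage sketch on a toy frame with the same column names:
import pandas as pd

toy = pd.DataFrame({'Sex': ['male', None, 'female'],
                    'Embarked': ['S', 'C', None]})
print(label(toy))  # NaN cells stay NaN, the rest become integer codes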
The most voted answer by @Kerem above has typos, so here is the corrected and improved version:
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import numpy as np
a = pd.DataFrame(['A','B','C',np.nan,'D','A'])
for j in a.columns.values:
    le = LabelEncoder()
    # Fit with the desired col, col in position 0 for this example
    fit_by = pd.Series([i for i in a[j].unique() if type(i) == str])
    le.fit(fit_by)
    # Set the transformed col, leaving np.nan as it is
    a["transformed"] = a[j].apply(lambda x: le.transform([x])[0] if type(x) == str else x)
You can handle missing values by replacing them with the string 'NaN'. The category code can then be obtained with le.transform():
le.fit_transform(a.fillna('NaN'))
category = le.transform(['NaN'])
Another solution is for label encoder to ignore missing values.
a = le.fit_transform(a.astype(str))
You can fill the NaNs with some value and later change the dataframe column type to string to make things work.
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import numpy as np
a = pd.DataFrame(['A','B','C',np.nan,'D','A'])
a = a.fillna(99)
le = LabelEncoder()
le.fit_transform(a.astype(str))
The following encoder addresses None values in each column:
from collections import defaultdict
from sklearn import preprocessing

class MultiColumnLabelEncoder:
    def __init__(self):
        self.columns = None
        self.led = defaultdict(preprocessing.LabelEncoder)

    def fit(self, X):
        self.columns = X.columns
        for col in self.columns:
            cat = X[col].unique()
            cat = [x if x is not None else "None" for x in cat]
            self.led[col].fit(cat)
        return self

    def fit_transform(self, X):
        if self.columns is None:
            self.fit(X)
        return self.transform(X)

    def transform(self, X):
        return X.apply(lambda x: self.led[x.name].transform(
            x.apply(lambda e: e if e is not None else "None")))

    def inverse_transform(self, X):
        return X.apply(lambda x: self.led[x.name].inverse_transform(x))
Usage example:
df = pd.DataFrame({
    'pets': ['cat', 'dog', 'cat', 'monkey', 'dog', 'dog'],
    'owner': ['Champ', 'Ron', 'Brick', None, 'Veronica', 'Ron'],
    'location': ['San_Diego', 'New_York', 'New_York', 'San_Diego', 'San_Diego', None]
})
print(df)

    location     owner    pets
0  San_Diego     Champ     cat
1   New_York       Ron     dog
2   New_York     Brick     cat
3  San_Diego      None  monkey
4  San_Diego  Veronica     dog
5       None       Ron     dog

le = MultiColumnLabelEncoder()
le.fit(df)
transformed = le.transform(df)
print(transformed)

   location  owner  pets
0         2      1     0
1         0      3     1
2         0      0     0
3         2      2     2
4         2      4     1
5         1      3     1

inverted = le.inverse_transform(transformed)
print(inverted)

    location     owner    pets
0  San_Diego     Champ     cat
1   New_York       Ron     dog
2   New_York     Brick     cat
3  San_Diego      None  monkey
4  San_Diego  Veronica     dog
5       None       Ron     dog
This function takes a column from a dataframe and returns the column with only the non-NaN values label encoded; the rest remain untouched:
import pandas as pd
from sklearn.preprocessing import LabelEncoder
def label_encode_column(col):
    nans = col.isnull()
    nan_lst = []
    nan_idx_lst = []
    label_lst = []
    label_idx_lst = []

    for idx, nan in enumerate(nans):
        if nan:
            nan_lst.append(col[idx])
            nan_idx_lst.append(idx)
        else:
            label_lst.append(col[idx])
            label_idx_lst.append(idx)

    nan_df = pd.DataFrame(nan_lst, index=nan_idx_lst)
    label_df = pd.DataFrame(label_lst, index=label_idx_lst)
    label_encoder = LabelEncoder()
    label_df = label_encoder.fit_transform(label_df.astype(str))
    label_df = pd.DataFrame(label_df, index=label_idx_lst)
    final_col = pd.concat([label_df, nan_df])
    return final_col.sort_index()
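A hedged usage sketch (a default integer index is assumed, since the function indexes positionally):
import numpy as np

s = pd.Series(['A', 'B', np.nan, 'A'])
print(label_encode_column(s))  # codes 0, 1, 0 at rows 0, 1, 3; NaN kept at row 2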
This is how I did it:
import pandas as pd
from sklearn.preprocessing import LabelEncoder
UNKNOWN_TOKEN = '<unknown>'
a = pd.Series(['A','B','C', 'D','A'], dtype=str).unique().tolist()
a.append(UNKNOWN_TOKEN)
le = LabelEncoder()
le.fit_transform(a)
embedding_map = dict(zip(le.classes_, le.transform(le.classes_)))
and when applying to new test data:
test_df = test_df.apply(lambda x: x if x in embedding_map else UNKNOWN_TOKEN)
le.transform(test_df)
I also wanted to contribute my workaround, as I found the others a bit tedious when working with categorical data that contains missing values:
# Create a random dataframe
foo = pd.DataFrame(np.random.randint(0,100,size=(100, 4)), columns=list('ABCD'))
# Randomly intersperse column 'A' with missing data (NaN)
foo['A'][np.random.randint(0,len(foo), size=20)] = np.nan
# Convert this series to string, to simulate our problem
series = foo['A'].astype(str)
# np.nan are converted to the string "nan", mask these out
mask = (series == "nan")
# Apply the LabelEncoder to the unmasked series, replace the masked series with np.nan
series[~mask] = LabelEncoder().fit_transform(series[~mask])
series[mask] = np.nan
foo['A'] = series
This is my attempt!
import numpy as np
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
#Now lets encode the incomplete Cabin feature
titanic_train_le['Cabin'] = le.fit_transform(titanic_train_le['Cabin'].astype(str))
#get nan code for the cabin categorical feature
cabin_nan_code=le.transform(['nan'])[0]
#Now, retrieve the nan values in the encoded data
titanic_train_le['Cabin'].replace(cabin_nan_code,np.nan,inplace=True)
I just created my own encoder, which can encode a whole dataframe at once. Using this class, None is encoded to 0, which can be handy when building a sparse matrix.
Note that the input dataframe must include categorical columns only.
class DF_encoder():
    def __init__(self):
        self.mapping = {None: 0}
        self.inverse_mapping = {0: None}
        self.all_keys = []

    def fit(self, df: pd.DataFrame):
        for col in df.columns:
            keys = list(df[col].unique())
            self.all_keys += keys
        self.all_keys = list(set(self.all_keys))
        for i, item in enumerate(self.all_keys, start=1):
            if item not in self.mapping.keys():
                self.mapping[item] = i
                self.inverse_mapping[i] = item

    def transform(self, df):
        temp_df = pd.DataFrame()
        for col in df.columns:
            temp_df[col] = df[col].map(self.mapping)
        return temp_df

    def inverse_transform(self, df):
        temp_df = pd.DataFrame()
        for col in df.columns:
            temp_df[col] = df[col].map(self.inverse_mapping)
        return temp_df
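A hedged usage sketch (illustrative frame; categorical columns only, as noted above):
import pandas as pd

toy = pd.DataFrame({'pets': ['cat', None, 'dog'],
                    'city': ['NY', 'SD', None]})
enc = DF_encoder()
enc.fit(toy)
encoded = enc.transform(toy)               # None cells map to 0
restored = enc.inverse_transform(encoded)  # 0 maps back to None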
I faced the same problem, but none of the above worked for me, so I added a new row to the training data consisting only of the string 'nan'.
