None produced in loop - python

Why does this function return None for missing values? I only want NaN when a value is missing.
def func(row):
    if pd.notna(row['year']):
        if pd.notna(row['fruit']) & pd.notna(row['signifiance']):
            return row['fruit']
        elif pd.isna(row['fruit']) & pd.notna(row['signifiance']):
            return row['fruit']
        else:
            return np.nan

df['new_col'] = df.apply(func, axis=1)
df
  fruit  year price vol signifiance
0 apple  2010     1   5         NaN
1 apple  2011     2   4         NaN
2 apple  2012     3   3         NaN
3   NaN  2013     3   3         NaN
4   NaN   NaN   NaN   3         NaN
5 apple  2015     3   3   important
Actual Output:
df
  fruit  year price vol signifiance new_col
0 apple  2010     1   5         NaN    None
1 apple  2011     2   4         NaN    None
2 apple  2012     3   3         NaN    None
3   NaN  2013     3   3         NaN    None
4   NaN   NaN   NaN   3         NaN    None
5 apple  2015     3   3   important   apple
Expected Output:
df
  fruit  year price vol signifiance new_col
0 apple  2010     1   5         NaN     NaN
1 apple  2011     2   4         NaN     NaN
2 apple  2012     3   3         NaN     NaN
3   NaN  2013     3   3         NaN     NaN
4   NaN   NaN   NaN   3         NaN     NaN
5 apple  2015     3   3   important   apple

A Python function implicitly returns None whenever it reaches the end without executing a return statement. Rows where year is NaN never enter the outer if, so nothing is returned for them. Add an outer else for that case:
def func(row):
    if pd.notna(row['year']):
        if pd.notna(row['fruit']) & pd.notna(row['signifiance']):
            return row['fruit']
        elif pd.isna(row['fruit']) & pd.notna(row['signifiance']):
            return row['fruit']
        else:
            return np.nan
    else:
        return np.nan

df.apply(func, axis=1)
Out[178]:
0 NaN
1 NaN
2 NaN
3 NaN
4 NaN
5 apple
dtype: object
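As a side note, a row-wise apply can be slow on large frames. In the corrected function, both branches that return a value return row['fruit'], so the logic reduces to "fruit where year and signifiance are both present, NaN otherwise"; a vectorized sketch of that (assuming the column names above):

import numpy as np

# fruit where both year and signifiance are present, NaN otherwise
df['new_col'] = np.where(df['year'].notna() & df['signifiance'].notna(),
                         df['fruit'], np.nan)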


How to create multiple year columns in a new dataframe, from the original single column datetime dataframe?

Source df has sdate datetime64 and svalue float64 columns as:
sdate svalue
1980-01-01 5
1980-01-02 7
1980-01-05 2
1981-01-01 6
1981-01-02 3
1982-01-01 4
1982-01-02 2
1982-01-06 9
1983-01-06 8
How to create multiple year columns in a new dataset as:
dayofyear 1980 1981 1982 1983
1 5 6 4 nan
2 7 3 2 nan
3 nan nan nan nan
4 nan nan nan nan
5 2 nan nan nan
6 nan nan 9 8
I tried something like
df_new = df.pivot(index=df.sdate.dt.dayofyear, columns=df.sdate.dt.year, values='svalue')
Use DataFrame.assign to add helper columns and then pivot (keyword arguments are required for pivot in pandas 2.0+):
df_new = (df.assign(d=df.sdate.dt.dayofyear, y=df.sdate.dt.year)
            .pivot(index='d', columns='y', values='svalue'))
print (df_new)
y 1980 1981 1982 1983
d
1 5.0 6.0 4.0 NaN
2 7.0 3.0 2.0 NaN
5 2.0 NaN NaN NaN
6 NaN NaN 9.0 8.0
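Note that pivot only emits the days that actually occur in the data, while the expected output above also lists the all-NaN days 3 and 4. A minimal follow-up sketch, assuming every day up to the largest observed dayofyear is wanted:

df_new = df_new.reindex(range(1, df_new.index.max() + 1))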

Fill Nan based on multiple column condition in Pandas

The objective is to fill NaN with respect to two columns (i.e., a and b).
a  b  c  d
2  0  1  4
5  0  5  6
6  0  1  1
1  1  1  4
4  1  5  6
5  1  5  6
6  1  1  1
1  2  2  3
6  2  5  6
For each fixed value in column b, column a should run continuously from 1 to 6; the rows added to make that happen get NaN in the other columns.
The following snippet does the trick:
import numpy as np
import pandas as pd

maxval_col_a = 6
lowval_col_a = 1
maxval_col_b = 2
lowval_col_b = 0
r = list(range(lowval_col_b, maxval_col_b + 1))

df = pd.DataFrame(np.column_stack([[2, 5, 6, 1, 4, 5, 6, 1, 6],
                                   [0, 0, 0, 1, 1, 1, 1, 2, 2],
                                   [1, 5, 1, 1, 5, 5, 1, 2, 5],
                                   [4, 6, 1, 4, 6, 6, 1, 3, 6]]),
                  columns=['a', 'b', 'c', 'd'])

all_df = []
for idx in r:
    k = (df.loc[df['b'] == idx]
           .set_index('a')
           .reindex(range(lowval_col_a, maxval_col_a + 1))
           .reset_index())
    k['b'] = idx
    all_df.append(k)
df = pd.concat(all_df)
But I am curious whether there is a more efficient and better way of doing this with pandas.
The expected output:
a b c d
0 1 0 NaN NaN
1 2 0 1.0 4.0
2 3 0 NaN NaN
3 4 0 NaN NaN
4 5 0 5.0 6.0
5 6 0 1.0 1.0
0 1 1 1.0 4.0
1 2 1 NaN NaN
2 3 1 NaN NaN
3 4 1 5.0 6.0
4 5 1 5.0 6.0
5 6 1 1.0 1.0
0 1 2 2.0 3.0
1 2 2 NaN NaN
2 3 2 NaN NaN
3 4 2 NaN NaN
4 5 2 NaN NaN
5 6 2 5.0 6.0
Create the cartesian product of combinations:
mi = pd.MultiIndex.from_product([df['b'].unique(), range(1, 7)],
                                names=['b', 'a']).swaplevel()
out = df.set_index(['a', 'b']).reindex(mi).reset_index()
print(out)
# Output
a b c d
0 1 0 NaN NaN
1 2 0 1.0 4.0
2 3 0 NaN NaN
3 4 0 NaN NaN
4 5 0 5.0 6.0
5 6 0 1.0 1.0
6 1 1 1.0 4.0
7 2 1 NaN NaN
8 3 1 NaN NaN
9 4 1 5.0 6.0
10 5 1 5.0 6.0
11 6 1 1.0 1.0
12 1 2 2.0 3.0
13 2 2 NaN NaN
14 3 2 NaN NaN
15 4 2 NaN NaN
16 5 2 NaN NaN
17 6 2 5.0 6.0
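A note on the design: from_product enumerates the combinations in b-major order (all values of a for b = 0, then for b = 1, and so on), and swaplevel only flips the level order to (a, b) so the reindex lines up with set_index(['a', 'b']) without disturbing the row order.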
First create a MultiIndex from columns [a, b], then a new MultiIndex with all the combinations, and then reindex with the new MultiIndex:
(showing all steps)
# set both a and b as index (it's a MultiIndex)
df.set_index(['a', 'b'], drop=True, inplace=True)

# create the new MultiIndex
new_idx_a = np.tile(np.arange(0, 6 + 1), 3)
new_idx_b = np.repeat([0, 1, 2], 6 + 1)
new_multidx = pd.MultiIndex.from_arrays([new_idx_a, new_idx_b])

# reindex
df = df.reindex(new_multidx)

# convert the MultiIndex back to columns
df.index.names = ['a', 'b']
df.reset_index()
results (note that np.arange(0, 6 + 1) covers 0 to 6, so this also emits a = 0 rows, unlike the expected output; use np.arange(1, 7) with np.repeat([0, 1, 2], 6) to match it exactly):
a b c d
0 0 0 NaN NaN
1 1 0 NaN NaN
2 2 0 1.0 4.0
3 3 0 NaN NaN
4 4 0 NaN NaN
5 5 0 5.0 6.0
6 6 0 1.0 1.0
7 0 1 NaN NaN
8 1 1 1.0 4.0
9 2 1 NaN NaN
10 3 1 NaN NaN
11 4 1 5.0 6.0
12 5 1 5.0 6.0
13 6 1 1.0 1.0
14 0 2 NaN NaN
15 1 2 2.0 3.0
16 2 2 NaN NaN
17 3 2 NaN NaN
18 4 2 NaN NaN
19 5 2 NaN NaN
20 6 2 5.0 6.0
We can do it by using a groupby on column b: within each group, set a as the index and add the missing values of a using numpy.arange. To finish, reset the index to get the expected result (note drop(columns='b') rather than the old positional drop('b', 1), which modern pandas no longer accepts):
import numpy as np

df.groupby('b').apply(lambda x: x.set_index('a').reindex(np.arange(1, 7))).drop(columns='b').reset_index()
Output:
b a c d
0 0 1 NaN NaN
1 0 2 1.0 4.0
2 0 3 NaN NaN
3 0 4 NaN NaN
4 0 5 5.0 6.0
5 0 6 1.0 1.0
6 1 1 1.0 4.0
7 1 2 NaN NaN
8 1 3 NaN NaN
9 1 4 5.0 6.0
10 1 5 5.0 6.0
11 1 6 1.0 1.0
12 2 1 2.0 3.0
13 2 2 NaN NaN
14 2 3 NaN NaN
15 2 4 NaN NaN
16 2 5 NaN NaN
17 2 6 5.0 6.0

How can I ffill a pandas dataframe based on id and "original date" range?

I have a DataFrame with thousands of rows and hundreds of columns, where I want to forward-fill the data grouped by id and by the original date's range. What I mean by "original date": if we have data for id 1 on 01/01/2020 but null values on 01/05/2020 and 02/02/2020, I would like to fill the value on 01/05/2020 but not on 02/02/2020, since 02/02/2020 is not within the 30-day window. A plain ffill fills everything forward from the last result.
import pandas as pd
import numpy as np
res = pd.DataFrame({'id': [1, 1, 1, 1, 1, 2, 2],
                    'date': ['01/01/2020', '01/05/2020', '02/03/2020', '02/05/2020',
                             '04/01/2020', '01/01/2020', '01/02/2020'],
                    'result': [1.5, np.nan, np.nan, 2.6, np.nan, np.nan, 6.0]})
res['result1'] = res.groupby(['id']).apply(lambda x: x.result.ffill()).reset_index(drop=True)
The result I get is:
id date result result1
0 1 01/01/2020 1.5 1.5
1 1 01/05/2020 NaN 1.5
2 1 02/03/2020 NaN 1.5
3 1 02/05/2020 2.6 2.6
4 1 04/01/2020 NaN 2.6
5 2 01/01/2020 NaN NaN
6 2 01/02/2020 6.0 6.0
What I want is:
id date result result1
0 1 01/01/2020 1.5 1.5
1 1 01/05/2020 NaN 1.5
2 1 02/03/2020 NaN NaN
3 1 02/05/2020 2.6 2.6
4 1 04/01/2020 NaN NaN
5 2 01/01/2020 NaN NaN
6 2 01/02/2020 6.0 6.0
You can try merge_asof:
res['date'] = pd.to_datetime(res['date'])
res = res.sort_values('date')
res1 = res.dropna(subset=['result']).rename(columns={'result': 'result1'})
out = (pd.merge_asof(res.reset_index(), res1, by='id', on='date',
                     tolerance=pd.Timedelta(30, unit='d'), direction='backward')
         .sort_values('index'))
Out[72]:
index id date result result1
0 0 1 2020-01-01 1.5 1.5
3 1 1 2020-01-05 NaN 1.5
4 2 1 2020-02-03 NaN NaN
5 3 1 2020-02-05 2.6 2.6
6 4 1 2020-04-01 NaN NaN
1 5 2 2020-01-01 NaN NaN
2 6 2 2020-01-02 6.0 6.0
Not so elegant as Ben's merge_asof, but you can do something like this:
res['date'] = pd.to_datetime(res['date'])
# valid blocks
valids = res['result'].notna().cumsum()
# first dates in each block
first_dates = res.groupby(['id',valids])['date'].transform('min')
# How far we ffill
mask = (res['date']-first_dates)<pd.Timedelta('30D')
# ffill and then mask
res['result1'] = res['result'].groupby(res['id']).ffill().where(mask)
Output:
id date result result1
0 1 2020-01-01 1.5 1.5
1 1 2020-01-05 NaN 1.5
2 1 2020-02-03 NaN NaN
3 1 2020-02-05 2.6 2.6
4 1 2020-04-01 NaN NaN
5 2 2020-01-01 NaN NaN
6 2 2020-01-02 6.0 6.0

In Python, reading multiple CSVs with different headers into one dataframe

I have dozens of csv files with similar (but not always exactly the same) headers. For instance, one has:
Year Month Day Hour Minute Direct Diffuse D_Global D_IR Zenith Test_Site
One has:
Year Month Day Hour Minute Direct Diffuse2 D_Global D_IR U_Global U_IR Zenith Test_Site
(Notice one lacks "U_Global" and "U_IR", the other has "Diffuse2" instead of "Diffuse")
I know how to pass multiple CSVs into my script, but how do I have each CSV fill only the columns for which it has values, and pass NaN to all other columns in that row?
Ideally I'd have something like:
'Year','Month','Day','Hour','Minute','Direct','Diffuse','Diffuse2','D_Global','D_IR','U_Global','U_IR','Zenith','Test_Site'
1992,1,1,0,3,-999.00,-999.00,"Nan",-999.00,-999.00,"Nan","Nan",122.517,"BER"
2013,5,30,15,55,812.84,270.62,"Nan",1078.06,-999.00,"Nan","Nan",11.542,"BER"
2004,9,1,0,1,1.04,79.40,"Nan",78.67,303.58,61.06,310.95,85.142,"ALT"
2014,12,1,0,1,0.00,0.00,"Nan",-999.00,226.95,0.00,230.16,115.410,"ALT"
The other caveat is that this dataframe needs to be appended to: it has to persist as multiple CSV files are passed in. I think I'll have it write out to its own CSV at the end (it's eventually going to NetCDF4).
Assuming you have the following CSV files:
test1.csv:
year,month,day,Direct
1992,1,1,11
2013,5,30,11
2004,9,1,11
test2.csv:
year,month,day,Direct,Direct2
1992,1,1,21,201
2013,5,30,21,202
2004,9,1,21,203
test3.csv:
year,month,day,File3
1992,1,1,text1
2013,5,30,text2
2004,9,1,text3
2016,1,1,unmatching_date
Solution:
import glob
import pandas as pd

files = glob.glob(r'd:/temp/test*.csv')

def get_merged(files, **kwargs):
    df = pd.read_csv(files[0], **kwargs)
    for f in files[1:]:
        df = df.merge(pd.read_csv(f, **kwargs), how='outer')
    return df

print(get_merged(files))
Output:
year month day Direct Direct Direct2 File3
0 1992 1 1 11.0 21.0 201.0 text1
1 2013 5 30 11.0 21.0 202.0 text2
2 2004 9 1 11.0 21.0 203.0 text3
3 2016 1 1 NaN NaN NaN unmatching_date
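One caveat worth flagging: DataFrame.merge without an on= argument joins on every column the two frames share, including data columns such as Direct. If the join should only use the date fields, a sketch pinning the keys explicitly (assuming year, month and day are the shared keys) would be:

df = df.merge(pd.read_csv(f, **kwargs), how='outer', on=['year', 'month', 'day'])

Overlapping data columns then get _x/_y suffixes instead of silently becoming join keys.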
UPDATE: the usual idiomatic pd.concat(list_of_dfs) solution wouldn't work here, because it joins by index:
In [192]: pd.concat([pd.read_csv(f) for f in glob.glob(file_mask)], axis=0, ignore_index=True)
Out[192]:
Direct Direct Direct2 File3 day month year
0 NaN 11.0 NaN NaN 1 1 1992
1 NaN 11.0 NaN NaN 30 5 2013
2 NaN 11.0 NaN NaN 1 9 2004
3 21.0 NaN 201.0 NaN 1 1 1992
4 21.0 NaN 202.0 NaN 30 5 2013
5 21.0 NaN 203.0 NaN 1 9 2004
6 NaN NaN NaN text1 1 1 1992
7 NaN NaN NaN text2 30 5 2013
8 NaN NaN NaN text3 1 9 2004
9 NaN NaN NaN unmatching_date 1 1 2016
In [193]: pd.concat([pd.read_csv(f) for f in glob.glob(file_mask)], axis=1, ignore_index=True)
Out[193]:
0 1 2 3 4 5 6 7 8 9 10 11 12
0 1992.0 1.0 1.0 11.0 1992.0 1.0 1.0 21.0 201.0 1992 1 1 text1
1 2013.0 5.0 30.0 11.0 2013.0 5.0 30.0 21.0 202.0 2013 5 30 text2
2 2004.0 9.0 1.0 11.0 2004.0 9.0 1.0 21.0 203.0 2004 9 1 text3
3 NaN NaN NaN NaN NaN NaN NaN NaN NaN 2016 1 1 unmatching_date
or using index_col=None explicitly:
In [194]: pd.concat([pd.read_csv(f, index_col=None) for f in glob.glob(file_mask)], axis=0, ignore_index=True)
Out[194]:
Direct Direct Direct2 File3 day month year
0 NaN 11.0 NaN NaN 1 1 1992
1 NaN 11.0 NaN NaN 30 5 2013
2 NaN 11.0 NaN NaN 1 9 2004
3 21.0 NaN 201.0 NaN 1 1 1992
4 21.0 NaN 202.0 NaN 30 5 2013
5 21.0 NaN 203.0 NaN 1 9 2004
6 NaN NaN NaN text1 1 1 1992
7 NaN NaN NaN text2 30 5 2013
8 NaN NaN NaN text3 1 9 2004
9 NaN NaN NaN unmatching_date 1 1 2016
In [195]: pd.concat([pd.read_csv(f, index_col=None) for f in glob.glob(file_mask)], axis=1, ignore_index=True)
Out[195]:
0 1 2 3 4 5 6 7 8 9 10 11 12
0 1992.0 1.0 1.0 11.0 1992.0 1.0 1.0 21.0 201.0 1992 1 1 text1
1 2013.0 5.0 30.0 11.0 2013.0 5.0 30.0 21.0 202.0 2013 5 30 text2
2 2004.0 9.0 1.0 11.0 2004.0 9.0 1.0 21.0 203.0 2004 9 1 text3
3 NaN NaN NaN NaN NaN NaN NaN NaN NaN 2016 1 1 unmatching_date
The following more idiomatic solution works, but it changes the original order of columns and rows:
In [224]: dfs = [pd.read_csv(f, index_col=None) for f in glob.glob(r'd:/temp/test*.csv')]
...:
...: common_cols = list(set.intersection(*[set(x.columns.tolist()) for x in dfs]))
...:
...: pd.concat((df.set_index(common_cols) for df in dfs), axis=1).reset_index()
...:
Out[224]:
month day year Direct Direct Direct2 File3
0 1 1 1992 11.0 21.0 201.0 text1
1 1 1 2016 NaN NaN NaN unmatching_date
2 5 30 2013 11.0 21.0 202.0 text2
3 9 1 2004 11.0 21.0 203.0 text3
Can't pandas take care of this automagically?
http://pandas.pydata.org/pandas-docs/stable/merging.html#concatenating-using-append
If your indices overlap, don't forget to add 'ignore_index=True'
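For reference, a minimal sketch of that concat approach (assuming the test files from the answer above); concat aligns on column names and fills missing columns with NaN, keeping one row per input row:

import glob
import pandas as pd

dfs = [pd.read_csv(f) for f in glob.glob('test*.csv')]
combined = pd.concat(dfs, ignore_index=True)  # align by column name, NaN elsewhere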
First, run through all the files to collect the full set of headers:
import os

csv_path = './csv_files'
csv_separator = ','

full_headers = []
for fn in os.listdir(csv_path):
    with open(os.path.join(csv_path, fn), 'r') as f:
        headers = f.readline().strip().split(csv_separator)
    full_headers += list(set(headers) - set(full_headers))
Then write that header line into your output file, and run through all the files again to fill it. You can use csv.DictReader(open('myfile.csv')) to match each value to its designated column simply.
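A minimal sketch of that second pass, reusing csv_path and full_headers from above (the output name combined.csv is just an assumption):

import csv
import os

with open('combined.csv', 'w', newline='') as out:
    writer = csv.DictWriter(out, fieldnames=full_headers, restval='NaN')
    writer.writeheader()
    for fn in os.listdir(csv_path):
        with open(os.path.join(csv_path, fn), newline='') as f:
            for row in csv.DictReader(f):
                writer.writerow(row)  # columns absent from this file get restval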

How to get rid of multilevel index after using pivot table pandas?

I had the following data frame (the real one is much larger than this):
sale_user_id sale_product_id count
1 1 1
1 8 1
1 52 1
1 312 5
1 315 1
Then I reshaped it to move the values of sale_product_id into the column headers, using the following code:
reshaped_df=id_product_count.pivot(index='sale_user_id',columns='sale_product_id',values='count')
and the resulting data frame is:
sale_product_id -1057 1 2 3 4 5 6 8 9 10 ... 98 980 981 982 983 984 985 986 987 99
sale_user_id
1 NaN 1.0 NaN NaN NaN NaN NaN 1.0 NaN NaN ... NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
3 NaN 1.0 NaN NaN NaN NaN NaN NaN NaN NaN ... NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
4 NaN NaN 1.0 NaN NaN NaN NaN NaN NaN NaN ... NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
As you can see, we have a multilevel index. What I need is to have sale_user_id in the first column without the multilevel indexing. I took the following approach:
reshaped_df.reset_index()
The result is as follows; I still have the sale_product_id column name, but I do not need it anymore:
sale_product_id sale_user_id -1057 1 2 3 4 5 6 8 9 ... 98 980 981 982 983 984 985 986 987 99
0 1 NaN 1.0 NaN NaN NaN NaN NaN 1.0 NaN ... NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
1 3 NaN 1.0 NaN NaN NaN NaN NaN NaN NaN ... NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
2 4 NaN NaN 1.0 NaN NaN NaN NaN NaN NaN ... NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
I can subset this data frame to get rid of sale_product_id, but I don't think that would be efficient. I am looking for an efficient way to get rid of the multilevel indexing while reshaping the original data frame.
You need to remove only the index name; use rename_axis (new in pandas 0.18.0):
print (reshaped_df)
sale_product_id 1 8 52 312 315
sale_user_id
1 1 1 1 5 1
print (reshaped_df.index.name)
sale_user_id
print (reshaped_df.rename_axis(None))
sale_product_id 1 8 52 312 315
1 1 1 1 5 1
Another solution working in pandas below 0.18.0:
reshaped_df.index.name = None
print (reshaped_df)
sale_product_id 1 8 52 312 315
1 1 1 1 5 1
If you also need to remove the columns name:
print (reshaped_df.columns.name)
sale_product_id
print (reshaped_df.rename_axis(None).rename_axis(None, axis=1))
1 8 52 312 315
1 1 1 1 5 1
Another solution:
reshaped_df.columns.name = None
reshaped_df.index.name = None
print (reshaped_df)
1 8 52 312 315
1 1 1 1 5 1
EDIT by comment:
You need reset_index with the parameter drop=True:
reshaped_df = reshaped_df.reset_index(drop=True)
print (reshaped_df)
sale_product_id 1 8 52 312 315
0 1 1 1 5 1
# if you need to reset the index and remove the column name
reshaped_df = reshaped_df.reset_index(drop=True).rename_axis(None, axis=1)
print (reshaped_df)
1 8 52 312 315
0 1 1 1 5 1
Or if you need to remove only the column name:
reshaped_df = reshaped_df.rename_axis(None, axis=1)
print (reshaped_df)
1 8 52 312 315
sale_user_id
1 1 1 1 5 1
Edit 1:
And if you need to create a new column from the index and remove the column names:
reshaped_df = reshaped_df.rename_axis(None, axis=1).reset_index()
print (reshaped_df)
sale_user_id 1 8 52 312 315
0 1 1 1 1 5 1
Make a DataFrame
import random
import pandas as pd

d = {'Country': ['Afghanistan', 'Albania', 'Algeria', 'Andorra', 'Angola'] * 2,
     'Year': [2005] * 5 + [2006] * 5,
     'Value': random.sample(range(1, 20), 10)}
df = pd.DataFrame(data=d)
df:
Country Year Value
1 Afghanistan 2005 6
2 Albania 2005 13
3 Algeria 2005 10
4 Andorra 2005 11
5 Angola 2005 5
6 Afghanistan 2006 3
7 Albania 2006 2
8 Algeria 2006 7
9 Andorra 2006 3
10 Angola 2006 6
Pivot
table = df.pivot(index='Country',columns='Year',values='Value')
Table:
Year         2005  2006
Country
Afghanistan    16     9
Albania        17    19
Algeria        11     7
Andorra         5    12
Angola          6    18
I want to clear the 'Year' axis name and get 'Country' back as a regular column:
clean_tbl = table.rename_axis(None, axis=1).reset_index()
clean_tbl:
Country 2005 2006
0 Afghanistan 16 9
1 Albania 17 19
2 Algeria 11 7
3 Andorra 5 12
4 Angola 6 18
Done!
You can also use the to_flat_index method of the MultiIndex object to convert it into a list of tuples, join each tuple with a list comprehension, and use the result to overwrite the .columns attribute of your dataframe.
# create a dataframe
df = pd.DataFrame({"a": [1, 2, 3, 1], "b": ["x", "x", "y", "y"], "c": [0.1, 0.2, 0.1, 0.2]})
a b c
0 1 x 0.1
1 2 x 0.2
2 3 y 0.1
3 1 y 0.2
# pivot the dataframe
df_pivoted = df.pivot(index="a", columns="b")
c
b x y
a
1 0.1 0.2
2 0.2 NaN
3 NaN 0.1
Now let's overwrite the .columns attribute and .reset_index():
df_pivoted.columns = ["_".join(tup) for tup in df_pivoted.columns.to_flat_index()]
df_pivoted.reset_index()
a c_x c_y
0 1 0.1 0.2
1 2 0.2 NaN
2 3 NaN 0.1
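One caveat: "_".join only works when every level of the flattened tuples is a string. With numeric levels (a pivoted Year, say), a sketch that stringifies them first:

df_pivoted.columns = ["_".join(map(str, tup)) for tup in df_pivoted.columns.to_flat_index()]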
We need reset_index() to move the index back into a dataframe column, then rename_axis(None, axis=1) to clear the columns' axis name:
reshaped_df = reshaped_df.reset_index().rename_axis(None, axis=1)
Pivot from long to wide format using pivot:
import pandas

df = pandas.DataFrame({
    "lev1": [1, 1, 1, 2, 2, 2],
    "lev2": [1, 1, 2, 1, 1, 2],
    "lev3": [1, 2, 1, 2, 1, 2],
    "lev4": [1, 2, 3, 4, 5, 6],
    "values": [0, 1, 2, 3, 4, 5]})

df_wide = df.pivot(index="lev1", columns=["lev2", "lev3"], values="values")
df_wide
df_wide
# lev2 1 2
# lev3 1 2 1 2
# lev1
# 1 0.0 1.0 2.0 NaN
# 2 4.0 3.0 NaN 5.0
Rename the (sometimes confusing) axis names
df_wide.rename_axis(columns=[None, None])
# 1 2
# 1 2 1 2
# lev1
# 1 0.0 1.0 2.0 NaN
# 2 4.0 3.0 NaN 5.0
The way it works for me is:
df_cross = pd.DataFrame(pd.crosstab(df[c1], df[c2]).to_dict()).reset_index()
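As an aside, pd.crosstab already returns a DataFrame, so the round trip through to_dict should not be needed; assuming the same c1 and c2 column names, something like this gives the same flat result:

df_cross = pd.crosstab(df[c1], df[c2]).reset_index().rename_axis(None, axis=1)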
