I tried to concatenate two Pandas DataFrames, but the concatenation comes out wrong.
Initial dataset looks like:
df
>>>
well qoil cum_oil wct top_perf bot_perf st x y
5233 101 259 3.684131e+05 97 -2352.13 -2359.12 0 517228 5931024
12786 102 3495 1.369303e+06 5.47 -2352.92 -2566.81 0 517192 5927187
13062 103 2691 1.353718e+06 0.5 -2377.93 -2581.73 0 517731 5926430
. . . .
65 rows × 9 columns
Then I compute the Euclidean distance between every pair of wells from the x and y coordinates (the last two columns):
from sklearn.neighbors import DistanceMetric
dist = DistanceMetric.get_metric('euclidean')
loc = pd.DataFrame(dist.pairwise(df[['x','y']].to_numpy()),
                   columns=df.well.unique(), index=df.well.unique())
and receive a 65x65 matrix (of type pandas.core.frame.DataFrame) that contains the distance between every pair of wells:
loc
>>>
101 102 103 . . .
101 0.000000 152.278917 270.835312 . . .
102 152.278917 0.000000 326.310146 . . .
103 270.835312 326.310146 0.000000 . . .
. . .
Then I drop the extra columns and concatenate the two dataframes:
df_train_prep = df.drop(['well', 'wct', 'x', 'y'], axis=1)
df2 = pd.concat([df_train_prep, loc], axis=1)
As a result I receive not a 65 rows × (5 + 65) columns dataframe but a 130 rows × 70 columns one:
df2
>>>
qoil cum_oil top_perf bot_perf st 101 102 103 . . .
236 0.001 542681.0 -2427.66 -2539.25 0.0 NaN NaN NaN NaN NaN ...
258 2291 292356.0 -2537.38 -2657.02 1.0 NaN NaN NaN NaN NaN ...
537 3290 237163.0 -2714.32 -2741.49 0.0 NaN NaN NaN NaN NaN ...
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
101 NaN NaN NaN NaN NaN 0.000000 157.278917 280.835312 323.423701 ...
102 NaN NaN NaN NaN NaN 154.278917 0.000000 356.310146 210.348200 518.786999 ...
It looks like some of the data was concatenated to the right, but the rest moved to the bottom. Moreover, strange NaN values popped up.
Please help me understand what I am doing wrong.
# Dummy data
import pandas as pd
from sklearn.metrics.pairwise import euclidean_distances

df = pd.DataFrame({'x': range(5), 'y': range(5)})

# Pairwise euclidean distances
distance = pd.DataFrame(euclidean_distances(df[['x', 'y']]))

# Concatenate
df = pd.concat([df, distance], axis=1)
print(df)
Output:
x y 0 1 2 3 4
0 0 0 0.000000 1.414214 2.828427 4.242641 5.656854
1 1 1 1.414214 0.000000 1.414214 2.828427 4.242641
2 2 2 2.828427 1.414214 0.000000 1.414214 2.828427
3 3 3 4.242641 2.828427 1.414214 0.000000 1.414214
4 4 4 5.656854 4.242641 2.828427 1.414214 0.000000
As you can see, the pairwise distance matrix is symmetric.
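The question's 130 × 70 result follows from the same rule: pd.concat(axis=1) aligns rows on the index. df_train_prep keeps df's original integer index (5233, 12786, ...), while loc is indexed by well number (101, 102, ...), so no rows match and the two frames get stacked with NaN padding. A minimal fix, assuming both frames list the wells in the same order, is to give loc the same index before concatenating:

loc.index = df_train_prep.index                 # align on df's original row labels
df2 = pd.concat([df_train_prep, loc], axis=1)   # 65 rows × (5 + 65) columns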
I created a script to load data, check NA values, and fill all NA values. Here is my code:
import pandas as pd

def filter_df(merged_df, var_list):
    ind = merged_df.Name.isin(var_list)
    return merged_df[ind]

def pivot_df(df):
    return df.pivot(index='Date', columns='Name', values=['Open', 'High', 'Low', 'Close'])

def validation_df(input, summary=False):
    df = input.copy()

    # na check
    missing = df.isna().sum().sort_values(ascending=False)
    percent_missing = ((missing / df.isnull().count()) * 100).sort_values(ascending=False)
    missing_df = pd.concat([missing, percent_missing], axis=1, keys=['Total', 'Percent'], sort=False)

    # fill na
    columns = list(missing_df[missing_df['Total'] >= 1].reset_index()['index'])
    for col in columns:
        null_index = df.index[df[col].isnull() == True].tolist()
        null_index.sort()
        for ind in null_index:
            if ind > 0:
                print(df.loc[ind, col])
                print(df.loc[ind - 1, col])
                df.loc[ind, col] = df.loc[ind - 1, col]
            if ind == 0:
                df.loc[ind, col] = 0

    # outliers check
    count = []
    for col in df.columns:
        count.append(sum(df[col] > df[col].mean() + 2 * df[col].std()) + sum(df[col] < df[col].mean() - 2 * df[col].std()))
    outliers_df = pd.DataFrame({'Columns': df.columns, 'Count': count}).sort_values(by='Count')

    if summary == True:
        print('missing value check:\n')
        print(missing_df)
        print('\n outliers check:\n')
        print(outliers_df)

    return df

def join_df(price_df, transaction_df, var_list):
    price_df = filter_df(price_df, var_list)
    price_df = pivot_df(price_df)
    joined_df = transaction_df.merge(price_df, how='left', on='Date')
    #joined_df = validation_df(joined_df)
    return joined_df

token_path = 'https://raw.githubusercontent.com/Carloszone/Cryptocurrency_Research_project/main/datasets/1_token_df.csv'
transaction_path = 'https://raw.githubusercontent.com/Carloszone/Cryptocurrency_Research_project/main/datasets/transaction_df.csv'
var_list = ['Bitcoin', 'Ethereum', 'Golem', 'Solana']

token_df = pd.read_csv(token_path)
transaction_df = pd.read_csv(transaction_path)
df = join_df(token_df, transaction_df, var_list)
df = validation_df(df)
But it did not work. I checked my code and found that the issue comes from df.loc[]. For example:
df = join_df(token_df, transaction_df, var_list)
print(df[df.columns[15]])
print(df.loc[1,df.columns[15]])
what I got is:
0 NaN
1 NaN
2 NaN
3 NaN
4 NaN
..
2250 NaN
2251 NaN
2252 NaN
2253 NaN
2254 NaN
Name: (High, Solana), Length: 2255, dtype: float64
AssertionError Traceback (most recent call last)
<ipython-input-19-75f01cc22c9c> in <module>()
2
3 print(df[df.columns[15]])
----> 4 print(df.loc[1,df.columns[15]])
/usr/local/lib/python3.7/dist-packages/pandas/core/indexing.py in __getitem__(self, key)
923 with suppress(KeyError, IndexError):
924 return self.obj._get_value(*key, takeable=self._takeable)
--> 925 return self._getitem_tuple(key)
926 else:
927 # we by definition only have the 0th axis
/usr/local/lib/python3.7/dist-packages/pandas/core/indexing.py in _getitem_tuple(self, tup)
1107 return self._multi_take(tup)
1108
-> 1109 return self._getitem_tuple_same_dim(tup)
1110
1111 def _get_label(self, label, axis: int):
/usr/local/lib/python3.7/dist-packages/pandas/core/indexing.py in _getitem_tuple_same_dim(self, tup)
807 # We should never have retval.ndim < self.ndim, as that should
808 # be handled by the _getitem_lowerdim call above.
--> 809 assert retval.ndim == self.ndim
810
811 return retval
AssertionError:
I don't know why df[column_name] works but df.loc[index, column_name] raises an error.
You can check my code on Colab: https://colab.research.google.com/drive/1Yg280JRwFayW1tdp4OJqTO5-X3dGsItB?usp=sharing
The issue is that you're merging two DataFrames on a column they don't share (because you pivoted price_df, its Date column became the index). Also, the Date columns don't have a uniform format across the two DataFrames, so you have to make them match. Replace your join_df function with the one below and it will work as expected.
I added comments on the lines that had to be added.
def join_df(price_df, transaction_df, var_list):
    price_df = filter_df(price_df, var_list)
    price_df = pivot_df(price_df)
    # After the pivot the Date column is the index, and price_df has MultiIndex columns;
    # since we want to merge it with transaction_df, we first need to flatten the columns
    price_df.columns = price_df.columns.map('.'.join)
    # and reset_index so that the index becomes the Date column again
    price_df = price_df.reset_index()
    # the Dates are formatted differently across the two DataFrames;
    # one has the format '2016-01-01' and the other '2016/1/1';
    # to get a uniform format, we convert both Date columns to datetime objects
    price_df['Date'] = pd.to_datetime(price_df['Date'])
    transaction_df['Date'] = pd.to_datetime(transaction_df['Date'])
    joined_df = transaction_df.merge(price_df, how='left', on='Date')
    #joined_df = validation_df(joined_df)
    return joined_df
Output:
Date total_transaction_count Volume gas_consumption \
0 2016-01-01 2665 NaN NaN
1 2016-01-02 4217 NaN NaN
2 2016-01-03 4396 NaN NaN
3 2016-01-04 4776 NaN NaN
4 2016-01-05 26649 NaN NaN
... ... ... ... ...
2250 2022-02-28 1980533 1.968686e+06 8.626201e+11
2251 2022-03-01 2013145 2.194055e+06 1.112079e+12
2252 2022-03-02 1987934 2.473327e+06 1.167615e+12
2253 2022-03-03 1973190 3.093248e+06 1.260826e+12
2254 2022-03-04 1861286 4.446204e+06 1.045814e+12
old_ave_gas_fee new_avg_gas_fee new_avg_base_fee \
0 0.000000e+00 0.000000e+00 0.000000e+00
1 0.000000e+00 0.000000e+00 0.000000e+00
2 0.000000e+00 0.000000e+00 0.000000e+00
3 0.000000e+00 0.000000e+00 0.000000e+00
4 0.000000e+00 0.000000e+00 0.000000e+00
... ... ... ...
2250 6.356288e-08 6.356288e-08 5.941877e-08
2251 5.368574e-08 5.368574e-08 4.982823e-08
2252 5.567472e-08 5.567472e-08 4.782055e-08
2253 4.763823e-08 4.763823e-08 4.140883e-08
2254 4.566440e-08 4.566440e-08 3.547666e-08
new_avg_priority_fee Open.Bitcoin Open.Ethereum ... High.Golem \
0 0.000000e+00 430.0 NaN ... NaN
1 0.000000e+00 434.0 NaN ... NaN
2 0.000000e+00 433.7 NaN ... NaN
3 0.000000e+00 430.7 NaN ... NaN
4 0.000000e+00 433.3 NaN ... NaN
... ... ... ... ... ...
2250 4.144109e-09 37707.2 2616.34 ... 0.48904
2251 3.857517e-09 43187.2 2922.44 ... 0.48222
2252 7.854179e-09 44420.3 2975.80 ... 0.47550
2253 6.229401e-09 NaN NaN ... NaN
2254 1.018774e-08 NaN NaN ... NaN
High.Solana Low.Bitcoin Low.Ethereum Low.Golem Low.Solana \
0 NaN 425.9 NaN NaN NaN
1 NaN 430.7 NaN NaN NaN
2 NaN 423.1 NaN NaN NaN
3 NaN 428.6 NaN NaN NaN
4 NaN 428.9 NaN NaN NaN
... ... ... ... ... ...
2250 NaN 37458.9 2574.12 0.41179 NaN
2251 NaN 42876.6 2858.54 0.45093 NaN
2252 NaN 43361.3 2914.70 0.43135 NaN
2253 NaN NaN NaN NaN NaN
2254 NaN NaN NaN NaN NaN
Close.Bitcoin Close.Ethereum Close.Golem Close.Solana
0 434.0 NaN NaN NaN
1 433.7 NaN NaN NaN
2 430.7 NaN NaN NaN
3 433.3 NaN NaN NaN
4 431.2 NaN NaN NaN
... ... ... ... ...
2250 43188.2 2922.50 0.47748 NaN
2251 44420.3 2975.81 0.47447 NaN
2252 43853.2 2952.47 0.43964 NaN
2253 NaN NaN NaN NaN
2254 NaN NaN NaN NaN
[2255 rows x 24 columns]
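As an aside, the flattening step price_df.columns.map('.'.join) works because each entry of a MultiIndex is a tuple of strings that '.'.join glues together. A small sketch with hypothetical column names:

import pandas as pd

# hypothetical MultiIndex like the one pivot_df produces
cols = pd.MultiIndex.from_product([['Open', 'Close'], ['Bitcoin', 'Solana']])
print(cols.map('.'.join).tolist())
# ['Open.Bitcoin', 'Open.Solana', 'Close.Bitcoin', 'Close.Solana']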
I want to merge the following 2 data frames in Pandas, but the result doesn't contain all the relevant columns:
L1aIn[0:5]
Filename OrbitNumber OrbitMode OrbitModeCounter Year Month Day L1aIn
0 oco2_L1aInDP_35863a_210329_B10206_210330111927.h5 35863 DP a 2021 3 29 1
1 oco2_L1aInDP_35862a_210329_B10206_210330111935.h5 35862 DP a 2021 3 29 1
2 oco2_L1aInDP_35861b_210329_B10206_210330111934.h5 35861 DP b 2021 3 29 1
3 oco2_L1aInLP_35861a_210329_B10206_210330111934.h5 35861 LP a 2021 3 29 1
4 oco2_L1aInSP_35861a_210329_B10206_210330111934.h5 35861 SP a 2021 3 29 1
L2Std[0:5]
Filename OrbitNumber OrbitMode OrbitModeCounter Year Month Day L2Std
0 oco2_L2StdGL_35861a_210329_B10206r_21042704283... 35861 GL a 2021 3 29 1
1 oco2_L2StdXS_35860a_210329_B10206r_21042700342... 35860 XS a 2021 3 29 1
2 oco2_L2StdND_35852a_210329_B10206r_21042622540... 35852 ND a 2021 3 29 1
3 oco2_L2StdGL_35862a_210329_B10206r_21042622403... 35862 GL a 2021 3 29 1
4 oco2_L2StdTG_35856a_210329_B10206r_21042622422... 35856 TG a 2021 3 29 1
>>> df = L1aIn.copy(deep=True)
>>> df.merge(L2Std, how="outer", on=["OrbitNumber","OrbitMode","OrbitModeCounter"])
0 oco2_L1aInDP_35863a_210329_B10206_210330111927.h5 35863 DP a ... NaN NaN NaN NaN
1 oco2_L1aInDP_35862a_210329_B10206_210330111935.h5 35862 DP a ... NaN NaN NaN NaN
2 oco2_L1aInDP_35861b_210329_B10206_210330111934.h5 35861 DP b ... NaN NaN NaN NaN
3 oco2_L1aInLP_35861a_210329_B10206_210330111934.h5 35861 LP a ... NaN NaN NaN NaN
4 oco2_L1aInSP_35861a_210329_B10206_210330111934.h5 35861 SP a ... NaN NaN NaN NaN
5 NaN 35861 GL a ... 2021.0 3.0 29.0 1.0
6 NaN 35860 XS a ... 2021.0 3.0 29.0 1.0
7 NaN 35852 ND a ... 2021.0 3.0 29.0 1.0
8 NaN 35862 GL a ... 2021.0 3.0 29.0 1.0
9 NaN 35856 TG a ... 2021.0 3.0 29.0 1.0
[10 rows x 13 columns]
>>> df.columns
Index(['Filename', 'OrbitNumber', 'OrbitMode', 'OrbitModeCounter', 'Year',
'Month', 'Day', 'L1aIn'],
dtype='object')
I want the resulting merged table to include both the "L1aIn" and "L2Std" columns but as you can see it doesn't and only picks up the original columns from L1aIn.
I'm also puzzled about why it seems to be returning a dataframe object rather than None.
A toy example works fine for me, but the real-life one does not. What circumstances provoke this kind of behavior for merge?
Seems to me that you just need to assign the output to a variable. merge returns a new DataFrame rather than modifying df in place, which is also why the un-assigned call echoes a DataFrame instead of None:
merged_df = df.merge(L2Std, how="outer", on=["OrbitNumber","OrbitMode","OrbitModeCounter"])
print(merged_df.columns)
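For completeness, a tiny sketch of the distinction (the frames here are made up): an un-assigned merge call builds and returns a new DataFrame that is simply discarded, leaving the original untouched:

import pandas as pd

a = pd.DataFrame({'key': [1, 2], 'left': ['x', 'y']})
b = pd.DataFrame({'key': [1, 2], 'right': ['u', 'v']})

a.merge(b, on='key')           # returns a new frame; a itself is unchanged
merged = a.merge(b, on='key')  # keep the result by assigning it
print(list(a.columns))         # ['key', 'left']
print(list(merged.columns))    # ['key', 'left', 'right']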
I am processing information in several Pandas DataFrames with 10,000+ rows.
I have...
df1, student information
Class Number Student ID
0 13530159 201733468
1 13530159 201736271
2 13530159 201833263
3 13530159 201931506
4 13530159 201933329
...
df2, student responses
title time stu_id score
0 Unit 12 - Reading Homework 10/30/2020 22:06:53 202031164 100
1 Unit 10 - Vocabulary Homework 11/1/2020 21:07:44 202031674 100
2 Unit 10 - Vocabulary Homework 11/3/2020 17:20:55 202032311 100
3 Unit 12 - Reading Homework 11/6/2020 6:04:37 202031164 95
4 Unit 12 - Reading Homework 11/7/2020 5:49:15 202031164 90
...
I want...
a DataFrame with columns for the class number, student ID, and unique assignment titles. The assignment columns should contain the students' highest score for that assignment. There can be 20+ assignments / columns. A student can have many different scores for a single assignment. I only want the highest. I also want to omit scores submitted after a specific date.
df3, highest student grades
Class Number Student ID Unit 12 - Reading Homework Unit 10 - Vocabulary Homework ...
0 13530159 201733468 100 85 ...
1 13530159 201736271 95 70 ...
2 13530159 201833263 75 65 ...
3 13530159 201931506 80 85 ...
4 13530159 201933329 65 75 ...
...
What is the most efficient way? I will do this several dozen times.
PS, the DataFrames are based on 50+ Google Sheets. I could go back and compile a new DataFrame from the original sheets, but this is time consuming. I'm hoping there is an easier, faster way.
PPS, I've read similar questions: Pandas: efficient way to combine dataframes, Pandas apply a function of multiple columns, row-wise, Conditionally fill column values based on another columns value in pandas, etc. None of them specifically address my question.
Of course I don't have your data, so I have to "fake" some data but this should work:
import numpy
import pandas
import random

# Student info
df_1 = pandas.DataFrame(
    [
        {"Class Number": random.randint(13530159, 13530259), "Student ID": student_id}
        for student_id in range(201733468, 201735468)
    ]
)

# Student responses
df_2 = pandas.DataFrame(
    [
        {
            "title": f"Unit {random.randint(1, 10)} - ...",
            "time": pandas.Timestamp(random.randint(1577870112, 1606814112), unit="s"),
            "stu_id": random.randint(201733468, 201735468),
            "score": random.randint(10, 100),
        }
        for _ in range(10000)
    ]
)

# Merge the two dataframes together
df = df_1.merge(df_2, left_on="Student ID", right_on="stu_id")

# Create a pivot table, using "max" as the aggregation function
result = pandas.pivot_table(
    df, index=["Class Number", "Student ID"], columns="title", values="score", aggfunc=numpy.max
).reset_index()
Output:
title Class Number Student ID Unit 1 - ... Unit 10 - ... Unit 2 - ... \
0 13530159 201733485 NaN NaN NaN
1 13530159 201733705 NaN NaN 16.0
2 13530159 201734020 NaN 92.0 67.0
3 13530159 201734028 100.0 42.0 NaN
4 13530159 201734218 NaN 50.0 41.0
... ... ... ... ... ...
1989 13530259 201734501 NaN 19.0 32.0
1990 13530259 201734760 NaN NaN NaN
1991 13530259 201734954 NaN NaN NaN
1992 13530259 201735137 NaN NaN 83.0
1993 13530259 201735266 NaN 26.0 NaN
title Unit 3 - ... Unit 4 - ... Unit 5 - ... Unit 6 - ... \
0 45.0 NaN NaN 39.0
1 46.0 NaN NaN NaN
2 NaN 89.0 88.0 NaN
3 NaN NaN NaN NaN
4 100.0 NaN NaN 88.0
... ... ... ... ...
1989 NaN NaN 48.0 NaN
1990 33.0 NaN NaN NaN
1991 NaN NaN NaN 74.0
1992 NaN NaN NaN 13.0
1993 35.0 62.0 NaN 43.0
title Unit 7 - ... Unit 8 - ... Unit 9 - ...
0 NaN 65.0 65.0
1 NaN NaN NaN
2 90.0 NaN 88.0
3 NaN 16.0 92.0
4 NaN 77.0 NaN
... ... ... ...
1989 35.0 94.0 NaN
1990 34.0 NaN 45.0
1991 NaN 21.0 19.0
1992 NaN 99.0 60.0
1993 83.0 51.0 NaN
[1994 rows x 12 columns]
NOTE: The output contains a lot of NaN values but that is because I'm generating data randomly. This means that not all students will have a result for all classes. If there is no result for a class the value will be NaN.
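The question also asks to omit scores submitted after a specific date; that is not covered above, but a minimal sketch (the cutoff date is hypothetical) is to filter df_2 on its time column before the merge and pivot:

# hypothetical cutoff; drop responses submitted after it before merging/pivoting
cutoff = pandas.Timestamp("2020-11-15")
df_2 = df_2[df_2["time"] <= cutoff]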
Here is my definition:
import numpy as np
from datetime import datetime
from dateutil.relativedelta import relativedelta

def fill(df_name):
    """
    Function to fill rows and dates.
    """
    # Fill Down
    for row in df_name[0]:
        if 'Unnamed' in row:
            df_name[0] = df_name[0].replace(row, np.nan)
    df_name[0] = df_name[0].ffill(limit=2)
    df_name[1] = df_name[1].ffill(limit=2)

    # Fill in Dates
    for col in df_name.columns:
        if col >= 3:
            old_dt = datetime(1998, 11, 15)
            add_dt = old_dt + relativedelta(months=col - 3)
            new_dt = add_dt.strftime('%#m/%d/%Y')
            df_name = df_name.rename(columns={col: new_dt})
and then I call:
fill(df_cars)
The first half of the function works (columns 0 and 1 are filled in correctly). However, as you can see below, the columns are still labeled 0-288. When I delete this function and simply run its body directly (changing df_name to df_cars), it runs correctly and the column names are the dates specified in the second half of the function.
What could be causing the # Fill in Dates portion not to take effect when defined in a function? Does it have to do with local variables?
0 1 2 3 4 5 ... 287 288 289 290 291 292
0 France NaN Market 3330 7478 2273 ... NaN NaN NaN NaN NaN NaT
1 France NaN World 362 798 306 ... NaN NaN NaN NaN NaN NaT
2 France NaN % 0.108709 0.106713 0.134624 ... NaN NaN NaN NaN NaN NaT
3 Germany NaN Market 1452 2025 1314 ... NaN NaN NaN NaN NaN NaT
4 Germany NaN World 209 246 182 ... NaN NaN NaN NaN NaN NaT
.. ... ... ... ... ... ... ... ... ... ... ... ... ..
349 Slovakia 0 World 1 1 0 ... NaN NaN NaN NaN NaN NaT
350 Slovakia 0 % 0.5 0.5 0 ... NaN NaN NaN NaN NaN NaT
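A likely explanation, sketched below: the assignments in the first half (df_name[0] = ...) mutate the DataFrame object that df_cars also refers to, so the caller sees them, but df_name = df_name.rename(...) only rebinds the local name to a new DataFrame, which the caller never sees. A minimal, self-contained demo of the difference and the usual fix (return the frame and reassign):

import pandas as pd

def fill(df_name):
    df_name[0] = df_name[0].ffill(limit=2)         # mutates the shared object: caller sees it
    df_name = df_name.rename(columns={2: 'date'})  # rebinds the local name: caller does not
    return df_name                                 # so hand the renamed frame back

df_cars = pd.DataFrame({0: ['a', None, None], 1: [1, 2, 3], 2: [4, 5, 6]})
df_cars = fill(df_cars)                            # reassign to pick up the new column names
print(df_cars.columns.tolist())                    # [0, 1, 'date']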
I have a dataframe like this:
ID Date Value
783 C 2018-02-23 0.704
580 B 2018-08-04 -1.189
221 A 2018-08-10 -0.788
228 A 2018-08-17 0.038
578 B 2018-08-02 1.188
What I want is to expand the dataframe based on the Date column back to one month earlier, filling ID with the same person and Value with NaN up to the last observation.
The expected result is similar to this:
ID Date Value
0 C 2018/01/24 nan
1 C 2018/01/25 nan
2 C 2018/01/26 nan
3 C 2018/01/27 nan
4 C 2018/01/28 nan
5 C 2018/01/29 nan
6 C 2018/01/30 nan
7 C 2018/01/31 nan
8 C 2018/02/01 nan
9 C 2018/02/02 nan
10 C 2018/02/03 nan
11 C 2018/02/04 nan
12 C 2018/02/05 nan
13 C 2018/02/06 nan
14 C 2018/02/07 nan
15 C 2018/02/08 nan
16 C 2018/02/09 nan
17 C 2018/02/10 nan
18 C 2018/02/11 nan
19 C 2018/02/12 nan
20 C 2018/02/13 nan
21 C 2018/02/14 nan
22 C 2018/02/15 nan
23 C 2018/02/16 nan
24 C 2018/02/17 nan
25 C 2018/02/18 nan
26 C 2018/02/19 nan
27 C 2018/02/20 nan
28 C 2018/02/21 nan
29 C 2018/02/22 nan
30 C 2018/02/23 1.093
31 B 2018/07/05 nan
32 B 2018/07/06 nan
33 B 2018/07/07 nan
34 B 2018/07/08 nan
35 B 2018/07/09 nan
36 B 2018/07/10 nan
37 B 2018/07/11 nan
38 B 2018/07/12 nan
39 B 2018/07/13 nan
40 B 2018/07/14 nan
41 B 2018/07/15 nan
42 B 2018/07/16 nan
43 B 2018/07/17 nan
44 B 2018/07/18 nan
45 B 2018/07/19 nan
46 B 2018/07/20 nan
47 B 2018/07/21 nan
48 B 2018/07/22 nan
49 B 2018/07/23 nan
50 B 2018/07/24 nan
51 B 2018/07/25 nan
52 B 2018/07/26 nan
53 B 2018/07/27 nan
54 B 2018/07/28 nan
55 B 2018/07/29 nan
56 B 2018/07/30 nan
57 B 2018/07/31 nan
58 B 2018/08/01 nan
59 B 2018/08/02 nan
60 B 2018/08/03 nan
61 B 2018/08/04 0.764
62 A 2018/07/11 nan
63 A 2018/07/12 nan
64 A 2018/07/13 nan
65 A 2018/07/14 nan
66 A 2018/07/15 nan
67 A 2018/07/16 nan
68 A 2018/07/17 nan
69 A 2018/07/18 nan
70 A 2018/07/19 nan
71 A 2018/07/20 nan
72 A 2018/07/21 nan
73 A 2018/07/22 nan
74 A 2018/07/23 nan
75 A 2018/07/24 nan
76 A 2018/07/25 nan
77 A 2018/07/26 nan
78 A 2018/07/27 nan
79 A 2018/07/28 nan
80 A 2018/07/29 nan
81 A 2018/07/30 nan
82 A 2018/07/31 nan
83 A 2018/08/01 nan
84 A 2018/08/02 nan
85 A 2018/08/03 nan
86 A 2018/08/04 nan
87 A 2018/08/05 nan
88 A 2018/08/06 nan
89 A 2018/08/07 nan
90 A 2018/08/08 nan
91 A 2018/08/09 nan
92 A 2018/08/10 2.144
93 A 2018/07/18 nan
94 A 2018/07/19 nan
95 A 2018/07/20 nan
96 A 2018/07/21 nan
97 A 2018/07/22 nan
98 A 2018/07/23 nan
99 A 2018/07/24 nan
100 A 2018/07/25 nan
101 A 2018/07/26 nan
102 A 2018/07/27 nan
103 A 2018/07/28 nan
104 A 2018/07/29 nan
105 A 2018/07/30 nan
106 A 2018/07/31 nan
107 A 2018/08/01 nan
108 A 2018/08/02 nan
109 A 2018/08/03 nan
110 A 2018/08/04 nan
111 A 2018/08/05 nan
112 A 2018/08/06 nan
113 A 2018/08/07 nan
114 A 2018/08/08 nan
115 A 2018/08/09 nan
116 A 2018/08/10 nan
117 A 2018/08/11 nan
118 A 2018/08/12 nan
119 A 2018/08/13 nan
120 A 2018/08/14 nan
121 A 2018/08/15 nan
122 A 2018/08/16 nan
123 A 2018/08/17 0.644
124 B 2018/07/03 nan
125 B 2018/07/04 nan
126 B 2018/07/05 nan
127 B 2018/07/06 nan
128 B 2018/07/07 nan
129 B 2018/07/08 nan
130 B 2018/07/09 nan
131 B 2018/07/10 nan
132 B 2018/07/11 nan
133 B 2018/07/12 nan
134 B 2018/07/13 nan
135 B 2018/07/14 nan
136 B 2018/07/15 nan
137 B 2018/07/16 nan
138 B 2018/07/17 nan
139 B 2018/07/18 nan
140 B 2018/07/19 nan
141 B 2018/07/20 nan
142 B 2018/07/21 nan
143 B 2018/07/22 nan
144 B 2018/07/23 nan
145 B 2018/07/24 nan
146 B 2018/07/25 nan
147 B 2018/07/26 nan
148 B 2018/07/27 nan
149 B 2018/07/28 nan
150 B 2018/07/29 nan
151 B 2018/07/30 nan
152 B 2018/07/31 nan
153 B 2018/08/01 nan
154 B 2018/08/02 -0.767
The source data can be created as below:
import pandas as pd
from itertools import chain
import numpy as np

dates = pd.date_range(start='2018-01-01', end='2018-12-31').tolist()
df_1 = pd.DataFrame({
    'ID': list(chain.from_iterable([['A'] * 365, ['B'] * 365, ['C'] * 365])),
    'Date': dates * 3,
    'Value': np.random.randn(365 * 3)
})
df_1 = df_1.sample(5, random_state=123)
Thanks for the advice!
You can create another DataFrame holding the dates one month earlier, join the two together with concat, and create a DatetimeIndex, which makes it possible to use groupby with resample('D') to add all the daily rows in between:
df_2 = df_1.assign(Date=df_1['Date'] - pd.DateOffset(months=1) + pd.DateOffset(days=1),
                   Value=np.nan)

df = (pd.concat([df_2, df_1], sort=False)
        .reset_index()
        .set_index('Date')
        .groupby('index', sort=False)
        .resample('D')
        .ffill()
        .reset_index(level=1)
        .drop('index', axis=1)
        .rename_axis(None))
print(df)
Date ID Value
783 2018-01-24 C NaN
783 2018-01-25 C NaN
783 2018-01-26 C NaN
783 2018-01-27 C NaN
783 2018-01-28 C NaN
.. ... .. ...
578 2018-07-29 B NaN
578 2018-07-30 B NaN
578 2018-07-31 B NaN
578 2018-08-01 B NaN
578 2018-08-02 B 0.562684
[155 rows x 3 columns]
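A quick check of the offset arithmetic: subtracting one month and then adding one day makes each range start the day after "one month earlier", which is why the first row for the 2018-02-23 observation lands on 2018-01-24:

d = pd.Timestamp('2018-02-23')
print(d - pd.DateOffset(months=1) + pd.DateOffset(days=1))  # 2018-01-24 00:00:00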
Another solution with a list comprehension and concat; at the end it is necessary to back-fill the index and ID columns, and this solution only works if there are no missing values in the original ID column:
offset = pd.DateOffset(months=1)
df = pd.concat([df_1.iloc[[i]].reset_index().set_index('Date')
                    .reindex(pd.date_range(d - offset + pd.DateOffset(days=1), d))
                for i, d in enumerate(df_1['Date'])], sort=False)

df = (df.assign(index=df['index'].bfill().astype(int), ID=df['ID'].bfill())
        .rename_axis('Date')
        .reset_index()
        .set_index('index')
        .rename_axis(None))
print(df)
Date ID Value
783 2018-01-24 C NaN
783 2018-01-25 C NaN
783 2018-01-26 C NaN
783 2018-01-27 C NaN
783 2018-01-28 C NaN
.. ... .. ...
578 2018-07-29 B NaN
578 2018-07-30 B NaN
578 2018-07-31 B NaN
578 2018-08-01 B NaN
578 2018-08-02 B 1.224345
[155 rows x 3 columns]
We can create a date range in the "Date" column, then explode it.
Then group the "Value" column by the index and set all values to NaN except the last.
Finally, reset the index.
def drange(t):
    return pd.date_range(t - pd.DateOffset(months=1) + pd.DateOffset(days=1), t, freq="D", normalize=True)

df["Date"] = df["Date"].transform(drange)
ID Date Value
index
783 C DatetimeIndex(['2018-01-24', '2018-01-25', '20... 0.704
580 B DatetimeIndex(['2018-07-05', '2018-07-06', '20... -1.189
221 A DatetimeIndex(['2018-07-11', '2018-07-12', '20... -0.788
228 A DatetimeIndex(['2018-07-18', '2018-07-19', '20... 0.038
578 B DatetimeIndex(['2018-07-03', '2018-07-04', '20... 1.188
df = df.reset_index(drop=True).explode(column="Date")
ID Date Value
0 C 2018-01-24 0.704
0 C 2018-01-25 0.704
0 C 2018-01-26 0.704
0 C 2018-01-27 0.704
0 C 2018-01-28 0.704
.. .. ... ...
4 B 2018-07-29 1.188
4 B 2018-07-30 1.188
4 B 2018-07-31 1.188
4 B 2018-08-01 1.188
4 B 2018-08-02 1.188
df["Value"]= df.groupby(level=0)["Value"].transform(lambda v: [np.nan]*(len(v)-1)+[v.iloc[0]])
df= df.reset_index(drop=True)
ID Date Value
0 C 2018-01-24 NaN
1 C 2018-01-25 NaN
2 C 2018-01-26 NaN
3 C 2018-01-27 NaN
4 C 2018-01-28 NaN
.. .. ... ...
150 B 2018-07-29 NaN
151 B 2018-07-30 NaN
152 B 2018-07-31 NaN
153 B 2018-08-01 NaN
154 B 2018-08-02 1.188