I have the following dataframe:
Col
0 A,B,C
1 B,A,D
2 C
3 A,D,E,F
4 B,C,F
df = pd.DataFrame({'Col': ['A,B,C', 'B,A,D', 'C', 'A,D,E,F', 'B,C,F']})
which needs to be turned into:
A B C D E F
0 A B C
1 A B D
2 C
3 A D E F
4 B C F
You could use str.get_dummies to get the dummy variables, then multiply by the column labels:
tmp = df['Col'].str.get_dummies(sep=',')
out = tmp * tmp.columns
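For the sample frame, tmp is the indicator matrix below; multiplying by tmp.columns turns each 1 into its column label and each 0 into an empty string:

   A  B  C  D  E  F
0  1  1  1  0  0  0
1  1  1  0  1  0  0
2  0  0  1  0  0  0
3  1  0  0  1  1  1
4  0  1  1  0  0  1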
One-liner as suggested by @piRSquared:
out = df.Col.str.get_dummies(',').pipe(lambda d: d*[*d])
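Here [*d] unpacks the dummy frame's column labels into a list, so d*[*d] performs the same label-by-column multiplication as above.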
Output:
A B C D E F
0 A B C
1 A B D
2 C
3 A D E F
4 B C F
Benchmark:
On data created by duplicating the data in the OP:
@piRSquared's first method, using NumPy, is the fastest solution here.
On randomly generated DataFrames of increasing sizes:
Code to reproduce the plot:
import perfplot
import pandas as pd
import numpy as np
from string import ascii_uppercase  # needed by constructor2 below
def enke(df):
    tmp = df['Col'].str.get_dummies(sep=',')
    return tmp * tmp.columns

def mozway(df):
    return pd.concat([pd.Series((idx:=x.split(',')), index=idx)
                      for x in df['Col']], axis=1).T.fillna('')

def piRSquared(df):
    n = df.shape[0]
    i = np.repeat(np.arange(n), df.Col.str.count(',')+1)
    c, j = np.unique(df.Col.str.cat(sep=',').split(','), return_inverse=True)
    m = c.shape[0]
    a = np.full((n, m), '')
    a[i, j] = c[j]
    return pd.DataFrame(a, df.index, c)

def piRSquared2(df):
    n = df.shape[0]
    base = df.Col.to_numpy().astype(str)
    commas = np.char.count(base, ',')
    sepped = ','.join(base).split(',')
    i = np.repeat(np.arange(n), commas+1)
    c, j = np.unique(sepped, return_inverse=True)
    m = c.shape[0]
    a = np.full((n, m), '')
    a[i, j] = c[j]
    return pd.DataFrame(a, df.index, c)

def constructor1(n):
    df = pd.DataFrame({'Col': ['A,B,C', 'B,A,D', 'C', 'A,D,E,F', 'B,C,F']})
    return pd.concat([df]*n, ignore_index=True)

def constructor2(n):
    uc = np.array([*ascii_uppercase])
    k = [','.join(np.random.choice(uc, x, replace=False))
         for x in np.random.randint(1, 10, size=n)]
    return pd.DataFrame({'Col': k})
kernels = [enke, piRSquared, piRSquared2, mozway]
df = pd.DataFrame({'Col': ['A,B,C', 'B,A,D', 'C', 'A,D,E,F', 'B,C,F']})
perfplot.plot(
    setup=constructor1,
    kernels=kernels,
    labels=[func.__name__ for func in kernels],
    n_range=[2**k for k in range(15)],
    xlabel='len(df)',
    logx=True,
    logy=True,
    relative_to=0,
    equality_check=pd.DataFrame.equals)
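Presumably, repeating the same call with setup=constructor2 reproduces the plot on the randomly generated DataFrames.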
Using pandas.concat:
pd.concat([pd.Series((idx:=x.split(',')), index=idx)
           for x in df['Col']], axis=1).T
For Python < 3.8 (no walrus operator):
pd.concat([pd.Series(val, index=val)
           for x in df['Col']
           for val in [x.split(',')]], axis=1).T
Output:
A B C D E F
0 A B C NaN NaN NaN
1 A B NaN D NaN NaN
2 NaN NaN C NaN NaN NaN
3 A NaN NaN D E F
4 NaN B C NaN NaN F
NB: add fillna('') to have empty strings for the missing values:
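pd.concat([pd.Series((idx:=x.split(',')), index=idx)
           for x in df['Col']], axis=1).T.fillna('')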
A B C D E F
0 A B C
1 A B D
2 C
3 A D E F
4 B C F
This comes from my Project Overkill stash of tricks.
I'll use NumPy to identify where the labels are to be dropped into the 2-d array.
n = df.shape[0]                        # Get number of rows
base = df.Col.to_numpy().astype(str)   # Turn `'Col'` into a NumPy array
commas = np.char.count(base, ',')      # Count commas in each row
sepped = ','.join(base).split(',')     # Flat list of every element
i = np.repeat(np.arange(n), commas+1)  # Row indices for flat list

# Note that I could've used `pd.factorize` here but I actually wanted
# a sorted array of labels, so `np.unique` was the way to go.
# Otherwise I'd have used `j, c = pd.factorize(sepped)`
c, j = np.unique(sepped, return_inverse=True)  # `j`: col indices for flat list
                                               # `c` will be the column labels
m = c.shape[0]                         # Get number of unique labels
a = np.full((n, m), '')                # Default array of empty strings
a[i, j] = c[j]                         # Use row/col indices to insert the
                                       # column labels in the right spots
pd.DataFrame(a, df.index, c)           # Construct new dataframe
A B C D E F
0 A B C
1 A B D
2 C
3 A D E F
4 B C F
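To make the scatter step concrete, these are the intermediate values for the sample frame (easy to verify by printing them):

sepped -> ['A','B','C','B','A','D','C','A','D','E','F','B','C','F']
i      -> [0 0 0 1 1 1 2 3 3 3 3 4 4 4]   row index of each flat element
c      -> ['A' 'B' 'C' 'D' 'E' 'F']       sorted unique labels
j      -> [0 1 2 1 0 3 2 0 3 4 5 1 2 5]   column index of each flat element

so a[i, j] = c[j] writes each label into its own row, at the column matching its position in c.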
Time Testing
The Functions
import pandas as pd
import numpy as np
from string import ascii_uppercase
def pir(s):
    n = s.shape[0]
    base = s.to_numpy().astype(str)
    commas = np.char.count(base, ',')
    sepped = ','.join(base).split(',')
    i = np.repeat(np.arange(n), commas+1)
    c, j = np.unique(sepped, return_inverse=True)
    m = c.shape[0]
    a = np.full((n, m), '')
    a[i, j] = c[j]
    return pd.DataFrame(a, s.index, c)

def pir2(s):
    n = s.shape[0]
    sepped = s.str.cat(sep=',').split(',')
    commas = s.str.count(',')
    i = np.repeat(np.arange(n), commas+1)
    c, j = np.unique(sepped, return_inverse=True)
    m = c.shape[0]
    a = np.full((n, m), '')
    a[i, j] = c[j]
    return pd.DataFrame(a, s.index, c)

def mozway(s):
    return pd.concat([
        pd.Series((idx:=x.split(',')), index=idx)
        for x in s
    ], axis=1).T.fillna('')

def enke(s):
    return s.str.get_dummies(',').pipe(lambda d: d*d.columns)
The test data constructor
def constructor(n, m):
    uc = np.array([*ascii_uppercase])
    m = min(26, m)
    k = [','.join(np.random.choice(uc, x, replace=False))
         for x in np.random.randint(1, m, size=n)]
    return pd.Series(k)
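For example, constructor(3, 10) returns a Series like this (values vary per run):

0      D,Q,A
1          X
2    R,K,P,E
dtype: object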
The results dataframe
res = pd.DataFrame(
    index=['enke', 'mozway', 'pir', 'pir2'],
    columns=[10, 30, 100, 300, 1000, 3000, 10000, 30000],
    dtype=float
)
Run the test
from timeit import timeit  # needed for the timing loop below
from IPython.display import clear_output

for j in res.columns:
    s = constructor(j, 10)
    for i in res.index:
        stmt = f'{i}(s)'
        setp = f'from __main__ import s, {i}'
        res.at[i, j] = timeit(stmt, setp, number=50)
    print(res)
    clear_output(True)
Show the results
res.T.plot(loglog=True)
res.div(res.min()).T
enke mozway pir pir2
10 8.634105 19.416376 1.000000 2.300573
30 7.626107 32.741218 1.000000 2.028423
100 5.071308 50.539772 1.000000 1.533791
300 3.475711 66.638151 1.000000 1.184982
1000 2.616885 79.032159 1.012205 1.000000
3000 2.518983 91.521389 1.094863 1.000000
10000 2.536735 98.172680 1.131758 1.000000
30000 2.603489 99.756007 1.149734 1.000000
I have a pandas dataframe:

From  To
A     B
A     C
D     E
F     F
B     G
B     H
B     I
G     J
G     K
L     L
M     M
N     N
I want to convert it into a multi-column hierarchy. The expected hierarchy will look like:
Level_1  Level_2  Level_3  Level_4
A        B        G        J
A        B        G        K
A        B        H
A        B        I
A        C
D        E
F        F
L        L
M        M
N        N
Is there an in-built way in pandas to achieve this? I know I can use recursion; is there any other, simpler way?
You can easily get what you expect using networkx:
# Python env: pip install networkx
# Anaconda env: conda install networkx
import networkx as nx
import pandas as pd
df = pd.DataFrame({'From': ['A', 'A', 'D', 'F', 'B', 'B', 'B', 'G', 'G', 'L', 'M', 'N'],
'To': ['B', 'C', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N']})
G = nx.from_pandas_edgelist(df, source='From', target='To', create_using=nx.DiGraph)
roots = [v for v, d in G.in_degree() if d == 0]
leaves = [v for v, d in G.out_degree() if d == 0]
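# For this edge list: roots == ['A', 'D'] and leaves == ['C', 'E', 'H', 'I', 'J', 'K'].
# Self-loop nodes (F, L, M, N) have nonzero in- and out-degree, so they are
# neither roots nor leaves and are handled separately below.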
all_paths = []
for root in roots:
    for leaf in leaves:
        paths = nx.all_simple_paths(G, root, leaf)
        all_paths.extend(paths)

for node in nx.nodes_with_selfloops(G):
    all_paths.append([node, node])
Output:
>>> pd.DataFrame(sorted(all_paths)).add_prefix('Level_').fillna('')
Level_0 Level_1 Level_2 Level_3
0 A B G J
1 A B G K
2 A B H
3 A B I
4 A C
5 D E
6 F F
7 L L
8 M M
9 N N
Documentation: networkx.algorithms.simple_paths.all_simple_paths
Solution without networkx:
def path(df, parent, cur_path=None):
    if cur_path is None:
        cur_path = []
    x = df[df.From.eq(parent)]
    if len(x) == 0:
        yield cur_path
        return
    elif len(x) == 1:
        yield cur_path + x["To"].to_list()
        return
    for _, row in x.iterrows():
        yield from path(df, row["To"], cur_path + [row["To"]])

def is_sublist(l1, l2):
    # checks if l1 is a sublist of l2
    if len(l1) > len(l2):
        return False
    for i in range(len(l2)):
        if l1 == l2[i : i + len(l1)]:
            return True
    return False

unique_paths = []
for v in df["From"].unique():
    for p in path(df, v, [v]):
        if not any(is_sublist(p, up) for up in unique_paths):
            unique_paths.append(p)

df = pd.DataFrame(
    [{f"level_{i}": v for i, v in enumerate(p, 1)} for p in unique_paths]
).fillna("")
print(df)
Prints:
level_1 level_2 level_3 level_4
0 A B G J
1 A B G K
2 A B H
3 A B I
4 A C
5 D E
6 F F
7 L L
8 M M
9 N N
Here's an alternative method that uses repeated pandas merges to get the result:
def create_multi_level(
    df: pd.DataFrame, from_col: str = "From", to_col: str = "To"
) -> pd.DataFrame:
    # Separate root nodes from non-root
    mask = df[from_col].isin(df[to_col].unique()) & (df[from_col] != df[to_col])
    root = df[~mask].copy()
    non_root = df[mask].copy()
    # Define starting columns
    root.columns = ["level_0", "level_1"]
    non_root.columns = ["level_1", "level_2"]
    i = 1
    while True:
        # merge
        root = root.merge(non_root, on=f"level_{i}", how="left")
        # Return if we're done
        non_missing = root[f"level_{i + 1}"].notna()
        if non_missing.mean() == 0:
            return root.drop(columns=f"level_{i + 1}")
        # Increment and rename columns
        i += 1
        non_root.columns = [f"level_{i}", f"level_{i + 1}"]
Output:
>>> create_multi_level(df)
level_0 level_1 level_2 level_3
0 A B G J
1 A B G K
2 A B H NaN
3 A B I NaN
4 A C NaN NaN
5 D E NaN NaN
6 F F NaN NaN
7 L L NaN NaN
8 M M NaN NaN
9 N N NaN NaN
So I made this dataframe:

import numpy as np
import pandas as pd

alp = "abcdefghijklmnopqrstuvwxyz0123456789"
s = "carl"
for i in s:
    alp = alp.replace(i, "")
jaa = s + alp
x = list(jaa)
array = np.array(x)
re = np.reshape(array, (6, 6))
dt = pd.DataFrame(re)
dt.columns = [1, 2, 3, 4, 5, 6]
dt.index = [1, 2, 3, 4, 5, 6]
dt
1 2 3 4 5 6
1 c a r l b d
2 e f g h i j
3 k m n o p q
4 s t u v w x
5 y z 0 1 2 3
6 4 5 6 7 8 9
I want to search for a value and print its row (index) and column. For example, for 'h' the output I want is 2, 4. Is there any way to get that output?
row, col = np.where(dt == "h")
print(dt.index[row[0]], dt.columns[col[0]])
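Note that np.where returns empty arrays when the value is absent, so a guarded version (a minimal sketch) could be:

row, col = np.where(dt == "h")
if row.size:
    print(dt.index[row[0]], dt.columns[col[0]])  # -> 2 4
else:
    print("value not found")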
So I have a 1 GB input txt file (1 million lines × 10 columns), and I am using Python to process it: I compute some information from each of the 1M lines, append it to a string, and eventually save that string. When I ran my script, I realized the process got slower and slower as the string grew. I am wondering: is it possible to append each line to the output and discard the previously buffered line, to reduce memory usage? Thank you. An example of the code:
import pandas as pd
# main_df.txt has more than 1 million lines and 10 columns
main_df = pd.read_csv('main_df.txt')
"""
processing main_df into new_df, but new_df still has 1 M lines in the end
"""
sum_df = ''
# I'm guessing sum_df gets super big here as it goes, which uses up memory
# and slows the process. I have a bunch of complex loops; to simplify, I
# will just make an example with one single loop:
for i in range(len(new_df)):
    sum_df += new_df.loc[i, 1] + '\t' + new_df.loc[i, 3] + '\t' + new_df.loc[i, 5] + '\n'
with open('out.txt', 'w') as w:
    w.write(sum_df)
Hard to tell what your goal is here, but a few things might help. Here is an example df.
import string

import numpy as np
import pandas as pd

new_df = pd.DataFrame({0: np.random.choice(list(string.ascii_lowercase), size=10),
                       1: np.random.choice(list(string.ascii_lowercase), size=10),
                       2: np.random.choice(list(string.ascii_lowercase), size=10),
                       3: np.random.choice(list(string.ascii_lowercase), size=10),
                       4: np.random.choice(list(string.ascii_lowercase), size=10),
                       5: np.random.choice(list(string.ascii_lowercase), size=10),
                       6: np.random.choice(list(string.ascii_lowercase), size=10)})
print(new_df)
0 1 2 3 4 5 6
0 z k o m s k w
1 x g k k h b v
2 o y m r g l r
3 i n m q o j h
4 r d s r s p s
5 t o d w e b a
6 t z w y q s n
7 r r d x b s s
8 g v h m w c l
9 r v y i w i z
Your code outputs:
sum_df = ''  # this is a string, not a df
for i in range(len(new_df)):
    sum_df += new_df.loc[i, 1] + '\t' + new_df.loc[i, 3] + '\t' + new_df.loc[i, 5] + '\n'
print(sum_df)
k m k
g k b
y r l
n q j
d r p
o w b
z y s
r x s
v m c
v i i
I'm not really sure what your other loops are supposed to do, but the one in your example looks like it's just taking columns 1, 3, and 5. So rather than a for loop, you could do something like this.
sum_df = new_df[[1,3,5]]
print(sum_df)
1 3 5
0 k m k
1 g k b
2 y r l
3 n q j
4 d r p
5 o w b
6 z y s
7 r x s
8 v m c
9 v i i
Then save it to a .txt with something like this.
sum_df.to_csv('new_df.txt', header=None, index=None, sep='\t')
Generally speaking, you want to avoid looping over DataFrames. If you need to do something more complex than the example, you can use df.apply() to apply a custom function along an axis of the df. If you must loop over the df, df.itertuples() or df.iterrows() are preferable to plain for loops, as they use generators, like mentioned in Datanovice's comment.
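For instance, a minimal sketch of writing line by line with itertuples (assuming the new_df above):

with open('out.txt', 'w') as fh:
    for row in new_df.itertuples(index=False):
        # with index=False, row[k] is the value in positional column k,
        # so this writes columns 1, 3 and 5 tab-separated, one line at a time
        fh.write(f"{row[1]}\t{row[3]}\t{row[5]}\n")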
I eventually figured it out...
w = open('out.txt', 'a')
for i in range(len(new_df)):
    sum_df = new_df.loc[i, 1] + '\t' + new_df.loc[i, 3] + '\t' + new_df.loc[i, 5] + '\n'
    w.write(sum_df)
w.close()
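This is faster because each sum_df is now a single line that is written out and then discarded, rather than one ever-growing string; repeated += on a long string re-copies the whole buffer every iteration, which is what made the original version slow down.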
If I have the following:
df = pd.DataFrame(np.random.random((4,8)))
tupleList = zip([x for x in 'abcdefgh'], [y for y in 'iijjkkll'])
ind = pd.MultiIndex.from_tuples(tupleList)
df.columns = ind
In [71]: df
Out[71]:
a b c d e f g \
i i j j k k l
0 0.968112 0.809183 0.144320 0.518120 0.820079 0.648237 0.971552
1 0.959022 0.721705 0.139588 0.408940 0.230956 0.907192 0.467016
2 0.335085 0.537437 0.725119 0.486447 0.114048 0.150150 0.894322
3 0.051249 0.186547 0.779814 0.905914 0.024298 0.002489 0.339714
h
l
0 0.438330
1 0.225447
2 0.331413
3 0.530789
[4 rows x 8 columns]
what is the easiest way to select the columns that have a second level label of "j" or "k"?
c d e f
j j k k
0 0.948030 0.243993 0.627497 0.729024
1 0.087703 0.874968 0.581875 0.996466
2 0.802155 0.213450 0.375096 0.184569
3 0.164278 0.646088 0.201323 0.022498
I can do this:
df.loc[:, df.columns.get_level_values(1).isin(['j', 'k'])]
But that seems pretty verbose for something that feels like it should be simple. Any better approaches?
See here for MultiIndex slicers, introduced in pandas 0.14.0:
In [36]: idx = pd.IndexSlice
In [37]: df.loc[:, idx[:, ['j', 'k']]]
Out[37]:
c d e f
j j k k
0 0.750582 0.877763 0.262696 0.226005
1 0.025902 0.967179 0.125647 0.297304
2 0.463544 0.104973 0.154113 0.284820
3 0.631695 0.841023 0.820907 0.938378
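Equivalently, a plain tuple of indexers works without creating an IndexSlice first (same result, minor stylistic variation):

df.loc[:, (slice(None), ['j', 'k'])]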