Transform pandas Series with variable-length comma-separated values to DataFrame - python

I have a pandas Series 'A' containing comma-separated values like this:
index  A
1      null
2      5,6
3      3
4      null
5      5,18,22
...    ...
I need a DataFrame like this:
index  A_5  A_6  A_18  A_20
1      0    0    0     ...
2      1    1    0     ...
3      0    0    0     ...
4      0    0    0     ...
5      1    0    1     ...
...    ...  ...  ...   ...
Values that don't occur at least MIN_OBS times should be ignored and not get a column of their own, because there are so many distinct values that the DataFrame would become too big if this threshold were not applied.
I designed the solution below. It works, but is far too slow (due to iterating over rows, I suppose). Could anyone suggest a faster approach?
temp_dict = defaultdict(int)
for k, v in A.iteritems():
    temp_list = v.split(',')
    for item in temp_list:
        temp_dict[item] += 1

cols_to_make = []
for k, v in temp_dict.iteritems():
    if v > MIN_OBS:
        cols_to_make.append('A_' + k)

result_df = pd.DataFrame(0, index=A.index, columns=cols_to_make)

for k, v in A.iteritems():
    temp_list = v.split(',')
    for item in temp_list:
        if ('A_' + item) in cols_to_make:
            result_df['A_' + item][k] = 1

You can use get_dummies to create the indicator variables, then convert the column names to numbers with to_numeric, and finally filter the columns by the threshold variable TRESH using ix:
print df
             A
index
1         null
2          5,6
3            3
4         null
5      5,18,22
df = df.A.str.get_dummies(sep=",")
print df
       18  22  3  5  6  null
index
1       0   0  0  0  0     1
2       0   0  0  1  1     0
3       0   0  1  0  0     0
4       0   0  0  0  0     1
5       1   1  0  1  0     0
df.columns = pd.to_numeric(df.columns, errors='coerce')
df = df.sort_index(axis=1)
TRESH = 5
cols = [col for col in df.columns if col > TRESH]
print cols
[6.0, 18.0, 22.0]
df = df.ix[:, cols]
print df
       6  18  22
index
1      0   0   0
2      1   0   0
3      0   0   0
4      0   0   0
5      0   1   1
df.columns = ["A_" + str(int(col)) for col in df.columns]
print df
       A_6  A_18  A_22
index
1        0     0     0
2        1     0     0
3        0     0     0
4        0     0     0
5        0     1     1
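Note that .ix has since been deprecated and removed from pandas (as of 1.0), so on recent versions the same column selection can be done with .loc; a minimal sketch using the df and cols from above:
# equivalent of df.ix[:, cols] on modern pandas
df = df.loc[:, cols]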
EDIT:
I tried a modified version of unutbu's excellent original answer: I changed how the stacked Series is created, removed entries containing null, and added the prefix parameter to get_dummies:
import numpy as np
import pandas as pd
s = pd.Series(['null', '5,6', '3', 'null', '5,18,22', '3,4'])
print s
#result = s.str.split(',').apply(pd.Series).stack()
#replacing to:
result = pd.DataFrame([ x.split(',') for x in s ]).stack()
count = pd.value_counts(result)
min_obs = 2
#add removing Series, which contains null
count = count[(count >= min_obs) & ~(count.index.isin(['null'])) ]
result = result.loc[result.isin(count.index)]
#add prefix to function get_dummies
result = pd.get_dummies(result, prefix="A")
result.index = result.index.droplevel(1)
result = result.reindex(s.index)
print(result)
   A_3  A_5
0  NaN  NaN
1    0    1
2    1    0
3  NaN  NaN
4    0    1
5    1    0
Timings:
In [143]: %timeit pd.DataFrame([ x.split(',') for x in s ]).stack()
1000 loops, best of 3: 866 µs per loop
In [144]: %timeit s.str.split(',').apply(pd.Series).stack()
100 loops, best of 3: 2.46 ms per loop
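On recent pandas versions the whole EDIT above can be written more compactly; a minimal sketch, reusing s and min_obs from the snippet above (unlike the version above, rows containing only 'null' end up as all zeros rather than NaN):
# build all indicator columns at once, drop the 'null' marker,
# then keep only the values that occur at least min_obs times
dummies = s.str.get_dummies(sep=',')
dummies = dummies.drop(columns='null', errors='ignore')
keep = dummies.columns[dummies.sum() >= min_obs]
result = dummies[keep].add_prefix('A_')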

Since memory is an issue, we have to be careful not to build large intermediate
data structures if possible.
Let's start with the OP's posted code that works:
def orig(A, MIN_OBS):
    temp_dict = collections.defaultdict(int)
    for k, v in A.iteritems():
        temp_list = v.split(',')
        for item in temp_list:
            temp_dict[item] += 1
    cols_to_make = []
    for k, v in temp_dict.iteritems():
        if v > MIN_OBS:
            cols_to_make.append('A_' + k)
    result_df = pd.DataFrame(0, index=A.index, columns=cols_to_make)
    for k, v in A.iteritems():
        temp_list = v.split(',')
        for item in temp_list:
            if ('A_' + item) in cols_to_make:
                result_df['A_' + item][k] = 1
    return result_df
and extract the first loop into its own function:
def count(A, MIN_OBS):
    temp_dict = collections.Counter()
    for k, v in A.iteritems():
        temp_list = v.split(',')
        for item in temp_list:
            temp_dict[item] += 1
    temp_dict = {k: v for k, v in temp_dict.items() if v > MIN_OBS}
    return temp_dict
From experimentation in an interactive session, we can see this is not the bottleneck; even for "large" DataFrames, count(A, MIN_OBS) completes fairly quickly.
The slowness of orig occurs in the double for-loop at the end of orig,
which modifies cells in the DataFrame one value at a time
(e.g. result_df['A_' + item][k] = 1).
We could replace that double for-loop with a single for-loop over the columns of the DataFrame, using the vectorized string method A.str.contains to search for values in the strings. Since we never split the original strings into Python lists of strings (or Pandas DataFrames holding the string fragments), we save some memory.
Since orig and alt use similar data structures, their memory footprint is about the same.
def alt(A, MIN_OBS):
    temp_dict = count(A, MIN_OBS)
    df = pd.DataFrame(0, index=A.index, columns=temp_dict)
    for col in df:
        # anchor the value on both sides so that e.g. '5' does not also match '51'
        df[col] = A.str.contains(r'(?:^|,){v}(?:,|$)'.format(v=col)).astype(int)
    df.columns = ['A_{}'.format(col) for col in df]
    return df
Here is an example, on a 200K row DataFrame with 40K different possible values:
import numpy as np
import pandas as pd
import collections
np.random.seed(2016)
ncols = 5
nrows = 200000
nvals = 40000
MIN_OBS = 200
# nrows = 20
# nvals = 4
# MIN_OBS = 2
idx = np.random.randint(ncols, size=nrows).cumsum()
data = np.random.choice(np.arange(nvals), size=idx[-1])
data = np.array_split(data, idx[:-1])
data = map(','.join, [map(str, arr) for arr in data])
A = pd.Series(data)
A.loc[A == ''] = 'null'
def orig(A, MIN_OBS):
    temp_dict = collections.defaultdict(int)
    for k, v in A.iteritems():
        temp_list = v.split(',')
        for item in temp_list:
            temp_dict[item] += 1
    cols_to_make = []
    for k, v in temp_dict.iteritems():
        if v > MIN_OBS:
            cols_to_make.append('A_' + k)
    result_df = pd.DataFrame(0, index=A.index, columns=cols_to_make)
    for k, v in A.iteritems():
        temp_list = v.split(',')
        for item in temp_list:
            if ('A_' + item) in cols_to_make:
                result_df['A_' + item][k] = 1
    return result_df

def count(A, MIN_OBS):
    temp_dict = collections.Counter()
    for k, v in A.iteritems():
        temp_list = v.split(',')
        for item in temp_list:
            temp_dict[item] += 1
    temp_dict = {k: v for k, v in temp_dict.items() if v > MIN_OBS}
    return temp_dict

def alt(A, MIN_OBS):
    temp_dict = count(A, MIN_OBS)
    df = pd.DataFrame(0, index=A.index, columns=temp_dict)
    for col in df:
        # anchor the value on both sides so that e.g. '5' does not also match '51'
        df[col] = A.str.contains(r'(?:^|,){v}(?:,|$)'.format(v=col)).astype(int)
    df.columns = ['A_{}'.format(col) for col in df]
    return df
Here is a benchmark:
In [48]: %timeit expected = orig(A, MIN_OBS)
1 loops, best of 3: 3.03 s per loop
In [49]: %timeit expected = alt(A, MIN_OBS)
1 loops, best of 3: 483 ms per loop
Note that the majority of the time required for alt to complete is spent in count:
In [60]: %timeit count(A, MIN_OBS)
1 loops, best of 3: 304 ms per loop
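As an aside, on pandas 0.25+ the counting step can also be expressed without the explicit Python loop; a minimal sketch of a drop-in replacement for count, assuming Series.explode is available:
def count_explode(A, MIN_OBS):
    # split each string into a list of values, flatten, and count occurrences
    counts = A.str.split(',').explode().value_counts()
    return counts[counts > MIN_OBS].to_dict()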

Would something like this work or could it be modified to fit your need?
df = pd.DataFrame({'A': ['null', '5,6', '3', 'null', '5,18,22']}, columns=['A'])
         A
0     null
1      5,6
2        3
3     null
4  5,18,22
Then use get_dummies()
pd.get_dummies(df['A'].str.split(',').apply(pd.Series), prefix=df.columns[0])
Result:
       A_3  A_5  A_null  A_18  A_6  A_22
index
1        0    0       1     0    0     0
2        0    1       0     0    1     0
3        1    0       0     0    0     0
4        0    0       1     0    0     0
5        0    1       0     1    0     1
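A simpler variant of the same idea (a sketch): Series.str.get_dummies splits on the separator itself, so the split/apply step may not be needed, and the prefix can be added afterwards:
# one indicator column per distinct comma-separated value, prefixed with 'A_'
dummies = df['A'].str.get_dummies(sep=',').add_prefix('A_')
print(dummies)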

Related

Python/Pandas: Calculating RMS in sections

What is the best way to calculate the RMS of a column in sections in Python/pandas? Here is an example for a better understanding of what I mean:
index  x    x_rms
0      2
1      3    2.55
2      10
3      22   17.09
...    ...  ...
So 2.55 is the RMS of 2 and 3, 17.09 is the RMS of 10 and 22 and so on.
The following will work:
import numpy as np
import pandas as pd

df = pd.DataFrame([2, 3, 10, 22], columns=["x"])

def rms(a, b):
    # return round(np.sqrt((a**2 + b**2) / 2), 2)  # for only two decimals
    return np.sqrt((a**2 + b**2) / 2)

df["rms"] = [rms(df.loc[idx - 1, "x"], val["x"]) if idx % 2 != 0 else np.nan
             for idx, val in df.iterrows()]
output
x rms
0 2 NaN
1 3 2.549510
2 10 NaN
3 22 17.088007
EDIT regarding the comment:
If your index is a date, you should do this to get the same output:
values = [2, 3, 10, 22]
tidx = pd.date_range('2019-01-01', periods=len(values), freq='D')
df = pd.DataFrame(values, columns=["x"], index=tidx)

def rms(a, b):
    # return round(np.sqrt((a**2 + b**2) / 2), 2)  # for only two decimals
    return np.sqrt((a**2 + b**2) / 2)

df = df.reset_index()
df["rms"] = [rms(df.loc[idx - 1, "x"], val["x"]) if idx % 2 != 0 else np.nan
             for idx, val in df.iterrows()]
df.set_index("index")
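If you want to avoid iterrows entirely, the pairwise RMS can also be computed with a vectorized groupby; a minimal sketch, assuming an even number of rows and a default RangeIndex:
import numpy as np
import pandas as pd

df = pd.DataFrame([2, 3, 10, 22], columns=["x"])

# group consecutive rows into non-overlapping pairs: 0, 0, 1, 1, ...
pair = np.arange(len(df)) // 2
rms_per_pair = df["x"].pow(2).groupby(pair).mean().pow(0.5)

# place each pair's RMS on the second row of the pair, NaN elsewhere
df["rms"] = np.nan
df.loc[df.index[1::2], "rms"] = rms_per_pair.to_numpy()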

Grouping a list of values based on max value

I'm working on a k-means algorithm to cluster a list of numbers. If I have an array (X)
import numpy as np

X = np.array([[0.85142858], [0.85566274], [0.85364912], [0.81536489], [0.84929932],
              [0.85042336], [0.84899714], [0.82019115], [0.86112067], [0.8312496]])
then I run the following code
from sklearn.cluster import AgglomerativeClustering

cluster = AgglomerativeClustering(n_clusters=5, affinity='euclidean', linkage='ward')
cluster.fit_predict(X)
for i in range(len(X)):
    print("%4d " % cluster.labels_[i], end="")
    print(X[i])
I got the following results:
   1 [0.85142858]
   3 [0.85566274]
   3 [0.85364912]
   0 [0.81536489]
   1 [0.84929932]
   1 [0.85042336]
   1 [0.84899714]
   0 [0.82019115]
   4 [0.86112067]
   2 [0.8312496]
How do I get the max number in each cluster together with the value of (i)? Like this:
0: 0.82019115 8
1: 0.85142858 1
2: 0.8312496 10
3: 0.85566274 2
4: 0.86112067 9
First group them together as pairs using zip, then sort by value (the second element of each pair) in increasing order, and create a dict out of it.
Try:
res = list(zip(cluster.labels_, X))
max_num = dict(sorted(res, key=lambda x: x[1], reverse=False))
max_num:
{0: array([0.82019115]),
2: array([0.8312496]),
1: array([0.85142858]),
3: array([0.85566274]),
4: array([0.86112067])}
Edit:
Do you want this?
elem = list(zip(res, range(1, len(X) + 1)))
e = sorted(elem, key=lambda x: x[0][1], reverse=False)
final_dict = {k[0]: (k[1], v) for (k, v) in e}
for key in sorted(final_dict):
    print(f"{key}: {final_dict[key][0][0]} {final_dict[key][1]}")
0: 0.82019115 8
1: 0.85142858 1
2: 0.8312496 10
3: 0.85566274 2
4: 0.86112067 9
OR
import pandas as pd
df = pd.DataFrame(zip(cluster.labels_,X))
df[1] = df[1].str[0]
df = df.sort_values(1).drop_duplicates([0],keep='last')
df.index = df.index+1
df = df.sort_values(0)
df:
0 1
8 0 0.820191
1 1 0.851429
10 2 0.831250
2 3 0.855663
9 4 0.861121
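A related pandas variant (a sketch, assuming X and the fitted cluster object from the question are in scope): groupby plus idxmax picks the maximum value per cluster together with its 1-based position:
import pandas as pd

t = pd.DataFrame({"label": cluster.labels_, "value": X.ravel()})
t.index = t.index + 1    # 1-based positions, as in the desired output
top = t.loc[t.groupby("label")["value"].idxmax()].sort_values("label")
for pos, row in top.iterrows():
    print(f"{int(row['label'])}: {row['value']} {pos}")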

Is there a way to speed up the following pandas for loop?

My data frame contains 10,000,000 rows! After group by, ~ 9,000,000 sub-frames remain to loop through.
The code is:
data = pd.read_csv('big.csv')
for id, new_df in data.groupby(level=0):  # look at mini df and do some analysis
    # some code for each of the small data frames
This is super inefficient, and the code has been running for 10+ hours now.
Is there a way to speed it up?
Full Code:
d = pd.DataFrame()  # new df to populate
print 'Start of the loop'
for id, new_df in data.groupby(level=0):
    c = [new_df.iloc[i:] for i in range(len(new_df.index))]
    x = pd.concat(c, keys=new_df.index).reset_index(level=(2,3), drop=True).reset_index()
    x = x.set_index(['level_0', 'level_1', x.groupby(['level_0', 'level_1']).cumcount()])
    d = pd.concat([d, x])
To get the data:
data = pd.read_csv('https://raw.githubusercontent.com/skiler07/data/master/so_data.csv', index_col=0).set_index(['id','date'])
Note:
Most ids will only have 1 date, which indicates only 1 visit. For ids with more visits, I would like to structure them in a 3-D format, e.g. store all of their visits in the 2nd dimension out of 3. The output is (id, visits, features).
Here is one way to speed that up. It adds the desired new rows in code which processes the rows directly, which saves the overhead of constantly constructing small DataFrames. Your sample of 100,000 rows runs in a couple of seconds on my machine, while your code on only 10,000 rows of your sample data takes > 100 seconds. That represents a couple of orders of magnitude of improvement.
Code:
def make_3d(csv_filename):
    def make_3d_lines(a_df):
        a_df['depth'] = 0
        depth = 0
        prev = None
        accum = []
        for row in a_df.values.tolist():
            row[0] = 0
            key = row[1]
            if key == prev:
                depth += 1
                accum.append(row)
            else:
                if depth == 0:
                    yield row
                else:
                    depth = 0
                    to_emit = []
                    for i in range(len(accum)):
                        date = accum[i][2]
                        for j, r in enumerate(accum[i:]):
                            to_emit.append(list(r))
                            to_emit[-1][0] = j
                            to_emit[-1][2] = date
                    for r in to_emit[1:]:
                        yield r
                accum = [row]
                prev = key

    df_data = pd.read_csv(csv_filename)
    df_data.columns = ['depth'] + list(df_data.columns)[1:]
    new_df = pd.DataFrame(
        make_3d_lines(df_data.sort_values('id date'.split())),
        columns=df_data.columns
    ).astype(dtype=df_data.dtypes.to_dict())
    return new_df.set_index('id date'.split())
Test Code:
start_time = time.time()
df = make_3d('big-data.csv')
print(time.time() - start_time)
df = df.drop(columns=['feature%d' % i for i in range(3, 25)])
print(df[df['depth'] != 0].head(10))
Results:
1.7390995025634766
                          depth  feature0  feature1  feature2
id              date
207555809644681 20180104      1   0.03125  0.038623  0.008130
247833985674646 20180106      1   0.03125  0.004378  0.004065
252945024181083 20180107      1   0.03125  0.062836  0.065041
                20180107      2   0.00000  0.001870  0.008130
                20180109      1   0.00000  0.001870  0.008130
329567241731951 20180117      1   0.00000  0.041952  0.004065
                20180117      2   0.03125  0.003101  0.004065
                20180117      3   0.00000  0.030780  0.004065
                20180118      1   0.03125  0.003101  0.004065
                20180118      2   0.00000  0.030780  0.004065
I believe your approach for feature engineering could be done better, but I will stick to answering your question.
In Python, iterating over a Dictionary is way faster than iterating over a DataFrame
Here is how I managed to process a huge pandas DataFrame (~100,000,000 rows):
# reset the DataFrame index to get level 0 back as a column in your dataset
df = data.reset_index()  # the index will be (id, date)

# split the DataFrame based on id
# and store the splits as DataFrames in a dictionary using id as key
d = dict(tuple(df.groupby('id')))

# iterate over the dictionary and process the values
for key, value in d.items():
    pass  # each value is a DataFrame

# concat the values and get the original (processed) DataFrame back
df2 = pd.concat(d.values(), ignore_index=True)
Modified @Stephen's code:
def make_3d(dataset):
    def make_3d_lines(a_df):
        a_df['depth'] = 0   # sets all depth from (1 to n) to 0
        depth = 1           # initiate from 1, so that the first loop is correct
        prev = None
        accum = []          # accumulates blocks of data belonging to given user
        for row in a_df.values.tolist():   # for each row in our dataset
            row[0] = 0      # NOT SURE
            key = row[1]    # this is the id of the row
            if key == prev: # if this row's id matches previous row's id, append together
                depth += 1
                accum.append(row)
            else:           # else if this id is new, previous block is completed -> process it
                if depth == 0:      # previous id appeared only once -> get that row from accum
                    yield accum[0]  # also remember that depth = 0
                else:               # process the block and emit each row
                    depth = 0
                    to_emit = []    # prepare to emit the list
                    for i in range(len(accum)):   # for each unique day in the accumulated list
                        date = accum[i][2]        # define date to be the first date it sees
                        for j, r in enumerate(accum[i:]):
                            to_emit.append(list(r))
                            to_emit[-1][0] = j    # define the depth
                            to_emit[-1][2] = date # define the date
                    for r in to_emit[0:]:
                        yield r
                accum = [row]
                prev = key

    df_data = dataset.reset_index()
    df_data.columns = ['depth'] + list(df_data.columns)[1:]
    new_df = pd.DataFrame(
        make_3d_lines(df_data.sort_values('id date'.split(), ascending=[True, False])),
        columns=df_data.columns
    ).astype(dtype=df_data.dtypes.to_dict())
    return new_df.set_index('id date'.split())
Testing:
t = pd.DataFrame(data={'id':[1,1,1,1,2,2,3,3,4,5], 'date':[20180311,20180310,20180210,20170505,20180312,20180311,20180312,20180311,20170501,20180304], 'feature':[10,20,45,1,14,15,20,20,13,11],'result':[1,1,0,0,0,0,1,0,1,1]})
t = t.reindex(columns=['id','date','feature','result'])
print t
id date feature result
0 1 20180311 10 1
1 1 20180310 20 1
2 1 20180210 45 0
3 1 20170505 1 0
4 2 20180312 14 0
5 2 20180311 15 0
6 3 20180312 20 1
7 3 20180311 20 0
8 4 20170501 13 1
9 5 20180304 11 1
Output
              depth  feature  result
id date
1  20180311       0       10       1
   20180311       1       20       1
   20180311       2       45       0
   20180311       3        1       0
   20180310       0       20       1
   20180310       1       45       0
   20180310       2        1       0
   20180210       0       45       0
   20180210       1        1       0
   20170505       0        1       0
2  20180312       0       14       0
   20180312       1       15       0
   20180311       0       15       0
3  20180312       0       20       1
   20180312       1       20       0
   20180311       0       20       0
4  20170501       0       13       1
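If the end goal really is an (id, visits, features) array, the indexed frame produced above can be packed into a padded 3-D numpy array; a minimal sketch with a hypothetical helper to_3d (ids with fewer visits are zero-padded):
import numpy as np

def to_3d(df):
    # one block of rows per id, in the order the ids appear
    groups = [g.to_numpy() for _, g in df.groupby(level='id', sort=False)]
    max_visits = max(len(g) for g in groups)
    out = np.zeros((len(groups), max_visits, df.shape[1]))
    for i, g in enumerate(groups):
        out[i, :len(g), :] = g   # zero-pad ids with fewer visits
    return out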

pandas convert text feature to numeric value

I can convert all text features in a pandas DataFrame by casting to 'category' using the df.astype() method as below. However, I find category hard to work with (e.g. for plotting data) and would prefer to create a new column of integers.
# convert all objects to categories
object_types = dataset.select_dtypes(include=['O'])
for col in object_types:
    dataset['{0}_category'.format(col)] = dataset[col].astype('category')
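Side note: once a column has the 'category' dtype, an integer representation is already available through the .cat.codes accessor; a minimal sketch (the column naming is illustrative):
# integer codes for every category column, without building the mapping by hand
for col in dataset.select_dtypes(include=['category']).columns:
    dataset[col + '_int_value'] = dataset[col].cat.codes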
I can convert the text to integers using this hack:
# convert all objects to int values
object_types = dataset.select_dtypes(include=['O'])
new_cols = {}
for col in object_types:
    data_set = set(dataset[col].tolist())
    data_indexed = {}
    for i, item in enumerate(data_set):
        data_indexed[item] = i
    new_list = []
    for item in dataset[col].tolist():
        new_list.append(data_indexed[item])
    new_cols[col] = new_list
for key, val in new_cols.items():
    dataset['{0}_int_value'.format(key)] = val
But is there a better (or existing) way to do the same?
I would use the factorize method, which is designed for this particular task:
In [90]: x
Out[90]:
A B
9 c z
10 c z
4 b x
5 b y
1 a w
7 b z
In [91]: x.apply(lambda col: pd.factorize(col, sort=True)[0])
Out[91]:
A B
9 2 3
10 2 3
4 1 1
5 1 2
1 0 0
7 1 3
or:
In [92]: x.apply(lambda col: pd.factorize(col)[0])
Out[92]:
A B
9 0 0
10 0 0
4 1 1
5 1 2
1 2 3
7 1 0
Consider df:
df = pd.DataFrame(dict(A=list('aaaabbbbcccc'),
                       B=list('wwxxxyyzzzzz')))
df
You can convert to integers like this:
def intify(s):
    u = np.unique(s)
    i = np.arange(len(u))
    return s.map(dict(zip(u, i)))
Or a shorter version:
def intify(s):
    u = np.unique(s)
    return s.map({k: i for i, k in enumerate(u)})
df.apply(intify)
Or in a single line
df.apply(lambda s: s.map({k:i for i,k in enumerate(s.unique())}))

Write 2d dictionary into a dataframe or tab-delimited file using python

I have a 2-d dictionary in the following format:
myDict = {('a','b'):10, ('a','c'):20, ('a','d'):30, ('b','c'):40, ('b','d'):50,('c','d'):60}
How can I write this into a tab-delimited file so that the file contains the following? Filling a tuple (x, y) should fill two locations, (x, y) and (y, x), and (x, x) is always 0.
The output would be :
a b c d
a 0 10 20 30
b 10 0 40 50
c 20 40 0 60
d 30 50 60 0
PS: If the dictionary can somehow be converted into a DataFrame (using pandas), then it can easily be written to a file using a pandas function.
You can do this with the lesser-known align method and a little unstack magic:
In [122]: s = Series(myDict, index=MultiIndex.from_tuples(myDict))
In [123]: df = s.unstack()
In [124]: lhs, rhs = df.align(df.T)
In [125]: res = lhs.add(rhs, fill_value=0).fillna(0)
In [126]: res
Out[126]:
a b c d
a 0 10 20 30
b 10 0 40 50
c 20 40 0 60
d 30 50 60 0
Finally, to write this to a CSV file, use the to_csv method:
In [128]: res.to_csv('res.csv', sep='\t')
In [129]: !cat res.csv
a b c d
a 0.0 10.0 20.0 30.0
b 10.0 0.0 40.0 50.0
c 20.0 40.0 0.0 60.0
d 30.0 50.0 60.0 0.0
If you want to keep things as integers, cast using DataFrame.astype(), like so:
In [137]: res.astype(int).to_csv('res.csv', sep='\t')
In [138]: !cat res.csv
a b c d
a 0 10 20 30
b 10 0 40 50
c 20 40 0 60
d 30 50 60 0
(It was cast to float because of the intermediate step of filling in nan values where indices from one frame were missing from the other)
@Dan Allan's answer using combine_first is nice:
In [130]: df.combine_first(df.T).fillna(0)
Out[130]:
a b c d
a 0 10 20 30
b 10 0 40 50
c 20 40 0 60
d 30 50 60 0
Timings:
In [134]: timeit df.combine_first(df.T).fillna(0)
100 loops, best of 3: 2.01 ms per loop
In [135]: timeit lhs, rhs = df.align(df.T); res = lhs.add(rhs, fill_value=0).fillna(0)
1000 loops, best of 3: 1.27 ms per loop
Those timings are probably a bit polluted by construction costs, so what do things look like with some really huge frames?
In [143]: df = DataFrame({i: randn(1e7) for i in range(1, 11)})
In [144]: df2 = DataFrame({i: randn(1e7) for i in range(10)})
In [145]: timeit lhs, rhs = df.align(df2); res = lhs.add(rhs, fill_value=0).fillna(0)
1 loops, best of 3: 4.41 s per loop
In [146]: timeit df.combine_first(df2).fillna(0)
1 loops, best of 3: 2.95 s per loop
DataFrame.combine_first() is faster for larger frames.
In [49]: data = map(list, zip(*myDict.keys())) + [myDict.values()]
In [50]: df = DataFrame(zip(*data)).set_index([0, 1])[2].unstack()
In [52]: df.combine_first(df.T).fillna(0)
Out[52]:
a b c d
a 0 10 20 30
b 10 0 40 50
c 20 40 0 60
d 30 50 60 0
For posterity: If you are just tuning in, check out Phillip Cloud's answer below for a neater way to construct df.
Not as elegant as I'd like (and not using pandas) but until you find something better:
adj = dict()
for ((u, v), w) in myDict.items():
    if u not in adj: adj[u] = dict()
    if v not in adj: adj[v] = dict()
    adj[u][v] = adj[v][u] = w

keys = adj.keys()
print '\t' + '\t'.join(keys)
for u in keys:
    def f(v):
        try:
            return str(adj[u][v])
        except KeyError:
            return "0"
    print u + '\t' + '\t'.join(f(v) for v in keys)
or equivalently (if you don't want to construct the adjacency matrix):
k = dict()
for ((u, v), w) in myDict.items():
    k[u] = k[v] = True

keys = k.keys()
print '\t' + '\t'.join(keys)
for u in keys:
    def f(v):
        if (u, v) in myDict:
            return str(myDict[(u, v)])
        elif (v, u) in myDict:
            return str(myDict[(v, u)])
        else:
            return "0"
    print u + '\t' + '\t'.join(f(v) for v in keys)
Got it working using the pandas package.
# Find all column names
z = []
[z.extend(x) for x in myDict.keys()]
colnames = sorted(set(z))

# Create an empty DataFrame using pandas
myDF = DataFrame(index=colnames, columns=colnames)
myDF = myDF.fillna(0)  # Initialize with zeros

# Fill each item one by one
for val in myDict:
    myDF[val[0]][val[1]] = myDict[val]
    myDF[val[1]][val[0]] = myDict[val]

# Write to a file
outfilename = "matrixCooccurence.txt"
myDF.to_csv(outfilename, sep="\t", index=True, header=True, index_label="features")
