Pandas adding date of file creation as a variable - python

I have multiple csv files in a folder. I want to add "date_created" as a variable to my dataframe for each csv file. Currently I have something like this:
import glob
import pandas as pd
df = pd.concat([pd.read_csv(f, encoding="utf-16", delimiter = "^") for f in glob.glob('*.csv')])
df.to_csv("all_together.csv")
How could I do this?

Use assign with a custom function:
import glob
import os
import platform

import pandas as pd

# https://stackoverflow.com/a/39501288
def creation_date(path_to_file):
    """
    Try to get the date that a file was created, falling back to when it was
    last modified if that isn't possible.
    See http://stackoverflow.com/a/39501288/1709587 for explanation.
    """
    if platform.system() == 'Windows':
        return os.path.getctime(path_to_file)
    else:
        stat = os.stat(path_to_file)
        try:
            return stat.st_birthtime
        except AttributeError:
            # We're probably on Linux. No easy way to get creation dates here,
            # so we'll settle for when its content was last modified.
            return stat.st_mtime

L = [pd.read_csv(f, encoding="utf-16", delimiter="^").assign(date_created=creation_date(f))
     for f in glob.glob('*.csv')]
df = pd.concat(L, ignore_index=True)
df.to_csv("all_together.csv")

Related

How do you iterate through files in a directory and, if a keyword is in them, write to a different data frame?

I am currently trying to iterate through all of the files in a directory and then write 'Yes' or 'No' to columns in a new data frame if certain strings appear in the files.
This works the way I would expect it to: it prints 'Yes' or 'No' to the terminal based on whether any of the words_in_file are present.
import pandas as pd
import numpy as np
from Byron import copy_to_processor_directory
from pip import qualify_file_name, FileCompare, normalize_file_extension
from pep.settings import WORKSPACE_ROOT, ACCOUNT_HOME
import sys
import os

file_results = pd.DataFrame()
file_results['test_case_found'] = ''
words_in_file = ['remote_directory', 'file_path']

def main():
    for subdir, dirs, files in os.walk(ACCOUNT_HOME):
        for file in files:
            directory_files = open(os.path.join(subdir, file), 'r')
            directory_file_code = directory_files.read()
            for key_word in words_in_file:
                if key_word in directory_file_code:
                    print('yes')
                else:
                    print('No')

file_results.to_csv('test.csv', index=False)

if __name__ == '__main__':
    main()
However, I expect the code below to then proceed to write 'Yes' or 'No' to each row of my file_results data frame, but it does not.
import pandas as pd
import numpy as np
from Byron import copy_to_processor_directory
from pip import qualify_file_name, FileCompare, normalize_file_extension
from pep.settings import WORKSPACE_ROOT, ACCOUNT_HOME
import sys
import os

file_results = pd.DataFrame()
file_results['test_case_found'] = ''
words_in_file = ['remote_directory', 'file_path']

def main():
    for subdir, dirs, files in os.walk(ACCOUNT_HOME):
        for file in files:
            directory_files = open(os.path.join(subdir, file), 'r')
            directory_file_code = directory_files.read()
            for key_word in words_in_file:
                if key_word in directory_file_code:
                    print('yes')
                    file_results['test_case_found'] = 'Yes'
                else:
                    print('No')
                    file_results['test_cause_found'] = 'No'

file_results.to_csv('test.csv', index=False)

if __name__ == '__main__':
    main()
I have found lots of examples for when you are writing to the same data frame you are iterating through, but here I am iterating through files that I am reading and trying to write to a new data frame rather than just a file. Please help!
What's wrong with your code:
For a given dataframe df with n rows, df['col'] = '' creates the column col if it doesn't exist and sets all n entries of the column to the value '' (same for any other string).
As you started from an empty dataframe, file_results['test_case_found'] = '' creates a column test_case_found and sets all of its values, of which there are none, to '', so it basically just creates an empty column in your empty dataframe.
Then, every time through the loop you repeat the same mistake of setting the zero values of an empty column to a string, which changes nothing.
Also, you're saving to csv before the main function is called, so even if your function were correct, you'd still be saving an empty dataframe.
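A minimal illustration of that behaviour (not part of the original code, just a demonstration):
import pandas as pd
empty = pd.DataFrame()
empty['test_case_found'] = 'Yes'   # there are no rows, so nothing is actually stored
print(len(empty))                  # 0 -- the frame is still empty
print(empty.columns.tolist())      # ['test_case_found']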
What you should do:
You could build a list as you go through your loop, and then create your column from that list, so the column will have the same length as the list and store the same data:
file_results = pd.DataFrame()
words_in_file = ['remote_directory', 'file_path']

def main():
    results = []
    for subdir, dirs, files in os.walk(ACCOUNT_HOME):
        for file in files:
            directory_files = open(os.path.join(subdir, file), 'r')
            directory_file_code = directory_files.read()
            for key_word in words_in_file:
                if key_word in directory_file_code:
                    print('yes')
                    results.append('Yes')
                else:
                    print('No')
                    results.append('No')
    file_results['test_case_found'] = results
Alternatively, you could create your dataframe directly from the list, so you don't need the first line, and you replace the last line with: file_results = pd.DataFrame({'test_case_found': results})

Trying to take multiple excel spreadsheets, extract specific data, add them all to one dataframe and save it as a csv file

Very new to this, so please go easy on me :)
Trying to take multiple excel spreadsheets, extract specific data from specific cells, add them all to one dataframe and save it as a csv file.
The csv output only contains the data from the last excel file. Please could you help?
import pandas as pd
import os
from pathlib import Path

ip = "//NETWORKLOCATION/In"
op = "//NETWORKLOCATION/Out"
file_exist = False
dir_list = os.listdir(ip)
print(dir_list)

for xlfile in dir_list:
    if xlfile.endswith('.xlsx') or xlfile.endswith('.xls'):
        file_exist = True
        str_file = os.path.join(ip, xlfile)
        df1 = pd.read_excel(str_file)
        columns1 = {*VARIOUSDATA -*
        }
        # creates an empty dataframe for the data to all sequentially be added into
        df1a = pd.DataFrame([])
        # appends the array to the new dataframe df1a
        df1a = df1a.append(pd.DataFrame(columns1, columns=['*VARIOUS COLUMNS*]))
        if not file_exist:
            print('cannot find any valid excel file in the folder ' + ip)
            print(str_file)

df1a.to_csv('//NETWORKLOCATION/Out/Test.csv')
print(df1a)
I think you should put:
#creates an empty dataframe for the data to all sequentially be added into
df1a = pd.DataFrame([])
before the for xlfile in dir_list: loop, not inside the loop.
Otherwise df1a is recreated empty on each file iteration.
A couple of things. First, you'll never encounter:
if not file_exist:
    print('cannot find any valid excel file in the folder ' + ip)
    print(str_file)
as written, because it's a nested if statement, so file_exist is always set to True before it's reached.
Second, you're creating df1a inside your for loop, so you keep setting it back to empty.
Why import Path, and then use os.path and os.listdir?
Why not just use Path(ip).glob('*.xls*')?
This would look like:
import pandas as pd
import os
from pathlib import Path

ip = "//NETWORKLOCATION/In"
op = "//NETWORKLOCATION/Out"

# creates an empty dataframe for the data to all sequentially be added into
df1a = pd.DataFrame([])

for xlfile in Path(ip).glob('*.xls*'):
    df1 = pd.read_excel(xlfile)
    columns1 = {"VARIOUSDATA"}
    # appends the array to the new dataframe df1a
    df1a = df1a.append(pd.DataFrame(columns1, columns=['VARIOUS_COLUMNS']))

if df1a.empty:
    print('cannot find any valid excel file in the folder ' + ip)
else:
    df1a.to_csv(op + '/Test.csv')
    print(df1a)
The csv output only contains the data from the last excel file.
You create the df1a DataFrame inside the for loop. Each time you read a new xlfile you create a new empty DataFrame.
You have to put df1a = pd.DataFrame([]) on the 9th line of your script before the loop.
Something like this should work for you.
import os
import pandas as pd
import glob
glob.glob("C:\\your_path\\*.xlsx")
all_data = pd.DataFrame()
for f in glob.glob("C:\\your_path\\*.xlsx"):
df = pd.read_excel(f)
all_data = all_data.append(df,ignore_index=True)
type(all_data)
Check out this link.
https://pbpython.com/excel-file-combine.html
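Note that DataFrame.append, used in the snippets above, was deprecated in pandas 1.4 and removed in pandas 2.0. On current pandas the same idea is usually written with a list and pd.concat; a minimal sketch, reusing the placeholder path from above:
import glob
import pandas as pd
frames = [pd.read_excel(f) for f in glob.glob("C:\\your_path\\*.xlsx")]
all_data = pd.concat(frames, ignore_index=True)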

How to read "abc.csv" and if not present read "xyz.csv", using pandas?

I want to read whichever of these two files, xyz.csv or abc.csv, is present; only one will be present at a time:
if abc.csv not in Path8:
    pd.read_csv(Path8 + 'xyz.csv')
You can use Python's os.path.isfile() function to test if your file exists before asking Pandas to open the CSV file. Pandas itself does not support such logic. For example:
import pandas as pd
import os.path

Path8 = '/my/path'
file1 = os.path.join(Path8, 'abc.csv')
file2 = os.path.join(Path8, 'xyz.csv')

if os.path.isfile(file1):
    print(f"Opening {file1}")
    df = pd.read_csv(file1)
    print(df)
elif os.path.isfile(file2):
    print(f"Opening {file2}")
    df = pd.read_csv(file2)
    print(df)
else:
    print(f"abc.csv and xyz.csv not found in {Path8}")
os.path.join() is a safer way of constructing file paths.
An alternative approach would be to catch the FileNotFoundError exception that Pandas raises if the file is not found, and then try another file. This approach would also allow you to easily extend it to more possible filenames to try:
import pandas as pd
import os.path

Path8 = '/my/path'
files = [os.path.join(Path8, filename) for filename in ["abc.csv", "xyz.csv"]]

for csv_file in files:
    try:
        df = pd.read_csv(csv_file)
        break
    except FileNotFoundError:
        df = None

if df is None:
    print(f"{', '.join(files)} - not found in {Path8}")
else:
    print(f"Opened {csv_file}")
    print(df)
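For completeness, a compact pathlib variant of the same existence check (a minimal sketch, not from the original answer; the path is a placeholder):
from pathlib import Path
import pandas as pd
Path8 = Path('/my/path')
# take the first candidate file that actually exists, or None if neither does
csv_file = next((p for p in (Path8 / 'abc.csv', Path8 / 'xyz.csv') if p.is_file()), None)
df = pd.read_csv(csv_file) if csv_file is not None else None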

Loop through files in a different folder in Python

I have a problem with a loop in Python. My folder looks like this:
|folder_initial
|--data_loop
|--example1
|--example2
|--example3
|--python_jupyter_notebook
I would like to loop through all files in data_loop, open them, run a simple operation, save them with another name and then do the same with the subsequent file. I have created the following code:
import pandas as pd
import numpy as np
import os

def scan_folder(parent):
    # iterate over all the files in directory 'parent'
    for file_name in os.listdir(parent):
        if file_name.endswith(".csv"):
            print(file_name)
            df = pd.read_csv("RMB_IT.csv", low_memory=False, header=None,
                             names=['column1', 'column2', 'column3', 'column4'])
            df = df[['column2', 'column4']]
            # Substitute ND with missing data
            df = df.replace('ND,1', np.nan)
            df = df.replace('ND,2', np.nan)
            df = df.replace('ND,3', np.nan)
            df = df.replace('ND,4', np.nan)
            df = df.replace('ND,5', np.nan)
            df = df.replace('ND,6', np.nan)
        else:
            current_path = "".join((parent, "/", file_name))
            if os.path.isdir(current_path):
                # if we're checking a sub-directory, recall this method
                scan_folder(current_path)

scan_folder("./data_loop")  # Insert parent directory's path
I get the error:
FileNotFoundError: File b'example2.csv' does not exist
Moreover, I would like to run the code without needing to keep the Jupyter notebook in the folder folder_initial; I would like to have something like this:
|scripts
|--Jupiter Notebook
|data
|---csv files
|--example1.csv
|--example2.csv
Any idea?
-- Edit:
I created something like this based on a user's suggestion:
import os
import glob

os.chdir('C:/Users/bedinan/Documents/python_scripts_v02/data_loop')
for file in list(glob.glob('*.csv')):
    df = pd.read_csv(file, low_memory=False, header=None, names=[
    df = df[[
    # Substitute ND with missing data
    df = df.replace('ND,1', np.nan)
    df = df.replace('ND,2', np.nan)
    df = df.replace('ND,3', np.nan)
    df = df.replace('ND,4', np.nan)
    df = df.replace('ND,5', np.nan)
    df = df.replace('ND,6', np.nan)
    df.to_pickle(file + "_v02" + ".pkl")

f = pd.read_pickle('folder\\data_loop\\RMB_PT.csv_v02.pkl')
But the resulting file name is not composed properly, since it still contains the .csv extension inside the name.
You can use this answer to iterate over all subfolders:
import os
import shutil
import pathlib
import pandas as pd

def scan_folder(root):
    for path, subdirs, files in os.walk(root):
        for name in files:
            if name.endswith('.csv'):
                src = pathlib.PurePath(path, name)
                dst = pathlib.PurePath(path, 'new_' + name)
                shutil.copyfile(src, dst)
                df = pd.read_csv(dst)
                # do something with DF
                df.to_csv()

scan_folder(r'C:\User\Desktop\so\55648849')
Here's a solution which only uses pathlib, which I'm quite a big fan of. I pulled out your DataFrame operations into their own function, which you can re-name and re-write to actually do what you want it to do.
import pandas as pd
import numpy as np
from pathlib import Path

# rename the function to something more relevant
def df_operation(csv_path):
    df = pd.read_csv(
        csv_path.absolute(),
        low_memory=False,
        header=None,
        names=['column1', 'column2', 'column3', 'column4']
    )
    # do some stuff with the dataframe

def scan_folder(parent):
    p = Path(parent)
    # Probably want a check here to make sure the provided
    # parent is a directory, not a file
    assert p.is_dir()
    [df_operation(f) for f in p.rglob('*') if f.suffix == '.csv']

print(scan_folder("./example/dir"))
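If each processed file should also be saved under a new name, as the question asks, df_operation could end along these lines (a minimal sketch; the _v02 suffix and pickle output simply mirror the OP's edit and are only an example):
def df_operation(csv_path):
    df = pd.read_csv(csv_path, low_memory=False, header=None,
                     names=['column1', 'column2', 'column3', 'column4'])
    # ... transform df here ...
    # build the output name from the stem so ".csv" does not end up inside it
    out_path = csv_path.with_name(csv_path.stem + "_v02.pkl")
    df.to_pickle(out_path)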

Import multiple CSV files into pandas and concatenate into one DataFrame

I would like to read several CSV files from a directory into pandas and concatenate them into one big DataFrame. I have not been able to figure it out though. Here is what I have so far:
import glob
import pandas as pd
# Get data file names
path = r'C:\DRO\DCL_rawdata_files'
filenames = glob.glob(path + "/*.csv")
dfs = []
for filename in filenames:
    dfs.append(pd.read_csv(filename))
# Concatenate all data into one DataFrame
big_frame = pd.concat(dfs, ignore_index=True)
I guess I need some help within the for loop?
See pandas: IO tools for all of the available .read_ methods.
Try the following code if all of the CSV files have the same columns.
I have added header=0, so that after reading the CSV file's first row, it can be assigned as the column names.
import pandas as pd
import glob
import os
path = r'C:\DRO\DCL_rawdata_files' # use your path
all_files = glob.glob(os.path.join(path, "*.csv"))
li = []
for filename in all_files:
    df = pd.read_csv(filename, index_col=None, header=0)
    li.append(df)
frame = pd.concat(li, axis=0, ignore_index=True)
Or, with attribution to a comment from Sid.
all_files = glob.glob(os.path.join(path, "*.csv"))
df = pd.concat((pd.read_csv(f) for f in all_files), ignore_index=True)
It's often necessary to identify each sample of data, which can be accomplished by adding a new column to the dataframe.
pathlib from the standard library will be used for this example. It treats paths as objects with methods, instead of strings to be sliced.
Imports and Setup
from pathlib import Path
import pandas as pd
import numpy as np
path = r'C:\DRO\DCL_rawdata_files' # or unix / linux / mac path
# Get the files from the path provided in the OP
files = Path(path).glob('*.csv') # .rglob to get subdirectories
Option 1:
Add a new column with the file name
dfs = list()
for f in files:
    data = pd.read_csv(f)
    # .stem is an attribute of pathlib objects that gives the filename w/o the extension
    data['file'] = f.stem
    dfs.append(data)
df = pd.concat(dfs, ignore_index=True)
Option 2:
Add a new column with a generic name using enumerate
dfs = list()
for i, f in enumerate(files):
    data = pd.read_csv(f)
    data['file'] = f'File {i}'
    dfs.append(data)
df = pd.concat(dfs, ignore_index=True)
Option 3:
Create the dataframes with a list comprehension, and then use np.repeat to add a new column.
[f'S{i}' for i in range(len(dfs))] creates a list of strings to name each dataframe.
[len(df) for df in dfs] creates a list of lengths
Attribution for this option goes to this plotting answer.
# Read the files into dataframes
dfs = [pd.read_csv(f) for f in files]
# Combine the list of dataframes
df = pd.concat(dfs, ignore_index=True)
# Add a new column
df['Source'] = np.repeat([f'S{i}' for i in range(len(dfs))], [len(df) for df in dfs])
Option 4:
One liners using .assign to create the new column, with attribution to a comment from C8H10N4O2
df = pd.concat((pd.read_csv(f).assign(filename=f.stem) for f in files), ignore_index=True)
or
df = pd.concat((pd.read_csv(f).assign(Source=f'S{i}') for i, f in enumerate(files)), ignore_index=True)
An alternative to darindaCoder's answer:
path = r'C:\DRO\DCL_rawdata_files' # use your path
all_files = glob.glob(os.path.join(path, "*.csv")) # advisable to use os.path.join as this makes concatenation OS independent
df_from_each_file = (pd.read_csv(f) for f in all_files)
concatenated_df = pd.concat(df_from_each_file, ignore_index=True)
# doesn't create a list, nor does it append to one
import glob
import os
import pandas as pd
df = pd.concat(map(pd.read_csv, glob.glob(os.path.join('', "my_files*.csv"))))
Almost all of the answers here are either unnecessarily complex (glob pattern matching) or rely on additional third-party libraries. You can do this in two lines using everything Pandas and Python (all versions) already have built in.
For a few files - one-liner
df = pd.concat(map(pd.read_csv, ['d1.csv', 'd2.csv','d3.csv']))
For many files
import os
filepaths = [f for f in os.listdir(".") if f.endswith('.csv')]
df = pd.concat(map(pd.read_csv, filepaths))
For No Headers
If you have specific things you want to change with pd.read_csv (i.e., no headers) you can make a separate function and call that with your map:
def f(i):
    return pd.read_csv(i, header=None)
df = pd.concat(map(f, filepaths))
This pandas line, which sets df, uses three things:
Python's map(function, iterable) sends the iterable (our list, which is every CSV file path in filepaths) to the function (pd.read_csv()).
Pandas' read_csv() function reads in each CSV file as normal.
Pandas' concat() brings all of these together under one df variable.
Easy and Fast
Import two or more CSV files without having to make a list of names.
import glob
import pandas as pd
df = pd.concat(map(pd.read_csv, glob.glob('data/*.csv')))
The Dask library can read a dataframe from multiple files:
>>> import dask.dataframe as dd
>>> df = dd.read_csv('data*.csv')
(Source: https://examples.dask.org/dataframes/01-data-access.html#Read-CSV-files)
The Dask dataframes implement a subset of the Pandas dataframe API. If all the data fits into memory, you can call df.compute() to convert the dataframe into a Pandas dataframe.
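A minimal sketch of that round trip, assuming the data*.csv pattern from the example above:
import dask.dataframe as dd
ddf = dd.read_csv('data*.csv')   # lazy, partitioned dataframe
pdf = ddf.compute()              # materialise as a regular pandas DataFrame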
I googled my way into Gaurav Singh's answer.
However, lately I am finding it faster to do any manipulation using NumPy and then assign it once to a dataframe, rather than manipulating the dataframe itself iteratively, and it seems to work in this solution too.
I do sincerely want anyone hitting this page to consider this approach, but I don't want to attach this huge piece of code as a comment and make it less readable.
You can leverage NumPy to really speed up the dataframe concatenation.
import os
import glob
import pandas as pd
import numpy as np
path = "my_dir_full_path"
allFiles = glob.glob(os.path.join(path,"*.csv"))
np_array_list = []
for file_ in allFiles:
    df = pd.read_csv(file_, index_col=None, header=0)
    np_array_list.append(df.to_numpy())  # formerly df.as_matrix(), which was removed in pandas 1.0
comb_np_array = np.vstack(np_array_list)
big_frame = pd.DataFrame(comb_np_array)
big_frame.columns = ["col1", "col2"....]
Timing statistics:
total files :192
avg lines per file :8492
--approach 1 without NumPy -- 8.248656988143921 seconds ---
total records old :1630571
--approach 2 with NumPy -- 2.289292573928833 seconds ---
A one-liner using map, but if you'd like to specify additional arguments, you could do:
import pandas as pd
import glob
import functools
df = pd.concat(map(functools.partial(pd.read_csv, sep='|', compression=None),
glob.glob("data/*.csv")))
Note: map by itself does not let you supply additional arguments.
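If you would rather not import functools, a lambda inside map is equivalent; a minimal sketch continuing the snippet above (same assumed separator and file pattern):
df = pd.concat(map(lambda f: pd.read_csv(f, sep='|', compression=None),
                   glob.glob("data/*.csv")))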
If you want to search recursively (Python 3.5 or above), you can do the following:
from glob import iglob
import pandas as pd
path = r'C:\user\your\path\**\*.csv'
all_rec = iglob(path, recursive=True)
dataframes = (pd.read_csv(f) for f in all_rec)
big_dataframe = pd.concat(dataframes, ignore_index=True)
Note that the three last lines can be expressed in one single line:
df = pd.concat((pd.read_csv(f) for f in iglob(path, recursive=True)), ignore_index=True)
You can find the documentation of ** here. Also, I used iglob instead of glob, as it returns an iterator instead of a list.
EDIT: Multiplatform recursive function:
You can wrap the above into a multiplatform function (Linux, Windows, Mac), so you can do:
df = read_df_rec(r'C:\user\your\path', '*.csv')
Here is the function:
from glob import iglob
from os.path import join
import pandas as pd
def read_df_rec(path, fn_regex=r'*.csv'):
    return pd.concat((pd.read_csv(f) for f in iglob(
        join(path, '**', fn_regex), recursive=True)), ignore_index=True)
Inspired from MrFun's answer:
import glob
import pandas as pd
list_of_csv_files = glob.glob(directory_path + '/*.csv')
list_of_csv_files.sort()
df = pd.concat(map(pd.read_csv, list_of_csv_files), ignore_index=True)
Notes:
By default, the list of files generated through glob.glob is not sorted. On the other hand, in many scenarios it needs to be sorted, e.g., one may want to analyze the number of sensor frame drops vs. timestamp.
In the pd.concat command, if ignore_index=True is not specified, it preserves the original indices from each dataframe (i.e. each individual CSV file in the list) and the main dataframe looks like:
timestamp id valid_frame
0
1
2
.
.
.
0
1
2
.
.
.
With ignore_index=True, it looks like:
timestamp id valid_frame
0
1
2
.
.
.
108
109
.
.
.
IMO, this is helpful when one wants to manually create a histogram of the number of frame drops vs. one-minute (or any other duration) bins and base the calculation on the very first timestamp, e.g.
begin_timestamp = df['timestamp'][0]
Without ignore_index=True, df['timestamp'][0] returns a Series containing the very first timestamp from each of the individual dataframes; it does not give just a single value.
Another one-liner with a list comprehension, which allows you to use arguments with read_csv:
df = pd.concat([pd.read_csv(f'dir/{f}') for f in os.listdir('dir') if f.endswith('.csv')])
Alternative using the pathlib library (often preferred over os.path).
This method avoids iterative use of pandas concat()/append().
From the pandas documentation:
It is worth noting that concat() (and therefore append()) makes a full copy of the data, and that constantly reusing this function can create a significant performance hit. If you need to use the operation over several datasets, use a list comprehension.
import pandas as pd
from pathlib import Path
dir = Path("../relevant_directory")
df = (pd.read_csv(f) for f in dir.glob("*.csv"))
df = pd.concat(df)
If multiple CSV files are zipped, you may use zipfile to read all and concatenate as below:
import zipfile
import pandas as pd
ziptrain = zipfile.ZipFile('yourpath/yourfile.zip')
train = []
train = [ pd.read_csv(ziptrain.open(f)) for f in ziptrain.namelist() ]
df = pd.concat(train)
Based on Sid's good answer.
To identify issues of missing or unaligned columns
Before concatenating, you can load CSV files into an intermediate dictionary which gives access to each data set based on the file name (in the form dict_of_df['filename.csv']). Such a dictionary can help you identify issues with heterogeneous data formats, when column names are not aligned for example.
Import modules and locate file paths:
import os
import glob
import pandas
from collections import OrderedDict
path = r'C:\DRO\DCL_rawdata_files'
filenames = glob.glob(path + "/*.csv")
Note: OrderedDict is not necessary, but it'll keep the order of files which might be useful for analysis.
Load CSV files into a dictionary. Then concatenate:
dict_of_df = OrderedDict((f, pandas.read_csv(f)) for f in filenames)
pandas.concat(dict_of_df, sort=True)
Keys are file names f and values are the data frame content of CSV files.
Instead of using f as a dictionary key, you can also use os.path.basename(f) or other os.path methods to reduce the size of the key in the dictionary to only the smaller part that is relevant.
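For example, keying the dictionary by basename instead of the full path (a minimal sketch of the suggestion above, reusing filenames):
dict_of_df = OrderedDict((os.path.basename(f), pandas.read_csv(f)) for f in filenames)
big_frame = pandas.concat(dict_of_df, sort=True)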
import os
os.system("awk '(NR == 1) || (FNR > 1)' file*.csv > merged.csv")
Where NR and FNR represent the number of the line being processed.
FNR is the current line within each file.
NR == 1 includes the first line of the first file (the header), while FNR > 1 skips the first line of each subsequent file.
In case of an unnamed column issue, use this code for merging multiple CSV files along the x-axis.
import glob
import os
import pandas as pd
merged_df = pd.concat([pd.read_csv(csv_file, index_col=0, header=0) for csv_file in glob.glob(
os.path.join("data/", "*.csv"))], axis=0, ignore_index=True)
merged_df.to_csv("merged.csv")
You can do it this way also:
import pandas as pd
import os

new_df = pd.DataFrame()
for r, d, f in os.walk(csv_folder_path):
    for file in f:
        # join the walked directory with the file name (safer than string concatenation)
        complete_file_path = os.path.join(r, file)
        read_file = pd.read_csv(complete_file_path)
        new_df = new_df.append(read_file, ignore_index=True)
new_df.shape
Consider using convtools library, which provides lots of data processing primitives and generates simple ad hoc code under the hood.
It is not supposed to be faster than pandas/polars, but sometimes it can be.
E.g., you could concatenate CSV files into one for further reuse; here's the code:
import glob

from convtools import conversion as c
from convtools.contrib.tables import Table
import pandas as pd

def test_pandas():
    df = pd.concat(
        (
            pd.read_csv(filename, index_col=None, header=0)
            for filename in glob.glob("tmp/*.csv")
        ),
        axis=0,
        ignore_index=True,
    )
    df.to_csv("out.csv", index=False)
    # took 20.9 s

def test_convtools():
    table = None
    for filename in glob.glob("tmp/*.csv"):
        table_ = Table.from_csv(filename, header=False)
        if table is None:
            table = table_
        else:
            table = table.chain(table_)
    table.into_csv("out_convtools.csv", include_header=False)
    # took 15.8 s
Of course, if you just want to obtain a dataframe without writing a concatenated file, it will take 4.63 s and 10.9 s respectively (pandas is faster here because it doesn't need to zip columns for writing it back).
import pandas as pd
import glob
path = r'C:\DRO\DCL_rawdata_files' # use your path
file_path_list = glob.glob(path + "/*.csv")
file_iter = iter(file_path_list)
list_df_csv = []
list_df_csv.append(pd.read_csv(next(file_iter)))
for file in file_iter:
    list_df_csv.append(pd.read_csv(file, header=0))
df = pd.concat(list_df_csv, ignore_index=True)
This is how you can do it using Colaboratory on Google Drive:
import pandas as pd
import glob
path = r'/content/drive/My Drive/data/actual/comments_only' # Use your path
all_files = glob.glob(path + "/*.csv")
li = []
for filename in all_files:
    df = pd.read_csv(filename, index_col=None, header=0)
    li.append(df)
frame = pd.concat(li, axis=0, ignore_index=True, sort=True)
frame.to_csv('/content/drive/onefile.csv')
