Search CSV from subdirectory and add folder name as a column - python

I want read csv's from different sub-directories in my working directory to create a combined csv file. The combined csv should have a column containing the sub-directory name from which that particular csv was read from.
This is what I tried.
import os
import glob
import pandas as pd
all_filenames = [i for i in glob.glob('*/*.csv'),recursive=True)]
list_subfolder = [f.name for f in os.scandir(ride_path) if f.is_dir()]
df_list = []
for i in range(len(all_filenames)):
dir_name = list_subfolder[i]
current_csv = all_filenames[i]
data = pd.read_csv(current_csv)
data["sub_folder"]= dir_name
df_list.append(data)
combined_df = pd.concat(df_list)
combined_df.to_csv("combined_csv.csv", index=False)
The problem is that, it adds sub-directories that does not have csvs' in them, which is wrong and problematic. What is the best way to this right.

You can do this via pathlib module:
from pathlib import Path
inp_path = Path('.') # specify the inp path. Here, ('.') means current working dir
df_list= []
for csv_file in inp_path.glob('**/*.csv'): # glob here will return generator obj which will yield csv file one by one
df = pd.read_csv(csv_file)
df['file_name'] = csv_file.parent # possible to get parent dir via pathlib
df_list.append(df_list)
combined_df = pd.concat(df_list)
combined_df.to_csv("combined_csv.csv", index=False)
Note.
1- use csv_file.parent.name if you just need the name.
2- use csv_file.parent.absolute() if you want the full path of parent dir.

You can use us os.path.split():
import os
import glob
import pandas as pd
all_filenames = [i for i in glob.glob("**/*.csv", recursive=True)]
df_list = []
for f in all_filenames:
current_csv = f
data = pd.read_csv(current_csv)
data["sub_folder"] = os.path.split(f)[0] # <-- [0] is directory [1] is filename
df_list.append(data)
combined_df = pd.concat(df_list)
print(combined_df)
combined_df.to_csv("combined_csv.csv", index=False)

Another option with glob and os:
import os
import glob
import pandas as pd
df_list = []
for csv in glob.glob('**/*.csv', recursive=True):
parent_folder = os.path.split(os.path.dirname(csv))[-1]
df = pd.read_csv(csv)
df['subfolder'] = parent_folder
df_list.append(df)
combined_df = pd.concat(df_list)
combined_df.to_csv("combined_csv.csv", index=False)

One-line method (adapted from the #nk03 answer).
import pandas as pd
import pathlib as pth
pd.concat([pd.read_csv(csvfile).assign(file_name=csvfile.parent)
for csvfile in pth.Path(".").glob("**/*.csv")]) \
.to_csv("combined_csv.csv", index=False)

Related

How to upload all csv files that have specific name inside filename in python

I want to concat all csv file that have this specific word 'tables' on the filename.
Below code is upload all csv file without filter the specific word that i want.
# importing the required modules
import glob
import pandas as pd
# specifying the path to csv files
#path = "csvfoldergfg"
path = "folder_directory"
# csv files in the path
files = glob.glob(path + "/*.csv")
# defining an empty list to store
# content
data_frame = pd.DataFrame()
content = []
# checking all the csv files in the
# specified path
for filename in files:
# reading content of csv file
# content.append(filename)
df = pd.read_csv(filename, index_col=None)
content.append(df)
# converting content to data frame
data_frame = pd.concat(content)
print(data_frame)
example filename are:
abcd-tables.csv
abcd-text.csv
abcd-forms.csv
defg-tables.csv
defg-text.csv
defg-forms.csv
From the example filenames. The expected output is concat filenames
abcd-tables.csv
defg-tables.csv
into single dataframe. Assuming the header are same.
*Really appreciate you guys can solve this
You can use:
import pandas as pd
import pathlib
path = 'folder_directory'
content = []
for filename in pathlib.Path(path).glob('*-tables.csv'):
df = pd.read_csv(filename, index_col=None)
content.append(df)
df = pd.concat(content, ignore_index=True)

How to import a file from a folder where the ending characters can change - python pandas?

I currently have a folder that contains multiple files with similar names that I am trying to read from.
For example:
Folder contains files:
apple_2019_08_26_23434.xls
apple_2019_08_25_55345.xls
apple_2019_08_24_99345.xls
the name format of the file is very simple:
apple_<date>_<5 random numbers>.xls
How can I read the excel file into a pandas df if I do not care about the random 5 digits at the end?
e.g.
df = pd.read_excel('e:\Document\apple_2019_08_26_<***wildcard***>.xls')
Thank you!
You could use unix style pathname expansions via glob.
import glob
# get .txt files in current directory
txt_files = glob.glob('./*.txt')
# get .xls files in some_dir
xls_files = glob.glob('some_dir/*.xls')
# do stuff with files
# ...
Here, * basically means "anything".
Example with pandas:
import glob
for xls_file in glob.glob('e:/Document/apple_2019_08_26_*.xls'):
df = pd.read_excel(xls_file)
# do stuff with df
# ...
Change your directory with os.chdir then import all files which startwith the correct name:
import os
os.chdir(r'e:\Document')
dfs = [pd.read_excel(file) for file in os.listdir() if file.startswith('apple_2019_08')]
Now you can access each dataframe by index:
print(dfs[0])
print(dfs[1])
Or combine them to one large dataframe if they have the same format
df_all = pd.concat(dfs, ignore_index=True)
If you want the 5-digit part to be changeable in the code, you could try something like this:
from os import listdir
from os.path import isfile, join
import pandas as pd
mypath = '/Users/username/aPath'
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
fiveDigitNumber = onlyfiles[0][17:22]
filename = onlyfiles[0][:17]+fiveDigitNumber+onlyfiles[0][22:]
df = pd.read_excel(filename)

Read multiple csv files zipped in one file

I have several csv files in several zip files in on folder, so for example:
A.zip (contains csv1,csv2,csv3)
B.zip (contains csv4, csv5, csv6)
which are in the folder path C:/Folder/, when I load normal csv files in a folder I use the following code:
import glob
import pandas as pd
files = glob.glob("C/folder/*.csv")
dfs = [pd.read_csv(f, header=None, sep=";") for f in files]
df = pd.concat(dfs,ignore_index=True)
followed by this post: Reading csv zipped files in python
One csv in zip works like this:
import pandas as pd
import zipfile
zf = zipfile.ZipFile('C:/Users/Desktop/THEZIPFILE.zip')
df = pd.read_csv(zf.open('intfile.csv'))
Any idea how to optimize this loop for me?
Use zip.namelist() to get list of files inside the zip
Ex:
import glob
import zipfile
import pandas as pd
for zip_file in glob.glob("C/folder/*.zip"):
zf = zipfile.ZipFile(zip_file)
dfs = [pd.read_csv(zf.open(f), header=None, sep=";") for f in zf.namelist()]
df = pd.concat(dfs,ignore_index=True)
print(df)
I would try to tackle it in two passes. First pass, extract the contents of the zipfile onto the filesystem. Second Pass, read all those extracted CSVs using the method you already have above:
import glob
import pandas as pd
import zipfile
def extract_files(file_path):
archive = zipfile.ZipFile(file_path, 'r')
unzipped_path = archive.extractall()
return unzipped_path
zipped_files = glob.glob("C/folder/*.zip")]
file_paths = [extract_files(zf) for zf in zipped_files]
dfs = [pd.read_csv(f, header=None, sep=";") for f in file_paths]
df = pd.concat(dfs,ignore_index=True)

Loop through file in different folder in Python

I have a problem with a loop in Python. My folder looks like this:
|folder_initial
|--data_loop
|--example1
|--example2
|--example3
|--python_jupyter_notebook
I would like to loop through all files in data_loop, open them, run a simple operation, save them with another name and then do the same with the subsequent file. I have created the following code:
import pandas as pd
import numpy as np
import os
def scan_folder(parent):
# iterate over all the files in directory 'parent'
for file_name in os.listdir(parent):
if file_name.endswith(".csv"):
print(file_name)
df = pd.read_csv("RMB_IT.csv", low_memory=False, header=None, names=['column1','column2','column3','column4']
df = df[['column2','column4']
#Substitute ND with missing data
df = df.replace('ND,1',np.nan)
df = df.replace('ND,2',np.nan)
df = df.replace('ND,3',np.nan)
df = df.replace('ND,4',np.nan)
df = df.replace('ND,5',np.nan)
df = df.replace('ND,6',np.nan)
else:
current_path = "".join((parent, "/", file_name))
if os.path.isdir(current_path):
# if we're checking a sub-directory, recall this method
scan_folder(current_path)
scan_folder("./data_loop") # Insert parent direcotry's path
I get the error:
FileNotFoundError
FileNotFoundError: File b'example2.csv' does not exist
Moreover, I would like to run the code without the necessity of having the Jupyter notebook in the folder folder_initial but I would like to have something like this:
|scripts
|--Jupiter Notebook
|data
|---csv files
|--example1.csv
|--example2.csv
Any idea?
-- Edit:
I create something like this on user suggestion
import os
import glob
os.chdir('C:/Users/bedinan/Documents/python_scripts_v02/data_loop')
for file in list(glob.glob('*.csv')):
df = pd.read_csv(file, low_memory=False, header=None, names=[
df = df[[
#Substitute ND with missing data
df = df.replace('ND,1',np.nan)
df = df.replace('ND,2',np.nan)
df = df.replace('ND,3',np.nan)
df = df.replace('ND,4',np.nan)
df = df.replace('ND,5',np.nan)
df = df.replace('ND,6',np.nan)
df.to_pickle(file+"_v02"+".pkl")
f = pd.read_pickle('folder\\data_loop\\RMB_PT.csv_v02.pkl')
But the name of the file that results is not properly composed since it has inside the name the extension -csv
You can use this answer to iterate over all subfolders:
import os
import shutil
import pathlib
import pandas as pd
def scan_folder(root):
for path, subdirs, files in os.walk(root):
for name in files:
if name.endswith('.csv'):
src = pathlib.PurePath(path, name)
dst = pathlib.PurePath(path, 'new_' + name)
shutil.copyfile(src, dst)
df = pd.read_csv(dst)
# do something with DF
df.to_csv()
scan_folder(r'C:\User\Desktop\so\55648849')
Here's a solution which only uses pathlib, which I'm quite a big fan of. I pulled out your DataFrame operations into their own function, which you can re-name and re-write to actually do what you want it to do.
import pandas as pd
import numpy as np
from pathlib import Path
# rename the function to something more relevant
def df_operation(csv_path):
df = pd.read_csv(
csv_path.absolute(),
low_memory=False,
header=None,
names=['column1','column2','column3','column4']
)
# do some stuff with the dataframe
def scan_folder(parent):
p = Path(parent)
# Probably want a check here to make sure the provided
# parent is a directory, not a file
assert p.is_dir()
[df_operation(f) for f in p.rglob('*') if f.suffix == '.csv']
print(scan_folder("./example/dir"))

Retrieving data from multiple files into multiple dataframes

Scenario: I have a list of files in a folder (including the file paths). I am trying to get the content of each of those files into a dataframe (one for each file), then further perform some operations and later merge these dataframes.
From various other questions in SO, I found multiple ways to iterate over the files in a folder and get the data, but all of those I found usually ready the files in a loop and concatenate them into a single dataframe automatically, which does not work for me.
For example:
import os
import pandas as pd
path = os.getcwd()
files = os.listdir(path)
files_xls = [f for f in files if f[-3:] == 'xls*']
df = pd.DataFrame()
for f in files_xls:
data = pd.read_excel(f, 'Sheet1')
df = df.append(data)
or
import pandas as pd
import glob
all_data = pd.DataFrame()
for f in glob.glob("*.xls*"):
df = pd.read_excel(f)
all_data = all_data.append(df,ignore_index=True)
The only piece of code I could put together from what I found is:
from os.path import isfile, join
import glob
mypath = "/DGMS/Destop/uploaded"
listoffiles = glob.glob(os.path.join(mypath, "*.xls*"))
contentdataframes = (pd.read_excel(f) for f in listoffiles)
This lines run without error, but they appear not to do anything, no variables or created nor changed.
Question: What am I doing wrong here? Is there a better way to do this?
You are really close, need join all data by concat from generator:
contentdataframes = (pd.read_excel(f) for f in listoffiles)
df = pd.concat(contentdataframes, ignore_index=True)
If need list of DataFrames:
contentdataframes = [pd.read_excel(f) for f in listoffiles]

Categories