Add folder name to exported file - python

I'm hoping someone can assist. I want to add the folder name to a file export so the exported filename is "combined_summary_of_<folder name>.xls", but I can't seem to add the right reference name. The list of folders does work, but I'm stuck on getting the folder name into the filename.
import os
import glob
import pandas as pd

df_list = list()  # list of dataframes
folder = r"D:/summary_tables/"
os.chdir(folder)

for root, dirs, files in os.walk(folder):
    for folder in folder:
        keyword = folder
        os.chdir("D:/summary_tables/")
        glob.glob("D:/summary_tables/" + keyword + "/filtered*.xls")

        # initialize an empty dataframe and append individual files
        all_data = pd.DataFrame()
        for f in glob.glob("D:/summary_tables/" + keyword + "/filtered*.xls"):
            df = pd.read_excel(f)
            all_data = all_data.append(df, ignore_index=True)
        all_data.head()

        # group all of the files together and sort
        all_data2 = pd.concat([all_data]).groupby(['host_name_queried']).sum().reset_index()
        all_data2 = all_data2.sort_values('Total_count', ascending=False)
        all_data2.head(n=10)
        all_data2['Total_nx_domain'] = all_data2['Total_nx_domain'].astype(float)

        # send to xls
        import openpyxl
        all_data2.to_excel('D:/summary_tables/combined_summary_of_' + '.xls', index=False)
        print("file has been saved")

all_data
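
One way to get the folder name into the exported filename (a sketch rather than a drop-in fix: it assumes the same D:/summary_tables layout and column names as above, and writes .xlsx because recent pandas versions no longer write legacy .xls files) is to loop over the subfolder names that os.walk returns and reuse each name in both the glob pattern and the output path:

import glob
import os

import pandas as pd

base = r"D:/summary_tables"

# the folder names directly under base
for keyword in next(os.walk(base))[1]:
    files = glob.glob(os.path.join(base, keyword, "filtered*.xls"))
    if not files:
        continue

    # combine every filtered*.xls workbook in this folder
    all_data = pd.concat((pd.read_excel(f) for f in files), ignore_index=True)

    all_data2 = (all_data.groupby('host_name_queried')
                         .sum(numeric_only=True)
                         .reset_index()
                         .sort_values('Total_count', ascending=False))

    # the folder name goes straight into the output filename
    out_path = os.path.join(base, 'combined_summary_of_' + keyword + '.xlsx')
    all_data2.to_excel(out_path, index=False)
    print('saved', out_path)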

Related

Issues reading many excel files

I'm reading many xls files with this code:
import os
import pandas as pd

#reading the name of files
path = os.getcwd()
files = os.listdir(path)
files_xls = [f for f in files if f[-3:] == 'xls']

#creating empty dataframe
dfs = pd.DataFrame()

#reading and append xls files
for i in files_xls:
    data = pd.read_excel(i, 'Sheet 1', converters={'CODIGO': str})
    dfs = dfs.append(data)
With this code, I can read all the xls files with no problem.
But when I want to define the path, I get an error.
#reading the name of files
path = "/path/to/file"
files = os.listdir(path)
files_xls = [f for f in files if f[-3:] == 'xls']

#creating empty dataframe
dfs = pd.DataFrame()

#reading and append xls files
for i in files_xls:
    data = pd.read_excel(i, 'Sheet 1', converters={'CODIGO': str})
    dfs = dfs.append(data)
Error message:
FileNotFoundError: [Errno 2] No such file or directory: 'Acobamba000659a.xls'
How can I solve this?
os.listdir gives you the file names, not the paths.
You can use jurez's solution, or just use glob:
import glob

dfs = pd.DataFrame()
path = "/path/to/file/*.xls"
for i in glob.glob(path):
    data = pd.read_excel(i, 'Sheet 1', converters={'CODIGO': str})
    dfs = dfs.append(data)
You are probably forgetting that os.listdir() returns just the file names, without the path. You might try this:
files_xls = [os.path.join(path, f) for f in files if f[-3:] == 'xls']
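
Putting the two suggestions together, a minimal sketch of the fixed loop (keeping the 'Sheet 1' name and the CODIGO converter from the question; pd.concat is used here because DataFrame.append has been removed in recent pandas):

import os
import pandas as pd

path = "/path/to/file"

# build full paths, since os.listdir returns bare file names
files_xls = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.xls')]

# read every workbook and combine them into one DataFrame
frames = [pd.read_excel(f, 'Sheet 1', converters={'CODIGO': str}) for f in files_xls]
dfs = pd.concat(frames, ignore_index=True)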

Open multiple Excel files to separate Pandas dataframes

Brand new to Python and could use some help importing multiple Excel files to separate Pandas dataframes. I have successfully implemented the following code, but of course it imports everything into one frame. I would like to import them into df1, df2, df3, df4, df5, etc.
Anything helps, thank you!
import pandas as pd
import glob

def get_files():
    directory_path = input('Enter directory path: ')
    filenames = glob.glob(directory_path + '/*.xlsx')
    number_of_files = len(filenames)
    df = pd.DataFrame()
    for f in filenames:
        data = pd.read_excel(f, 'Sheet1')
        df = df.append(data)
    print(df)
    print(number_of_files)

get_files()
The easiest way to do that is to use a list. Each element of the list is a dataframe
def get_files():
    directory_path = input('Enter directory path: ')
    filenames = glob.glob(directory_path + '/*.xlsx')
    number_of_files = len(filenames)
    df_list = []
    for f in filenames:
        data = pd.read_excel(f, 'Sheet1')
        df_list.append(data)
    print(df_list)
    print(number_of_files)
    return df_list

get_files()
You can then access your dataframes with df_list[0], df_list[1]...
As another option, based on Jezrael's answer here https://stackoverflow.com/a/52074347/13160821, but modified for your code:
from os.path import basename

def get_files():
    directory_path = input('Enter directory path: ')
    filenames = glob.glob(directory_path + '/*.xlsx')
    number_of_files = len(filenames)
    df_list = {basename(f): pd.read_excel(f, 'Sheet1') for f in filenames}
    print(number_of_files)
    return df_list

get_files()
The dataframes can then be accessed by filename, e.g. df_list['file_name1.xlsx'] or df_list['some_file.xlsx']. You can also use splitext to remove the .xlsx extension from the key, or use just part of the filename.
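
For example, a sketch of the same dict comprehension with the extension stripped from each key (os.path.splitext splits 'file_name1.xlsx' into ('file_name1', '.xlsx')):

import glob
from os.path import basename, splitext

import pandas as pd

directory_path = input('Enter directory path: ')
filenames = glob.glob(directory_path + '/*.xlsx')

# key each DataFrame by its filename with the .xlsx extension stripped
df_list = {splitext(basename(f))[0]: pd.read_excel(f, 'Sheet1') for f in filenames}

# accessed as df_list['file_name1'] instead of df_list['file_name1.xlsx']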

Need help creating a function that loops over list of csv files, selects certain columns and saves them as csv's again

I've created a list using the below code:
path = r'M:\BI\HisRms'
fileList = []
for root, dirs, files in os.walk(path):
    for file in files:
        if file.endswith('.csv'):
            fileList.append(file)
A print of the file list, for context:
['736815-RM.csv', '736816-RM.csv', '736817-RM.csv', '736818-RM.csv', '736819-RM.csv', '736820-RM.csv', '736821-RM.csv', '736822-RM.csv', '736823-RM.csv', '736824-RM.csv', '736825-RM.csv', '736826-RM.csv', '736827-RM.csv', '736828-RM.csv', '736829-RM.csv', '736830-RM.csv', '736831-RM.csv', '736832-RM.csv', '736833-RM.csv', '736834-RM.csv', '736835-RM.csv', '736836-RM.csv', '736837-RM.csv', '736838-RM.csv', '736839-RM.csv', '736840-RM.csv', '736841-RM.csv', '736842-RM.csv', '736843-RM.csv', '736844-RM.csv', '736845-RM.csv', '736846-RM.csv', '736847-RM.csv', '736848-RM.csv', '736849-RM.csv', '736850-RM.csv']
Now that I have the list, I need to be able to define a function that does the following:
"For each file in my file list, select A,B,C,D columns and remove all other columns. Then save it down using the same name of the file but to a different folder"
I've been able to do this for each individual file (stupidly manual) using the code below:
df = pd.read_csv('736818-RM.csv', index_col= 0, encoding = 'iso-8859-1')
df2 = df[['Purchase Order','SKU','Markdown','Landed Cost','Original Price','Current Sale Price','Free Stock','OPO','ID Style','Supplier Style No']]
df2.to_csv(r"C:\Users\jonathon.kindred\Desktop\RM\2019\FEB 2019\736818-RM", index = False)
but I have over 500+ files that I need to amend, and I don't really have the time to do this file by file.
Any help, or pointers in the right direction, would be great!
import os
import pandas as pd

path = r'M:\BI\HisRms'
fileList = []
for root, dirs, files in os.walk(path):
    for file in files:
        if file.endswith('.csv'):
            # keep the full path so read_csv can find the file regardless of the working directory
            fileList.append(os.path.join(root, file))

for name in fileList:
    df = pd.read_csv(name, index_col=0, encoding='iso-8859-1')
    df2 = df[['Purchase Order', 'SKU', 'Markdown', 'Landed Cost', 'Original Price',
              'Current Sale Price', 'Free Stock', 'OPO', 'ID Style', 'Supplier Style No']]
    # save under the original file name, but into the new folder
    df2.to_csv(os.path.join("C:/", "Users", "jonathon.kindred", "Desktop", "RM",
                            "2019", "FEB 2019", os.path.basename(name)), index=False)
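
If the csv files also live in nested subfolders, a shorter variant of the same idea (just a sketch, not part of the original answer) is to let pathlib handle the walking and the output path:

from pathlib import Path

import pandas as pd

src = Path(r'M:\BI\HisRms')
dst = Path(r'C:\Users\jonathon.kindred\Desktop\RM\2019\FEB 2019')

keep = ['Purchase Order', 'SKU', 'Markdown', 'Landed Cost', 'Original Price',
        'Current Sale Price', 'Free Stock', 'OPO', 'ID Style', 'Supplier Style No']

# rglob('*.csv') walks every subfolder under src
for csv_path in src.rglob('*.csv'):
    df = pd.read_csv(csv_path, index_col=0, encoding='iso-8859-1')
    # keep only the wanted columns and save under the same name in the new folder
    df[keep].to_csv(dst / csv_path.name, index=False)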

Python, get duplicates in 1st column of all csv files in a directory

import pandas as pd
import glob

# select 1 file in the directory
dataset = pd.read_csv('masterfeedproduction-EURNA_2016-06-27.csv', sep=',', delimiter=None)

datasets_cols = ['transactionID', 'gvkey', 'companyName']
df = dataset.transactionID
df.shape
df.loc[df.duplicated()]
This returns the duplicates in the selected file and displays the row number and transactionID, so this part is correct.
target_directory = r'C:\Users\nikol\Downloads\fullDailyDeltas\fullDailyDeltas'
file_list = glob.glob(target_directory + "/*.csv")

df_result = df.loc[df.duplicated()]

for file in file_list:
    return(df_result)
This is where I am stuck.
target_directory = r'C:\Users\nikol\Downloads\fullDailyDeltas\fullDailyDeltas'
file_list = glob.glob(target_directory + "/*.csv")

for file in file_list:
    dataset = pd.read_csv(file)
    df = dataset.transactionID
    duplicated = df.loc[df.duplicated()]
    if not duplicated.empty:
        print(file)
        print(duplicated)
Have a look at the glob module.
import pandas as pd
import glob

def your_function(file):
    # put your df processing logic here
    return df_result
Step 1 - Create list of files in directory
target_directory = r'Path/to/your/dir'
file_list = glob.glob(target_directory + "/*.csv")
# Include slash or it will search in the wrong directory!!
Step 2 - Loop through files in list
for file in file_list:  # Loop files
    df_result = your_function(file)  # Put your logic into a separate function
    new_filename = file.replace('.csv', '_processed.csv')
    df_result.to_csv(new_filename, index=False)
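
As a concrete example, the duplicate check from the answer above could be dropped into that skeleton (a sketch: it assumes every csv has a transactionID column, as in the question, and the _duplicates.csv suffix is just an arbitrary choice):

import glob

import pandas as pd

def your_function(file):
    # return only the rows whose transactionID is duplicated within this file
    dataset = pd.read_csv(file)
    return dataset.loc[dataset['transactionID'].duplicated()]

target_directory = r'C:\Users\nikol\Downloads\fullDailyDeltas\fullDailyDeltas'

for file in glob.glob(target_directory + "/*.csv"):
    df_result = your_function(file)
    if not df_result.empty:
        # write the duplicates next to the source file
        df_result.to_csv(file.replace('.csv', '_duplicates.csv'), index=False)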
Comment
Had you included your code showing your attempts to do this yourself, your question would have been answered within seconds.

Loop through directory and create data frame

I'm trying to create a data frame and then loop through a directory filled with csv files and add those to the data frame. I'm trying to use the following code:
df = []
for dirName, subdirList, fileList in os.walk(rootDir):
    for fname in fileList:
        df = pd.read_csv(fname)
Unfortunately I'm getting an error stating that "File CIN_2017 does not exist" (it does). Any insight into how to add all these csv files into a dataframe? There is a .DS_Store in there but everything else is just a csv. Thanks.
You can try another solution with glob, which returns the file paths rather than the bare names (os.walk yields only the file names, which is why read_csv cannot find them). Then loop in a list comprehension to create a list of DataFrames, and last concat them into one big df:
import glob
files = glob.glob('files/*.csv')
df = pd.concat([pd.read_csv(fp) for fp in files], ignore_index=True)
It is the same as:
import glob

files = glob.glob('files/*.csv')
dfs = []
for fp in files:
    dfs.append(pd.read_csv(fp))
df = pd.concat(dfs, ignore_index=True)
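
If you want to keep the os.walk loop from the question (for example to recurse into subfolders), a sketch of the same fix is to join the directory back onto each bare file name and skip non-csv entries such as .DS_Store:

import os

import pandas as pd

rootDir = 'files'  # hypothetical root directory; substitute your own

dfs = []
for dirName, subdirList, fileList in os.walk(rootDir):
    for fname in fileList:
        if fname.endswith('.csv'):
            # os.walk yields bare names, so rebuild the full path before reading
            dfs.append(pd.read_csv(os.path.join(dirName, fname)))

df = pd.concat(dfs, ignore_index=True)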
import os
import pandas as pd

un_process_file = []
master_frame = pd.DataFrame(columns=['item_sku', 'external_product_id', 'standard_price', 'quantity'])

for root, dirs, files in os.walk(os.getcwd()):
    for file_path in files:
        if file_path.endswith('.csv'):
            try:
                print(file_path)
                file_name = os.path.join(root, file_path)
                file_frames = pd.read_csv(file_name, skiprows=2,
                                          usecols=['item_sku', 'external_product_id', 'standard_price', 'quantity'])
                master_frame = master_frame.append(file_frames)
            except Exception:
                # remember files that could not be parsed
                un_process_file.append(file_path)

master_frame = master_frame.rename(
    columns={'item_sku': 'sku', 'external_product_id': 'asin', 'standard_price': 'price'})
master_frame = master_frame.drop_duplicates(subset='asin')
master_frame.to_csv('masterfile.txt', sep='\t')

if un_process_file:
    print('\nUnable To Process these files\n')
    for files in un_process_file:
        print(files)
I had a similar problem and made this solution. Modify the column names according to your needs.
