How to enumerate some text files using Python?

I want to make a program to automate an Excel task using openpyxl. I am using "enumerate" to open some text files and then automatically input their contents into an Excel file.
import os
os.chdir(r'F:\tes')
filenames = ["eb.txt", "ea.txt"]
for i, filename in enumerate(filenames):
    file = open(filename, 'r')
    text = file.read().replace('\u2014', '-').replace('—', '-')
    start = 0
    startcheck = True
    end = 0
    endcheck = True
    for idx, letter in enumerate(text):
        if text[idx:idx+4] == 'NPWP' and startcheck:
            start = idx + 7
            startcheck = False
        if text[idx:idx+7] == 'Pembeli' and endcheck:
            end = idx
            endcheck = False
data = text[start:end]
from openpyxl import load_workbook
wb = load_workbook(filename = r'F:\tes\Book1.xlsx')
sheet_ranges = wb['1771 III']
cell_name = 'M' + str(20 + (3*i))
sheet_ranges[cell_name] = data
wb.save(filename = r'F:\tes\Form 1771.xlsx')
I've tried to open two text files, but it only opens and inputs one text file's data into Excel. How do I code it to process multiple text files?

Relocate some of the statements and ensure each statement is in the appropriate loop (correct indentation). I have not tested this, but it looks correct.
First, put all imports and setup at the beginning.
import os
from openpyxl import load_workbook
os.chdir(r'F:\tes')
filenames = ["eb.txt", "ea.txt"]
wb = load_workbook(filename = r'F:\tes\Book1.xlsx')
Then start the iteration.
# outer loop
for i, filename in enumerate(filenames):
    file = open(filename, 'r')
    text = file.read().replace('\u2014', '-').replace('—', '-')
    # explicitly close the file
    file.close()
    start = 0
    startcheck = True
    end = 0
    endcheck = True
    # find the data
    # inner loop
    for idx, letter in enumerate(text):
        if text[idx:idx+4] == 'NPWP' and startcheck:
            start = idx + 7
            startcheck = False
        if text[idx:idx+7] == 'Pembeli' and endcheck:
            end = idx
            endcheck = False
    # ensure this section is in the outer loop
    data = text[start:end]
    sheet_ranges = wb['1771 III']
    cell_name = 'M' + str(20 + (3*i))
    sheet_ranges[cell_name] = data
Finally, save the workbook. The indentation ensures it is saved only after all the data has been written to it.
wb.save(filename = r'F:\tes\Form 1771.xlsx')
It is usually best to open a file using the with keyword, which ensures the file is closed automatically.
with open(filename, 'r') as f:
    text = f.read().replace('\u2014', '-').replace('—', '-')
In your example you iterate over each character in the file using enumerate to find the indexes of the start and end of your data (text[idx:idx+4] == 'NPWP'). Strings have a find method that will do that for you; note the + 7, which preserves the offset past the 'NPWP' label from your original code:
start = text.find('NPWP') + 7
end = text.find('Pembeli', start)
data = text[start:end]
With these changes your code would look like this:
import os
from openpyxl import load_workbook

os.chdir(r'F:\tes')
filenames = ["eb.txt", "ea.txt"]
wb = load_workbook(filename = r'F:\tes\Book1.xlsx')

for i, filename in enumerate(filenames):
    with open(filename, 'r') as f:
        text = f.read().replace('\u2014', '-').replace('—', '-')
    # skip past the 'NPWP' label, matching the original start = idx + 7
    start = text.find('NPWP') + 7
    end = text.find('Pembeli', start)
    data = text[start:end]
    sheet_ranges = wb['1771 III']
    cell_name = 'M' + str(20 + (3*i))
    sheet_ranges[cell_name] = data

wb.save(filename = r'F:\tes\Form 1771.xlsx')
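One caveat worth knowing: str.find returns -1 when the substring is missing, so a file without an 'NPWP' or 'Pembeli' marker would silently produce a wrong slice. A minimal sketch of a guard for that case (the message text is just an illustration):

for i, filename in enumerate(filenames):
    with open(filename, 'r') as f:
        text = f.read().replace('\u2014', '-').replace('—', '-')
    start = text.find('NPWP')
    end = text.find('Pembeli')
    if start == -1 or end == -1:
        # skip files that do not contain both markers
        print('Skipping ' + filename + ': markers not found')
        continue
    data = text[start + 7:end]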

Related

TabError: inconsistent use of tabs and spaces in indentation when adding to a dictionary

I am trying to move selected images from nested subdirectories. I am matching SKUs from an Excel file to the image names (which are also the SKU numbers). Any that match are then moved into a new folder.
My challenge: when I try to create a dictionary to save my full directory paths, I am faced with the following error message.
File "c:\printing\python\data_clean.py", line 56
fullpath_filelist = {file: os.path.join(root,dirs, file}
^
TabError: inconsistent use of tabs and spaces in indentation
#! python3
# Create clean version of data file
import openpyxl, webbrowser, sys, re, os, shutil

print('Opening workbook')
#*********************
Main_Searchterm = 'Find'
Sub_Searchterm = 'Marine'
Data_path = 'C:\Printing\Python\data\datafile.xlsx'
Image_folder = 'C:\Printing\Python\data\images'
Sorted_folder = 'C:\Printing\Python\data\sorted'
#**********************

def find_category():
    wb = openpyxl.load_workbook(Data_path)
    sheet = wb['Sheet1']
    # This looks for the main search term and puts it into column 6
    for rowNum in range(2, sheet.max_row+1):
        category = sheet['E' + str(rowNum)].value   # This controls which column to search
        keywordRegex = re.compile(Main_Searchterm)
        mo = keywordRegex.search(category)
        try:
            if mo.group() == Main_Searchterm:
                sheet.cell(row = rowNum, column = 6).value = Main_Searchterm   # This controls which column to add the new search term to
        except:
            pass
    # This looks for the sub search term and puts it into column 7
    for rowNum in range(2, sheet.max_row+1):
        category = sheet['E' + str(rowNum)].value   # This controls which column to search
        keywordRegex = re.compile(Sub_Searchterm)
        mo = keywordRegex.search(category)
        try:
            if mo.group() == Sub_Searchterm:
                sheet.cell(row = rowNum, column = 7).value = Sub_Searchterm   # This controls which column to add the new search term to
        except:
            pass
    wb.save(Data_path)

    wb = openpyxl.load_workbook(Data_path)
    sheet = wb['Sheet1']
    filelist = []   # List of all files in directory and subdirectory
    fullpath_filelist = {}
    for root, dirs, files in os.walk(Image_folder):
        for file in files:
            # append the file name to the list
            filelist.append(file)
            fullpath_filelist = {file: os.path.join(root,dirs, file}
    for filename in filelist:
        for rowNum in range(2, sheet.max_row+1):
            # for rowNum in range(2, 3):
            image = sheet['H' + str(rowNum)].value   # This controls which column to search
            final_path = os.path.join(root, Main_Searchterm, Sub_Searchterm, filename)
            if str(image) == str(filename):
                shutil.move(filename, final_path)

find_category()
Depending on the IDE, Ctrl-F for the '\t' character and replace it with '    ' (4 spaces).
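Once the tabs are fixed, note that the highlighted line would still fail: it is missing a closing parenthesis, dirs is a list (which os.path.join cannot accept), and the dictionary is rebuilt on every iteration instead of accumulated. A minimal sketch of a corrected version:

fullpath_filelist = {}
for root, dirs, files in os.walk(Image_folder):
    for file in files:
        filelist.append(file)
        # map each file name to its full path; root already includes the subdirectory
        fullpath_filelist[file] = os.path.join(root, file)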

How do I get user input into an Excel spreadsheet via input(), in either a CSV or xlsx spreadsheet?

So far, I have been able to access CSV and xlsx files in Python, but I am unsure how to use input() to add user data to the spreadsheet.
I would also want this input() to be enterable only once per day, but for different columns in my spreadsheet (this is a separate issue).
Here is my code so far, first for CSV, second for xlsx. I don't need both; either will do:
# writing to a CSV file
import csv

def main():
    filename = "EdProjDBeg.csv"
    header = ("Ans1", "Ans2", "Ans3")
    data = [(0, 0, 0)]
    writer(header, data, filename, "write")
    updater(filename)

def writer(header, data, filename, option):
    with open(filename, "w", newline = "") as csvfile:
        if option == "write":
            clidata = csv.writer(csvfile)
            clidata.writerow(header)
            for x in data:
                clidata.writerow(x)
        elif option == "update":
            writer = csv.DictWriter(csvfile, fieldnames = header)
            writer.writeheader()
            writer.writerows(data)
        else:
            print("Option is not known")

# Updating the CSV files with new data
def updater(filename):
    with open(filename, newline = "") as file:
        readData = [row for row in csv.DictReader(file)]
        readData[0]['Ans2'] = 0
    readHeader = readData[0].keys()
    writer(readHeader, readData, filename, "update")

# Reading and updating xlsx files
import openpyxl

theFile = openpyxl.load_workbook(r'C:\Users\joe_h\OneDrive\Documents\Data Analysis STUDYING\Excel\EdProjDBeg.xlsx')
print(theFile.sheetnames)
currentsheet = theFile['Customer1']
print(currentsheet['B3'].value)

wb = openpyxl.load_workbook(r'C:\Users\joe_h\OneDrive\Documents\Data Analysis STUDYING\Excel\EdProjDBeg.xlsx')
ws = wb.active
i = 0
cell_val = ''
# Finds which row is blank first
while cell_val != '':
    cell_val = ws['A' + i].value
    i += 1
# Modify Sheet, Starting With Row i
wb.save(r'C:\Users\joe_h\OneDrive\Documents\Data Analysis STUDYING\Excel\EdProjDBeg.xlsx')

x = input('Prompt: ')
This works for inputting data into an xlsx file.
Just use:
ws['A1'] = "data"
to input into cell A1
See code below for example using your original code:
wb = openpyxl.load_workbook('sample.xlsx')
print(wb.sheetnames)
currentsheet = wb['Sheet']
ws = currentsheet
# ws = wb.active  <-- defaults to first sheet

i = 0
cell_val = ''
# Finds which row is blank first
while cell_val is not None:
    i += 1
    cell_val = ws['A' + str(i)].value
    print(cell_val)

x = input('Prompt: ')
# sets the A column of the first blank row to be the user input
ws['A' + str(i)] = x
# saves the spreadsheet
wb.save("sample.xlsx")
Also, I made a few edits to your original while loop in the above code:
- When a cell is blank, None is returned, so the loop tests against None rather than ''.
- A1 is the first cell on the left, not A0 (moved i += 1 above finding the value of the cell).
- Converted the variable i to a string when accessing the cell.
See https://openpyxl.readthedocs.io/en/stable/ for the full documentation
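As an aside, if the goal is simply to add the input after the last used row, openpyxl worksheets also have an append method that writes to the next row after the existing data without a manual scan. A sketch, reusing the same sample.xlsx:

import openpyxl

wb = openpyxl.load_workbook('sample.xlsx')
ws = wb['Sheet']
x = input('Prompt: ')
ws.append([x])   # writes x into column A of the row after the last used row
wb.save('sample.xlsx')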

to_csv multiple dataframes from loop with filename

I'm trying to create multiple good/bad files from original .csv files in a directory.
I'm fairly new to Python, but have cobbled together the code below. However, it's not saving multiple files, just one "good" and one "bad" file. In the directory I have testfile1 and testfile2; the output should be testfile1good, testfile1bad, testfile2good and testfile2bad.
Any help would be greatly appreciated.
Thanks
import pandas as pd
from string import ascii_letters
import glob
from pathlib import Path

files = glob.glob('C:\\Users\\nickn\\OneDrive\\Documents\\Well\\*.csv')
for f in files:
    filename = []
    filename = Path(f)
#Can not be null fields
df = pd.read_csv(f)
emptyvals = []
emptyvals = df['First Name'].isnull() | df['Last Name'].isnull()
#Bank Account Number is not 8 digits long
accountnolen = []
ac = []
accountnolen = df['AccNumLen'] = df['Bank Account Number'].astype(str).map(len)
ac = df[(df['AccNumLen'] != 8)]
acd = ac.drop(['AccNumLen'], axis=1)
#Create Exclusions
allexclusions = []
allexclusions = df[emptyvals].append(acd)
allexclusions.to_csv(filename.stem+"bad.csv", header=True, index=False)
#GoodList
#for f in files:
#    filename = []
#    filename = Path(f)
origlist = df
df = pd.merge(origlist, allexclusions, how='outer', indicator=True)
cl = df[(df['_merge'] == 'left_only')]
cld = cl.drop(['_merge','AccNumLen'], axis=1)
cld['Well ID'] = cld['Well ID'].str.rstrip(ascii_letters)
cld.to_csv(filename.stem+'good.csv', header=True, index=False)
I think you start the loop but then leave it and do the rest at line 14 - there filename is set to the last file, and you save your data once. What you want is to do the loop and have the rest happen for each iteration, so the code should look like this:
import pandas as pd
from string import ascii_letters
import glob
from pathlib import Path

files = glob.glob('C:\\Users\\nickn\\OneDrive\\Documents\\Well\\*.csv')
for f in files:
    filename = []
    filename = Path(f)
    #EDIT: we stay in the loop and process each file one by one with the following lines:
    #Can not be null fields
    df = pd.read_csv(f)
    emptyvals = []
    emptyvals = df['First Name'].isnull() | df['Last Name'].isnull()
    #Bank Account Number is not 8 digits long
    accountnolen = []
    ac = []
    accountnolen = df['AccNumLen'] = df['Bank Account Number'].astype(str).map(len)
    ac = df[(df['AccNumLen'] != 8)]
    acd = ac.drop(['AccNumLen'], axis=1)
    #Create Exclusions
    allexclusions = []
    allexclusions = df[emptyvals].append(acd)
    allexclusions.to_csv(filename.stem+"bad.csv", header=True, index=False)
    #GoodList
    origlist = df
    df = pd.merge(origlist, allexclusions, how='outer', indicator=True)
    cl = df[(df['_merge'] == 'left_only')]
    cld = cl.drop(['_merge','AccNumLen'], axis=1)
    cld['Well ID'] = cld['Well ID'].str.rstrip(ascii_letters)
    cld.to_csv(filename.stem+'good.csv', header=True, index=False)
In other words: you iterate over the file names found in the directory, and THEN you take the last filename and process it in one pass. By adding four spaces of indentation to the rest of the code, we tell the Python interpreter that this part of the code is part of the loop and should be executed for each file. Hope it makes sense.
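One version note beyond the original answer: DataFrame.append was deprecated in pandas 1.4 and removed in pandas 2.0, so on recent pandas the exclusions line needs pd.concat instead:

# equivalent of df[emptyvals].append(acd) on pandas >= 2.0
allexclusions = pd.concat([df[emptyvals], acd])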

Taking Same Worksheet from a Folder of xlsm Files with Python

I'm new to pandas/Python and I've come up with the following code to extract data from a specific part of a worksheet.
import openpyxl as xl
import pandas as pd

rows_with_data = [34,37,38,39,44,45,46,47,48,49,50,54,55,57,58,59,60,62,63,64,65,66,70,71,72,76,77,78,79,80,81,82,83,84,88,89,90,91,92]

path = r'XXX'
xpath = input('XXX')
file = r'**.xlsm'
xfile = input('Change file name, current is ' + file + ' :')
sheetname = r'Summary'

wb = xl.load_workbook(filename = xpath + '\\' + file, data_only = True)
sheet = wb.get_sheet_by_name(sheetname)
rows = len(rows_with_data)

line_items = []
for i in range(rows):
    line_items.append(sheet.cell(row = rows_with_data[i], column = 13).value)

period = []
for col in range(17, 35):
    period.append(sheet.cell(row = 20, column = col).value)

print(line_items)

vals = []
x = []
for i in range(rows):
    if i != 0:
        vals.append(x)
        x = []
    for col in range(17, 35):
        x.append(sheet.cell(row = rows_with_data[i], column = col).value)
vals.append(x)

all_values = {}
all_values['Period'] = period
for i in range(rows):
    print(line_items[i])
    all_values[line_items[i]] = vals[i]

print(all_values)

period_review = input('Enter a period (i.e. 2002): ')
item = input('Enter an item (i.e. XXX): ')
time = period.index(period_review)
display_item = str(all_values[item][time])
print(item + ' for ' + period_review + " is " + display_item)

Summary_Dataframe = pd.DataFrame(all_values)
writer = pd.ExcelWriter(xpath + '\\' + 'values.xlsx')
Summary_Dataframe.to_excel(writer, 'Sheet1')
writer.save()
writer.close()
I have the same worksheet (summary results) across a library of 60 xlsm files, and I'm having a hard time figuring out how to iterate this across the entire folder of files. I also want to change this from extracting specific rows to taking the entire "Summary" worksheet, pasting it into the new file and naming the worksheet after its source file ("Experiment_A") when pasted into the new Excel file. Any advice?
I was having a hard time reading your code to understand what you ultimately want to do, so this is just advice, not a complete solution. You can iterate through all the files in the folder using os, read the files into one dataframe, then save the single big dataframe to CSV. I usually avoid Excel, but I guess you need the Excel conversion. In the example below I read all the txt files from a directory, put them into a list of dataframes, then store the combined dataframe as JSON. You could also store it as Excel/CSV.
import os
import pandas as pd

def process_data():
    # input file path in 2 parts in case it is very long
    input_path_1 = r'\\path\to\the\folder'
    input_path_2 = r'\second\part\of\the\path'
    # assembling the full folder path
    file_path = input_path_1 + input_path_2
    # listing all files in the folder
    file_list = os.listdir(os.path.join(file_path))
    # selecting only the .txt files into a list object
    file_list = [file_name for file_name in file_list if '.txt' in file_name]
    # selecting the fields we need ('sent_date' must be listed here for the transformations below)
    field_names = ['country', 'ticket_id', 'sent_date']
    # defining a list to collect all the dataframes
    pd_list = []
    inserted_files = []
    # looping over txt files and storing them
    for file_name in file_list:
        # creating the file path to read the file
        file_path_ = file_path + '\\' + file_name
        df_ = pd.read_csv(os.path.join(file_path_), sep='\t', usecols=field_names)
        # converting the datetime to date -
        # a few internal data transformation examples before writing
        df_['sent_date'] = pd.to_datetime(df_['sent_date'])
        df_['sent_date'] = df_['sent_date'].values.astype('datetime64[M]')
        # adding each dataframe to the list
        pd_list.append(df_)
        # adding the file name to the inserted list to print later
        inserted_files.append(file_name)
    print(inserted_files)
    # SQL-like union all of the dataframes to create a single data source
    df_ = pd.concat(pd_list)
    output_path_1 = r'\\path\to\output'
    output_path_2 = r'\path\to\output'
    output_path = output_path_1 + output_path_2
    # set the file name
    file_name = 'xyz.json'
    # adding the day the file was processed
    df_['etl_run_time'] = pd.to_datetime('today').strftime('%Y-%m-%d')
    # write the file to json
    df_.to_json(os.path.join(output_path, file_name), orient='records')
    return print('Data Stored as json successfully')

process_data()
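Applied to your actual question, a minimal sketch along the same lines could read each workbook's "Summary" sheet with pandas and write it into one output workbook, one sheet per source file. The folder path below is a placeholder, and pandas reads .xlsm files via the openpyxl engine:

import os
import pandas as pd

folder = r'C:\path\to\xlsm_folder'   # hypothetical folder holding the 60 .xlsm files
with pd.ExcelWriter(os.path.join(folder, 'combined.xlsx')) as writer:
    for file_name in os.listdir(folder):
        if file_name.endswith('.xlsm'):
            # pull the whole Summary worksheet from this workbook
            df = pd.read_excel(os.path.join(folder, file_name), sheet_name='Summary')
            # name the output sheet after the file; Excel caps sheet names at 31 chars
            df.to_excel(writer, sheet_name=os.path.splitext(file_name)[0][:31], index=False)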

Trying to iterate through CSV files in a directory and grab a particular cell then post results to a single csv

I am trying to iterate through several CSV files in a directory and grab a particular cell (the same cell location) from each CSV file (cell location found when opened in Excel), and then post all the matching cells in a single CSV or xls file, one after the other.
I have written the code below (with some researched help), but I am just iterating over the first CSV file in my list and printing the same value each time, repeated for the number of CSV files in my list. Could anybody point me in the right direction?
Here's my poor attempt!
import xlwt
import xlrd
import csv
import glob
import os

files = ['1_IQ_QTA.csv','2_IQ_QTA.csv','3_IQ_QTA.csv','4_IQ_QTA.csv']
n = 0
row = 0
filename = ('outputList.csv', 'a')
fname = files[n]
workbookr = xlrd.open_workbook(fname)
sheetr = workbookr.sheet_by_index(0)
workbookw = xlwt.Workbook()
sheetw = workbookw.add_sheet('test')
while n < len(files):
    fname = files[n]
    workbookr = xlrd.open_workbook(fname[n])
    data = [sheetr.cell_value(12, 1) for col in range(sheetr.ncols)]
    for index, value in enumerate(data):
        sheetw.write(row, index, value)
    workbookw.save('outputList.csv')
    row = row + 1
    n = n + 1
workbookw.save('outputList.csv')
My code is still a bit messy, I may have leftover code from my various attempts!
Thanks
MikG
Assuming you are just trying to make a CSV file of the same cell from each file: if you had 4 files, your output file will have 4 entries.
files = ['1_IQ_QTA.csv','2_IQ_QTA.csv','3_IQ_QTA.csv','4_IQ_QTA.csv']
n = 0
row = 0
outputfile = open('outputList.csv', 'w')
cellrow = 12      # collect the cell (12, 1) from each file and put it in the output list
cellcolumn = 1
while n < len(files):
    fname = files[n]
    currentfile = open(fname, 'r')
    for i in range(cellrow):
        currentrow = currentfile.readline()
    # print(currentrow)  # for testing
    columncnt = 0
    currentcell = ''
    openquote = False
    for char in currentrow:
        if char == '"' and not openquote:
            openquote = True
        elif char == '"' and openquote:
            openquote = False
        elif char == ',' and not openquote:
            columncnt += 1
            if columncnt == cellcolumn:
                cellvalue = currentcell
                # print(cellvalue)  # for testing
            currentcell = ''
        else:
            currentcell += char
    outputfile.write(cellvalue + ',')
    currentfile.close()
    n += 1
outputfile.close()
It seemed to me that since you already had a CSV it would be easier to deal with as a regular file and parse through to find the right information, plus nothing to import. Happy coding!
I think you have an error at this line in the while loop:
workbookr = xlrd.open_workbook(fname[n])
must be:
workbookr = xlrd.open_workbook(fname)
otherwise your workbookr remains as you set it before outside the loop:
fname = files[n]
workbookr = xlrd.open_workbook(fname)
which is the first file in your list.
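Beyond that, sheetr is also only assigned once, outside the loop, so even with that fix the data would still come from the first file's sheet. It likewise needs refreshing inside the loop, along these lines:

while n < len(files):
    fname = files[n]
    workbookr = xlrd.open_workbook(fname)
    # re-read the sheet from the workbook just opened
    sheetr = workbookr.sheet_by_index(0)
    # ... rest of the loop body unchanged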
Since they are just CSV files, there is no need for the Excel libraries.
#!/usr/bin/env python
import argparse
import csv

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='merge csv files on field')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    parser.add_argument('infile', nargs='+', type=str, help='list of input files')
    parser.add_argument('--col', type=int, default=0, help='Column to grab')
    parser.add_argument('--row', type=int, default=0, help='Row to grab')
    parser.add_argument('--out', type=str, default='temp.csv', help='name of output file')
    args = parser.parse_args()

    data = []
    for fname in args.infile:
        with open(fname, newline='') as df:
            reader = csv.reader(df)
            for index, line in enumerate(reader):
                if index == args.row:
                    # wrap the cell in a list so each one becomes its own output row
                    data.append([line[args.col]])
                    break

    with open(args.out, 'w', newline='') as out:
        writer = csv.writer(out, dialect='excel')
        writer.writerows(data)
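For the files in the question, a hypothetical invocation (assuming the script is saved as merge_csv.py) would be:

python merge_csv.py 1_IQ_QTA.csv 2_IQ_QTA.csv 3_IQ_QTA.csv 4_IQ_QTA.csv --row 12 --col 1 --out outputList.csv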
