Pandas and CSV Libraries CSV Manipulation - python

I am building a simple app.
I want some values in my CSV to be updated every 15 minutes. I want
this part of my app to run in the background to prevent blocking the interface.
I couldn't get it to work the way I want to.
My code:
# I'm using the pandas, sched and time imports here:

#INTERFACE
@app.route("/")
def home():
    s = sched.scheduler(time.time, time.sleep)
    s.enter(15, 1, timer)
    s.run()
    return render_template("index.html")

#TIMER
def timer():
    timer = 0
    csv_file = 'C:\Python27\Walmart\sheet.csv'
    print("UPDATING")
    data_df = pd.read_csv(csv_file)
    print("READ")
    for i, row in data_df.iterrows():
        sku = data_df.iloc[i]['Walmart SKU']
        print(sku)
        if sku is '':
            break
        else:
            update(sku)
            print("Item Updated")
    print("UPDATECOMPLETE")
    home()
#UPDATE
def update(sku):
    lookup = str(sku)
    lookup = lookup.replace('.0', '')
    product = wapy.product_lookup(lookup)
    ts = time.time()
    st = datetime.datetime.today().strftime('%Y-%m-%d %I:%M %p')
    print(product.name)
    if product.available_online is 'TRUE':
        instock = 'yes'
    else:
        instock = 'no'
    quote_page = product.product_url
    page = urlopen(quote_page)
    soup = BeautifulSoup(page, 'html.parser')
    sold_box = soup.find('a', attrs={'class': 'font-bold prod-SoldShipByMsg'})
    sold = sold_box.text.strip()
    left_box = soup.find('div', attrs={'class': 'prod-ProductOffer-urgencyMsg'})
    left = left_box.text.strip()
    if left is '':
        stock = product.stock
    else:
        stock = left
    # fields=[lookup + ',' + '$'+str(product.sale_price) + ',' + instock + ',' + stock + ',' + str(sold) + ',' + st + ',' + '$'+str(product.msrp)]
    pathto_csv = 'C:\Python27\Walmart\sheet.csv'
    data_df = pd.read_csv(pathto_csv)
    print("CSV READ")
    data_df.set_value([lookup], ['Price'], '$'+str(product.sale_price))
    data_df.set_value([lookup], ['In Stock'], instock)
    data_df.set_value([lookup], ['Quantity'], stock)
    data_df.set_value([lookup], ['Last Update'], str(sold))
    data_df.set_value([lookup], ['Min Price'], '$'+str(product.msrp))
    data_df.to_csv(pathto_csv)
    with open(r'sheet.csv', 'a') as f:
        writer = csv.writer(f, delimiter=' ', quotechar=' ')
        writer.writerow(fields)
    print(st)
    print("UPDATED! 15 Minutes Have Passed!")
I have two problems:
1. When replacing the value in my update function I'm getting this error:
KeyError: "['879091509'] not in index"
I thought that [row], [column] would let me replace the value I want for that cell, for example:
data_df.set_value([lookup], ['Price'], '$'+str(product.sale_price))
I read this as: on the row where my SKU or ID is represented by [lookup], replace the ['Price'] with '$'+str(product.sale_price).
2. I can't get to my interface. I think this is because, when the timer resets, it runs the code again.
Question: How can I make this run only in the background?

Q1
There are two issues:
The first argument to set_value is looked up in the DataFrame's index, but your DataFrame is still using the default integer index, so the SKU isn't in it.
You're passing lists to set_value, but it just needs plain scalar values.
To fix, first set an index:
data_df = data_df.set_index('Walmart SKU')
And remove the lists from set_value:
data_df.set_value(lookup, 'Price', '$'+str(product.sale_price))
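Note that set_value has since been deprecated and removed from pandas; on current versions the equivalent single-cell write uses .at (or .loc). A minimal sketch with a made-up two-row frame, assuming the same column names as the question:
import pandas as pd

df = pd.DataFrame(
    {'Walmart SKU': ['879091509', '123456789'],
     'Price': ['$0.00', '$0.00']}
).set_index('Walmart SKU')

# .at[row_label, column_label] writes a single cell by label
df.at['879091509', 'Price'] = '$19.99'

# .loc does the same and also accepts lists or slices of labels
df.loc['879091509', 'Price'] = '$19.99'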
Q2
This isn't clear enough to answer. Try asking a new question with a minimal test case.
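That said, if the underlying goal is just to keep the 15-minute refresh off the request path, a common pattern is to run it in a daemon thread started once at startup instead of inside the route handler. A minimal sketch, assuming the timer() function from the question:
import threading
import time

def background_updater(interval_seconds=15 * 60):
    # loops forever in its own thread, so the Flask route never blocks on it
    while True:
        timer()                      # the update pass from the question
        time.sleep(interval_seconds)

# daemon=True means the thread exits together with the main process
threading.Thread(target=background_updater, daemon=True).start()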

Related

Same entry, although only found in one column

from sepa import parser
import re
import csv
import pandas as pd
import numpy as np

# Utility function to remove additional namespaces from the XML
def strip_namespace(xml):
    return re.sub(' xmlns="[^"]+"', '', xml, count=1)

# Read file
with open('test.xml', 'r') as f:
    input_data = f.read()

# Parse the bank statement XML to dictionary
camt_dict = parser.parse_string(parser.bank_to_customer_statement, bytes(strip_namespace(input_data), 'utf8'))
statements = pd.DataFrame.from_dict(camt_dict['statements'])
all_entries = []
for i, _ in statements.iterrows():
    if 'entries' in camt_dict['statements'][i]:
        df = pd.DataFrame()
        dd = pd.DataFrame.from_records(camt_dict['statements'][i]['entries'])
        dg = dd['entry_details']
        df['Date'] = dd['value_date'].str['date']
        df['Date'] = pd.to_datetime(df['Date']).dt.strftime('%d-%m-%Y')
        iban = camt_dict['statements'][i]['account']['id']['iban']
        df['IBAN'] = iban
        df['Currency'] = dd['amount'].str['currency']
        # Sort Credit/Debit in separate Columns
        df['Credit'] = np.where(dd['credit_debit_indicator'] == 'CRDT', dd['amount'].str['_value'], '')
        df['Debit'] = np.where(dd['credit_debit_indicator'] == 'DBIT', dd['amount'].str['_value'], '')
        # Get destination IBAN
        getlength = len(dg.index)  # 2
        for i in range(0, getlength):
            result = str(dd['entry_details'][i])
            print(result + "Resultat " + str(i))
            search_for_iban = re.search("CH\d{2}[ ]\d{4}[ ]\d{4}[ ]\d{4}[ ]\d{4}[ ]\d{1}|CH\d{19}", result)
            if(search_for_iban is None):
                print('the search is none')
                df['Test'] = 'None'
            else:
                print('the search is a match')
                df['Test'] = 'Yes'
        all_entries.append(df)
df_entries = pd.concat(all_entries)
print(df_entries)
**My problem here is just with this code block:**
for i in range(0, getlength):
    result = str(dd['entry_details'][i])
    search_for_iban = re.search("CH\d{2}[ ]\d{4}[ ]\d{4}[ ]\d{4}[ ]\d{4}[ ]\d{1}|CH\d{19}", result)
    if(search_for_iban is None):
        df['Test'] = 'None'
    else:
        df['Test'] = search_for_iban.group()
all_entries.append(df)
I have already tried various things via the index; the variable i counts up cleanly and getlength is correct for the 2 entries.
What I'm expecting
If the regex lookup in search_for_iban (re.search) matches an IBAN in the 2nd row, I want that IBAN only in the 2nd row of the dataframe's "Test" column, as follows:
(screenshot: what I expect)
What I'm getting
I get the same entry in rows 1 and 2, even though nothing was found in row 1. What am I overlooking? My head is hurting! :D
(screenshot: what I got)
I think I am making a thinking error here between a normal for loop and pandas assignment.
You can try:
for i in range(0, getlength):
    .
    .
    .
    else:
        df.loc[i, 'Test'] = search_for_iban
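The duplicated value comes from df['Test'] = ... assigning to the whole column, so whichever branch runs last in the loop overwrites every row, while df.loc[i, 'Test'] writes only row i. A minimal sketch with a made-up frame (the IBAN is just an example value):
import pandas as pd

df = pd.DataFrame({'Date': ['01-01-2023', '02-01-2023']})

df['Test'] = 'None'                           # broadcasts: every row gets 'None'
df.loc[1, 'Test'] = 'CH9300762011623852957'   # writes only the row with index label 1

print(df)   # row 0 keeps 'None', row 1 holds the IBAN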

Remove row from the CSV file if condition met

I am trying to scrape pickels.com.au.
I want to update the pickels_dataset.csv file: if the link is the same but the price is not the same, I remove the old entry and insert the new row into the CSV file. However, the old entry never gets removed from the file.
What would be the best way to remove and update a row in the CSV file?
Below is my code:
import requests
from scrapy.selector import Selector
import csv
import re
from tqdm import tqdm
from time import sleep

with open('pickels_dataset.csv', 'a+', newline='', encoding='utf-8') as auction_csv_file:
    auction_csv_writer = csv.writer(auction_csv_file)
    live_auctions_api = 'https://www.pickles.com.au/PWR-Web/services/api/sales/future'
    api_request = requests.get(url=live_auctions_api)
    for auctions in api_request.json():
        auction_link = auctions.get('viewSaleListingLink')
        if 'cars/item/search/-/listing/listSaleItems/' in auction_link:
            auction_request = requests.get(url=auction_link)
            response = Selector(text=auction_request.text)
            sales_id_re = response.xpath('//script[contains(text(), "Product_Type_Sequence")]/text() | //script[contains(text(), "lot_number_suffix_sequence")]/text()').get()
            sales_id = re.findall(r'"Product_Type_Sequence";var n="(.*?)"', sales_id_re) or re.findall(r'"lot_number_suffix_sequence";var n="(.*?)"', sales_id_re)
            if sales_id == []:
                continue
            auction_sale_link = f'https://www.pickles.com.au/v4/caradvert/saleid-{sales_id[0]}-public?count=true&inav=Car%7Cbc%7Cha%7Cu&q=(And.ProductType.Vehicles._.Year.range(2010..2021).)&sr=%7Clot_number_suffix_sequence%7C0%7C30'
            auction_sale_link_requests = requests.get(url=auction_sale_link)
            auctions_data = auction_sale_link_requests.json().get('SearchResults')
            if auctions_data == []:
                print("NO RESULTS")
            for auction_data in auctions_data:
                if int(auction_data.get('MinimumBid')) > 0:
                    ids = auction_data.get('TargetId')
                    main_title = auction_data.get('Title')
                    short_title = str(auction_data.get('Year')) + ' ' + str(auction_data.get('Make')) + ' ' + str(auction_data.get('Model'))
                    make = auction_data.get('Make')
                    model = auction_data.get('Model')
                    variant = auction_data.get('Series')
                    transmission = auction_data.get('Transmission')
                    odometer = auction_data.get('Odometer')
                    state = auction_data.get('Location').get('State')
                    sale_price = auction_data.get('MinimumBid')
                    link_path = main_title.replace(' ', '-').replace('/', '-').replace(',', '-') + '/' + str(ids)
                    link = f'https://www.pickles.com.au/cars/item/-/details/{link_path}'
                    sale_date = auction_data.get('SaleEndString')
                    auction_values = [
                        main_title, short_title, make,
                        model, variant, transmission, odometer,
                        state, "${:,.2f}".format(sale_price).strip(),
                        link, sale_date
                    ]
                    with open('pickels_dataset.csv', 'r+') as csv_read:
                        auction_reader = list(csv.reader(csv_read))
                        for each in auction_reader:
                            if link in each:
                                each_link, each_price = each[9], each[0]
                                if (link == each_link) and (sale_price != each_price):
                                    auction_reader.clear()
                                    print('New list found, old list deleted')
                                    auction_csv_writer.writerow(auction_values)
                                    print('New value added')
                                    continue
                                elif (link == each[9]) and (sale_price == each[0]):
                                    print('Same result already exist in the file')
                                    continue
                            else:
                                auction_csv_writer.writerow(auction_values)
                                print('Unique result found and added.')
                                break
Your current script is opening your auction CSV file for appending, and then whilst it is still open, attempting to open it again for reading. This is probably why it is not updating as expected.
A better approach would be to first read the entire contents of your existing saved auction file into a dictionary. The key could be the link which would then make it easy to determine if you have already seen an existing auction.
Next scrape the current auctions and update the saved_auctions dictionary as needed.
Finally at the end, write the contents of saved_auctions back to the CSV file.
For example:
import requests
from scrapy.selector import Selector
import csv
import re

auction_filename = 'pickels_dataset.csv'

# Load existing auctions into a dictionary with link as key
saved_auctions = {}
with open(auction_filename, newline='', encoding='utf-8') as f_auction_file:
    for row in csv.reader(f_auction_file):
        saved_auctions[row[9]] = row  # dictionary key is link

live_auctions_api = 'https://www.pickles.com.au/PWR-Web/services/api/sales/future'
api_request = requests.get(url=live_auctions_api)
for auctions in api_request.json():
    auction_link = auctions.get('viewSaleListingLink')
    if 'cars/item/search/-/listing/listSaleItems/' in auction_link:
        auction_request = requests.get(url=auction_link)
        response = Selector(text=auction_request.text)
        sales_id_re = response.xpath('//script[contains(text(), "Product_Type_Sequence")]/text() | //script[contains(text(), "lot_number_suffix_sequence")]/text()').get()
        sales_id = re.findall(r'"Product_Type_Sequence";var n="(.*?)"', sales_id_re) or re.findall(r'"lot_number_suffix_sequence";var n="(.*?)"', sales_id_re)
        if sales_id == []:
            continue
        auction_sale_link = f'https://www.pickles.com.au/v4/caradvert/saleid-{sales_id[0]}-public?count=true&inav=Car%7Cbc%7Cha%7Cu&q=(And.ProductType.Vehicles._.Year.range(2010..2021).)&sr=%7Clot_number_suffix_sequence%7C0%7C30'
        auction_sale_link_requests = requests.get(url=auction_sale_link)
        auctions_data = auction_sale_link_requests.json().get('SearchResults')
        if auctions_data == []:
            print("NO RESULTS")
        for auction_data in auctions_data:
            if int(auction_data.get('MinimumBid')) > 0:
                ids = auction_data.get('TargetId')
                main_title = auction_data.get('Title')
                short_title = str(auction_data.get('Year')) + ' ' + str(auction_data.get('Make')) + ' ' + str(auction_data.get('Model'))
                make = auction_data.get('Make')
                model = auction_data.get('Model')
                variant = auction_data.get('Series')
                transmission = auction_data.get('Transmission')
                odometer = auction_data.get('Odometer')
                state = auction_data.get('Location').get('State')
                minimum_bid = auction_data.get('MinimumBid')
                sale_price = "${:,.2f}".format(minimum_bid).strip()
                link_path = main_title.replace(' ', '-').replace('/', '-').replace(',', '-') + '/' + str(ids)
                link = f'https://www.pickles.com.au/cars/item/-/details/{link_path}'
                sale_date = auction_data.get('SaleEndString')
                auction_values = [
                    main_title, short_title, make,
                    model, variant, transmission, odometer,
                    state, sale_price,
                    link, sale_date
                ]
                if link in saved_auctions:
                    if saved_auctions[link][8] == sale_price:
                        print('Same result already exists in the file')
                    else:
                        print('New value updated')
                        saved_auctions[link] = auction_values  # Update the entry
                else:
                    print('New auction added')
                    saved_auctions[link] = auction_values

# Update the saved auction file
with open(auction_filename, 'w', newline='', encoding='utf-8') as f_auction_file:
    csv_auction_file = csv.writer(f_auction_file)
    csv_auction_file.writerows(saved_auctions.values())
If you want to also remove auctions that are no longer active, then it would probably be best to simply ignore the saved file and just write all current entries as is.
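A minimal sketch of that last variant, assuming (hypothetically) that the scraping loop fills a current_auctions dictionary keyed by link instead of updating saved_auctions; overwriting the file with only the auctions seen on this run makes stale rows disappear:
import csv

# hypothetical container: populated as current_auctions[link] = auction_values
# inside the scraping loop, ignoring the previously saved file entirely
current_auctions = {}

with open('pickels_dataset.csv', 'w', newline='', encoding='utf-8') as f:
    csv.writer(f).writerows(current_auctions.values())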

Why does my python script with sleep in infinite loop stop running?

I'm working on a Python script to transfer data from an .xlsx file to an HTML file: I read/parse the Excel file with pandas and use BeautifulSoup to edit the HTML (reading the paths to these two files from two .txt files). This, on its own, works. However, this script has to run constantly, so everything is called in an infinite while loop that repeats every 15 minutes, with messages being displayed on the console each time.
My problem is the following: for some reason, after a random number of loops, the code just doesn't run anymore, by which I mean no text on the console and no changes in the HTML file. When this happens, I have to rerun the script to get it working again.
Here is the main function:
def mainFunction():
    if getattr(sys, 'frozen', False):
        application_path = os.path.dirname(sys.executable)
    elif __file__:
        application_path = os.path.dirname(__file__)

    excelFiles = open(str(application_path) + "\\pathsToExcels.txt")
    htmlFiles = open(str(application_path) + "\\pathsToHTMLs.txt")
    sheetFiles = open(str(application_path) + "\\sheetNames.txt")

    print("Reading file paths ...")
    linesEx = excelFiles.readlines()
    linesHtml = htmlFiles.readlines()
    linesSheet = sheetFiles.readlines()

    print("Begining transfer")
    for i in range(len(linesEx)):
        excel = linesEx[i].strip()
        html = linesHtml[i].strip()
        sheet = linesSheet[i].strip()
        print("Transfering data for " + sheet)
        updater = UpdateHtml(excel, sheet, str(application_path) + "\\pageTemplate.html", html)
        updater.refreshTable()
        updater.addData()
        updater.saveHtml()
    print("Transfer done")

    excelFiles.close()
    htmlFiles.close()
    sheetFiles.close()
UpdateHtml is the one actually responsible for the data transfer.
The "__main__" which also contains the while loop:
if __name__ == "__main__":
    while(True):
        print("Update at " + str(datetime.now()))
        mainFunction()
        print("Next update in 15 minutes\n")
        time.sleep(900)
And finally, the batch code that launches this
python "C:\Users\Me\PythonScripts\excelToHtmlTransfer.py"
pause
From what I've noticed through trials, this situation doesn't occur when sleep is set to under 5 minutes (still happens for 5 minutes) or if it's omitted altogether.
Does anyone have any clue why this might be happening? Or any alternatives to sleep in this context?
EDIT: UpdateHtml:
import pandas as pd
from bs4 import BeautifulSoup

class UpdateHtml:
    def __init__(self, pathToExcel, sheetName, pathToHtml, pathToFinalHtml):
        with open(pathToHtml, "r") as htmlFile:
            self.soup = BeautifulSoup(htmlFile.read(), features="html.parser")
        self.df = pd.read_excel(pathToExcel, sheet_name=sheetName)
        self.html = pathToFinalHtml
        self.sheet = sheetName

    def refreshTable(self):
        # deletes the inner html of all table cells
        for i in range(0, 9):
            td = self.soup.find(id='ok' + str(i))
            td.string = ''
            td = self.soup.find(id='acc' + str(i))
            td.string = ''
            td = self.soup.find(id='nok' + str(i))
            td.string = ''
            td = self.soup.find(id='problem' + str(i))
            td.string = ''

    def prepareData(self):
        # changes the names of columns according to their data
        counter = 0
        column_names = {}
        for column in self.df.columns:
            if 'OK' == str(self.df[column].values[6]):
                column_names[self.df.columns[counter]] = 'ok'
            elif 'Acumulate' == str(self.df[column].values[6]):
                column_names[self.df.columns[counter]] = 'acc'
            elif 'NOK' == str(self.df[column].values[6]):
                column_names[self.df.columns[counter]] = 'nok'
            elif 'Problem Description' == str(self.df[column].values[7]):
                column_names[self.df.columns[counter]] = 'prob'
            counter += 1
        self.df.rename(columns=column_names, inplace=True)

    def saveHtml(self):
        with open(self.html, "w") as htmlFile:
            htmlFile.write(self.soup.prettify())

    def addData(self):
        groupCounter = 0
        index = 0
        self.prepareData()
        for i in range(8, 40):
            # Check if we have a valid value in the ok column
            if pd.notna(self.df['ok'].values[i]) and str(self.df['ok'].values[i]) != "0":
                td = self.soup.find(id='ok' + str(index))
                td.string = str(self.df['ok'].values[i])
            # Check if we have a valid value in the accumulate column
            if pd.notna(self.df['acc'].values[i]) and str(self.df['acc'].values[i]) != "0":
                td = self.soup.find(id='acc' + str(index))
                td.string = str(self.df['acc'].values[i])
            # Check if we have a valid value in the nok column
            if pd.notna(self.df['nok'].values[i]) and str(self.df['nok'].values[i]) != "0":
                td = self.soup.find(id='nok' + str(index))
                td.string = str(self.df['nok'].values[i])
            # Check if we have a valid value in the problem column
            if pd.notna(self.df['prob'].values[i]):
                td = self.soup.find(id='problem' + str(index))
                td.string = str(self.df['prob'].values[i])
            if groupCounter == 3:
                index += 1
                groupCounter = 0
            else:
                groupCounter += 1
The Excel file I'm working with is a bit strange, hence why I perform so many (seemingly) redundant operations. Still, it has to remain in its current form.
The main thing is that the 'rows' that contain data are actually formed out of 4 regular rows, hence the need for groupCounter.
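In other words, every block of four sheet rows feeds one HTML row id; the same mapping could be written more directly with integer division (a sketch equivalent to the groupCounter bookkeeping above):
# each group of 4 consecutive sheet rows (8-11, 12-15, ...) maps to one HTML row id
for i in range(8, 40):
    index = (i - 8) // 4   # 0 for rows 8-11, 1 for rows 12-15, and so on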
Found a workaround for this problem. Basically what I did was move the loop into the batch script, like so:
:whileLoop
python "C:\Users\Me\PythonScripts\excelToHtmlTransfer.py"
timeout /t 900 /nobreak
goto :whileLoop
After leaving it to run for a few hours the situation didn't occur anymore; however, unfortunately, I still don't know what caused it.

Multi-threading list iterating for loop

This function reads from a text file, re-formats the contents, and then writes them to a CSV. I'm trying to use threading to multi-thread the for i in lines loop; this is the longest part of a larger script and takes up most of the run time, because the list lines contains thousands of elements. Can someone help me straighten this out? Doing this synchronously instead of in parallel is taking up tons of time. I have seen many other answers to similar questions, but I've yet to understand them and implement them correctly.
def sheets(i):
    # time format for spreadsheet
    dt_time = datetime.now().strftime('%m/%d|%H:%M')
    # for league name (NFL,NBA,NHL ETC.) in list containing league names
    for league_name in leagues2:
        league_name = league_name.split('|')[0]
        with open(final_stats_path, 'r+') as lines:
            lines = lines.readlines()
        # i = one long string containg details about the event in the loop, eg. sport, game day, game id, home team name
        for i in lines:
            i = i.split(',')
            minprice = i[6]
            totaltix = i[5]
            event_date = i[2]
            try:
                dayofweek = datetime.strptime(event_date, '%Y-%m-%d').strftime('%A')
            except:
                continue
            event_date = i[2][2:]
            event_date = str(event_date).split('-')
            event_date = event_date[1] + '/' + event_date[2]
            sport = i[4]
            event = i[1].replace('Basketball', '').replace('\n', '')
            away = i[8].replace('Basketball', '').replace('\n', '')
            eventid = i[0]
            event_home = i[9].replace('Basketball', '').replace('\n', '')
            event = event.split(' at ')[0]
            tixdata = str(totaltix)
            eventid = 'https://pro.stubhub.com/simweb/sim/services/priceanalysis?eventId=' + str(eventid) + '&sectionId=0'
            directory = root + '\data' + '\\' + sport + '\\'
            report = directory + 'report.xlsx'
            fname = directory + 'teams.txt'
            eventleague = sport
            f = open(directory + 'acronym.txt', 'r+')
            lines_2 = f.readlines()
            for qt in lines_2:
                qt = qt.split('-')
                compare = qt[1]
                if event_home in compare:
                    event_home = qt[0]
                else:
                    pass
            troop = []
            d = {
                'ID': eventid,
                'Date': event_date,
                'Day': dayofweek,
                'Away': away,
            }
            s = {
                'time': tixdata
            }
            numbers = event_home + '.txt'
            numbers_new = 'bk\\bk_' + numbers
            with open(directory + numbers_new, 'a+') as y:
                pass
            with open(directory + numbers, 'a+') as o:
                pass
            with open(directory + numbers, 'r+') as g:
                for row in g:
                    if str(eventid) in row:
                        #print('the event is in the list')
                        row_update = row.replace('}', ", '" + dt_time + "': '" + tixdata + "'}")
                        with open(directory + numbers_new, 'a+') as y:
                            y.write(row_update)
                        break
                    else:
                        with open(directory + numbers, 'a+') as p:
                            #print('the event is not in the list')
                            p.write(str(d) + '\n')
                        with open(directory + numbers_new, 'a+') as n:
                            n.write(str(d) + '\n')
            sizefile = os.path.getsize(directory + numbers_new)
            if sizefile > 0:
                shutil.copy(directory + numbers_new, directory + numbers)
                open(directory + numbers_new, 'w').close()
            else:
                pass
            df = []
            with open(directory + numbers, 'r+') as t:
                for row in t:
                    b = eval(row)
                    dfs = df.append(b)
            df = pd.DataFrame(df)
            yark = list(df.columns)[:-5]
            zed = ['ID', 'Date', 'Day', 'Away']
            columns = zed + yark
            try:
                df = df[columns]
            except:
                pass
            df.index = range(1, 2*len(df)+1, 2)
            df = df.reindex(index=range(2*len(df)))
            writer = pd.ExcelWriter(directory + event_home + '.xlsx', engine='xlsxwriter')
            try:
                df.to_excel(writer, sheet_name=event_home)
            except:
                continue
            workbook = writer.book
            worksheet = writer.sheets[event_home]
            format1 = workbook.add_format({'num_format': '#,##0.00'})
            worksheet.set_column('A:ZZ', 18, format1)
            writer.save()


if __name__ == "__main__":
    pool = ThreadPool(8)  # Make the Pool of workers
    results = pool.map(sheets)  # Open the urls in their own threads
    pool.close()  # close the pool and wait for the work to finish
    pool.join()
    ##get_numbers()
    ##stats_to_csv()
    ##stats_to_html()
    #sheets()
Try changing the following line:
results = pool.map(sheets)
to:
results = pool.map(sheets,range(8))
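Note that pool.map(sheets, range(8)) just calls sheets(0) through sheets(7); because sheets() ignores its i argument and re-reads the whole file, the eight threads then duplicate the same work rather than splitting it. If the goal is to parallelise the for i in lines loop, one alternative sketch is to read the lines once and map a per-line worker over them; process_line below is a hypothetical helper standing in for the body of that loop:
from multiprocessing.dummy import Pool as ThreadPool

def process_line(line):
    # hypothetical stand-in for the body of "for i in lines" in sheets()
    fields = line.split(',')
    return fields[0]

with open(final_stats_path) as f:    # final_stats_path as defined in the question
    lines = f.readlines()

pool = ThreadPool(8)
results = pool.map(process_line, lines)   # one line per task, spread across 8 worker threads
pool.close()
pool.join()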

Python parsing csv file to store in file

I have not used Python in years and am trying to get back into it. I have an input file (.csv) that I want to parse and store the output in an output .csv or .txt file.
I have managed to parse the .csv file using the code below, and for the most part it works, but I can't get it to save to a file (Issue 1) without getting the IndexError mentioned in the comments (error 1).
import csv
import re
import itertools

file_name = 'PhoneCallData1.txt'
try:
    lol = list(csv.reader(open(file_name, 'r'), delimiter=' '))
    count = 0
except:
    print('File cannot be opened:', file_name)
    exit()
try:
    fout = open('output.txt', 'w')
except:
    Print("File cannot be written to:", "OutputFile")
    exit()
d = dict()
for item in itertools.chain(lol):  # Lists all items (field) in the CSV file.
    count += 1  # counter to keep track of row im looping through
    if lol[count][3] is None:
        print("value is not blank")
        count += 1
    else:
        try:
            check_date = re.search(r'(\d+/\d+/\d+)', lol[count][3])  # check to determine if date is a date
        except:
            continue
        check_cost = re.compile(r'($+\d*)', lol[count][9])  # check to determine if value is a cost
        if check_date == TRUE:
            try:
                key = lol[count][3]  # If is a date value, store key
            except ValueError:
                continue
        if check_cost == TRUE:
            value = lol[count][9]  # if is a cost ($) store value
            d[key] = value
            print(d[key])
            # fout.write((d[key])
            # What if there is no value in the cell?
            # I keep getting "IndexError: list index out of range", anyone know why?
            # Is there a better way to do this?
            # I only want to store the destination and the charge
And now comes the complicated part: the file I need to parse has a number of irrelevant rows of data before and in between the required data.
Data Format
What I want to do:
I want to iterate over two columns of data, and only store the rows that have a date or cost in them, discarding the rest of the data.
import csv
import re
import itertools

lol = list(csv.reader(open('PhoneCallData1.txt', 'r'), delimiter=' '))
count = 0
d = dict()
for item in itertools.chain(lol):  # Lists all items (field) in the CSV file.
    count += 1  # counter to keep track of row im looping through
    check_date = re.search(r'(\d+/\d+/\d+)', lol[count][3])  # check to determine
    check_cost = re.compile(r'($+\d*)', lol[count][9])  # check to determine if value is a cost
    if check_date == TRUE:
        key = lol[count][3]  # If is a date value, store key
    if check_cost == TRUE:
        value = lol[count][9]  # if is a cost ($) store value
        d[key] = value
        print(d[key])
        # What if there is no value in the cell?
        # I keep getting "IndexError: list index out of range", anyone know why?
        # Is there a better way to do this?
        # I only want to store the destination and the charges
What I have tried:
I tried to index the data after I loaded it, but that didn't seem to work.
I created the following to only look at rows that were more than a certain length, but it's terrible code. I was hoping for something more practical and reusable.
import re

with open('PhoneCallData1.txt', 'r') as f, open('sample_output.txt', 'w') as fnew:
    for line in f:
        if len(line) > 50:
            print(line)
            fnew.write(line + '\n')
import csv

lol = list(csv.reader(open('PhoneCallData1.txt', 'rb'), delimiter='\t'))
#d = dict()
#key = lol[5][0]    # cell A7
#value = lol[5][3]  # cell D7
#d[key] = value     # add the entry to the dictionary
I keep getting index out of bounds errors.
import re
import csv

match = re.search(r'(\d+/\d+/\d+)', 'testing date 11/12/2017')
print(match.group(1))
I'm trying to use regex to search for the date in the first column of data.
NOTE: I wanted to try pandas but I feel I need to start here. Any help would be awesome.
The answer to whether the next record needs to be parsed must be specific, and I have answered a similar question in the same way: a finite-state machine may help.
The main code is:
state = 'init'
output = []
# for line loop:
if state == 'init':  # seek for start parsing
    # check if start parsing
    state = 'start'
elif state == 'start':  # start parsing now
    # parsing
    # check if need to end parsing
    state = 'init'
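A minimal concrete sketch of that idea applied to the phone-call file, assuming (hypothetically) that the billing table starts at a header row containing 'Date' and ends at a blank line, and that the date and cost sit in the 4th and 10th space-separated fields as in the question:
import csv
import re

date_re = re.compile(r'\d+/\d+/\d+')
cost_re = re.compile(r'\$\d+(\.\d+)?')

records = {}
state = 'init'
with open('PhoneCallData1.txt', newline='') as f:
    for row in csv.reader(f, delimiter=' '):
        if state == 'init':
            # hypothetical start marker: the header row of the call table
            if row and 'Date' in row:
                state = 'start'
        elif state == 'start':
            if not row:            # hypothetical end marker: a blank line
                state = 'init'
                continue
            # keep only rows whose fields look like a date and a cost
            if len(row) > 9 and date_re.search(row[3]) and cost_re.search(row[9]):
                records[row[3]] = row[9]

print(records)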
import csv
import re
import itertools
import timeit

start_time = timeit.default_timer()
# code you want to evaluate
file_name = 'PhoneCallData.txt'
try:
    lol = list(csv.reader(open(file_name, 'r'), delimiter=' '))
except:
    print('File cannot be opened:', file_name)
    exit()
try:
    fout = open('output.txt', 'w')
except:
    print("File cannot be written to:", "OutputFile")
    exit()

# I could assign key value pairs and store in a dictionary. Then print, search, etc. on the dictionary. Version2
# d = dict()
count = 0
total = 0
for row in lol:  # Lists all items (field) in the CSV file.
    #print(len(row))
    count += 1  # counter to keep track of the row I'm looping through
    if len(row) == 8:
        if row[2].isdigit():
            # Remove the $ and convert to float
            cost = re.sub('[$]', '', row[7])
            # Assign total value
            try:
                # Calculate total for verification purposes
                total = total + float(cost)
                total = round(total, 2)
            except:
                continue
            string = str(row[2] + " : " + (row[7]) + " : " + str(total) + "\n")
            print(string)
            fout.write(string)
    if len(row) == 9:
        if row[2].isdigit():
            # Remove the $ and convert to float
            cost = re.sub('[$]', '', row[8])
            # Assign total value
            try:
                # Calculate total for verification purposes
                total = total + float(cost)
                total = round(total, 2)
            except:
                continue
            string = str(row[2] + " : " + row[8] + " : " + str(total) + "\n")
            print(string)
            fout.write(string)
    if len(row) == 10:
        # print(row[2] + ":" + row[9])
        # Remove the $ and convert to float
        cost = re.sub('[$]', '', row[9])
        # Assign total value
        try:
            # Calculate total for verification purposes
            total = total + float(cost)
            total = round(total, 2)
        except:
            continue
        string = str(row[2] + " : " + row[9] + " : " + str(total) + "\n")
        print(string)
        fout.write(string)

# Convert to string so I can print and store in file
count_string = str(count)
total_string = str(total)
total_string.split('.', 2)
# Write to screen
print(total_string + " Total\n")
print("Rows parsed: " + count_string)
# write to file
fout.write(count_string + " Rows were parsed\n")
fout.write(total_string + " Total")
# Calculate time spent on task
elapsed = timeit.default_timer() - start_time
round_elapsed = round(elapsed, 2)
string_elapsed = str(round_elapsed)
fout.write(string_elapsed)
print(string_elapsed + " seconds")
fout.close()
