pandas dataframe doesn't add all elements - python

I'm trying this code on a folder with a bunch of .txt files:
import pandas as pd
import os

for review in os.listdir('ebert_reviews'):
    with open(os.path.join('ebert_reviews', review), encoding='utf-8') as file:
        list_dir = []
        title = file.readline()[:-1]
        url = file.readline()[:-1]
        review_text = file.read()
        list_dir.append({'title': title,
                         'url': url,
                         'review_text': review_text})
#print(list_dir)

df = pd.DataFrame(list_dir)
print(df)
The dataframe only ends up with one of the rows, and it's not even the last one. How can I add every entry to it?

This should work. The problem is that list_dir = [] is re-created inside the loop, so each iteration throws away the entries collected so far; move it outside the loop so it accumulates one dict per file:
import pandas as pd
import os

list_dir = []
for review in os.listdir('ebert_reviews'):
    with open(os.path.join('ebert_reviews', review), encoding='utf-8') as file:
        title = file.readline()[:-1]
        url = file.readline()[:-1]
        review_text = file.read()
        list_dir.append({'title': title,
                         'url': url,
                         'review_text': review_text})
#print(list_dir)

df = pd.DataFrame(list_dir)
print(df)
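If the folder holds anything besides the review files (hidden files, for instance), a small extension guard keeps stray entries out of the dataframe. A minimal sketch of the same loop with that check added (assuming every review file ends in .txt):

import pandas as pd
import os

list_dir = []
for review in os.listdir('ebert_reviews'):
    if not review.endswith('.txt'):
        continue   # skip anything that is not a review file
    with open(os.path.join('ebert_reviews', review), encoding='utf-8') as file:
        list_dir.append({'title': file.readline().rstrip('\n'),
                         'url': file.readline().rstrip('\n'),
                         'review_text': file.read()})

df = pd.DataFrame(list_dir)
print(len(df))   # should match the number of .txt files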

Related

How to download a csv file in Python

I am trying to download a csv file from the url
https://qubeshub.org/publications/1220/supportingdocs/1#supportingdocs .
The file is Elephant Morphometrics and Tusk Size-originaldata-3861.csv
I have tried using pd.read_csv()
and
import pandas as pd
import io
import requests
url="https://qubeshub.org/publications/1220/supportingdocs/1#supportingdocs/Elephant Morphometrics and Tusk Size-originaldata-3861.csv"
s=requests.get(url).content
c=pd.read_csv(io.StringIO(s.decode('utf-8')))
Try:
import requests

url = "https://qubeshub.org/publications/1220/serve/1/3861?el=1&download=1"

r = requests.get(url)
filename = r.headers["Content-Disposition"].split('"')[1]

with open(filename, "wb") as f_out:
    print(f"Downloading {filename}")
    f_out.write(r.content)
Prints:
Downloading Elephant Morphometrics and Tusk Size-originaldata-3861.csv
and saves the file.
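For a very large file you may not want to hold the whole response in memory. A streamed variant of the same request (a sketch using requests' stream=True and iter_content with the same URL) writes the file in chunks instead:

import requests

url = "https://qubeshub.org/publications/1220/serve/1/3861?el=1&download=1"

with requests.get(url, stream=True) as r:
    r.raise_for_status()
    filename = r.headers["Content-Disposition"].split('"')[1]
    with open(filename, "wb") as f_out:
        for chunk in r.iter_content(chunk_size=8192):
            f_out.write(chunk)   # write the file piece by piece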
This should download the file and parse the rows and columns into a csv file
import requests
import csv

url = "https://qubeshub.org/publications/1220/serve/1/3861?el=1&download=1"

req = requests.get(url)
rows = req.content.decode('utf-8').split("\r\n")
rows.pop()  # drop the empty string left after the trailing line break

csv_local_filename = "test.csv"
with open(csv_local_filename, 'w') as fs:
    writer = csv.writer(fs, delimiter=',')
    for row in rows:
        entries = row.split(',')
        b = writer.writerow(entries)
You'll likely want to convert those columns into the desired types before you start working with them. The example code above leaves everything as a string.
After I run the above code I see:
>tail test.csv
2005-13,88,m,32.5,290,162.3,40
2005-13,51,m,37.5,270,113.2,40
2005-13,86,m,37.5,310,175.3,38
and
>head test.csv
Years of sample collection,Elephant ID,Sex,Estimated Age (years),shoulder Height in cm,Tusk Length in cm,Tusk Circumference in cm
1966-68,12,f,0.08,102,,
1966-68,34,f,0.08,89,,
1966-68,162,f,0.083,89,,
1966-68,292,f,0.083,92,,
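As noted above, everything is left as a string. A minimal sketch of loading the downloaded file with pandas and letting it infer numeric types (the column name comes from the header shown above):

import pandas as pd

df = pd.read_csv("test.csv")   # pandas infers numeric dtypes where it can
# Force a specific column if needed, e.g. the age column:
df["Estimated Age (years)"] = pd.to_numeric(df["Estimated Age (years)"], errors="coerce")
print(df.dtypes)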
In Firefox, after downloading the file in the browser, you can check the link that the download points to, and it shows
https://qubeshub.org/publications/1220/serve/1/3861?el=1&download=1
This is the link you should use in the code:
import pandas as pd
df = pd.read_csv('https://qubeshub.org/publications/1220/serve/1/3861?el=1&download=1')
print(df)

Extracting Data from Multiple PDFs

I am trying to extract data from PDF documents and have a question regarding that. I was able to get the code working for one single PDF. However, is there a way I can point the code to a folder with multiple PDFs and get the extract out as CSV? I am a complete beginner in Python, so any help will be appreciated. Below is the current code that I have.
import pdfplumber
import pandas as pd

file = 'Test Slip.pdf'

lines = []
with pdfplumber.open(file) as pdf:
    pages = pdf.pages
    for page in pdf.pages:
        text = page.extract_text()
        for line in text.split('\n'):
            lines.append(line)
            print(line)

df = pd.DataFrame(lines)
df.to_csv('test.csv')
One possible option would be to use os.listdir and only read files that end in .pdf:
import os

folder_with_pdfs = '/path/to/folder'
for pdf_file in os.listdir(folder_with_pdfs):
    if pdf_file.endswith('.pdf'):
        pdf_file_path = os.path.join(folder_with_pdfs, pdf_file)
        # do pdf reading with opening pdf_file_path
I am not sure why you aim to write lines to a dataframe as rows but this should be what you need:
import pdfplumber
import pandas as pd
import os

def extract_pdf(pdf_path):
    linesOfFile = []
    with pdfplumber.open(pdf_path) as pdf:
        for pdf_page in pdf.pages:
            single_page_text = pdf_page.extract_text()
            for line in single_page_text.split('\n'):
                linesOfFile.append(line)
    #print(linesOfFile)
    return linesOfFile

folder_with_pdfs = 'folder_path'
linesOfFiles = []
for pdf_file in os.listdir(folder_with_pdfs):
    if pdf_file.endswith('.pdf'):
        pdf_file_path = os.path.join(folder_with_pdfs, pdf_file)
        linesOfFile = extract_pdf(pdf_file_path)
        linesOfFiles.append(linesOfFile)

df = pd.DataFrame(linesOfFiles)
df.to_csv('test.csv')
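The code above produces one row per PDF, with each extracted line in its own column. If you would rather have one row per line, tagged with the file it came from, here is a sketch of that variant (it reuses extract_pdf and folder_with_pdfs from the snippet above; the source_file and line column names are only illustrative):

records = []
for pdf_file in os.listdir(folder_with_pdfs):
    if pdf_file.endswith('.pdf'):
        pdf_file_path = os.path.join(folder_with_pdfs, pdf_file)
        for line in extract_pdf(pdf_file_path):
            records.append({'source_file': pdf_file, 'line': line})

df = pd.DataFrame(records)
df.to_csv('test.csv', index=False)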

API request loop through pagination in python

I am looking to loop through the CoinGecko API for all of the exchanges listed on there, pull the tickers that are listed for each exchange (this is paginated to 100 rows), and loop through all of the pages; there is no way of telling how many there are. Then store all rows out to a CSV.
Here is what I have come up with so far.
from pycoingecko import CoinGeckoAPI
cg = CoinGeckoAPI()
import pandas as pd

# grab a list of all the exchanges listed on CG
ex_list = cg.get_exchanges_list()
#df_ex_list = pd.read_json(exchanges_list)
df = pd.json_normalize(ex_list)
# output to csv
df.to_csv('exchange_list.csv', encoding='utf-8', index=False)

id_list = df['id'].tolist()

def get_ex_tickers():
    for x in id_list:
        # get tickers
        d = cg.get_exchanges_tickers_by_id(x, page_integer = 2)  ### the number of pages is not known
        # import into pandas df
        df = pd.json_normalize(d, record_path=['tickers'], meta=['name'])
        # define path + filename
        path = 'ticker_lists/'
        filename = path + x + '_ticker_list' + '.csv'
        # output to csv
        df.to_csv(filename, encoding='utf-8', index=False)

get_ex_tickers()
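Since there is no way of knowing the page count up front, one common pattern is to keep requesting successive pages until the API hands back an empty tickers list. A sketch of that approach (assuming pycoingecko forwards a page keyword to the /exchanges/{id}/tickers endpoint and that the response is a dict with 'name' and 'tickers' keys, as in the code above; 'binance' is just an example exchange id):

from pycoingecko import CoinGeckoAPI
import pandas as pd

cg = CoinGeckoAPI()

def get_all_tickers(exchange_id):
    frames = []
    page = 1
    while True:
        d = cg.get_exchanges_tickers_by_id(exchange_id, page=page)
        if not d.get('tickers'):   # empty page means we ran past the last one
            break
        frames.append(pd.json_normalize(d, record_path=['tickers'], meta=['name']))
        page += 1
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()

df = get_all_tickers('binance')
df.to_csv('ticker_lists/binance_ticker_list.csv', encoding='utf-8', index=False)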

Ignoring inner header rows in pandas dataframe

I am scraping multiple tables from the web that are exactly like this one (the big batting gamelogs table) and I need the dataframe to ignore the inner header rows that start with the month of the season.
Here is my script so far:
from bs4 import BeautifulSoup
import pandas as pd
import csv
import urllib2

def stir_the_soup():
    player_links = open('player_links.txt', 'r')
    player_ID_nums = open('player_ID_nums.txt', 'r')
    id_nums = [x.rstrip('\n') for x in player_ID_nums]
    idx = 0
    for url in player_links:
        #open the url and create bs object
        player_link = urllib2.urlopen(url)
        bs = BeautifulSoup(player_link, 'html5lib')

        #identify which table is needed
        table_id = ""
        if url[-12] == 'b':
            table_id = "batting"
        elif url[-12] == 'p':
            table_id = "pitching"

        #find the table and create dataframe
        table = str(bs.find('table', {'id' : (table_id + '_gamelogs')}))
        df = pd.read_html(table, header=0)
        df2 = df[0]
        df2 = df2[df2.PA != 'PA']

        #for the name of the file and file path
        file_path = '/Users/kramerbaseball/Desktop/MLB_Web_Scraping_Program/game_logs_non_concussed/'
        name_of_file = str(id_nums[idx])
        df2.to_csv(path_or_buf=(file_path + name_of_file + '.csv'), sep=',', encoding='utf-8')
        idx += 1

if __name__ == "__main__":
    stir_the_soup()
I tried taking the dataframe and ignoring the rows where PA == 'PA' or HR == 'HR', but it will not delete the rows. Any help is appreciated.
Notice that in the inner header rows some column values are constant; here the Gtm column of those rows always holds the text 'Date'. This will drop the intermediate headers from your df:
df3 = df2[df2['Gtm']!='Date']
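If the string comparison does not catch every separator row, another way to get the same effect is to keep only the rows where a column that should be numeric actually parses as a number. A sketch using the PA column mentioned in the question:

import pandas as pd

# Repeated header rows ('PA') and month separator rows are not numeric,
# so to_numeric turns them into NaN and the filter drops them.
df3 = df2[pd.to_numeric(df2['PA'], errors='coerce').notna()]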

Appending non-DataFrame to pandas csv

I'm trying to find out if there is an easier way to append a Date column and an additional info column to my existing csv file. I'm adding these columns because this information is not in the JSON string from the REST API call.
import requests
import json
import http.client
import datetime
import pandas as pd
from pandas.io.json import json_normalize

url = api.getinfo()
r = requests.get(url, headers=headers, verify=False)
if r.status_code != http.client.OK:
    raise requests.HTTPError(r)

jsonstring = json.dumps(r.json()["data"])
load = json.loads(jsonstring)
df = json_normalize(load)

col = ["poolId", "totalPoolCapacity", "totalLocatedCapacity",
       "availableVolumeCapacity", "usedCapacityRate"]

with open('hss.csv', 'a') as f:
    df.to_csv(f, header=False, columns=col)

a = pd.read_csv('hss.csv')
a['date'] = [datetime.date.today()] * len(a)
a.to_csv('hss.csv')

b = pd.read_csv('hss.csv')
b['storage system'] = "ssystem22"
b.to_csv('hss.csv')
I end up getting extra columns (Unnamed: 0, Unnamed: 0.1) in my csv file each time the script is run. Also, each time I append, it overwrites the old dates.
,Unnamed: 0,Unnamed: 0.1,poolId,totalPoolCapacity, totalLocatedCapacity,availableVolumeCapacity,usedCapacityRate,date,storage system
0,155472,223618,565064,51,,2017-04-12,ssystem22
1,943174,819098,262042,58,,2017-04-12,ssystem22
0,764600,966017,046668,71,,2017-04-12,ssystem22
1,764600,335680,487650,76,,2017-04-12,ssystem22
2,373700,459800,304446,67,,2017-04-12,ssystem22
I kept researching and found how to fix this. I should have been using the pd.Series function. Below is the corrected code:
import requests
import json
import http.client
import datetime
import pandas as pd
from pandas.io.json import json_normalize

url = api.getinfo()
r = requests.get(url, headers=headers, verify=False)
if r.status_code != http.client.OK:
    raise requests.HTTPError(r)

jsonstring = json.dumps(r.json()["data"])
load = json.loads(jsonstring)
df = json_normalize(load)

df['storage system'] = pd.Series('ssystem22', index=df.index)
df['date'] = pd.Series(datetime.date.today().strftime('%m-%d-%Y'),
                       index=df.index)

col = ["poolId", "totalPoolCapacity", "totalLocatedCapacity",
       "availableVolumeCapacity", "usedCapacityRate", "storage system",
       "date"]

with open('hss.csv', 'a') as f:
    df.to_csv(f, header=False, columns=col)
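A side note on the Unnamed: 0 / Unnamed: 0.1 columns from the first attempt: they come from to_csv writing the DataFrame index by default, and every read/modify/write cycle adds another one. Passing index=False avoids them (a one-line illustration continuing the snippet above, not part of the original answer):

with open('hss.csv', 'a') as f:
    df.to_csv(f, header=False, columns=col, index=False)   # no index column written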