Split data into multiple worksheets - python

Since I'm going to create a number of DataFrames that I know won't fit in a single Google worksheet (because of the column limit), I want to split the data across multiple worksheets. I'm using set_with_dataframe() and specifying which worksheet each DataFrame gets imported to, so my first thought was to create and define several worksheets and then use the same method. The problem is that I don't know how to "split" the data once there are no more columns left in the first worksheet (and then the second, the third, and so on).
I'm quite new to Python and I've been stuck on this for days, so any kind of help would be appreciated.
My code looks like this:
import gspread
from gspread_dataframe import get_as_dataframe, set_with_dataframe
from google.oauth2 import service_account
from google.auth.transport.requests import AuthorizedSession
from bs4 import BeautifulSoup
import pandas as pd
import requests
import traceback
import os


class DataScraper():
    def __init__(self, sheets):
        self.data_worksheet = sheets.data_worksheet
        self.total_urls = 0
        self.urls = self.getAllUrls(sheets.url_worksheet)

    def getAllUrls(self, urlWorkSheet):
        urls = urlWorkSheet.get_all_values()
        finalUrls = []
        for r in urls:
            # Get all urls
            modifiedUrls = [d for d in r[:14] if "https://" in d]
            if len(modifiedUrls) != 0:
                self.total_urls += len(modifiedUrls)
                finalUrls.append(modifiedUrls)
        return finalUrls

    def StartScrape(self):
        current_column_count = 1
        last_data_frame_max_width = 0
        current_element = 0
        for urlRow in self.urls:
            current_row_count = 1
            for url in urlRow:
                current_element += 1
                error = False
                page = requests.get(url)
                soup = BeautifulSoup(page.content, 'html.parser')
                try:
                    page = requests.get(url)
                    soup = BeautifulSoup(page.content, 'html.parser')
                    labels = []
                    results = []
                    tbl = soup.find('table')
                    for tr in tbl.findAll('tr'):
                        headers = [th.text.strip() for th in tr.findAll('th')]
                        data = [td.text.strip() for td in tr.findAll('td')]
                        labels.append(headers)
                        results.append(data)
                    final_results = []
                    for final_labels, final_data in zip(labels, results):
                        final_results.append({'Labels': final_labels, 'Data': final_data})
                    df = pd.DataFrame(final_results)
                    df['Labels'] = df['Labels'].str[0]
                    df['Data'] = df['Data'].str[0]
                    indexNames = df[df['Labels'] == 'Links'].index
                    df.drop(indexNames, inplace=True)
                    set_with_dataframe(self.data_worksheet, df, col=current_column_count, row=current_row_count, include_column_header=False)
                    current_row_count += df.shape[0] + 2
                    if df.shape[1] > last_data_frame_max_width:
                        last_data_frame_max_width = df.shape[1]
                except Exception:
                    error = True
                finally:
                    print(f"Processed page {current_element}/{self.total_urls} with status: {'success' if not error else 'error'}")
            current_column_count += last_data_frame_max_width + 5
            last_data_frame_max_width = 0


class Sheets():
    def __init__(self, filename, key):
        self.filename = filename
        self.key = key
        self.data_worksheet = None
        self.url_worksheet = None
        self.getSheets(self.getCredentials())

    def getCredentials(self):
        # sep = seperator
        _ = os.path.normpath(__file__).split(os.sep)
        _.insert(1, "/")
        credentials = service_account.Credentials.from_service_account_file(os.path.join(os.path.join(*_[0:-1]), self.filename))
        return credentials.with_scopes(['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive'])

    def getSheets(self, scoped_credentials):
        gc = gspread.Client(auth=scoped_credentials)
        gc.session = AuthorizedSession(scoped_credentials)
        spreadsheet_key = gc.open_by_key(self.key)
        # Get sheet with data import
        self.data_worksheet = spreadsheet_key.worksheet("Data")
        # Get list with url's
        self.url_worksheet = url_worksheet = spreadsheet_key.worksheet("Felix Copy")


# Get sheets
sheets = Sheets("credentials.json", "key_id")
# Start scraping
scraper = DataScraper(sheets)
scraper.StartScrape()
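One direction I've considered is just a sketch, not working code: keep track of how many columns the current worksheet already uses and, when the next DataFrame won't fit, create (or switch to) a new worksheet and reset the counters. MAX_COLS, the "Data N" sheet titles and the get_or_create_worksheet helper below are placeholders I made up for illustration, not anything from my actual code:

import gspread
from gspread_dataframe import set_with_dataframe

# Hypothetical column budget per worksheet; adjust to the real limit you hit.
MAX_COLS = 500

def get_or_create_worksheet(spreadsheet, title, rows=1000, cols=MAX_COLS):
    # Helper assumed for this sketch: reuse a worksheet if it exists, otherwise create it.
    try:
        return spreadsheet.worksheet(title)
    except gspread.WorksheetNotFound:
        return spreadsheet.add_worksheet(title=title, rows=rows, cols=cols)

def write_dataframe_with_overflow(spreadsheet, df, state):
    # state is a dict the caller keeps between calls, e.g. {'sheet_index': 1, 'col': 1, 'row': 1}.
    # If this frame would run past the column budget, move on to a fresh worksheet.
    if state['col'] + df.shape[1] - 1 > MAX_COLS:
        state['sheet_index'] += 1
        state['col'] = 1
    ws = get_or_create_worksheet(spreadsheet, f"Data {state['sheet_index']}")
    set_with_dataframe(ws, df, row=state['row'], col=state['col'], include_column_header=False)
    # Advance the column cursor past this frame plus a small gap.
    state['col'] += df.shape[1] + 5

This would need the gspread Spreadsheet object (spreadsheet_key in getSheets()) rather than a single worksheet, so I'm not sure how to fit it into the classes above.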

Related

Scraping data, appending to list, and making it a dataframe

I am trying to scrape a bunch of baseball statistics and get all that data into separate DataFrames so I can use it for my project. I am able to get all of the data, but I am having trouble figuring out how to store it all in variables and slice it accordingly.
import requests
import pandas as pd
from bs4 import BeautifulSoup as soup  # the parser is called as soup() below


def parse_row(rows):
    return [str(x.string) for x in rows.find_all('td')]


def soop(url):
    page = requests.get(url)
    text = soup(page.text, features='lxml')
    row = text.find_all('tr')
    data = [parse_row(rows) for rows in row]
    df = pd.DataFrame(data)
    df = df.dropna()
    if dp_num in url:
        df.columns = dp_col
    elif sb_num in url:
        df.columns = sb_col
    elif hr_num in url:
        df.columns = hr_col
    elif obp_num in url:
        df.columns = obp_col
    elif b2_num in url:
        df.columns = b2_col
    elif b3_num in url:
        df.columns = b3_col
    elif era_num in url:
        df.columns = era_col
    elif fld_num in url:
        df.columns = fld_col
    else:
        print('error')
    return df


# ncaa scraping function
def scrape(id_num):
    loop = 1
    page_num = 2
    page_numii = 2
    page_numiii = 2
    url = 'https://www.ncaa.com/stats/baseball/d1/current/team/' + id_num
    dii_url = 'https://www.ncaa.com/stats/baseball/d2/current/team/' + id_num
    diii_url = 'https://www.ncaa.com/stats/baseball/d3/current/team/' + id_num
    while loop == 1:  # first di page
        df = soop(url)
        loop += 1
        print(df)
    while loop <= 6:  # number of remaining di pages
        df = soop(url + '/p' + str(page_num))
        page_num += 1
        loop += 1
        print(df)
    while loop == 7:  # first d2 page
        df = soop(dii_url)
        loop += 1
        print(df)
    while loop <= 11:  # remaining d2 pages
        df = soop(dii_url + '/p' + str(page_numii))
        page_numii += 1
        loop += 1
        print(df)
    while loop == 12:  # first diii page
        df = soop(diii_url)
        loop += 1
        print(df)
    while loop < 20:  # remaining d3 pages
        df = soop(diii_url + '/p' + str(page_numiii))
        page_numiii += 1
        loop += 1
        print(df)
All of the code works and I get no errors, but instead of printing the data out I would like to store it in variables, and then have a separate DataFrame for each stat page I scraped. I have no clue where to start with that; I have seen on here that maybe I should try appending it to a list? I am a statistics major in college and I am pretty new to programming. Any help is appreciated.
To store the DataFrames in variables, you would have to build a list or dictionary to hold them.
With that being said, I probably wouldn't store the tables in variables, but rather write them to a database or CSV files so that you have the data available locally. Otherwise you'd have to run the scrape every time you need the data. Pandas can handle that for you (and can also parse the tables with .read_html()).
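If you do want the frames in memory rather than on disk, a minimal sketch would be a dictionary keyed by stat page. The URLs below are placeholders, not your real stat ids:

import pandas as pd

# Placeholder URLs; substitute the real NCAA stat pages you are scraping.
stat_urls = {
    'stat_a': 'https://www.ncaa.com/stats/baseball/d1/current/team/REPLACE_ME',
    'stat_b': 'https://www.ncaa.com/stats/baseball/d2/current/team/REPLACE_ME',
}

frames = {}
for name, url in stat_urls.items():
    # read_html returns every table it finds on the page; the stats table is the first one.
    frames[name] = pd.read_html(url)[0]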
I'm not sure exactly what data you want or how you want it (I'm also surprised there isn't an API to get this data), but the code below will grab it all and store it in folders with the following structure:
-data
    -d1
        -INDIVIDUAL STATISTICS
            csv files
            ...
        -TEAM STATISTICS
            csv files
            ...
    -d2
        -INDIVIDUAL STATISTICS
            csv files
            ...
        -TEAM STATISTICS
            csv files
            ...
    -d3
        -INDIVIDUAL STATISTICS
            csv files
            ...
        -TEAM STATISTICS
            csv files
            ...
Code:
import requests
from bs4 import BeautifulSoup
import pandas as pd
import os

statsIds_dict = {}
for division in [1, 2, 3]:
    statsIds_dict[f'd{division}'] = {}
    url = f'https://www.ncaa.com/stats/baseball/d{division}/'
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')

    statsIds = soup.find_all('div', {'class': 'stats-header__filter'})
    for each in statsIds:
        statsType = each.text.split('\n')[0]
        statsIds_dict[f'd{division}'][statsType] = {}
        options = each.find_all('option')
        for option in options:
            if option['value']:
                statsIds_dict[f'd{division}'][statsType][option.text] = 'https://www.ncaa.com' + option['value']

for division, v1 in statsIds_dict.items():
    for statsType, v2 in v1.items():
        for statTitle, link in v2.items():
            response = requests.get(link)
            soup = BeautifulSoup(response.text, 'html.parser')
            try:
                totPages = int(soup.find('ul', {'class': 'stats-pager'}).find_all('li')[-2].text)
            except:
                totPages = 1

            df = pd.read_html(link)[0]
            print(link)
            for page in range(2, totPages + 1):
                temp_df = pd.read_html(link + f'/p{page}')[0]
                print(link + f'/p{page}')
                # Concatenate this page's table onto the running frame
                df = pd.concat([df, temp_df]).reset_index(drop=True)

            path = f'data/{division}/{statsType}'
            # Check whether the specified path exists or not
            isExist = os.path.exists(path)
            if not isExist:
                # Create a new directory because it does not exist
                os.makedirs(path)
                print(f"The data/{division}/{statsType} directory is created!")

            df.to_csv(f'data/{division}/{statsType}/{division}_{statsType}_{statTitle}.csv', index=False)
            print(f'Saved: {division} {statsType} {statTitle}')
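Once the files are on disk, loading them back is just pd.read_csv; for example, for the d1 team statistics folder from the layout above:

import glob
import pandas as pd

# Read every CSV saved under the d1 team statistics folder into a dict of DataFrames.
frames = {}
for path in glob.glob('data/d1/TEAM STATISTICS/*.csv'):
    frames[path] = pd.read_csv(path)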

JSONDecodeError: Expecting value: line 1 column 1 (char 0) when using Pushift API to scrape Reddit Data

import pandas as pd
import requests
import json
import datetime
import csv


def get_pushshift_data(after, before, sub):
    url = 'https://api.pushshift.io/reddit/search/submission/?&after=' + str(after) + '&before=' + str(before) + '&subreddit=' + str(sub) + '&sort=asc&sort_type=created_utc&size=400'
    print(url)
    r = requests.get(url).json()
    # data = json.loads(r.text, strict=False)
    return r['data']


def collect_subData(subm):
    subData = list()  # list to store data points
    title = subm['title']
    url = subm['url']
    try:
        flair = subm['link_flair_text']
    except KeyError:
        flair = "NaN"
    try:
        # returns the body of the posts
        body = subm['selftext']
    except KeyError:
        body = ''
    author = subm['author']
    subId = subm['id']
    score = subm['score']
    created = datetime.datetime.fromtimestamp(subm['created_utc'])  # 1520561700.0
    numComms = subm['num_comments']
    permalink = subm['permalink']
    subData.append((subId, title, body, url, author, score, created, numComms, permalink, flair))
    subStats[subId] = subData


def update_subFile():
    upload_count = 0
    location = "subreddit_data_uncleaned/"
    print("Input filename of submission file, please add .csv")
    filename = input()
    file = location + filename
    with open(file, 'w', newline='', encoding='utf-8') as file:
        a = csv.writer(file, delimiter=',')
        headers = ["Post ID", "Title", "Body", "Url", "Author", "Score", "Publish Date", "Total No. of Comments", "Permalink", "Flair"]
        a.writerow(headers)
        for sub in subStats:
            a.writerow(subStats[sub][0])
            upload_count += 1
        print(str(upload_count) + " submissions have been uploaded into a csv file")


# global dictionary to hold 'subData'
subStats = {}
# tracks no. of submissions
subCount = 0
# Subreddit to query
sub = 'politics'
# Unix timestamp of date to crawl from.
before = int(datetime.datetime(2021, 5, 17, 0, 0).timestamp())
after = int(datetime.datetime(2014, 1, 1, 0, 0).timestamp())

data = get_pushshift_data(after, before, sub)
while len(data) > 0:
    for submission in data:
        collect_subData(submission)
        subCount += 1
    # Calls getPushshiftData() with the created date of the last submission
    print(len(data))
    print(str(datetime.datetime.fromtimestamp(data[-1]['created_utc'])))
    after = data[-1]['created_utc']
    data = get_pushshift_data(after, before, sub)

print(len(data))
update_subFile()
At line 1 I call the get_pushshift_data(after, before, sub) function to scrape the data and there is no error. But when I try to do the same thing again at line 11, with a different time for the after variable (type: int), the program raises JSONDecodeError: Expecting value: line 1 column 1 (char 0).
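For reference, one way I've tried to narrow the error down (a sketch, not a confirmed fix): the exception means the response body was not valid JSON, so checking the status code and the raw text before calling .json(), and retrying a few times, at least shows what the API actually returned. The retry counts and the helper name below are just assumptions for illustration:

import time
import requests

def get_pushshift_data_safe(after, before, sub, retries=3, pause=2):
    # Same request as above, but inspect what actually came back before parsing it as JSON.
    url = ('https://api.pushshift.io/reddit/search/submission/'
           f'?after={after}&before={before}&subreddit={sub}'
           '&sort=asc&sort_type=created_utc&size=400')
    for attempt in range(retries):
        r = requests.get(url)
        try:
            return r.json()['data']
        except ValueError:
            # json.JSONDecodeError subclasses ValueError, so this catches the failure above.
            print(f"Attempt {attempt + 1}: HTTP {r.status_code}, body starts with {r.text[:100]!r}")
            time.sleep(pause)
    return []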

Unable to concatenate 2 lists using Python (Int and text lists) and to split output data into separate files

I am facing an issue concatenating the data from 2 lists. I have the page number (an integer value) and the table data (text values), both in lists, and when merging them to print the output I get this error:
TypeError: object of type 'int' has no len()
My goal is to print the following output. I also need help saving the data from table 3: rows keyed by the "Aircraft operator ID" keyword should go into a table named "Table 3 A", and rows with a header value of "Installation ID" into a table named "Table 3 I", as 2 separate tabs of an Excel file. My code is given below:
import time
import requests
import random
from lxml import html  # used to use Xpath
from bs4 import BeautifulSoup
import xlsxwriter


def append_row(ws, row):
    for col, value in enumerate(row):
        ws.write_string(ws.cur_row, col, value)
    ws.cur_row += 1


workbook = xlsxwriter.Workbook('Output.xlsx')
ws_3_A = workbook.add_worksheet("Table 3 A")
ws_3_I = workbook.add_worksheet("Table 3 I")

# Keep a track of the row to use in each worksheet
ws_3_A.cur_row = 0
ws_3_I.cur_row = 0

# Code starts from here:
start = 1
end = 3
link = "http://ec.europa.eu/environment/ets/ohaDetails.do?returnURL=&languageCode=en&accountID=&registryCode=&buttonAction=all&action=&account.registryCode=&accountType=&identifierInReg=&accountHolder=&primaryAuthRep=&installationIdentifier=&installationName=&accountStatus=&permitIdentifier=&complianceStatus=&mainActivityType=-1&searchType=oha&resultList.currentPageNumber={}&nextList=Next%C2%A0%3E&selectedPeriods="

for page_number in range(start, end):
    print("Page {}".format(page_number))
    url = link.format(page_number)
    r = requests.get(url)
    print(url)
    serial_no = [int(x) for x in str(page_number)]
    print(serial_no)
    time.sleep(random.randint(2, 5))
    soup = BeautifulSoup(r.content, "lxml")
    # Table 3 Aircraft Operator ID data:
    for items in soup.find(id="tblChildDetails").find("table").find_all("tr")[1:]:
        dataset = [item.get_text(strip=True) for item in items.find_all("td")[:]]
        print(dataset)
        append_row(ws_3_A, serial_no + [url] + dataset)
    # Table 3 Installation ID data:
    for items in soup.find(id="tblChildDetails").find("table").find_all("tr")[1:]:
        dataset = [item.get_text(strip=True) for item in items.find_all("td")[:]]
        print(dataset)
        append_row(ws_3_I, serial_no + [url] + dataset)

workbook.close()
The current and expected output are attached as screenshots.
Traceback:
Traceback (most recent call last):
  File "D:\QRS\Script.py", line 42, in <module>
    append_row(ws_3_A, serial_no + [url] + dataset)
  File "D:\QRS\Script.py", line 10, in append_row
    ws.write_string(ws.cur_row, col, value)
  File "C:\Users\varun\AppData\Roaming\Python\Python36\site-packages\xlsxwriter\worksheet.py", line 67, in cell_wrapper
    return method(self, *args, **kwargs)
  File "C:\Users\varun\AppData\Roaming\Python\Python36\site-packages\xlsxwriter\worksheet.py", line 500, in write_string
    return self._write_string(row, col, string, cell_format)
  File "C:\Users\varun\AppData\Roaming\Python\Python36\site-packages\xlsxwriter\worksheet.py", line 512, in _write_string
    if len(string) > self.xls_strmax:
TypeError: object of type 'int' has no len()
Each element in [int(x) for x in str(page_number)] should be a string, because each element is passed to the value parameter of append_row(), and write_string() expects a string.
Then, to get the expected output, you need to ignore the first tr when page_number is greater than start.
Use try...finally so that the workbook is closed even if the script raises an error.
import time
import requests
import random
from lxml import html
from bs4 import BeautifulSoup
import xlsxwriter


def append_row(ws, row):
    for col, value in enumerate(row):
        ws.write_string(ws.cur_row, col, value)
    ws.cur_row += 1


workbook = xlsxwriter.Workbook('Output.xlsx')


def ws_3(name):
    return workbook.add_worksheet("Table 3 {}".format(name))


# Code starts from here:
start = 1
end = 5
link = "http://ec.europa.eu/environment/ets/ohaDetails.do?returnURL=&languageCode=en&accountID=&registryCode=&buttonAction=all&action=&account.registryCode=&accountType=&identifierInReg=&accountHolder=&primaryAuthRep=&installationIdentifier=&installationName=&accountStatus=&permitIdentifier=&complianceStatus=&mainActivityType=-1&searchType=oha&resultList.currentPageNumber={}&nextList=Next%C2%A0%3E&selectedPeriods="
coldict = {}

try:
    for page_number in [1, 2, 3, 342, 343]:
        print("Page {}".format(page_number))
        url = link.format(page_number)
        r = requests.get(url)
        serial_no = [str(page_number)]
        time.sleep(random.randint(2, 5))
        soup = BeautifulSoup(r.content, "lxml")
        # Table 3 Aircraft Operator ID data:
        tr = soup.find(id="tblChildDetails").find("table").find_all("tr")
        dataset = [item.get_text(strip=True) for item in tr[1].find_all("td")]
        # select or create new table
        if not coldict.get(dataset[0]):
            ws = ws_3(dataset[0])
            ws.cur_row = 0
            coldict[dataset[0]] = ws
            append_row(ws, ["Page no", "Url"] + dataset)
        else:
            ws = coldict.get(dataset[0])
        for items in tr[2:]:
            dataset = [item.get_text(strip=True) for item in items.find_all("td")]
            print(url)
            print(dataset)
            append_row(ws, serial_no + [url] + dataset)
finally:
    workbook.close()

Out of range BeautifulSoup

I'm getting a list index out of range error and I'm not sure why. My code is a web scraper that collects temperature data from a website. Everything worked fine for months, until recently.
I have a number of functions shown below for reference. The important one is getDailyAve(), which is where the exception is thrown.
Any thoughts or advice are appreciated.
import sys
import urllib
from bs4 import BeautifulSoup
from urllib2 import urlopen, URLError
import webbrowser
import time
from collections import Counter
import numpy as np
import re
import csv
import datetime
from datetime import timedelta

DATE_FORMAT = '%Y/%m/%d'


def daterange(start, end):
    def convert(date):
        try:
            date = datetime.datetime.strptime(date, DATE_FORMAT)
            return date.date()
        except TypeError:
            return date

    def get_date(n):
        return datetime.datetime.strftime(convert(start) + timedelta(days=n), DATE_FORMAT)

    days = (convert(end) - convert(start)).days
    if days <= 0:
        raise ValueError('The start date must be before the end date.')
    for n in range(0, days):
        yield get_date(n)


class SiteLocation:
    """class defining mine location parameters to lookup on weather search"""
    def __init__(self, city, state, zip, code):
        self.city = city
        self.state = state
        self.zip = zip
        self.code = code


def getDailyAve(url):
    url = urllib.urlopen(url)
    soup = BeautifulSoup(url.read(), 'lxml')
    form = soup.find("form", {"id": "archivedate"})
    table = form.find_next_sibling("table")
    rows = table.select("tr")[1:]
    time = []
    temp = []
    minutes = []
    # handle no data case
    if soup.find(text="Archive data not available for this date."):
        print("Data not available, URL: '%s'" % url)
        return None
    # capture time and temps
    for row in rows:
        data = [td.text for td in row.find_all("td")]
        match = re.search(r"[+-]?(?<!\.)\b[0-9]+\b(?!\.[0-9])", data[2])
        if match:
            temp.append(match.group())
            time.append(data[0])
            minutes.append(data[0][-4:-2])
    common = Counter(minutes).most_common()[0][0]
    finalTimes = []
    finalTemps = []
    for i in range(0, len(time)):
        if minutes[i] == common:
            finalTimes.append(time[i])
            finalTemps.append(int(temp[i]))
    dailyAve = sum(finalTemps) / float(len(finalTimes))
    return dailyAve


def writeToCsv(list1, list2, list3, list4, list5, list6, list7, list8):
    with open('results.csv', 'wb') as csvfile:
        results = csv.writer(csvfile, delimiter=',')
        results.writerow(['T-SJ', 'T- RB', 'T-DS', 'T-JW', 'T-GB', 'D', 'M', 'Y'])
        for idx in range(0, len(list1)):
            results.writerow([str(list1[idx]), str(list2[idx]), str(list3[idx]), str(list4[idx]), str(list5[idx]), str(list6[idx]), str(list7[idx]), str(list8[idx])])


def buildURL(location, day, month, year):
    if day < 10:
        strDay = '0' + str(day)
    else:
        strDay = str(day)
    baseURL = "http://www.weatherforyou.com/reports/index.php?forecast=pass&pass=archive&zipcode=" + location.zip + "&pands=" + location.city + "%2" + "C" + location.state + "&place=" + location.city + "&state=" + location.state + "&icao=" + location.code + "&country=us&month=" + str(month) + "&day=" + strDay + "&year=" + str(year) + "&dosubmit=Go"
    return baseURL


def main():
    loc1 = SiteLocation('Farmington', 'NM', '87401', 'KFMN')
    loc2 = SiteLocation('Whitesville', 'WV', '25209', 'KBKW')
    loc3 = SiteLocation('Rangely', 'CO', '81648', 'KVEL')
    loc4 = SiteLocation('Brookwood', 'AL', '35444', 'KTCL')
    loc5 = SiteLocation('Princeton', 'IN', '47670', 'KAJG')
    start = '2016/08/31'
    end = datetime.date.today()
    dateRange = list(daterange(start, end))
    listDailyAve1 = []
    listDailyAve2 = []
    listDailyAve3 = []
    listDailyAve4 = []
    listDailyAve5 = []
    listDays = []
    listMonths = []
    listYears = []
    for idx in range(0, len(dateRange)):
        strDate = str(dateRange[idx]).split("/")
        year = strDate[0]
        month = strDate[1]
        day = strDate[2]
        url1 = buildURL(loc1, day, month, year)
        url2 = buildURL(loc2, day, month, year)
        url3 = buildURL(loc3, day, month, year)
        url4 = buildURL(loc4, day, month, year)
        url5 = buildURL(loc5, day, month, year)
        dailyAve1 = getDailyAve(url1)
        dailyAve2 = getDailyAve(url2)
        dailyAve3 = getDailyAve(url3)
        dailyAve4 = getDailyAve(url4)
        dailyAve5 = getDailyAve(url5)
        listDailyAve1.append(dailyAve1)
        listDailyAve2.append(dailyAve2)
        listDailyAve3.append(dailyAve3)
        listDailyAve4.append(dailyAve4)
        listDailyAve5.append(dailyAve5)
        listDays.append(day)
        listMonths.append(month)
        listYears.append(year)
    writeToCsv(listDailyAve1, listDailyAve2, listDailyAve3, listDailyAve4, listDailyAve5, listDays, listMonths, listYears)


if __name__ == '__main__':
    status = main()
    sys.exit(status)
Here is the exception thrown:
Traceback (most recent call last):
  File ".\weatherScrape2.py", line 147, in <module>
    status = main()
  File ".\weatherScrape2.py", line 128, in main
    dailyAve1 = getDailyAve(url1)
  File ".\weatherScrape2.py", line 61, in getDailyAve
    match = re.search(r"[+-]?(?<!\.)\b[0-9]+\b(?!\.[0-9])",data[2])
IndexError: list index out of range
First of all, you need to handle situations when there is no available data. Here is one way:
# handle "no data" case
if soup.find(text="Archive data not available for this date."):
print("Data not available, URL: '%s'." % url)
return None
Also, I think there is a problem in the logic of getting the rows. I'd do it this way:
form = soup.find("form", {"id": "archivedate"})
table = form.find_next_sibling("table")
rows = table.select("tr")[1:]
Here is a complete snippet that I'm executing (for a single URL):
import requests
from bs4 import BeautifulSoup
from collections import Counter
import re


def getDailyAve(url):
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'lxml')
    form = soup.find("form", {"id": "archivedate"})
    table = form.find_next_sibling("table")
    rows = table.select("tr")[1:]
    time = []
    temp = []
    minutes = []
    # handle no data case
    if soup.find(text="Archive data not available for this date."):
        print("Data not available, URL: '%s'" % url)
        return None
    # capture time and temps
    for row in rows:
        data = [td.text for td in row.find_all("td")]
        match = re.search(r"[+-]?(?<!\.)\b[0-9]+\b(?!\.[0-9])", data[2])
        if match:
            temp.append(match.group())
            time.append(data[0])
            minutes.append(data[0][-4:-2])
    common = Counter(minutes).most_common()[0][0]
    finalTimes = []
    finalTemps = []
    for i in range(0, len(time)):
        if minutes[i] == common:
            finalTimes.append(time[i])
            finalTemps.append(int(temp[i]))
    dailyAve = sum(finalTemps) / float(len(finalTimes))
    return dailyAve


print(getDailyAve("https://www.weatherforyou.com/reports/index.php?forecast=pass&pass=archive&zipcode=87401&pands=Farmington%2CNM&place=Farmington&state=NM&icao=KFMN&country=us&month=09&day=03&year=2016&dosubmit=Go"))

Extract data from web page

I have a script to extract data from here: http://espn.go.com/nba/statistics/player/_/stat/scoring-per-48-minutes/
Part of obtaining the data in the script looks like this:
pts_start = data.find('">',mpg_end) + 2
pts_end = data.find('<',pts_start)
store.append(data[pts_start:pts_end])
mf_start = data.find(' >',pts_end) + 2
mf_end = data.find('<',mf_start)
store.append(data[mf_start:mf_end])
fg_start = data.find(' >',mf_end) + 2
fg_end = data.find('<',fg_start)
store.append(data[fg_start:fg_end])
I see that the names like fg and pts correspond to the table headlines, but I don't understand why certain ones are abbreviated in the script.
I want to modify the script to obtain the headlines on this table: http://espn.go.com/nba/statistics/player/_/stat/rebounds. I tried doing this by just plugging in the names as they appear at the top of the table but the resulting CSV file had missing information.
Full code :
import os
import csv
import time
import urllib2

uri = 'http://espn.go.com/nba/statistics/player/_/stat/scoring-per-48-minutes'


def get_data():
    try:
        req = urllib2.Request(uri)
        response = urllib2.urlopen(req, timeout=600)
        content = response.read()
        return content
    except Exception, e:
        print "\n[!] Error: " + str(e)
        print ''
        return False


def extract(data, rk):
    print '\n[+] Extracting data.'
    start = 0
    while True:
        store = [rk]
        if data.find('nba/player/', start) == -1:
            break
        with open("data.csv", "ab") as fcsv:
            main = data.find('nba/player/', start)
            name_start = data.find('>', main) + 1
            name_end = data.find('<', name_start)
            store.append(data[name_start:name_end])
            team_start = data.find('">', name_end) + 2
            team_end = data.find('<', team_start)
            store.append(data[team_start:team_end])
            gp_start = data.find(' >', team_end) + 2
            gp_end = data.find('<', gp_start)
            store.append(data[gp_start:gp_end])
            mpg_start = data.find(' >', gp_end) + 2
            mpg_end = data.find('<', mpg_start)
            store.append(data[mpg_start:mpg_end])
            pts_start = data.find('">', mpg_end) + 2
            pts_end = data.find('<', pts_start)
            store.append(data[pts_start:pts_end])
            mf_start = data.find(' >', pts_end) + 2
            mf_end = data.find('<', mf_start)
            store.append(data[mf_start:mf_end])
            fg_start = data.find(' >', mf_end) + 2
            fg_end = data.find('<', fg_start)
            store.append(data[fg_start:fg_end])
            m3_start = data.find(' >', fg_end) + 2
            m3_end = data.find('<', m3_start)
            store.append(data[m3_start:m3_end])
            p3_start = data.find(' >', m3_end) + 2
            p3_end = data.find('<', p3_start)
            store.append(data[p3_start:p3_end])
            ft_start = data.find(' >', p3_end) + 2
            ft_end = data.find('<', ft_start)
            store.append(data[ft_start:ft_end])
            ftp_start = data.find(' >', ft_end) + 2
            ftp_end = data.find('<', ftp_start)
            store.append(data[ftp_start:ftp_end])
            start = name_end
            rk = rk + 1
            csv.writer(fcsv).writerow(store)
        fcsv.close()


def main():
    print "\n[+] Initializing..."
    if not os.path.exists("data.csv"):
        with open("data.csv", "ab") as fcsv:
            csv.writer(fcsv).writerow(["RK", "PLAYER", "TEAM", "GP", "MPG", "PTS", "FGM-FGA", "FG%", "3PM-3PA", "3P%", "FTM-FTA", "FT%"])
        fcsv.close()
    rk = 1
    global uri
    while True:
        time.sleep(1)
        start = 0
        print "\n[+] Getting data, please wait."
        data = get_data()
        if not data:
            break
        extract(data, rk)
        print "\n[+] Preparing for next page."
        time.sleep(1.5)
        rk = rk + 40
        if rk > 300:
            print "\n[+] All Done !\n"
            break
        uri = 'http://espn.go.com/nba/statistics/player/_/stat/scoring-per-48-minutes/sort/avg48Points/count/' + str(rk)


if __name__ == '__main__':
    main()
I specifically want to know how to grab info based on the headlines, like TEAM, GP, MPG, PTS, FGM-FGA, FG%, 3PM-3PA, 3P%, FTM-FTA, FT%.
So the script doesn't need to be changed beyond things like pts or mpg in pts_start = data.find('">',mpg_end) + 2.
I don't understand why I can't just put in the name of the headline as it is shown in the table for certain ones. For example, instead of FTM-FTA, the script uses ft.
Extracting HTML data is rather easy with BeautifulSoup. The following example should give you the idea; it is not a complete solution to your problem, but you can easily extend it.
from bs4 import BeautifulSoup
import urllib2


def get_html_page_dom(url):
    response = urllib2.urlopen(url)
    html_doc = response.read()
    return BeautifulSoup(html_doc, 'html5lib')


def extract_rows(dom):
    table_rows = dom.select('.mod-content tbody tr')
    for tr in table_rows:
        # skip headers
        klass = tr.get('class')
        if klass is not None and 'colhead' in klass:
            continue
        tds = tr.select('td')
        yield {'RK': tds[0].string,
               'PLAYER': tds[1].select('a')[0].string,
               'TEAM': tds[2].string,
               'GP': tds[3].string
               # you can fetch rest of the indexs for corresponding headers
               }


if __name__ == '__main__':
    dom = get_html_page_dom('http://espn.go.com/nba/statistics/player/_/stat/scoring-per-48-minutes/')
    for data in extract_rows(dom):
        print(data)
You can simply run and see the result ;).
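If you want to pick columns by their headline instead of hard-coded indexes (your original question), one possible extension of the snippet above is to build a header-text-to-index map from the table's own header row first. This is a sketch that assumes the header row uses the colhead class and td cells as in the code above, and that the header texts match what the page shows; the wanted tuple is just an example:

def extract_rows_by_header(dom, wanted=('TEAM', 'GP', 'MPG', 'PTS')):
    # Map each header text (e.g. 'FGM-FGA', 'FT%') in the first header row to its column index.
    first_header = dom.select('.mod-content tr.colhead')[0]
    col_index = {td.get_text(strip=True): i for i, td in enumerate(first_header.select('td'))}
    for tr in dom.select('.mod-content tbody tr'):
        # skip repeated header rows
        klass = tr.get('class')
        if klass is not None and 'colhead' in klass:
            continue
        tds = tr.select('td')
        yield {name: tds[col_index[name]].get_text(strip=True)
               for name in wanted if name in col_index}

That way you only list the headlines you care about, and the column positions come from the page itself.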
