Beautiful Soup Wikipedia nested tables - python

I am new to Beautiful Soup and nested tables, so I am trying to get some experience by scraping a Wikipedia table.
I have searched for a good example on the web, but unfortunately I have not found anything.
My goal is to parse the table "States of the United States of America" on this web page via pandas. As you can see from my code below, I have the following issues:
1) I cannot extract all the columns. My code does not import all the columns into the pandas DataFrame properly; it writes the entries of the third column of the HTML table below the first column.
2) I do not know how to deal with colspan="2", which appears in some rows of the table. In my pandas DataFrame, I would like to have the same entry when the capital and the largest city are the same.
Here is my code. Note that I got stuck trying to overcome the first issue.
Code:
from urllib.request import urlopen
import pandas as pd
wiki='https://en.wikipedia.org/wiki/List_of_states_and_territories_of_the_United_States'
page = urlopen(wiki)
from bs4 import BeautifulSoup
soup = BeautifulSoup(page)
right_table=soup.find_all('table')[0] # First table
rows = right_table.find_all('tr')[2:]
A=[]
B=[]
C=[]
D=[]
F=[]
for row in rows:
    cells = row.findAll('td')
    # print(len(cells))
    if len(cells)>=11: # Only extract table body, not heading
        A.append(cells[0].find(text=True))
        B.append(cells[1].find(text=True))
        C.append(cells[2].find(text=True))
        D.append(cells[3].find(text=True))
        F.append(cells[4].find(text=True))
df=pd.DataFrame(A,columns=['State'])
df['Capital']=B
df['Largest']=C
df['Statehood']=D
df['Population']=F
df
print(df)
Do you have any suggestions?
Any help towards a better understanding of BeautifulSoup would be appreciated.
Thanks in advance.

Here's the strategy I would use.
I notice that each line in the table is complete but, as you say, some lines list two cities and some list only one. This means that we can use the number of items in a line to determine whether we need to 'double' the city name mentioned in that line or not.
I begin the way you did.
>>> import re
>>> import requests
>>> import bs4
>>> page = requests.get('https://en.wikipedia.org/wiki/List_of_states_and_territories_of_the_United_States').content
>>> soup = bs4.BeautifulSoup(page, 'lxml')
>>> right_table=soup.find_all('table')[0]
Then I find all of the rows in the table and verify that it's at least approximately correct.
>>> trs = right_table('tr')
>>> len(trs)
52
I poke around until I find the lines for Alabama and Wyoming, the first and last rows, and display their texts. They're examples of the two types of rows!
>>> trs[2].text
'\n\xa0Alabama\nAL\nMontgomery\nBirmingham\n\nDec 14, 1819\n\n\n4,863,300\n\n52,420\n135,767\n50,645\n131,171\n1,775\n4,597\n\n7\n\n'
>>> trs[51].text
'\n\xa0Wyoming\nWY\nCheyenne\n\nJul 10, 1890\n\n\n585,501\n\n97,813\n253,335\n97,093\n251,470\n720\n1,864\n\n1\n\n'
I notice that I can split these strings on \n and \xa0. This can be done with a regex.
>>> [_ for _ in re.split(r'[\n\xa0]', trs[51].text) if _]
['Wyoming', 'WY', 'Cheyenne', 'Jul 10, 1890', '585,501', '97,813', '253,335', '97,093', '251,470', '720', '1,864', '1']
>>> [_ for _ in re.split(r'[\n\xa0]', trs[2].text) if _]
['Alabama', 'AL', 'Montgomery', 'Birmingham', 'Dec 14, 1819', '4,863,300', '52,420', '135,767', '50,645', '131,171', '1,775', '4,597', '7']
The if _ conditional in these list comprehensions is to discard empty strings.
The Wyoming list has 12 items, Alabama's has 13. I would leave Alabama's row as it is for pandas. I would extend Wyoming's (and all the other rows of length 12) using:
>>> row = [_ for _ in re.split(r'[\n\xa0]', trs[51].text) if _]
>>> row[:3]+row[2:]
['Wyoming', 'WY', 'Cheyenne', 'Cheyenne', 'Jul 10, 1890', '585,501', '97,813', '253,335', '97,093', '251,470', '720', '1,864', '1']
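To finish the job, here's a minimal sketch of how I'd assemble every row this way into a DataFrame. Treat it as an outline under the assumptions above; the column names (especially 'Abbr') are placeholders of my own, not taken from the page:

import re
import bs4
import pandas as pd
import requests

page = requests.get('https://en.wikipedia.org/wiki/List_of_states_and_territories_of_the_United_States').content
soup = bs4.BeautifulSoup(page, 'lxml')
trs = soup.find_all('table')[0]('tr')

parsed = []
for tr in trs[2:]:                   # skip the two header rows
    row = [_ for _ in re.split(r'[\n\xa0]', tr.text) if _]
    if len(row) == 12:               # capital and largest city are the same
        row = row[:3] + row[2:]      # duplicate the city name
    parsed.append(row[:5])           # state, abbreviation, capital, largest city, statehood

# Placeholder column names; rename to taste
df = pd.DataFrame(parsed, columns=['State', 'Abbr', 'Capital', 'Largest', 'Statehood'])
print(df.head())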

The solution below should fix both issues you have mentioned.
from urllib.request import urlopen
import pandas as pd
from bs4 import BeautifulSoup

wiki='https://en.wikipedia.org/wiki/List_of_states_and_territories_of_the_United_States?action=render'
page = urlopen(wiki)
soup = BeautifulSoup(page, 'html.parser')
right_table=soup.find_all('table')[0] # First table
rows = right_table.find_all('tr')[2:]
A=[]
B=[]
C=[]
D=[]
F=[]
for row in rows:
    cells = row.findAll('td')
    combine_cells = cells[1].get('colspan') # Tells us whether the Capital and Largest cells are combined
    cells = [cell.text.strip() for cell in cells] # Extracts text and removes whitespace for each cell
    index = 0 # Allows us to shift the column indices below
    A.append(cells[index]) # State
    B.append(cells[index + 1]) # Capital
    if combine_cells: # Shift columns over by one if columns 2 and 3 are combined
        index -= 1
    C.append(cells[index + 2]) # Largest
    D.append(cells[index + 3]) # Statehood
    F.append(cells[index + 4]) # Population
df=pd.DataFrame(A,columns=['State'])
df['Capital']=B
df['Largest']=C
df['Statehood']=D
df['Population']=F
print(df)
Edit: Here's a cleaner version of the above code
import pandas as pd
from bs4 import BeautifulSoup
from urllib.request import urlopen

wiki = 'https://en.wikipedia.org/wiki/List_of_states_and_territories_of_the_United_States'
page = urlopen(wiki)
soup = BeautifulSoup(page, 'html.parser')

table_rows = soup.find('table')('tr')[2:] # Get all table rows
cells = [row('td') for row in table_rows] # Get all cells from rows

def get(cell): # Get stripped string from tag
    return cell.text.strip()

def is_span(cell): # Check if cell has the 'colspan' attribute, e.g. <td colspan="2"></td>
    return cell.get('colspan')

df = pd.DataFrame()
df['State'] = [get(cell[0]) for cell in cells]
df['Capital'] = [get(cell[1]) for cell in cells]
df['Largest'] = [get(cell[2]) if not is_span(cell[1]) else get(cell[1]) for cell in cells]
df['Statehood'] = [get(cell[3]) if not is_span(cell[1]) else get(cell[2]) for cell in cells]
df['Population'] = [get(cell[4]) if not is_span(cell[1]) else get(cell[3]) for cell in cells]
print(df)
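As an aside, pandas can sometimes do all of this in one statement: pd.read_html repeats a colspan value across the columns it spans, which covers the capital/largest-city case automatically. A hedged sketch (requires lxml or html5lib to be installed, and the table index may need adjusting):

import pandas as pd

# read_html fills cells spanned by colspan/rowspan with the repeated value,
# so rows where the capital is also the largest city get it in both columns.
tables = pd.read_html('https://en.wikipedia.org/wiki/List_of_states_and_territories_of_the_United_States')
df = tables[0]  # the first table on the page; adjust the index if needed
print(df.head())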

Related

Parsing a table from website (choosing correct HTML tag)

I need to make a dataframe from the following page: http://pitzavod.ru/products/upakovka/
from bs4 import BeautifulSoup
import pandas as pd
import requests
kre = requests.get(f'http://pitzavod.ru/products/upakovka/')
soup = BeautifulSoup(kre.text, 'lxml')
table1 = soup.find('table', id="tab3")
I chose "tab3", as I find in the HTML text <div class="tab-pane fade" id="tab3". But the variable table1 gives no output. How can I get the table? Thank You.
NOTE: you can get the table as a DataFrame in one statement with .read_html, but the DataFrame returned by pd.read_html('http://pitzavod.ru/products/upakovka/')[0] will not retain line breaks.
.find('table', id="tab3") searches for table tags with id="tab3", and there are no such elements in that page's HTML.
There's a div with id="tab3" (as you've noticed), but it does not contain any tables.
The only table on the page is contained in a div with id="tab4", so you might have used table1 = soup.find('div', id="tab4").table [although I prefer using .select with CSS selectors for targeting nested tags].
Suggested solution:
kre = requests.get('http://pitzavod.ru/products/upakovka/')
# print(kre.status_code, kre.reason, 'from', kre.url)
kre.raise_for_status()
soup = BeautifulSoup(kre.content, 'lxml')

# table = soup.select_one('div#tab4>div.table-responsive>table')
table = soup.find('table')  # soup.select_one('table')

tData = [{
    1 if 'center' in c.get('style', '') else ci: '\n'.join([
        l.strip() for l in c.get_text('\n').splitlines() if l.strip()
    ]) for ci, c in enumerate(r.select('td'))
} for r in table.select('tr')]
df = pd.DataFrame(tData)

## combine the top 2 rows to form the header ##
df.columns = ['\n'.join([
    f'{d}' for d in df[c][:2] if pd.notna(d)
]) for c in df.columns]
df = df.drop([0, 1], axis='rows').reset_index(drop=True)
# print(df.to_markdown(tablefmt="fancy_grid"))
(Normally, I would use a helper function like read_htmlTable to specify the separator for tag contents inside cells, but the middle cell in the 2nd header row would be shifted if I used .DataFrame(read_htmlTable(table, tSep='\n', asObj='dicts')); the 1 if 'center' in c.get('style', '') else ci bit in the code above corrects for that.)
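To illustrate the mechanism the dict comprehension relies on: get_text('\n') joins a tag's strings with the given separator, and the splitlines/strip pass then drops blank lines and stray whitespace. A tiny self-contained example (the cell contents here are made up):

from bs4 import BeautifulSoup

cell = BeautifulSoup('<td>10 kg<br/> (2 bags)</td>', 'html.parser').td
print(cell.get_text('\n'))   # '10 kg\n (2 bags)' -- the <br/> becomes a separator
cleaned = '\n'.join(l.strip() for l in cell.get_text('\n').splitlines() if l.strip())
print(cleaned)               # '10 kg\n(2 bags)' -- stripped, blank lines dropped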

How to store elements of a list of HTML tags fetched with BeautifulSoup within a dataframe, separated into alphabetical columns with pandas?

I am completely new to Jupyter Notebook, Python, web scraping and stuff. I looked at different answers, but none seems to address the same problem (and I am not good at adapting "a similar" approach, changing it a bit so I can use it for my purpose).
I want to create a data grid with all existing HTML tags. As the source I am using the MDN docs. It works fine to get all the tags with Beautiful Soup, but I struggle to go any further with this data.
Here is the code for fetching the data with Beautiful Soup:
from bs4 import BeautifulSoup
import requests

url = "https://developer.mozilla.org/en-US/docs/Web/HTML/Element"
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")

get_nav_tag = soup.find("nav", class_="sidebar-inner")
get_second_div = get_nav_tag.find_all("div")[2]
get_ol = get_second_div.find("ol")
get_li = get_second_div.find_all("li", class_="toggle")[3]
tag_list = get_li.find_all("code")

print("There are currently", len(tag_list), "tags.")
for tags in tag_list:
    print(tags.text)
The list is already sorted.
Now I work with Pandas to create a dataframe
import pandas as pd

tag_data = []
for tag in tag_list:
    tag_data.append({"Tags": tag.text})
df = pd.DataFrame(tag_data)
df
The output is a single "Tags" column listing every element, one tag per row.
QUESTION
How do I create a dataframe with a column for each letter, where the elements are listed under the matching column?
Like:
A B C
1 <a> <b> <caption>
2 <abbr> <body> <code>
3 <article> .. ...
4 ... ... ...
How do I separate this list into more lists corresponding to each element's first letter? I guess I will need that for further interactions as well, such as creating graphs, e.g. a bar chart showing how many tags start with "a", "b", etc.
Thank you!
The code below should do the job.
df['first_letter'] = df.Tags.str[1]

tag_matrix = pd.DataFrame()
for letter in df.first_letter.unique():
    # Create a pandas series named after the first letter, containing the tags starting with that letter
    matching_tags = pd.Series(df[df.first_letter==letter].reset_index(drop=True).Tags, name=letter)
    # Append the series to the tag_matrix
    tag_matrix = pd.concat([tag_matrix, matching_tags], axis=1)
tag_matrix
Note that you might want to do some additional cleaning, such as dropping duplicate tags or converting to lower case.
You can use the pivot and concat methods to achieve this:
df["letter"] = df["Tags"].str[1].str.upper()
df = df.pivot(columns="letter", values="Tags")
df = pd.concat([df[c].dropna().reset_index(drop=True) for c in df.columns], axis=1)
This gives the same one-column-per-letter layout.
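For the bar chart mentioned at the end of the question, a minimal sketch, assuming matplotlib is installed and that it runs before the pivot (i.e. while df still has the "letter" column):

import matplotlib.pyplot as plt

counts = df["letter"].value_counts().sort_index()  # tags per first letter, alphabetical
counts.plot(kind="bar")
plt.xlabel("First letter")
plt.ylabel("Number of tags")
plt.show()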

How to fix this “TypeError: sequence item 0: expected str instance, float found”

I am trying to combine the cell values (strings) in a dataframe column using the groupby method, separating the cell values in the grouped cells with commas. I ran into the following error:
TypeError: sequence item 0: expected str instance, float found
The error occurs on the following line of code, see the code block for complete codes:
toronto_df['Neighbourhood'] = toronto_df.groupby(['Postcode','Borough'])['Neighbourhood'].agg(lambda x: ','.join(x))
It seems that in the groupby operation, the index corresponding to each row in the un-grouped dataframe is somehow added to the string before it is joined, and this causes the TypeError. However, I have no idea how to fix the issue. I browsed a lot of threads but didn't find a solution. I would appreciate any guidance or assistance!
# Import necessary libraries
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import requests

# Use BeautifulSoup to scrape the table from the Wikipedia page, and set up
# the dataframe containing all the information in the table
wiki_html = requests.get('https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M').text
soup = BeautifulSoup(wiki_html, 'lxml')
# print(soup.prettify())

table = soup.find('table', class_='wikitable sortable')
table_columns = []
for th_txt in table.tbody.findAll('th'):
    table_columns.append(th_txt.text.rstrip('\n'))

toronto_df = pd.DataFrame(columns=table_columns)
for row in table.tbody.findAll('tr')[1:]:
    row_data = []
    for td_txt in row.findAll('td'):
        row_data.append(td_txt.text.rstrip('\n'))
    toronto_df = toronto_df.append({table_columns[0]: row_data[0],
                                    table_columns[1]: row_data[1],
                                    table_columns[2]: row_data[2]}, ignore_index=True)
toronto_df.head()

# Remove cells with a borough that is Not assigned
toronto_df.replace('Not assigned', np.nan, inplace=True)
toronto_df = toronto_df[toronto_df['Borough'].notnull()]
toronto_df.reset_index(drop=True, inplace=True)
toronto_df.head()

# If a cell has a borough but a Not assigned neighborhood, then the neighborhood will be the same as the borough
toronto_df['Neighbourhood'] = toronto_df.groupby(['Postcode','Borough'])['Neighbourhood'].agg(lambda x: ','.join(x))
toronto_df.drop_duplicates(inplace=True)
toronto_df.head()
The expected result of the 'Neighbourhood' column should separate the cell values in the grouped cell using commas, showing something like this (I cannot post images yet, so I just provide the link):
https://d3c33hcgiwev3.cloudfront.net/imageAssetProxy.v1/7JXaz3NNEeiMwApe4i-fLg_40e690ae0e927abda2d4bde7d94ed133_Screen-Shot-2018-06-18-at-7.17.57-PM.png?expiry=1557273600000&hmac=936wN3okNJ1UTDA6rOpQqwELESvqgScu08_Spai0aQQ
As mentioned in the comments, the NaN is a float, so trying to do string operations on it doesn't work (and this is the reason for the error message).
Replace the last part of your code with the block below.
The filling of the NaN values is done with np.where, following the logic you specified in your comment:
# If a cell has a borough but a Not assigned neighborhood, then the neighborhood will be the same as the borough
toronto_df.Neighbourhood = np.where(toronto_df.Neighbourhood.isnull(),toronto_df.Borough,toronto_df.Neighbourhood)
toronto_df['Neighbourhood'] = toronto_df.groupby(['Postcode','Borough'])['Neighbourhood'].agg(lambda x: ','.join(x))
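A side note: .agg on a groupby returns one row per group, indexed by the group keys, so assigning it back to a column of the original frame can misalign the indexes. If the joined string should be repeated on every original row before dropping duplicates, .transform returns a series aligned with the original index:

# transform keeps the original row index, so the assignment lines up row by row
toronto_df['Neighbourhood'] = (
    toronto_df.groupby(['Postcode', 'Borough'])['Neighbourhood']
              .transform(lambda x: ','.join(x))
)
toronto_df.drop_duplicates(inplace=True)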

Is it possible to scrape data from this page?

I'm having issues extracting the table from this page, and I really need this data for my paper. I came up with this code, but it got stuck on the second row.
from lxml import html, etree
import pandas as pd

browser.get('https://www.eex.com/en/market-data/power/futures/french-futures#!/2018/02/01')
table = browser.find_element_by_xpath('//*[@id="content"]/div/div/div/div[1]/div/div/div')
html_table = html.fromstring(table.get_attribute('innerHTML'))
html_code = etree.tostring(html_table)
df = pd.read_html(html_code)[0]
df.drop(['Unnamed: 12', 'Unnamed: 13'], axis=1, inplace=True)
Any advice?
You can always parse the table manually.
I prefer to use BeautifulSoup since I find it much easier to work with.
from bs4 import BeautifulSoup
soup = BeautifulSoup(browser.page_source, "html.parser")
Let's parse the first table, and get the column names:
table = soup.select("table.table-horizontal")[0]
columns = [i.get_text() for i in table.find_all("th")][:-2] ## We don't want the last 2 columns
Now, let's go through the table row by row:
rs = []
for r in table.find_all("tr"):
    ds = []
    for d in r.find_all("td"):
        ds.append(d.get_text().strip())
    rs.append(ds[:-2])
You can write the same code more concisely using list comprehensions:
rs = [[d.get_text().strip() for d in r.find_all("td")][:-2] for r in table.find_all("tr")]
Next, we filter rs to remove lists with length != 12 (since we have 12 columns):
rs = [i for i in rs if len(i)==12]
Finally, we can put this into a DataFrame (zip(*rs) transposes the rows into columns, which are then paired with the column names):
df = pd.DataFrame({k: v for k, v in zip(columns, zip(*rs))})
You can follow a similar procedure for the second table. Hope this helps!
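If you need the second table as well, the same steps can be wrapped in a small helper and applied to every matching table. A sketch under the assumption that the second table uses similar markup (the class name and the two trailing extra columns are carried over from the first table, so adjust as needed):

import pandas as pd

def parse_table(table, n_extra=2):
    """Parse one table into a DataFrame, dropping the trailing extra columns."""
    columns = [th.get_text() for th in table.find_all("th")][:-n_extra]
    rows = [[td.get_text().strip() for td in tr.find_all("td")][:-n_extra]
            for tr in table.find_all("tr")]
    rows = [r for r in rows if len(r) == len(columns)]  # keep only complete data rows
    return pd.DataFrame({k: v for k, v in zip(columns, zip(*rows))})

dfs = [parse_table(t) for t in soup.select("table.table-horizontal")]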

Append multiple reshaped lists into pandas DataFrame

I am scraping England's joint registry data and have the results in the correct format I want when I do one hospital at a time. I eventually want to iterate over all hospitals, but first I decided to make an array of three different hospitals and figure out the iteration.
The code below gives me the correct format of the final results in a pandas DataFrame when I have just one hospital:
import requests
from bs4 import BeautifulSoup
import pandas
import numpy as np

r=requests.get("http://www.njrsurgeonhospitalprofile.org.uk/HospitalProfile?hospitalName=Norfolk%20and%20Norwich%20Hospital")
c=r.content
soup=BeautifulSoup(c,"html.parser")
all=soup.find_all(["div"],{"class":"toggle_container"})[1]

i=0
temp = []
for item in all.find_all("td"):
    if i%4 ==0:
        temp.append(soup.find_all("span")[4].text)
        temp.append(soup.find_all("h5")[0].text)
    temp.append(all.find_all("td")[i].text.replace(" ",""))
    i=i+1

table = np.array(temp).reshape(12,6)
final = pandas.DataFrame(table)
final
In my iterated version, I cannot figure out a way to append each result set into a final DataFrame:
hosplist = ["http://www.njrsurgeonhospitalprofile.org.uk/HospitalProfile?hospitalName=Norfolk%20and%20Norwich%20Hospital",
            "http://www.njrsurgeonhospitalprofile.org.uk/HospitalProfile?hospitalName=Barnet%20Hospital",
            "http://www.njrsurgeonhospitalprofile.org.uk/HospitalProfile?hospitalName=Altnagelvin%20Area%20Hospital"]

temp2 = []
df_final = pandas.DataFrame()

for item in hosplist:
    r=requests.get(item)
    c=r.content
    soup=BeautifulSoup(c,"html.parser")
    all=soup.find_all(["div"],{"class":"toggle_container"})[1]

    i=0
    temp = []
    for item in all.find_all("td"):
        if i%4 ==0:
            temp.append(soup.find_all("span")[4].text)
            temp.append(soup.find_all("h5")[0].text)
        temp.append(all.find_all("td")[i].text)
        i=i+1

    table = np.array(temp).reshape((int(len(temp)/6)),6)
    temp2.append(table)
    #df_final = pandas.DataFrame(df)
At the end, 'table' has all the data I want, but it's not easy to manipulate, so I want to put it in a DataFrame. However, I am getting a "ValueError: Must pass 2-d input" error.
I think this error is saying that I have 3 arrays, which would make the input 3-dimensional. This is just a practice iteration; there are over 400 hospitals whose data I plan to put into a dataframe, but I am stuck here now.
The simple answer to your question would be HERE.
The tough part was taking your code and finding what was not right yet.
Using your full code, I modified it as shown below. Please copy and diff with yours.
import requests
from bs4 import BeautifulSoup
import pandas
import numpy as np

hosplist = ["http://www.njrsurgeonhospitalprofile.org.uk/HospitalProfile?hospitalName=Norfolk%20and%20Norwich%20Hospital",
            "http://www.njrsurgeonhospitalprofile.org.uk/HospitalProfile?hospitalName=Barnet%20Hospital",
            "http://www.njrsurgeonhospitalprofile.org.uk/HospitalProfile?hospitalName=Altnagelvin%20Area%20Hospital"]

temp2 = []
df_final = pandas.DataFrame()

for item in hosplist:
    r=requests.get(item)
    c=r.content
    soup=BeautifulSoup(c,"html.parser")
    all=soup.find_all(["div"],{"class":"toggle_container"})[1]

    i=0
    temp = []
    for item in all.find_all("td"):
        if i%4 ==0:
            temp.append(soup.find_all("span")[4].text)
            temp.append(soup.find_all("h5")[0].text)
        temp.append(all.find_all("td")[i].text)
        i=i+1

    table = np.array(temp).reshape((int(len(temp)/6)),6)
    for array in table:
        newArray = []
        for x in array:
            try:
                x = x.encode("ascii")
            except:
                x = 'cannot convert'
            newArray.append(x)
        temp2.append(newArray)

df_final = pandas.DataFrame(temp2, columns=['h1', 'h2', 'h3', 'h4', 'h5', 'h6'])
print(df_final)
I tried to use a list comprehension for the ASCII conversion, which was absolutely necessary for the strings to show up in the dataframe, but the comprehension was throwing an error, so I built in a try/except, and the exception branch never triggers.
I reorganized the code a little and was able to create the dataframe without having to encode.
Solution:
hosplist = ["http://www.njrsurgeonhospitalprofile.org.uk/HospitalProfile?hospitalName=Norfolk%20and%20Norwich%20Hospital",
"http://www.njrsurgeonhospitalprofile.org.uk/HospitalProfile?hospitalName=Barnet%20Hospital",
"http://www.njrsurgeonhospitalprofile.org.uk/HospitalProfile?hospitalName=Altnagelvin%20Area%20Hospital"]
temp = []
temp2 = []
df_final = pandas.DataFrame()
for item in hosplist:
r=requests.get(item)
c=r.content
soup=BeautifulSoup(c,"html.parser")
all=soup.find_all(["div"],{"class":"toggle_container"})[1]
i=0
for item in all.find_all("td"):
if i%4 ==0:
temp.append(soup.find_all("span")[4].text)
temp.append(soup.find_all("h5")[0].text)
temp.append(all.find_all("td")[i].text.replace("-","NaN").replace("+",""))
i=i+1
temp2.append(temp)
table = np.array(temp2).reshape((int(len(temp2[0])/6)),6)
df_final = pandas.DataFrame(table, columns=['h1', 'h2', 'h3', 'h4', 'h5', 'h6'])
df_final
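If you'd rather sidestep the reshape bookkeeping (and the "Must pass 2-d input" error) entirely, a hedged alternative is to build one DataFrame per hospital and stack them with pandas.concat. The sketch below reuses the imports and scraping logic from above, with the same placeholder column names:

frames = []
for url in hosplist:
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    container = soup.find_all("div", {"class": "toggle_container"})[1]
    temp = []
    for i, td in enumerate(container.find_all("td")):
        if i % 4 == 0:  # start of a table row: prepend hospital name and heading
            temp.append(soup.find_all("span")[4].text)
            temp.append(soup.find_all("h5")[0].text)
        temp.append(td.text)
    frames.append(pandas.DataFrame(np.array(temp).reshape(-1, 6),
                                   columns=['h1', 'h2', 'h3', 'h4', 'h5', 'h6']))

df_final = pandas.concat(frames, ignore_index=True)  # one 2-d frame, no ValueError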
