Right now I have a script that can print out basic info on Airbnb listings based on a specified URL:
from bs4 import BeautifulSoup
import requests
import csv
headers = {'User-Agent':'Google Chrome, Windows 10'}
url = 'https://www.airbnb.com/s/Tokyo--Japan/homes?tab_id=home_tab&refinement_paths%5B%5D=%2Fhomes&flexible_trip_dates%5B%5D=april&flexible_trip_dates%5B%5D=may&flexible_trip_lengths%5B%5D=weekend_trip&date_picker_type=calendar&checkin=2021-04-09&checkout=2021-04-23&adults=3&source=structured_search_input_header&search_type=autocomplete_click&query=Tokyo%2C%20Japan&place_id=ChIJ51cu8IcbXWARiRtXIothAS'
response = requests.get(url,headers=headers)
soup = BeautifulSoup(response.content,'lxml')
for item in soup.select('[itemprop=itemListElement]'):
    try:
        print('----------------------------------------')
        print(item.select('a')[0]['aria-label'])        # Title
        print(item.select('a')[0]['href'])              # URL
        print(item.select('._krjbj')[0].get_text())     # Price
        print(item.select('._krjbj')[2].get_text())     # Total price
        print(item.select('._kqh46o')[0].get_text())    # Facilities
        print(item.select('._kqh46o')[1].get_text())    # Amenities
        print(item.select('._18khxk1')[0].get_text())   # Rating with number of reviews in parentheses
        # print(name)
        print('----------------------------------------')
    except Exception as e:
        # raise e
        print('')
I'd like the output from this stored in a CSV. Here's my attempt using BeautifulSoup:
from bs4 import BeautifulSoup
import requests
import csv
headers = {'User-Agent':'Google Chrome, Windows 10'}
f = csv.writer(open('airbnbscraping.csv', 'w'))
f.writerow(["title", "weburl", "nightprice", "totalprice", "facilities", "amenities", "ratings"])
for item in soup.select('[itemprop=itemListElement]'):
    try:
        title = item.select(('a')[0]['aria-label'])
        weburl = item.select(('a')[0]['href'])
        nightprice = str(('._krjbj')[0].get_text())
        totalprice = str(('._krjbj')[2].get_text())
        facilities = str(('._kqh46o')[0].get_text())
        amenities = str(('._kqh46o')[1].get_text())
        ratings = str(('._18khxk1')[0].get_text())
    except Exception as e:
        # raise e
        continue
    f.writerow([title, weburl, nightprice, totalprice, facilities, amenities, ratings])
Currently only the header row is written to the CSV. How could I get the desired values into the table as well? Would I have to use .find and .find_all instead?
I would use a context manager to handle the file open/write and re-work some of your selectors.
Move the try/except to the one lookup that actually needs to handle an exception; in the code below, only the rating can legitimately be missing.
Please note that the classes look dynamic so scraping using current classes is not particularly robust. I would instead look for relationships between more stable looking elements/attributes.
I also needed to change your user-agent to one the server was happy with; otherwise only the header row would be written, because the initial soup.select used for the loop returned no results.
from bs4 import BeautifulSoup
import requests
import csv
headers = {'User-Agent':'Mozilla/5.0'}
url = 'https://www.airbnb.com/s/Tokyo--Japan/homes?tab_id=home_tab&refinement_paths%5B%5D=%2Fhomes&flexible_trip_dates%5B%5D=april&flexible_trip_dates%5B%5D=may&flexible_trip_lengths%5B%5D=weekend_trip&date_picker_type=calendar&checkin=2021-04-09&checkout=2021-04-23&adults=3&source=structured_search_input_header&search_type=autocomplete_click&query=Tokyo%2C%20Japan&place_id=ChIJ51cu8IcbXWARiRtXIothAS'
response = requests.get(url,headers=headers)
soup = BeautifulSoup(response.content,'lxml')
with open("airbnbscraping.csv", "w", encoding="utf-8-sig", newline='') as f:
    w = csv.writer(f, delimiter=",", quoting=csv.QUOTE_MINIMAL)
    w.writerow(["title", "weburl", "nightprice", "totalprice", "facilities", "amenities", "ratings"])
    for item in soup.select('[itemprop=itemListElement]'):
        title = item.select_one('._8s3ctt a')['aria-label']
        weburl = 'https://www.airbnb.co.uk/' + item.select_one('a')['href']
        nightprice = item.select_one('._olc9rf0').text
        totalprice = item.select_one('button span:contains("total")').text.split(' ')[0]
        facilities = item.select_one('._kqh46o').get_text()
        amenities = item.select_one('[itemprop=itemListElement] ._kqh46o + ._kqh46o').get_text()
        try:
            ratings = item.select_one('._10fy1f8').text
        except:
            ratings = 'None'
        w.writerow([title, weburl, nightprice, totalprice, facilities, amenities, ratings])
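On the point about dynamic classes, here is a minimal sketch of keying off the schema.org markup instead; it assumes each itemListElement card exposes itemprop meta tags (name/url), which would need checking against the live HTML:

for item in soup.select('[itemprop=itemListElement]'):
    # Assumption: each card contains <meta itemprop="name" ...> and <meta itemprop="url" ...>
    name_tag = item.select_one('meta[itemprop=name]')
    url_tag = item.select_one('meta[itemprop=url]')
    if name_tag and url_tag:
        print(name_tag['content'], url_tag['content'])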
I have a list of Twitter usernames and need to get each account's follower count. I used BeautifulSoup and requests, but I only ever get output for one account.
from bs4 import BeautifulSoup
import requests
import pandas as pd
purcsv = pd.read_csv('pureeng.csv', engine= 'python')
followers = purcsv['username']
followers.head(10)
handle = purcsv['username'][0:40]
temp = ("https://twitter.com/"+handle)
temp = temp.tolist()
for url in temp:
    page = requests.get(url)

bs = BeautifulSoup(page.text,'lxml')
follow_box = bs.find('li',{'class':'ProfileNav-item ProfileNav-item--followers'})
followers = follow_box.find('a').find('span',{'class':'ProfileNav-value'})
print("Number of followers: {} ".format(followers.get('data-count')))
That's because you are looping over the URLs and fetching each one into the same variable, page:
for url in temp:
    page = requests.get(url)
so after the loop finishes, page only holds the content of the last URL fetched. To fix this, process each page inside the loop, right after it is fetched:
followers_list = []
for url in temp:
    page = requests.get(url)
    bs = BeautifulSoup(page.text, "html.parser")
    follow_box = bs.find('li',{'class':'ProfileNav-item ProfileNav-item--followers'})
    followers = follow_box.find('a').find('span',{'class':'ProfileNav-value'})
    print("Number of followers: {} ".format(followers.get('data-count')))
    followers_list.append(followers.get('data-count'))

print(followers_list)
Here is a full example to verify:
from bs4 import BeautifulSoup
import requests
import pandas as pd
purcsv = pd.read_csv('pureeng.csv')
followers = purcsv['username']
handles = purcsv['username'][0:40].tolist()
followers_list = []
for handle in handles:
    url = "https://twitter.com/" + handle
    try:
        page = requests.get(url)
    except Exception as e:
        print(f"Failed to fetch page for url {url} due to: {e}")
        continue
    bs = BeautifulSoup(page.text, "html.parser")
    follow_box = bs.find('li',{'class':'ProfileNav-item ProfileNav-item--followers'})
    followers = follow_box.find('a').find('span',{'class':'ProfileNav-value'})
    print("Number of followers: {} ".format(followers.get('data-count')))
    followers_list.append(followers.get('data-count'))

print(followers_list)
output:
Number of followers: 13714085
Number of followers: 4706511
['13714085', '4706511']
You may consider using async functions for fetching and processing those URLs if you have too many of them.
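For example, here is a minimal sketch with asyncio and aiohttp (my choice of HTTP client, not something from the original code) that fetches all profile pages concurrently and reuses the same parsing logic; note it relies on the same ProfileNav markup as above, which current Twitter pages may no longer serve:

import asyncio
import aiohttp
from bs4 import BeautifulSoup

def parse_followers(html):
    # Same markup assumption as the synchronous version above
    bs = BeautifulSoup(html, "html.parser")
    follow_box = bs.find('li', {'class': 'ProfileNav-item ProfileNav-item--followers'})
    if follow_box is None:
        return None
    span = follow_box.find('a').find('span', {'class': 'ProfileNav-value'})
    return span.get('data-count')

async def fetch(session, url):
    async with session.get(url) as response:
        return await response.text()

async def main(urls):
    async with aiohttp.ClientSession() as session:
        pages = await asyncio.gather(*(fetch(session, url) for url in urls))
    return [parse_followers(html) for html in pages]

urls = ["https://twitter.com/" + handle for handle in handles]  # handles as defined above
print(asyncio.run(main(urls)))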
I'm trying to scrape information about the datasets available on this website.
I want to collect the URLs to the resources and at least the title of the dataset.
Using this resource as an example, I want to capture the URL embedded in "Go to resource" and the title listed in the table:
I have created a basic scraper, but it doesn't seem to work:
import requests
import csv
from bs4 import BeautifulSoup
site = requests.get('https://data.nsw.gov.au/data/dataset');
data_list=[]
if site.status_code is 200:
    content = BeautifulSoup(site.content, 'html.parser')
    internals = content.select('.resource-url-analytics')
    for url in internals:
        title = internals.select=('.resource-url-analytics')[0].get_text()
        link = internals.select=('.resource-url-analytics')[0].get('href')
        new_data = {"title": title, "link": link}
        data_list.append(new_data)
    with open('selector.csv','w') as file:
        writer = csv.DictWriter(file, fieldnames = ["dataset", "link"], delimiter = ';')
        writer.writeheader()
        for row in data_list:
            writer.writerow(row)
I would like to write the output to a CSV with columns for the URLs and the titles.
This is an example of the desired output
Greatly appreciative for any assistance
Have a look at the API for the datasets; that will likely be the easiest way to do this.
In the meantime, here is how you can get the API links at id level from those pages and store the entire package info for all packages in one list, data_sets, and just the info of interest in another variable (results). Be sure to review the API documentation in case there is a better method - for example, it would be nice if ids could be submitted in batches rather than per id.
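As an aside, if the portal is a standard CKAN instance (the /api/3/action/package_show links used below suggest it is, but treat that as an assumption on my part), the package_search action returns packages in batches, which avoids making one request per id. A rough sketch:

import requests

# Assumption: data.nsw.gov.au/data is a standard CKAN portal, so package_search is available.
base = 'https://data.nsw.gov.au/data/api/3/action/package_search'
results = []
start = 0
rows = 100  # packages per request

while True:
    r = requests.get(base, params={'rows': rows, 'start': start}).json()
    packages = r['result']['results']
    if not packages:
        break
    for package in packages:
        title = package.get('title', 'N/A')
        urls = ' , '.join(res['url'] for res in package.get('resources', [])) or 'N/A'
        results.append((title, urls))
    start += rows

print(len(results))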
Answer below is taking advantage of the endpoint detailed in the documentation which is used to get a full JSON representation of a dataset, resource or other object
Taking the current first result on the landing page:
Vegetation of the Guyra 1:25000 map sheet VIS_ID 240.
We want the last child a of a parent h3 that has an ancestor with the class .dataset-item. In the selector below, the spaces between the parts are descendant combinators.
.dataset-item h3 a:last-child
You can shorten this to h3 a:last-child for a small efficiency gain.
This relationship reliably selects all relevant links on the page.
Continuing with this example and visiting the retrieved URL for the first listed item, we can find the link to the API endpoint (which returns the JSON for this package) via an attribute = value CSS selector with the contains (*) operator. We know this particular API endpoint shares a common substring, so we substring-match on the href attribute value:
[href*="/api/3/action/package_show?id="]
The domain can vary, and some retrieved links are relative, so we have to test whether each link is relative and prepend the appropriate domain.
Notes:
data_sets is a list containing all the package data for each package and is extensive. I did this in case you are interested in looking at what is in those packages (besides reviewing the API documentation).
You can get the total number of pages from the soup object of any results page via
num_pages = int(soup.select('[href^="/data/dataset?page="]')[-2].text)
You can alter the loop for fewer pages.
A Session object is used for the efficiency of re-using the connection. I'm sure there are other improvements to be made; in particular, I would look for any method that reduces the number of requests (which is why I mentioned looking for a batch id endpoint, for example).
A returned package can contain anywhere from zero to several resource URLs. You can edit the code to handle this.
Python:
from bs4 import BeautifulSoup as bs
import requests
import csv
from urllib.parse import urlparse
json_api_links = []
data_sets = []
def get_links(s, url, css_selector):
    r = s.get(url)
    soup = bs(r.content, 'lxml')
    base = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url))
    links = [base + item['href'] if item['href'][0] == '/' else item['href'] for item in soup.select(css_selector)]
    return links

results = []
#debug = []

with requests.Session() as s:
    for page in range(1, 2):  #you decide how many pages to loop
        links = get_links(s, 'https://data.nsw.gov.au/data/dataset?page={}'.format(page), '.dataset-item h3 a:last-child')
        for link in links:
            data = get_links(s, link, '[href*="/api/3/action/package_show?id="]')
            json_api_links.append(data)
            #debug.append((link, data))

    resources = list(set([item.replace('opendata','') for sublist in json_api_links for item in sublist]))  #can just leave as set

    for link in resources:
        try:
            r = s.get(link).json()  #entire package info
            data_sets.append(r)
            title = r['result']['title']  #certain items
            if 'resources' in r['result']:
                urls = ' , '.join([item['url'] for item in r['result']['resources']])
            else:
                urls = 'N/A'
        except:
            title = 'N/A'
            urls = 'N/A'
        results.append((title, urls))

with open('data.csv','w', newline='') as f:
    w = csv.writer(f)
    w.writerow(['Title','Resource Url'])
    for row in results:
        w.writerow(row)
All pages
(very long running, so consider threading/asyncio; a threaded sketch follows the code below):
from bs4 import BeautifulSoup as bs
import requests
import csv
from urllib.parse import urlparse
json_api_links = []
data_sets = []
def get_links(s, url, css_selector):
    r = s.get(url)
    soup = bs(r.content, 'lxml')
    base = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url))
    links = [base + item['href'] if item['href'][0] == '/' else item['href'] for item in soup.select(css_selector)]
    return links

results = []
#debug = []

with requests.Session() as s:
    r = s.get('https://data.nsw.gov.au/data/dataset')
    soup = bs(r.content, 'lxml')
    num_pages = int(soup.select('[href^="/data/dataset?page="]')[-2].text)
    links = [item['href'] for item in soup.select('.dataset-item h3 a:last-child')]

    for link in links:
        data = get_links(s, link, '[href*="/api/3/action/package_show?id="]')
        json_api_links.append(data)
        #debug.append((link, data))

    if num_pages > 1:
        for page in range(1, num_pages + 1):  #you decide how many pages to loop
            links = get_links(s, 'https://data.nsw.gov.au/data/dataset?page={}'.format(page), '.dataset-item h3 a:last-child')
            for link in links:
                data = get_links(s, link, '[href*="/api/3/action/package_show?id="]')
                json_api_links.append(data)
                #debug.append((link, data))

    resources = list(set([item.replace('opendata','') for sublist in json_api_links for item in sublist]))  #can just leave as set

    for link in resources:
        try:
            r = s.get(link).json()  #entire package info
            data_sets.append(r)
            title = r['result']['title']  #certain items
            if 'resources' in r['result']:
                urls = ' , '.join([item['url'] for item in r['result']['resources']])
            else:
                urls = 'N/A'
        except:
            title = 'N/A'
            urls = 'N/A'
        results.append((title, urls))

with open('data.csv','w', newline='') as f:
    w = csv.writer(f)
    w.writerow(['Title','Resource Url'])
    for row in results:
        w.writerow(row)
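As a sketch of the threading suggestion above, the inner loop that fetches the package_show link for each dataset page could be handed to a thread pool. This reuses the get_links helper and the session s from the code above; the pool size is an arbitrary choice, and sharing one requests.Session across threads is common practice but not formally guaranteed to be thread-safe:

from concurrent.futures import ThreadPoolExecutor

# Parallelise the per-dataset requests for a given list of dataset page links.
def fetch_api_links(link):
    return get_links(s, link, '[href*="/api/3/action/package_show?id="]')

with ThreadPoolExecutor(max_workers=10) as executor:
    for data in executor.map(fetch_api_links, links):
        json_api_links.append(data)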
For simplicity, use the selenium package:
from selenium import webdriver
import os
# initialise browser
browser = webdriver.Chrome(os.getcwd() + '/chromedriver')
browser.get('https://data.nsw.gov.au/data/dataset')
# find all elements by xpath
get_elements = browser.find_elements_by_xpath('//*[@id="content"]/div/div/section/div/ul/li/div/h3/a[2]')

# collect data
data = []
for item in get_elements:
    data.append((item.text, item.get_attribute('href')))
Output:
('Vegetation of the Guyra 1:25000 map sheet VIS_ID 240', 'https://datasets.seed.nsw.gov.au/dataset/vegetation-of-the-guyra-1-25000-map-sheet-vis_id-2401ee52')
('State Vegetation Type Map: Riverina Region Version v1.2 - VIS_ID 4469', 'https://datasets.seed.nsw.gov.au/dataset/riverina-regional-native-vegetation-map-version-v1-0-vis_id-4449')
('Temperate Highland Peat Swamps on Sandstone (THPSS) spatial distribution maps...', 'https://datasets.seed.nsw.gov.au/dataset/temperate-highland-peat-swamps-on-sandstone-thpss-vegetation-maps-vis-ids-4480-to-4485')
('Environmental Planning Instrument - Flood', 'https://www.planningportal.nsw.gov.au/opendata/dataset/epi-flood')
and so on
I am designing a scraping project for my research, but I am stuck on writing the scraped data to a CSV. Please help me with that.
I have successfully scraped the data, but I want to store it in a CSV; below is my code.
I need to write code that pulls all of the HTML from a website and then saves it to a CSV file.
I believe I somehow need to turn the links into a list and then write the list, but I'm unsure how to do that.
This is what I have so far:
import requests
import time
from bs4 import BeautifulSoup
import csv
# Collect and parse first page
page = requests.get('https://www.myamcat.com/jobs')
soup = BeautifulSoup(page.content, 'lxml')
print("Wait Scraper is working on ")
time.sleep(10)
if(page.status_code != 200):
    print("Error in Scraping check the url")
else:
    print("Successfully scrape the data")
    time.sleep(10)
    print("Loading data in csv")
    file = csv.writer(open('dataminer.csv', 'w'))
    file.writerow(['ProfileName', 'CompanyName', 'Salary', 'Job', 'Location'])
    for pname in soup.find_all(class_="profile-name"):
        #print(pname.text)
        profname = pname.text
        file.writerow([profname, ])
    for cname in soup.find_all(class_="company_name"):
        print(cname.text)
    for salary in soup.find_all(class_="salary"):
        print(salary.text)
    for lpa in soup.find_all(class_="jobText"):
        print(lpa.text)
    for loc in soup.find_all(class_="location"):
        print(loc.text)
Make a dict for each row, collect them in a list, then save the list to CSV; check the code below.
import requests
import time
from bs4 import BeautifulSoup
import csv
# Collect and parse first page
page = requests.get('https://www.myamcat.com/jobs')
soup = BeautifulSoup(page.content, 'lxml')
data = []
print("Wait Scrapper is working on ")
if(page.status_code != 200):
    print("Error in Srapping check the url")
else:
    print("Successfully scrape the data")
    for x in soup.find_all('div',attrs={'class':'job-page'}):
        data.append({
            'pname':x.find(class_="profile-name").text.encode('utf-8'),
            'cname':x.find(class_="company_name").text.encode('utf-8'),
            'salary':x.find(class_="salary").text.encode('utf-8'),
            'lpa':x.find(class_="jobText").text.encode('utf-8'),
            'loc':x.find(class_="location").text.encode('utf-8')})
    print("Loading data in csv")
    with open('dataminer.csv', 'w') as f:
        fields = ['salary', 'loc', 'cname', 'pname', 'lpa']
        writer = csv.DictWriter(f, fieldnames=fields)
        writer.writeheader()
        writer.writerows(data)
Apart from what you have got in the other answer, you can also scrape and write the content at the same time. I used .select() instead of .find_all() to achieve the same result.
import csv
import requests
from bs4 import BeautifulSoup
URL = "https://www.myamcat.com/jobs"
page = requests.get(URL)
soup = BeautifulSoup(page.text, 'lxml')
with open('myamcat_doc.csv','w',newline="",encoding="utf-8") as f:
    writer = csv.writer(f)
    writer.writerow(['pname','cname','salary','loc'])
    for item in soup.select(".job-listing .content"):
        pname = item.select_one(".profile-name h3").get_text(strip=True)
        cname = item.select_one(".company_name").get_text(strip=True)
        salary = item.select_one(".salary .jobText").get_text(strip=True)
        loc = item.select_one(".location .jobText").get_text(strip=True)
        writer.writerow([pname,cname,salary,loc])
I am trying to scrape a table from a website. The script runs, but I am not getting any output in my file. Where am I going wrong?
Code:
from bs4 import BeautifulSoup
import urllib.request
f = open('nbapro.txt','w')
errorFile = open('nbaerror.txt','w')
page = urllib.request.urlopen('http://www.numberfire.com/nba/fantasy/full-fantasy-basketball-projections')
content = page.read()
soup = BeautifulSoup(content)
tableStats = soup.find('table', {'class': 'data-table xsmall'})
for row in tableStats.findAll('tr')[2:]:
    col = row.findAll('td')
    try:
        name = col[0].a.string.strip()
        f.write(name+'\n')
    except Exception as e:
        errorFile.write(str(e) + '******' + str(col) + '\n')
        pass
f.close
errorFile.close
The problem is that the table data you are trying to scrape is filled out by invoking javascript code on the browser-side. urllib is not a browser and, hence, cannot execute javascript.
If you want to solve it via urllib and BeautifulSoup, you have to extract the JSON object from the script tag and load it via json.loads(). Here is an example that prints player names:
import json
import re
import urllib.request
from bs4 import BeautifulSoup
soup = BeautifulSoup(urllib.request.urlopen('http://www.numberfire.com/nba/fantasy/full-fantasy-basketball-projections'))
script = soup.find('script', text=lambda x: x and 'NF_DATA' in x).text
data = re.search(r'NF_DATA = (.*?);', script).group(1)
data = json.loads(data)
for player_id, player in data['players'].items():
    print(player['name'] + ' ' + player['last_name'])
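Since the original goal was writing names to nbapro.txt, the same loop can write to the file instead of printing; a small follow-up using the NF_DATA structure parsed above:

with open('nbapro.txt', 'w') as f:
    for player_id, player in data['players'].items():
        f.write(player['name'] + ' ' + player['last_name'] + '\n')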
I am grabbing an HTML table with this code :
import csv
import urllib2
from bs4 import BeautifulSoup
with open('listing.csv', 'wb') as f:
    writer = csv.writer(f)
    for i in range(39):
        url = "file:///C:/projects/HTML/Export.htm".format(i)
        u = urllib2.urlopen(url)
        try:
            html = u.read()
        finally:
            u.close()
        soup = BeautifulSoup(html)
        for tr in soup.find_all('tr')[2:]:
            tds = tr.find_all('td')
            row = [elem.text.encode('utf-8') for elem in tds]
            writer.writerow(row)
Everything works perfectly, but I am also trying to grab the href URL from column 9; it currently gives me the text value but not the URL.
Also, I have two tables in my HTML; is there any way to skip the first table and build the CSV file using only the second table?
Any help is very welcome, as I am new to Python and need this for a project in which I am automating a daily conversion.
Many thanks!
You should access the href attribute of the a tag within the 9th td tag (index 8):
import csv
import urllib2
from bs4 import BeautifulSoup
records = []

def my_parse(html):
    soup = BeautifulSoup(html)
    table2 = soup.find_all('table')[1]
    for tr in table2.find_all('tr')[2:]:
        tds = tr.find_all('td')
        url = tds[8].a.get('href')
        records.append([elem.text.encode('utf-8') for elem in tds])
        # perhaps you want to update one of the elements of this last
        # record with the found url now?

for index in range(39):
    url = get_url(index)  # where is the formatting in your example happening?
    response = urllib2.urlopen(url)
    try:
        html = response.read()
    except Exception:
        raise
    else:
        my_parse(html)
    finally:
        try:
            response.close()
        except (UnboundLocalError, NameError):
            raise UnboundLocalError

# It's more efficient to write only once
with open('listing.csv', 'wb') as f:
    writer = csv.writer(f)
    writer.writerows(records)
I have taken the liberty of defining a function get_url based on the index, because your example rereads the same file every time, which I guess is not what you actually want. I'll leave the implementation to you (a hypothetical sketch is shown below). Also, I've added some better exception handling.
At the same time, I've shown how you can access the 2nd table from that webpage's tables.
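For illustration only, one possible get_url sketch; the numbered file names are purely an assumption on my part, since your example reuses the same file every time:

def get_url(index):
    # Hypothetical naming scheme: Export0.htm, Export1.htm, ... adjust to your actual exports.
    return "file:///C:/projects/HTML/Export{}.htm".format(index)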
Was fully able to get it working with the following code:
import csv
import urllib2
from bs4 import BeautifulSoup
#Grab second table from HTML
def my_parse(html):
    soup = BeautifulSoup(html)
    table2 = soup.find_all('table')[1]
    for tr in table2.find_all('tr')[2:]:
        tds = tr.find_all('td')
        url = tds[8].a.get('href')
        tds[8].a.replaceWith(url)
        records.append([elem.text.encode('utf-8') for elem in tds])

records = []

#Read HTML file into memory
for index in range(39):
    url = "file:///C:/projects/HTML/Export.htm".format(index)
    response = urllib2.urlopen(url)
    try:
        html = response.read()
    except Exception:
        raise
    else:
        my_parse(html)
    finally:
        try:
            response.close()
        except (UnboundLocalError, NameError):
            raise UnboundLocalError

#Writing CSV file
with open('listing.csv', 'wb') as f:
    writer = csv.writer(f)
    writer.writerows(records)
Many thanks for all the help!!!!!