Pulling all Yelp reviews via BeautifulSoup - Python

I need some help pulling all reviews for a hotel using Beautiful Soup; this is what I have so far, but I need some inspiration for pulling all the reviews, either via the API or by regular scraping.
import time
import random
import urllib.request
from bs4 import BeautifulSoup as bs

html = urllib.request.urlopen('https://www.yelp.com/biz/shore-cliff-hotel-pismo-beach-2').read().decode('utf-8')
soup = bs(html, 'html.parser')

relevant = soup.find_all('p', class_='comment__09f24__gu0rG css-qgunke')
reviews = []
for div in relevant:
    for html_class in div.find_all('span', class_="raw__09f24__T4Ezm"):
        review = html_class.getText()
        reviews.append(review)

This does the job:

import requests
from bs4 import BeautifulSoup

base_url = "https://www.yelp.com/biz/capri-laguna-laguna-beach"
new_page = "?start={}"

reviews = []
for i in range(0, 501, 10):
    new_page_url = base_url + new_page.format(i)
    new_content = requests.get(new_page_url).content
    new_soup = BeautifulSoup(new_content, "html.parser")
    relevant = new_soup.find_all('p', class_='comment__09f24__gu0rG css-qgunke')
    for div in relevant:
        for html_class in div.find_all('span', class_="raw__09f24__T4Ezm"):
            review = html_class.getText()
            reviews.append(review)
Code explanation:
If you click through to the 2nd page you'll see that ?start=10 gets added to the base URL https://www.yelp.com/biz/capri-laguna-laguna-beach. If you go to the 3rd page you'll see ?start=20, and so on. The number here is the index of the first review on that page, and each page has 10 reviews. There are 51 pages in total, so the first review on the 51st page has index 500, which is why the added part of the URL for the last page is ?start=500.
So for each page on the website, the code builds a new URL, fetches that URL's HTML content, creates a soup for it, and extracts the reviews from that newly created soup.
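If the total number of pages isn't known up front, a variation of the same idea is to keep incrementing ?start= until a page returns no reviews, with a short random delay between requests to stay polite. This is a minimal sketch reusing the class names from the snippets above (Yelp rotates these obfuscated class names regularly, so treat them as placeholders):

import time
import random
import requests
from bs4 import BeautifulSoup

base_url = "https://www.yelp.com/biz/capri-laguna-laguna-beach"
reviews = []
start = 0
while True:
    page_url = "{}?start={}".format(base_url, start)
    soup = BeautifulSoup(requests.get(page_url).content, "html.parser")
    # Class names copied from the snippets above; they may need updating.
    found = [span.get_text()
             for p in soup.find_all("p", class_="comment__09f24__gu0rG css-qgunke")
             for span in p.find_all("span", class_="raw__09f24__T4Ezm")]
    if not found:  # no reviews on this page, so we are past the last page
        break
    reviews.extend(found)
    start += 10
    time.sleep(random.uniform(1, 3))  # polite delay between requests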


Pulling p tags from multiple URLs

I've struggled with this for days and am not sure what the issue could be. Basically, I'm trying to extract the profile box data (pictured below) for each link; going through the inspector, I thought I could do so by pulling the p tags.
I'm new to this and trying to understand, but here's what I have so far:
-- a script that (somewhat) successfully pulls the info for ONE link:
import requests
from bs4 import BeautifulSoup

# getting html
url = 'https://basketball.realgm.com/player/Darius-Adams/Summary/28720'
req = requests.get(url)
soup = BeautifulSoup(req.text, 'html.parser')

container = soup.find('div', attrs={'class': 'main-container'})
playerinfo = container.find_all('p')
print(playerinfo)
I then also have a script that pulls all of the href attributes from multiple pages:
from bs4 import BeautifulSoup
import requests

def get_links(url):
    links = []
    website = requests.get(url)
    website_text = website.text
    soup = BeautifulSoup(website_text, 'html.parser')
    for link in soup.find_all('a'):
        links.append(link.get('href'))
    for link in links:
        print(link)
    print(len(links))

get_links('https://basketball.realgm.com/dleague/players/2022')
get_links('https://basketball.realgm.com/dleague/players/2021')
get_links('https://basketball.realgm.com/dleague/players/2020')
So basically, my goal is to combine these two and get one script that will pull all of the p tags from multiple URLs. I've been trying to do it, and I'm really not sure why this isn't working:
from bs4 import BeautifulSoup
import requests

def get_profile(url):
    profiles = []
    req = requests.get(url)
    soup = BeautifulSoup(req.text, 'html.parser')
    container = soup.find('div', attrs={'class', 'main-container'})
    for profile in container.find_all('a'):
        profiles.append(profile.get('p'))
    for profile in profiles:
        print(profile)

get_profile('https://basketball.realgm.com/player/Darius-Adams/Summary/28720')
get_profile('https://basketball.realgm.com/player/Marial-Shayok/Summary/26697')
Again, I'm really new to web scraping with Python, but any advice would be greatly appreciated. Ultimately, my end goal is to have a tool that can scrape this data in a clean way, all at once (player name, current team, born, birthplace, etc.). Maybe I'm doing it entirely wrong, but any guidance is welcome!
You need to combine your two scripts together and make a request for each player. Try the following approach. This searches for <td> tags that have the data-th="Player" attribute:
import requests
from bs4 import BeautifulSoup

def get_links(url):
    data = []
    req_url = requests.get(url)
    soup = BeautifulSoup(req_url.content, "html.parser")
    for td in soup.find_all('td', {'data-th': 'Player'}):
        a_tag = td.a
        name = a_tag.text
        player_url = a_tag['href']
        print(f"Getting {name}")
        req_player_url = requests.get(f"https://basketball.realgm.com{player_url}")
        soup_player = BeautifulSoup(req_player_url.content, "html.parser")
        div_profile_box = soup_player.find("div", class_="profile-box")
        row = {"Name": name, "URL": player_url}
        for p in div_profile_box.find_all("p"):
            try:
                key, value = p.get_text(strip=True).split(':', 1)
                row[key.strip()] = value.strip()
            except:  # not all entries have values
                pass
        data.append(row)
    return data

urls = [
    'https://basketball.realgm.com/dleague/players/2022',
    'https://basketball.realgm.com/dleague/players/2021',
    'https://basketball.realgm.com/dleague/players/2020',
]

for url in urls:
    print(f"Getting: {url}")
    data = get_links(url)
    for entry in data:
        print(entry)
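Since the stated end goal is a clean dataset (player name, current team, born, birthplace, etc.), the list of dicts returned by get_links drops straight into pandas. A small optional sketch building on the code above, assuming pandas is installed:

import pandas as pd

all_rows = []
for url in urls:
    all_rows.extend(get_links(url))

# Keys such as "Current Team" or "Born" become columns; missing fields become NaN.
df = pd.DataFrame(all_rows)
df.to_csv("players.csv", index=False)
print(df.head())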

Python crawling with BeautifulSoup: how to crawl several pages?

Please help.
I want to get all the company names from each page, and there are 12 pages.
http://www.saramin.co.kr/zf_user/jobs/company-labs/list/page/1
http://www.saramin.co.kr/zf_user/jobs/company-labs/list/page/2
-- this website only changes the number.
So here is my code so far.
Can I get just the titles (company names) from all 12 pages?
Thank you in advance.
from bs4 import BeautifulSoup
import requests

maximum = 0
page = 1

URL = 'http://www.saramin.co.kr/zf_user/jobs/company-labs/list/page/1'
response = requests.get(URL)
source = response.text
soup = BeautifulSoup(source, 'html.parser')

whole_source = ""
for page_number in range(1, maximum+1):
    URL = 'http://www.saramin.co.kr/zf_user/jobs/company-labs/list/page/' + str(page_number)
    response = requests.get(URL)
    whole_source = whole_source + response.text
soup = BeautifulSoup(whole_source, 'html.parser')
find_company = soup.select("#content > div.wrap_analysis_data > div.public_con_box.public_list_wrap > ul > li:nth-child(13) > div > strong")

for company in find_company:
    print(company.text)
(The output of one page and the page source were attached as screenshots in the original post.)
So, you want to remove all the headers and get only the string of the company name?
Basically, you can use soup.findAll to find the list of companies, which come in a format like this:
<strong class="company"><span>중소기업진흥공단</span></strong>
Then you use the .find function to extract information from the <span> tag:
<span>중소기업진흥공단</span>
After that, you use .contents function to get the string from the <span> tag:
'중소기업진흥공단'
So you write a loop to do the same for each page, and make a list called company_list to store the results from each page and append them together.
Here's the code:
from bs4 import BeautifulSoup
import requests

maximum = 12

company_list = []  # List for storing results
for page_number in range(1, maximum+1):
    URL = 'http://www.saramin.co.kr/zf_user/jobs/company-labs/list/page/{}'.format(page_number)
    response = requests.get(URL)
    print(page_number)
    whole_source = response.text
    soup = BeautifulSoup(whole_source, 'html.parser')
    for entry in soup.findAll('strong', attrs={'class': 'company'}):  # Find all company names on the page
        company_list.append(entry.find('span').contents[0])  # Extract the name from the result
The company_list will then contain all the company names you want.
I figured it out eventually. Thank you for your answer though!
(A screenshot of the code running in a Jupyter notebook was attached here.)
Here is my final code.
from urllib.request import urlopen
from bs4 import BeautifulSoup

company_list = []
for n in range(12):
    url = 'http://www.saramin.co.kr/zf_user/jobs/company-labs/list/page/{}'.format(n+1)
    webpage = urlopen(url)
    source = BeautifulSoup(webpage, 'html.parser', from_encoding='utf-8')
    companys = source.findAll('strong', {'class': 'company'})
    for company in companys:
        company_list.append(company.get_text().strip().replace('\n', '').replace('\t', '').replace('\r', ''))

file = open('company_name1.txt', 'w', encoding='utf-8')
for company in company_list:
    file.write(company + '\n')
file.close()

Trouble parsing product names out of some links with different depth

I've written a script in Python to reach the target page where each category on a website lists its available item names. My script below can get the product names from most of the links (generated by traversing the category links and then the sub-category links).
The script can parse the sub-category links revealed upon clicking the + sign located right next to each category (visible in the image below) and then parse all the product names from the target page. This is one such target page.
However, a few of the links do not have the same depth as the other links. For example, this link and this one are different from usual links like this one.
How can I get all the product names from all the links, irrespective of their different depth?
This is what I've tried so far:
import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup

link = "https://www.courts.com.sg/"
res = requests.get(link)
soup = BeautifulSoup(res.text, "lxml")
for item in soup.select(".nav-dropdown li a"):
    if "#" in item.get("href"): continue  # kick out invalid links
    newlink = urljoin(link, item.get("href"))
    req = requests.get(newlink)
    sauce = BeautifulSoup(req.text, "lxml")
    for elem in sauce.select(".product-item-info .product-item-link"):
        print(elem.get_text(strip=True))
How to find target links:
The site has six main product categories. Products that belong to a subcategory can also be found in a main category (for example the products in /furniture/furniture/tables can also be found in /furniture), so you only have to collect products from the main categories. You could get the category links from the main page, but it'd be easier to use the sitemap.
url = 'https://www.courts.com.sg/sitemap/'
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
cats = soup.select('li.level-0.category > a')[:6]
links = [i['href'] for i in cats]
As you've mentioned, there are some links that have a different structure, like this one: /televisions. But if you click the View All Products link on that page, you will be redirected to /tv-entertainment/vision/television. So, you can get all the /televisions products from /tv-entertainment. Similarly, the products in links to brands can be found in the main categories. For example, the /asus products can be found in /computing-mobile and other categories.
The code below collects products from all the main categories, so it should collect all the products on the site.
from bs4 import BeautifulSoup
import requests

url = 'https://www.courts.com.sg/sitemap/'
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')

cats = soup.select('li.level-0.category > a')[:6]
links = [i['href'] for i in cats]

products = []
for link in links:
    link += '?product_list_limit=24'
    while link:
        r = requests.get(link)
        soup = BeautifulSoup(r.text, 'html.parser')
        link = (soup.select_one('a.action.next') or {}).get('href')
        for elem in soup.select(".product-item-info .product-item-link"):
            product = elem.get_text(strip=True)
            products += [product]
            print(product)
I've increased the number of products per page to 24, but this code still takes a lot of time, as it collects products from all main categories and their pagination links. However, we could make it much faster with the use of threads.
from bs4 import BeautifulSoup
import requests
from threading import Thread, Lock
from urllib.parse import urlparse, parse_qs

lock = Lock()
threads = 10
products = []

def get_products(link, products):
    soup = BeautifulSoup(requests.get(link).text, 'html.parser')
    tags = soup.select(".product-item-info .product-item-link")
    with lock:
        products += [tag.get_text(strip=True) for tag in tags]
        print('page:', link, 'items:', len(tags))

url = 'https://www.courts.com.sg/sitemap/'
soup = BeautifulSoup(requests.get(url).text, 'html.parser')
cats = soup.select('li.level-0.category > a')[:6]
links = [i['href'] for i in cats]

for link in links:
    link += '?product_list_limit=24'
    soup = BeautifulSoup(requests.get(link).text, 'html.parser')
    last_page = soup.select_one('a.page.last')['href']
    last_page = int(parse_qs(urlparse(last_page).query)['p'][0])

    threads_list = []
    for i in range(1, last_page + 1):
        page = '{}&p={}'.format(link, i)
        thread = Thread(target=get_products, args=(page, products))
        thread.start()
        threads_list += [thread]
        if i % threads == 0 or i == last_page:
            for t in threads_list:
                t.join()

print(len(products))
print('\n'.join(products))
This code collects 18,466 products from 773 pages in about 5 minutes. I'm using 10 threads because I don't want to stress the server too much, but you could use more (most servers can handle 20 threads easily).
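An alternative to managing Thread objects and a Lock by hand is concurrent.futures.ThreadPoolExecutor, which caps the number of concurrent requests for you. A rough sketch for a single main-category link, reusing the selectors and URL structure from the answer above (scrape_category is a hypothetical helper name, not part of the original answer):

from concurrent.futures import ThreadPoolExecutor
from urllib.parse import urlparse, parse_qs
import requests
from bs4 import BeautifulSoup

def scrape_page(page_url):
    soup = BeautifulSoup(requests.get(page_url).text, 'html.parser')
    return [tag.get_text(strip=True)
            for tag in soup.select(".product-item-info .product-item-link")]

def scrape_category(link, workers=10):
    link += '?product_list_limit=24'
    first = BeautifulSoup(requests.get(link).text, 'html.parser')
    last_page = int(parse_qs(urlparse(first.select_one('a.page.last')['href']).query)['p'][0])
    pages = ['{}&p={}'.format(link, i) for i in range(1, last_page + 1)]
    products = []
    # map() yields results in submission order, and the appending happens in
    # the main thread, so no Lock is needed here.
    with ThreadPoolExecutor(max_workers=workers) as pool:
        for page_products in pool.map(scrape_page, pages):
            products += page_products
    return products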
I would recommend starting your scrape from the page's sitemap, found here. If they were to add products, they're likely to show up there as well.
Since your main issue is finding the links, here is a generator that will find all of the category and sub-category links using the sitemap krflol pointed out in his solution:
from bs4 import BeautifulSoup
import requests

def category_urls():
    response = requests.get('https://www.courts.com.sg/sitemap')
    html_soup = BeautifulSoup(response.text, features='html.parser')
    categories_sitemap = html_soup.find(attrs={'class': 'xsitemap-categories'})
    for category_a_tag in categories_sitemap.find_all('a'):
        yield category_a_tag.attrs['href']
And to find the product names, simply scrape each of the yielded category_urls.
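For instance, a sketch continuing from the generator above (the product selector is reused from the earlier answers, and pagination is not handled here):

products = []
for category_url in category_urls():
    soup = BeautifulSoup(requests.get(category_url).text, features='html.parser')
    for tag in soup.select('.product-item-info .product-item-link'):
        products.append(tag.get_text(strip=True))
print(len(products))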
I looked at the website and found that all the product categories are listed at the bottom left of the main page https://www.courts.com.sg/. Clicking one of these takes you to the advertisement front page of that category, where you then have to click All Products to reach the listing.
Following is the code as a whole:
import requests
from bs4 import BeautifulSoup

def parser():
    parsing_list = []
    url = 'https://www.courts.com.sg/'
    source_code = requests.get(url)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text, "html.parser")
    ul = soup.find('footer', {'class': 'page-footer'}).find('ul')
    for l in ul.find_all('li'):
        nextlink = url + l.find('a').get('href')
        response = requests.get(nextlink)
        inner_soup = BeautifulSoup(response.text, "html.parser")
        parsing_list.append(url + inner_soup.find('div', {'class': 'category-static-links ng-scope'}).find('a').get('href'))
    return parsing_list
This function will return a list of the All Products links for every category, including the ones your code didn't scrape.

Get links from a site's homepage using Python

I want to write a script to get a home page's links to social media (twitter / facebook mostly), and I'm completely stuck since I am fairly new to Python.
The task I want to accomplish is to parse the website, find the social media links, and save them in a new data frame with columns for the original URL, the twitter link, and the facebook link. Here's what I have so far, using the New York Times website:
from bs4 import BeautifulSoup
import requests

url = "http://www.nytimes.com"
r = requests.get(url)
sm_sites = ['twitter.com', 'facebook.com']
soup = BeautifulSoup(r.content, 'html5lib')
all_links = soup.find_all('a', href=True)

for site in sm_sites:
    if all(site in sm_sites for link in all_links):
        print(site)
    else:
        print('no link')
I'm having some problems understanding what the loop is doing, and how to make it do what I need. I also tried to store the site instead of doing print(site), but that was not working... so I figured I'd ask for help. Before asking, I went through a bunch of answers here, but none got me to where I needed to be.
The way this code works, you already have your links. Your homepage link is the starting URL, so http://www.nytimes.com.
And you have the social media URLs in sm_sites = ['twitter.com','facebook.com']; all you're doing is confirming they exist on the main page. If you want to save the list of confirmed social media URLs, then append them to a list.
Here is one way to get the social media links off a page:
import requests
from bs4 import BeautifulSoup

url = "https://stackoverflow.com/questions/tagged/python"
r = requests.get(url)
sm_sites = ['twitter.com', 'facebook.com']
sm_sites_present = []

soup = BeautifulSoup(r.content, 'html5lib')
all_links = soup.find_all('a', href=True)

for sm_site in sm_sites:
    for link in all_links:
        if sm_site in link.attrs['href']:
            sm_sites_present.append(link.attrs['href'])

print(sm_sites_present)
output:
['https://twitter.com/stackoverflow', 'https://www.facebook.com/officialstackoverflow/']
Update
For a DataFrame of URLs:
import requests
import pandas as pd
from bs4 import BeautifulSoup
from IPython.display import display

urls = [
    "https://stackoverflow.com/questions/tagged/python",
    "https://www.nytimes.com/",
    "https://en.wikipedia.org/"
]

sm_sites = ['twitter.com', 'facebook.com']
sm_sites_present = []
columns = ['url'] + sm_sites

df = pd.DataFrame(data={'url': urls}, columns=columns)

def get_sm(row):
    r = requests.get(row['url'])
    output = pd.Series()
    soup = BeautifulSoup(r.content, 'html5lib')
    all_links = soup.find_all('a', href=True)
    for sm_site in sm_sites:
        for link in all_links:
            if sm_site in link.attrs['href']:
                output[sm_site] = link.attrs['href']
    return output
sm_columns = df.apply(get_sm, axis=1)
df.update(sm_columns)
df.fillna(value='no link')
Output: the DataFrame with one row per URL and the matched twitter.com / facebook.com links filled in (rendered as a table in the original answer).
This will do what you want with regards to adding it to a DataFrame. You can iterate through a list of websites (urlsToSearch), adding a row to the dataframe for each one containing the base website, all facebook links, and all twitter links.
from bs4 import BeautifulSoup
import requests
import pandas as pd

df = pd.DataFrame(columns=["Website", "Facebook", "Twitter"])

urlsToSearch = ["http://www.nytimes.com", "http://www.businessinsider.com/"]

for url in urlsToSearch:
    r = requests.get(url)

    tw_links = []
    fb_links = []

    soup = BeautifulSoup(r.text, 'html.parser')
    all_links = [link['href'] for link in soup.find_all('a', href=True)]  # only get href

    for link in all_links:
        if "twitter.com" in link:
            tw_links.append(link)
        elif "facebook.com" in link:
            fb_links.append(link)

    df.loc[df.shape[0]] = [url, fb_links, tw_links]  # add row to end of df
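If you want to write that DataFrame to CSV afterwards, the list-valued cells can be flattened into comma-separated strings first. A small optional sketch, not part of the original answer:

# Join the link lists into single strings so the CSV stays readable.
df["Facebook"] = df["Facebook"].apply(", ".join)
df["Twitter"] = df["Twitter"].apply(", ".join)
df.to_csv("social_links.csv", index=False)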

Scraping multiple paginated links with BeautifulSoup and Requests

Python Beginner here. I'm trying to scrape all products from one category on dabs.com. I've managed to scrape all products on a given page, but I'm having trouble iterating over all the paginated links.
Right now, I've tried to isolate all the pagination buttons with the span class "page-list", but even that isn't working. Ideally, I would like to make the crawler keep clicking Next until it has scraped all products on all pages. How can I do this?
I'd really appreciate any input.
from bs4 import BeautifulSoup
import requests

base_url = "http://www.dabs.com"
page_array = []

def get_pages():
    html = requests.get(base_url)
    soup = BeautifulSoup(html.content, "html.parser")
    page_list = soup.findAll('span', class_="page-list")
    pages = page_list[0].findAll('a')
    for page in pages:
        page_array.append(page.get('href'))

def scrape_page(page):
    html = requests.get(page)
    soup = BeautifulSoup(html.content, "html.parser")
    Product_table = soup.findAll("table")
    Products = Product_table[0].findAll("tr")
    if len(soup.findAll('tr')) > 0:
        Products = Products[1:]
    for row in Products:
        cells = row.find_all('td')
        data = {
            'description': cells[0].get_text(),
            'price': cells[1].get_text()
        }
        print(data)

get_pages()
[scrape_page(base_url + page) for page in page_array]
Their next page button has a title of "Next", so you could do something like:
import requests
from bs4 import BeautifulSoup as bs

url = 'http://www.dabs.com/category/computing/11001/'
base_url = 'http://www.dabs.com'

r = requests.get(url)

soup = bs(r.text, 'html.parser')
elm = soup.find('a', {'title': 'Next'})
next_page_link = base_url + elm['href']
Hope that helps.
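To keep following the Next button until every page has been scraped, which is what the question asks for, a loop along these lines should work. This is a sketch building on the snippet above, assuming the href is relative to the site root and that the Next link disappears on the last page:

import requests
from bs4 import BeautifulSoup as bs

base_url = 'http://www.dabs.com'
next_url = base_url + '/category/computing/11001/'
while next_url:
    r = requests.get(next_url)
    soup = bs(r.text, 'html.parser')
    # ... scrape the products on this page here ...
    elm = soup.find('a', {'title': 'Next'})
    # On the last page there is no "Next" link, so the loop stops.
    next_url = base_url + elm['href'] if elm else None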
