Scraping data using a CSS selector (Python, BS4) - python

I am scraping data using CSS selectors for the first time, and I am having a problem scraping the content of an anchor.
Here is my code:
import requests
from bs4 import BeautifulSoup
url = "https://weworkremotely.com/remote-jobs/search?utf8=✓&term=ruby"
wwr_result = requests.get(url)
wwr_soup = BeautifulSoup(wwr_result.text, "html.parser")
posts = wwr_soup.find_all("li", {"class": "feature"})
for post in posts:
    title = post.find("span", {"class": "title"}).get_text()
    company = post.find("span", {"class": "company"}).get_text()
    location = post.find("span", {"class": "region company"}).get_text()
    link = post.select("#category-2 > article > ul > li:nth-child(1) > a[href]")
    print({"title": title, "company": company, "location": location, "link": f"https://weworkremotely.com/{link}"})
I want to scrape the content of the anchor to build a link for each post, so I added a[href].
But it doesn't work; it scrapes the contents of the whole subcategory instead.
What do I have to change to scrape just the content of the anchor?

Assuming you have correctly selected the jobs of interest from all jobs listed, you need a loop; then, during the loop, extract the first href attribute containing the substring -jobs, i.e. post.select_one('a[href*=-jobs]'):
import requests
from bs4 import BeautifulSoup
url = "https://weworkremotely.com/remote-jobs/search?utf8=✓&term=ruby"
wwr_result = requests.get(url)
wwr_soup = BeautifulSoup(wwr_result.text, "html.parser")
posts = wwr_soup.find_all("li", {"class": "feature"})
for post in posts:
    print('https://weworkremotely.com' + post.select_one('a[href*=-jobs]')['href'])
To get all the listings on the page switch to:
posts = wwr_soup.select('li:has(.tooltip)')
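For reference, a minimal sketch tying the answer's link extraction back to the fields from the question. The class names and selectors are taken from the question and answer above; the live markup may have changed, so treat this as untested:
import requests
from bs4 import BeautifulSoup

url = "https://weworkremotely.com/remote-jobs/search?utf8=✓&term=ruby"
wwr_soup = BeautifulSoup(requests.get(url).text, "html.parser")

# ':has()' needs bs4 >= 4.7; otherwise fall back to the 'feature' class from the question
posts = wwr_soup.select("li:has(.tooltip)") or wwr_soup.find_all("li", {"class": "feature"})

for post in posts:
    anchor = post.select_one("a[href*=-jobs]")
    if anchor is None:
        continue
    title = post.find("span", {"class": "title"})
    company = post.find("span", {"class": "company"})
    print({
        "title": title.get_text() if title else None,
        "company": company.get_text() if company else None,
        "link": "https://weworkremotely.com" + anchor["href"],
    })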

Related

Pulling all yelp reviews via beautifulsoup

I need some help pulling all the reviews for a hotel using Beautiful Soup. This is what I have thus far, but I need some inspiration for pulling all the reviews, either via an API or by regular scraping.
import time
import random
import urllib.request
from bs4 import BeautifulSoup as bs

html = urllib.request.urlopen('https://www.yelp.com/biz/shore-cliff-hotel-pismo-beach-2').read().decode('utf-8')
soup = bs(html, 'html.parser')
relevant = soup.find_all('p', class_='comment__09f24__gu0rG css-qgunke')
reviews = []
for div in relevant:
    for html_class in div.find_all('span', class_="raw__09f24__T4Ezm"):
        text = html_class.find('span')
        review = html_class.getText()
        reviews.append(review)
This does the job,
base_url = "https://www.yelp.com/biz/capri-laguna-laguna-beach"
new_page = "?start={}"
content = requests.get(url).content
soup = BeautifulSoup(content, "html.parser")
reviews = []
for i in range(0, 501, 10):
new_page_url = url + new_page.format(i)
new_content = requests.get(url).content
new_soup = BeautifulSoup(content, "html.parser")
relevant= new_soup.find_all('p', class_='comment__09f24__gu0rG css-qgunke')
for div in relevant:
for html_class in div.find_all('span',class_="raw__09f24__T4Ezm"):
text = html_class.find('span')
review = html_class.getText()
reviews.append(review)
Code explanation:
If you click through to the 2nd page you'll see that ?start=10 gets added to the base URL https://www.yelp.com/biz/capri-laguna-laguna-beach. If you go to the 3rd page you'll see ?start=20, and so on. The number here is the index of the review, and each page has 10 of them. There are 51 pages in total, so the first review on the 51st page has index 500, which means the part added to the URL is ?start=500.
So for each page on the website, the code creates a new URL, gets the HTML content of that URL, creates a soup for it and fetches the review from this newly created soup.
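As a small illustration of that URL pattern (the page count is taken from the explanation above):
base_url = "https://www.yelp.com/biz/capri-laguna-laguna-beach"

# start=0 is page 1, start=10 is page 2, ..., start=500 is page 51
page_urls = [base_url + "?start={}".format(i) for i in range(0, 501, 10)]

print(page_urls[1])    # https://www.yelp.com/biz/capri-laguna-laguna-beach?start=10
print(page_urls[-1])   # https://www.yelp.com/biz/capri-laguna-laguna-beach?start=500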

bs4 p tags returning as None

import bs4
import requests
import re

r = requests.get('https://www.the961.com/latest-news/lebanon-news/').text
soup = bs4.BeautifulSoup(r, 'lxml')

for article in soup.find_all('article'):
    title = article.h3.text
    print(title)

    date = article.find('span', class_='byline-part date')
    if date: print('Date:', date.text)

    author = article.find('span', class_="byline-part author")
    if author: print('Author:', author.text)

    link = article.find('h3', class_='title').a['href']
    link_r = requests.get(link).text
    soup_link = bs4.BeautifulSoup(link_r, 'lxml')

    # scraping the link from the title, then opening that link and trying to scrape
    # the whole article; very new to this so I don't know what to do!
    for article in soup_link.find_all('article'):
        paragraph = article.find('p')
        print(paragraph)
    print()
On some pages the <p> tags are not under an <article>, and therefore the search returns None. Instead, to scrape all the paragraphs (and <li> tags if they exist), use the following CSS selector: .entry-content > p, .entry-content li.
To use a CSS Selector, use the .select() method instead of .find_all().
In your code example:
import bs4
import requests

r = requests.get("https://www.the961.com/latest-news/lebanon-news/").text
soup = bs4.BeautifulSoup(r, "lxml")

for article in soup.find_all("article"):
    title = article.h3.text
    print(title)

    date = article.find("span", class_="byline-part date")
    if date:
        print("Date:", date.text)

    author = article.find("span", class_="byline-part author")
    if author:
        print("Author:", author.text, "\n")

    link = article.find("h3", class_="title").a["href"]
    link_r = requests.get(link).text
    soup_link = bs4.BeautifulSoup(link_r, "lxml")

    # Select all `p` tags (and `li`) under the class `entry-content`
    for page in soup_link.select(".entry-content > p, .entry-content li"):
        print(page.get_text(strip=True))
    print("-" * 80)
    print()

python crawling beautifulsoup how to crawl several pages?

Please help.
I want to get all the company names from each page, and there are 12 pages.
http://www.saramin.co.kr/zf_user/jobs/company-labs/list/page/1
http://www.saramin.co.kr/zf_user/jobs/company-labs/list/page/2
This website only changes the page number.
So here is my code so far.
Can I get just the titles (company names) from all 12 pages?
Thank you in advance.
from bs4 import BeautifulSoup
import requests
maximum = 0
page = 1
URL = 'http://www.saramin.co.kr/zf_user/jobs/company-labs/list/page/1'
response = requests.get(URL)
source = response.text
soup = BeautifulSoup(source, 'html.parser')
whole_source = ""
for page_number in range(1, maximum+1):
    URL = 'http://www.saramin.co.kr/zf_user/jobs/company-labs/list/page/' + str(page_number)
    response = requests.get(URL)
    whole_source = whole_source + response.text
soup = BeautifulSoup(whole_source, 'html.parser')
find_company = soup.select("#content > div.wrap_analysis_data > div.public_con_box.public_list_wrap > ul > li:nth-child(13) > div > strong")
for company in find_company:
    print(company.text)
(Output of one page and the page source were shown here.)
So, you want to remove all the headers and get only the string of the company name?
Basically, you can use soup.findAll to find the list of companies, each in a format like this:
<strong class="company"><span>중소기업진흥공단</span></strong>
Then you use the .find function to extract information from the <span> tag:
<span>중소기업진흥공단</span>
After that, you use the .contents attribute to get the string from the <span> tag:
'중소기업진흥공단'
So you write a loop to do the same for each page, and make a list called company_list to store the results from each page and append them together.
Here's the code:
from bs4 import BeautifulSoup
import requests
maximum = 12
company_list = [] # List for result storing
for page_number in range(1, maximum+1):
    URL = 'http://www.saramin.co.kr/zf_user/jobs/company-labs/list/page/{}'.format(page_number)
    response = requests.get(URL)
    print(page_number)
    whole_source = response.text
    soup = BeautifulSoup(whole_source, 'html.parser')
    for entry in soup.findAll('strong', attrs={'class': 'company'}):  # Find all company names on the page
        company_list.append(entry.find('span').contents[0])  # Extract the name from the <span>
The company_list will then contain all the company names you want.
I figured it out eventually. Thank you for your answer though!
(Image: code captured in a Jupyter notebook.)
Here is my final code.
from urllib.request import urlopen
from bs4 import BeautifulSoup
company_list=[]
for n in range(12):
    url = 'http://www.saramin.co.kr/zf_user/jobs/company-labs/list/page/{}'.format(n+1)
    webpage = urlopen(url)
    source = BeautifulSoup(webpage, 'html.parser', from_encoding='utf-8')
    companys = source.findAll('strong', {'class': 'company'})
    for company in companys:
        company_list.append(company.get_text().strip().replace('\n', '').replace('\t', '').replace('\r', ''))

file = open('company_name1.txt', 'w', encoding='utf-8')
for company in company_list:
    file.write(company + '\n')
file.close()

Scraping multiple paginated links with BeautifulSoup and Requests

Python Beginner here. I'm trying to scrape all products from one category on dabs.com. I've managed to scrape all products on a given page, but I'm having trouble iterating over all the paginated links.
Right now, I've tried to isolate all the pagination buttons with the span class "page-list", but even that isn't working. Ideally, I would like to make the crawler keep clicking next until it has scraped all products on all pages. How can I do this?
Really appreciate any input
from bs4 import BeautifulSoup
import requests
base_url = "http://www.dabs.com"
page_array = []
def get_pages():
    html = requests.get(base_url)
    soup = BeautifulSoup(html.content, "html.parser")
    page_list = soup.findAll('span', class_="page-list")
    pages = page_list[0].findAll('a')
    for page in pages:
        page_array.append(page.get('href'))

def scrape_page(page):
    html = requests.get(page)
    soup = BeautifulSoup(html.content, "html.parser")
    Product_table = soup.findAll("table")
    Products = Product_table[0].findAll("tr")
    if len(soup.findAll('tr')) > 0:
        Products = Products[1:]
    for row in Products:
        cells = row.find_all('td')
        data = {
            'description': cells[0].get_text(),
            'price': cells[1].get_text()
        }
        print(data)

get_pages()
[scrape_page(base_url + page) for page in page_array]
Their next-page button has a title of "Next", so you could do something like:
import requests
from bs4 import BeautifulSoup as bs

url = 'http://www.dabs.com/category/computing/11001/'
base_url = 'http://www.dabs.com'

r = requests.get(url)
soup = bs(r.text, 'html.parser')
elm = soup.find('a', {'title': 'Next'})
next_page_link = base_url + elm['href']
Hope that helps.
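To keep following that "Next" button until it disappears (which is what the question asks for), here is a minimal sketch along the same lines; the selector comes from the answer above, and the dabs.com markup may well have changed since:
import requests
from bs4 import BeautifulSoup as bs

base_url = 'http://www.dabs.com'
url = base_url + '/category/computing/11001/'

while url:
    soup = bs(requests.get(url).text, 'html.parser')
    # ... scrape the product rows on this page here ...
    elm = soup.find('a', {'title': 'Next'})
    url = base_url + elm['href'] if elm else None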

Beautifulsoup to retrieve the href list

Thanks for your attention!
I'm trying to retrieve the hrefs of the products in a search result.
For example, this page:
However, when I narrow down to the product image class, the retrieved hrefs are image links.
Can anyone solve that? Thanks in advance!
import urllib2
from bs4 import BeautifulSoup

url = 'http://www.homedepot.com/b/Husky/N-5yc1vZrd/Ntk-All/Ntt-chest%2Band%2Bcabinet?Ntx=mode+matchall&NCNI-5'
content = urllib2.urlopen(url).read()
content = preprocess_yelp_page(content)  # helper not shown in the question
soup = BeautifulSoup(content)
content = soup.findAll('div', {'class': 'content dynamic'})
draft = str(content)
soup = BeautifulSoup(draft)
items = soup.findAll('div', {'class': 'cell_section1'})
draft = str(items)
soup = BeautifulSoup(draft)
content = soup.findAll('div', {'class': 'product-image'})
draft = str(content)
soup = BeautifulSoup(draft)
You don't need to load the content of each found tag with BeautifulSoup over and over again.
Use a CSS selector to get all of the product links (the a tag under a div with class="product-image"):
import urllib2
from bs4 import BeautifulSoup
url = 'http://www.homedepot.com/b/Husky/N-5yc1vZrd/Ntk-All/Ntt-chest%2Band%2Bcabinet?Ntx=mode+matchall&NCNI-5'
soup = BeautifulSoup(urllib2.urlopen(url))
for link in soup.select('div.product-image > a:nth-of-type(1)'):
    print link.get('href')
Prints:
http://www.homedepot.com/p/Husky-41-in-16-Drawer-Tool-Chest-and-Cabinet-Set-HOTC4016B1QES/205080371
http://www.homedepot.com/p/Husky-26-in-6-Drawer-Chest-and-Cabinet-Combo-Black-C-296BF16/203420937
http://www.homedepot.com/p/Husky-52-in-18-Drawer-Tool-Chest-and-Cabinet-Set-Black-HOTC5218B1QES/204825971
http://www.homedepot.com/p/Husky-26-in-4-Drawer-All-Black-Tool-Cabinet-H4TR2R/204648170
...
The div.product-image > a:nth-of-type(1) CSS selector matches the first a tag directly under each div with the class product-image.
To save the links into a list, use a list comprehension:
links = [link.get('href') for link in soup.select('div.product-image > a:nth-of-type(1)')]
