Scrape a class within a class - python

I want to scrape class_="href" with in the class_="_e4d". Basically looking to scrape a class within a class using BeautifulSoup.
from bs4 import BeautifulSoup
import selenium.webdriver as webdriver
# Placeholder Google search URL (truncated in the original post).
url = ("https://www.google.com/search?...")
# NOTE(review): the function body lost its indentation when pasted; the five
# lines from `driver = ...` through `return ...` belong inside the def.
def get_related_search(url):
# Drive a real browser so Google's JavaScript-rendered results are present.
driver = webdriver.Chrome("C:\\Users\\John\\bin\\chromedriver.exe")
driver.get(url)
# No parser specified -> bs4 picks one and emits the "markup_type"
# warning quoted in the results below; pass e.g. "html.parser" to silence it.
soup = BeautifulSoup(driver.page_source)
# All related-search paragraphs (<p class="_e4b">); only the first is returned.
relate_result = soup.find_all("p", class_="_e4b")
return relate_result[0]
relate_url = get_related_search(url)
print(relate_url)
Results (preceded by a truncated BeautifulSoup "no parser specified" warning ending in `markup_type=markup_type))`):
p class="_e4b"}{a href="/search?...a}{/p}
I now want to scrape the href result. I am not sure what the next step would be. Thanks for the help.
Note: I replaced <> with {} since it was not showing up as html script

You can actually find this inner a element in one go with a CSS selector:
# CSS selector: every <a> that has an href attribute, nested anywhere
# inside a <p> carrying the _e4b class.
links = soup.select("p._e4b a[href]")
for link in links:
    # fix: the loop body lost its indentation in the original paste
    print(link['href'])
p._e4b a[href] would locate all a elements having the href attribute inside the p elements having _e4b class.

Related

How to print the first google search result link using bs4?

I'm a beginner in Python. I'm trying to get the first search result link from Google, which is stored inside a div with class='yuRUbf', using BeautifulSoup. When I run the script, the output is 'None' — what is the error here?
import requests
import bs4
# Google search-results URL for a site:stackoverflow.com query.
url = 'https://www.google.com/search?q=site%3Astackoverflow.com+how+to+use+bs4+in+python&sxsrf=AOaemvKrCLt-Ji_EiPLjcEso3DVfBUmRbg%3A1630215433722&ei=CR0rYby7K7ue4-EP7pqIkAw&oq=site%3Astackoverflow.com+how+to+use+bs4+in+python&gs_lcp=Cgdnd3Mtd2l6EAM6BwgAEEcQsAM6BwgjELACECc6BQgAEM0CSgQIQRgAUMw2WPh_YLiFAWgBcAJ4AIABkAKIAd8lkgEHMC4xMC4xM5gBAKABAcgBCMABAQ&sclient=gws-wiz&ved=0ahUKEwj849XewdXyAhU7zzgGHW4NAsIQ4dUDCA8&uact=5'
# NOTE(review): no User-Agent header is sent, so Google serves a stripped-down
# page that lacks the 'yuRUbf' divs -- which is why find() returns None here.
request_result=requests.get( url )
soup = bs4.BeautifulSoup(request_result.text,"html.parser")
# Wrapper div of the first organic search result (None when absent).
productDivs = soup.find("div", {"class": "yuRUbf"})
print(productDivs)
Let's see:
from bs4 import BeautifulSoup
import requests, json
# Google serves the full search-result HTML only when a browser-like
# User-Agent is sent; replace "useragent" with a real UA string.
headers = {
    'User-agent':
    "useragent"
}
html = requests.get('https://www.google.com/search?q=hello', headers=headers).text
soup = BeautifulSoup(html, 'lxml')
# First organic result: <div class="tF2Cxc"> wraps the title <a>.
result = soup.find('div', class_='tF2Cxc')
# fix: guard against find() returning None (e.g. wrong UA or markup change),
# which previously crashed with AttributeError on `.a`.
if result is not None:
    link = result.a['href']  # destination URL is in the anchor's href attribute
    print(link)
output:
'''
https://www.youtube.com/watch?v=YQHsXMglC9A
Since you want the first Google search result, and the class name you are looking for may differ, first locate that link manually in the page source so it is easy to identify.
import requests
import bs4
# Same site:stackoverflow.com search URL as in the question.
url = 'https://www.google.com/search?q=site%3Astackoverflow.com+how+to+use+bs4+in+python&sxsrf=AOaemvKrCLt-Ji_EiPLjcEso3DVfBUmRbg%3A1630215433722&ei=CR0rYby7K7ue4-EP7pqIkAw&oq=site%3Astackoverflow.com+how+to+use+bs4+in+python&gs_lcp=Cgdnd3Mtd2l6EAM6BwgAEEcQsAM6BwgjELACECc6BQgAEM0CSgQIQRgAUMw2WPh_YLiFAWgBcAJ4AIABkAKIAd8lkgEHMC4xMC4xM5gBAKABAcgBCMABAQ&sclient=gws-wiz&ved=0ahUKEwj849XewdXyAhU7zzgGHW4NAsIQ4dUDCA8&uact=5'
# Fetch the page and parse it; `soup` is reused by the snippets below.
request_result=requests.get( url )
soup = bs4.BeautifulSoup(request_result.text,"html.parser")
Using select method:
I have used the CSS selector method, which identifies all matching
divs; from that list I have taken the elements from index position 1 onward.
Then I used select_one to get the a tag and extracted its href
accordingly.
# All result cards by their compound class; [1:] skips the first,
# non-result match.
main_data=soup.select("div.ZINbbc.xpd.O9g5cc.uUPGi")[1:]
# First card's anchor href, with Google's '/url?q=' redirect prefix removed.
# NOTE(review): REPL-style -- the value is displayed, not assigned or printed.
main_data[0].select_one("a")['href'].replace("/url?q=","")
Using find method:
# Equivalent find_all() form: a multi-valued class is matched as one string here.
main_data=soup.find_all("div",class_="ZINbbc xpd O9g5cc uUPGi")[1:]
# Same extraction as above; value displayed REPL-style rather than assigned.
main_data[0].find("a")['href'].replace("/url?q=","")
Output [Same for Both the Case]:
'https://stackoverflow.com/questions/23102833/how-to-scrape-a-website-which-requires-login-using-python-and-beautifulsoup&sa=U&ved=2ahUKEwjGxv2wytXyAhUprZUCHR8mBNsQFnoECAkQAQ&usg=AOvVaw280R9Wlz2mUKHFYQUOFVv8'

Scrape tab href value from a webpage by python Beautiful Soup

I have code that extracts links from the main page and navigates through each page in the list of links, the new link has a tab page that is represented as follows in the source:
<Li Class=" tab-contacts" Id="contacts"><A Href="?id=448&tab=contacts"><Span Class="text">Contacts</Span>
I want to extract the href value and navigate to that page to get some information, here is my code so far:
import re
import requests
from bs4 import BeautifulSoup
# Fetch the listing page and collect detail-page links from it.
r = requests.get(link_to_the_website)
data = r.content
soup = BeautifulSoup(data, "html.parser")
links = []
# NOTE(review): loop bodies below lost their indentation in the paste.
for i in soup.find_all('div',{'class':'leftInfoWrap'}):
link = i.find('a',href=True)
if link is None:
continue
links.append(link.get('href'))
for link in links:
# BUG(review): this parses the URL *string* as HTML instead of fetching the
# page -- it should be BeautifulSoup(requests.get(link).content, "lxml").
soup = BeautifulSoup(link,"lxml")
# BUG(review): select() takes a CSS selector only; the dict argument is
# find() syntax and is silently ignored here -- hence the empty result.
tabs = soup.select('Li',{'class':' tab-contacts'})
print(tabs)
However I am getting an empty list with 'print(tabs)' command. I did verify the link variable and it is being populated. Thanks in advance
Looks like you are trying to mix find syntax with select.
I would use the parent id as an anchor then navigate to the child with css selectors and child combinator.
# '#contacts > a': the <a> that is a direct child of the element with
# id="contacts" (child combinator); ['href'] reads its link target.
partial_link = soup.select_one('#contacts > a')['href']
You need to append the appropriate prefix.

extract information inside span tag

I am trying to extract PMC ID between "span" tag.
To do so, I used find element by xpath, but I'm facing the following error:
selenium.common.exceptions.NoSuchElementException:Message: Unable to locate element: /div/main/div/details/div/div[2]/details/summary/span[5]
Following is the link:
https://www.ncbi.nlm.nih.gov/pmc/utils/idconv/v1.0/?tool=my_tool&email=my_email#example.com&ids=9811893
Following is my code:
driver = webdriver.Firefox(executable_path='geckodriver.exe')
driver.implicitly_wait(10) # this lets webdriver wait 10 seconds for the website to load
driver.get("https://www.ncbi.nlm.nih.gov/pmc/utils/idconv/v1.0/?tool=my_tool&email=my_email#example.com&ids=9811893")
# BUG(review): an absolute XPath must start from the document root (/html/...);
# starting at /div matches nothing, producing the NoSuchElementException above.
pmc= driver.find_element_by_xpath('/div/main/div/details/div/div[2]/details/summary/span[5]')
# BUG(review): get_text() is a BeautifulSoup method; a Selenium WebElement
# exposes its visible text via the .text attribute instead.
pmc.get_text()
The output should be:
PMC24938
You can use a css attribute selector then get_attribute to get the attribute value
from selenium import webdriver
driver = webdriver.Firefox(executable_path='geckodriver.exe')
driver.get("https://www.ncbi.nlm.nih.gov/pmc/utils/idconv/v1.0/?tool=my_tool&email=my_email#example.com&ids=9811893")
# '[pmcid]' is an attribute selector: it matches the element carrying a
# pmcid attribute, whatever its tag. NOTE(review): find_element_by_css_selector
# was removed in Selenium 4 -- presumably this targets Selenium 3; confirm.
pmc = driver.find_element_by_css_selector('[pmcid]')
print(pmc.get_attribute('pmcid'))
Result:
Though you don't need selenium for this site. Use faster requests and bs4
import requests
from bs4 import BeautifulSoup as bs

# Fetch the ID-converter page directly -- no browser needed -- and read the
# 'pmcid' attribute off the element that carries it.
response = requests.get('https://www.ncbi.nlm.nih.gov/pmc/utils/idconv/v1.0/?tool=my_tool&email=my_email#example.com&ids=9811893')
document = bs(response.content, 'lxml')
pmc = document.select_one('[pmcid]')['pmcid']
print(pmc)

Scraping hidden product details on a webpage using Selenium

Sorry I am a Selenium noob and have done a lot of reading but am still having trouble getting the product price (£0.55) from this page:
https://groceries.asda.com/product/spaghetti-tagliatelle/asda-spaghetti/36628. Product details are not visible when parsing the html using bs4. Using Selenium I can get a string of the entire page and can see the price in there (using the following code). I should be able to extract the price from this somehow but would prefer a less hacky solution.
browser = webdriver.Firefox(executable_path=r'C:\Users\Paul\geckodriver.exe')
browser.get('https://groceries.asda.com/product/tinned-tomatoes/asda-smart-price-chopped-tomatoes-in-tomato-juice/19560')
# Full rendered HTML of the product page as one string (the price is in here).
content = browser.page_source
If I run something like this:
elem = driver.find_element_by_id("bodyContainerTemplate")
# NOTE(review): this prints the WebElement's repr (session/element ids),
# not its contents -- use elem.text for the visible text.
print(elem)
It just returns: selenium.webdriver.firefox.webelement.FirefoxWebElement (session="df23fae6-e99c-403c-a992-a1adf1cb8010", element="6d9aac0b-2e98-4bb5-b8af-fcbe443af906")
The price is the text associated with this element: p class="prod-price" but I cannot seem to get this working. How should I go about getting this text (the product price)?
The type of elem is WebElement. If you need to extract the text value of a web element, you can use the code below:
# Locate the price element by its class; .text yields its visible text.
elem = driver.find_element_by_class_name("prod-price-inner")
print(elem.text)
Try this solution, it works with selenium and beautifulsoup
from bs4 import BeautifulSoup
from selenium import webdriver

# Render the page in a headless browser so the JS-populated price exists,
# then parse the resulting HTML with BeautifulSoup.
url='https://groceries.asda.com/product/spaghetti-tagliatelle/asda-spaghetti/36628'
# NOTE(review): PhantomJS is deprecated; headless Chrome/Firefox is the
# modern replacement, but the call is kept to preserve the original setup.
driver = webdriver.PhantomJS()
driver.get(url)
data = driver.page_source
soup = BeautifulSoup(data, 'html.parser')
# The price text lives in <span class="prod-price-inner">.
ele = soup.find('span',{'class':'prod-price-inner'})
print(ele.text)  # fix: was Python 2 `print ele.text` -- a SyntaxError on Python 3
driver.quit()
It will print :
£0.55

How to solve, finding two of each link (Beautifulsoup, python)

Im using beautifulsoup4 to parse a webpage and collect all the href values using this code
#Collect links from 'new' page
pageRequest = requests.get('http://www.supremenewyork.com/shop/all/shirts')
soup = BeautifulSoup(pageRequest.content, "html.parser")
links = soup.select("div.turbolink_scroller a")
# NOTE(review): each product card has two <a class="name-link"> anchors
# (title and colour) with the same href -- the cause of the duplicates.
allProductInfo = soup.find_all("a", class_="name-link")
# NOTE(review): Python 2 print statement; use print(allProductInfo) on Python 3.
print allProductInfo
linksList1 = []
# NOTE(review): the loop body lost its indentation in the paste.
for href in allProductInfo:
linksList1.append(href.get('href'))
print(linksList1)
linksList1 prints two of each link. I believe this is happening because it's taking the link from the title as well as from the item colour. I have tried a few things but cannot get BS to parse only the title link and produce a list with one of each link instead of two. I imagine it's something really simple but I'm missing it. Thanks in advance.
This code will give you the result without getting duplicate results
(also, using set() may be a good idea, as #Tarum Gupta suggested)
But I changed the way you crawl
import requests
from bs4 import BeautifulSoup

#Collect links from 'new' page
pageRequest = requests.get('http://www.supremenewyork.com/shop/all/shirts')
soup = BeautifulSoup(pageRequest.content, "html.parser")
links = soup.select("div.turbolink_scroller a")

# Select only the name-link anchor inside the <h1> of each inner-article div,
# so every product contributes a single (title) link instead of two.
# (fix: this explanatory comment previously spilled onto an uncommented
# line, which was a SyntaxError.)
allProductInfo = soup.select("div.inner-article h1 a.name-link")
# print (allProductInfo)

linksList1 = []
for href in allProductInfo:
    linksList1.append(href.get('href'))
print(linksList1)

# Alternative: walk each inner-article div and take its h1 > a href directly.
alldiv = soup.findAll("div", {"class":"inner-article"})
for div in alldiv:
    linksList1.append(div.h1.a['href'])  # fix: was `linkList1` (NameError)

# fix: the original evaluated set()/list() and discarded the results;
# assign so the de-duplicated list is actually kept.
linksList1 = list(set(linksList1))  # set() removes duplicate links

Categories