Exact website links from google through BeautifulSoup - python

I want to search Google using BeautifulSoup and open the first link, but when I open that link it shows an error. I think the reason is that Google does not provide the exact URL of the website; it adds several tracking parameters to the URL. How do I get the exact URL?
When I tried using the cite tag it worked, but for long URLs it creates problems.
The first link I get using soup.h3.a['href'][7:] is:
'http://www.wikipedia.com/wiki/White_holes&sa=U&ved=0ahUKEwi_oYLLm_rUAhWJNI8KHa5SClsQFggbMAI&usg=AFQjCNGN-vlBvbJ9OPrnq40d0_b8M0KFJQ'
Here is my code:
import requests
from bs4 import BeautifulSoup
r = requests.get('https://www.google.com/search?q=site:wikipedia.com+Black+hole&gbv=1&sei=YwHNVpHLOYiWmQHk3K24Cw')
soup = BeautifulSoup(r.text, "html.parser")
print(soup.h3.a['href'][7:])

You could split the returned string:
url = soup.h3.a['href'][7:].split('&')
print(url[0])
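If you prefer not to rely on fixed string offsets like [7:], the standard library can pull the real destination out of the redirect URL for you. A minimal sketch, assuming Python 3 and that the href still has the /url?q=...&sa=... form shown in the question:
from urllib.parse import urlparse, parse_qs

href = soup.h3.a['href']  # e.g. '/url?q=http://www.wikipedia.com/wiki/White_holes&sa=U&...'
query = parse_qs(urlparse(href).query)
clean_url = query['q'][0]  # the actual destination URL, tracking parameters stripped
print(clean_url)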

Combining the answers presented above, your code would look like this:
from bs4 import BeautifulSoup
import requests
import csv
import os
import time

url = "https://www.google.co.in/search?q=site:wikipedia.com+Black+hole&dcr=0&gbv=2&sei=Nr3rWfLXMIuGvQT9xZOgCA"
r = requests.get(url)
data = r.text
url1 = "https://www.google.co.in"
soup = BeautifulSoup(data, "html.parser")
get_details = soup.find_all("div", attrs={"class": "g"})
final_data = []
for details in get_details:
    link = details.find_all("h3")
    #links = ""
    for mdetails in link:
        links = mdetails.find_all("a")
        lmk = ""
        for lnk in links:
            lmk = lnk.get("href")[7:].split("&")
            sublist = []
            sublist.append(lmk[0])
            final_data.append(sublist)
filename = "Google.csv"
with open("./" + filename, "w") as csvfile:
    csvfile = csv.writer(csvfile, delimiter=",")
    csvfile.writerow("")
    for i in range(0, len(final_data)):
        csvfile.writerow(final_data[i])

It's much simpler. You're looking for this:
# instead of this:
soup.h3.a['href'][7:].split('&')
# use this:
soup.select_one('.yuRUbf a')['href']
Code and example in the online IDE:
from bs4 import BeautifulSoup
import requests, lxml

headers = {
    'User-agent':
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}

params = {
    "q": "site:wikipedia.com black hole",  # query
    "gl": "us",                            # country to search from
    "hl": "en"                             # language
}

html = requests.get("https://www.google.com/search", headers=headers, params=params)
soup = BeautifulSoup(html.text, 'lxml')

first_link = soup.select_one('.yuRUbf a')['href']
print(first_link)
# https://en.wikipedia.com/wiki/Primordial_black_hole
Alternatively, you can achieve the same thing by using the Google Organic Results API from SerpApi. It's a paid API with a free plan.
The difference in your case is that you only need to extract the data from structured JSON rather than figuring out why things don't work, and then maintaining the parser over time if selectors change.
Code to integrate:
import os
from serpapi import GoogleSearch

params = {
    "engine": "google",
    "q": "site:wikipedia.com black hole",
    "hl": "en",
    "gl": "us",
    "api_key": os.getenv("API_KEY"),
}

search = GoogleSearch(params)
results = search.get_dict()

# [0] - first index of search results
first_link = results['organic_results'][0]['link']
print(first_link)
# https://en.wikipedia.com/wiki/Primordial_black_hole
Disclaimer, I work for SerpApi.


scraping google search results page data python

I want to scrape emails from the search results of a query, but when I access the class with the CSS selector method "select" and print it, it always shows an empty list. How can I access the .r class or "class=g"?
import requests
from bs4 import BeautifulSoup
url = "https://www.google.com/search?sxsrf=ACYBGNQA4leQETe0psVZPu7daLWbdsc9Ow%3A1579194494737&ei=fpggXpvRLMakwQKkqpSICg&q=%22computer+science+%22%22usa%22+%22%40yahoo.com%22&oq=%22computer+science+%22%22usa%22+%22%40yahoo.com%22&gs_l=psy-ab.12...0.0..7407...0.0..0.0.0.......0......gws-wiz.82okhpdJLYg&ved=0ahUKEwibiI_3zYjnAhVGUlAKHSQVBaEQ4dUDCAs"
responce = requests.get(url)
soup = BeautifulSoup(responce.text, "html.parser")
test = soup.select('.r')
print(test)
Your program is correct, but to get a correct answer from Google, you need to specify a User-Agent header:
import requests
from bs4 import BeautifulSoup
url = "https://www.google.com/search?sxsrf=ACYBGNQA4leQETe0psVZPu7daLWbdsc9Ow%3A1579194494737&ei=fpggXpvRLMakwQKkqpSICg&q=%22computer+science+%22%22usa%22+%22%40yahoo.com%22&oq=%22computer+science+%22%22usa%22+%22%40yahoo.com%22&gs_l=psy-ab.12...0.0..7407...0.0..0.0.0.......0......gws-wiz.82okhpdJLYg&ved=0ahUKEwibiI_3zYjnAhVGUlAKHSQVBaEQ4dUDCAs"
headers = {'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0'}
responce = requests.get(url, headers=headers) # <-- specify custom header
soup = BeautifulSoup(responce.text, "html.parser")
test = soup.select('.r')
print(test)
Prints:
[<div class="r"><a href="https://www.yahoo.com/news/11-course-complete-computer-science-171322233.html" onmousedown="return rwt(this,'','','','1','AOvVaw2wM4TUxc_4V7s9GjeWTNAG','','2ahUKEwjt17Kk-YjnAhW2R0EAHcnsC3QQFjAAegQIAxAB','','',event)"><div class="TbwUpd"><img alt="https://...
...
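If you only need the plain links out of those .r blocks rather than the whole div elements, a minimal follow-up sketch (assuming the same soup object and the older .r markup shown above):
# assumes `soup` was built from the same response as above
for anchor in soup.select('.r a'):
    href = anchor.get('href')
    if href and href.startswith('http'):
        print(href)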
To get the emails out of the Google Search results you need to use a regex:
# this regex may need modifications
re.findall(r'[\w\.-]+@[\w\.-]+\.\w+', variable_where_to_search_from)
Code:
from bs4 import BeautifulSoup
import requests, lxml, re

headers = {
    "User-agent":
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
    "Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}

html = requests.get('https://www.google.com/search?q="computer science ""usa" "@yahoo.com"', headers=headers)
soup = BeautifulSoup(html.text, 'lxml')

for result in soup.select('.tF2Cxc'):
    try:
        snippet = result.select_one('.lyLwlc').text
    except:
        snippet = None

    match_email = re.findall(r'[\w\.-]+@[\w\.-]+\.\w+', str(snippet))
    email = '\n'.join(match_email).strip()
    print(email)
----------
'''
ahmed_733@yahoo.com
yjzou@uguam.uog
yzou2002@yahoo.com
...
'''
Alternatively, you can do the same thing by using the Google Organic Results API from SerpApi. It's a paid API with a free plan.
It doesn't extract emails using regex, although that would be a great possible feature. The main difference is that it's much easier and faster to get things done rather than building everything from scratch.
Code to integrate:
from serpapi import GoogleSearch
import re

params = {
    "api_key": "YOUR_API_KEY",
    "engine": "google",
    "q": '"computer science ""usa" "@yahoo.com"',
}

search = GoogleSearch(params)
results = search.get_dict()

for result in results['organic_results']:
    try:
        snippet = result['snippet']
    except:
        snippet = None

    match_email = re.findall(r'[\w\.-]+@[\w\.-]+\.\w+', str(snippet))
    email = '\n'.join(match_email).strip()
    print(email)
---------
'''
shaikotweb@yahoo.com
ahmed_733@yahoo.com
RPeterson@L1id.com
rj_peterson@yahoo.com
'''
Disclaimer, I work for SerpApi.

Beautifulsoup is returning double links

I am trying to learn how to scrape websites and am therefore not using an API. I am trying to scrape eBay, and my script prints each URL twice. I did my due diligence and searched Google/Stack Overflow for help but was unable to find a solution. Thanks in advance.
driver.get('https://www.ebay.com/sch/i.html?_from=R40&_nkw=watches&_sacat=0&_pgn=' + str(i))
soup = BeautifulSoup(driver.page_source, 'lxml')
driver.maximize_window()

tempList = []
for link in soup.find_all('a', href=True):
    if 'itm' in link['href']:
        print(link['href'])
        tempList.append(link['href'])
Entire code: https://pastebin.com/q41eh3Q6
Just add the class name while searching for all the links. Hope this helps.
i = 1
driver.get('https://www.ebay.com/sch/i.html?_from=R40&_nkw=watches&_sacat=0&_pgn=' + str(i))
soup = BeautifulSoup(driver.page_source, 'lxml')
driver.maximize_window()

tempList = []
for link in soup.find_all('a', class_='s-item__link', href=True):
    if 'itm' in link['href']:
        print(link['href'])
        tempList.append(link['href'])

print(len(tempList))
You're looking for this:
# container with needed data: title, link, price, condition, number of reviews, etc.
for item in soup.select('.s-item__wrapper.clearfix'):
    # only the link will be extracted from the container
    link = item.select_one('.s-item__link')['href']
Code and full example in the online IDE:
from bs4 import BeautifulSoup
import requests, lxml

headers = {
    "User-Agent":
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}

html = requests.get('https://www.ebay.com/sch/i.html?_nkw=Wathces', headers=headers).text
soup = BeautifulSoup(html, 'lxml')

temp_list = []
for item in soup.select('.s-item__wrapper.clearfix'):
    link = item.select_one('.s-item__link')['href']
    temp_list.append(link)
    print(link)
------------
'''
https://www.ebay.com/itm/203611966827?hash=item2f68380d6b:g:pBAAAOSw1~NhRy4Y
https://www.ebay.com/itm/133887696438?hash=item1f2c541e36:g:U3IAAOSwBKthN4yg
https://www.ebay.com/itm/154561925393?epid=26004285120&hash=item23fc9bd111:g:TWUAAOSwf3pgNP08
https://www.ebay.com/itm/115010872425?hash=item1ac72ea469:g:yQsAAOSweMBhT4gs
https://www.ebay.com/itm/115005461839?epid=1776383383&hash=item1ac6dc154f:g:QskAAOSwDe9hS7Ys
https://www.ebay.com/itm/224515689673?hash=item34462d8cc9:g:oTwAAOSwAO5gna8u
https://www.ebay.com/itm/124919898822?hash=item1d15ce62c6:g:iEoAAOSwhAthQnX9
https://www.ebay.com/itm/133886767671?hash=item1f2c45f237:g:htkAAOSwNAhhQOyf
https://www.ebay.com/itm/115005341920?hash=item1ac6da40e0:g:4SIAAOSwWi1hR5Mx
...
'''
Alternatively, you can achieve the same thing by using the eBay Organic Results API from SerpApi. It's a paid API with a free plan.
The difference in your case is that you don't have to deal with the extraction process and maintain it over time; instead, you only need to iterate over structured JSON and get the data you want.
Code to integrate:
from serpapi import GoogleSearch
import os

params = {
    "engine": "ebay",
    "ebay_domain": "ebay.com",
    "_nkw": "watches",
    "api_key": os.getenv("API_KEY"),
}

search = GoogleSearch(params)
results = search.get_dict()

temp_list = []
for result in results['organic_results']:
    link = result['link']
    temp_list.append(link)
    print(link)
------------
'''
https://www.ebay.com/itm/203611966827?hash=item2f68380d6b:g:pBAAAOSw1~NhRy4Y
https://www.ebay.com/itm/133887696438?hash=item1f2c541e36:g:U3IAAOSwBKthN4yg
https://www.ebay.com/itm/154561925393?epid=26004285120&hash=item23fc9bd111:g:TWUAAOSwf3pgNP08
https://www.ebay.com/itm/115010872425?hash=item1ac72ea469:g:yQsAAOSweMBhT4gs
https://www.ebay.com/itm/115005461839?epid=1776383383&hash=item1ac6dc154f:g:QskAAOSwDe9hS7Ys
https://www.ebay.com/itm/224515689673?hash=item34462d8cc9:g:oTwAAOSwAO5gna8u
https://www.ebay.com/itm/124919898822?hash=item1d15ce62c6:g:iEoAAOSwhAthQnX9
https://www.ebay.com/itm/133886767671?hash=item1f2c45f237:g:htkAAOSwNAhhQOyf
https://www.ebay.com/itm/115005341920?hash=item1ac6da40e0:g:4SIAAOSwWi1hR5Mx
...
'''
P.S - I wrote a bit more in-depth blog post about how to scrape eBay search with Python.
Disclaimer, I work for SerpApi.

Why am I getting repetitive output while trying to scrape data from Google Scholar?

I am trying to scrape the PDF links from the search results from Google Scholar. I have tried to set a page counter based on the change in URL, but after the first eight output links, I am getting repetitive links as output.
#!/usr/bin/env python
from mechanize import Browser
from BeautifulSoup import BeautifulSoup
from bs4 import BeautifulSoup
import urllib2
import requests

# modifying the url as per page
urlCounter = 0
while urlCounter <= 30:
    urlPart1 = "http://scholar.google.com/scholar?start="
    urlPart2 = "&q=%22entity+resolution%22&hl=en&as_sdt=0,4"
    url = urlPart1 + str(urlCounter) + urlPart2
    page = urllib2.Request(url, None, {"User-Agent": "Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11"})
    resp = urllib2.urlopen(page)
    html = resp.read()
    soup = BeautifulSoup(html)
    urlCounter = urlCounter + 10

    recordCount = 0
    while recordCount <= 9:
        recordPart1 = "gs_ggsW"
        finRecord = recordPart1 + str(recordCount)
        recordCount = recordCount + 1

        # printing the links
        for link in soup.find_all('div', id=finRecord):
            linkstring = str(link)
            soup1 = BeautifulSoup(linkstring)
            for link in soup1.find_all('a'):
                print(link.get('href'))
Change the following line in your code:
finRecord = recordPart1 + str(recordCount)
To
finRecord = recordPart1 + str(recordCount+urlCounter-10)
The real problem: the div ids on the first page are gs_ggsW[0-9], but the ids on the second page are gs_ggsW[10-19], so BeautifulSoup will find no links on the 2nd page.
Python's variable scoping may confuse people coming from other languages, like Java. After the for loop below has executed, the variable link still exists, so link still references the last link from the 1st page.
for link in soup1.find_all('a'):
    print(link.get('href'))
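A tiny, self-contained illustration of that scoping behaviour (not part of the scraper, just a demonstration):
for link in ['first', 'second', 'last']:
    pass

# the loop variable outlives the loop, so it still points at the last item
print(link)  # prints 'last'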
Updates:
Google may not provide PDF download links for some papers, so you can't use the id to match the link of each paper. You can use CSS selectors to match all the links together.
soup = BeautifulSoup(html)
urlCounter = urlCounter + 10
for link in soup.select('div.gs_ttss a'):
    print(link.get('href'))
Have a look at the SelectorGadget Chrome extension to grab CSS selectors by clicking on the desired element in your browser.
Code and example in the online IDE to extract PDFs:
from bs4 import BeautifulSoup
import requests, lxml

params = {
    "q": "entity resolution",  # search query
    "hl": "en"                 # language
}

# https://requests.readthedocs.io/en/master/user/quickstart/#custom-headers
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3538.102 Safari/537.36 Edge/18.19582",
}

html = requests.get("https://scholar.google.com/scholar", params=params, headers=headers, timeout=30)
soup = BeautifulSoup(html.text, "lxml")

for pdf_link in soup.select(".gs_or_ggsm a"):
    pdf_file_link = pdf_link["href"]
    print(pdf_file_link)
# output from the first page:
'''
https://linqs.github.io/linqs-website/assets/resources/getoor-vldb12-slides.pdf
http://ilpubs.stanford.edu:8090/859/1/2008-7.pdf
https://drum.lib.umd.edu/bitstream/handle/1903/4241/umi-umd-4070.pdf;sequence=1
https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.169.9535&rep=rep1&type=pdf
https://arxiv.org/pdf/1208.1927
https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.77.6875&rep=rep1&type=pdf
http://da.qcri.org/ntang/pubs/vldb18-deeper.pdf
'''
Alternatively, you can achieve the same thing by using the Google Scholar Organic Results API from SerpApi. It's a paid API with a free plan.
The main difference is that you only need to grab the data from structured JSON instead of figuring out how to extract the data from HTML and how to bypass blocks from search engines.
Code to integrate:
from serpapi import GoogleSearch

params = {
    "api_key": "YOUR_API_KEY",   # SerpApi API key
    "engine": "google_scholar",  # Google Scholar organic results
    "q": "entity resolution",    # search query
    "hl": "en"                   # language
}

search = GoogleSearch(params)
results = search.get_dict()

for pdfs in results["organic_results"]:
    for link in pdfs.get("resources", []):
        pdf_link = link["link"]
        print(pdf_link)
# output:
'''
https://linqs.github.io/linqs-website/assets/resources/getoor-vldb12-slides.pdf
http://ilpubs.stanford.edu:8090/859/1/2008-7.pdf
https://drum.lib.umd.edu/bitstream/handle/1903/4241/umi-umd-4070.pdf;sequence=1
https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.169.9535&rep=rep1&type=pdf
https://arxiv.org/pdf/1208.1927
https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.77.6875&rep=rep1&type=pdf
http://da.qcri.org/ntang/pubs/vldb18-deeper.pdf
'''
If you want to scrape more data from organic results, there's a dedicated Scrape Google Scholar with Python blog post of mine.
Disclaimer, I work for SerpApi.

Google search url given by href is wrong

It appears that Google searches, when the result page is parsed with BeautifulSoup, give URLs of the following form:
/url?q= "URL WOULD BE HERE" &sa=U&ei=9LFsUbPhN47qqAHSkoGoDQ&ved=0CCoQFjAA&usg=AFQjCNEZ_f4a9Lnb8v2_xH0GLQ_-H0fokw
I am getting the links by using soup.findAll('a') and then using a['href'].
More specifically, the code I have used is the following:
import urllib2
from BeautifulSoup import BeautifulSoup, SoupStrainer
import re

main_site = 'https://www.google.com/'
search = 'search?q='
query = 'pillows'
full_url = main_site + search + query
request = urllib2.Request(full_url, headers={'User-Agent': 'Chrome/16.0.912.77'})
main_html = urllib2.urlopen(request).read()
results = BeautifulSoup(main_html, parseOnlyThese=SoupStrainer('div', {'id': 'search'}))

try:
    for search_hit in results.findAll('li', {'class': 'g'}):
        for elm in search_hit.findAll('h3', {'class': 'r'}):
            for a in elm.findAll('a', {'href': re.compile('.+')}):
                print a['href']
except TypeError:
    pass
Also, I have noticed on other sites that a['href'] may return something like /dsoicjsdaoicjsdcj, where the link would actually take you to website.com/dsoicjsdaoicjsdcj.
I know that in this case I can simply concatenate them, but I feel like I shouldn't have to change how I parse and treat a['href'] based on which website I'm looking at. Is there a better way to get this link? Is there some JavaScript that I need to take into account? Surely there is a simple way in BeautifulSoup to get the full URL to follow from the a tag?
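For the relative-href case described above, one standard-library option is urljoin, which resolves a relative path against the page it came from, so there is no hand-concatenation. A minimal sketch using Python 3's urllib.parse (in Python 2 the same function lives in the urlparse module); the path below is just the hypothetical one from the question:
from urllib.parse import urljoin

base = 'https://website.com/some/page'
relative_href = '/dsoicjsdaoicjsdcj'  # hypothetical relative link taken from a['href']
print(urljoin(base, relative_href))   # https://website.com/dsoicjsdaoicjsdcj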
SoupStrainer('div', {'class': "vsc"})
returns nothing, because if you do:
print main_html
and search for "vsc", there is no match in the HTML.
You're looking for this:
# container with needed data: title, link, etc.
for result in soup.select('.tF2Cxc'):
    link = result.select_one('.yuRUbf a')['href']
Also, when using the requests library, you can pass URL params easily, like so:
# this:
main_site = 'https://www.google.com/'
search = 'search?q='
query = 'pillows'
full_url = main_site + search + query

# could be translated to this:
params = {
    'q': 'minecraft',
    'gl': 'us',
    'hl': 'en',
}
html = requests.get('https://www.google.com/search', params=params)
While using urllib, you can do it like so (in Python 3, urlencode has been moved to urllib.parse.urlencode and urlopen to urllib.request.urlopen):
# https://stackoverflow.com/a/54050957/15164646
# https://stackoverflow.com/a/2506425/15164646
import urllib.parse, urllib.request

url = "https://disc.gsfc.nasa.gov/SSW/#keywords="
params = {'keyword': "(GPM_3IMERGHHE)", 't1': "2019-01-02", 't2': "2019-01-03", 'bboxBbox': "3.52,32.34,16.88,42.89"}
quoted_params = urllib.parse.urlencode(params)
# 'bboxBbox=3.52%2C32.34%2C16.88%2C42.89&t2=2019-01-03&keyword=%28GPM_3IMERGHHE%29&t1=2019-01-02'
full_url = url + quoted_params
# 'https://disc.gsfc.nasa.gov/SSW/#keywords=bboxBbox=3.52%2C32.34%2C16.88%2C42.89&t2=2019-01-03&keyword=%28GPM_3IMERGHHE%29&t1=2019-01-02'
resp = urllib.request.urlopen(full_url).read()
Code and example in the online IDE:
from bs4 import BeautifulSoup
import requests, lxml

headers = {
    'User-agent':
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}

params = {
    'q': 'minecraft',
    'gl': 'us',
    'hl': 'en',
}

html = requests.get('https://www.google.com/search', headers=headers, params=params)
soup = BeautifulSoup(html.text, 'lxml')

for result in soup.select('.tF2Cxc'):
    link = result.select_one('.yuRUbf a')['href']
    print(link)
---------
'''
https://www.minecraft.net/en-us/
https://classic.minecraft.net/
https://play.google.com/store/apps/details?id=com.mojang.minecraftpe&hl=en_US&gl=US
https://en.wikipedia.org/wiki/Minecraft
'''
Alternatively, you can achieve the same thing by using the Google Organic Results API from SerpApi. It's a paid API with a free plan.
The difference in your case is that you don't have to build everything from scratch, bypass blocks, or maintain the parser over time.
Code to integrate to achieve your goal:
import os
from serpapi import GoogleSearch

params = {
    "engine": "google",
    "q": "minecraft",
    "hl": "en",
    "gl": "us",
    "api_key": os.getenv("API_KEY"),
}

search = GoogleSearch(params)
results = search.get_dict()

for result in results["organic_results"]:
    print(result['link'])
---------
'''
https://www.minecraft.net/en-us/
https://classic.minecraft.net/
https://play.google.com/store/apps/details?id=com.mojang.minecraftpe&hl=en_US&gl=US
https://en.wikipedia.org/wiki/Minecraft
'''
Disclaimer, I work for SerpApi.

beautiful soup extract a href from google search

A google search gives me the following first result on HTML:
<h3 class="r"><em>Quantitative Trading</em>: <em>How to Build Your Own Algorithmic</em> <b>...</b> - Amazon</h3>
I would like to extract the link http://www.amazon.com/Quantitative-Trading-Build-Algorithmic-Business/dp/0470284889 from this, but when I use Beautiful Soup to extract the information with
soup.find("h3").find("a").get("href")
I obtain the following string instead:
/url?q=http://www.amazon.com/Quantitative-Trading-Build-Algorithmic-Business/dp/0470284889&sa=U&ei=P2ycT6OoNuasiAL2ncV5&ved=0CBIQFjAA&usg=AFQjCNEo_ujANAKnjheWDRlBKnJ1BGeA7A
I know that the link is in there and I could parse it by deleting the /url?q= prefix and everything after the & symbol, but I was wondering if there was a cleaner solution.
Thanks!
You can use a combination of urlparse.urlparse and urlparse.parse_qs, e.g.:
>>> import urlparse
>>> url = '/url?q=http://www.amazon.com/Quantitative-Trading-Build-Algorithmic-Business/dp/0470284889&sa=U&ei=P2ycT6OoNuasiAL2ncV5&ved=0CBIQFjAA&usg=AFQjCNEo_ujANAKnjheWDRlBKnJ1BGe'
>>> data = urlparse.parse_qs(
... urlparse.urlparse(url).query
... )
>>> data
{'ei': ['P2ycT6OoNuasiAL2ncV5'],
'q': ['http://www.amazon.com/Quantitative-Trading-Build-Algorithmic-Business/dp/0470284889'],
'sa': ['U'],
'usg': ['AFQjCNEo_ujANAKnjheWDRlBKnJ1BGe'],
'ved': ['0CBIQFjAA']}
>>> data['q'][0]
'http://www.amazon.com/Quantitative-Trading-Build-Algorithmic-Business/dp/0470284889'
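The session above uses the Python 2 urlparse module; in Python 3 the same functions live in urllib.parse, so an equivalent sketch would be:
from urllib.parse import urlparse, parse_qs

url = '/url?q=http://www.amazon.com/Quantitative-Trading-Build-Algorithmic-Business/dp/0470284889&sa=U&ei=P2ycT6OoNuasiAL2ncV5&ved=0CBIQFjAA&usg=AFQjCNEo_ujANAKnjheWDRlBKnJ1BGe'
data = parse_qs(urlparse(url).query)
print(data['q'][0])
# http://www.amazon.com/Quantitative-Trading-Build-Algorithmic-Business/dp/0470284889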
To extract only the first result from the page, you can use the select_one() or find() bs4 methods, passing a CSS selector.
Code and example in the online IDE:
import requests, lxml
from bs4 import BeautifulSoup

headers = {
    "User-Agent":
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3538.102 Safari/537.36 Edge/18.19582"
}

# passing parameters in URLs
# https://docs.python-requests.org/en/master/user/quickstart/#passing-parameters-in-urls
params = {'q': 'Quantitative Trading How to Build Your Own Algorithmic - amazon'}

def bs4_get_first_googlesearch():
    html = requests.get('https://www.google.com/search', headers=headers, params=params).text
    soup = BeautifulSoup(html, 'lxml')

    first_link = soup.select_one('.yuRUbf').a['href']
    print(first_link)

bs4_get_first_googlesearch()

# output:
'''
https://www.amazon.com/Quantitative-Trading-Build-Algorithmic-Business/dp/0470284889
'''
Alternatively, you can do the same thing using the Google Search Engine Results API from SerpApi. It's a paid API with a free trial of 5,000 searches. Check out the playground.
The big difference is that everything is already done for the end user: selecting elements, bypassing blocks, proxy rotation, and more.
Code to integrate:
from serpapi import GoogleSearch
import os

def serpapi_get_first_googlesearch():
    params = {
        "api_key": os.getenv("API_KEY"),
        "engine": "google",
        "q": "Quantitative Trading How to Build Your Own Algorithmic - amazon",
        "hl": "en",
    }

    search = GoogleSearch(params)
    results = search.get_dict()

    # [0] - first element from the search results
    first_link = results['organic_results'][0]['link']
    print(first_link)

serpapi_get_first_googlesearch()

# output:
'''
https://www.amazon.com/Quantitative-Trading-Build-Algorithmic-Business/dp/0470284889
'''
Disclaimer, I work for SerpApi.
