I am trying to parse search results, but when I send the POST request to get them, I get a page with this error:
The requested URL was rejected. Please consult with your administrator.
Website: https://prod.ceidg.gov.pl/CEIDG/CEIDG.Public.UI/Search.aspx
I've collected data like __VIEWSTATE, __VIEWSTATEGENERATOR, etc. to pass through the form, but it doesn't work.
What am I missing?
#import requests
from bs4 import BeautifulSoup
import lxml
import urllib
from requests_html import HTMLSession
from requests_html import AsyncHTMLSession
import time
#s = HTMLSession(browser_args=["--no-sandbox", '--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'])
s = HTMLSession()
header_simple = {
    'User_Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
    'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Content-Type': 'application/x-www-form-urlencoded',
}
r = s.request('get', 'http://prod.ceidg.gov.pl/CEIDG/CEIDG.Public.UI/Search.aspx')
soup_dummy = BeautifulSoup(r.content, "lxml")
# parse and retrieve the vital hidden form values
viewstate = soup_dummy.select("#__VIEWSTATE")[0]['value']
viewstategen = soup_dummy.select("#__VIEWSTATEGENERATOR")[0]['value']
eventvalidation = soup_dummy.select("#__EVENTVALIDATION")[0]['value']
english = soup_dummy.select("#hfEnglishWebsiteUrl")[0]['value']
data = {
    '__VIEWSTATE': viewstate,
    '__VIEWSTATEGENERATOR': viewstategen,
    '__EVENTVALIDATION': eventvalidation,
    'ctl00$MainContent$txtName': 'bank',
    'ctl00$MainContent$cbIncludeCeased': 'on',
    'ctl00$MainContent$btnSearch': 'Find',
    'ctl00$hfAuthRequired': 'False',
    'ctl00$hfEnglishWebsiteUrl': english,
    'ctl00$stWarningLength': '30',
    'ctl00$stIdleAfter': '1200',
    'ctl00$stPollingInterval': '60',
    'ctl00$stMultiTabTimeoutSyncInterval': '20'
}
time.sleep(3)
p = s.request('post', 'https://prod.ceidg.gov.pl/CEIDG/CEIDG.Public.UI/Search.aspx', params=data, headers=header_simple)
print(p.content)
This is one way to populate the results from that page using the requests module. Be sure to include all of the form's keys and values in the data parameter when sending the POST request in order to access the desired content.
Working script:
import lxml
import requests
from pprint import pprint
from bs4 import BeautifulSoup
with requests.Session() as s:
    s.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
    r = s.get('http://prod.ceidg.gov.pl/CEIDG/CEIDG.Public.UI/Search.aspx')
    soup = BeautifulSoup(r.text, "lxml")
    # collect every named input, including the hidden ASP.NET state fields
    data = {i['name']: i.get('value', '') for i in soup.select('input[name]')}
    data['ctl00$MainContent$txtName'] = 'bank'
    data['ctl00$MainContent$cbIncludeCeased'] = 'on'
    data['ctl00$MainContent$btnSearch'] = 'Find'
    # drop the buttons that were not clicked
    data.pop('ctl00$MainContent$btnClear')
    data.pop('ctl00$versionDetails$btnClose')
    # pprint(data)  # print it to see the keys and values that have been included within data
    p = s.post('https://prod.ceidg.gov.pl/CEIDG/CEIDG.Public.UI/Search.aspx', data=data)
    soup = BeautifulSoup(p.text, "lxml")
    print(soup.select_one("table#MainContent_DataListEntities"))
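If you want the individual rows rather than the raw table markup, here is a minimal sketch, appended after the with block above (it assumes the results table keeps the MainContent_DataListEntities id):

table = soup.select_one("table#MainContent_DataListEntities")
if table:
    for row in table.select("tr"):
        # each result row is a set of plain table cells
        cells = [td.get_text(strip=True) for td in row.select("td")]
        if cells:
            print(cells)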
Related
I want to scrape a website, but whenever I reach an anchor tag the link is "job/undefined". I used a POST request with postData to fetch data from the page:
from bs4 import BeautifulSoup
import requests
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36"}
postData = {
    'search': 'search',
    'facets[camp_type]': 'day_camp',
    'open[choices-made-content]': 'true'
}
url = 'https://www.trustme.work/en'
html_1 = requests.post(url, headers=headers, data=postData)
soup1 = BeautifulSoup(html_1.text, 'lxml')
a = soup1.select('div.MuiGrid-root MuiGrid-grid-xs-12 ')
b = soup1.select('span[class="MuiTypography-root MuiTypography-h2"]')
print('soup:',b)
Sample from the output:
<span class="MuiTypography-root MuiTypography-h2" style="cursor:pointer">
<a href="job/undefined" style="color:#413E52;text-decoration:none">
Network and Security engineer
</a>
</span>
EDIT
Part of the content is served dynamically, so you have to fetch the job's hashid via the API and then create the link yourself, or use the data from the JSON response:
import requests
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36"}
url = 'https://api.trustme.work/api/job_offers?include=technologies%2Cjob%2Ccompany%2Ccontract_type%2Clevel'
jobs = requests.get(url, headers=headers).json()['included']['jobs']
['https://www.trustme.work/job/' + v['hashid'] for k,v in jobs.items()]
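For example, to store and print those links (only the hashid key is taken from the JSON above; the rest is plain Python):

links = ['https://www.trustme.work/job/' + v['hashid'] for k, v in jobs.items()]
for link in links:
    print(link)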
To get the links from each job post, change your CSS selector to select your elements more specifically; also, prefer static identifiers or HTML structure over classes:
.select('h2 a')
To get a list of all links, use a list comprehension:
['https://www.trustme.work' + a.get('href') for a in soup1.select('h2 a')]
Example
from bs4 import BeautifulSoup
import requests
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36"}
postData = {
    'search': 'search',
    'facets[camp_type]': 'day_camp',
    'open[choices-made-content]': 'true'
}
url = 'https://www.trustme.work/en'
html_1 = requests.post(url, headers=headers, data=postData)
soup1 = BeautifulSoup(html_1.text, 'lxml')
['https://www.trustme.work' + a.get('href') for a in soup1.select('h2 a')]
I am trying to retrieve a URL using requests.get:
import requests
from bs4 import BeautifulSoup
baseurl = "https://www.olx.com.eg/"
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36'
}
r = requests.get('https://www.olx.com.eg/jobs/')
soup = BeautifulSoup(r.content, 'lxml')
product_list = soup.findAll('div',class_ = 'ads__item')
print(product_list)
but it returns an empty list because it does not even open the URL.
What is the issue here?
Add the headers= parameter to requests.get:
import requests
from bs4 import BeautifulSoup
baseurl = "https://www.olx.com.eg/"
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36"
}
r = requests.get("https://www.olx.com.eg/jobs/", headers=headers)
soup = BeautifulSoup(r.content, "lxml")
product_list = soup.findAll("div", class_="ads__item")
print(len(product_list))
Prints:
45
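From there you can drill into each item, e.g. with a small sketch like this (the inner tag layout is an assumption; inspect one card to confirm):

for item in product_list[:5]:
    # hypothetical: assumes each ad card wraps its target in an <a href>
    a = item.find("a", href=True)
    if a:
        print(a["href"])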
I have code to collect all of the URLs from the "oddsportal" website for a page:
from bs4 import BeautifulSoup
import requests
headers = {
    'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'
}
source = requests.get("https://www.oddsportal.com/soccer/africa/africa-cup-of-nations/results/",headers=headers)
soup = BeautifulSoup(source.text, 'html.parser')
main_div=soup.find("div",class_="main-menu2 main-menu-gray")
a_tag=main_div.find_all("a")
for i in a_tag:
    print(i['href'])
which returns these results:
/soccer/africa/africa-cup-of-nations/results/
/soccer/africa/africa-cup-of-nations-2019/results/
/soccer/africa/africa-cup-of-nations-2017/results/
/soccer/africa/africa-cup-of-nations-2015/results/
/soccer/africa/africa-cup-of-nations-2013/results/
/soccer/africa/africa-cup-of-nations-2012/results/
/soccer/africa/africa-cup-of-nations-2010/results/
/soccer/africa/africa-cup-of-nations-2008/results/
I would like the URLs to be returned as:
https://www.oddsportal.com/soccer/africa/africa-cup-of-nations/results/
https://www.oddsportal.com/soccer/africa/africa-cup-of-nations/results/#/page/2/
https://www.oddsportal.com/soccer/africa/africa-cup-of-nations/results/#/page/3/
for all the parent URLs generated for results.
I can see from inspect element that the URLs can be appended, under the div with id="pagination".
The data under id="pagination" is loaded dynamically, so requests won't support it.
However, you can get the table for all those pages (1-3) by sending a GET request to:
https://fb.oddsportal.com/ajax-sport-country-tournament-archive/1/MN8PaiBs/X0/1/0/{page}/?_={timestamp}
where {page} corresponds to the page number (1-3) and {timestamp} is the current timestamp.
You'll also need to add:
"Referer": "https://www.oddsportal.com/"
to your headers.
Also, use the lxml parser instead of html.parser to avoid a RecursionError.
import re
import requests
from datetime import datetime
from bs4 import BeautifulSoup
headers = {
    "User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
    "Referer": "https://www.oddsportal.com/",
}
with requests.Session() as session:
    session.headers.update(headers)
    for page in range(1, 4):
        response = session.get(
            f"https://fb.oddsportal.com/ajax-sport-country-tournament-archive/1/MN8PaiBs/X0/1/0/{page}/?_={datetime.now().timestamp()}"
        )
        table_data = re.search(r'{"html":"(.*)"}', response.text).group(1)
        soup = BeautifulSoup(table_data, "lxml")
        print(soup.prettify())
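If all you need are the paginated URLs in the exact format you listed, you can also build them directly once you know the page count (3 here):

base = "https://www.oddsportal.com/soccer/africa/africa-cup-of-nations/results/"
urls = [base] + [f"{base}#/page/{n}/" for n in range(2, 4)]
print(urls)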
I am new to Python scraping, so as part of my practice I tried a few other sites, where often no data was returned at all; but when I checked Groupon, I found that urllib only returns the first 8 results, while there are 36 results on the browser page.
I am using urllib and BS4; below is the code:
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
req = Request('https://www.groupon.com/browse/chicago?category=beauty-and-spas')
req.add_header(
    'User-Agent',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36',
)
try:
    with urlopen(req) as response:
        htmlcontent = response.read().decode('utf-8')
except:
    htmlcontent = None
soup = BeautifulSoup(htmlcontent, 'lxml')
all_links = soup.find('div', {'id': 'pull-results'}).select('figure > div > a')
Can somebody please tell me what I am missing in the code to be able to extract all the data?
If this doesn't or shouldn't work, is Selenium the only option?
Try the following to get all the items and their links while traversing the next pages:
import requests
from bs4 import BeautifulSoup
base_link = 'https://www.groupon.com/browse/chicago?category=beauty-and-spas'
url = 'https://www.groupon.com/partial/browse/get-paginated-cards?'
params = {
    'category': 'beauty-and-spas',
    'page': 1
}
with requests.Session() as s:
    s.headers['user-agent'] = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
    r = s.get(base_link)
    # the pagination endpoint requires the _csrf token issued with the first response
    params['_csrf'] = r.cookies['_csrf']
    while True:
        print("current page----------->", params['page'])
        res = s.get(url, params=params)
        soup = BeautifulSoup(res.json()[0]['cardSlotHtml'], "lxml")
        # stop once a page comes back with no deal cards
        if not soup.select_one("figure[data-pingdom-info='purchasable-deal']"):
            break
        for item in soup.select("figure[data-pingdom-info='purchasable-deal']"):
            item_title = item.select_one(".cui-udc-details .cui-udc-title").get_text(strip=True)
            item_link = item.select_one(".cui-content > a[href]").get("href")
            print(item_title, item_link)
        params['page'] += 1
The other 28 links are loaded dynamically, so urllib alone won't fetch them. However, you can scrape them by sending a GET request to:
https://www.groupon.com/partial/browse/get-lazy-loaded-cards?category=beauty-and-spas&_csrf=P6rFPl1o-xDta8uOABKo_9LOiUajyK9bieMg
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
req = Request(
    "https://www.groupon.com/partial/browse/get-lazy-loaded-cards?category=beauty-and-spas&_csrf=P6rFPl1o-xDta8uOABKo_9LOiUajyK9bieMg"
)
req.add_header(
    "User-Agent",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36",
)
with urlopen(req) as response:
    htmlcontent = response.read().decode("utf-8")
soup = BeautifulSoup(htmlcontent, "lxml")
# the endpoint returns JSON-escaped HTML, so class attributes still carry escaped quotes
for tag in soup.find_all(class_=r'\"cui-content\"'):
    try:
        link = tag.find("a")["href"]
    except TypeError:
        continue
    print(link.replace('\\"', "").replace("\\n", ""))
Output:
https://www.groupon.com/deals/yupo-health-2?deal_option=75076cab-9268-4c2d-9459-e79a559dfed6
https://www.groupon.com/deals/infinity-laser-spa-2-4?deal_option=4dc14f7d-29ac-45e1-a664-ea76ddc44718
https://www.groupon.com/deals/dr-laser-nyc-3?deal_option=232325a0-8b3f-4fa9-b42b-7dc668ff7474
...
...
I am trying to extract the value marked in the picture into a variable, but it seems that when it is within Vue components, bs4 is not doing the searching like I am expecting. Can anyone point me in the general direction as to how I would be able to extract the value from this document in Python?
Code is found below the picture; thanks in advance.
import requests
from bs4 import BeautifulSoup
URL = 'https://api.tracker.gg/api/v2/rocket-league/standard/profile/steam/76561198060134880'
headers = {"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'}
page = requests.get(URL, headers = headers)
soup = BeautifulSoup(page.content, 'html.parser')
#print(soup.prettify())
div_list = soup.findAll({"class":'value'})
print(div_list)
Since the page returns a JSON response, you don't need BeautifulSoup to parse it.
import requests
import json
URL = 'https://api.tracker.gg/api/v2/rocket-league/standard/profile/steam/76561198060134880'
headers = {"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'}
response = requests.get(URL, headers=headers)
dict_of_response = json.loads(response.text)
obj_list = dict_of_response['data']['segments']
print(obj_list)
The obj_list variable now contains a list of dicts. Those dicts contain the data you want, so you only need to loop through the list and do what you want with the data.
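For example, a minimal sketch of that loop (the 'metadata' and 'stats' keys are assumptions about this response's shape; print obj_list first to confirm the actual field names):

for segment in obj_list:
    # 'metadata'/'stats' are assumed field names; adjust to the actual JSON
    name = segment.get('metadata', {}).get('name')
    stats = segment.get('stats', {})
    print(name, {k: v.get('value') for k, v in stats.items()})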