I tried to extract some data from a dynamically loaded JavaScript website using scrapy-playwright, but I got stuck at the very beginning.
The part of my settings.py file where I'm running into trouble is as follows:
#playwright
DOWNLOAD_HANDLERS = {
    "http": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
    "https": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
}
#TWISTED_REACTOR = 'twisted.internet.asyncioreactor.AsyncioSelectorReactor'
#ASYNCIO_EVENT_LOOP = 'uvloop.Loop'
When I inject the following scrapy-playwright handler:
DOWNLOAD_HANDLERS = {
    "http": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
    "https": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
}
Then I got:
scrapy.exceptions.NotSupported: Unsupported URL scheme 'https': The installed reactor
(twisted.internet.selectreactor.SelectReactor) does not match the requested one (twisted.internet.asyncioreactor.AsyncioSelectorReactor)
When I inject TWISTED_REACTOR:
TWISTED_REACTOR = 'twisted.internet.asyncioreactor.AsyncioSelectorReactor'
Then I got:
raise TypeError(
TypeError: SelectorEventLoop required, instead got: <ProactorEventLoop running=False closed=False debug=False>
And when I inject ASYNCIO_EVENT_LOOP
Then I got:
ModuleNotFoundError: No module named 'uvloop'
Finally, installing 'uvloop' fails as well:
pip install uvloop
Script
import scrapy
from scrapy_playwright.page import PageCoroutine

class ProductSpider(scrapy.Spider):
    name = 'product'

    def start_requests(self):
        yield scrapy.Request(
            'https://shoppable-campaign-demo.netlify.app/#/',
            meta={
                'playwright': True,
                'playwright_include_page': True,
                'playwright_page_coroutines': [
                    PageCoroutine("wait_for_selector", "div#productListing"),
                ]
            }
        )

    async def parse(self, response):
        pass
        # parses content
The developers of scrapy_playwright have suggested setting DOWNLOAD_HANDLERS and TWISTED_REACTOR directly in your script.
A similar comment is provided here
Here's a working script implementing just this:
import scrapy
from scrapy_playwright.page import PageCoroutine
from scrapy.crawler import CrawlerProcess

class ProductSpider(scrapy.Spider):
    name = 'product'

    def start_requests(self):
        yield scrapy.Request(
            'https://shoppable-campaign-demo.netlify.app/#/',
            callback=self.parse,
            meta={
                'playwright': True,
                'playwright_include_page': True,
                'playwright_page_coroutines': [
                    PageCoroutine("wait_for_selector", "div#productListing"),
                ]
            }
        )

    async def parse(self, response):
        container = response.xpath("(//div[@class='col-md-6'])[1]")
        for items in container:
            yield {
                'products': items.xpath("(//h3[@class='card-title'])[1]//text()").get()
            }
        # parses content

if __name__ == "__main__":
    process = CrawlerProcess(
        settings={
            "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
            "DOWNLOAD_HANDLERS": {
                "https": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
                "http": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
            },
            "CONCURRENT_REQUESTS": 32,
            "FEED_URI": 'Products.jl',
            "FEED_FORMAT": 'jsonlines',
        }
    )

    process.crawl(ProductSpider)
    process.start()
And we get the following output:
{'products': 'Oxford Loafers'}
If you are using Windows, then your problem is that scrapy-playwright doesn't support Windows. Check it out here: https://github.com/scrapy-plugins/scrapy-playwright/issues/154
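The TypeError about ProactorEventLoop earlier in the question is the usual symptom of this on Windows: Python 3.8+ defaults to a proactor event loop there, which Twisted's AsyncioSelectorReactor cannot use. Below is a minimal, untested sketch of the commonly suggested workaround (it does not lift the Windows limitations tracked in the issue above): switch to a selector event loop before Scrapy installs the reactor.

import sys
import asyncio

from scrapy.crawler import CrawlerProcess

# Assumption: forcing the selector event loop policy lets Twisted's
# AsyncioSelectorReactor install on Windows. It does not by itself fix
# every Windows limitation discussed in scrapy-playwright issue #154.
if sys.platform == "win32":
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

process = CrawlerProcess(
    settings={
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "DOWNLOAD_HANDLERS": {
            "http": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
            "https": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
        },
    }
)
# process.crawl(ProductSpider)  # the spider from the working script above
# process.start()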
Source : https://docs.twisted.org/en/twisted-17.9.0/core/howto/time.html
My Code:
import scrapy
from scrapy.crawler import CrawlerRunner
from twisted.internet import task
from twisted.internet import reactor

class Livescores1(scrapy.Spider):
    name = 'Total'
    runner = CrawlerRunner(settings={
        "FEEDS": {
            "Total.json": {"format": "json", "overwrite": True},
        },
    })

    def start_requests(self):
        yield scrapy.Request('https://www.livescores.com/football/turkey/super-lig/?tz=3&table=league-total')

    def parse(self, response):
        for total in response.css('td'):
            yield {
                'total': total.css('::text').get()
            }

class Livescores2(scrapy.Spider):
    name = 'Home'
    runner = CrawlerRunner(settings={
        "FEEDS": {
            "Home.json": {"format": "json", "overwrite": True},
        },
    })

    def start_requests(self):
        yield scrapy.Request('https://www.livescores.com/football/turkey/super-lig/?tz=3&table=league-home')

    def parse(self, response):
        for total in response.css('td'):
            yield {
                'total': total.css('::text').get()
            }

class Livescores3(scrapy.Spider):
    name = 'Away'
    runner = CrawlerRunner(settings={
        "FEEDS": {
            "Away.json": {"format": "json", "overwrite": True},
        },
    })

    def start_requests(self):
        yield scrapy.Request('https://www.livescores.com/football/turkey/super-lig/?tz=3&table=league-away')

    def parse(self, response):
        for total in response.css('td'):
            yield {
                'total': total.css('::text').get()
            }

loopTimes = 99999999999999999999999999999
failInTheEnd = False
_loopCounter = 0

def runEverySecond():
    """
    Called at every loop interval.
    """
    runner.crawl(Livescores1)
    runner.crawl(Livescores2)
    runner.crawl(Livescores3)
    global _loopCounter
    if _loopCounter < loopTimes:
        _loopCounter += 1
        print('A new second has passed.')
        return

loop = task.LoopingCall(runEverySecond)

# Start looping every 1 second.
loopDeferred = loop.start(1.0)

reactor.run()
This code gives an error: "runner" is not defined.
All I want is to save each spider's output to a different JSON file. For this I added a separate CrawlerRunner(settings) to each class. But I would also have to add another runner = CrawlerRunner(settings) for the runEverySecond function, otherwise the code does not work.
If I add that (even though those settings can only name a single JSON file), all classes obey this rule and don't save data to different files!
This structure has a function that runs repeatedly; it loops over the spiders.
Can I give each class different settings through the CrawlerRunner(settings) that I create inside runEverySecond? Is this possible, or should I try a different solution?
Could you help me to fix this?
Thank you very much
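A minimal sketch of one way this is often handled, untested against the setup above: give each spider its own custom_settings with its own FEEDS entry, and share a single module-level CrawlerRunner that runEverySecond can reference. Spider-level custom_settings take priority over the runner's settings, so each spider keeps writing to its own file.

import scrapy
from scrapy.crawler import CrawlerRunner
from twisted.internet import task, reactor

class Livescores1(scrapy.Spider):
    name = 'Total'
    # Per-spider feed settings; these override whatever the shared runner uses.
    custom_settings = {
        "FEEDS": {"Total.json": {"format": "json", "overwrite": True}},
    }

    def start_requests(self):
        yield scrapy.Request('https://www.livescores.com/football/turkey/super-lig/?tz=3&table=league-total')

    def parse(self, response):
        for total in response.css('td'):
            yield {'total': total.css('::text').get()}

# Livescores2 and Livescores3 would follow the same pattern,
# pointing their FEEDS at Home.json and Away.json respectively.

# One shared runner, defined at module level so runEverySecond can see it.
runner = CrawlerRunner()

def runEverySecond():
    runner.crawl(Livescores1)
    # runner.crawl(Livescores2)
    # runner.crawl(Livescores3)

loop = task.LoopingCall(runEverySecond)
loop.start(1.0)
reactor.run()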
I am trying to scrape all cars from the website www.webuycars.co.za.
I am using Scrapy to do this, and each page has 24 vehicles that I want to send to a JSON file.
From looking at the data, it seems that I am scraping only the first page, or overwriting the variable used to create the JSON file.
import json
import scrapy
from scrapy.crawler import CrawlerProcess

class carSpider(scrapy.Spider):
    name = 'car'
    body = {"to":24,"size":24,"type":"All","filter_type":"all","subcategory":None,"q":"","Make":None,"Roadworthy":None,"Auctions":[],"Model":None,"Variant":None,"DealerKey":None,"FuelType":None,"BodyType":None,"Gearbox":None,"AxleConfiguration":None,"Colour":None,"FinanceGrade":None,"Priced_Amount_Gte":0,"Priced_Amount_Lte":0,"MonthlyInstallment_Amount_Gte":0,"MonthlyInstallment_Amount_Lte":0,"auctionDate":None,"auctionEndDate":None,"auctionDurationInSeconds":None,"Kilometers_Gte":0,"Kilometers_Lte":0,"Priced_Amount_Sort":"","Bid_Amount_Sort":"","Kilometers_Sort":"","Year_Sort":"","Auction_Date_Sort":"","Auction_Lot_Sort":"","Year":[],"Price_Update_Date_Sort":"","Online_Auction_Date_Sort":"","Online_Auction_In_Progress":""}

    def start_requests(self):
        yield scrapy.Request(
            url='https://website-elastic-api.webuycars.co.za/api/search',
            callback=self.parse,
            body=json.dumps(self.body),
            method="POST",
            headers={
                "content-type": "application/json",
                "User-Agent": "mozilla/5.0"
            }
        )

    def parse(self, response):
        response = json.loads(response.body)
        cars = []
        filename = "webuycar.json"
        for item in range(0, 6528, 24):
            response['total']['value'] = item
            cars.append(response['data'])
        with open(filename, "w") as f:
            json.dump(cars, f, indent=4)
        for resp in response['data']:
            yield {
                'Title': resp['OnlineDescription']
            }

# Code that runs the spider
process = CrawlerProcess()
process.crawl(carSpider)
process.start()
I would like to fix this, as it hurts the accuracy of the database I have created and fills it with redundant data.
I have looked at my JSON file to see if the issue was in the extraction. It seems that my web scraper is the problem. I would appreciate some thoughts on this.
You shouldn't try to dump the data into a file from the parse method. You should either use command line arguments or, when running as a script like in your example, use feed exports.
Like this:
import json
import scrapy
from scrapy.crawler import CrawlerProcess

class carSpider(scrapy.Spider):
    name = 'car'
    body = {"to":24,"size":24,"type":"All","filter_type":"all","subcategory":None,"q":"","Make":None,"Roadworthy":None,"Auctions":[],"Model":None,"Variant":None,"DealerKey":None,"FuelType":None,"BodyType":None,"Gearbox":None,"AxleConfiguration":None,"Colour":None,"FinanceGrade":None,"Priced_Amount_Gte":0,"Priced_Amount_Lte":0,"MonthlyInstallment_Amount_Gte":0,"MonthlyInstallment_Amount_Lte":0,"auctionDate":None,"auctionEndDate":None,"auctionDurationInSeconds":None,"Kilometers_Gte":0,"Kilometers_Lte":0,"Priced_Amount_Sort":"","Bid_Amount_Sort":"","Kilometers_Sort":"","Year_Sort":"","Auction_Date_Sort":"","Auction_Lot_Sort":"","Year":[],"Price_Update_Date_Sort":"","Online_Auction_Date_Sort":"","Online_Auction_In_Progress":""}
    custom_settings = {
        "FEEDS": {
            "webuycar.json": {
                'format': 'json',
                'encoding': 'utf8',
                'store_empty': False,
                'indent': 4
            }
        }
    }

    def start_requests(self):
        yield scrapy.Request(
            url='https://website-elastic-api.webuycars.co.za/api/search',
            callback=self.parse,
            body=json.dumps(self.body),
            method="POST",
            headers={
                "content-type": "application/json",
                "User-Agent": "mozilla/5.0"
            }
        )

    def parse(self, response):
        data = response.json()
        for item in range(0, 6528, 24):
            data['total']['value'] = item
            yield data
        for item in data['data']:
            yield {'Title': item['OnlineDescription']}

# Code that runs the spider
process = CrawlerProcess()
process.crawl(carSpider)
process.start()
I'm not totally sure this solves your problem, because you are still scraping a single URL, but this should avoid overwriting the file.
Although when I tested this, the output JSON file was 7143882 lines long, which makes sense: the parse method yields the entire API response once for every step of the range loop.
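As an aside, the command-line-arguments route mentioned above would be something like the following, assuming the spider lives inside a Scrapy project (the -O flag overwrites the output file on each run):
scrapy crawl car -O webuycar.json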
Update:
After taking a closer look at your code, I think this is closer to what you are actually trying to achieve. It makes many calls to the API and extracts all 24 OnlineDescription fields from each API response.
import json
import scrapy
from scrapy.crawler import CrawlerProcess

class carSpider(scrapy.Spider):
    name = 'car'
    body = {"to":24,"size":24,"type":"All","filter_type":"all","subcategory":None,"q":"","Make":None,"Roadworthy":None,"Auctions":[],"Model":None,"Variant":None,"DealerKey":None,"FuelType":None,"BodyType":None,"Gearbox":None,"AxleConfiguration":None,"Colour":None,"FinanceGrade":None,"Priced_Amount_Gte":0,"Priced_Amount_Lte":0,"MonthlyInstallment_Amount_Gte":0,"MonthlyInstallment_Amount_Lte":0,"auctionDate":None,"auctionEndDate":None,"auctionDurationInSeconds":None,"Kilometers_Gte":0,"Kilometers_Lte":0,"Priced_Amount_Sort":"","Bid_Amount_Sort":"","Kilometers_Sort":"","Year_Sort":"","Auction_Date_Sort":"","Auction_Lot_Sort":"","Year":[],"Price_Update_Date_Sort":"","Online_Auction_Date_Sort":"","Online_Auction_In_Progress":""}
    custom_settings = {
        "FEEDS": {
            "webuycar.json": {
                'format': 'json',
                'encoding': 'utf8',
                'store_empty': False,
                'indent': 4
            }
        }
    }

    def start_requests(self):
        for i in range(24, 6528, 24):
            self.body["to"] = i
            yield scrapy.Request(
                url='https://website-elastic-api.webuycars.co.za/api/search',
                callback=self.parse,
                body=json.dumps(self.body),
                method="POST",
                headers={
                    "content-type": "application/json",
                    "User-Agent": "mozilla/5.0"
                }
            )

    def parse(self, response):
        data = response.json()
        for item in data['data']:
            yield {"Title": item['OnlineDescription']}

# Code that runs the spider
process = CrawlerProcess()
process.crawl(carSpider)
process.start()
I am trying to fetch a dynamically loaded phone number from this page (among others): https://www.europages.fr/LEMMERFULLWOOD-GMBH/DEU241700-00101.html
The phone number appears after a click on the div element with the class page-action click-tel. I am trying to get to this data with scrapy_splash, using a Lua script to execute the click.
After pulling Splash on my Ubuntu machine:
sudo docker run -d -p 8050:8050 scrapinghub/splash
Here is my code so far (I am using a proxy service):
import scrapy
from bs4 import BeautifulSoup

class company(scrapy.Spider):
    name = "company"
    custom_settings = {
        "FEEDS": {
            '/home/ubuntu/scraping/europages/data/company.json': {
                'format': 'jsonlines',
                'encoding': 'utf8'
            }
        },
        "DOWNLOADER_MIDDLEWARES": {
            'scrapy_splash.SplashCookiesMiddleware': 723,
            'scrapy_splash.SplashMiddleware': 725,
            'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
        },
        "SPLASH_URL": 'http://127.0.0.1:8050/',
        "SPIDER_MIDDLEWARES": {
            'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
        },
        "DUPEFILTER_CLASS": 'scrapy_splash.SplashAwareDupeFilter',
        "HTTPCACHE_STORAGE": 'scrapy_splash.SplashAwareFSCacheStorage'
    }
    allowed_domains = ['www.europages.fr']

    def __init__(self, company_url):
        self.company_url = "https://www.europages.fr/LEMMERFULLWOOD-GMBH/DEU241700-00101.html"  ## forced
        self.item = company_item()  # item class defined elsewhere in the project
        self.script = """
        function main(splash)
            splash.private_mode_enabled = false
            assert(splash:go(splash.args.url))
            assert(splash:wait(0.5))
            local element = splash:select('.page-action.click-tel')
            local bounds = element:bounds()
            element:mouse_click{x=bounds.width/2, y=bounds.height/2}
            splash:wait(4)
            return splash:html()
        end
        """

    def start_requests(self):
        yield scrapy.Request(
            url=self.company_url,
            callback=self.parse,
            dont_filter=True,
            meta={
                'splash': {
                    'endpoint': 'execute',
                    'url': self.company_url,
                    'args': {
                        'lua_source': self.script,
                        'proxy': 'http://usernamepassword@proxyhost:port',
                        'html': 1,
                        'iframes': 1
                    }
                }
            }
        )

    def parse(self, response):
        soup = BeautifulSoup(response.body, "lxml")
        print(soup.find('div', {'class': 'page-action click-tel'}))
The problem is that it has no effect; I still get nothing, as if no button were clicked.
Shouldn't return splash:html() return the result of element:mouse_click{x=bounds.width/2, y=bounds.height/2} in response.body, given that element:mouse_click() waits for the changes to appear?
Am I missing something here?
Most times when sites load data dynamically, they do so via background XHR requests to the server. A close examination of the network tab when you click the 'telephone' button shows that the browser sends an XHR request to the URL https://www.europages.fr/InfosTelecomJson.json?uidsid=DEU241700-00101&id=1330. You can emulate the same in your spider and avoid using scrapy-splash altogether. See the sample implementation below using one URL:
import scrapy
from urllib.parse import urlparse

class Company(scrapy.Spider):
    name = 'company'
    allowed_domains = ['www.europages.fr']
    start_urls = ['https://www.europages.fr/LEMMERFULLWOOD-GMBH/DEU241700-00101.html']

    def parse(self, response):
        # obtain the id and uuid to make the xhr request
        uuid = urlparse(response.url).path.split('/')[-1].rstrip('.html')
        id = response.xpath("//div[@itemprop='telephone']/a/@onclick").re_first(r"event,'(\d+)',")
        yield scrapy.Request(f"https://www.europages.fr/InfosTelecomJson.json?uidsid={uuid}&id={id}", callback=self.parse_address)

    def parse_address(self, response):
        yield response.json()
I get the response
{'digits': '+49 220 69 53 30'}
I'm trying to scrape a website using Scrapy. To get the content I want, I need to log in first. The URL is login_url.
There I have a form as follows:
My code is as follows:
import scrapy
from scrapy.http import FormRequest
from scrapy.shell import inspect_response
from scrapy.crawler import CrawlerProcess

LOGIN_URL1 = "https://www.partslink24.com/partslink24/user/login.do"

class PartsSpider(scrapy.Spider):
    name = "parts"
    login_url = LOGIN_URL1
    start_urls = [
        login_url,
    ]

    def parse(self, response):
        # COMPANY_ID, USERNAME and PASSWORD are defined elsewhere
        form_data = {
            'accountLogin': COMPANY_ID,
            'userLogin': USERNAME,
            'loginBean.password': PASSWORD
        }
        yield FormRequest(url=self.login_url, formdata=form_data, callback=self.parse1)

    def parse1(self, response):
        inspect_response(response, self)
        print("RESPONSE: {}".format(response))

def start_scraper(vin_number):
    process = CrawlerProcess()
    process.crawl(PartsSpider)
    process.start()
But the problem is that they check whether the session is activated, and I get an error: the form cannot be submitted.
When I check the response I get after submitting the login form, I see the following error:
The code on their site that performs this check is as follows:
var JSSessionChecker = {
    check: function()
    {
        if (!Ajax.getTransport())
        {
            alert('NO_AJAX_IN_BROWSER');
        }
        else
        {
            new Ajax.Request('/partslink24/checkSessionCookies.do', {
                method: 'post',
                onSuccess: function(transport)
                {
                    if (transport.responseText != 'true')
                    {
                        if (Object.isFunction(JSSessionChecker.showError)) JSSessionChecker.showError();
                    }
                },
                onFailure: function(e)
                {
                    if (Object.isFunction(JSSessionChecker.showError)) JSSessionChecker.showError();
                },
                onException: function (request, e)
                {
                    if (Object.isFunction(JSSessionChecker.showError)) JSSessionChecker.showError();
                }
            });
        }
    },
    showError: function()
    {
        var errorElement = $('sessionCheckError');
        if (errorElement)
        {
            errorElement.show();
        }
    }
};
JSSessionChecker.check();
And on success it returns only true.
Is there any way that I can activate the session before submitting a form?
Thanks in advance.
EDIT
The error page which I get using the answer from @fam.
Please check this code.
import scrapy
from scrapy.crawler import CrawlerProcess

LOGIN_URL1 = "https://www.partslink24.com/partslink24/user/login.do"

class PartsSpider(scrapy.Spider):
    name = "parts"
    login_url = LOGIN_URL1
    start_urls = [
        login_url,
    ]

    def parse(self, response):
        form_data = {
            'loginBean.accountLogin': "COMPANY_ID",
            'loginBean.userLogin': "USERNAME",
            'loginBean.sessionSqueezeOut': "false",
            'loginBean.password': "PASSWORD",
            'loginBean.userOffsetSec': "18000",
            'loginBean.code2f': ""
        }
        yield scrapy.FormRequest.from_response(response=response, url=self.login_url, formdata=form_data, callback=self.parse1)

    def parse1(self, response):
        # inspect_response(response, self)
        print("RESPONSE: {}".format(response))

def start_scraper(vin_number):
    process = CrawlerProcess()
    process.crawl(PartsSpider)
    process.start()
I am not getting an error and the response is as follows:
RESPONSE: <200 https://www.partslink24.com/partslink24/user/login.do>
EDIT:
The following code is for Selenium. It will log you into the page easily. You only need to download the chrome driver and install Selenium.
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time

chrome_options = Options()
# chrome_options.add_argument("--headless")
driver = webdriver.Chrome(executable_path="./chromedriver", options=chrome_options)

driver.get("https://www.partslink24.com/partslink24/user/login.do")

# enter the form fields
company_ID = "company id"
user_name = "user name"
password = "password"

company_ID_input = driver.find_element_by_xpath("//input[@name='accountLogin']")
company_ID_input.send_keys(company_ID)
time.sleep(1)

user_name_input = driver.find_element_by_xpath("//input[@name='userLogin']")
user_name_input.send_keys(user_name)
time.sleep(1)

password_input = driver.find_element_by_xpath("//input[@id='inputPassword']")
password_input.send_keys(password)
time.sleep(1)

# click the search button and get links from first page
click_btn = driver.find_element_by_xpath("//a[@tabindex='5']")
click_btn.click()
time.sleep(5)
Don't forget to change the credentials.
I am new to Scrapy and Python in general.
Here is the code:
import scrapy
import json

class MOOCSpider(scrapy.Spider):
    name = 'mooc'
    start_urls = ['https://www.plurk.com/search?q=italy']
    custom_settings = {
        'DUPEFILTER_CLASS': 'scrapy.dupefilters.BaseDupeFilter',
    }
    global_id = 1458122036

    def parse(self, response):
        url = 'https://www.plurk.com/Search/search2'
        headers = {
            ...omitted...
        }
        for i in range(1, 10):
            formdata = {
                "after_id": str(self.global_id)
            }
            yield scrapy.FormRequest(url, callback=self.parse_api, formdata=formdata, headers=headers)

    def parse_api(self, response):
        raw = response.body
        data = json.loads(raw)
        posts = data["plurks"]
        users = data["users"]
        l = len(posts)
        i = 0
        for post in posts:
            i = i + 1
            if (i == l):
                self.global_id = post["plurk_id"]

            ...omitted code...

            yield {
                'Author': user_name,
                'Body': post['content'],
                'app': 'plurk'
            }
The problem I have is that Scrapy first makes all the requests in the for loop and only then executes the code in parse_api.
What I would like to do is let Scrapy do one iteration of the for loop, call the callback function, wait for it to return, and then do another iteration.
This is because the id that I need for the next request will be set in the global_id variable by the callback function.
You can't achieve this by scheduling requests in a loop.
You can implement this only if you schedule just one (next) request per parse/parse_api method call:
import scrapy
import json

class MOOCSpider(scrapy.Spider):
    name = 'mooc'
    start_urls = ['https://www.plurk.com/search?q=italy']
    custom_settings = {
        'DUPEFILTER_CLASS': 'scrapy.dupefilters.BaseDupeFilter',
        'DOWNLOAD_DELAY': 5,
        "USER_AGENT": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36",
    }

    def parse(self, response):
        # schedule only the first request (without a loop)
        formdata = {
            "query": 'italy',
            "start_date": "2019/12",
            "end_date": "2020/12",
            "after_id": '1458122036',  # <- your initial global_id
        }
        yield scrapy.FormRequest('https://www.plurk.com/Search/search2', callback=self.parse_api, formdata=formdata)

    def parse_api(self, response):
        data = json.loads(response.body)
        after_id = None
        for post in data["plurks"]:
            after_id = post["plurk_id"]
            yield {
                'Author': data["users"][str(post["owner_id"])]["nick_name"],  # instead of user_id?
                'Body': post["content"],
                'app': 'plurk'
            }
        # after the end of this loop, after_id should contain the required data for the next request

        # instead of a separate loop variable, response.meta["depth"] is used to limit the number of requests
        if response.meta["depth"] <= 11 and after_id:  # schedule next request
            formdata = {
                "query": 'italy',
                "start_date": "2019/12",
                "end_date": "2020/12",
                "after_id": str(after_id),
            }
            yield scrapy.FormRequest('https://www.plurk.com/Search/search2', callback=self.parse_api, formdata=formdata)
Answering my own question:
Now the parse method makes just one request and calls parse_api once. parse_api processes the response and sets the global_id variable. Once it has finished processing its own response, it makes another request, passing itself as the callback function.
By doing this you are guaranteed that the global_id variable will be properly set, since the new request is made only after parse_api has finished running.
request.cb_kwargs["loop_l"] is used to pass an additional argument to the callback function. This time it's a counter that controls the number of requests we want to make. When the counter reaches the limit (200 in the code below), we stop the crawling.
import scrapy
import json

plurk_id = []

class MOOCSpider(scrapy.Spider):
    name = 'mooc'
    start_urls = ['https://www.plurk.com/search?q=']
    custom_settings = {
        'DUPEFILTER_CLASS': 'scrapy.dupefilters.BaseDupeFilter',
    }
    global_id = 1455890167
    url = 'https://www.plurk.com/Search/search2'
    headers = {
        ...OMITTED...
    }

    def parse(self, response):
        formdata = {
            "after_id": str(self.global_id)
        }
        request = scrapy.FormRequest(self.url, callback=self.parse_api, formdata=formdata, headers=self.headers)
        request.cb_kwargs["loop_l"] = str(0)
        yield request

    def parse_api(self, response, loop_l):
        int_loop_l = int(loop_l)
        int_loop_l = int_loop_l + 1

        if (int_loop_l == 200):
            return

        raw = response.body
        data = json.loads(raw)

        ...omitted code...
        ... GET AND SET THE NEW global_id FROM THE RESPONSE ...

        # make another request with the new id
        formdata = {
            "after_id": str(self.global_id)
        }
        request = scrapy.FormRequest(self.url, callback=self.parse_api, formdata=formdata, headers=self.headers)
        request.cb_kwargs["loop_l"] = str(int_loop_l)
        yield request