Scrapy-Splash Requested URL vs Real URL - python

I'm trying to generate instant reports for some of my web pages using Splash and the scrapy-splash Python module.
The problem is that when a website redirects, I can't obtain the correct final URL the way Splash's render.json does.
For example, on localhost:8050/render.json the result of rendering www.google.com is:
{"requestedUrl": "http://www.google.com/",
"url": "https://www.google.com/?gws_rd=ssl",
"title": "Google", "geometry": [0, 0, 1024, 768]}
But inside my Python script I only manage to obtain "http://www.google.com".
My code is:
from scrapy import Request
from scrapy_splash import SplashRequest

def start_requests(self):
    return [Request(self.url, callback=self.parse, dont_filter=True)]

def parse(self, response):
    splash_args = {'wait': 1}
    return SplashRequest(
        response.url,
        self.parse_link,
        args=splash_args,
        endpoint='render.json',
    )

def parse_link(self, response):
    result = {
        'requested_url': response.data['requestedUrl'],
        'real_url': response.data['url'],
        'response': response.request.url,
        'splash_url': response.real_url,
    }
    return result
But all of these return:
{"requested_url": "http://www.google.com/",
"real_url": "http://www.google.com/",
"response": "http://127.0.0.1:8050/render.json",
"splash_url": "http://127.0.0.1:8050/render.json"}
Is there any way to overcome this?
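One way to sanity-check what Splash itself reports, independently of scrapy-splash, is to call the render.json endpoint directly. A minimal sketch, assuming Splash is running on localhost:8050 as above:

import requests

# Query Splash's render.json HTTP API directly and inspect the fields it
# reports for a redirecting page (sketch; assumes Splash on localhost:8050).
resp = requests.get(
    "http://localhost:8050/render.json",
    params={"url": "http://www.google.com", "wait": 1},
)
data = resp.json()
print(data.get("requestedUrl"))  # the URL that was requested
print(data.get("url"))           # the final URL after any redirects

If the "url" field here shows the redirected address, the difference is likely in how the SplashRequest is built or handled on the Scrapy side rather than in Splash itself.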

Related

Not getting any data scraped when running the following code using Scrapy on Python

This is the spider I am using to scrape email addresses and names of restaurants from TripAdvisor:
import scrapy

class RestaurantSpider(scrapy.Spider):
    name = 'tripadvisorbot'
    start_urls = [
        'https://www.tripadvisor.com/Restaurants-g188633-The_Hague_South_Holland_Province.html#EATERY_OVERVIEW_BOX'
    ]

    def parse(self, response):
        for listing in response.xpath('//div[contains(@class,"__cellContainer--")]'):
            link = listing.xpath('.//a[contains(@class,"__restaurantName--")]/@href').get()
            text = listing.xpath('.//a[contains(@class,"__restaurantName--")]/text()').get()
            complete_url = response.urljoin(link)
            yield scrapy.Request(
                url=complete_url,
                callback=self.parse_listing,
                meta={'link': complete_url, 'text': text}
            )
        next_url = response.xpath('//*[contains(@class,"pagination")]/*[contains(@class,"next")]/@href').get()
        if next_url:
            yield scrapy.Request(response.urljoin(next_url), callback=self.parse)

    def parse_listing(self, response):
        link = response.meta['link']
        text = response.meta['text']
        email = response.xpath('//a[contains(@href, "mailto:")]/@href').get()
        yield {'Link': link, 'Text': text, 'Email': email}
I run the following command in the Anaconda prompt to run the above spider and save the output as a JSON file:
scrapy crawl tripadvisorbot -O tripadvisor.json
No data gets scraped; a JSON file is created, but it is empty.
I am not sure what the problem is. I am quite new to web scraping and Python coding in general, so all help would be much appreciated.
Thanks.
On my computer there are no classes __cellContainer-- or __restaurantName-- in the HTML.
The page uses random characters as class names.
However, every item is a div placed directly inside <div data-test-target="restaurants-list">, and I use that to get all items.
Then I take the first <a> (which contains an image instead of the name), skip text and complete_url, and run response.follow(link) directly.
When I get the detail page, I use response.url to get complete_url and the h1 to get text.
You can put all the code in one file and run it with python script.py without creating a project.
import scrapy

class RestaurantSpider(scrapy.Spider):
    name = 'tripadvisorbot'
    start_urls = [
        'https://www.tripadvisor.com/Restaurants-g188633-The_Hague_South_Holland_Province.html#EATERY_OVERVIEW_BOX'
    ]

    def parse(self, response):
        for listing in response.xpath('//div[@data-test-target="restaurants-list"]/div'):
            url = listing.xpath('.//a/@href').get()
            print('link:', url)
            if url:
                yield response.follow(url, callback=self.parse_listing)
        next_url = response.xpath('//*[contains(@class,"pagination")]/*[contains(@class,"next")]/@href').get()
        if next_url:
            yield response.follow(next_url)

    def parse_listing(self, response):
        print('url:', response.url)
        link = response.url
        text = response.xpath('//h1[@data-test-target]/text()').get()
        email = response.xpath('//a[contains(@href, "mailto:")]/@href').get()
        yield {'Link': link, 'Text': text, 'Email': email}

# --- run without project and save data in `output.json` ---
from scrapy.crawler import CrawlerProcess

c = CrawlerProcess({
    'USER_AGENT': 'Mozilla/5.0',
    'FEEDS': {'output.json': {'format': 'json'}},  # new in 2.1
})
c.crawl(RestaurantSpider)
c.start()
Part of the result:
{"Link": "https://www.tripadvisor.com/Restaurant_Review-g188633-d4766834-Reviews-Bab_mansour-The_Hague_South_Holland_Province.html", "Text": "Bab mansour", "Email": null},
{"Link": "https://www.tripadvisor.com/Restaurant_Review-g188633-d3935897-Reviews-Milos-The_Hague_South_Holland_Province.html", "Text": "Milos", "Email": null},
{"Link": "https://www.tripadvisor.com/Restaurant_Review-g188633-d10902380-Reviews-Nefeli_deli-The_Hague_South_Holland_Province.html", "Text": "Nefeli deli", "Email": "mailto:info#foodloversnl.com?subject=?"},
{"Link": "https://www.tripadvisor.com/Restaurant_Review-g188633-d8500914-Reviews-Waterkant-The_Hague_South_Holland_Province.html", "Text": "Waterkant", "Email": "mailto:alles#dewaterkant.nl?subject=?"},
{"Link": "https://www.tripadvisor.com/Restaurant_Review-g188633-d4481254-Reviews-Salero_Minang-The_Hague_South_Holland_Province.html", "Text": "Salero Minang", "Email": null},
{"Link": "https://www.tripadvisor.com/Restaurant_Review-g188633-d6451334-Reviews-Du_Passage-The_Hague_South_Holland_Province.html", "Text": "Du Passage", "Email": "mailto:info#dupassage.nl?subject=?"},
{"Link": "https://www.tripadvisor.com/Restaurant_Review-g188633-d4451714-Reviews-Lee_s_Garden-The_Hague_South_Holland_Province.html", "Text": "Lee's Garden", "Email": null},
{"Link": "https://www.tripadvisor.com/Restaurant_Review-g188633-d2181693-Reviews-Warunee-The_Hague_South_Holland_Province.html", "Text": "Warunee", "Email": "mailto:info#warunee.nl?subject=?"},
{"Link": "https://www.tripadvisor.com/Restaurant_Review-g188633-d8064876-Reviews-Sallo_s-The_Hague_South_Holland_Province.html", "Text": "Sallo's", "Email": "mailto:info#sallos.nl?subject=?"},
{"Link": "https://www.tripadvisor.com/Restaurant_Review-g188633-d16841532-Reviews-Saravanaa_Bhavan_Den_Haag-The_Hague_South_Holland_Province.html", "Text": "Saravanaa Bhavan Den Haag", "Email": "mailto:hsbamsterdam#saravanabhavan.com?subject=?"},

Scrapy - Splash fetch dynamic data

I am trying to fetch a dynamic phone number from this page (among others): https://www.europages.fr/LEMMERFULLWOOD-GMBH/DEU241700-00101.html
The phone number appears after a click on the div element with the class page-action click-tel. I am trying to get this data with scrapy-splash, using a Lua script to execute the click.
After pulling Splash on my Ubuntu machine:
sudo docker run -d -p 8050:8050 scrapinghub/splash
Here is my code so far (I am using a proxy service):
import scrapy
from bs4 import BeautifulSoup

class company(scrapy.Spider):
    name = "company"
    custom_settings = {
        "FEEDS": {
            '/home/ubuntu/scraping/europages/data/company.json': {
                'format': 'jsonlines',
                'encoding': 'utf8'
            }
        },
        "DOWNLOADER_MIDDLEWARES": {
            'scrapy_splash.SplashCookiesMiddleware': 723,
            'scrapy_splash.SplashMiddleware': 725,
            'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
        },
        "SPLASH_URL": 'http://127.0.0.1:8050/',
        "SPIDER_MIDDLEWARES": {
            'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
        },
        "DUPEFILTER_CLASS": 'scrapy_splash.SplashAwareDupeFilter',
        "HTTPCACHE_STORAGE": 'scrapy_splash.SplashAwareFSCacheStorage'
    }
    allowed_domains = ['www.europages.fr']

    def __init__(self, company_url):
        self.company_url = "https://www.europages.fr/LEMMERFULLWOOD-GMBH/DEU241700-00101.html"  ## forced
        self.item = company_item()
        self.script = """
        function main(splash)
            splash.private_mode_enabled = false
            assert(splash:go(splash.args.url))
            assert(splash:wait(0.5))
            local element = splash:select('.page-action.click-tel')
            local bounds = element:bounds()
            element:mouse_click{x=bounds.width/2, y=bounds.height/2}
            splash:wait(4)
            return splash:html()
        end
        """

    def start_requests(self):
        yield scrapy.Request(
            url=self.company_url,
            callback=self.parse,
            dont_filter=True,
            meta={
                'splash': {
                    'endpoint': 'execute',
                    'url': self.company_url,
                    'args': {
                        'lua_source': self.script,
                        'proxy': 'http://usernamepassword@proxyhost:port',
                        'html': 1,
                        'iframes': 1
                    }
                }
            }
        )

    def parse(self, response):
        soup = BeautifulSoup(response.body, "lxml")
        print(soup.find('div', {'class': 'page-action click-tel'}))
The problem is that it has no effect; I still get nothing, as if no button were clicked.
Shouldn't return splash:html() return the result of element:mouse_click{x=bounds.width/2, y=bounds.height/2} in response.body, since element:mouse_click() waits for the changes to appear?
Am I missing something here?
Most times when sites load data dynamically, they do so via background XHR requests to the server. A close examination of the network tab when you click the 'telephone' button shows that the browser sends an XHR request to the URL https://www.europages.fr/InfosTelecomJson.json?uidsid=DEU241700-00101&id=1330. You can emulate the same in your spider and avoid using Scrapy Splash altogether. See the sample implementation below using one URL:
import scrapy
from urllib.parse import urlparse

class Company(scrapy.Spider):
    name = 'company'
    allowed_domains = ['www.europages.fr']
    start_urls = ['https://www.europages.fr/LEMMERFULLWOOD-GMBH/DEU241700-00101.html']

    def parse(self, response):
        # obtain the id and uuid to make the xhr request
        uuid = urlparse(response.url).path.split('/')[-1].rstrip('.html')
        id = response.xpath("//div[@itemprop='telephone']/a/@onclick").re_first(r"event,'(\d+)',")
        yield scrapy.Request(f"https://www.europages.fr/InfosTelecomJson.json?uidsid={uuid}&id={id}", callback=self.parse_address)

    def parse_address(self, response):
        yield response.json()
I get the response:
{'digits': '+49 220 69 53 30'}
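If the phone number needs to be combined with other fields scraped from the listing page, one option (a sketch, not part of the original answer; the company-name selector is a hypothetical placeholder) is to carry the partial item into the JSON callback via cb_kwargs:

import scrapy
from urllib.parse import urlparse

class CompanyWithDetails(scrapy.Spider):
    # Sketch: combine listing-page fields with the phone number from the
    # InfosTelecomJson.json endpoint by passing partial data via cb_kwargs.
    name = 'company_with_details'
    allowed_domains = ['www.europages.fr']
    start_urls = ['https://www.europages.fr/LEMMERFULLWOOD-GMBH/DEU241700-00101.html']

    def parse(self, response):
        item = {
            'url': response.url,
            'name': response.xpath('//h1/text()').get(),  # hypothetical selector
        }
        uuid = urlparse(response.url).path.split('/')[-1].rstrip('.html')
        id_ = response.xpath("//div[@itemprop='telephone']/a/@onclick").re_first(r"event,'(\d+)',")
        yield scrapy.Request(
            f"https://www.europages.fr/InfosTelecomJson.json?uidsid={uuid}&id={id_}",
            callback=self.parse_phone,
            cb_kwargs={'item': item},
        )

    def parse_phone(self, response, item):
        item['phone'] = response.json().get('digits')
        yield item

cb_kwargs keeps the partial data attached to that specific request, so each listing's fields end up in the same item as its phone number.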

Scraping Infinite scroll page

I am trying to scrape an infinite-scroll AJAX page, but I'm not able to go to the next page and yield the items. I'm able to get response.text. I tried debugging but could not find a solution. Can anyone help me out with this?
import json
import scrapy

class InfiniteScrollingSpider(scrapy.Spider):
    name = 'wegotthiscovered_review'
    scrolling_url = 'https://wegotthiscovered.com/wp-admin/admin-ajax.php'

    def start_requests(self):
        yield scrapy.FormRequest(
            self.scrolling_url,
            formdata={
                'action': "face3_infinite_scroll",
                'page': '1',
                'attrs': "{\"id\":\"1\",\"order\":\"\",\"orderby\":\"\",\"catnames\":\"movies+reviews\",\"postnotin\":\"905069,904520,904521,903475,901576,900303,893944,895136,891795,886876,884402,881283\",\"timestampbefore\":1591800990}"
            },
            callback=self.parse_page,
            meta={'page': 1},
        )

    def parse_page(self, response):
        next_page = response.meta.get('page') + 1
        print('next_page:', next_page)
        print(response.text)
        json_data = json.loads(response.text)
        print(json_data.keys())
        print('success:', json_data.get('success'))
        print('data:', json_data.get('data'))
        if not json_data.get('success') or not json_data.get('data') or not json_data['data'].get('content'):
            return
        articles = scrapy.Selector(text=json_data['data']['content']).css('article')
        for article in articles:
            yield {
                'page_title': article.css('h4 ::text').extract_first().strip(),
                'review_link': article.css('h4 ::attr(href)').extract_first().strip(),
            }
        print('next page >>>')
        yield scrapy.FormRequest(
            self.scrolling_url,
            formdata={
                'action': "face3_infinite_scroll",
                'page': str(next_page),
                "query_args": "{\"archive_type\":\"masonry\",\"show_first\":false,\"columns\":2,\"meta_cat\":false,\"meta\":true,\"summary\":true,\"standard_summary\":\"excerpt\",\"more_button\":false,\"reduce_margin\":false,\"orientation\":\"landscape\",\"list_width\":\"6\",\"widgets\":false,\"widgets_sidebar\":\"sidebar-archive\",\"widgets_after\":3,\"widgets_repeat\":false,\"highlight\":\"featured\",\"pagination_type\":\"ajax\",\"infinite_load\":true}"
            },
            callback=self.parse_page,
            meta={'page': next_page},
        )

How do I scrape this kind of dynamically generated website data?

I'm trying to scrape an e-commerce website, example link: https://www.lazada.sg/products/esogoal-2-in-1-selfie-stick-tripod-bluetooth-selfie-stand-with-remote-shutter-foldable-tripod-monopod-i279432816-s436738661.html?mp=1
The data is rendered via React, and when I scrape a few links most of the data is returned as null. When I view the page source I cannot find the actual HTML that is available via inspect element, just JSON inside JavaScript tags. I tested running the Scrapy scraper a few times on the same links, and data that was not found before actually returns content, so it happens somewhat randomly. I cannot figure out how I should scrape this kind of website.
I'm also using a pool of user agents and pauses between requests.
script = '''
    function main(splash, args)
        assert(splash:go(args.url))
        assert(splash:wait(1.5))
        return splash:html()
    end
'''

def start_requests(self):
    url = [
        'https://www.lazada.sg/products/esogoal-tactical-sling-bag-outdoor-chest-pack-shoulder-backpack-military-sport-bag-for-trekking-camping-hiking-rover-sling-daypack-for-men-women-i204814494-s353896924.html?mp=1',
        'https://www.lazada.sg/products/esogoal-2-in-1-selfie-stick-tripod-bluetooth-selfie-stand-with-remote-shutter-foldable-tripod-monopod-i279432816-s436738661.html?mp=1',
        'https://www.lazada.sg/products/esogoal-selfie-stick-tripod-extendable-selfie-stick-monopod-with-integrated-tripod-and-bluetooth-remote-shutter-wireless-selfie-stick-tripod-for-cellphonecameras-i205279097-s309050125.html?mp=1',
        'https://www.lazada.sg/products/esogoal-mini-umbrella-travel-umbrella-sun-rain-umbrella8-ribs-98cm-big-surface-lightweight-compact-parasol-uv-protection-for-men-women-i204815487-s308312226.html?mp=1',
        'https://www.lazada.sg/products/esogoal-2-in-1-selfie-stick-tripod-bluetooth-selfie-stand-with-remote-shutter-foldable-tripod-monopod-i279432816-s436738661.html?mp=1'
    ]
    for link in url:
        yield SplashRequest(url=link, callback=self.parse, endpoint='render.html', args={'wait': 0.5, 'lua_source': self.script}, dont_filter=True)

def parse(self, response):
    yield {
        'title': response.xpath("//span[@class='pdp-mod-product-badge-title']/text()").extract_first(),
        'price': response.xpath("//span[contains(@class, 'pdp-price')]/text()").extract_first(),
        'description': response.xpath("//div[@id='module_product_detail']").extract_first()
    }
Try this: pass 'execute' as the endpoint of the Splash request instead of 'render.html'.
import scrapy
from scrapy_splash import SplashRequest

class DynamicSpider(scrapy.Spider):
    name = 'products'
    url = [
        'https://www.lazada.sg/products/esogoal-tactical-sling-bag-outdoor-chest-pack-shoulder-backpack-military-sport-bag-for-trekking-camping-hiking-rover-sling-daypack-for-men-women-i204814494-s353896924.html?mp=1',
        'https://www.lazada.sg/products/esogoal-2-in-1-selfie-stick-tripod-bluetooth-selfie-stand-with-remote-shutter-foldable-tripod-monopod-i279432816-s436738661.html?mp=1',
        'https://www.lazada.sg/products/esogoal-selfie-stick-tripod-extendable-selfie-stick-monopod-with-integrated-tripod-and-bluetooth-remote-shutter-wireless-selfie-stick-tripod-for-cellphonecameras-i205279097-s309050125.html?mp=1',
        'https://www.lazada.sg/products/esogoal-mini-umbrella-travel-umbrella-sun-rain-umbrella8-ribs-98cm-big-surface-lightweight-compact-parasol-uv-protection-for-men-women-i204815487-s308312226.html?mp=1',
        'https://www.lazada.sg/products/esogoal-2-in-1-selfie-stick-tripod-bluetooth-selfie-stand-with-remote-shutter-foldable-tripod-monopod-i279432816-s436738661.html?mp=1',
    ]
    script = """
        function main(splash, args)
            assert(splash:go(args.url))
            assert(splash:wait(1.5))
            return {
                html = splash:html()
            }
        end
    """

    def start_requests(self):
        for link in self.url:
            yield SplashRequest(
                url=link,
                callback=self.parse,
                endpoint='execute',
                args={'wait': 0.5, 'lua_source': self.script},
                dont_filter=True,
            )

    def parse(self, response):
        yield {
            'title': response.xpath("//span[@class='pdp-mod-product-badge-title']/text()").extract_first(),
            'price': response.xpath("//span[contains(@class, 'pdp-price')]/text()").extract_first(),
            'description': response.xpath("//div[@id='module_product_detail']/h2/text()").extract_first()
        }
And this is the result:
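Separately, since the question notes that the page source already contains the data as JSON inside JavaScript tags, a non-Splash alternative is to extract that JSON directly. This is only a sketch: the "__moduleData__" marker and the spider name are assumptions that must be checked against the real page source.

import json
import re
import scrapy

class LazadaEmbeddedJsonSpider(scrapy.Spider):
    # Sketch: instead of rendering the React page, look for the JSON blob
    # embedded in the page's <script> tags. The "__moduleData__" marker is a
    # hypothetical placeholder and must be verified in the actual source.
    name = 'lazada_embedded_json'
    start_urls = [
        'https://www.lazada.sg/products/esogoal-2-in-1-selfie-stick-tripod-bluetooth-selfie-stand-with-remote-shutter-foldable-tripod-monopod-i279432816-s436738661.html?mp=1',
    ]

    def parse(self, response):
        for script in response.xpath('//script/text()').getall():
            match = re.search(r'__moduleData__\s*=\s*(\{.*\})\s*;', script, re.DOTALL)
            if not match:
                continue
            try:
                data = json.loads(match.group(1))
            except json.JSONDecodeError:
                continue
            yield {'embedded_data': data}

If a usable JSON blob is present, this avoids both Splash rendering and the randomness of partially rendered HTML.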

Scrapy crawl multilayer: append data in a list and yield the item

The website structure I'm trying to parse using Scrapy is the following:
I'd like the extracted data to have this format:
[{
"project":{"projectname":"project1"},
"samples":["sample1's_content","sample2's_content","sample3's_content"]
},
{
"project":{"projectname":"project2"},
"samples":["sample1's_content","samples2's_content","sample3's_content"]
}]
I tried this:
import scrapy
from scrapy.selector import Selector
from item import Item

class Spider(scrapy.Spider):
    name = 'spider'

    def start_requests(self):
        url = "the main page's url"
        yield scrapy.Request(url=url, callback=self.parseProjectList)

    def parseProjectList(self, response):
        for url in Selector(Project_list):
            yield scrapy.Request(url=url, callback=self.parseProject)

    def parseProject(self, response):
        # scrape some data
        myItem = Item()
        myItem['samples'] = []
        myItem['project'] = {'projectname': projectname, ...}
        yield scrapy.Request(url=SampleListPage, callback=self.parseSampleListPage, meta={'myItem': myItem})

    def parseSampleListPage(self, response):
        for url in Selector(Sample_list):
            yield scrapy.Request(url=url, callback=self.parseSample, meta={'myItem': response.meta['myItem']})

    def parseSample(self, response):
        # parse some sample data
        response.meta['myItem']['samples'].append(sample_data)
I tried putting yield response.meta['myItem'] in parseSampleListPage:
def parseSampleListPage(self, response):
    for url in Selector(Sample_list):
        yield scrapy.Request(url=url, callback=self.parseSample, meta={'myItem': response.meta['myItem']})
    yield response.meta['myItem']
and also yielding response.meta['myItem'] in parseSample:
def parseSample(self, response):
    # parse some sample data
    response.meta['myItem']['samples'].append(sample_data)
    yield response.meta['myItem']
Both solutions failed. The first one yields empty "samples" fields. The second one creates multiple items with the same project, like this:
[
  {
    "project": {
      "projectname": "project2"
    },
    "samples": [
      "sample1's_content"
    ]
  },
  {
    "project": {
      "projectname": "project2"
    },
    "samples": [
      "sample1's_content",
      "sample2's_content"
    ]
  },
  {
    "project": {
      "projectname": "project2"
    },
    "samples": [
      "sample1's_content",
      "sample2's_content",
      "sample3's_content"
    ]
  }
]
I wonder, is there any way to deal with this problem?
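One common pattern for this kind of multi-level crawl (a sketch, not the poster's code; all selectors and URLs below are hypothetical placeholders) is to pass the item through meta together with the list of sample URLs that are still pending, request the samples one after another, and yield the item only when the last sample has been parsed:

import scrapy

class ProjectSpider(scrapy.Spider):
    # Sketch of one common approach: carry the item plus the list of remaining
    # sample URLs through meta, and yield the item only when the list is empty.
    # Selectors and URLs are hypothetical placeholders, not the real site.
    name = 'project_sketch'
    start_urls = ['https://example.com/projects']

    def parse(self, response):
        for url in response.xpath('//a[@class="project"]/@href').getall():
            yield response.follow(url, callback=self.parse_project)

    def parse_project(self, response):
        item = {
            'project': {'projectname': response.xpath('//h1/text()').get()},
            'samples': [],
        }
        sample_urls = response.xpath('//a[@class="sample"]/@href').getall()
        if not sample_urls:
            yield item
            return
        yield response.follow(
            sample_urls[0],
            callback=self.parse_sample,
            meta={'item': item, 'pending': sample_urls[1:]},
        )

    def parse_sample(self, response):
        item = response.meta['item']
        item['samples'].append(response.xpath('//body//text()').get())  # placeholder extraction
        pending = response.meta['pending']
        if pending:
            yield response.follow(
                pending[0],
                callback=self.parse_sample,
                meta={'item': item, 'pending': pending[1:]},
            )
        else:
            yield item

Because the sample requests are chained sequentially, the item is yielded exactly once, after all samples have been appended.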
