I am trying to learn Scrapy.
# -*- coding: utf-8 -*-
import scrapy


class QuotesSpider(scrapy.Spider):
    name = 'quotes'
    allowed_domains = ['quotes.toscrape.com/']
    start_urls = ['http://quotes.toscrape.com/']

    def parse(self, response):
        quotes = response.xpath('//*[@class="quote"]')
        for quote in quotes:
            text = quote.xpath(".//*[@class='text']/text()").extract_first()
            author = quote.xpath("//*[@itemprop='author']/text()").extract_first()
            tags = quote.xpath(".//*[@class='tag']/text()").extract();
            item = {
                'author_name': author,
                'text': text,
                'tags': tags
            }
            yield item

    next_page_url = response.xpath("//*[@class='next']/a/@href").extract_first()
    absolute_next_page_url = response.urljoin(next_page_url)
    yield scrapy.Request(url=absolute_next_page_url, callback=self.parse)
But Scrapy is only parsing the first page. What is wrong with this code? I copied it from a YouTube tutorial.
Please help.
It is just that all the requests except the first one are getting filtered as "offsite". This is because you have this extra / at the end of the allowed_domains value:
allowed_domains = ['quotes.toscrape.com/']
# REMOVE THIS SLASH^
Remove the slash, or remove/comment out allowed_domains entirely. Optionally, also drop the trailing semicolon at the end of the tags = ... line.
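If you would rather keep the domain restriction than comment it out, the corrected value is simply the bare domain without the slash (a one-line sketch of that variant):

    allowed_domains = ['quotes.toscrape.com']  # bare domain, no scheme, no trailing slash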
Moreover, indent the following code into the parse method:
next_page_url = response.xpath("//*[@class='next']/a/@href").extract_first()
absolute_next_page_url = response.urljoin(next_page_url)
yield scrapy.Request(url=absolute_next_page_url, callback=self.parse)
so it will become this code:
import scrapy


class QuotesSpider(scrapy.Spider):
    name = 'quotes'
    # allowed_domains = ['quotes.toscrape.com/']
    start_urls = ['http://quotes.toscrape.com/']

    def parse(self, response):
        quotes = response.xpath('//*[@class="quote"]')
        for quote in quotes:
            text = quote.xpath(".//*[@class='text']/text()").extract_first()
            author = quote.xpath("//*[@itemprop='author']/text()").extract_first()
            tags = quote.xpath(".//*[@class='tag']/text()").extract()
            item = {
                'author_name': author,
                'text': text,
                'tags': tags
            }
            yield item

        next_page_url = response.xpath("//*[@class='next']/a/@href").extract_first()
        absolute_next_page_url = response.urljoin(next_page_url)
        yield scrapy.Request(url=absolute_next_page_url, callback=self.parse)
I am trying to scrape data from multiple pages. I have already written a scraper that can scrape data from a single page, but it stops as soon as it finishes scraping the first page.
The whole file with the parse function and scrape function - Scraper.py
# -*- coding: utf-8 -*-
import scrapy
import csv
import os
from scrapy.selector import Selector
from scrapy import Request


class Proddduct(scrapy.Item):
    price = scrapy.Field()
    description = scrapy.Field()
    link = scrapy.Field()
    content = scrapy.Field()


class LapadaScraperSpider(scrapy.Spider):
    name = 'lapada_scraper2'
    allowed_domains = ['http://www.lapada.org']
    start_urls = ['https://lapada.org/art-and-antiques/?search=antique']

    def parse(self, response):
        next_page_url = response.xpath("//ul/li[@class='next']//a/@href").get()
        for item in self.scrape(response):
            yield item
        if next_page_url:
            print("Found url: {}".format(next_page_url))
            yield scrapy.Request(url=next_page_url, callback=self.parse)

    def scrape(self, response):
        parser = scrapy.Selector(response)
        products = parser.xpath("//div[@class='content']")
        for product in products:
            item = Proddduct()
            XPATH_PRODUCT_DESCRIPTION = ".//strong/text()"
            XPATH_PRODUCT_PRICE = ".//div[@class='price']/text()"
            XPATH_PRODUCT_LINK = ".//a/@href"
            raw_product_description = product.xpath(XPATH_PRODUCT_DESCRIPTION).extract()
            raw_product_price = product.xpath(XPATH_PRODUCT_PRICE).extract()
            raw_product_link = product.xpath(XPATH_PRODUCT_LINK).extract_first()
            item['description'] = raw_product_description
            item['price'] = raw_product_price
            item['link'] = raw_product_link
            yield item

    def get_information(self, response):
        item = response.meta['item']
        item['phonenumber'] = "12345"
        yield item
How can I scrape all items on all pages?
Thanks
Change allowed_domains = ['http://www.lapada.org'] to allowed_domains = ['lapada.org']
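The reason is that allowed_domains must contain bare domain names, not URLs with a scheme; otherwise OffsiteMiddleware filters every request after the first one. A minimal sketch of the change (everything else in the spider stays as it was):

    class LapadaScraperSpider(scrapy.Spider):
        name = 'lapada_scraper2'
        # bare domain only, no "http://" prefix, so follow-up requests are not filtered as offsite
        allowed_domains = ['lapada.org']
        start_urls = ['https://lapada.org/art-and-antiques/?search=antique']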
I'm using DOWNLOADER_MIDDLEWARES for rotating proxies with a scrapy.Spider, and I would like to record the proxy used for each request in an item field, i.e. item['proxy_used'].
I guess it could be possible to get the proxy via the "Stats Collector", but I'm new to Python and Scrapy and so far I haven't been able to find a solution.
import scrapy
from tutorial.items import QuotesItem


class QuotesSpider(scrapy.Spider):
    name = "quotes"
    allowed_domains = ["quotes.toscrape.com"]
    start_urls = [
        'http://quotes.toscrape.com/',
    ]

    def parse_quotes(self, response):
        for sel in response.css('div.quote'):
            item = QuotesItem()
            item['text'] = sel.css('span.text::text').get()
            item['author'] = sel.css('small.author::text').get()
            item['tags'] = sel.css('div.tags a.tag::text').getall()
            item['quotelink'] = sel.css('small.author ~ a[href*="goodreads.com"]::attr(href)').get()
            item['proxy_used'] = ???  # <-- PROXY USED BY REQUEST - "HOW TO???"
            yield item

        # follow pagination links #shortcut
        for a in response.css('li.next a'):
            yield response.follow(a, callback=self.parse_quotes)
You can use the response object to access the proxy that was used, like below:
response.meta.get("proxy")
Here it is applied in your code:
import scrapy
from tutorial.items import QuotesItem


class QuotesSpider(scrapy.Spider):
    name = "quotes"
    allowed_domains = ["quotes.toscrape.com"]
    start_urls = [
        'http://quotes.toscrape.com/',
    ]

    def parse_quotes(self, response):
        for sel in response.css('div.quote'):
            item = QuotesItem()
            item['text'] = sel.css('span.text::text').get()
            item['author'] = sel.css('small.author::text').get()
            item['tags'] = sel.css('div.tags a.tag::text').getall()
            item['quotelink'] = sel.css('small.author ~ a[href*="goodreads.com"]::attr(href)').get()
            item['proxy_used'] = response.meta.get("proxy")
            yield item

        # follow pagination links #shortcut
        for a in response.css('li.next a'):
            yield response.follow(a, callback=self.parse_quotes)
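For context, this works because Scrapy's proxy handling stores the chosen proxy under request.meta['proxy'], and request meta is carried through to the response. A rotating-proxy downloader middleware typically looks something like the following minimal sketch (the class name and proxy URLs here are placeholders, not part of your project):

    import random

    class RotatingProxyMiddleware:
        """Hypothetical middleware, shown only to illustrate where meta['proxy'] comes from."""

        PROXIES = [
            'http://proxy1.example.com:8080',
            'http://proxy2.example.com:8080',
        ]

        def process_request(self, request, spider):
            # HttpProxyMiddleware (and most proxy middlewares) use this meta key,
            # so the proxy is later readable via response.meta.get("proxy")
            request.meta['proxy'] = random.choice(self.PROXIES)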
I am using Scrapy to collect some data. My Scrapy program collects 100 items in one session. I need to limit it to 50 or any arbitrary number. How can I do that? Any solution is welcome. Thanks in advance.
# -*- coding: utf-8 -*-
import re
import scrapy


class DmozItem(scrapy.Item):
    # define the fields for your item here like:
    link = scrapy.Field()
    attr = scrapy.Field()
    title = scrapy.Field()
    tag = scrapy.Field()


class DmozSpider(scrapy.Spider):
    name = "dmoz"
    allowed_domains = ["raleigh.craigslist.org"]
    start_urls = [
        "http://raleigh.craigslist.org/search/bab"
    ]
    BASE_URL = 'http://raleigh.craigslist.org/'

    def parse(self, response):
        links = response.xpath('//a[@class="hdrlnk"]/@href').extract()
        for link in links:
            absolute_url = self.BASE_URL + link
            yield scrapy.Request(absolute_url, callback=self.parse_attr)

    def parse_attr(self, response):
        match = re.search(r"(\w+)\.html", response.url)
        if match:
            item_id = match.group(1)
            url = self.BASE_URL + "reply/ral/bab/" + item_id
            item = DmozItem()
            item["link"] = response.url
            item["title"] = "".join(response.xpath("//span[@class='postingtitletext']//text()").extract())
            item["tag"] = "".join(response.xpath("//p[@class='attrgroup']/span/b/text()").extract()[0])
            return scrapy.Request(url, meta={'item': item}, callback=self.parse_contact)

    def parse_contact(self, response):
        item = response.meta['item']
        item["attr"] = "".join(response.xpath("//div[@class='anonemail']//text()").extract())
        return item
This is what the CloseSpider extension and the CLOSESPIDER_ITEMCOUNT setting were made for:
An integer which specifies a number of items. If the spider scrapes
more than that amount of items and those items are passed by the item
pipeline, the spider will be closed with the reason
closespider_itemcount. If zero (or not set), spiders won't be closed
by number of passed items.
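Applied to your spider, a minimal sketch would be the following (50 is just the example limit; note it is a soft limit, since requests already in flight may still produce a few extra items before the spider actually closes):

    class DmozSpider(scrapy.Spider):
        name = "dmoz"
        custom_settings = {
            # close the spider once roughly 50 items have passed through the item pipeline
            'CLOSESPIDER_ITEMCOUNT': 50,
        }
        # ... rest of the spider unchanged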
I tried alecxe's answer, but I had to combine all 3 limits to make it work, so I'm leaving it here in case someone else has the same issue:
class GenericWebsiteSpider(scrapy.Spider):
    """This generic website spider extracts text from websites"""

    name = "generic_website"
    custom_settings = {
        'CLOSESPIDER_PAGECOUNT': 15,
        'CONCURRENT_REQUESTS': 15,
        'CLOSESPIDER_ITEMCOUNT': 15
    }
    ...
I'm scraping a collection of URLs, but they all lack the base of the URL, so I want to prepend the "start_url" as a base to each scraped URL.
Spider class:
class MySpider(BaseSpider):
    name = "teslanews"
    allowed_domains = ["teslamotors.com"]
    start_urls = ["http://www.teslamotors.com/blog"]

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        updates = hxs.xpath('//div[@class="blog-wrapper no-image"]')
        items = []
        for article in updates:
            item = TeslanewsItem()
            item["date"] = article.xpath('./div/span/span/text()').extract()
            item["title"] = article.xpath('./h2/a/text()').extract()
            item["url"] = article.xpath('./h2/a/@href').extract()
            items.append(item)
        return items
I can't do a simple item["url"] = article.xpath('./h2/a/@href').extract() + base with base = "http://www.teslamotors.com",
because this adds the base to the end, and it does it letter by letter (each letter separated by commas) due to being in a for loop.
I'm relatively new to Scrapy, so I don't exactly know which way to go with this.
from scrapy.spider import BaseSpider
from urlparse import urljoin


class MySpider(BaseSpider):
    name = "teslanews"
    allowed_domains = ["teslamotors.com"]
    base = "http://www.teslamotors.com/blog"
    start_urls = ["http://www.teslamotors.com/blog"]

    def parse(self, response):
        updates = response.xpath('//div[@class="blog-wrapper no-image"]')
        items = []
        for article in updates:
            item = TeslanewsItem()
            item["date"] = article.xpath('./div/span/span/text()').extract()
            item["title"] = article.xpath('./h2/a/text()').extract()
            item['url'] = urljoin(self.base, ''.join(article.xpath('./h2/a/@href').extract()))
            items.append(item)  # collect each item; without this the returned list stays empty
        return items
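As a side note (not part of the answer above), newer Scrapy versions let you build the absolute URL directly from the response, which avoids keeping a separate base attribute. A one-line sketch of that variant:

    item['url'] = response.urljoin(article.xpath('./h2/a/@href').extract_first() or '')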
I am trying to write a program in Scrapy to open links and collect data from this tag: <p class="attrgroup"></p>.
I've managed to make Scrapy collect all the links from the given URL but not follow them. Any help is very appreciated.
You need to yield Request instances for the links to follow, assign a callback and extract the text of the desired p element in the callback:
# -*- coding: utf-8 -*-
import scrapy


# item class included here
class DmozItem(scrapy.Item):
    # define the fields for your item here like:
    link = scrapy.Field()
    attr = scrapy.Field()


class DmozSpider(scrapy.Spider):
    name = "dmoz"
    allowed_domains = ["craigslist.org"]
    start_urls = [
        "http://chicago.craigslist.org/search/emd?"
    ]
    BASE_URL = 'http://chicago.craigslist.org/'

    def parse(self, response):
        links = response.xpath('//a[@class="hdrlnk"]/@href').extract()
        for link in links:
            absolute_url = self.BASE_URL + link
            yield scrapy.Request(absolute_url, callback=self.parse_attr)

    def parse_attr(self, response):
        item = DmozItem()
        item["link"] = response.url
        item["attr"] = "".join(response.xpath("//p[@class='attrgroup']//text()").extract())
        return item
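As an optional variation (not part of the answer above), response.urljoin resolves relative links against the current page's URL, so you don't need to maintain a separate BASE_URL constant. A minimal sketch of the parse method written that way:

    def parse(self, response):
        for link in response.xpath('//a[@class="hdrlnk"]/@href').extract():
            # urljoin handles both relative and absolute hrefs correctly
            yield scrapy.Request(response.urljoin(link), callback=self.parse_attr)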