Scrapy: saving 200-status URLs with empty items to a file - python

I noticed in the Scrapy log that some URLs returned a 200 status but contained no items. It seems to be a stability issue with the site, as re-crawling these URLs 1-2 more times does yield items. I would like to save these URLs in a separate file for re-crawling.
I tried creating a dictionary in the spider class to store these URLs, but could not find an easy way to save that dictionary to a file.
Another approach I tried is to create a second item class for the URLs and use an item pipeline. It still outputs an empty file, though. I am not advanced enough to write my own pipeline. Here is my code.
import scrapy

class MyItem(scrapy.Item):
    productCode = scrapy.Field()
    productName = scrapy.Field()
    ...

class UrlItem(scrapy.Item):
    eurl = scrapy.Field()
parse
class MySpider(scrapy.Spider):
    custom_settings = {
        'FEEDS': {
            '%(filename)s.csv': {'format': 'csv', 'encoding': 'utf-8'},
        },
        'FEED_EXPORTERS': {'csv': 'scrapy.exporters.CsvItemExporter'},
    }

    def parsePage(self, response):
        products = response.xpath(...)
        if len(products) == 0:
            url = UrlItem()
            url['eurl'] = response.url
            yield url
        else:
            item = MyItem()
            item['...'] = ...
            ...
            yield item
pipeline
from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem
from .items import MyItem, UrlItem
import csv

class UrlPipeline:
    def open_spider(self, spider):
        self.file = open('%s.csv' % "noProductsUrls", 'w')

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, url, spider):
        if isinstance(url, UrlItem):
            csvWriter = csv.writer(self.file)
            csvWriter.writerow(ItemAdapter(url))

class MyPipeline:
    def __init__(self):
        self.ids_seen = set()

    def process_item(self, item, spider):
        if isinstance(item, MyItem):
            adapter = ItemAdapter(item)
            if adapter['productCode'] in self.ids_seen:
                raise DropItem(f"Duplicate item found: {item!r}")
            else:
                self.ids_seen.add(adapter['productCode'])
                return item
settings file
ITEM_PIPELINES = {
    'project.pipelines.MyPipeline': 300,
    'project.pipelines.UrlPipeline': 300,
}
The thing is, I already have the feed export settings in the spider class that save one CSV. In the pipeline I just want to add one more CSV. Do the two conflict? Or is it better to produce both CSV files from the pipeline?
Update: I opted for @marcos's solution below, which is superior.
There is also a way to save the CSV in the spider class, based on this post.
def __init__(self):
    self.outfile = open("urls.csv", "w", newline="")
    self.writer = csv.writer(self.outfile)

def closed(self, reason):
    self.outfile.close()

Then add the following inside the parse method:

if len(products) == 0:
    self.writer.writerow([response.url])
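
As a side note on keeping two CSV outputs: in Scrapy 2.6 and later, the FEEDS setting also accepts an item_classes option per feed, which routes each item class to its own file without needing a custom pipeline. A minimal sketch, assuming that Scrapy version and the two item classes above (the file names are placeholders):

custom_settings = {
    'FEEDS': {
        'products.csv': {
            'format': 'csv',
            'encoding': 'utf-8',
            'item_classes': [MyItem],   # only MyItem rows end up here
        },
        'noProductsUrls.csv': {
            'format': 'csv',
            'encoding': 'utf-8',
            'item_classes': [UrlItem],  # only UrlItem rows end up here
        },
    },
}

With something like this in place, both yield statements in parsePage can stay exactly as they are.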

I would suggest you just yield a retry request whenever no product is found on the page, unless you have a very specific reason to store those URLs.
The code would look like:
class MySpider(scrapy.Spider):
    custom_settings = {
        'FEEDS': {
            '%(filename)s.csv': {'format': 'csv', 'encoding': 'utf-8'},
        },
        'FEED_EXPORTERS': {'csv': 'scrapy.exporters.CsvItemExporter'},
    }

    def parsePage(self, response):
        products = response.xpath(...)
        if not len(products):
            yield self._retry_request(response)
            return
        item = MyItem()
        item['...'] = ...
        ...
        yield item

    def _retry_request(self, response, max_retries=5):
        retries = response.meta.get('retry_time', 0)
        if retries < max_retries:
            return response.request.replace(
                meta={**response.meta, 'retry_time': retries + 1},
                dont_filter=True,
            )
        else:
            self.logger.warning(f'Max retries reached for {response.url}')
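
If you still want a record of the URLs that exhaust their retries (to feed into a later run), one simple extension of the sketch above is to collect them on the spider and dump them when it closes. The failed_urls attribute and file name below are placeholders, and the spider module would need import csv at the top:

def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.failed_urls = []

def _retry_request(self, response, max_retries=5):
    retries = response.meta.get('retry_time', 0)
    if retries < max_retries:
        return response.request.replace(
            meta={**response.meta, 'retry_time': retries + 1},
            dont_filter=True,
        )
    # retries exhausted: log it and remember the URL for a later crawl
    self.logger.warning(f'Max retries reached for {response.url}')
    self.failed_urls.append(response.url)

def closed(self, reason):
    # write the exhausted URLs out, one per line
    with open('failed_urls.csv', 'w', newline='') as f:
        csv.writer(f).writerows([u] for u in self.failed_urls)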

Related

Saving data to separate csv files in scrapy

I made a scraper for yellow pages. There is a categories.txt file that is read by the script and then it generates links according to those categories:
settings = get_project_settings()
categories = settings.get('CATEGORIES')
links = []
for category in categories:
    link = 'https://www.yellowpages.com/search?search_terms=' + category + '&geo_location_terms=NY&page=1'
    links.append(link)
Then this links list is passed to start_urls:
class YpSpider(CrawlSpider):
    def __init__(self, *a, **kw):
        super().__init__(*a, **kw)
        dispatcher.connect(self.spider_closed, signals.spider_closed)

    name = 'yp'
    allowed_domains = ['yellowpages.com']
    start_urls = links
    rules = (
        Rule(LinkExtractor(restrict_xpaths='//a[@class="business-name"]', allow=''), callback='parse_item',
             follow=True),
        Rule(LinkExtractor(restrict_xpaths='//a[@class="next ajax-page"]', allow=''),
             follow=True),
    )
It will save all the data from all the links in a CSV file named parent.csv. This parent.csv file will have a column named keyword, which will help in separating data from different categories into separate CSV files. This is implemented in the spider_closed function:
def spider_closed(self, spider):
    with open('parent.csv', 'r') as file:
        reader = csv.reader(file)
        for row in reader:
            with open('{}.csv'.format(row[0]), 'a') as f:
                writer = csv.writer(f)
                writer.writerow(row)
The problem I am facing is getting the category name corresponding to every link in my parse method, so that it can be saved in parent.csv and used to separate the different categories afterwards:
def parse_item(self, response):
    item = YellowItem()
    item['keyword'] = # here i need the corresponding category for every link
I think you should change the way you generate links. You can, for example, override the start_requests method and pass the category to the request through either its cb_kwargs or meta attribute.
I would also suggest that you change the implementation to get the settings from the crawler running the spider, by overriding from_crawler.
Here's how I would do it:
class YpSpider(CrawlSpider):
    def __init__(self, crawler, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.crawler = crawler
        self.categories = crawler.settings.get('CATEGORIES')

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler)

    def start_requests(self):
        for category in self.categories:
            yield Request(
                f'https://www.yellowpages.com/search?search_terms={category}&geo_location_terms=NY&page=1',
                self.parse,
                cb_kwargs={'category': category}
            )

    def parse(self, response, category):
        # do sth
        ...
        for url in response.xpath('//a[@class="business-name"]/@href').extract():
            yield Request(
                url,
                self.parse_item,
                cb_kwargs={'category': category}
            )
        yield Request(
            response.xpath('//a[@class="next ajax-page"]/@href').extract_first(),
            self.parse,
            cb_kwargs={'category': category}
        )

    def parse_item(self, response, category):
        item = YellowItem()
        item['keyword'] = category
        # do sth else
        ...
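
Once every item carries its category in the keyword field, you could also skip the parent.csv post-processing step and write each item straight to a per-category file from a pipeline. A rough sketch, not tested against the real project; the exported fields are assumptions and would need to match the actual YellowItem definition:

import csv

class PerCategoryCsvPipeline:
    def open_spider(self, spider):
        self.files = {}    # category -> open file handle
        self.writers = {}  # category -> csv.writer

    def close_spider(self, spider):
        for f in self.files.values():
            f.close()

    def process_item(self, item, spider):
        category = item['keyword']
        if category not in self.writers:
            f = open('{}.csv'.format(category), 'a', newline='')
            self.files[category] = f
            self.writers[category] = csv.writer(f)
        # one row per scraped item; adjust the field list to the real item definition
        self.writers[category].writerow([item.get('keyword'), item.get('title')])
        return item

Like any pipeline, it would still need to be enabled in ITEM_PIPELINES.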

Dropping duplicate items from Scrapy pipeline?

My Scrapy crawler collects data from a set of URLs, but when I run it again to add new content, the old content is saved to my MongoDB database again. Is there a way to check whether an item is already in my MongoDB database (duplicate items have the same title field) and, if so, drop it from the pipeline? Alternatively, is it better to delete duplicates from the database after they are saved, and if so, how would I implement that in my project?
This is my pipeline:
import logging

import pymongo
from scrapy.exceptions import DropItem

class MongoPipeline(object):

    collection_name = 'articles'

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        ## pull in information from settings.py
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE')
        )

    def open_spider(self, spider):
        ## initializing spider
        ## opening db connection
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        ## clean up when spider is closed
        self.client.close()

    def process_item(self, item, spider):
        ## how to handle each post
        bbcDict = {}
        if item['art_content'] != []:
            bbcDict['art_content'] = item['art_content']
            bbcDict['date'] = item['date']
            bbcDict['date_str'] = item['date_str']
            bbcDict['title'] = item['title']
            bbcDict['url'] = item['url']
            self.db[self.collection_name].insert_one(dict(bbcDict))
        return item
        # self.db[self.collection_name].insert(dict(item))
        # logging.debug("Post added to MongoDB")
        # return item
This is my crawler
from datetime import datetime as dt
import scrapy
from ArtScraper.items import ArtscraperItem

class PostSpider(scrapy.Spider):
    article = ""
    name = 'crawly'
    allowed_domains = []
    start_urls = ['http://feeds.bbci.co.uk/arabic/rss.xml']

    def parse(self, response):
        # get the subreddit from the URL
        #sub = response.url.split('/')[4]
        #Get the title
        # parse thru each of the posts
        #for post in response.css('div.thing'):
        articles = response.xpath('//channel/item')
        for article in articles:
            item = ArtscraperItem()
            print('hello')
            item['date'] = dt.today()
            item['date_str'] = article.xpath('pubDate/text()').extract_first()
            item['url'] = article.xpath('link/text()').extract_first()
            item['title'] = article.xpath('title/text()').extract_first()
            url = item['url']
            yield scrapy.Request(
                url,
                callback=self.parse_article,
                meta={'item': item},  # carry over our item
            )
            #request = scrapy.Request(url, callback=self.parse_article)
            #request.meta['item'] = item
            #yield request

    def parse_article(self, response):
        item = response.meta['item']
        pars = response.xpath("//div[@class='story-body']/div[@class='story-body__inner']/p/text()").extract()
        item['art_content'] = '-'.join(pars)
        print("HHHH")
        yield item
Thanks in advance.
You can filter out duplicates by keeping a set of seen titles on your MongoPipeline class as the items are processed, and using DropItem to discard duplicates in process_item. The official docs provide a great example. You can then save to MongoDB when the item is returned.
In your case, the duplicates filter in your pipeline would look like this:
import logging

import pymongo
from scrapy.exceptions import DropItem

class MongoPipeline(object):

    collection_name = 'articles'

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db
        self.titles_seen = set()

    @classmethod
    def from_crawler(cls, crawler):
        ## pull in information from settings.py
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE')
        )

    def open_spider(self, spider):
        ## initializing spider
        ## opening db connection
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        ## clean up when spider is closed
        self.client.close()

    def process_item(self, item, spider):
        if item['title'] in self.titles_seen:
            raise DropItem("Duplicate item title found: %s" % item)
        else:
            self.titles_seen.add(item['title'])
            return item
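
Note that the titles_seen set above only catches duplicates within a single run. Since the question is about content stored in earlier runs, the same check can be made against MongoDB itself by looking the title up before inserting. A rough sketch of such a process_item, assuming duplicates really do share the title field:

def process_item(self, item, spider):
    # skip anything whose title is already stored from a previous run
    if self.db[self.collection_name].find_one({'title': item['title']}) is not None:
        raise DropItem("Duplicate item title found: %s" % item)
    self.db[self.collection_name].insert_one(dict(item))
    return item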
For me it was necessary to import ItemAdapter to convert the item to a dict:
from itemadapter import ItemAdapter

def process_item(self, item, spider):
    adapter = ItemAdapter(item)
    if self.db[self.collection_name].find_one({'id': adapter['id']}) != None:
        # update the stored document with the newly scraped fields instead of dropping it
        dado = self.db[self.collection_name].find_one_and_update(
            {'id': adapter['id']},
            {'$set': ItemAdapter(item).asdict()},
        )
        ## ----> raise DropItem(f"Duplicate item found: {item!r}") <------
        print(f"Duplicate item found: {dado!r}")
    else:
        self.db[self.collection_name].insert_one(ItemAdapter(item).asdict())
    return item
I preferred to update rather than raise DropItem.

filtering item in scrapy pipeline

I've scraped the URLs I want from a page. Now I want to filter them for keywords using a pipeline:
class GumtreeCouchesPipeline(object):

    keywords = ['leather', 'couches']

    def process_item(self, item, spider):
        if any(key in item['url'] for key in keywords):
            return item
Problem is, it's returning nothing now.
The spider:
import scrapy
from gumtree_couches.items import adItem
from urllib.parse import urljoin

class GumtreeSpider(scrapy.Spider):
    name = 'GumtreeCouches'
    allowed_domains = ['https://someurl']
    start_urls = ['https://someurl']

    def parse(self, response):
        item = adItem()
        for ad_links in response.xpath('//div[@class="view"][1]//a'):
            relative_url = ad_links.xpath('@href').extract_first()
            item['title'] = ad_links.xpath('text()').extract_first()
            item['url'] = response.urljoin(relative_url)
            yield item
How can I filter all the scraped URLs for keywords using the pipeline?
Thanks!
This should fix your problem:
class GumtreeCouchesPipeline(object):

    keywords = ['leather', 'couches']

    def process_item(self, item, spider):
        if any(key in item['url'] for key in self.keywords):
            return item
Notice that I'm using self.keywords to refer to the keywords class attribute.
If you look at your spider logs, you should find some errors saying something like: NameError: name 'keywords' is not defined.
Anyway, I'd recommend implementing this pipeline like this:
from scrapy.exceptions import DropItem

class GumtreeCouchesPipeline(object):

    keywords = ['leather', 'couches']

    def process_item(self, item, spider):
        if not any(key in item['url'] for key in self.keywords):
            raise DropItem('missing keyword in URL')
        return item
This way, you'll have the information about the dropped items in the job stats once it's finished.
From reading the documentation, I think you have to cater for all paths, e.g.:
from scrapy.exceptions import DropItem

def process_item(self, item, spider):
    keywords = ['leather', 'couches']
    if item['url']:
        if any(key in item['url'] for key in keywords):
            return item
        else:
            raise DropItem("Missing specified keywords.")
    else:
        return item

Item not reaching pipeline

I am new to Python and Scrapy. I am not getting item data in the pipeline, and nothing is being written to the CSV. The error is:
'DmozSpider' object has no attribute '__getitem__'
Any help will be appreciated.
spider file
import scrapy
import sys
import os
from tutorial.items import TutorialItem
from pprint import pprint

class DmozSpider(scrapy.Spider):
    name = "myspider"
    allowed_domains = ["www.xyz.co.id"]
    start_urls = ["http://www.xyz.co.id/search?q=abc"]

    def parse(self, response):
        var = response.xpath("//a[@class='img']/@href").extract()[0]
        item = TutorialItem()
        item['title'] = var
        yield item
pipeline file
import csv

class TutorialPipeline(object):
    def __init__(self):
        self.csvwriter = csv.writer(open('items.csv', 'wb'))

    def process_item(self, domain, item):
        print item['title']
        self.csvwriter.writerow([item['title']])
        return item
items file
import scrapy

class TutorialItem(scrapy.Item):
    title = scrapy.Field()
    link = scrapy.Field()
    desc = scrapy.Field()
    price = scrapy.Field()
Settings file
ITEM_PIPELINES = {
    'tutorial.pipelines.TutorialPipeline': 300,
}
The definition of your pipeline method process_item() is incorrect. The bug is in the stated parameters self, domain, item. The official description in the documentation is:
process_item(self, item, spider)
Change the method in your class TutorialPipeline accordingly to:
def process_item(self, item, spider):
    print item['title']
    self.csvwriter.writerow([item['title']])
    return item
Try item.get('title') instead of item['title']
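
For context, the only difference is how a missing field is handled, since a Scrapy item behaves like a dict here:

item = TutorialItem()
item.get('title')   # returns None if 'title' was never populated
item['title']       # raises KeyError if 'title' was never populated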

Scrapy pipeline to export csv file in the right format

I made the improvement according to the suggestion from alexce below. What I need is like the picture below; however, each row/line should be one review, with date, rating, review text and link.
I need to let the item processor process each review of every page.
Currently TakeFirst() only takes the first review of the page, so for 10 pages I only have 10 lines/rows, as in the picture below.
Spider code is below:
import scrapy
from amazon.items import AmazonItem

class AmazonSpider(scrapy.Spider):
    name = "amazon"
    allowed_domains = ['amazon.co.uk']
    start_urls = [
        'http://www.amazon.co.uk/product-reviews/B0042EU3A2/'.format(page) for page in xrange(1,114)
    ]

    def parse(self, response):
        for sel in response.xpath('//*[@id="productReviews"]//tr/td[1]'):
            item = AmazonItem()
            item['rating'] = sel.xpath('div/div[2]/span[1]/span/@title').extract()
            item['date'] = sel.xpath('div/div[2]/span[2]/nobr/text()').extract()
            item['review'] = sel.xpath('div/div[6]/text()').extract()
            item['link'] = sel.xpath('div/div[7]/div[2]/div/div[1]/span[3]/a/@href').extract()
            yield item
I started from scratch and the following spider should be run with
scrapy crawl amazon -t csv -o Amazon.csv --loglevel=INFO
so that opening the CSV file with a spreadsheet shows the expected output for me.
Hope this helps :-)
import scrapy

class AmazonItem(scrapy.Item):
    rating = scrapy.Field()
    date = scrapy.Field()
    review = scrapy.Field()
    link = scrapy.Field()

class AmazonSpider(scrapy.Spider):

    name = "amazon"
    allowed_domains = ['amazon.co.uk']
    start_urls = ['http://www.amazon.co.uk/product-reviews/B0042EU3A2/']

    def parse(self, response):
        for sel in response.xpath('//table[@id="productReviews"]//tr/td/div'):
            item = AmazonItem()
            item['rating'] = sel.xpath('./div/span/span/span/text()').extract()
            item['date'] = sel.xpath('./div/span/nobr/text()').extract()
            item['review'] = sel.xpath('./div[@class="reviewText"]/text()').extract()
            item['link'] = sel.xpath('.//a[contains(.,"Permalink")]/@href').extract()
            yield item

        xpath_Next_Page = './/table[@id="productReviews"]/following::*//span[@class="paging"]/a[contains(.,"Next")]/@href'
        if response.xpath(xpath_Next_Page):
            url_Next_Page = response.xpath(xpath_Next_Page).extract()[0]
            request = scrapy.Request(url_Next_Page, callback=self.parse)
            yield request
If using -t csv (as proposed by Frank in the comments) does not work for you for some reason, you can always use the built-in CsvItemExporter directly in a custom pipeline, e.g.:
from scrapy import signals
from scrapy.contrib.exporter import CsvItemExporter

class AmazonPipeline(object):
    @classmethod
    def from_crawler(cls, crawler):
        pipeline = cls()
        crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
        crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
        return pipeline

    def spider_opened(self, spider):
        self.file = open('output.csv', 'w+b')
        self.exporter = CsvItemExporter(self.file)
        self.exporter.start_exporting()

    def spider_closed(self, spider):
        self.exporter.finish_exporting()
        self.file.close()

    def process_item(self, item, spider):
        self.exporter.export_item(item)
        return item
which you need to add to ITEM_PIPELINES:
ITEM_PIPELINES = {
    'amazon.pipelines.AmazonPipeline': 300
}
Also, I would use an Item Loader with input and output processors to join the review text and replace new lines with spaces. Create an ItemLoader class:
from scrapy.contrib.loader import ItemLoader
from scrapy.contrib.loader.processor import TakeFirst, Join, MapCompose

class AmazonItemLoader(ItemLoader):
    default_output_processor = TakeFirst()

    review_in = MapCompose(lambda x: x.replace("\n", " "))
    review_out = Join()
Then, use it to construct an Item:
def parse(self, response):
    for sel in response.xpath('//*[@id="productReviews"]//tr/td[1]'):
        loader = AmazonItemLoader(item=AmazonItem(), selector=sel)

        loader.add_xpath('rating', './/div/div[2]/span[1]/span/@title')
        loader.add_xpath('date', './/div/div[2]/span[2]/nobr/text()')
        loader.add_xpath('review', './/div/div[6]/text()')
        loader.add_xpath('link', './/div/div[7]/div[2]/div/div[1]/span[3]/a/@href')

        yield loader.load_item()
