I want Scrapy to run through each item once - python

I would like Scrapy to run through each item once so that the relevant data is grouped together. As it is, it just lumps all the links, headers, dates, etc. together, and it also writes everything to the file more than once. I am pretty new to both Scrapy and Python, so I would be grateful for any advice.
Here is my spider code:
from scrapy.spiders import Spider
from scrapy.selector import Selector
from fashioBlog.functions import extract_data
from fashioBlog.items import Fashioblog

class firstSpider(Spider):
    name = "first"
    allowed_domains = [
        "stopitrightnow.com"
    ]
    start_urls = [
        "http://www.stopitrightnow.com"
    ]

    def parse(self, response):
        sel = Selector(response)
        sites = sel.xpath('//div[@class="post-outer"]')
        items = []
        for site in sites:
            item = Fashioblog()
            item['title'] = extract_data(site.xpath('//h3[normalize-space(@class)="post-title entry-title"]//text()').extract())
            item['url'] = extract_data(site.xpath('//div[normalize-space(@class)="post-body entry-content"]//@href').extract())
            item['date'] = extract_data(site.xpath('//h2[normalize-space(@class)="date-header"]/span/text()').extract())
            #item['body'] = site.xpath('//div[@class="post-body entry-content"]/i/text()').extract()
            item['labelLink'] = extract_data(site.xpath('//span[normalize-space(@class)="post-labels"]//@href').extract())
            item['comment'] = extract_data(site.xpath('//span[normalize-space(@class)="post-comment-link"]//text()').extract())
            item['picUrl'] = extract_data(site.xpath('//div[normalize-space(@class)="separator"]//@href').extract())
            #item['labelText'] = extract_data(site.xpath('(//i//text()').extract())
            #item['labelLink2'] = extract_data(site.xpath('(//i//@href').extract())
            yield item

Make your expressions context-specific by prepending a dot:
item['title'] = extract_data(site.xpath('.//h3[normalize-space(@class)="post-title entry-title"]//text()').extract())
(note the leading dot right after the opening quote)
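Applied to the loop in the question, a minimal sketch of the relative version could look like this (same fields and helpers as above; only the leading dots in the XPath expressions change):

for site in sites:
    item = Fashioblog()
    # the leading dot scopes each query to the current post-outer div
    item['title'] = extract_data(site.xpath('.//h3[normalize-space(@class)="post-title entry-title"]//text()').extract())
    item['url'] = extract_data(site.xpath('.//div[normalize-space(@class)="post-body entry-content"]//@href').extract())
    item['date'] = extract_data(site.xpath('.//h2[normalize-space(@class)="date-header"]/span/text()').extract())
    item['labelLink'] = extract_data(site.xpath('.//span[normalize-space(@class)="post-labels"]//@href').extract())
    item['comment'] = extract_data(site.xpath('.//span[normalize-space(@class)="post-comment-link"]//text()').extract())
    item['picUrl'] = extract_data(site.xpath('.//div[normalize-space(@class)="separator"]//@href').extract())
    yield item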


Scrapy pipeline extracting in the wrong csv format

My Hacker News spider outputs all the results on one line instead of one per line: every scraped value ends up on the same line of the CSV.
Here is my code.
import scrapy
import string
import urlparse
from scrapy.selector import Selector
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.linkextractors import LinkExtractor

class HnItem(scrapy.Item):
    title = scrapy.Field()
    link = scrapy.Field()
    score = scrapy.Field()

class HnSpider(scrapy.Spider):
    name = 'hackernews'
    allowed_domains = ["news.ycombinator.com"]
    start_urls = ["https://news.ycombinator.com/"]

    def parse(self, response):
        sel = response
        selector_list = response.xpath('.//table[@class="itemlist"]')
        for sel in selector_list:
            item = HnItem()
            item['title'] = sel.xpath('.//td[@class="title"]/text()').extract()
            item['link'] = sel.xpath('.//tr[@class="athing"]/td[3]/a/@href').extract()
            item['score'] = sel.xpath('.//td[@class="subtext"]/span/text()').extract()
            yield item
and my settings.py file
BOT_NAME = 'hnews'
SPIDER_MODULES = ['hnews.spiders']
NEWSPIDER_MODULE = 'hnews.spiders'
USER_AGENT = 'hnews (+http://www.yourdomain.com)'
FEED_URI = '/used/scrapy/hnews/%(name)s/%(time)s.csv'
FEED_FORMAT = 'csv'
I've tried to implement this among many other solutions but no luck so far. I'm still very new at this, so bear with me if possible.
It is happening because your item pipeline is getting all the lists at once. For example, item['title'] receives a list of every title in one go, which is then transferred to the item pipeline and written to the CSV file directly.
The solution is to iterate over the lists and yield one item at a time to the pipeline. Here's the modified code:
import scrapy
from scrapy.selector import Selector

class HnItem(scrapy.Item):
    title = scrapy.Field()
    link = scrapy.Field()
    score = scrapy.Field()

class HnSpider(scrapy.Spider):
    name = 'hackernews'
    allowed_domains = ["news.ycombinator.com"]
    start_urls = ["https://news.ycombinator.com/"]

    def parse(self, response):
        sel = Selector(response)
        item = HnItem()
        title_list = sel.xpath('.//td[@class="title"]/a/text()').extract()[:-2]
        link_list = sel.xpath('.//tr[@class="athing"]/td[3]/a/@href').extract()
        score_list = sel.xpath('.//td[@class="subtext"]/span/text()').extract()
        for x in range(0, len(title_list)):
            item['title'] = title_list[x]
            item['link'] = link_list[x]
            item['score'] = score_list[x]
            yield item
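As an aside, the three position-aligned lists could also be paired up with zip() inside parse, replacing the index-based loop; a minimal sketch of that variation (same field names as above, not part of the original answer):

for title, link, score in zip(title_list, link_list, score_list):
    item = HnItem()  # a fresh item per row, so earlier rows are not overwritten
    item['title'] = title
    item['link'] = link
    item['score'] = score
    yield item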

Scrapy - how to join data together from different parts of a website

I am in the process of building a crawler. I want it to navigate all available pages on the site and, [i], fill a number of data fields for each product, then, [ii], for each product, drill into the corresponding product URL and populate a number of other data fields. I want all of the data in the same {} for each product. Instead, the crawler carries out [i] and then [ii], so part [ii] is populated in a separate {}.
I want to somehow add the data from [i] into [ii]. request.meta['item'] = item looks like something that could work, but I have not yet succeeded in getting it to work.
I have the following code:
# -*- coding: utf-8 -*-
import scrapy
import re
from scrapy import Spider
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import Selector
from scrapy.http import Request
from maxine.items import CrawlerItem

class Crawler1Spider(CrawlSpider):
    name = "crawler1"
    allowed_domains = ["website.com"]
    start_urls = (
        'starturl.com',
    )

    rules = [
        #visit each page
        Rule(LinkExtractor(allow=(), restrict_xpaths=('//a[@class="listnavpagenum"]')), callback='parse_item', follow=True),
        #click on each product link
        Rule(LinkExtractor(allow=(), restrict_xpaths=('//a[@class="exhib_status exhib_status_interiors"]')), callback='parse_detail', follow=True),
    ]

    def parse_item(self, response):
        sel = Selector(response)
        elements = sel.xpath('//div[@class="ez_listitem_wrapper"]')
        items = []
        results = []
        n = 0
        for element in elements:
            item = CrawlerItem()
            n = n + 1
            #work out how to put images into image folder
            item['title'] = element.css('a.exhib_status.exhib_status_interiors').xpath('text()').extract_first()
            item['title_code'] = element.xpath('.//div[@class="ez_merge8"]/text()').extract_first()
            item['item_url'] = element.xpath('//div[@class="ez_merge4"]/a/@href').extract_first()
            item['count'] = n
            yield item
            #items.append(item)
        #return items

    def parse_detail(self, response):
        item = CrawlerItem()
        item['telephone'] = response.xpath('//div[@id="ez_entry_contactinfo"]//text()').re('[0-9]{4,}\s*[0-9]{4,}')
        item['website'] = response.xpath('//div[@id="ez_entry_contactinfo"]//text()').re('(?:http://)?www.[a-z0-9\/?_\- ]+.[0-9a-z]+')
        yield item
Suggestions as to how I can get all the data into one {} for each product would be much appreciated.
UPDATE: 20/11/15
I have amended the code as follows:
# -*- coding: utf-8 -*-
import scrapy
import re
from scrapy import Spider
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import Selector
from scrapy.http import Request
from maxine.items import CrawlItem

class Crawler1Spider(CrawlSpider):
    name = "test"
    allowed_domains = ["website.com"]
    start_urls = (
        'starturl.com',
    )

    rules = [
        Rule(LinkExtractor(allow=(), restrict_xpaths=('//a[@class="listnavpagenum"]')), callback='parse_item', follow=True),
    ]

    def parse_item(self, response):
        item = CrawlItem()
        sel = Selector(response)
        elements = sel.xpath('//div[@class="ez_listitem_wrapper"]')
        items = []
        n = 0
        for element in elements:
            n = n + 1
            #work out how to put images into image folder
            #item['image_urls'] = selector.xpath('//a[@class="exhib_status exhib_status_interiors"]/img/@src').extract()
            item['title'] = element.css('a.exhib_status.exhib_status_interiors').xpath('text()').extract_first()
            item['title_code'] = element.xpath('.//div[@class="ez_merge8"]/text()').extract_first()
            item['item_url'] = element.xpath('//div[@class="ez_merge4"]/a/@href').extract_first()
            item['count'] = n
            item_detail_url = item['item_url'] = element.xpath('//div[@class="ez_merge4"]/a/@href').extract_first()
            # crawl the item and pass the item to the following request with *meta*
            yield Request(url=item_detail_url, callback=self.parse_detail, meta=dict(item=item))

    def parse_detail(self, response):
        #get the item from the previous passed meta
        item = response.meta['item']
        # keep populating the item
        item['telephone'] = response.xpath('//div[@id="ez_entry_contactinfo"]//text()').re('[0-9]{4,}\s*[0-9]{4,}')
        item['website'] = response.xpath('//div[@id="ez_entry_contactinfo"]//text()').re('(?:http://)?www.[a-z0-9\/?_\- ]+.[0-9a-z]+')
        yield item
I'm now getting the data in the same {}'s; however, the spider is only extracting data from the last item on each page. Any further suggestions?
I am afraid you can't use rules for this case, as each request is independent by the time it reaches the site you want to crawl.
You'll need to define your own behaviour starting from start_requests:
def start_requests(self):
    yield Request(url=myinitialurl, callback=self.parse)

def parse(self, response):
    # crawl the initial page and then do something with that info
    yield Request(url=producturl, callback=self.parse_item)

def parse_item(self, response):
    item = CrawlerItem()
    # crawl the item and pass the item to the following request with *meta*
    yield Request(url=item_detail_url, callback=self.parse_detail, meta=dict(item=item))

def parse_detail(self, response):
    # get the item from the previous passed meta
    item = response.meta['item']
    # keep populating the item
    yield item
Try instantiating item = CrawlItem() within the for loop in parse_item, so each product gets its own item instead of the same one being overwritten on every iteration.
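A minimal sketch of that change against the updated parse_item above (only the item creation moves inside the loop, and the per-element XPaths are made relative with a leading dot; the field names are the ones already used in the question):

def parse_item(self, response):
    elements = response.xpath('//div[@class="ez_listitem_wrapper"]')
    n = 0
    for element in elements:
        n = n + 1
        item = CrawlItem()  # a fresh item per element, so each product keeps its own data
        item['title'] = element.css('a.exhib_status.exhib_status_interiors').xpath('text()').extract_first()
        item['title_code'] = element.xpath('.//div[@class="ez_merge8"]/text()').extract_first()
        item['item_url'] = element.xpath('.//div[@class="ez_merge4"]/a/@href').extract_first()
        item['count'] = n
        # hand the partially filled item to the detail page via meta
        yield Request(url=item['item_url'], callback=self.parse_detail, meta=dict(item=item))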

HtmlXPathSelector for Scrapy returning null results

I just started learning Python / Scrapy. I was able to follow the tutorials successfully, but I am struggling with a 'test' scrape that I want to do on my own.
What I am trying to do now is go to http://jobs.walmart.com/search/finance-jobs and scrape the job listings.
However, I think I may be doing something wrong in the XPath, and I am not sure what.
There is no "id" for that table, so I am using its class.
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector

class MySpider(BaseSpider):
    name = "walmart"
    allowed_domains = ["jobs.walmart.com"]
    start_urls = ["http://jobs.walmart.com/search/finance-jobs"]

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        titles = hxs.select("//table[@class='tableSearchResults']")
        items = []
        for titles in titles:
            item = walmart()
            item["title"] = titles.select("a/text()").extract()
            item["link"] = titles.select("a/@href").extract()
            items.append(item)
        return items
Here is what the page source looks like (the HTML snippet is not reproduced here):
The problem, as you said, is your XPath. It is always useful to run:
scrapy view http://jobs.walmart.com/search/finance-jobs
before running your spider, to see how the website looks from Scrapy's point of view.
This should work now:
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector

class MySpider(BaseSpider):
    name = "walmart"
    allowed_domains = ["jobs.walmart.com"]
    start_urls = ["http://jobs.walmart.com/search/finance-jobs"]

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        item = walmart()
        titles = hxs.select("//table[@class='tableSearchResults']/tr")
        items = []
        for title in titles:
            if title.select("td[@class='td1']/a").extract():
                item["title"] = title.select("td[@class='td1']/a/text()").extract()
                item["link"] = title.select("td[@class='td1']/a/@href").extract()
                items.append(item)
        return items
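On a related note, the scrapy shell is handy for trying selectors interactively before putting them into a spider; a rough example (which object the shell exposes depends on your Scrapy version; older releases provide hxs, newer ones response):

scrapy shell http://jobs.walmart.com/search/finance-jobs
# at the interactive prompt, older Scrapy:
hxs.select("//table[@class='tableSearchResults']/tr")
# newer Scrapy:
response.xpath("//table[@class='tableSearchResults']/tr")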

scrapy: A tiny "spider" in a spider?

When I try to scrape product review info from epinions.com, if the main review text is too long, it has a "read more" link to another page.
I took an example from "http://www.epinions.com/reviews/samsung-galaxy-note-16-gb-cell-phone/pa_~1"; you'll see what I mean if you look at the first review.
I am wondering: is it possible to have a tiny spider in each iteration of the for loop to grab the URL and scrape the review out of the new link? I have the following code, but it doesn't work for the tiny "spider".
Here is my code:
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from epinions_test.items import EpinionsTestItem
from scrapy.http import Response, HtmlResponse

class MySpider(BaseSpider):
    name = "epinions"
    allow_domains = ["epinions.com"]
    start_urls = ['http://www.epinions.com/reviews/samsung-galaxy-note-16-gb-cell-phone/pa_~1']

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('//div[@class="review_info"]')
        items = []
        for sites in sites:
            item = EpinionsTestItem()
            item["title"] = sites.select('h2/a/text()').extract()
            item["star"] = sites.select('span/a/span/@title').extract()
            item["date"] = sites.select('span/span/span/@title').extract()
            item["review"] = sites.select('p/span/text()').extract()
            # Everything works fine and i do have those four columns beautifully printed out, until....
            url2 = sites.select('p/span/a/@href').extract()
            url = str("http://www.epinions.com%s" % str(url2)[3:-2])
            # This url is a string. when i print it out, it's like "http://www.epinions.com/review/samsung-galaxy-note-16-gb-cell-phone/content_624031731332", which looks legit.
            response2 = HtmlResponse(url)
            # I tried in a scrapy shell, it shows that this is a htmlresponse...
            hxs2 = HtmlXPathSelector(response2)
            fullReview = hxs2.select('//div[@class = "user_review_full"]')
            item["url"] = fullReview.select('p/text()').extract()
            # The three lines above works in an independent spider, where start_url is changed to the url just generated and everything.
            # However, i got nothing from item["url"] in this code.
            items.append(item)
        return items
Why item["url"] returns nothing?
Thanks!
Constructing HtmlResponse(url) by hand does not download anything, so the selector has an empty body to work with. Instead, you should instantiate a new Request in the callback and pass your item along in the meta dict:
from scrapy.http import Request
from scrapy.item import Item, Field
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector

class EpinionsTestItem(Item):
    title = Field()
    star = Field()
    date = Field()
    review = Field()

class MySpider(BaseSpider):
    name = "epinions"
    allow_domains = ["epinions.com"]
    start_urls = ['http://www.epinions.com/reviews/samsung-galaxy-note-16-gb-cell-phone/pa_~1']

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('//div[@class="review_info"]')
        for sites in sites:
            item = EpinionsTestItem()
            item["title"] = sites.select('h2/a/text()').extract()
            item["star"] = sites.select('span/a/span/@title').extract()
            item["date"] = sites.select('span/span/span/@title').extract()
            url = sites.select('p/span/a/@href').extract()
            url = str("http://www.epinions.com%s" % str(url)[3:-2])
            yield Request(url=url, callback=self.parse_url2, meta={'item': item})

    def parse_url2(self, response):
        hxs = HtmlXPathSelector(response)
        item = response.meta['item']
        fullReview = hxs.select('//div[@class = "user_review_full"]')
        item["review"] = fullReview.select('p/text()').extract()
        yield item
Also see the documentation.
Hope that helps.
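As a side note, newer Scrapy releases (1.7 and later) also support Request.cb_kwargs for handing data between callbacks, which avoids going through response.meta; a minimal sketch of that variant (same item and selectors as above, only the hand-off changes):

# in parse(), replace the yielded Request with:
yield Request(url=url, callback=self.parse_url2, cb_kwargs={'item': item})

def parse_url2(self, response, item):
    # the item arrives as a keyword argument instead of via response.meta
    item["review"] = response.xpath('//div[@class = "user_review_full"]/p/text()').extract()
    yield item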

Scrapy, recursive crawling with different XPathSelector

Good evening, and thanks for the help.
I am digging through Scrapy; I need to get information from a website and recreate the same tree structure as the site.
example:
books [
    python [
        first [
            title = 'Title'
            author = 'John Doe'
            price = '200'
        ]
        first [
            title = 'Other Title'
            author = 'Mary Doe'
            price = '100'
        ]
    ]
    php [
        first [
            title = 'PhpTitle'
            author = 'John Smith'
            price = '100'
        ]
        first [
            title = 'Php Other Title'
            author = 'Mary Smith'
            price = '300'
        ]
    ]
]
From the tutorial I have correctly built my basic spider:
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from pippo.items import PippoItem

class PippoSpider(BaseSpider):
    name = "pippo"
    allowed_domains = ["www.books.net"]
    start_urls = [
        "http://www.books.net/index.php"
    ]

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('//div[@id="28008_LeftPane"]/div/ul/li')
        items = []
        for site in sites:
            item = PippoItem()
            item['subject'] = site.select('a/b/text()').extract()
            item['link'] = site.select('a/@href').extract()
            items.append(item)
        return items
My problem is that each level of my structure sits one level deeper in the site: if at the top level I get the book subjects, I then need to crawl the corresponding item['link'] to get the other items. On the next URLs I will need a different HtmlXPathSelector to correctly extract my data, and so on until the end of the structure.
Could you please put me on the right track?
Thank you.
You will need to make the Requests for each link manually (also see CrawlSpider):
from urlparse import urljoin
from scrapy.http import Request
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from pippo.items import PippoItem

class PippoSpider(BaseSpider):
    name = "pippo"
    allowed_domains = ["www.books.net"]
    start_urls = ["http://www.books.net/"]

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('//div[@id="28008_LeftPane"]/div/ul/li')
        for site in sites:
            item = PippoItem()
            item['subject'] = site.select('.//text()').extract()
            item['link'] = site.select('.//a/@href').extract()
            link = item['link'][0] if len(item['link']) else None
            if link:
                yield Request(urljoin(response.url, link),
                              callback=self.parse_link,
                              errback=lambda _: item,
                              meta=dict(item=item),
                              )
            else:
                yield item

    def parse_link(self, response):
        item = response.meta.get('item')
        item['alsothis'] = 'more data'
        return item
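If the tree goes deeper than two levels, the same pattern simply repeats: each callback pulls the item from meta, adds its level's data, and either yields the item or schedules the next Request. A rough sketch of a parse_link variation that drills one level further, reusing the imports above (the selectors and the title field are placeholders, not taken from the real site):

def parse_link(self, response):
    item = response.meta.get('item')
    hxs = HtmlXPathSelector(response)
    # placeholder selector for the links to the next level of the tree
    links = hxs.select('//ul/li/a/@href').extract()
    for link in links:
        child = PippoItem(item)  # copy the parent's fields so branches don't overwrite each other
        yield Request(urljoin(response.url, link),
                      callback=self.parse_book,
                      meta=dict(item=child))

def parse_book(self, response):
    item = response.meta.get('item')
    hxs = HtmlXPathSelector(response)
    # placeholder field; it would need to be declared on PippoItem
    item['title'] = hxs.select('//h1/text()').extract()
    return item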
