Whenever I put both the captions link and the transcription link in the start_urls variable, I get the caption price in both the captions and transcription variables, and then the transcription price in both variables again. Why does this happen, and how can I fix it?
import scrapy
from ..items import FetchingItem

class SiteFetching(scrapy.Spider):
    name = 'Site'
    start_urls = ['https://www.rev.com/freelancers/captions',
                  'https://www.rev.com/freelancers/transcription']

    def parse(self, response):
        items = FetchingItem()
        Transcription_price = response.css('#middle-benefit .mt1::text').extract()
        Caption_price = response.css('#middle-benefit .mt1::text').extract()
        items['Transcription_price'] = Transcription_price
        items['Caption_price'] = Caption_price
        yield items
I suspect you need a different class structure: make the requests sequential, passing the partially filled item from one callback to the next:
import scrapy
from scrapy import Request
from ..items import FetchingItem

class SiteFetching(scrapy.Spider):
    name = 'Site'
    start_urls = ['https://www.rev.com/freelancers/captions']

    def parse(self, response):
        items = FetchingItem()
        items['Caption_price'] = response.css('#middle-benefit .mt1::text').extract()
        # hand the half-filled item to the next request via meta
        yield Request('https://www.rev.com/freelancers/transcription',
                      self.parse_transcription, meta={'items': items})

    def parse_transcription(self, response):
        items = response.meta['items']
        items['Transcription_price'] = response.css('#middle-benefit .mt1::text').extract()
        yield items
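As a side note, on Scrapy 1.7+ you could pass the item with cb_kwargs instead of meta, which is the mechanism now recommended for callback data. A minimal sketch of just the handoff:

    def parse(self, response):
        items = FetchingItem()
        items['Caption_price'] = response.css('#middle-benefit .mt1::text').extract()
        # cb_kwargs delivers 'items' as a keyword argument to the callback
        yield Request('https://www.rev.com/freelancers/transcription',
                      self.parse_transcription, cb_kwargs={'items': items})

    def parse_transcription(self, response, items):
        items['Transcription_price'] = response.css('#middle-benefit .mt1::text').extract()
        yield items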
I am new to Scrapy and I am trying to crawl this page and get the prices of the items. The problem is that Scrapy is returning the prices out of order and I don't know why.
This is my simple code
import scrapy
from ..items import AmazonItem
from scrapy.http import Request
import time

class QuotesSpider(scrapy.Spider):
    name = "main"

    def start_requests(self):
        urls = [
            'https://www.amazon.com/best-sellers-movies-TV-DVD-Blu-ray/zgbs/movies-tv/ref=zg_bs_nav_0',
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        # amazon = AmazonItem()
        ol_response = response.xpath('//ol[@id="zg-ordered-list"]/li')
        for number_ra in range(0, 50):
            response_div = ol_response[number_ra]
            price = response_div.css(".p13n-sc-price::text").extract()
            item_name = response_div.xpath("span/div/span/a/div/text()").get().strip()
            link = response_div.xpath("span/div/span/a").attrib['href'].split('/')[3].split('?')[0]
            print("({}) {} , PRICE: {}".format(number_ra + 1, item_name, price))
            print(link + "\n")
The name and the id are in the correct order but not the prices.
Thanks, guys
You are doing it the wrong way: iterate over each item one by one instead of indexing into the list.
def parse(self, response):
    for item in response.xpath('//ol[@id="zg-ordered-list"]/li'):
        price = item.css(".p13n-sc-price::text").get()
        item_name = item.css(".p13n-sc-truncate.p13n-sc-line-clamp-1::text").get()
        link = response.urljoin(item.css(".a-link-normal::attr(href)").get())
        print("{} , PRICE: {}".format(item_name, price))
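If you want the data exported (e.g. with -o prices.json) rather than just printed, a small variation of the loop above yields plain dicts, which Scrapy accepts as items:

def parse(self, response):
    for item in response.xpath('//ol[@id="zg-ordered-list"]/li'):
        # yielding a dict lets Scrapy's feed exports serialize it directly
        yield {
            'name': item.css(".p13n-sc-truncate.p13n-sc-line-clamp-1::text").get(),
            'price': item.css(".p13n-sc-price::text").get(),
            'link': response.urljoin(item.css(".a-link-normal::attr(href)").get()),
        }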
I am scraping a news website with the Scrapy framework, but it seems to store only the last item scraped, repeated for every iteration of the loop.
I want to store the title, date, and link, which I scrape from the first page, and also store the whole news article. So I want to merge the article, which is stored as a list, into a single string.
Item code
import scrapy

class ScrapedItem(scrapy.Item):
    # define the fields for your item here like:
    title = scrapy.Field()
    source = scrapy.Field()
    date = scrapy.Field()
    paragraph = scrapy.Field()
Spider code
import scrapy
from ..items import ScrapedItem

class CBNCSpider(scrapy.Spider):
    name = 'kontan'
    start_urls = [
        'https://investasi.kontan.co.id/rubrik/28/Emiten'
    ]

    def parse(self, response):
        box_text = response.xpath("//ul/li/div[@class='ket']")
        items = ScrapedItem()
        for crawl in box_text:
            title = crawl.css("h1 a::text").extract()
            source = "https://investasi.kontan.co.id" + (crawl.css("h1 a::attr(href)").extract()[0])
            date = crawl.css("span.font-gray::text").extract()[0].replace("|", "")
            items['title'] = title
            items['source'] = source
            items['date'] = date
            yield scrapy.Request(url=source,
                                 callback=self.parseparagraph,
                                 meta={'item': items})

    def parseparagraph(self, response):
        items_old = response.meta['item']  # only last item stored
        paragraph = response.xpath("//p/text()").extract()
        items_old['paragraph'] = paragraph  # merge into single string
        yield items_old
I expect the date, title, and source to be updated through the loop, and the article to be merged into a single string so it can be stored in MySQL.
I defined an empty dictionary inside the loop and put those variables within it, so each request carries its own copy instead of all of them sharing one item. I've also made some minor changes to your XPath and CSS selectors to make them less error-prone. The script now works as desired:
import scrapy

class CBNCSpider(scrapy.Spider):
    name = 'kontan'
    start_urls = [
        'https://investasi.kontan.co.id/rubrik/28/Emiten'
    ]

    def parse(self, response):
        for crawl in response.xpath("//*[@id='list-news']//*[@class='ket']"):
            d = {}
            d['title'] = crawl.css("h1 > a::text").get()
            d['source'] = response.urljoin(crawl.css("h1 > a::attr(href)").get())
            d['date'] = crawl.css("span.font-gray::text").get().strip("|")
            yield scrapy.Request(
                url=d['source'],
                callback=self.parseparagraph,
                meta={'item': d}
            )

    def parseparagraph(self, response):
        items_old = response.meta['item']
        items_old['paragraph'] = response.xpath("//p/text()").getall()
        yield items_old
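Since you also want the article as one string for MySQL, you could join the extracted paragraphs in parseparagraph; a small sketch of just that callback:

def parseparagraph(self, response):
    items_old = response.meta['item']
    # join the list of <p> text nodes into a single cleaned-up string
    items_old['paragraph'] = " ".join(
        p.strip() for p in response.xpath("//p/text()").getall() if p.strip()
    )
    yield items_old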
I am using Scrapy to collect some data. My program collects 100 elements in one session; I need to limit it to 50, or to any arbitrary number. How can I do that? Any solution is welcome. Thanks in advance.
# -*- coding: utf-8 -*-
import re
import scrapy

class DmozItem(scrapy.Item):
    # define the fields for your item here like:
    link = scrapy.Field()
    attr = scrapy.Field()
    title = scrapy.Field()
    tag = scrapy.Field()

class DmozSpider(scrapy.Spider):
    name = "dmoz"
    allowed_domains = ["raleigh.craigslist.org"]
    start_urls = [
        "http://raleigh.craigslist.org/search/bab"
    ]
    BASE_URL = 'http://raleigh.craigslist.org/'

    def parse(self, response):
        links = response.xpath('//a[@class="hdrlnk"]/@href').extract()
        for link in links:
            absolute_url = self.BASE_URL + link
            yield scrapy.Request(absolute_url, callback=self.parse_attr)

    def parse_attr(self, response):
        match = re.search(r"(\w+)\.html", response.url)
        if match:
            item_id = match.group(1)
            url = self.BASE_URL + "reply/ral/bab/" + item_id
            item = DmozItem()
            item["link"] = response.url
            item["title"] = "".join(response.xpath("//span[@class='postingtitletext']//text()").extract())
            item["tag"] = "".join(response.xpath("//p[@class='attrgroup']/span/b/text()").extract()[0])
            return scrapy.Request(url, meta={'item': item}, callback=self.parse_contact)

    def parse_contact(self, response):
        item = response.meta['item']
        item["attr"] = "".join(response.xpath("//div[@class='anonemail']//text()").extract())
        return item
This is what the CloseSpider extension and the CLOSESPIDER_ITEMCOUNT setting were made for:
An integer which specifies a number of items. If the spider scrapes more than that amount of items and those items are passed by the item pipeline, the spider will be closed with the reason closespider_itemcount. If zero (or not set), spiders won't be closed by the number of passed items.
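For example, to stop the spider after roughly 50 items, you could set it project-wide (a per-spider variant follows in the next answer):

# settings.py
CLOSESPIDER_ITEMCOUNT = 50  # close the spider once ~50 items have passed the pipeline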
I tried alecxe's answer, but I had to combine all three limits to make it work, so I'm leaving it here in case someone else has the same issue. (CLOSESPIDER_ITEMCOUNT is a soft limit: requests already in flight are still processed, so capping CONCURRENT_REQUESTS reduces the overshoot.)
class GenericWebsiteSpider(scrapy.Spider):
    """This generic website spider extracts text from websites"""
    name = "generic_website"
    custom_settings = {
        'CLOSESPIDER_PAGECOUNT': 15,
        'CONCURRENT_REQUESTS': 15,
        'CLOSESPIDER_ITEMCOUNT': 15
    }
    ...
I'm scraping a collection of URLs, but they all lack the base of the URL, so I want to prepend the "start_url" as a base to each scraped URL.
Spider class:
class MySpider(BaseSpider):
    name = "teslanews"
    allowed_domains = ["teslamotors.com"]
    start_urls = ["http://www.teslamotors.com/blog"]

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        updates = hxs.xpath('//div[@class="blog-wrapper no-image"]')
        items = []
        for article in updates:
            item = TeslanewsItem()
            item["date"] = article.xpath('./div/span/span/text()').extract()
            item["title"] = article.xpath('./h2/a/text()').extract()
            item["url"] = article.xpath('./h2/a/@href').extract()
            items.append(item)
        return items
I can't do a simple item["url"] = article.xpath('./h2/a/@href').extract() + base with base = "http://www.teslamotors.com", because this adds the base to the end, and since extract() returns a list rather than a string, the concatenation happens letter by letter inside the for-loop, with each letter separated by commas.
I'm relatively new to Scrapy, so I don't know exactly which way to go with this.
from scrapy.spider import BaseSpider
from urlparse import urljoin

class MySpider(BaseSpider):
    name = "teslanews"
    allowed_domains = ["teslamotors.com"]
    base = "http://www.teslamotors.com/blog"
    start_urls = ["http://www.teslamotors.com/blog"]

    def parse(self, response):
        updates = response.xpath('//div[@class="blog-wrapper no-image"]')
        items = []
        for article in updates:
            item = TeslanewsItem()
            item["date"] = article.xpath('./div/span/span/text()').extract()
            item["title"] = article.xpath('./h2/a/text()').extract()
            item['url'] = urljoin(self.base, ''.join(article.xpath('./h2/a/@href').extract()))
            items.append(item)  # collect each item, otherwise the list stays empty
        return items
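Newer Scrapy versions also offer response.urljoin, which resolves a relative URL against the response's own URL without a separate import; a one-line sketch of the same assignment:

item['url'] = response.urljoin(''.join(article.xpath('./h2/a/@href').extract()))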
import scrapy
from ex.items import ExItem

class reddit(scrapy.Spider):
    """docstring for reddit"""
    name = "dmoz"
    allowed_domains = ["reddit.com"]
    start_urls = [
        "http://www.reddit.com/"]

    def parse(self, response):
        item = ExItem()
        item["title"] = response.xpath('//p[contains(@class,"title")]/a/text()').extract()
        item["rank"] = response.xpath('//span[contains(@class,"rank")]/text()').extract()
        item["votes_dislike"] = response.xpath('//div[contains(@class,"score dislikes")]/text()').extract()
        item["votes_unvoted"] = response.xpath('//div[contains(@class,"score unvoted")]/text()').extract()
        item["votes_likes"] = response.xpath('//div[contains(@class,"score likes")]/text()').extract()
        item["video_reference"] = response.xpath('//a[contains(@class,"thumbnail may-blank")]/@href').extract()
        item["image"] = response.xpath('//a[contains(@class,"thumbnail may-blank")]/img/@src').extract()
I am able to convert this into JSON, but in the output I am getting a bullet character in the JSON. How do I remove that and still keep the JSON format?
There are hidden elements that you don't see in the browser. Scrapy sees them.
You just need to search for the data inside the relevant part of the page (div with id="siteTable"):
def parse(self, response):
    # make a selector and search the fields inside it
    sel = response.xpath('//div[@id="siteTable"]')
    item = ExItem()
    item["title"] = sel.xpath('.//p[contains(@class,"title")]/a/text()').extract()
    item["rank"] = sel.xpath('.//span[contains(@class,"rank")]/text()').extract()
    item["votes_dislike"] = sel.xpath('.//div[contains(@class,"score dislikes")]/text()').extract()
    item["votes_unvoted"] = sel.xpath('.//div[contains(@class,"score unvoted")]/text()').extract()
    item["votes_likes"] = sel.xpath('.//div[contains(@class,"score likes")]/text()').extract()
    item["video_reference"] = sel.xpath('.//a[contains(@class,"thumbnail may-blank")]/@href').extract()
    item["image"] = sel.xpath('.//a[contains(@class,"thumbnail may-blank")]/img/@src').extract()
    return item
Tested; here is what I get for votes_likes, for example:
'votes_likes': [u'5340',
u'4041',
u'4080',
u'5055',
u'4385',
u'4784',
u'3842',
u'3734',
u'4081',
u'3731',
u'4580',
u'5279',
u'2540',
u'4345',
u'2068',
u'3715',
u'3249',
u'4232',
u'4025',
u'522',
u'2993',
u'2789',
u'3529',
u'3450',
u'3533'],
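If you would rather get one item per post than parallel lists, here is a sketch that iterates over each row inside siteTable (the "thing" class on the rows is an assumption about Reddit's old markup):

def parse(self, response):
    # one dict per post row instead of one item holding parallel lists
    for post in response.xpath('//div[@id="siteTable"]/div[contains(@class,"thing")]'):
        yield {
            'title': post.xpath('.//p[contains(@class,"title")]/a/text()').get(),
            'rank': post.xpath('.//span[contains(@class,"rank")]/text()').get(),
            'votes': post.xpath('.//div[contains(@class,"score unvoted")]/text()').get(),
        }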