I am a newbie to Scrapy. I am trying to download an image.
import scrapy
from scrapy.http import Request

class PlayerSpider(scrapy.Spider):
    name = 'player'
    #allowed_domains = ['nba.com/players']
    start_urls = ['http://www.nba.com/players/']

    def parse(self, response):
        Player_Name = response.css('div#content.nba-player-index a ::attr(title)').extract()
        Player_link = response.css('.nba-player-index__trending-item a::attr(href)').extract()
        links = [url for url in Player_link if url.startswith("/players")]
        for link in links:
            absolute_url = response.urljoin(link)
            yield Request(absolute_url, callback=self.parse_players)

    def parse_players(self, response):
        Player_Name = response.css('section.nba-player-header__details-bottom ::text').extract()
        items = []
        for images in Player_Name:
            item = PlayerSpider()
            images_link = response.css('section.nba-detail-header-wrapper .nba-player-header__headshot img::attr(src)').extract_first()
            image_urls = 'http:{}'.format(images_link)
            item[image_urls]
            return item
        Player_Height = response.css('section.nba-player-vitals__top-left.small-6 p.nba-player-vitals__top-info-imperial ::text').extract()
        Player_Weight = response.css('section.nba-player-vitals__top-right.small-6 p.nba-player-vitals__top-info-imperial ::text').extract()
        yield {
            'Player_name': Player_Name,
            'Player_Height': Player_Height,
            'Player Weight': Player_Weight
        }
I think the other files are fine, but I am unable to write a correct spider for getting the image. I can grab the image URL, but I don't know how to store the image using ImagesPipeline.
items.py
import scrapy
from scrapy.item import Item

class PlayerSpider(scrapy.Item):
    image_url = scrapy.Field()
    images = scrapy.Field()
    pass
To enable your image pipeline you must first add it to your project ITEM_PIPELINES setting.
settings.py:
ITEM_PIPELINES = {'scrapy.pipelines.images.ImagesPipeline': 1}
IMAGES_STORE = 'images'
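With the pipeline enabled, the spider also has to yield an item whose image_urls field holds a list of absolute URLs: by default ImagesPipeline reads image_urls and writes its download results back to images, so the image_url field declared above would be ignored. A minimal sketch under those assumptions (PlayerItem is a hypothetical item name, and the selectors are copied from the question):
import scrapy

class PlayerItem(scrapy.Item):          # hypothetical name; use your own item class
    image_urls = scrapy.Field()          # input field read by ImagesPipeline
    images = scrapy.Field()              # output field written by ImagesPipeline

class PlayerSpider(scrapy.Spider):
    name = 'player'
    start_urls = ['http://www.nba.com/players/']

    def parse(self, response):
        for link in response.css('.nba-player-index__trending-item a::attr(href)').extract():
            if link.startswith('/players'):
                yield scrapy.Request(response.urljoin(link), callback=self.parse_players)

    def parse_players(self, response):
        item = PlayerItem()
        headshot = response.css(
            'section.nba-detail-header-wrapper '
            '.nba-player-header__headshot img::attr(src)').extract_first()
        if headshot:
            # the site serves protocol-relative URLs, so prepend a scheme
            item['image_urls'] = ['http:{}'.format(headshot)]
        yield item
With this in place, the downloaded files end up under IMAGES_STORE and the images field on each item records where they were saved.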
I am trying to scrape data from multiple pages. I have already written a scraper that can scrape data from a single page, but it suddenly finishes after scraping the first page.
The whole file, with the parse and scrape functions - Scraper.py
# -*- coding: utf-8 -*-
import scrapy
import csv
import os
from scrapy.selector import Selector
from scrapy import Request

class Proddduct(scrapy.Item):
    price = scrapy.Field()
    description = scrapy.Field()
    link = scrapy.Field()
    content = scrapy.Field()

class LapadaScraperSpider(scrapy.Spider):
    name = 'lapada_scraper2'
    allowed_domains = ['http://www.lapada.org']
    start_urls = ['https://lapada.org/art-and-antiques/?search=antique']

    def parse(self, response):
        next_page_url = response.xpath("//ul/li[@class='next']//a/@href").get()
        for item in self.scrape(response):
            yield item
        if next_page_url:
            print("Found url: {}".format(next_page_url))
            yield scrapy.Request(url=next_page_url, callback=self.parse)

    def scrape(self, response):
        parser = scrapy.Selector(response)
        products = parser.xpath("//div[@class='content']")
        for product in products:
            item = Proddduct()
            XPATH_PRODUCT_DESCRIPTION = ".//strong/text()"
            XPATH_PRODUCT_PRICE = ".//div[@class='price']/text()"
            XPATH_PRODUCT_LINK = ".//a/@href"
            raw_product_description = product.xpath(XPATH_PRODUCT_DESCRIPTION).extract()
            raw_product_price = product.xpath(XPATH_PRODUCT_PRICE).extract()
            raw_product_link = product.xpath(XPATH_PRODUCT_LINK).extract_first()
            item['description'] = raw_product_description
            item['price'] = raw_product_price
            item['link'] = raw_product_link
            yield item

    def get_information(self, response):
        item = response.meta['item']
        item['phonenumber'] = "12345"
        yield item
How can I scrape all items on all pages?
Thanks
Change allowed_domains = ['http://www.lapada.org'] to allowed_domains = ['lapada.org']. With the scheme and www. included, the hostnames of the next-page URLs never match the allowed domain, so the offsite middleware silently drops every follow-up request, while the start URL (which is never filtered) still gets scraped. That is why the spider stops after the first page.
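For reference, a sketch of the corrected spider header, assuming everything else stays as posted:
class LapadaScraperSpider(scrapy.Spider):
    name = 'lapada_scraper2'
    # bare domain only: the offsite middleware compares hostnames, not full URLs
    allowed_domains = ['lapada.org']
    start_urls = ['https://lapada.org/art-and-antiques/?search=antique']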
Scrapy super noob here. Problem: I have an HTML page that contains both information that I want to scrape and a URL that I want to follow to get image URLs for images that I want to download and save via the Scrapy image pipeline.
My approach to achieve this:
1. Scrape all the details as usual with a parse method.
2. Find the URL on the initial page and create a request with a second parse method as callback, where I build the image_urls list.
So, I have the following setup:
settings.py
...
ITEM_PIPELINES = {
    'crawlbot.pipelines.MybotPipeline': 300,
    'scrapy.pipelines.images.ImagesPipeline': 1,
}
IMAGES_STORE = '/url/to/images' #valid path to actual folder
...
pipelines.py
import pymongo

class MybotPipeline(object):
    def __init__(self):
        self.conn = pymongo.MongoClient('localhost', 27017)
        db = self.conn['libraries']
        self.collection = db['books']

    def process_item(self, item, spider):
        self.collection.insert(dict(item))
        return item
items.py
import scrapy

class MybotItem(scrapy.Item):
    url = scrapy.Field()
    title = scrapy.Field()
    images = scrapy.Field()
    image_urls = scrapy.Field()
    description = scrapy.Field()
crawler.py
import scrapy
from scrapy.spiders import CrawlSpider

class MySpider(CrawlSpider):
    name = 'myspider'
    allowed_domains = ['books.com']

    def start_requests(self):
        urls = [
            'https://www.books.com/some/url'
        ]
        custom_settings = {
            'DEPTH_LIMIT': 1
        }
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse_item)

    def parse_details(self, response):
        for image in enumerate(response.xpath('//div[contains(@class, "jumbotron")]/div')):
            image_urls = image.xpath('div[contains(@class, "jumbotron-image")]/img/@src').getall()

    def parse_item(self, response):
        for idx, list_item in enumerate(response.xpath('//div[contains(@class, "slider-wrapper")]')):
            anchor = list_item.xpath('div[contains(@class, "slider-section")]/div/a')
            slider_thumbnail = anchor.xpath('div[contains(@class, "slider-thumbnail")]')
            description = slider_thumbnail.xpath('div[contains(@class, "description-box")]')
            yield {
                'url': anchor.xpath('@href').get(),
                'description': description
            }
            details_page_urls = anchor.xpath('@href').getall()
            for details_page in details_page_urls:
                yield scrapy.Request(url=details_page, callback=self.parse_details)
This is not working, although with my little knowledge of both Scrapy and Python the second parse method should return a list of image_urls. So I have two questions: 1. Is there a better approach for my case? Maybe the whole issue is trying to do too much with one spider? 2. If the approach is OK, what am I doing wrong?
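For what it's worth, here is a minimal sketch (not a tested solution) of how the two callbacks could hand a finished item to ImagesPipeline: carry the half-built MybotItem to the details page in the request's meta and only yield it once image_urls is filled. It assumes MybotItem is imported from the project's items module (from crawlbot.items import MybotItem); the selectors and field names are taken from the question and may need adjusting:
def parse_item(self, response):
    for list_item in response.xpath('//div[contains(@class, "slider-wrapper")]'):
        anchor = list_item.xpath('div[contains(@class, "slider-section")]/div/a')
        item = MybotItem()
        item['url'] = anchor.xpath('@href').get()
        item['description'] = list_item.xpath(
            './/div[contains(@class, "description-box")]//text()').getall()
        details_page = anchor.xpath('@href').get()
        if details_page:
            # carry the partially built item along to the details page
            yield response.follow(details_page, callback=self.parse_details,
                                  meta={'item': item})

def parse_details(self, response):
    item = response.meta['item']
    # ImagesPipeline downloads whatever ends up in image_urls and records
    # the results in the item's images field
    item['image_urls'] = response.xpath(
        '//div[contains(@class, "jumbotron")]//img/@src').getall()
    yield item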
I am new to Scrapy. I am trying to download files using the media pipeline, but when I run the spider no files are stored in the folder.
spider:
import scrapy
from scrapy import Request
from pagalworld.items import PagalworldItem

class JobsSpider(scrapy.Spider):
    name = "songs"
    allowed_domains = ["pagalworld.me"]
    start_urls = ['https://pagalworld.me/category/11598/Latest%20Bollywood%20Hindi%20Mp3%20Songs%20-%202017.html']

    def parse(self, response):
        urls = response.xpath('//div[@class="pageLinkList"]/ul/li/a/@href').extract()
        for link in urls:
            yield Request(link, callback=self.parse_page, )

    def parse_page(self, response):
        songName = response.xpath('//li/b/a/@href').extract()
        for song in songName:
            yield Request(song, callback=self.parsing_link)

    def parsing_link(self, response):
        item = PagalworldItem()
        item['file_urls'] = response.xpath('//div[@class="menu_row"]/a[@class="touch"]/@href').extract()
        yield {"download_link": item['file_urls']}
Item file:
import scrapy

class PagalworldItem(scrapy.Item):
    file_urls = scrapy.Field()
Settings File:
BOT_NAME = 'pagalworld'
SPIDER_MODULES = ['pagalworld.spiders']
NEWSPIDER_MODULE = 'pagalworld.spiders'
ROBOTSTXT_OBEY = True
CONCURRENT_REQUESTS = 5
DOWNLOAD_DELAY = 3
ITEM_PIPELINES = {
    'scrapy.pipelines.files.FilesPipeline': 1
}
FILES_STORE = '/tmp/media/'
The output looks like this:
def parsing_link(self, response):
    item = PagalworldItem()
    item['file_urls'] = response.xpath('//div[@class="menu_row"]/a[@class="touch"]/@href').extract()
    yield {"download_link": item['file_urls']}
You are yielding:
yield {"download_link": ['http://someurl.com']}
whereas for Scrapy's media/files pipeline to work you need to yield an item that contains a file_urls field. So try this instead:
def parsing_link(self, response):
    item = PagalworldItem()
    item['file_urls'] = response.xpath('//div[@class="menu_row"]/a[@class="touch"]/@href').extract()
    yield item
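One hedged follow-up: downloading works with file_urls alone, but FilesPipeline will only record the download results (local path, checksum, original URL) on the item if a files field is declared, so the item definition could optionally be extended like this:
import scrapy

class PagalworldItem(scrapy.Item):
    file_urls = scrapy.Field()   # input: URLs for FilesPipeline to fetch
    files = scrapy.Field()       # output: filled in by FilesPipeline after download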
I am trying to get this spider to work. If I request the components to be scraped separately it works, but when I try to use a Scrapy callback function to receive the arguments later, it crashes. The goal is to crawl over multiple pages and scrape data, writing it to an output JSON file in the format:
author | album | title | lyrics
The data for each is located on separate web pages, which is why I'm trying to use the Scrapy callback function to accomplish this.
Each of the above items is defined in the Scrapy items.py as:
import scrapy

class TutorialItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    author = scrapy.Field()
    album = scrapy.Field()
    title = scrapy.Field()
    lyrics = scrapy.Field()
The spider code starts here:
import scrapy
import re
import json
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from tutorial.items import TutorialItem

# urls class
class DomainSpider(scrapy.Spider):
    name = "domainspider"
    allowed_domains = ['www.domain.com']
    start_urls = [
        'http://www.domain.com',
    ]
    rules = (
        Rule(LinkExtractor(allow='www\.domain\.com/[A-Z][a-zA-Z_/]+$'),
             'parse', follow=True,
             ),
    )

    # Parsing start here
    # crawling and scraping the links from menu list
    def parse(self, response):
        links = response.xpath('//html/body/nav[1]/div/ul/li/div/a/@href')
        for link in links:
            next_page_link = link.extract()
            if next_page_link:
                next_page = response.urljoin(next_page_link)
                yield scrapy.Request(next_page, callback=self.parse_artist_page)

    # crawling and scraping artist names and links
    def parse_artist_page(self, response):
        artist_links = response.xpath('//*/div[contains(@class, "artist-col")]/a/@href')
        author = response.xpath('//*/div[contains(@class, "artist-col")]/a/text()').extract()
        item = TutorialItem(author=author)
        for link in artist_links:
            next_page_link = link.extract()
            if next_page_link:
                next_page = response.urljoin(next_page_link)
                yield scrapy.Request(next_page, callback=self.parse_album_page)
                request.meta['author'] = item
                yield item
        return

    # crawling and scraping album names and links
    def parse_album_page(self, response):
        album_links = response.xpath('//*/div[contains(@id, "listAlbum")]/a/@href')
        album = response.xpath('//*/div[contains(@class, "album")]/b/text()').extract()
        item = TutorialItem(album=album)
        for link in album_links:
            next_page_link = link.extract()
            if next_page_link:
                next_page = response.urljoin(next_page_link)
                yield scrapy.Request(next_page, callback=self.parse_lyrics_page)
                request.meta['album'] = item
                yield item
        return

    # crawling and scraping titles and lyrics
    def parse_lyrics_page(self, response):
        title = response.xpath('//html/body/div[3]/div/div[2]/b/text()').extract()
        lyrics = map(unicode.strip, response.xpath('//html/body/div[3]/div/div[2]/div[6]/text()').extract())
        item = response.meta['author', 'album']
        item = TutorialItem(author=author, album=album, title=title, lyrics=lyrics)
        yield item
The code crashes when it gets to the callback function:
request.meta['author'] = item
yield item
return
Can anyone help?
I found where the problem was: the way I had set up the callback function. It works now:
# crawling and scraping artist names and links
def parse_artist_page(self, response):
    artist_links = response.xpath('//*/div[contains(@class, "artist-col")]/a/@href')
    author = response.xpath('//*/div[contains(@class, "artist-col")]/a/text()').extract()
    for link in artist_links:
        next_page_link = link.extract()
        if next_page_link:
            next_page = response.urljoin(next_page_link)
            request = scrapy.Request(next_page, callback=self.parse_album_page)
            request.meta['author'] = author
            return request

# crawling and scraping album names and links
def parse_album_page(self, response):
    author = response.meta.get('author')
    album_links = response.xpath('//*/div[contains(@id, "listAlbum")]/a/@href')
    album = response.xpath('//*/div[contains(@class, "album")]/b/text()').extract()
    for link in album_links:
        next_page_link = link.extract()
        if next_page_link:
            next_page = response.urljoin(next_page_link)
            request = scrapy.Request(next_page, callback=self.parse_lyrics_page)
            request.meta['author'] = author
            request.meta['album'] = album
            return request

# crawling and scraping song titles and lyrics
def parse_lyrics_page(self, response):
    author = response.meta.get('author')
    album = response.meta.get('album')
    title = response.xpath('//html/body/div[3]/div/div[2]/b/text()').extract()
    lyrics = map(unicode.strip, response.xpath('//html/body/div[3]/div/div[2]/div[6]/text()').extract())
    item = TutorialItem(author=author, album=album, title=title, lyrics=lyrics)
    yield item
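One hedged side note on the code above: return request exits the callback at the first matching link, so only one album page per artist (and one lyrics page per album) is followed. If the intent is to follow every link, the same meta pattern works with yield, for example:
def parse_artist_page(self, response):
    author = response.xpath('//*/div[contains(@class, "artist-col")]/a/text()').extract()
    for link in response.xpath('//*/div[contains(@class, "artist-col")]/a/@href').extract():
        # yield (rather than return) so every artist link is followed
        yield scrapy.Request(response.urljoin(link),
                             callback=self.parse_album_page,
                             meta={'author': author})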
I am new to Scrapy and am currently learning how to scrape information from a list of scraped URLs. I have been able to scrape information from a single URL by going through the tutorial on the Scrapy website. However, I am facing problems scraping information from the list of URLs scraped from that URL, even after googling for a solution online.
The scraper I have written below is able to scrape from the first URL, but it is unsuccessful in scraping from the list of scraped URLs. The problem starts at def parse_following_urls(self, response):, where I am unable to scrape from the list of scraped URLs.
Can anyone help to solve this? Thanks in advance.
import scrapy
from scrapy.http import Request

class SET(scrapy.Item):
    title = scrapy.Field()
    open = scrapy.Field()
    hi = scrapy.Field()
    lo = scrapy.Field()
    last = scrapy.Field()
    bid = scrapy.Field()
    ask = scrapy.Field()
    vol = scrapy.Field()
    exp = scrapy.Field()
    exrat = scrapy.Field()
    exdat = scrapy.Field()

class ThaiSpider(scrapy.Spider):
    name = "warrant"
    allowed_domains = ["marketdata.set.or.th"]
    start_urls = ["http://marketdata.set.or.th/mkt/stocklistbytype.do?market=SET&language=en&country=US&type=W"]

    def parse(self, response):
        for sel in response.xpath('//table[@class]/tbody/tr'):
            item = SET()
            item['title'] = sel.xpath('td[1]/a[contains(@href,"ssoPageId")]/text()').extract()
            item['open'] = sel.xpath('td[3]/text()').extract()
            item['hi'] = sel.xpath('td[4]/text()').extract()
            item['lo'] = sel.xpath('td[5]/text()').extract()
            item['last'] = sel.xpath('td[6]/text()').extract()
            item['bid'] = sel.xpath('td[9]/text()').extract()
            item['ask'] = sel.xpath('td[10]/text()').extract()
            item['vol'] = sel.xpath('td[11]/text()').extract()
            yield item
        urll = response.xpath('//table[@class]/tbody/tr/td[1]/a[contains(@href,"ssoPageId")]/@href').extract()
        urls = ["http://marketdata.set.or.th/mkt/" + i for i in urll]
        for url in urls:
            request = scrapy.Request(url, callback=self.parse_following_urls, dont_filter=True)
            yield request
            request.meta['item'] = item

    def parse_following_urls(self, response):
        for sel in response.xpath('//table[3]/tbody'):
            item = response.meta['item']
            item['exp'] = sel.xpath('tr[1]/td[2]/text()').extract()
            item['exrat'] = sel.xpath('tr[2]/td[2]/text()').extract()
            item['exdat'] = sel.xpath('tr[3]/td[2]/text()').extract()
            yield item
I have rewritten the code after trying the suggestions given and looking at the output. Below is the edited code. However, I got another error stating Request url must be str or unicode, got list. How do I convert the URL from a list to a string?
I thought the URL would already be a string since it is inside a for loop. I have added this as a comment in the code below. Is there any way to solve this?
import scrapy
from scrapy.http import Request

class SET(scrapy.Item):
    title = scrapy.Field()
    open = scrapy.Field()
    hi = scrapy.Field()
    lo = scrapy.Field()
    last = scrapy.Field()
    bid = scrapy.Field()
    ask = scrapy.Field()
    vol = scrapy.Field()
    exp = scrapy.Field()
    exrat = scrapy.Field()
    exdat = scrapy.Field()

class ThaiSpider(scrapy.Spider):
    name = "warrant"
    allowed_domains = ["marketdata.set.or.th"]
    start_urls = ["http://marketdata.set.or.th/mkt/stocklistbytype.do?market=SET&language=en&country=US&type=W"]

    def parse(self, response):
        for sel in response.xpath('//table[@class]/tbody/tr'):
            item = SET()
            item['title'] = sel.xpath('td[1]/a[contains(@href,"ssoPageId")]/text()').extract()
            item['open'] = sel.xpath('td[3]/text()').extract()
            item['hi'] = sel.xpath('td[4]/text()').extract()
            item['lo'] = sel.xpath('td[5]/text()').extract()
            item['last'] = sel.xpath('td[6]/text()').extract()
            item['bid'] = sel.xpath('td[9]/text()').extract()
            item['ask'] = sel.xpath('td[10]/text()').extract()
            item['vol'] = sel.xpath('td[11]/text()').extract()
            url = ["http://marketdata.set.or.th/mkt/"] + sel.xpath('td[1]/a[contains(@href,"ssoPageId")]/@href').extract()
            request = scrapy.Request(url, callback=self.parse_following_urls, dont_filter=True)  # Request url must be str or unicode, got list: How to solve this?
            request.meta['item'] = item
            yield item
            yield request

    def parse_following_urls(self, response):
        for sel in response.xpath('//table[3]/tbody'):
            item = response.meta['item']
            item['exp'] = sel.xpath('tr[1]/td[2]/text()').extract()
            item['exrat'] = sel.xpath('tr[2]/td[2]/text()').extract()
            item['exdat'] = sel.xpath('tr[3]/td[2]/text()').extract()
            yield item
I see what you are trying to do here; it's called chaining requests.
What this means is that you keep yielding Requests while carrying your partially filled Item along in each Request's meta attribute.
For your case, all you need to do is, instead of yielding the Item, yield a Request with the item in it. Change your parse to:
def parse(self, response):
    for sel in response.xpath('//table[@class]/tbody/tr'):
        item = SET()
        item['title'] = sel.xpath('td[1]/a[contains(@href,"ssoPageId")]/text()').extract()
        item['open'] = sel.xpath('td[3]/text()').extract()
        item['hi'] = sel.xpath('td[4]/text()').extract()
        item['lo'] = sel.xpath('td[5]/text()').extract()
        item['last'] = sel.xpath('td[6]/text()').extract()
        item['bid'] = sel.xpath('td[9]/text()').extract()
        item['ask'] = sel.xpath('td[10]/text()').extract()
        item['vol'] = sel.xpath('td[11]/text()').extract()
    urll = response.xpath('//table[@class]/tbody/tr/td[1]/a[contains(@href,"ssoPageId")]/@href').extract()
    urls = ["http://marketdata.set.or.th/mkt/" + i for i in urll]
    for url in urls:
        yield scrapy.Request(url,
                             callback=self.parse_following_urls,
                             meta={'item': item})
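The callback on the receiving end then simply reads the carried item back out of response.meta, fills in the remaining fields, and yields the completed item, which is essentially the parse_following_urls already shown in the question. A minimal sketch:
def parse_following_urls(self, response):
    # the item filled in parse() travels with the request and comes back here
    item = response.meta['item']
    item['exp'] = response.xpath('//table[3]/tbody/tr[1]/td[2]/text()').extract()
    yield item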
I tried changing the fifth line from the end,
item = response.meta['item']
to
item = SET()
and then it works!
Actually I didn't really understand your "meta" way, since I have never used it to carry an item like that.