Scrapy's JSON output forms an array of JSON objects - python

I'm trying to scrape a games info website using Scrapy. The scraping process goes like this: scrape the categories -> scrape the list of games (multiple pages per category) -> scrape the game info.
The scraped info is supposed to go into a JSON file. I'm getting the following result:
[
{"category": "cat1", "games": [...]},
{"category": "cat2", "games": [...]},
...
]
but I want to get this result:
{ "categories":
[
{"category": "cat1", "games": [...]},
{"category": "cat2", "games": [...]},
...
]
}
I tried to use the steps from this post and this post, with no success, and couldn't find more related questions.
I would appreciate any help.
My spider:
import scrapy
from ..items import Category, Game

class GamesSpider(scrapy.Spider):
    name = 'games'
    start_urls = ['https://www.example.com/categories']
    base_url = 'https://www.example.com'

    def parse(self, response):
        categories = response.xpath("...")
        for category in categories:
            cat_name = category.xpath(".//text()").get()
            url = self.base_url + category.xpath(".//@href").get()
            cat = Category()
            cat['category'] = cat_name
            yield response.follow(url=url,
                                  callback=self.parse_category,
                                  meta={'category': cat})

    def parse_category(self, response):
        games_url_list = response.xpath('//.../a/@href').getall()
        cat = response.meta['category']
        url = self.base_url + games_url_list.pop()
        next_page = response.xpath('//a[...]/@href').get()
        if next_page:
            next_page = self.base_url + next_page
        yield response.follow(url=url,
                              callback=self.parse_game,
                              meta={'category': cat,
                                    'games_url_list': games_url_list,
                                    'next_page': next_page})

    def parse_game(self, response):
        cat = response.meta['category']
        game = Game()
        try:
            cat['games_list']
        except KeyError:
            cat['games_list'] = []
        game['title_en'] = response.xpath('...')
        game['os'] = response.xpath('...')
        game['users_rating'] = response.xpath('...')
        cat['games_list'].append(game)
        games_url_list = response.meta['games_url_list']
        next_page = response.meta['next_page']
        if games_url_list:
            url = self.base_url + games_url_list.pop()
            yield response.follow(url=url,
                                  callback=self.parse_game,
                                  meta={'category': cat,
                                        'games_url_list': games_url_list,
                                        'next_page': next_page})
        else:
            if next_page:
                yield response.follow(url=next_page,
                                      callback=self.parse_category,
                                      meta={'category': cat})
            else:
                yield cat
My items.py file:
import scrapy

class Category(scrapy.Item):
    category = scrapy.Field()
    games_list = scrapy.Field()

class Game(scrapy.Item):
    title_en = scrapy.Field()
    os = scrapy.Field()
    users_rating = scrapy.Field()

You need to write a custom item exporter, or handle post-processing of the file generated by Scrapy separately, e.g. with a standalone Python script that converts from the output format to the desired format.
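A minimal sketch of the custom-exporter route, assuming a project package named myproject with an exporters.py module (both names are placeholders): subclass the built-in JsonItemExporter and override only the methods that write the opening and closing brackets, then register it under FEED_EXPORTERS and reference it from the FEEDS setting (Scrapy 2.1+).

# myproject/exporters.py  (module path and class name are placeholders)
from scrapy.exporters import JsonItemExporter

class WrappedJsonItemExporter(JsonItemExporter):
    """Write the usual JSON array, but wrapped in {"categories": [...]}."""

    def start_exporting(self):
        self.file.write(b'{"categories": [')

    def finish_exporting(self):
        self.file.write(b']}')


# settings.py
FEED_EXPORTERS = {
    'wrappedjson': 'myproject.exporters.WrappedJsonItemExporter',
}
FEEDS = {
    'games.json': {'format': 'wrappedjson'},
}

The post-processing route is equally short: after the crawl, json.load the array Scrapy produced and json.dump it back as {"categories": data}.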

Related

Scrapy not scraping links gathered from pagination

I am trying to scrape an e-commerce website for its products, and I am currently facing an issue: not all of the pages I get through pagination are visited. The links themselves are valid and reachable.
My spider code:
import scrapy
import json
from pbl.items import ShopCard

class SpidermaximaSpider(scrapy.Spider):
    name = 'spiderMaxima'
    allowed_domains = ['www.trobos.lt']
    start_urls = ['https://trobos.lt/prekes?vendor=MAXIMA']
    item = []
    list = [{
        'sid': 10,
        'name': 'Maxima',
        'domain': 'https://www.maxima.lt/',
        'imageurl': 'https://upload.wikimedia.org/wikipedia/commons/c/c1/Maxima_logo.svg',
        'product': item
    }]

    def __init__(self):
        self.declare_xpath()

    def declare_xpath(self):
        self.getAllItemsXpath = '//*[@id="category"]/div/div[1]/div/div[3]/div[4]/div/div/div/div/div/a/@href'
        self.TitleXpath = '//*[@id="product"]/section[1]/div[3]/section/div[2]/h1/text()'
        self.PriceXpath = '//*[@id="product"]/section[1]/div[3]/section/div[2]/div[1]/div/div[1]/div/div[1]/span/text()'

    def parse(self, response):
        for href in response.xpath(self.getAllItemsXpath):
            url = response.urljoin(href.extract())
            yield scrapy.Request(url=url, callback=self.parse_main_item, dont_filter=True)
        next_page = [response.url + '&page=' + str(x) for x in range(1, 193)]
        for page in next_page:
            print('-' * 100)
            print(page)
            print('-' * 100)
            url = page
            yield scrapy.Request(url, callback=self.parse)

    def parse_main_item(self, response):
        shop = ShopCard()
        Title = response.xpath(self.TitleXpath).extract_first()
        Link = response.url
        Image = 'https://upload.wikimedia.org/wikipedia/commons/c/c1/Maxima_logo.svg'
        Price = response.xpath(self.PriceXpath).extract_first()
        Price = Price.replace(',', '.')
        Price = float(Price.split(' ')[0])
        shop['item'] = {
            'title': Title,
            'link': Link,
            'image': Image,
            'price': Price
        }
        self.item.append(shop['item'])

    def closed(self, reason):
        with open("spiderMaxima.json", "w") as final:
            json.dump(self.list, final, indent=2, ensure_ascii=False)
I am using a list built with the range() function because, in the response (from scrapy shell's view(response)), the pagination buttons are wired to a script.
I have also tried several of the links in scrapy shell; the XPath outputs work, but the pages are still not getting scraped. What may be the issue? Are there other ways to deal with the pagination?
There are many things wrong with your code, and other things that can be improved. Please read the documentation carefully.
There's really no need to create xpath attributes.
You can write the xpaths much shorter.
You can build the start_urls list from the beginning.
You can let the feed exporter handle the JSON.
Here's an example; change it to your needs.
import scrapy

class ShopCard(scrapy.Item):
    item = scrapy.Field()

class SpidermaximaSpider(scrapy.Spider):
    name = 'spiderMaxima'
    allowed_domains = ['trobos.lt']
    start_urls = [f'https://trobos.lt/prekes?vendor=MAXIMA&page={i}' for i in range(1, 190)]
    items = []
    custom_settings = {
        'DOWNLOAD_DELAY': 0.4,
        'FEEDS': {
            'spiderMaxima.json': {
                'format': 'json',
                'indent': 2,
            }
        }
    }

    def parse(self, response):
        for url in response.xpath('//div[@class="card small"]//a[contains(@class, "shrink")]/@href').getall():
            yield response.follow(url=url, callback=self.parse_main_item)

    def parse_main_item(self, response):
        shop = ShopCard()
        Title = response.xpath('//h1/text()').get()
        Link = response.url
        Image = 'https://upload.wikimedia.org/wikipedia/commons/c/c1/Maxima_logo.svg'
        Price = response.xpath('//div[@class="price"]//span/text()').get()
        Price = Price.replace(',', '.')
        Price = float(Price.split(' ')[0])
        shop['item'] = {
            'title': Title,
            'link': Link,
            'image': Image,
            'price': Price
        }
        yield shop

scraping e-commerce website using scrapy concept

I'm new to Scrapy. I have written a script for an e-commerce website and need to scrape the details mentioned below from that website. I'm facing an issue with this script; please can anyone help me get past it?
Website: https://savedbythedress.com/collections/maternity-tops
import scrapy

class DressSpider(scrapy.Spider):
    name = 'dress'
    allowed_domains = ['savedbythedress.com']
    start_urls = ['https://savedbythedress.com/collections/maternity-tops']

    def parse(self, response):
        # scraped all product links
        domain = "https://savedbythedress.com"
        link_products = response.css('div[class="product-info-inner"] ::attr(href)').get()
        for link in link_products:
            product_link = domain + link
            yield {
                'product_link': product_link.css('div[class="product-info-inner"] ::attr(href)').get(),
            }
            yield scrapy.Request(url=product_link, callback=self.parse_contents)

    def parse_contents(self, response):
        # scrape needed information
        productlink = response.url
        yield {
            'product_title': response.css('.sbtd-product-title ::text').get(),
            'product_price': response.css('.product-price ::text').get(),
            'product_review': response.css('.Natsob ::text').getall()
        }
Use yield response.follow(page_url, self.parse_contents); it will work for you:
import scrapy

class DressSpider(scrapy.Spider):
    name = 'dress'
    allowed_domains = ['savedbythedress.com']
    start_urls = ['https://savedbythedress.com/collections/maternity-tops']

    def parse(self, response):
        # scraped all product links
        domain = "https://savedbythedress.com"
        # link_products = response.css('div[class="product-info-inner"] ::attr(href)').get()
        for link in response.css('div.product-info'):
            page_url = link.css('div[class="product-info-inner"] ::attr(href)').get()
            print('PAGE URL IS ', page_url)
            yield response.follow(page_url, self.parse_contents)
            # product_link = domain + link
            # yield {
            #     'product_link': link.css('div[class="product-info-inner"] ::attr(href)').get(),
            # }
            print(page_url)
        # yield scrapy.Request(response.follow(page_url), callback=self.parse_contents)

    def parse_contents(self, response):
        print()
        # scrape needed information
        print(response.url)
        productlink = response.url
        yield {
            'product_title': response.css('.sbtd-product-title ::text').get(),
            'product_price': response.css('.product-price ::text').get(),
            'product_review': response.css('.Natsob ::text').getall()
        }
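For reference, stripped of the commented-out lines and debug prints, the same working spider reduces to the sketch below (selectors taken unchanged from the answer above):

import scrapy

class DressSpider(scrapy.Spider):
    name = 'dress'
    allowed_domains = ['savedbythedress.com']
    start_urls = ['https://savedbythedress.com/collections/maternity-tops']

    def parse(self, response):
        # follow every product link found on the listing page
        for link in response.css('div.product-info'):
            page_url = link.css('div[class="product-info-inner"] ::attr(href)').get()
            yield response.follow(page_url, self.parse_contents)

    def parse_contents(self, response):
        # scrape the needed information from the product page
        yield {
            'product_title': response.css('.sbtd-product-title ::text').get(),
            'product_price': response.css('.product-price ::text').get(),
            'product_review': response.css('.Natsob ::text').getall(),
        }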

How can I Scrape next pages with Scrapy

Here is my scrapy code. I don't know my mistake, but it only scrapes the first page. How can I scrape and traverse through the pages? Is there any other way to scrape the next pages?
import scrapy

class HurriyetEmlakPage(scrapy.Spider):
    name = 'hurriyetspider'
    allowed_domain = 'hurriyetemlak.com'
    start_urls = ['https://www.hurriyetemlak.com/satilik']

    def parse(self, response):
        fiyat = response.xpath('//div[@class="list-view-price"]//text()').extract()
        durum = response.xpath('//div[@class="middle sibling"]//div[@class="left"]//text()').extract()
        oda_sayisi = response.xpath('//span[@class="celly houseRoomCount"]//text()').extract()
        metrekare = response.xpath('//span[@class="celly squareMeter list-view-size"]//text()').extract()
        bina_yasi = response.xpath('//span[@class="celly buildingAge"]//text()').extract()
        bulundugu_kat = response.xpath('//span[@class="celly floortype"]//text()').extract()
        konum = response.xpath('//div[@class="list-view-location"]//text()').extract()
        scraped_info = {
            'fiyat': fiyat,
            'durum': durum,
            'oda_sayisi': oda_sayisi,
            'metrekare': metrekare,
            'bina_yasi': bina_yasi,
            'bulundugu_kat': bulundugu_kat,
            'konum': konum
        }
        yield scraped_info
        next_page_url = response.xpath('//li[@class="next-li pagi-nav"]//a').extract_first()
        if next_page_url:
            next_page_url = response.urljoin(next_page_url)
            yield scrapy.Request(url=next_page_url, callback=self.parse)
Actually, you could simply generate your url list like this :
url_list = [f"https://www.hurriyetemlak.com/satilik?page={page}" for page in range(1,7326)]
Output
['https://www.hurriyetemlak.com/satilik?page=1',
'https://www.hurriyetemlak.com/satilik?page=2',
'https://www.hurriyetemlak.com/satilik?page=3',
'https://www.hurriyetemlak.com/satilik?page=4',
'https://www.hurriyetemlak.com/satilik?page=5',
...]
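One way to plug that list into the spider (a sketch along the lines of the answer, keeping the question's field extraction unchanged) is to make it the start_urls, so pagination no longer depends on finding a "next" link:

import scrapy

class HurriyetEmlakPage(scrapy.Spider):
    name = 'hurriyetspider'
    allowed_domains = ['hurriyetemlak.com']
    # one request per listing page, instead of following a "next" link
    start_urls = [f"https://www.hurriyetemlak.com/satilik?page={page}" for page in range(1, 7326)]

    def parse(self, response):
        # same field extraction as in the question; only the pagination changes
        ...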

Scrapy item saves only the last element in a loop

I'm using the Scrapy library to crawl data from a website.
I get the result from crawling the website and I want to save it to a database, using a Scrapy item and pipeline for that.
I get a list, so I need to use a for loop to save the items. But the problem is that only the last item in the list gets saved.
My code is as follows:
def parse(self, response):
    vehicles = []
    total_results = response.css('.cl-filters-summary-counter::text').extract_first().replace('.', '')
    reference_urls = []
    for url in response.css('.cldt-summary-titles'):
        reference_url = url.css("a::attr(href)").extract_first().strip(' \t\n\r')
        reference_urls.append(reference_url)
    ids = []
    for item in response.css('.cldt-summary-full-item'):
        car_id = item.css("::attr(id)").extract_first().strip(' \t\n\rli-')
        ids.append(car_id)
    prices = []
    for item in response.css('.cldt-price'):
        dirty_price = item.css("::text").extract_first().strip(' \t\n\r')
        comma = dirty_price.index(",-")
        price = dirty_price[2:comma].replace('.', '')
        prices.append(price)
    for item in zip(ids, reference_urls, prices):
        car = CarItem()
        car['reference'] = item[0]
        car['reference_url'] = item[1]
        car['data'] = ""
        car['price'] = item[2]
    return car
The result that I get from crawling is good. If in the for loop I instead do the following:
vehicles = []
for item in zip(ids, reference_urls, prices):
    scraped_info = {
        "reference": item[0],
        "reference_url": item[1],
        "price": item[2]
    }
    vehicles.append(scraped_info)
And if I print vehicles I get the right result:
[
{
"price": "4250",
"reference": "6784086e-1afb-216d-e053-e250040a033f",
"reference_url": "some-link-1"
},
{
"price": "4250",
"reference": "c05595ac-e49e-4b71-a436-868c192ef82c",
"reference_url": "some-link-2"
},
{
"price": "4900",
"reference": "444553f2-e8fd-41c9-9244-182668544e2a",
"reference_url": "some-link-3"
}
]
UPDATE
CarItem is just a scrapy item in items.py
class CarItem(scrapy.Item):
    # define the fields for your item here like:
    reference = scrapy.Field()
    reference_url = scrapy.Field()
    data = scrapy.Field()
    price = scrapy.Field()
Any idea what I'm doing wrong?
According to the Scrapy documentation, the parse method, as well as any other Request callback, must return an iterable of Requests and/or dicts or Item objects.
Also, according to the code example below that link:
import scrapy
from myproject.items import MyItem

class MySpider(scrapy.Spider):
    name = 'example.com'
    allowed_domains = ['example.com']

    def start_requests(self):
        yield scrapy.Request('http://www.example.com/1.html', self.parse)
        yield scrapy.Request('http://www.example.com/2.html', self.parse)
        yield scrapy.Request('http://www.example.com/3.html', self.parse)

    def parse(self, response):
        for h3 in response.xpath('//h3').extract():
            yield MyItem(title=h3)
        for url in response.xpath('//a/@href').extract():
            yield scrapy.Request(url, callback=self.parse)
We can see that we have to use yield to get proper results out of the parse function.
tl;dr: replace return in your last line with yield.
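Applied to the loop from the question, the fix looks like this (same fields, just yielding inside the loop):

for item in zip(ids, reference_urls, prices):
    car = CarItem()
    car['reference'] = item[0]
    car['reference_url'] = item[1]
    car['data'] = ""
    car['price'] = item[2]
    yield car  # one item per iteration, instead of a single return after the loop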

Passing arguments to callback functions with Scrapy, so the arguments can be received later, crashes

I'm trying to get this spider to work, and it works if I request the components to be scraped separately; however, when I try to use Scrapy's callback functions to receive the arguments later, it crashes. The goal is to crawl over multiple pages and scrape data, writing it to an output JSON file in the format:
author | album | title | lyrics
The data for each of these is located on separate web pages, which is why I'm trying to use Scrapy's callback functions to get that accomplished.
Each of the above fields is defined in Scrapy's items.py as:
import scrapy

class TutorialItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    author = scrapy.Field()
    album = scrapy.Field()
    title = scrapy.Field()
    lyrics = scrapy.Field()
The spider code starts here:
import scrapy
import re
import json
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from tutorial.items import TutorialItem

# urls class
class DomainSpider(scrapy.Spider):
    name = "domainspider"
    allowed_domains = ['www.domain.com']
    start_urls = [
        'http://www.domain.com',
    ]
    rules = (
        Rule(LinkExtractor(allow='www\.domain\.com/[A-Z][a-zA-Z_/]+$'),
             'parse', follow=True,
             ),
    )

    # Parsing starts here
    # crawling and scraping the links from the menu list
    def parse(self, response):
        links = response.xpath('//html/body/nav[1]/div/ul/li/div/a/@href')
        for link in links:
            next_page_link = link.extract()
            if next_page_link:
                next_page = response.urljoin(next_page_link)
                yield scrapy.Request(next_page, callback=self.parse_artist_page)

    # crawling and scraping artist names and links
    def parse_artist_page(self, response):
        artist_links = response.xpath('//*/div[contains(@class, "artist-col")]/a/@href')
        author = response.xpath('//*/div[contains(@class, "artist-col")]/a/text()').extract()
        item = TutorialItem(author=author)
        for link in artist_links:
            next_page_link = link.extract()
            if next_page_link:
                next_page = response.urljoin(next_page_link)
                yield scrapy.Request(next_page, callback=self.parse_album_page)
                request.meta['author'] = item
        yield item
        return

    # crawling and scraping album names and links
    def parse_album_page(self, response):
        album_links = response.xpath('//*/div[contains(@id, "listAlbum")]/a/@href')
        album = response.xpath('//*/div[contains(@class, "album")]/b/text()').extract()
        item = TutorialItem(album=album)
        for link in album_links:
            next_page_link = link.extract()
            if next_page_link:
                next_page = response.urljoin(next_page_link)
                yield scrapy.Request(next_page, callback=self.parse_lyrics_page)
                request.meta['album'] = item
        yield item
        return

    # crawling and scraping titles and lyrics
    def parse_lyrics_page(self, response):
        title = response.xpath('//html/body/div[3]/div/div[2]/b/text()').extract()
        lyrics = map(unicode.strip, response.xpath('//html/body/div[3]/div/div[2]/div[6]/text()').extract())
        item = response.meta['author', 'album']
        item = TutorialItem(author=author, album=album, title=title, lyrics=lyrics)
        yield item
The code crashes when it gets to the callback part:
request.meta['author'] = item
yield item
return
Can anyone help?
I found where the problem was: the way I had set up the callback functions. It works now:
# crawling and scraping artist names and links
def parse_artist_page(self, response):
    artist_links = response.xpath('//*/div[contains(@class, "artist-col")]/a/@href')
    author = response.xpath('//*/div[contains(@class, "artist-col")]/a/text()').extract()
    for link in artist_links:
        next_page_link = link.extract()
        if next_page_link:
            next_page = response.urljoin(next_page_link)
            request = scrapy.Request(next_page, callback=self.parse_album_page)
            request.meta['author'] = author
            return request

# crawling and scraping album names and links
def parse_album_page(self, response):
    author = response.meta.get('author')
    album_links = response.xpath('//*/div[contains(@id, "listAlbum")]/a/@href')
    album = response.xpath('//*/div[contains(@class, "album")]/b/text()').extract()
    for link in album_links:
        next_page_link = link.extract()
        if next_page_link:
            next_page = response.urljoin(next_page_link)
            request = scrapy.Request(next_page, callback=self.parse_lyrics_page)
            request.meta['author'] = author
            request.meta['album'] = album
            return request

# crawling and scraping song titles and lyrics
def parse_lyrics_page(self, response):
    author = response.meta.get('author')
    album = response.meta.get('album')
    title = response.xpath('//html/body/div[3]/div/div[2]/b/text()').extract()
    lyrics = map(unicode.strip, response.xpath('//html/body/div[3]/div/div[2]/div[6]/text()').extract())
    item = TutorialItem(author=author, album=album, title=title, lyrics=lyrics)
    yield item
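As a side note, Scrapy 1.7+ also supports cb_kwargs, which passes values to the callback as explicit keyword arguments instead of through meta. A minimal sketch of the same author hand-off (method bodies abbreviated, to be placed inside the spider class):

def parse_artist_page(self, response):
    author = response.xpath('//*/div[contains(@class, "artist-col")]/a/text()').extract()
    for link in response.xpath('//*/div[contains(@class, "artist-col")]/a/@href').extract():
        yield scrapy.Request(response.urljoin(link),
                             callback=self.parse_album_page,
                             cb_kwargs={'author': author})

def parse_album_page(self, response, author):
    # 'author' arrives as a keyword argument instead of via response.meta
    ...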
