Save downloaded files with custom names in scrapy - python

I am new to Scrapy. I downloaded some files using the code below, and I want to change the names of the downloaded files but I don't know how.
For example, I want to have a list containing names and use it to rename the files that I downloaded.
Any help will be appreciated.
my spider
import scrapy
from scrapy.loader import ItemLoader
from demo_downloader.items import DemoDownloaderItem
class FileDownloader(scrapy.Spider):
    name = "file_downloader"

    def start_requests(self):
        urls = [
            "https://www.data.gouv.fr/en/datasets/bases-de-donnees-annuelles-des-accidents-corporels-de-la-circulation-routiere-annees-de-2005-a-2019/#_"
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        for link in response.xpath('//article[@class = "card resource-card "]'):
            name = link.xpath('.//h4[@class="ellipsis"]/text()').extract_first()
            if ".csv" in name:
                loader = ItemLoader(item=DemoDownloaderItem(), selector=link)
                absolute_url = link.xpath(".//a[@class = 'btn btn-sm btn-primary']//@href").extract_first()
                loader.add_value("file_urls", absolute_url)
                loader.add_value("files", name)
                yield loader.load_item()
items.py
from scrapy.item import Field, Item
class DemoDownloaderItem(Item):
    file_urls = Field()
    files = Field()
pipelines.py
from itemadapter import ItemAdapter
class DemoDownloaderPipeline:
    def process_item(self, item, spider):
        return item
settings.py
BOT_NAME = 'demo_downloader'
SPIDER_MODULES = ['demo_downloader.spiders']
NEWSPIDER_MODULE = 'demo_downloader.spiders'
ROBOTSTXT_OBEY = False
ITEM_PIPELINES = {
    'scrapy.pipelines.files.FilesPipeline': 1
}
DOWNLOAD_TIMEOUT = 1200
FILES_STORE = "C:\\Users\\EL\\Desktop\\work\\demo_downloader"
MEDIA_ALLOW_REDIRECTS = True

Related

Same file downloads

I have a problem with my script: the same file name and the same PDF are downloaded over and over. I have checked my results without the download pipeline and the data is unique; it's only when I use the pipeline that it somehow produces duplicates.
Here's my script:
import scrapy
from environment.items import fcpItem
class fscSpider(scrapy.Spider):
    name = 'fsc'
    start_urls = ['https://fsc.org/en/members']

    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(
                url,
                callback=self.parse
            )
    def parse(self, response):
        content = response.xpath("(//div[@class='content__wrapper field field--name-field-content field--type-entity-reference-revisions field--label-hidden field__items']/div[@class='content__item even field__item'])[position() > 1]")
        loader = fcpItem()
        names_add = response.xpath(".//div[@class = 'field__item resource-item']/article//span[@class='media-caption file-caption']/text()").getall()
        url = response.xpath(".//div[@class = 'field__item resource-item']/article/div[@class='actions']/a//@href").getall()
        pdf = [response.urljoin(x) for x in url if x != '#']
        names = [x.split(' ')[0] for x in names_add]
        for nm, pd in zip(names, pdf):
            loader['names'] = nm
            loader['pdfs'] = [pd]
            yield loader
items.py
import scrapy
from scrapy.item import Field

class fcpItem(scrapy.Item):
    names = Field()
    pdfs = Field()
    results = Field()
pipelines.py
from scrapy.pipelines.files import FilesPipeline

class DownfilesPipeline(FilesPipeline):
    def file_path(self, request, response=None, info=None, item=None):
        items = item['names'] + '.pdf'
        return items
settings.py
from pathlib import Path
import os
BASE_DIR = Path(__file__).resolve().parent.parent
FILES_STORE = os.path.join(BASE_DIR, 'fsc')
ROBOTSTXT_OBEY = False
FILES_URLS_FIELD = 'pdfs'
FILES_RESULT_FIELD = 'results'
ITEM_PIPELINES = {
    'environment.pipelines.pipelines.DownfilesPipeline': 150
}
I am using CSS selectors instead of XPath.
In the Chrome dev tools panel, each PDF list item has a root div; under it are the PDF title and the anchor tag holding the file download URL.
Because the root and those two elements are related as children and siblings, an XPath expression is awkward here. A CSS selector can go straight from the root item to the element you want without spelling out the relationship path, so nested or grandchild elements are not a problem. It also avoids the index-synchronization issue of keeping a URL array and a title array aligned by position.
Two other key points: the URL path needs decoding, and file_urls must be set to a list even when it holds a single item.
fsc_spider.py
import scrapy
import urllib.parse
from quotes.items import fcpItem
class fscSpider(scrapy.Spider):
    name = 'fsc'
    start_urls = [
        'https://fsc.org/en/members',
    ]

    def parse(self, response):
        for book in response.css('div.field__item.resource-item'):
            url = urllib.parse.unquote(book.css('div.actions a::attr(href)').get(), encoding='utf-8', errors='replace')
            url_left = url[0:url.rfind('/')] + '/'
            title = book.css('span.media-caption.file-caption::text').get()
            item = fcpItem()
            item['original_file_name'] = title.replace(' ', '_')
            item['file_urls'] = ['https://fsc.org' + url_left + title.replace(' ', '%20')]
            yield item
items.py
import scrapy
class fcpItem(scrapy.Item):
    file_urls = scrapy.Field()
    files = scrapy.Field()
    original_file_name = scrapy.Field()
pipelines.py
import scrapy
from scrapy.pipelines.files import FilesPipeline
class fscPipeline(FilesPipeline):
    def file_path(self, request, response=None, info=None):
        file_name: str = request.url.split("/")[-1].replace('%20', '_')
        return file_name
settings.py
BOT_NAME = 'quotes'
FILES_STORE = 'downloads'
SPIDER_MODULES = ['quotes.spiders']
NEWSPIDER_MODULE = 'quotes.spiders'
FEED_EXPORT_ENCODING = 'utf-8'
ROBOTSTXT_OBEY = True
ITEM_PIPELINES = { 'quotes.pipelines.fscPipeline': 1}
File structure: (screenshot omitted)
Execution:
quotes> scrapy crawl fsc
Result: (screenshot omitted)
The problem is that you are overwriting the same scrapy item every iteration.
What you need to do is create a new item each time your parse method yields. I have tested this and confirmed that it produces the results you desire.
I added an inline note in my example below on the line that needs to be changed.
For example:
import scrapy
from environment.items import fcpItem
class fscSpider(scrapy.Spider):
    name = 'fsc'
    start_urls = ['https://fsc.org/en/members']

    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(
                url,
                callback=self.parse
            )

    def parse(self, response):
        content = response.xpath("(//div[@class='content__wrapper field field--name-field-content field--type-entity-reference-revisions field--label-hidden field__items']/div[@class='content__item even field__item'])[position() > 1]")
        names_add = response.xpath(".//div[@class = 'field__item resource-item']/article//span[@class='media-caption file-caption']/text()").getall()
        url = response.xpath(".//div[@class = 'field__item resource-item']/article/div[@class='actions']/a//@href").getall()
        pdf = [response.urljoin(x) for x in url if x != '#']
        names = [x.split(' ')[0] for x in names_add]
        for nm, pd in zip(names, pdf):
            loader = fcpItem()  # Here you create a new item each iteration
            loader['names'] = nm
            loader['pdfs'] = [pd]
            yield loader

Scrapy: How to store scraped data in different json files within one crawler run?

I'm using generic spiders with a list of multiple urls in the start_urls field.
Is it possible to export one json file for each URL?
As far as I know it's only possible to set one path to one specific output file.
Any ideas on how to solve this are appreciated!
EDIT: This is my spider class:
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class MySpider(CrawlSpider):
    name = 'my_spider'
    start_urls = ['www.domain1.com', 'www.domain2.com',
                  'www.domain3.com']
    custom_settings = {
        'FEED_EXPORT_ENCODING': 'utf-8',
        'DEPTH_LIMIT': '1',
        'FEED_URI': 'file:///C:/path/to/result.json',
    }
    rules = (
        Rule(LinkExtractor(allow=r"abc"), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        all_text = response.xpath("//p/text()").getall()
        yield {
            "text": " ".join(all_text),
            "url": response.url,
        }
First option
You can save the items in the spider itself, as in the Scrapy tutorial, for example:
import scrapy
import json
DICT = {
    'https://quotes.toscrape.com/page/1/': 'domain1.json',
    'https://quotes.toscrape.com/page/2/': 'domain2.json',
}

class MydomainSpider(scrapy.Spider):
    name = "mydomain"
    start_urls = [
        'https://quotes.toscrape.com/page/1/',
        'https://quotes.toscrape.com/page/2/',
    ]

    def parse(self, response):
        filename = DICT[response.url]
        with open(filename, 'w') as fp:
            json.dump({"content": response.body.decode("utf-8")}, fp)
The DICT variable is just for specifying the JSON filename but you can use the domain as the filename too.
Second option
You can try using process_item in pipelines.py as follows:
from scrapy.exporters import JsonItemExporter

class SaveJsonPipeline:
    def process_item(self, item, spider):
        filename = item['filename']
        del item['filename']
        JsonItemExporter(open(filename, "wb")).export_item(item)
        return item
item['filename'] stores the filename for each start URL. You need to set up items.py too, for example:
import scrapy
class MydomainItem(scrapy.Item):
    filename = scrapy.Field()
    content = scrapy.Field()
your spider:
import scrapy
from ..items import MydomainItem
DICT = {
    'https://quotes.toscrape.com/page/1/': 'domain1.json',
    'https://quotes.toscrape.com/page/2/': 'domain2.json',
}

class MydomainSpider(scrapy.Spider):
    name = 'mydomain'
    allowed_domains = ['mydomain.com']
    start_urls = [
        'https://quotes.toscrape.com/page/1/',
        'https://quotes.toscrape.com/page/2/',
    ]

    def parse(self, response):
        item = MydomainItem()
        item["filename"] = DICT[response.url]
        item["content"] = response.body.decode("utf-8")
        yield item
Before running you need to add the pipeline in your settings.
ITEM_PIPELINES = {
    'myproject.pipelines.SaveJsonPipeline': 300,
}

Xpath can't find element and return none

I'm facing a problem with this page:
https://www.ouedkniss.com/op%C3%A9rateur-sur-machine-bejaia-boudjellil-algerie-offres-d-emploi-d19820393
I want to scrape these elements:
Employeur : SARL UFMATP AZIEZ ET ASSOCIES
Poste : Opérateur sur machine
But when I use XPath to find the elements it can't see them and jumps to other elements:
'Titre': response.xpath("normalize-space(//h1[@id='Title']/text())").get(),
'Boss': response.xpath("//*[@id='Employeur']/span/text()").get(),
This script returns a value for 'Titre' but None for 'Boss'. I checked whether there is an iframe, but there isn't.
Any help would be appreciated.
import scrapy
import requests
from scrapy.http import Request
from urllib.parse import urljoin
from scrapy import Selector
from sidahmed.items import sidahmedItem
from scrapy.pipelines.files import FilesPipeline
from scrapy.http import HtmlResponse
from scrapy.utils.markup import remove_tags
custom_settings = {
    'ITEM_PIPELINES': {'sidahmed.pipelines.MyImagesPipeline': 1},
}
starting_number = 1
number_of_pages = 10

class sidahmed(scrapy.Spider):
    name = "sidahmed"
    allowed_domains = ["ouedkniss.com"]
    start_urls = ["https://www.ouedkniss.com/emploi_demandes/industrie-production/{}".format(i) for i in range(1)]

    def __init__(self):
        self.page_number = 1

    # def parse(self, response):
    #     print(self.page_number)
    #     print("----------")
    #     sel = Selector(response)
    #     titles = sel.xpath("//div[@class='magicCard']")
    #     if not titles:
    #         raise CloseSpider('No more pages')

    # def start_requests(self):
    #     for i in range(self.page_number, number_of_pages, +1):
    #         suivante = "[adressesite % i]"
    #         yield Request(suivante, callback=self.parse)

    def parse(self, response):
        urls = response.xpath("//a[@class = 'button button_details']/@href").extract()
        for p in urls:
            url = urljoin(response.url, p)
            yield scrapy.Request(url, callback=self.parse_annonces)

    def parse_annonces(self, response):
        for annonce in response.xpath("//div[@id='annonce']"):
            yield {
                'Titre': response.xpath("normalize-space(//h1[@id='Title']/text())").get(),
                # 'Boss': response.xpath("//*[@id='Sexe']/span[contains(., 'Homme')]").get(),
                'Boss': response.xpath("//*[@id='Employeur']/span/text()").get()
                # 'Ville': response.xpath("normalize-space(//h2[@class='country-wilaya']/text())").get(),
                # 'Annonceur': response.xpath("normalize-space(//p[@class='nom-entreprise orange']/text())").get(),
                # 'Prix': response.xpath("normalize-space(//span[@itemprop='price']/text())").get(),
                # 'Boitevitesse': response.xpath("normalize-space(//li[2][@class='col-md-6']/text())").get(),
                # 'Carburant': response.xpath("normalize-space(//li[3][@class='col-md-6']/text())").get(),
                # 'Annee': response.xpath("normalize-space(//li[4][@class='col-md-6']/text())").get(),
                # 'Etat': response.xpath("normalize-space(//li[5][@class='col-md-6']/text())").get(),
                # 'Statut': response.xpath("normalize-space(//p[@class='type-inscription orange']/text())").get(),
                # 'Description': response.xpath("normalize-space(//div[@id='Description']/node()/text())").get(),
                # 'Tel': response.xpath("normalize-space(//div[@class='contact-box']//p[last()]/text())").get(),
                # 'index': response.xpath("//span[@id='imgrightspan']/text()").get(),
                # 'image_urls': response.xpath("//ul[@id='foo3']//li//img/@src").extract()
            }

        # self.page_number += 1
        # yield Request(adressesite % self.page_number)
items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy
from scrapy.item import Item

class sidahmedItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
    Titre = scrapy.Field()
    Ville = scrapy.Field()
    Carburant = scrapy.Field()
    Boss = scrapy.Field()
    Prix = scrapy.Field()
    Statut = scrapy.Field()
    Description = scrapy.Field()
    Tel = scrapy.Field()
    index = scrapy.Field()
settings.py
# -*- coding: utf-8 -*-
# Scrapy settings for sidahmed project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'sidahmed'
SPIDER_MODULES = ['sidahmed.spiders']
NEWSPIDER_MODULE = 'sidahmed.spiders'
#ITEM_PIPELINES = {'sidahmed.pipelines.MyFilesPipeline': 1}
ITEM_PIPELINES = {'sidahmed.pipelines.MyImagesPipeline': 1}
#ITEM_PIPELINES = {'sidahmed.pipelines.CustomImagesPipeline': 1,}
IMAGES_STORE = "./images"
# Obey robots.txt rules
ROBOTSTXT_OBEY = False

Scrapy Media Pipeline ,files not downloading

I am new to Scrapy. I am trying to download files using the media pipeline, but when I run the spider no files are stored in the folder.
spider:
import scrapy
from scrapy import Request
from pagalworld.items import PagalworldItem
class JobsSpider(scrapy.Spider):
    name = "songs"
    allowed_domains = ["pagalworld.me"]
    start_urls = ['https://pagalworld.me/category/11598/Latest%20Bollywood%20Hindi%20Mp3%20Songs%20-%202017.html']

    def parse(self, response):
        urls = response.xpath('//div[@class="pageLinkList"]/ul/li/a/@href').extract()
        for link in urls:
            yield Request(link, callback=self.parse_page)

    def parse_page(self, response):
        songName = response.xpath('//li/b/a/@href').extract()
        for song in songName:
            yield Request(song, callback=self.parsing_link)

    def parsing_link(self, response):
        item = PagalworldItem()
        item['file_urls'] = response.xpath('//div[@class="menu_row"]/a[@class="touch"]/@href').extract()
        yield {"download_link": item['file_urls']}
Item file:
import scrapy
class PagalworldItem(scrapy.Item):
    file_urls = scrapy.Field()
Settings File:
BOT_NAME = 'pagalworld'
SPIDER_MODULES = ['pagalworld.spiders']
NEWSPIDER_MODULE = 'pagalworld.spiders'
ROBOTSTXT_OBEY = True
CONCURRENT_REQUESTS = 5
DOWNLOAD_DELAY = 3
ITEM_PIPELINES = {
    'scrapy.pipelines.files.FilesPipeline': 1
}
FILES_STORE = '/tmp/media/'
The output looks like this: (screenshot omitted)
def parsing_link(self, response):
    item = PagalworldItem()
    item['file_urls'] = response.xpath('//div[@class="menu_row"]/a[@class="touch"]/@href').extract()
    yield {"download_link": item['file_urls']}
You are yielding:
yield {"download_link": ['http://someurl.com']}
where for Scrapy's Media/File pipeline to work you need to yield an item that contains a file_urls field. So try this instead:
def parsing_link(self, response):
    item = PagalworldItem()
    item['file_urls'] = response.xpath('//div[@class="menu_row"]/a[@class="touch"]/@href').extract()
    yield item

Scrapy Images Downloading

My spider runs without displaying any errors but the images are not stored in the folder. Here are my Scrapy files:
Spider.py:
import scrapy
import re
import os
import urlparse
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.loader.processors import Join, MapCompose, TakeFirst
from scrapy.pipelines.images import ImagesPipeline
from production.items import ProductionItem, ListResidentialItem
class productionSpider(scrapy.Spider):
    name = "production"
    allowed_domains = ["someurl.com"]
    start_urls = [
        "someurl.com"
    ]

    def parse(self, response):
        for sel in response.xpath('//html/body'):
            item = ProductionItem()
            img_url = sel.xpath('//a[@data-tealium-id="detail_nav_showphotos"]/@href').extract()[0]
            yield scrapy.Request(urlparse.urljoin(response.url, img_url), callback=self.parseBasicListingInfo, meta={'item': item})

    def parseBasicListingInfo(item, response):
        item = response.request.meta['item']
        item = ListResidentialItem()
        try:
            image_urls = map(unicode.strip, response.xpath('//a[@itemprop="contentUrl"]/@data-href').extract())
            item['image_urls'] = [x for x in image_urls]
        except IndexError:
            item['image_urls'] = ''
        return item
settings.py:
from scrapy.settings.default_settings import ITEM_PIPELINES
from scrapy.pipelines.images import ImagesPipeline
BOT_NAME = 'production'
SPIDER_MODULES = ['production.spiders']
NEWSPIDER_MODULE = 'production.spiders'
DEFAULT_ITEM_CLASS = 'production.items'
ROBOTSTXT_OBEY = True
DEPTH_PRIORITY = 1
IMAGE_STORE = '/images'
CONCURRENT_REQUESTS = 250
DOWNLOAD_DELAY = 2
ITEM_PIPELINES = {
    'scrapy.contrib.pipeline.images.ImagesPipeline': 300,
}
items.py
# -*- coding: utf-8 -*-
import scrapy
class ProductionItem(scrapy.Item):
    img_url = scrapy.Field()

# ScrapingList Residential & Yield Estate for sale
class ListResidentialItem(scrapy.Item):
    image_urls = scrapy.Field()
    images = scrapy.Field()
My pipelines file is empty; I'm not sure what I am supposed to add to the pipelines.py file.
Any help is greatly appreciated.
My Working end result:
spider.py:
import scrapy
import re
import urlparse
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.loader.processors import Join, MapCompose, TakeFirst
from scrapy.pipelines.images import ImagesPipeline
from production.items import ProductionItem
from production.items import ImageItem
class productionSpider(scrapy.Spider):
    name = "production"
    allowed_domains = ["url"]
    start_urls = [
        "startingurl.com"
    ]

    def parse(self, response):
        for sel in response.xpath('//html/body'):
            item = ProductionItem()
            img_url = sel.xpath('//a[@idd="followclaslink"]/@href').extract()[0]
            yield scrapy.Request(urlparse.urljoin(response.url, img_url), callback=self.parseImages, meta={'item': item})

    def parseImages(self, response):
        for elem in response.xpath("//img"):
            img_url = elem.xpath("@src").extract_first()
            yield ImageItem(image_urls=[img_url])
Settings.py
BOT_NAME = 'production'
SPIDER_MODULES = ['production.spiders']
NEWSPIDER_MODULE = 'production.spiders'
DEFAULT_ITEM_CLASS = 'production.items'
ROBOTSTXT_OBEY = True
IMAGES_STORE = '/Users/home/images'
DOWNLOAD_DELAY = 2
ITEM_PIPELINES = {'scrapy.pipelines.images.ImagesPipeline': 1}
# Disable cookies (enabled by default)
items.py
# -*- coding: utf-8 -*-
import scrapy
class ProductionItem(scrapy.Item):
    img_url = scrapy.Field()

# ScrapingList Residential & Yield Estate for sale
class ListResidentialItem(scrapy.Item):
    image_urls = scrapy.Field()
    images = scrapy.Field()

class ImageItem(scrapy.Item):
    image_urls = scrapy.Field()
    images = scrapy.Field()
pipelines.py
import scrapy
from scrapy.pipelines.images import ImagesPipeline
from scrapy.exceptions import DropItem
class MyImagesPipeline(ImagesPipeline):
    def get_media_requests(self, item, info):
        for image_url in item['image_urls']:
            yield scrapy.Request(image_url)

    def item_completed(self, results, item, info):
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem("Item contains no images")
        item['image_paths'] = image_paths
        return item
Since you don't know what to put in the pipelines, I assume you can use the default images pipeline provided by Scrapy, so in the settings.py file you can just declare it like:
ITEM_PIPELINES = {
    'scrapy.pipelines.images.ImagesPipeline': 1
}
Also, your images path is wrong: the leading / means that you are going to the absolute root path of your machine, so either put the absolute path to where you want to save, or use a relative path from where you are running your crawler:
IMAGES_STORE = '/home/user/Documents/scrapy_project/images'
or
IMAGES_STORE = 'images'
Now, in the spider you extract the URL but you don't save it into the item:
item['image_urls'] = sel.xpath('//a[#data-tealium-id="detail_nav_showphotos"]/#href').extract_first()
The field has to literally be image_urls if you're using the default pipeline.
Now, in the items.py file you need to add the following 2 fields (both are required, with these literal names):
image_urls = Field()
images = Field()
That should work
In my case it was the IMAGES_STORE path that was causing the problem
I did IMAGES_STORE = 'images' and it worked like a charm!
Here is the complete code:
Settings:
ITEM_PIPELINES = {
    'mutualartproject.pipelines.MyImagesPipeline': 1,
}
IMAGES_STORE = 'images'
Pipeline:
class MyImagesPipeline(ImagesPipeline):
    def get_media_requests(self, item, info):
        for image_url in item['image_urls']:
            yield scrapy.Request(image_url)

    def item_completed(self, results, item, info):
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem("Item contains no images")
        return item
Just adding my mistake here, which threw me off for several hours. Perhaps it can help someone.
From scrapy docs (https://doc.scrapy.org/en/latest/topics/media-pipeline.html#using-the-images-pipeline):
Then, configure the target storage setting to a valid value that will be used for storing the downloaded images. Otherwise the pipeline will remain disabled, even if you include it in the ITEM_PIPELINES setting.
For some reason I used a colon ":" instead of an equal sign "=".
# My mistake:
IMAGES_STORE : '/Users/my_user/images'

# Working code:
IMAGES_STORE = '/Users/my_user/images'
This doesn't return an error but instead leads to the pipeline not loading at all, which for me was pretty hard to troubleshoot.
You have to enable SPIDER_MIDDLEWARES and DOWNLOADER_MIDDLEWARES in the settings.py file.
