Python: Scrapy image download, how to use a custom filename [duplicate]

For my scrapy project I'm currently using the ImagesPipeline. The downloaded images are stored with a SHA1 hash of their URLs as the file names.
How can I store the files using my own custom file names instead?
What if my custom file name needs to contain another scraped field from the same item, e.g. using item['desc'] as the filename for the image downloaded from item['image_url']? If I understand correctly, that would involve somehow accessing the other item fields from the images pipeline.
Any help will be appreciated.

This is just an update of the answer for Scrapy 0.24 (EDITED), where image_key() is deprecated:
from scrapy.http import Request
from scrapy.contrib.pipeline.images import ImagesPipeline  # import path as of Scrapy 0.24

class MyImagesPipeline(ImagesPipeline):
    # Name the full-size download
    def file_path(self, request, response=None, info=None):
        # item = request.meta['item']  # this way you can use anything from the item, not just the URL
        image_guid = request.url.split('/')[-1]
        return 'full/%s' % (image_guid)

    # Name the thumbnail
    def thumb_path(self, request, thumb_id, response=None, info=None):
        image_guid = thumb_id + response.url.split('/')[-1]
        return 'thumbs/%s/%s.jpg' % (thumb_id, image_guid)

    def get_media_requests(self, item, info):
        for image in item['images']:
            # add meta={'item': item} here to make the whole item available in file_path
            yield Request(image)
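Whichever version you use, the custom pipeline still has to be enabled in settings.py. A minimal sketch, assuming the class lives in myproject/pipelines.py (the project name is an assumption):

# settings.py -- enable the custom pipeline and set the base image directory
ITEM_PIPELINES = {'myproject.pipelines.MyImagesPipeline': 300}
IMAGES_STORE = '/path/to/images'  # 'full/' and 'thumbs/' are created under this directory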

In Scrapy 0.12 I solved it with something like this:
from scrapy.http import Request
from scrapy.contrib.pipeline.images import ImagesPipeline  # 0.12-era import path

class MyImagesPipeline(ImagesPipeline):
    # Name the full-size download
    def image_key(self, url):
        image_guid = url.split('/')[-1]
        return 'full/%s.jpg' % (image_guid)

    # Name the thumbnail
    def thumb_key(self, url, thumb_id):
        image_guid = thumb_id + url.split('/')[-1]
        return 'thumbs/%s/%s.jpg' % (thumb_id, image_guid)

    def get_media_requests(self, item, info):
        yield Request(item['images'])

I found my way in 2017, with Scrapy 1.1.3:
from scrapy.http import Request
from scrapy.pipelines.images import ImagesPipeline  # import path since Scrapy 1.0

class MyImagesPipeline(ImagesPipeline):
    def file_path(self, request, response=None, info=None):
        return request.meta.get('filename', '')

    def get_media_requests(self, item, info):
        img_url = item['img_url']
        meta = {'filename': item['name']}
        yield Request(url=img_url, meta=meta)
As the code above shows, you can attach the name you want to the Request meta in get_media_requests(), and read it back in file_path() with request.meta.get('yourname', '').
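One caveat worth noting: item['name'] comes straight from scraped data, so it can contain slashes or other characters that are illegal in file names. A minimal sketch of sanitizing it first (safe_name is a hypothetical helper, not part of Scrapy):

import re
from scrapy.http import Request

def safe_name(value):
    # hypothetical helper: replace anything that isn't alphanumeric, dot, dash or underscore
    return re.sub(r'[^\w.-]', '_', value)

def get_media_requests(self, item, info):
    # same pattern as above, with the scraped name sanitized before it becomes a path
    yield Request(url=item['img_url'], meta={'filename': safe_name(item['name']) + '.jpg'})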

This was the way I solved the problem in Scrapy 0.10.
Check the persist_image method of FSImagesStoreChangeableDirectory below: the filename of the downloaded image is the key argument.
import os
from scrapy.conf import settings  # 0.10-era settings access
from scrapy.contrib.pipeline.images import ImagesPipeline, FSImagesStore  # 0.10-era paths, from memory

class FSImagesStoreChangeableDirectory(FSImagesStore):
    def persist_image(self, key, image, buf, info, append_path):
        absolute_path = self._get_filesystem_path(append_path + '/' + key)
        self._mkdir(os.path.dirname(absolute_path), info)
        image.save(absolute_path)

class ProjectPipeline(ImagesPipeline):
    def __init__(self):
        # deliberately skip ImagesPipeline.__init__ so we can install our own store
        super(ImagesPipeline, self).__init__()
        store_uri = settings.IMAGES_STORE
        if not store_uri:
            raise NotConfigured
        self.store = FSImagesStoreChangeableDirectory(store_uri)

I did a nasty quick hack for that. In my case, I stored the image title in my feed, and I had only one image_urls entry per item, so I wrote the following script. It renames the image files in the /images/full/ directory to the corresponding title from the item feed that I had stored as JSON.
import os
import json

img_dir = os.path.join(os.getcwd(), 'images\\full')  # Windows-style path
item_dir = os.path.join(os.getcwd(), 'data.json')

with open(item_dir, 'r') as item_json:
    items = json.load(item_json)

for item in items:
    if len(item['images']) > 0:
        cur_file = item['images'][0]['path'].split('/')[-1]
        cur_format = cur_file.split('.')[-1]
        new_title = item['title'] + '.%s' % cur_format
        file_path = os.path.join(img_dir, cur_file)
        os.rename(file_path, os.path.join(img_dir, new_title))
It's nasty and not recommended, but it is a naive alternative approach.

I rewrote the code above, changing response. to request. in thumb_path. Otherwise it won't work, because response is set to None.
class MyImagesPipeline(ImagesPipeline):
    # Name the full-size download
    def file_path(self, request, response=None, info=None):
        # item = request.meta['item']  # this way you can use anything from the item, not just the URL
        image_guid = request.url.split('/')[-1]
        return 'full/%s' % (image_guid)

    # Name the thumbnail
    def thumb_path(self, request, thumb_id, response=None, info=None):
        image_guid = thumb_id + request.url.split('/')[-1]
        return 'thumbs/%s/%s.jpg' % (thumb_id, image_guid)

    def get_media_requests(self, item, info):
        for image in item['images']:
            yield Request(image)

Related

Different Scrapy feeds export destination for each request

I'm trying to save the image URLs for individual properties in their respective CSV files via feed exports. For this to work, the FEEDS csv path in custom_settings has to change every time a scrapy.Request is yielded in start_requests. Every time a scrapy.Request is yielded, self.feeds_csv_path (initialized in __init__) is assigned a new CSV file path corresponding to the property id; it is then supposed to be picked up by FEEDS via get_feeds_csv_path, as in the code below. The self.feeds_csv_path in custom_settings doesn't seem to be able to access get_feeds_csv_path. Where is the error here?
import asyncio
from configparser import ConfigParser
import os
import pandas as pd
import scrapy
import requests
import json

class GetpropertyimgurlsSpider(scrapy.Spider):
    name = 'GetPropertyImgUrls'
    custom_settings = {
        "FEEDS": {
            self.feeds_csv_path: {
                "format": "csv",
                "overwrite": True
            }
        }
    }

    def __init__(self, *args, **kwargs):
        self.feeds_csv_path = None
        super(GetpropertyimgurlsSpider, self).__init__(*args, **kwargs)

    def start_requests(self):
        files = self.get_html_files()  # List of html file full paths
        for file in files[:2]:
            self.feeds_csv_path = self.get_feeds_csv_path(file)
            yield scrapy.Request(file, callback=self.parse)

    def parse(self, response):
        texts = response.xpath("//text()").getall()
        text = texts[1]
        json_text = json.loads(text)
        # print(text)
        photos = json_text["@graph"][3]["photo"]
        for photo in photos:
            yield photo["contentUrl"]

    def get_feeds_csv_path(self, html_file_path):
        property_id = html_file_path.split("/")[-2].split("_")[1]
        feeds_csv_path = f"{html_file_path}/images/Property_{property_id}_ImgSrcs.csv"
        return feeds_csv_path

    def get_path(self):
        config = ConfigParser()
        config.read("config.ini")  # Location relative to main.py
        path = config["scrapezoopla"]["path"]
        return path

    # Returns a list of html file dirs
    def get_html_files(self):
        path = self.get_path()
        dir = f"{path}/data/properties/"
        dir_list = os.listdir(dir)
        folders = []
        for ins in dir_list:
            if os.path.isdir(f"{dir}{ins}") == True:
                append_ins = folders.append(ins)
        html_files = []
        for folder in folders:
            html_file = f"{dir}{folder}/{folder}.html"
            if os.path.isfile(html_file) == True:
                append_html_file = html_files.append(f"file:///{html_file}")
        return html_files
The first problem I see is that you are using the self keyword at class-namespace scope in your spider. The self keyword is only available inside instance methods, where you pass it in as the first argument, e.g. def __init__(self, ...).
Even if self were available, it still wouldn't work, because once you create the custom_settings dictionary, self.feeds_csv_path is immediately converted to its string value at class-definition time, so updating the instance variable would have no effect on the custom_settings property.
Another issue is that Scrapy collects all of the custom settings and stores them internally before the crawl actually starts, so updating the custom_settings dictionary mid-crawl might not have any effect. I am not certain about that, though.
All of that being said, your goal is still achievable. One approach I can think of is creating the FEEDS dictionary at runtime, prior to initiating the crawl, and using custom scrapy.Item classes to filter which item belongs to which output.
I have no way of testing it, so it might be buggy, but here is an example of what I am referring to:
from configparser import ConfigParser
import json
import os
import scrapy

def get_path():
    config = ConfigParser()
    config.read("config.ini")  # Location relative to main.py
    path = config["scrapezoopla"]["path"]
    return path

# Returns a list of html file dirs
def get_html_files():
    path = get_path()
    folder = f"{path}/data/properties/"
    dir_list = os.listdir(folder)
    html_files = []
    for ins in dir_list:
        if os.path.isdir(f"{folder}{ins}"):
            if os.path.isfile(f"{folder}{ins}/{ins}.html"):
                html_files.append(f"file:///{folder}{ins}/{ins}.html")
    return html_files

def get_feeds_csv_path(html_file_path):
    property_id = html_file_path.split("/")[-2].split("_")[1]
    feeds_csv_path = f"{html_file_path}/images/Property_{property_id}_ImgSrcs.csv"
    return feeds_csv_path

def create_custom_item():
    class Item(scrapy.Item):
        contentUrl = scrapy.Field()
    return Item

def customize_settings():
    feeds = {}
    files = get_html_files()
    start_urls = {}
    for path in files:
        custom_class = create_custom_item()
        output_path = get_feeds_csv_path(path)
        start_urls[path] = custom_class
        feeds[output_path] = {
            "format": "csv",
            "item_classes": [custom_class],
        }
    custom_settings = {"FEEDS": feeds}
    return custom_settings, start_urls

class GetpropertyimgurlsSpider(scrapy.Spider):
    name = 'GetPropertyImgUrls'
    custom_settings, start_urls = customize_settings()

    def start_requests(self):
        for uri, itemclass in self.start_urls.items():
            yield scrapy.Request(uri, callback=self.parse, cb_kwargs={'itemclass': itemclass})

    def parse(self, response, itemclass):
        texts = response.xpath("//text()").getall()
        text = texts[1]
        json_text = json.loads(text)
        photos = json_text["@graph"][3]["photo"]
        for photo in photos:
            item = itemclass()
            item['contentUrl'] = photo["contentUrl"]
            yield item

How do I get the original name of the file uploaded to the FileField in Django?

I want to access the name of the file uploaded to models.FileField() in my class. Here is my class:

class extract(models.Model):
    doc = models.FileField(upload_to='files/')

Is there a function or method to access the original name of the uploaded file, without modifying the name during upload? Something like this:

name = doc.filename()
Edit - 2:
I'm very new to Django and programming in general, so this might seem stupid, but I want something like this:

class extract(models.Model):
    def get_name(instance, filename):
        path = 'files'
        name = filename
        return name, path

    doc = models.FileField(fname, upload_to=get_name)

    def ex(fname):
        path0 = "E:/Projects/PDF Converter/pdf/files/"
        fullpath = path0 + fname
        document = open(fullpath)
        reader = PyPDF2.PdfFileReader(document)
        page1 = reader.getPage(0)
        data = page1.extractText()
        return data

    text = models.TextField(default=ex(fname), editable=False)
Per the documentation, this can be accomplished by passing a callable as the upload_to argument; the callable is invoked with the original filename as its second argument:

def get_filename(_, filename):
    print(filename)  # or do whatever you want with the original filename here
    return f'files/{filename}'  # upload_to must return the full path, filename included

class extract(models.Model):
    doc = models.FileField(upload_to=get_filename)
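Since the callable above keeps the original name in the stored path, it can be recovered later from the model instance. A small usage sketch (the lookup is illustrative; note that Django's storage may still append a suffix if a file with the same name already exists):

import os

obj = extract.objects.first()  # any saved instance (illustrative)
original_name = os.path.basename(obj.doc.name)  # e.g. 'report.pdf'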

Scrapy CSV column export

I'd like to export data to several columns in a CSV, but I always obtain this kind of file:
[screenshot of the resulting csv]
I'd like to obtain two columns, one "articulo" and another one "precio".
My pipelines:

import scrapy
from scrapy import signals
from scrapy.contrib.exporter import CsvItemExporter
import csv

class MercadoPipeline(object):
    def __init__(self):
        self.files = {}

    @classmethod
    def from_crawler(cls, crawler):
        pipeline = cls()
        crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
        crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
        return pipeline

    def spider_opened(self, spider):
        file = open('%s_items.csv' % spider.name, 'w+b')
        self.files[spider] = file
        self.exporter = CsvItemExporter(file)
        self.exporter.fields_to_export = ['articulo', 'precio']
        self.exporter.start_exporting()

    def spider_closed(self, spider):
        self.exporter.finish_exporting()
        file = self.files.pop(spider)
        file.close()

    def process_item(self, item, spider):
        self.exporter.export_item(item)
        return item
Can you help me please?
Here you are:
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.exceptions import CloseSpider
from mercado.items import MercadoItem

class MercadoSpider(CrawlSpider):
    name = 'mercado'
    item_count = 0
    allowed_domains = ['www.autodoc.es']
    start_urls = ['https://www.autodoc.es/search?brandNo%5B0%5D=101']

    rules = (
        Rule(LinkExtractor(allow=(), restrict_xpaths=('//span[@class="next"]/a'))),
        Rule(LinkExtractor(allow=(), restrict_xpaths=('//a[@class="ga-click"]')),
             callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        ml_item = MercadoItem()
        # product info
        ml_item['articulo'] = response.xpath('normalize-space(//*[@id="content"]/div[4]/div[2]/div[1]/div[1]/div/span[1]/span/text())').extract()
        ml_item['precio'] = response.xpath('normalize-space(//*[@id="content"]/div[4]/div[3]/div[2]/p[2]/text())').extract()
        self.item_count += 1
        if self.item_count > 20:
            raise CloseSpider('item_exceeded')
        yield ml_item
There is nothing wrong with the output of your code.
You are getting the two CSV columns you want, but the program you are using to view the data is not interpreting the file correctly.
By default, CsvItemExporter uses the comma (,) as the delimiter, and the program seems to expect something else (and possibly even different quoting).
There are two possibilities to solve your problem:
Change the program's settings so it reads the file correctly.
Change the way CsvItemExporter exports data (it passes any additional keyword arguments through to the underlying csv.writer object).
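A minimal sketch of the second option, assuming the viewing program expects semicolon-separated values; the extra keyword argument is forwarded to csv.writer:

self.exporter = CsvItemExporter(file, delimiter=';')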

How to order csv items in a multiCSV exporter scrapy?

I have been putting together a piece of code I found on Stack Overflow on how to split each URL's output into a separate CSV file, and I came up with the code below. However, I can no longer use fields_to_export in the code. How can I set the fields to export so that they come out like fields_to_export = ['itemA', 'itemB', 'itemC']?
from scrapy import signals
from scrapy.exporters import CsvItemExporter
import re

class appPipeline(object):
    urls = [l.strip() for l in open('data/listOfUrls.txt').readlines()]
    names = [name.group(1) for l in urls for name in [re.search(r'https://www.google.co.uk/', l, re.M | re.I)] if name]

    def __init__(self):
        self.files = {}
        self.exporters = {}

    @classmethod
    def from_crawler(cls, crawler):
        pipeline = cls()
        crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
        crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
        return pipeline

    def spider_opened(self, spider):
        self.files = dict([(name, open('results/' + name + '.csv', 'w+b')) for name in self.names])
        self.exporters = dict([(name, CsvItemExporter(self.files[name])) for name in self.names])
        ### this line is not working: self.exportes.fields_to_export = ['itemA','itemB','itemC']
        [e.start_exporting() for e in self.exporters.values()]

    def spider_closed(self, spider):
        [e.finish_exporting() for e in self.exporters.values()]
        [f.close() for f in self.files.values()]

    def process_item(self, item, spider):
        myItem = item['myItem']
        if myItem in set(self.names):
            self.exporters[myItem].export_item(item)
        return item
So far I have tried overriding the keys in items, serializing the items, and looking at how to sort the values in a dictionary by a list of keys. None of them worked.
Thanks for your help.
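A minimal sketch of one way the marked line could be made to work: fields_to_export is an attribute of each exporter instance, while self.exporters (note the spelling) is the dict that holds them, so the attribute has to be set on every CsvItemExporter individually:

def spider_opened(self, spider):
    self.files = dict([(name, open('results/' + name + '.csv', 'w+b')) for name in self.names])
    self.exporters = dict([(name, CsvItemExporter(self.files[name])) for name in self.names])
    for exporter in self.exporters.values():
        exporter.fields_to_export = ['itemA', 'itemB', 'itemC']  # set per exporter instance
        exporter.start_exporting()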

Generator for multipart form data with python requests

I've been monkeying around with the Zipstream module by SpiderOak, which basically allows you to stream and zip a file or folder without writing anything to disk. It yields chunks of irregularly sized data.
Now, I am trying to upload a directory to a file hosting site that requires me to send file and apikey fields inside a post request. With requests I have to build a dict for the apikey like so:
data = {'apikey': 'myapikey'}
and also read the entire zipstream into a string and pass it to a file-encoding dict:
files = {'file': ('mydir.zip', the_string_that_is_a_zipped_dir)}
then issue the request:
r = requests.post(url, data=data, files=files)
This works OK. However, I would like to transfer larger files in the future, and reading an entire file into memory is a BAD idea to begin with.
I saw in the requests advanced section that you can send a generator as the data field, but then I can't send the API key, and I'd have to modify the headers manually to set the content type and all that, so it doesn't work. I also tried to form a dictionary out of the API key and the zip-file generator, like so:
data = {
    'file': ('mydir.zip', generator()),
    'apikey': 'myapikey'
}
but this fails (as expected).
Is there a way to hack requests into using a generator that yields strings as the file in a multipart form-data request?
OK, after some struggle I managed to make this work without requests, using the poster module instead.
First I created a file-object wrapper around zipstream, like so:

from zipstream import ZipStream

class Zipit:
    def __init__(self, path):
        self.it = iter(ZipStream(path, compression=0))
        self.next_chunk = ""
        self.length = -1
        self.path = path
        self.__is_zipit__ = ''  # marker attribute, checked by the reset() hack below

    @property
    def size(self):
        # poster needs the total size up front, so do one dry run over the stream
        if self.length < 0:
            self.length = 0
            zip_object = ZipStream(self.path, compression=0)
            for data in zip_object:
                self.length += len(data)
        return self.length

    def growChunk(self):
        self.next_chunk = self.next_chunk + self.it.next()  # Python 2 iterator protocol

    def read(self, n):
        if self.next_chunk == None:
            return None
        try:
            while len(self.next_chunk) < n:
                self.growChunk()
            rv = self.next_chunk[:n]
            self.next_chunk = self.next_chunk[n:]
            return rv
        except StopIteration:
            rv = self.next_chunk
            self.next_chunk = None
            return rv
in order to have an easy API (code shamelessly adapted from another example on SO).
Then, as per poster's docs, create the necessary multipart objects:

from poster.encode import multipart_encode, MultipartParam

z = Zipit('/my/path/to/zip')
f = MultipartParam('file', fileobj=z, filesize=z.size, filename='test.zip', filetype='application/zip')
datagen, headers = multipart_encode([f, ('akey', 'mykey')])
One last hack is skipping the reset in MultipartParam.reset when the fileobj field is a Zipit instance, since a generator-backed stream can't seek back to the start:

def reset(self):
    if hasattr(self.fileobj, '__is_zipit__'):
        return  # a Zipit stream can't be rewound; skip the reset
    if self.fileobj is not None:
        self.fileobj.seek(0)
    elif self.value is None:
        raise ValueError("Don't know how to reset this parameter")
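For completeness, the encoded stream then has to be sent. A minimal sketch following poster's documented urllib2 integration, where url is the upload endpoint (an assumption):

import urllib2
from poster.streaminghttp import register_openers

register_openers()  # installs poster's streaming HTTP handlers into urllib2

request = urllib2.Request(url, datagen, headers)  # url is the upload endpoint (assumed)
response = urllib2.urlopen(request)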
This worked for me. Hope it helps any of the five of you that read this.
