Using Python decorators to delete files after uploading - python

Trying to wrap my head around decorators in Python
I am trying to write a class that contains two functions:
A function which takes a pandas DataFrame as an argument and writes it to a text file, using tabulate.
def text_file(filename, df):
    table = tabulate(df, tablefmt="grid", headers=df.columns)
    with open(filename, 'w') as f:
        f.write(table)
A function to upload files to Slack and delete them after 5 seconds
slack = Slacker(api_token)

def upload(func):
    @functools.wraps(func)
    def upload_wrapper(*args, **kwargs):
        slack.files.upload(file,
                           channels=channel,
                           title=head,
                           initial_comment=comment)
        time.sleep(5)
        os.remove(file)
    return upload_wrapper(*args, **kwargs)
I am getting tripped up on understanding how to use decorators and how to use args and kwargs.
The end result of my structure would be something like:
from slacker import Slacker
from tabulate import tabulate
import functools
import time, os

class Slack:
    def __init__(self, api_token, channel):
        self.Slacker = Slacker(api_token)
        self.channel = channel

    @functools.wraps(func)
    def upload(func):
        def upload_wrapper(self, *args, **kwargs):
            self.slack.files.upload(file,
                                    channels=self.channel,
                                    title=head,
                                    initial_comment=comment)
            time.sleep(5)
            os.remove(file)
        return upload_wrapper(self, *args, **kwargs)

    def generate_text_file(self, filename, df):
        table = tabulate(df, tablefmt="grid", headers=df.columns)
        with open(filename, 'w') as f:
            f.write(table)
And my desired result would be to call a function which prepares a file for me, then uploads it to Slack and deletes it so no files are kept on the system (I would have this running via a cronjob).
workspace_name = Slack(api_token, channel=channel_name)
df = some_pandas_function()
@upload
workspace_name.generate_text_file("filename.txt", df)
Any help would be much appreciated ...
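One way to get this working is to have the decorated method return the path of the file it just wrote, and let the wrapper pick that return value up for the upload and cleanup. Below is a minimal sketch of that shape; the Slacker files.upload call simply mirrors the one in the question rather than being checked against the library, and api_token / channel_name / some_pandas_function are placeholders from the question:

import functools
import os
import time

from slacker import Slacker
from tabulate import tabulate


class Slack:
    def __init__(self, api_token, channel):
        self.slacker = Slacker(api_token)
        self.channel = channel

    def upload(func):  # used only as a decorator at class-definition time
        @functools.wraps(func)
        def upload_wrapper(self, *args, **kwargs):
            # the wrapped method returns the path of the file it created
            filename = func(self, *args, **kwargs)
            self.slacker.files.upload(filename, channels=self.channel)
            time.sleep(5)
            os.remove(filename)
        return upload_wrapper

    @upload
    def generate_text_file(self, filename, df):
        table = tabulate(df, tablefmt="grid", headers=df.columns)
        with open(filename, 'w') as f:
            f.write(table)
        return filename  # hand the path back to the wrapper


workspace = Slack(api_token, channel=channel_name)  # placeholders
df = some_pandas_function()
workspace.generate_text_file("filename.txt", df)    # writes, uploads, then deletes

The key point is that the wrapper, not the caller, supplies the upload/delete steps, so the call site stays a plain method call with no decorator syntax.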

Related

How to update a file inside a folder in a zipfile without unzipping the zip in Python? [duplicate]

I have archive.zip with two files: hello.txt and world.txt
I want to overwrite the hello.txt file with a new one using this code:

import zipfile

z = zipfile.ZipFile('archive.zip', 'a')
z.write('hello.txt')
z.close()
but it won't overwrite the file; instead the archive ends up with a second entry named hello.txt (visible when the archive is opened in WinZip).
Since there is nothing like zipfile.remove(), what's the best way to handle this problem?
There's no way to do that with Python's zipfile module. You have to create a new zip file, recompressing everything from the original archive plus the new modified file.
Below is some code to do just that. But note that it isn't efficient, since it decompresses and then recompresses all data.
import tempfile
import zipfile
import shutil
import os

def remove_from_zip(zipfname, *filenames):
    tempdir = tempfile.mkdtemp()
    try:
        tempname = os.path.join(tempdir, 'new.zip')
        with zipfile.ZipFile(zipfname, 'r') as zipread:
            with zipfile.ZipFile(tempname, 'w') as zipwrite:
                for item in zipread.infolist():
                    if item.filename not in filenames:
                        data = zipread.read(item.filename)
                        zipwrite.writestr(item, data)
        shutil.move(tempname, zipfname)
    finally:
        shutil.rmtree(tempdir)
Usage:
remove_from_zip('archive.zip', 'hello.txt')
with zipfile.ZipFile('archive.zip', 'a') as z:
    z.write('hello.txt')
Building on nosklo's answer:
UpdateableZipFile, a class that inherits from ZipFile, maintains the same interface, and adds the ability to overwrite files (via writestr or write) and to remove files.
import os
import shutil
import tempfile
from zipfile import ZipFile, ZIP_STORED, ZipInfo


class UpdateableZipFile(ZipFile):
    """
    Add delete (via remove_file) and update (via the writestr and write methods).
    To enable the update features, use UpdateableZipFile with the 'with' statement;
    upon __exit__ (if updates were applied) a new zip file will replace the
    existing one with the updates.
    """
    class DeleteMarker(object):
        pass

    def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
        # Init base
        super(UpdateableZipFile, self).__init__(file, mode=mode,
                                                compression=compression,
                                                allowZip64=allowZip64)
        # track files to override in the zip
        self._replace = {}
        # whether the with statement was used
        self._allow_updates = False

    def writestr(self, zinfo_or_arcname, bytes, compress_type=None):
        if isinstance(zinfo_or_arcname, ZipInfo):
            name = zinfo_or_arcname.filename
        else:
            name = zinfo_or_arcname
        # If the file exists and needs to be overridden,
        # mark the entry and create a temp file for it;
        # we allow this only if the with statement is used
        if self._allow_updates and name in self.namelist():
            temp_file = self._replace[name] = self._replace.get(name,
                                                                tempfile.TemporaryFile())
            temp_file.write(bytes)
        # Otherwise just act normally
        else:
            super(UpdateableZipFile, self).writestr(zinfo_or_arcname,
                                                    bytes, compress_type=compress_type)

    def write(self, filename, arcname=None, compress_type=None):
        arcname = arcname or filename
        # If the file exists and needs to be overridden,
        # mark the entry and create a temp file for it;
        # we allow this only if the with statement is used
        if self._allow_updates and arcname in self.namelist():
            temp_file = self._replace[arcname] = self._replace.get(arcname,
                                                                   tempfile.TemporaryFile())
            with open(filename, "rb") as source:
                shutil.copyfileobj(source, temp_file)
        # Otherwise just act normally
        else:
            super(UpdateableZipFile, self).write(filename,
                                                 arcname=arcname, compress_type=compress_type)

    def __enter__(self):
        # Allow updates
        self._allow_updates = True
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # call base to close the zip file first
        try:
            super(UpdateableZipFile, self).__exit__(exc_type, exc_val, exc_tb)
            if len(self._replace) > 0:
                self._rebuild_zip()
        finally:
            # In case the rebuild failed,
            # be sure to still release all the temp files
            self._close_all_temp_files()
            self._allow_updates = False

    def _close_all_temp_files(self):
        for temp_file in self._replace.values():
            if hasattr(temp_file, 'close'):
                temp_file.close()

    def remove_file(self, path):
        self._replace[path] = self.DeleteMarker()

    def _rebuild_zip(self):
        tempdir = tempfile.mkdtemp()
        try:
            temp_zip_path = os.path.join(tempdir, 'new.zip')
            with ZipFile(self.filename, 'r') as zip_read:
                # Create a new zip with the assigned properties
                with ZipFile(temp_zip_path, 'w', compression=self.compression,
                             allowZip64=self._allowZip64) as zip_write:
                    for item in zip_read.infolist():
                        # Check if the file should be replaced or deleted
                        replacement = self._replace.get(item.filename, None)
                        # If marked for deletion, do not copy the file to the new zip
                        if isinstance(replacement, self.DeleteMarker):
                            del self._replace[item.filename]
                            continue
                        # If marked for replacement, copy the temp file instead of the old file
                        elif replacement is not None:
                            del self._replace[item.filename]
                            # Write the replacement to the archive,
                            # and then close it (deleting the temp file)
                            replacement.seek(0)
                            data = replacement.read()
                            replacement.close()
                        else:
                            data = zip_read.read(item.filename)
                        zip_write.writestr(item, data)
            # Override the archive with the updated one
            shutil.move(temp_zip_path, self.filename)
        finally:
            shutil.rmtree(tempdir)
Usage example:

with UpdateableZipFile(r"C:\Temp\Test2.docx", "a") as o:
    # Overwrite a file with a string
    o.writestr("word/document.xml", "Some data")
    # exclude an existing file from the zip
    o.remove_file("word/fontTable.xml")
    # Write a new file (with no conflict) to the zip
    o.writestr("new_file", "more data")
    # Overwrite a file with a file
    o.write(r"C:\Temp\example.png", "word/settings.xml")
Based on this answer, here's a quick and dirty way to monkey-patch the stock zipfile module to support file deletion (while we wait for it to be accepted into python:main):
from zipfile import ZipFile, ZipInfo
from operator import attrgetter
import functools

def enable_zip_remove(func):
    def _zipfile_remove_member(self, member):
        # get a sorted filelist by header offset, in case the dir order
        # doesn't match the actual entry order
        fp = self.fp
        entry_offset = 0
        filelist = sorted(self.filelist, key=attrgetter('header_offset'))
        for i in range(len(filelist)):
            info = filelist[i]
            # find the target member
            if info.header_offset < member.header_offset:
                continue
            # get the total size of the entry
            entry_size = None
            if i == len(filelist) - 1:
                entry_size = self.start_dir - info.header_offset
            else:
                entry_size = filelist[i + 1].header_offset - info.header_offset
            # found the member, set the entry offset
            if member == info:
                entry_offset = entry_size
                continue
            # Move entry
            # read the actual entry data
            fp.seek(info.header_offset)
            entry_data = fp.read(entry_size)
            # update the header
            info.header_offset -= entry_offset
            # write the entry to the new position
            fp.seek(info.header_offset)
            fp.write(entry_data)
            fp.flush()
        # update state
        self.start_dir -= entry_offset
        self.filelist.remove(member)
        del self.NameToInfo[member.filename]
        self._didModify = True
        # seek to the start of the central dir
        fp.seek(self.start_dir)

    def zipfile_remove(self, member):
        """Remove a file from the archive. The archive must be open with mode 'a'."""
        if self.mode != 'a':
            raise RuntimeError("remove() requires mode 'a'")
        if not self.fp:
            raise ValueError(
                "Attempt to write to ZIP archive that was already closed")
        if self._writing:
            raise ValueError(
                "Can't write to ZIP archive while an open writing handle exists."
            )
        # Make sure we have an info object
        if isinstance(member, ZipInfo):
            # 'member' is already an info object
            zinfo = member
        else:
            # get the info object
            zinfo = self.getinfo(member)
        return self._zipfile_remove_member(zinfo)

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if not hasattr(ZipFile, "remove"):
            setattr(ZipFile, "_zipfile_remove_member", _zipfile_remove_member)
            setattr(ZipFile, "remove", zipfile_remove)
        return func(*args, **kwargs)
    return wrapper
Usage:
@enable_zip_remove
def replace_zip_file():
    with ZipFile("archive.zip", "a") as z:
        z.remove("hello.txt")
        z.write("hello.txt")
My solution is similar to the other answers but uses SQLite to manage the intermediate files and provides __getitem__, __setitem__ and __delitem__ for an easy interface.
By default the db is in-memory but you can provide a temp file path if you have a zip larger than available memory.
And SQLite is built into Python's standard library and, for many small reads and writes, can be faster than going through the filesystem.
import sqlite3
import zipfile
from pathlib import Path

from sql import CREATE_TABLE, DELETE_FILE, INSERT_FILE, SELECT_CONTENT


class EditableZip:
    """Intended to make editing files inside a zip archive easy. This class can load
    files from a zip file into a sqlite database, facilitates editing/removing/adding
    files, and saves back to a zip.

    The database can be in-memory (default) or in a temporary on-disk file if
    temp_db_path is provided.

    If an on-disk file is used, EditableZip.close can be called to remove the file,
    or EditableZip can be used as a context manager.

    If auto_save is set to True and an initial zip_path was provided, the file will
    be overwritten when EditableZip closes. If you wish to save to a different file,
    or no zip_path was used at instantiation, auto_save can take a file path.

    Files can be added by item assignment:

        with EditableZip(auto_save="example.zip") as ez:
            ez["thing.txt"] = "stuff"
            # empty dir
            ez["empty/"] = None

    Assignment accepts non-text files as bytes.

    EditableZip is subscriptable: if the subscript is a path in the db, the data
    will be returned. EditableZip.files can be used to iterate over files in the db.
    """

    def __init__(
        self,
        zip_path: None | str | Path = None,
        temp_db_path: None | Path = None,
        auto_save: bool | str | Path = False,
    ):
        self.temp_db_path, self.auto_save, self.file_path = (
            temp_db_path,
            auto_save,
            zip_path,
        )
        self.db = sqlite3.connect(
            str(temp_db_path if temp_db_path is not None else ":memory:")
        )
        self.db.execute(CREATE_TABLE)
        if self.file_path:
            self.load(self.file_path)

    @property
    def files(self):
        "Returns a generator of all file paths in the database."
        try:
            return (
                i[0] for i in self.db.execute("SELECT file_path FROM files").fetchall()
            )
        except TypeError:
            return None

    def load(self, zip_path: str | Path) -> None:
        "Add all files from the zip at zip_path to the db."
        with zipfile.ZipFile(zip_path, mode="r") as archive:
            for item in archive.infolist():
                self[item.filename] = (
                    None if item.filename[-1] == "/" else archive.read(item)
                )

    def save(self, zip_path: None | str | Path) -> Path:
        "Save all files from the db to a zip at zip_path."
        zip_path = self.file_path if zip_path is None else zip_path
        with zipfile.ZipFile(zip_path, "w") as archive:
            for file in self.files:
                if file_data := self.fetch(file):
                    archive.writestr(file, file_data)
                else:
                    archive.writestr(zipfile.ZipInfo(file), "")
        return zip_path

    def close(self):
        "Auto-save if applicable, then close and remove the db."
        if self.auto_save:
            self.save(
                zip_path=self.auto_save
                if isinstance(self.auto_save, (str, Path))
                else None
            )
        self.db.close()
        if isinstance(self.temp_db_path, Path):
            self.temp_db_path.unlink(missing_ok=True)

    def fetch(self, file_path: str) -> bytes:
        "Get the content of the db entry for file_path."
        try:
            return self.db.execute(SELECT_CONTENT, {"file_path": file_path}).fetchone()[
                0
            ]
        except TypeError:
            return None

    def __getitem__(self, key):
        result = self.fetch(key)
        try:
            return result.decode("utf-8")
        except AttributeError:
            return result

    def __setitem__(self, file_path, content: str | bytes):
        if isinstance(content, str):
            content = content.encode("utf-8")
        self.db.execute(
            INSERT_FILE,
            {"file_path": file_path, "file_content": content},
        )

    def __delitem__(self, file_path):
        self.db.execute(DELETE_FILE, {"file_path": file_path})

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()


if __name__ == "__main__":
    # A use case: editing epub files.
    # File source:
    # https://archiveofourown.org/downloads/13795605/Victoria%20Potter%20and%20the.epub?updated_at=1650231615
    file_path = Path("Victoria Potter and the.epub")
    new_file = (file_path.parent / (file_path.stem + "- lowercase")).with_suffix(
        file_path.suffix
    )
    # Create a copy of the epub with all letters lowercase
    with EditableZip(zip_path=file_path, auto_save=new_file) as ez:
        for file in ez.files:
            if Path(file).suffix in [".html", ".xhtml"]:
                ez[file] = ez[file].lower()
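The sql module imported above isn't shown in the answer. A plausible sql.py, reconstructed from how the four statements are used (a files table with file_path and file_content columns, and upsert semantics for INSERT_FILE), might look like this; treat it as an assumption, not the author's original:

# sql.py -- hypothetical reconstruction of the statements EditableZip imports
CREATE_TABLE = """
    CREATE TABLE IF NOT EXISTS files (
        file_path TEXT PRIMARY KEY,
        file_content BLOB
    )
"""

INSERT_FILE = """
    INSERT INTO files (file_path, file_content)
    VALUES (:file_path, :file_content)
    ON CONFLICT(file_path) DO UPDATE SET file_content = :file_content
"""

SELECT_CONTENT = "SELECT file_content FROM files WHERE file_path = :file_path"

DELETE_FILE = "DELETE FROM files WHERE file_path = :file_path"

The named :file_path / :file_content parameters match the dicts passed to db.execute above; the ON CONFLICT upsert requires SQLite 3.24 or newer.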
Reference: Delete file from zipfile with the ZipFile Module
In short: you can take the code from https://github.com/python/cpython/blob/659eb048cc9cac73c46349eb29845bc5cd630f09/Lib/zipfile.py, create a separate file from it, and then reference it from your project instead of the built-in Python library: import myproject.zipfile as zipfile.
Usage:
with zipfile.ZipFile("archive.zip", "a") as z:
    z.remove("firstfile.txt")

Create a Class that creates a dictionary from reading in Multiple CSV Files - Python

I have 24 csv files that currently reside in a folder directory. The goal is to read all the CSV files in and store them as individual pandas DataFrames. At my client's request, all of our code must be object-oriented. I am new to OOP and I would appreciate any help.
I am currently trying to create a class that will read in my files and store them as a dictionary via a for loop, with the key being the name of the file and the value being the pandas DataFrame.
I already have a list of filepaths stored in a variable called fns.
This is what I have for the code so far; I am trying to figure out the loop logic so I don't have to create a new class instance every time.
fns = glob.glob(path + "*.csv")
enc = 'ISO-8859-1'

# create class
class MyFile:
    def __init__(self, file_path):
        self.file = file_path

    def ParseName(self):
        self.name_me = self.file.split('\\')[-1].strip('.csv')

    def Read_CSV(self):
        self.data_csv = pd.read_csv(self.file, delimiter='\t',
                                    low_memory=False, encoding=enc)
My goal is to get a dictionary like this:
{'filename1': DataFrame, 'filename2': DataFrame, .... 'filename24': DataFrame}
I appreciate all the help!
Sample Object-oriented CsvStorage:
import glob
from os.path import basename

import pandas as pd


class CsvStorage:
    def __init__(self, path):
        self._dfs = {}  # instance attribute, so instances don't share state
        for f in glob.glob(path):
            self._dfs[basename(f)] = pd.read_csv(f, encoding='ISO-8859-1')

    def get_dataframes(self):
        if not self._dfs:
            raise ValueError('No dataframes. Load data first')
        return self._dfs


files_path = '*/FILE_*.csv'  # adjust to your actual path pattern
csv_store = CsvStorage(files_path)
dfs = csv_store.get_dataframes()
print(dfs)
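Note that basename(f) keeps the .csv extension in the dictionary keys, while the desired dictionary in the question uses bare names. If you want extension-free keys, a small adjustment using os.path.splitext works; the question's str.strip('.csv') would also eat any leading or trailing 'c', 's', 'v', or '.' characters from the name, so it is best avoided:

from os.path import basename, splitext

# splitext removes only the extension; strip('.csv') removes characters
key = splitext(basename('data/filename1.csv'))[0]   # -> 'filename1'

Inside CsvStorage.__init__ that would become self._dfs[splitext(basename(f))[0]] = pd.read_csv(...).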

how to create a decorator that sends an argument to a function

I'm trying to create a decorator that goes through a bunch of files and applies the function to each file's text. The problem is that the wrapped function needs to receive a text, but the returned wrapped function does not.
Everything works fine if I define it as a plain function that receives the parameters:
import os

def manipulate(folder, func, *args, **kwargs):
    for file_name in os.listdir(folder):
        with open(file_name, 'r') as f:
            text = ''.join(f.readlines())
        func(text, *args, **kwargs)
        with open(file_name, 'w') as f:
            f.write(text)
but I wish I could transform this into a decorator
import os, functools

def manip(folder):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for file_name in os.listdir(folder):
                with open(file_name, 'r') as f:
                    text = ''.join(f.readlines())
                func(text, *args, **kwargs)
                with open(file_name, 'w') as f:
                    f.write(text)
        return wrapper
    return decorator
now if I have a function:
def add_after(text, after='to be added'):
    return text + after

manipulate('./MyFolder/', add_after)
the manipulate function works fine.
but using the manip decorator like:
@manip('./MyFolder/')
def add_after(text, after='to be added'):
    return text + after

add_after()

gives me the error:
TypeError: add_after() missing 1 required positional argument: 'text'
I can't figure out what's wrong, or whether I just shouldn't be trying to do it like this. The idea is to be able to create utility functions that help me edit a bunch of text/JSON files when needed, and to use this decorator so that a call to the decorated function already applies itself to all the text files in that folder.
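For what it's worth, two things stand out in the code above: the wrapper discards func's return value instead of assigning it back to text, and os.listdir returns bare file names that need to be joined with the folder. A sketch of the decorator with both issues addressed (a guess at the intended behavior, not a verified fix):

import functools
import os

def manip(folder):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for file_name in os.listdir(folder):
                path = os.path.join(folder, file_name)  # listdir gives bare names
                with open(path, 'r') as f:
                    text = f.read()
                text = func(text, *args, **kwargs)      # keep the transformed text
                with open(path, 'w') as f:
                    f.write(text)
        return wrapper
    return decorator

@manip('./MyFolder/')
def add_after(text, after='to be added'):
    return text + after

add_after()           # rewrites every file in ./MyFolder/
add_after(after='!')  # extra arguments pass through to add_after

With this shape, calling the decorated function with no positional arguments is fine, because the wrapper itself supplies text on each iteration.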

lru_cache dump into a file and loading back into in-memory again

Within the functools package in Python 3, there is an lru_cache() decorator that will memoize your function calls.
Is there a way for me to dump this cache out to a file and then load the file back into memory later?
I couldn't find this feature in the functools documentation. What would be the recommended way to achieve this, preferably with a solution involving only Python?
I don't know of a standard way to solve this problem, but you can write your own decorator like this:
import pickle
from functools import wraps

def diskcached(cachefile, saveafter=1):
    def cacheondisk(fn):
        try:
            with open(cachefile, 'rb') as f:
                cache = pickle.load(f)
        except (OSError, EOFError, pickle.PickleError):
            cache = {}
        unsaved = [0]

        @wraps(fn)
        def usingcache(*args, **kwargs):
            try:
                key = hash((args, kwargs))
            except TypeError:
                key = repr((args, kwargs))
            try:
                ret = cache[key]
            except KeyError:
                ret = cache[key] = fn(*args, **kwargs)
                unsaved[0] += 1
                if unsaved[0] >= saveafter:
                    with open(cachefile, 'wb') as f:
                        pickle.dump(cache, f)
                    unsaved[0] = 0
            return ret
        return usingcache
    return cacheondisk
and use with
@diskcached("filename_to_save")
def your_function():
    ...
Here is a different solution using the third party package joblib (pip install joblib), which has served me very well:
from joblib import Memory

memory = Memory("/usr/src/app/no_sync/tmp/", verbose=0)

@memory.cache
def args_and_results_of_this_function_are_cached_to_disk(a, b):
    return a + b
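With joblib, the first call with a given set of arguments computes the result and pickles it under the cache directory; later calls with the same arguments load it from disk, even across interpreter restarts. A quick usage illustration of the function defined above:

# first call computes and writes the result to the cache directory
print(args_and_results_of_this_function_are_cached_to_disk(1, 2))  # 3, computed
# same arguments again: the pickled result is loaded from disk instead
print(args_and_results_of_this_function_are_cached_to_disk(1, 2))  # 3, from cache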

Save data into two separate Parse apps

I wrote a small Python Django program that parses data from a JSON API call and saves it into Parse, using ParsePy.
I have a python file that collects the data and saves it into a Parse app DB. The Python file also passes some data into a different file that should save the passed data into a different Parse app.
In pseudocode:
# File1.py
register('key1', 'restKey1')
file2.class1(passedData)
file1.saveData

# File2.py
register('key2', 'restKey2')
file2.saveData
When I run the files individually, the code works perfectly. However, when I execute the program through the first file, the data is all getting saved into the first Parse app database instead of the second one.
I think you can use a pattern like this:
#!/usr/bin/python

class SourceInterface(object):
    def get_data(self):
        raise NotImplementedError("Subclasses should implement this!")


class DestinationInterface(object):
    def put_data(self, data):
        raise NotImplementedError("Subclasses should implement this!")


class FileSource(SourceInterface):
    def __init__(self, filename):
        self.filename = filename

    def get_data(self):
        lines = None
        with open(self.filename, 'r') as f:
            lines = f.readlines()
        if lines:
            with open(self.filename, 'w') as f:
                if lines[1:]:
                    f.writelines(lines[1:])
            return lines[0]


class FileDestination(DestinationInterface):
    def __init__(self, filename):
        self.filename = filename

    def put_data(self, data):
        print('put data', data)
        with open(self.filename, 'a+') as f:
            f.write(data)


class DataProcessor(object):
    def __init__(self):
        # instance attributes, so each processor keeps its own registrations
        self.sources_list = []
        self.destinations_list = []

    def register_source(self, source):
        self.sources_list.append(source)

    def register_destination(self, destination):
        self.destinations_list.append(destination)

    def process(self):
        for source in self.sources_list:
            data = source.get_data()
            if data:
                for destination in self.destinations_list:
                    destination.put_data(data)


if __name__ == '__main__':
    processor = DataProcessor()
    processor.register_source(FileSource('/tmp/source1.txt'))
    processor.register_source(FileSource('/tmp/source2.txt'))
    processor.register_destination(FileDestination('/tmp/destination1.txt'))
    processor.register_destination(FileDestination('/tmp/destination2.txt'))
    processor.process()
Just define your own Source and Destination classes.
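For the Parse case specifically, the likely culprit is that ParsePy's register() sets a single global connection, so whichever app registered last wins. Under that assumption (register lives in parse_rest.connection; the keys and the Record class below are placeholders), a destination could re-register its own credentials just before each save:

from parse_rest.connection import register
from parse_rest.datatypes import Object


class ParseDestination(DestinationInterface):
    """Saves data to one specific Parse app, re-registering its own
    credentials before every save so two destinations don't clobber
    each other's global connection."""

    def __init__(self, app_id, rest_key, object_class):
        self.app_id = app_id
        self.rest_key = rest_key
        self.object_class = object_class  # a parse_rest Object subclass

    def put_data(self, data):
        register(self.app_id, self.rest_key)  # point the global connection at this app
        self.object_class(payload=data).save()


class Record(Object):
    pass


processor = DataProcessor()
processor.register_source(FileSource('/tmp/source1.txt'))
processor.register_destination(ParseDestination('app1Id', 'restKey1', Record))
processor.register_destination(ParseDestination('app2Id', 'restKey2', Record))
processor.process()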
