I have a module that can save and load data, and it is only a single file. How do I make it installable with pip? The code is:
import subprocess

def save(stuffToStore, title):
    f = open(title + '.sv', 'w')
    f.write(stuffToStore)
    f.close()

def load(title):
    f = open(title + '.sv', 'r')
    contents = f.read()
    f.close()
    return contents
I would also like to include a README and an MIT license, and I would also like to post it on GitHub.
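For reference, here is a minimal sketch of how a single-file module can be made pip-installable with setuptools; the project and module name (savetool) and all metadata are placeholders, not taken from the question:

# setup.py -- sits next to savetool.py, README.md and LICENSE
from setuptools import setup

setup(
    name="savetool",            # distribution name (placeholder)
    version="0.1.0",
    py_modules=["savetool"],    # the single .py file, listed without its extension
    python_requires=">=3.7",
    description="Save and load text data in .sv files",
    license="MIT",
)

With this in place, pip install . installs the module locally, and python -m build (from the build package) produces the archives you would upload or attach to a GitHub release.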
Related
Normally we can copy files directly to the clipboard simply with Ctrl + C, but I want to do that using Python:

path = "..."

def copy_file_to_clipboard() -> None:
    clipboard.copy_file(path)  # this is not real code, but I want to know how to actually do it
Here is a solution, tested in Python 3.9 (Jupyter notebook):
import pyperclip as pc

def copy_to_clipboard(file_name):
    with open(file_name, 'r') as f:
        text = f.read()
    pc.copy(text)
    print("File successfully copied to clipboard!")

copy_to_clipboard('file.txt')
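Note that the snippet above copies the file's text contents, not the file itself. If the goal is to put the file on the clipboard the way Ctrl + C in Explorer does, one option on Windows (a sketch relying on Windows PowerShell's Set-Clipboard -Path; not cross-platform, and the path is a placeholder) is to shell out to PowerShell:

import subprocess

def copy_file_object_to_clipboard(path):
    # Windows-only: Set-Clipboard -Path puts the file itself on the clipboard,
    # like Ctrl + C in Explorer, rather than copying its text contents.
    subprocess.run(
        ["powershell", "-NoProfile", "-Command", f"Set-Clipboard -Path '{path}'"],
        check=True,
    )

copy_file_object_to_clipboard(r"C:\path\to\file.txt")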
import os
import glob
import comtypes.client
from PyPDF2 import PdfFileMerger

def docxs_to_pdf():
    """Converts all Word files to PDFs and appends them to pdfslist"""
    word = comtypes.client.CreateObject('Word.Application')
    pdfslist = PdfFileMerger()
    x = 0
    for f in glob.glob("*.docx"):
        input_file = os.path.abspath(f)
        output_file = os.path.abspath("demo" + str(x) + ".pdf")
        # loads each Word document
        doc = word.Documents.Open(input_file)
        doc.SaveAs(output_file, FileFormat=16+1)  # 17 = wdFormatPDF
        doc.Close()  # closes the document, not the application
        pdfslist.append(open(output_file, 'rb'))
        x += 1
    word.Quit()
    return pdfslist

def joinpdf(pdfs):
    """Unite all PDFs"""
    with open("result.pdf", "wb") as result_pdf:
        pdfs.write(result_pdf)

def main():
    """docxs to pdfs: open Word, create PDFs, close Word, unite PDFs"""
    pdfs = docxs_to_pdf()
    joinpdf(pdfs)

main()
I am using a Jupyter notebook and it throws an error; what should I do? This is the error message:

I am going to convert many .doc files into one PDF. Please help me, I am a beginner in this field.
Make sure you have all the dependencies installed in your environment. You can use pip to install comtypes; simply run this in your terminal:
pip install comtypes
You can download _ctypes from sourceforge:
https://sourceforge.net/projects/ctypes/files/ctypes/1.0.2/ctypes-1.0.2.tar.gz/download?use_mirror=deac-fra
Using docx2pdf does seem easier for your task, though. After you have converted the files, you can use PyPDF2 to append them.
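A rough sketch of that docx2pdf + PyPDF2 route (assumptions: the .docx files sit in the current folder, the output name result.pdf is a placeholder, and newer PyPDF2 releases rename PdfFileMerger to PdfMerger):

import glob
from docx2pdf import convert      # requires Word installed (Windows or macOS)
from PyPDF2 import PdfFileMerger  # called PdfMerger in newer PyPDF2 versions

# Convert every .docx in the current folder; each gets a .pdf next to it.
convert(".")

# Append the resulting PDFs into a single file.
merger = PdfFileMerger()
for pdf in sorted(glob.glob("*.pdf")):
    merger.append(pdf)
merger.write("result.pdf")
merger.close()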
I am trying to read a .docx file in Python. I am not getting any error, but it also doesn't show what is written in the document.
from docx import Document
import os
file = open('C:\\Users\\hamza\\Desktop\\Python\\qwe.docx','r', encoding='utf8' )
document = file.read()
file.close()
Try pip install docx2txt
from docx2txt import process
import os
path = r'C:\Users\hamza\Desktop\Python\qwe.docx'
text = process(path)
with open(os.path.basename(path) + '.txt', 'w') as f:
    f.write(text)
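Alternatively, since the question already imports python-docx, here is a minimal sketch of reading the text with that library instead (using the same path as the question):

from docx import Document

path = r'C:\Users\hamza\Desktop\Python\qwe.docx'
doc = Document(path)  # parses the .docx (a zip archive, not plain text)
for paragraph in doc.paragraphs:
    print(paragraph.text)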
I have this file:
import wget
import sys
import datetime

class Printer(object):
    def __init__(self, *files):
        self.files = files

    def write(self, obj):
        for file in self.files:
            file.write(obj)
            file.flush()

    def flush(self):
        for file in self.files:
            file.flush()

f = open(f"{__file__}-{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}.log", 'w')
sys.stdout = Printer(sys.stdout, f)

url = 'https://www.w3.org/TR/PNG/iso_8859-1.txt'
wget.download(url)

# Your print statements below
print("Hello world!")
The above code does not work; it does not download the file. The question is how to make the download work while still storing the printed output in the log file.
The wget Python package downloads the content of a URL into a file. Below is a working example:
import wget
url = 'https://www.w3.org/TR/PNG/iso_8859-1.txt'
filename = wget.download(url)
with open(filename, 'r') as f:
    print(f.read())
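If the problem in the original snippet is that wget's console progress bar does not get along with the replaced sys.stdout, one thing to try (a guess, not a confirmed diagnosis) is disabling the progress bar, which wget.download allows via its bar argument:

import wget

url = 'https://www.w3.org/TR/PNG/iso_8859-1.txt'
# bar=None switches off the progress bar, so nothing is drawn through
# the (replaced) sys.stdout while the file is being downloaded.
filename = wget.download(url, bar=None)
print("Downloaded to", filename)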
I have been using pylzma for a little while, but I have to be able to create files compatible with the 7-Zip Windows application. The caveat is that some of my files are really large (3 to 4 GB, created by third-party software in a proprietary binary format).
I went over and over the instructions here: https://github.com/fancycode/pylzma/blob/master/doc/USAGE.md
I am able to create compatible files with the following code:
import os
import struct
import time
import pylzma

def Compacts(folder, f):
    os.chdir(folder)
    fsize = os.stat(f).st_size
    t = time.clock()
    i = open(f, 'rb')
    o = open(f + '.7z', 'wb')
    i.seek(0)
    s = pylzma.compressfile(i)
    result = s.read(5)
    result += struct.pack('<Q', fsize)
    s = result + s.read()
    o.write(s)
    o.flush()
    o.close()
    i.close()
    os.remove(f)
The smaller files (up to 2 GB) compress well with this code and are compatible with 7-Zip, but the larger files just crash Python after some time.
According to the user guide, to compress large files one should use streaming, but then the resulting file is not compatible with 7-Zip, as in the snippet below.
def Compacts(folder, f):
    os.chdir(folder)
    fsize = os.stat(f).st_size
    t = time.clock()
    i = open(f, 'rb')
    o = open(f + '.7z', 'wb')
    i.seek(0)
    s = pylzma.compressfile(i)
    while True:
        tmp = s.read(1)
        if not tmp:
            break
        o.write(tmp)
    o.flush()
    o.close()
    i.close()
    os.remove(f)
Any ideas on how I can incorporate the streaming technique present in pylzma while keeping the 7-Zip compatibility?
You still need to correctly write the header (.read(5)) and size, e.g. like so:
import os
import struct
import pylzma

def sevenzip(infile, outfile):
    size = os.stat(infile).st_size
    with open(infile, "rb") as ip, open(outfile, "wb") as op:
        s = pylzma.compressfile(ip)
        op.write(s.read(5))
        op.write(struct.pack('<Q', size))
        while True:
            # Read 128K chunks.
            # Not sure if this has to be 1 instead to trigger streaming in pylzma...
            tmp = s.read(1 << 17)
            if not tmp:
                break
            op.write(tmp)

if __name__ == "__main__":
    import sys
    try:
        _, infile, outfile = sys.argv
    except:
        infile, outfile = __file__, __file__ + u".7z"
    sevenzip(infile, outfile)
    print("compressed {} to {}".format(infile, outfile))