I want to download the archive after I create it. The code is below:
def downloadArchive(room, catalog):
    test = RoomTest.objects.get(idRoom=room)
    zip_name = room.name + "_" + token_urlsafe(5) + '.zip'
    zip_path = 'media/zipFiles/' + zip_name
    if test.OLD_Archive != '' and os.path.exists(test.OLD_Archive):  # remove old file
        os.remove(test.OLD_Archive)
    with ZipFile(zip_path, 'w') as zipObj:
        for student in catalog:
            zipObj.write('media/' + str(student.file),
                         student.studentFullName() + os.path.splitext('media/' + str(student.file))[1])
    test.OLD_Archive = zip_path
    test.save()
    response = HttpResponse(zipObj)
    response['Content-Type'] = 'application/x-zip-compressed'
    response['Content-Disposition'] = 'attachment; filename=' + zip_name
    return response
Every time I try to download, I get the error "The archive is either in unknown format or damaged". I think the route is broken. Any ideas how I can check the response route?
Btw: if I download the archive manually (after it is archived), it works fine.
Solved, I used:
response = redirect('/' + zip_path)
return response
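For reference, instead of redirecting to the media URL, the view could also return the file contents directly once the archive has been written to disk. A minimal sketch using the zip_path and zip_name variables from the view above (the key point is reading the finished file from disk rather than passing the ZipFile object to HttpResponse):
with open(zip_path, 'rb') as archive:
    # read the finished zip file from disk and send its bytes in the response
    response = HttpResponse(archive.read(), content_type='application/x-zip-compressed')
response['Content-Disposition'] = 'attachment; filename=' + zip_name
return response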
I am trying to download files using Python and then add lines at the end of the downloaded files, but it returns an error:
f.write(data + """<auth-user-pass>
TypeError: can't concat str to bytes
Edit: Thanks, it works now when I do this: b"""< auth-user-pass >""". But I only want to add the string at the end of the file; when I run the code, it adds the string for every line.
I also tried something like this, but it also did not work: f.write(str(data) + "< auth-user-pass >")
Here is my full code:
import os
import requests
from multiprocessing.pool import ThreadPool

def download_url(url):
    print("downloading: ", url)
    # assumes that the last segment after the / represents the file name
    # if url is abc/xyz/file.txt, the file name will be file.txt
    file_name_start_pos = url.rfind("/") + 1
    file_name = url[file_name_start_pos:]
    save_path = 'ovpns/'
    complete_path = os.path.join(save_path, file_name)
    print(complete_path)
    r = requests.get(url, stream=True)
    if r.status_code == requests.codes.ok:
        with open(complete_path, 'wb') as f:
            for data in r:
                f.write(data + """<auth-user-pass>
username
password
</auth-user-pass>""")
    return url

servers = [
    "us-ca72.nordvpn.com",
    "us-ca73.nordvpn.com"
]

urls = []
for server in servers:
    urls.append("https://downloads.nordcdn.com/configs/files/ovpn_legacy/servers/" + server + ".udp1194.ovpn")

# Run 5 threads. Each call will take the next element in the urls list.
results = ThreadPool(5).imap_unordered(download_url, urls)
for r in results:
    print(r)
Try this:
import os
import requests
from multiprocessing.pool import ThreadPool

def download_url(url):
    print("downloading: ", url)
    # assumes that the last segment after the / represents the file name
    # if url is abc/xyz/file.txt, the file name will be file.txt
    file_name_start_pos = url.rfind("/") + 1
    file_name = url[file_name_start_pos:]
    save_path = 'ovpns/'
    complete_path = os.path.join(save_path, file_name)
    print(complete_path)
    r = requests.get(url, stream=True)
    if r.status_code == requests.codes.ok:
        with open(complete_path, 'wb') as f:
            for data in r:
                f.write(data)
        # append the auth block once, after the whole file has been downloaded
        with open(complete_path, 'ab') as f:
            f.write(b"""<auth-user-pass>
username
password
</auth-user-pass>""")
    return url

servers = [
    "us-ca72.nordvpn.com",
    "us-ca73.nordvpn.com"
]

urls = []
for server in servers:
    urls.append("https://downloads.nordcdn.com/configs/files/ovpn_legacy/servers/" + server + ".udp1194.ovpn")

# Run 5 threads. Each call will take the next element in the urls list.
results = ThreadPool(5).imap_unordered(download_url, urls)
for r in results:
    print(r)
You are opening the file in binary mode, so encode your string before concatenating. That is, replace
for data in r:
    f.write(data + """<auth-user-pass>
username
password
</auth-user-pass>""")
with
for data in r:
    f.write(data + """<auth-user-pass>
username
password
</auth-user-pass>""".encode())
You open the file for writing in binary mode.
Because of that, you can't use normal strings, as the comment from #user56700 said.
You either need to encode the string or open the file another way (e.g. 'a' for appending).
I'm not completely sure, but it is also possible that opening in write-binary mode deletes the existing data of the file. Normally, opening for writing truncates existing data, so it's quite possible that you need to change the mode to 'r+b'.
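If the goal is to add the block only once, at the end of each downloaded file, one option is to reopen the file in text append mode after the download loop has finished. A sketch, assuming the file has already been written to complete_path as in the code above:
# reopen in text append mode ('a'), so a plain str can be written as-is
with open(complete_path, 'a') as f:
    f.write("<auth-user-pass>\nusername\npassword\n</auth-user-pass>\n")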
I'm getting an issue where I can't create a folder with a name similar to a file inside that folder.
For instance, if the image has the name
apple-banana-carrot-dog-electric-fish-gorilla-horse-igloo-jackrabbit-kangaroo-long-maze-nickel-octopus-pretty.jpg
I'll get the error below (I separated the directory path for readability):
FileNotFoundError: [Errno 2] No such file or directory: 'C:/Users/Dave/Desktop/PictureGallery/
apple-banana-carrot-dog-electric-fish-gorilla-horse-igloo-jackrabbit-kangaroo-long-maze-nickel-octopus-pretty/
apple-banana-carrot-dog-electric-fish-gorilla-horse-igloo-jackrabbit-kangaroo-long-maze-nickel-octopus-pretty.-1.jpg'
However, if I shorten that folder name to something like...
apple-banana-carrot-dog-electric-fish-gorilla-horse
I won't get an error. I have absolutely no idea why this is happening.
Downloading the image:
import os
import requests

def download(url: str, dest_folder: str):
    print("DESTINATION FOLDER: " + dest_folder)
    if not os.path.exists(dest_folder):
        os.makedirs(dest_folder)

    filename = url.split('/')[-1].replace(" ", "_")  # be careful with file names
    file_path = os.path.join(dest_folder, filename)
    print("File path: " + file_path)

    r = requests.get(url, stream=True)
    if r.ok:
        with open(file_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024 * 8):
                if chunk:
                    f.write(chunk)
                    f.flush()
                    os.fsync(f.fileno())
    else:  # HTTP status code 4XX/5XX
        print("Download failed: status code {}\n{}".format(r.status_code, r.text))
And the main.py:
import requests
import os
from bs4 import BeautifulSoup
from downloader import download

url = 'https://urltoimage.com/123456/apple-banana-carrot-dog-electric-fish-gorilla-horse-igloo-jackrabbit-kangaroo-long-maze-nickel-octopus-pretty/'
model_name = 'Coolname'
album_name = ' '.join(url.split("/")).split()[-1]  # apple-banana-carrot-dog-electric-fish-gorilla-horse-igloo-jackrabbit-kangaroo-long-maze-nickel-octopus-pretty
print("Album name: " + album_name)

location = "C:/Users/Dave/Desktop/" + model_name + "/" + album_name + "/"
print('Location: ' + location)  # C:/Users/Dave/Desktop/Coolname/apple-banana-carrot-dog-electric-fish-gorilla-horse-igloo-jackrabbit-kangaroo-long-maze-nickel-octopus-pretty/

reqs = requests.get(url)
soup = BeautifulSoup(reqs.text, 'html.parser')

for link in soup.find_all('img'):
    if album_name in link.get('src').lower():
        print(link.get('src'))
        download(link.get('src'), location)
For the line in my main.py:
location = "C:/Users/Dave/Desktop/" + model_name + "/" + album_name + "/"
the album name would normally be extracted from the url so...
apple-banana-carrot-dog-electric-fish-gorilla-horse-igloo-jackrabbit-kangaroo-long-maze-nickel-octopus-pretty
This causes the error to be thrown.
HOWEVER, when I change it to anything else...
such as
location = "C:/Users/Dave/Desktop/" + model_name + "/" + "anything5"+ "/"
or
location = "C:/Users/Dave/Desktop/" + model_name + "/" + "apple-banana-carrot-dog-electric-fish-gorilla-horse"+ "/"
the download will work without any errors thrown. I have no idea what's causing this issue.
The error is thrown at line 18 in downloader.py:
with open(file_path, 'wb') as f:
Full error:
with open(file_path, 'wb') as f:
FileNotFoundError: [Errno 2] No such file or directory: 'C:/Users/Dave/Desktop/Coolname/apple-banana-carrot-dog-electric-fish-gorilla-horse-igloo-jackrabbit-kangaroo-long-maze-nickel-octopus-pretty/apple-banana-carrot-dog-electric-fish-gorilla-horse-igloo-jackrabbit-kangaroo-long-maze-nickel-octopus-pretty.-1.jpg'
I think if you are using Windows you need to use \ instead of /.
For example: file_path = "C:\\Users\\arter\\filename.ext" or file_path = r"C:\Users\arter\filename.ext"
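If you want to rule out path length as a factor, a quick diagnostic (a sketch using the file_path variable from downloader.py, not part of the original code) is to print the length of the full path before opening it; on Windows, paths longer than roughly 260 characters can fail with FileNotFoundError unless long-path support is enabled, which would explain why shortening the album name makes the error go away:
# hypothetical check: very long Windows paths (over ~260 characters) can
# raise FileNotFoundError even when every directory exists
print(len(file_path), file_path)
if len(file_path) > 250:
    print("Warning: this path may exceed the Windows MAX_PATH limit")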
I am trying to make a Python 3 script that takes a screenshot, uploads it to a website and then deletes the screenshot from the computer. The problem occurs when I try to delete the file using os.remove(). I get the following error: "The process cannot access the file because it is being used by another process". Any ideas on how to fix this?
pic = pyautogui.screenshot()
file_name = 'ss-' + nume + "-" + str(random.randint(0, 1000)) + '.png'
pic.save(file_name)
form_data = {
    'image': (file_name, open(file_name, 'rb')),
    'nume': ('', str(nume)),
}
response = requests.post('https://website.com', files=form_data)
print(response)
k = 1
os.remove(file_name)
The problem is that you opened the file with open(file_name, 'rb') and never closed it before calling remove().
Try this:
pic = pyautogui.screenshot()
file_name = 'ss-' + nume + "-" + str(random.randint(0, 1000)) + '.png'
pic.save(file_name)
f = open(file_name, 'rb') # open the file
form_data = {
    'image': (file_name, f),
    'nume': ('', str(nume)),
}
response = requests.post('https://website.com', files=form_data)
print(response)
k = 1
f.close() # close file before remove
os.remove(file_name)
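As a variation on the same idea, a with block closes the file automatically, even if the upload raises an exception (a sketch reusing the file_name and nume variables from the code above):
with open(file_name, 'rb') as f:  # file is closed as soon as the block ends
    form_data = {
        'image': (file_name, f),
        'nume': ('', str(nume)),
    }
    response = requests.post('https://website.com', files=form_data)
print(response)
os.remove(file_name)  # safe to remove now that the handle is closed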
Here is the link I have used (Download files from Amazon S3 with Django). Using this, I'm able to download a single file.
Code:
s3_template_path = queryset.values('file')
filename = 'test.pdf'
conn = boto.connect_s3('<aws access key>', '<aws secret key>')
bucket = conn.get_bucket('your_bucket')
s3_file_path = bucket.get_key(s3_template_path)
response_headers = {
    'response-content-type': 'application/force-download',
    'response-content-disposition': 'attachment;filename="%s"' % filename
}
url = s3_file_path.generate_url(60, 'GET',
                                response_headers=response_headers,
                                force_http=True)
return HttpResponseRedirect(url)
I need to download multiple files from S3; a zip would be better. Can the mentioned method be modified and used? If not, please suggest another method.
Okay, here is a possible solution. It basically downloads each file and zips them into an archive, then returns this to the user.
Not sure if s3_template_path is the same for each file, but change this if necessary.
# python 3
import io
import os
import zipfile

import boto
import requests
from django.http import HttpResponse

file_names = ['test.pdf', 'test2.pdf', 'test3.pdf']

# set up the in-memory zip archive
zip_subdir = "download_folder"
zip_filename = zip_subdir + ".zip"
byte_stream = io.BytesIO()
zf = zipfile.ZipFile(byte_stream, "w")

for filename in file_names:
    s3_template_path = queryset.values('file')
    conn = boto.connect_s3('<aws access key>', '<aws secret key>')
    bucket = conn.get_bucket('your_bucket')
    s3_file_path = bucket.get_key(s3_template_path)
    response_headers = {
        'response-content-type': 'application/force-download',
        'response-content-disposition': 'attachment;filename="%s"' % filename
    }
    url = s3_file_path.generate_url(60, 'GET',
                                    response_headers=response_headers,
                                    force_http=True)

    # download the file
    file_response = requests.get(url)
    if file_response.status_code == 200:
        # create a local copy of the file
        f1 = open(filename, 'wb')
        f1.write(file_response.content)
        f1.close()

        # write the file to the zip folder
        fdir, fname = os.path.split(filename)
        zip_path = os.path.join(zip_subdir, fname)
        zf.write(filename, zip_path)

# close the zip folder and return
zf.close()
response = HttpResponse(byte_stream.getvalue(), content_type="application/x-zip-compressed")
response['Content-Disposition'] = 'attachment; filename=%s' % zip_filename
return response
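As a design note, the downloaded bytes can also be written straight into the archive, which avoids leaving copies of the files on disk. A sketch of the body of the if block above, under the same assumptions:
if file_response.status_code == 200:
    # add the file to the zip directly from memory, no temporary file on disk
    arc_name = os.path.join(zip_subdir, os.path.basename(filename))
    zf.writestr(arc_name, file_response.content)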
I want to upload a zip file with .csv files and output a zip file with .vm files.
I use this code:
def csv_archive_to_vm(request):
    response = HttpResponse(content_type='application/force-download')
    work_string = ''
    if request.method == "POST":
        ## reading input zip file
        input_file = request.FILES.get('file')
        zf = zipfile.ZipFile(input_file)
        for info in zf.infolist():
            ## reading files in archive
            path = re.search('(.*\.csv)', info.filename)
            path_name = re.search('(.*/)(.*\.csv)', info.filename)
            for string in zf.open(info.filename):
                quotes_search = re.search('"(.*)",\s*"(.*)",\s*"(.*)"', string)
                if quotes_search:
                    descr = quotes_search.group(1)
                    macro_name = quotes_search.group(2)
                    say = quotes_search.group(3)
                    new_lines_search = re.search('/n', say)
                    if new_lines_search:
                        say = re.sub('/n', '\n\t\t', say)
                    ## making content for new files for new archive
                    work_string = work_string + '##' + descr + '\n#macro(' + macro_name + ')\n\t#random()\n\t\t' + say + '\n\t#end\n#end\n\n'
            ## outputting new archive
            zipdata = StringIO()
            zf_create = zipfile.ZipFile(zipdata, mode='a')
            try:
                if path_name:
                    zf_create.writestr(str(path_name.group(1)) + str(path_name.group(2))[0:-4] + '.vm', work_string)
            finally:
                zf_create.close()
            work_string = ''
        response = HttpResponse(zipdata.read())
        response['Content-Disposition'] = 'attachment; filename=assistant-linguistics_vm.zip'
        response['Content-Type'] = 'application/x-zip'
    return response
But I get an empty zip archive weighing 0 KB. What am I doing wrong? Thanks.
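For what it's worth, a likely cause is that zipdata is recreated inside the loop and then read from its end-of-stream position, so the response body ends up empty. A minimal sketch of the in-memory zip handling only, with illustrative placeholder file names rather than your real data (io.BytesIO works on Python 3; StringIO plays the same role on Python 2):
import io
import zipfile

# build the archive once, outside the loop over input files
zipdata = io.BytesIO()
zf_create = zipfile.ZipFile(zipdata, mode='w')
for name, content in [('example/file1.vm', 'macro one'), ('example/file2.vm', 'macro two')]:
    zf_create.writestr(name, content)
zf_create.close()  # finalise the archive before reading it

# zipdata.read() here would return b'' because the stream position is at the end;
# getvalue() (or seek(0) followed by read()) returns the full archive bytes
archive_bytes = zipdata.getvalue()
print(len(archive_bytes))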