How to set a secure file path in settings using Python and Django

I need some help. I am uploading a file into a folder, and for that I have set a constant in my settings.py file. My code is below.
settings.py:
FILE_PATH = os.getcwd()+'/upload/'
views.py:
report = Reactor.objects.all()
filename = str(uuid.uuid4()) + '.csv'
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=' + filename
with open(settings.FILE_PATH + filename, 'w') as csv_file:
    file_writer = csv.writer(csv_file)
    response_writer = csv.writer(response)
    file_writer.writerow(['Name', 'Status', 'Date'])
    response_writer.writerow(['Name', 'Status', 'Date'])
    for rec in report:
        if rec.status == 1:
            status = 'Start'
        if rec.status == 0:
            status = 'Stop'
        if rec.status == 2:
            status = 'Suspend'
        file_writer.writerow([rec.rname, status, rec.date])
        response_writer.writerow([rec.rname, status, rec.date])
return response
Here I need a secure file path so that the downloaded file is also saved into the upload folder, using Python and Django.
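A minimal sketch of a safer way to build that path, assuming a standard Django layout where settings.py defines BASE_DIR (the helper name is hypothetical):
settings.py:
import os
FILE_PATH = os.path.join(BASE_DIR, 'upload')
views.py:
import os
from django.conf import settings

def safe_upload_path(filename):
    # Resolve the final path and refuse anything that escapes the upload
    # folder, e.g. a filename containing '../'.
    base = os.path.abspath(settings.FILE_PATH)
    path = os.path.abspath(os.path.join(base, filename))
    if not path.startswith(base + os.sep):
        raise ValueError('unsafe filename: %s' % filename)
    os.makedirs(base, exist_ok=True)  # make sure the upload folder exists
    return path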

Are you writing to the file row by row? I would build a list or DataFrame first and then write that to the CSV.
import pandas as pd

# make your dataframe by adding your rows to a list first
rows = []  # append your [name, status, date] entries here
df = pd.DataFrame(rows)
df.to_csv(filepath)
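A fuller sketch of that approach, reusing the Reactor queryset and the status mapping from the question (FILE_PATH as defined in settings.py; the view name is hypothetical):
import uuid
import pandas as pd
from django.conf import settings
from django.http import HttpResponse

def export_report(request):
    # Collect all rows first, then write the CSV in one go.
    labels = {1: 'Start', 0: 'Stop', 2: 'Suspend'}
    rows = [[rec.rname, labels.get(rec.status, ''), rec.date]
            for rec in Reactor.objects.all()]
    df = pd.DataFrame(rows, columns=['Name', 'Status', 'Date'])

    filename = str(uuid.uuid4()) + '.csv'
    df.to_csv(settings.FILE_PATH + filename, index=False)  # copy in the upload folder

    response = HttpResponse(df.to_csv(index=False), content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=' + filename
    return response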

Related

Django: how to create a tmp Excel file and return it to the browser within the response

I have a process that builds a tmp file and then returns it to the browser as CSV. Now I want to do the same but return an Excel file.
What I have for the CSV is a Django view that does:
def export_wallet_view(request):
    tmp = tempfile.NamedTemporaryFile(delete=False)
    with open(tmp.name, 'w', encoding="utf-8-sig") as fi:
        csv_headers = [
            'Id',
            'Name'
        ]
        fi.write(';'.join(csv_headers))
        fi.write('\n')
        # here I also save the rows into the file
    response = FileResponse(open(tmp.name, 'rb'))
    response['Content-Disposition'] = 'attachment; filename="wallet.csv"'
    return response
So to convert it to Excel, I tried something like this using pandas:
df = pd.read_csv(tmp.name)
df.to_excel('pandas_to_excel.xlsx', sheet_name='new_sheet_name')
The problem is that this creates the Excel file on the server, and I would like to do something like:
df = pd.read_csv(tmp.name)
df.to_excel('pandas_to_excel.xlsx', sheet_name='new_sheet_name')  # this being a tmp file
response = FileResponse(open(tmp.name, 'rb'))  # this should be the new excel tmp file
response['Content-Disposition'] = 'attachment; filename="wallet.csv"'
return response
Thanks
I don't understand your problem.
You should use the same 'pandas_to_excel.xlsx' in both
df.to_excel('pandas_to_excel.xlsx', ...)
... open('pandas_to_excel.xlsx', 'rb')
or the same tmp.name in both
df.to_excel(tmp.name, ...)
... open(tmp.name, 'rb')
You can even use NamedTemporaryFile() again to create a new temporary name.
tmp = tempfile.NamedTemporaryFile(delete=False)
df.to_excel(tmp.name, ...)
... open(tmp.name, 'rb')
But a popular method is to use io.StringIO() or io.BytesIO() to create a file-like object in memory, without creating a file on disk.
def export_wallet_view(request):
    csv_headers = ['Id', 'Name']
    file_like_object = io.BytesIO()
    file_like_object.write(';'.join(csv_headers).encode('utf-8-sig'))
    file_like_object.write('\n'.encode('utf-8-sig'))
    file_like_object.write('other rows'.encode('utf-8-sig'))
    file_like_object.seek(0)  # move to the beginning of the file
    response = FileResponse(file_like_object)
    response['Content-Disposition'] = 'attachment; filename="wallet.csv"'
    return response
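On Django 2.0 or newer, FileResponse can also set that header itself:
response = FileResponse(file_like_object, as_attachment=True, filename='wallet.csv')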
For Excel it could be something like this. I use io.StringIO() to read the CSV directly into pandas, and later I use io.BytesIO() to create a file-like object with the Excel data.
def export_wallet_view(request):
    csv_headers = ['Id', 'Name']
    text = ';'.join(csv_headers)
    text += '\n'
    text += 'other rows'
    df = pd.read_csv(io.StringIO(text), sep=';')  # semicolon-delimited, matching the headers
    file_like_object = io.BytesIO()
    df.to_excel(file_like_object)
    file_like_object.seek(0)  # move to the beginning of the file
    response = FileResponse(file_like_object)
    response['Content-Disposition'] = 'attachment; filename="pandas_to_excel.xlsx"'
    return response

How to read and overwrite a file in AWS s3 using Lambda and Python?

I'm trying the following, but when I overwrite the file that triggered the Lambda, the function is invoked again and it goes into a loop. Can anyone please help me? I have also pasted below the piece of code I am using for the Lambda.
Task
Read a file in a folder called 'Folder A' when it is uploaded to this folder
Then replace a particular column whose values have more than 10 characters
Then upload this file back to the same folder; unfortunately it goes into a loop because of the Lambda invocation
When I tried moving the output to a different folder called TrimmedFile, it worked fine without any loops.
Can someone tell me how to read, edit, and save the file in the same folder that triggered the invocation?
import urllib.parse
import boto3
import json
import os
import csv

print('Loading function')
s3 = boto3.client('s3')

def lambda_handler(event, context):
    # Get the object from the event and show its content type
    bucket = event['Records'][0]['s3']['bucket']['name']
    key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')
    try:
        #print("CONTENT TYPE: " + key['ContentType'])
        #for record in event['Records']:
        print("file name " + key)
        #bucket = record['s3']['bucket']['name']
        #file_key = urllib.parse.unquote_plus(record['s3']['object']['key'], encoding='utf-8')
        file_key = key
        csvfile = s3.get_object(Bucket=bucket, Key=file_key)
        csvcontent = csvfile["Body"].read().decode("utf-8")
        file = csvcontent.split("\n")
        csv_reader = csv.reader(file)
        line_count = 0
        colindex = ''
        content = []
        contentstring = ''
        s33 = boto3.resource('s3')
        copy_source = {
            'Bucket': bucket,
            'Key': file_key
        }
        new_bucket = s33.Bucket(bucket)
        print(file_key)
        print(bucket)
        src_folder = "FolderA/"
        new_filekey = file_key.replace(src_folder, "")
        print(new_filekey)
        new_bucket.copy(copy_source, 'BKP/' + new_filekey)
        for row in csv_reader:
            if row:
                row = list(map(str.strip, row))
                if line_count == 0:
                    if 'ColToTruncate' in row:
                        colindex = row.index('ColToTruncate')
                        line_count += 1
                    else:
                        print('No ColToTruncate column found in ' + file_key)
                        return 'No ColToTruncate column found in ' + file_key
                else:
                    if len(row[colindex]) >= 10:
                        row[colindex] = row[colindex][0:2]
                    line_count += 1
                    content.append(row)
                    contentstring += ', '.join(row)
                    contentstring = contentstring + '\n'
        #print(contentstring)
        #filename = file_key + '.csv'
        uploadByteStream = bytes(contentstring.encode('utf-8'))
        #new_key = 'TrimmedFiles/' + new_filekey
        s3.put_object(Bucket=bucket, Key=file_key, Body=uploadByteStream)
        return True
    except Exception as e:
        print(e)
        print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))
        raise e
I believe you have created an event trigger on S3 and associated it with the Lambda, so when you replace the file the Lambda is triggered again and it becomes a loop.
There could be 2 ways to handle it:
1. Configure a PUT or POST event type (whichever suits your case) to trigger the Lambda. Then save the updated file at another location and copy it to the original one. Doing this, S3 will generate an "s3:ObjectCreated:Copy" event, which will not invoke the Lambda again (a minimal in-handler sketch of the same idea follows after this list).
# Copying the file from the secondary location to the original location
copy_sr = {
    "Bucket": bucket,
    "Key": file_key_copy
}
s3_resource.meta.client.copy(copy_sr, final_bucket, file_key_copy)

# Deleting the file from the secondary location
s3_client.delete_object(Bucket=bucket, Key=file_key_copy)
2. Use an SQS queue and configure it not to process any message received twice within a specified period of time (depending on how often the file is updated).
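A minimal sketch of the first idea inside the handler itself: S3 reports the event name in each notification record, so events produced by the function's own copy-back can simply be ignored (assuming the standard S3 notification payload):
def lambda_handler(event, context):
    record = event['Records'][0]
    # The copy performed by the function itself arrives as 'ObjectCreated:Copy',
    # while user uploads arrive as 'ObjectCreated:Put' or 'ObjectCreated:Post'.
    if record['eventName'] == 'ObjectCreated:Copy':
        print('Skipping event caused by our own copy-back')
        return None
    # ... process the uploaded file as in the question ...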
This demonstrates how to read a file and replace it after editing. It can serve as skeleton code.
import boto3
import io

client = boto3.client('s3')
res = boto3.resource('s3')

def lambda_handler(event, context):
    file_key = event['file_key']
    file_obj = res.Object("bucket_name", file_key)
    content_obj = file_obj.get()['Body'].read().decode('utf-8')  # fetching the data
    res.Object("bucket_name", file_key).delete()  # here you are deleting the old file

    ###### Perform your operation and save the result in the new_data variable ######

    new_file = io.BytesIO(new_data.encode())
    client.upload_fileobj(new_file, "bucket_name", file_key)  # uploading the file at the exact same location

Return multiple pandas DataFrames as a single Django zip file (one file per DF) HttpResponse

I am trying to figure out a way to return multiple DataFrames from a Django view as a zip HttpResponse(zip_file, content_type="application/x-zip-compressed").
I tried this:
import zipfile
import datetime

def makeZipfiles(df_list):
    i = 0
    with zipfile.ZipFile('some_zip.zip', 'w') as csv_zip:
        for dfl in df_list:
            csv_zip.writestr(f"file_{str(i)}.csv", dfl.to_csv(index=False))
            i = i + 1
    return csv_zip
and in the view, I have the following:
zip_file = makeZipfiles(df_list)
response = HttpResponse(zip_file, content_type="application/x-zip-compressed")
return response
but when I try to open the zip file in the download folder, I get an error that the 'archive is either unknown format or damaged'. The exported file is 1 KB in size, and when I open it in Notepad I see only this content:
"<zipfile.ZipFile [closed]>"
Please advise whether what I am trying to do is feasible, and if so, please provide sample code.
Thank you
I haven't tried this out myself yet, but it seems to fit your needs quite well.
https://georepublic.info/en/blog/2019/zip-files-in-django-admin-and-python/
The author describes in detail how to get .csv files from the different dataframes and zip them into one file for download at the end.
His final code was the following:
file_list = [
    . . .,
    UserInfoResource()
]

def getfiles():
    fileSet = {}
    date_now = dt.datetime.now().strftime('%Y%m%d%H%M')
    for file in file_list:
        dataset = file.export()
        dataset = dataset.csv
        name = file._meta.model.__name__ + date_now
        fileSet[name] = dataset
    return fileSet

def download_zip(request):
    files = getfiles()
    zip_filename = 'Survey_Data' + dt.datetime.now().strftime('%Y%m%d%H%M') + '.zip'
    zip_buffer = io.BytesIO()
    with zipfile.ZipFile(zip_buffer, "a", zipfile.ZIP_DEFLATED, False) as zip_file:
        for k, file in files.items():
            zip_file.writestr(k + '.csv', file)
    zip_buffer.seek(0)
    resp = HttpResponse(zip_buffer, content_type='application/zip')
    resp['Content-Disposition'] = 'attachment; filename = %s' % zip_filename
    return resp
I will update this answer if I manage to apply this on my own website.
(Let us know if you found a better answer in the meantime, thanks ;) )
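Applied to makeZipfiles from the question, a minimal sketch along the same lines (assuming df_list holds pandas DataFrames) builds the archive in an in-memory buffer and returns the bytes rather than the closed ZipFile object:
import io
import zipfile

def makeZipfiles(df_list):
    zip_buffer = io.BytesIO()
    with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as csv_zip:
        for i, dfl in enumerate(df_list):
            csv_zip.writestr(f'file_{i}.csv', dfl.to_csv(index=False))
    zip_buffer.seek(0)
    return zip_buffer.getvalue()  # raw zip bytes, safe to pass to HttpResponse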

How to download a file and save it inside the upload folder using Django and Python

I am trying to write content into a CSV file. Here I need to download that file and also save the same file into the upload folder. My code is below.
if request.method == 'POST':
    param = request.POST.get('param')
    report = Reactor.objects.all()
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=' + str(uuid.uuid4()) + '.csv'
    writer = csv.writer(response)
    writer.writerow(['Name', 'Status', 'Date'])
    for rec in report:
        if rec.status == 1:
            status = 'Start'
        if rec.status == 0:
            status = 'Stop'
        if rec.status == 2:
            status = 'Suspend'
        writer.writerow([rec.rname, status, rec.date])
    #return response
    u = urllib.URLopener()
    f = u.open(param)
    open("down.txt", "w").write(f.read())
    pers = User.objects.get(pk=request.session['id'])
    root = []
    user_name = pers.uname
    count = 1
    root.append(
        {'username': user_name,
         'count': count
         })
    return render(request, 'plant/home.html',
                  {'user': root, 'count': 1})
Here I am writing the database values into one CSV file, and that file is named with a unique id. I need to save that file inside the upload folder, whose path will be set inside the settings.py file, and the same file should also be downloaded.
You should try to generate your CSV into a buffer and then use it both to save the file to the file system and to return the CSV as the response. Something like this:
import csv
import os
import shutil
import uuid
from io import StringIO
from django.http import HttpResponse
from django.conf import settings

def my_view(request):
    csvbuffer = StringIO()
    writer = csv.writer(csvbuffer)
    # Write data from the DB here into the CSV buffer

    # Write the file to the file system
    path = os.path.join(settings.FILE_PATH, "%s.csv" % uuid.uuid4())
    with open(path, 'w') as fd:
        csvbuffer.seek(0)
        shutil.copyfileobj(csvbuffer, fd)

    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="somefilename.csv"'
    csvbuffer.seek(0)
    response.write(csvbuffer.read())
    return response
I am quite sure this is not the most optimized way to do it, but at least it should work.
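An alternative sketch that renders the CSV straight into the response and then reuses the rendered bytes for the copy on disk (same FILE_PATH constant assumed; view name hypothetical):
import csv
import os
import uuid
from django.conf import settings
from django.http import HttpResponse

def my_view(request):
    filename = '%s.csv' % uuid.uuid4()
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=' + filename
    writer = csv.writer(response)
    writer.writerow(['Name', 'Status', 'Date'])
    # ... write the remaining rows from the DB here ...

    # HttpResponse.content holds the rendered CSV as bytes; save a copy to disk.
    with open(os.path.join(settings.FILE_PATH, filename), 'wb') as fd:
        fd.write(response.content)
    return response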

How to download a CSV file and return to the page using Python and Django

I am generating the CSV file and downloading it, but after the download it needs to return to another page, and this is not happening.
My code is below:
if request.method == 'POST':
    param = request.POST.get('param')
    report = Reactor.objects.all()
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=status.csv'
    writer = csv.writer(response)
    writer.writerow(['Name', 'Status', 'Date'])
    for rec in report:
        if rec.status == 1:
            status = 'Start'
        if rec.status == 0:
            status = 'Stop'
        if rec.status == 2:
            status = 'Suspend'
        writer.writerow([rec.rname, status, rec.date])
    return response
    u = urllib.URLopener()
    f = u.open(param)
    open("down.txt", "w").write(f.read())
    pers = User.objects.get(pk=request.session['id'])
    root = []
    user_name = pers.uname
    count = 1
    root.append(
        {'username': user_name,
         'count': count
         })
    return render(request, 'plant/home.html',
                  {'user': root, 'count': 1})
Here the user input is actually a URL, e.g. https://pypi.python.org/pypi/autopep8. My requirement is: first, all required data should be fetched from the database, written into one CSV file, and downloaded; secondly, the content of the user-provided URL should be written into a text file and downloaded; finally, the page should redirect to the required page. In my case, after downloading the CSV file the page just stays the same, i.e. it does not redirect.
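For context, nothing after the first return response ever runs, and a single HTTP response cannot both deliver a file and render a new page. A minimal sketch of one common workaround splits the work into two views and lets the rendered page trigger the download (view and URL names are hypothetical):
def download_csv(request):
    # Serves only the CSV attachment, built as in the code above.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=status.csv'
    writer = csv.writer(response)
    writer.writerow(['Name', 'Status', 'Date'])
    # ... write the report rows here ...
    return response

def report_page(request):
    # Renders the page; the template can start the download itself,
    # e.g. with <a href="/download-csv/" download>Export</a> or a small script.
    return render(request, 'plant/home.html', {'count': 1})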
