I want to test my Tornado server, but I don't know how, and my test code doesn't run.
Please help me unit test on Tornado.
I don't speak English very well, so I'm sorry if my question is poorly worded.
This is my Tornado server code:
import tornado.ioloop
import os
from tornado import web, gen
from concurrent.futures import ThreadPoolExecutor
from json_output import on_correct_text
import json

thread_pool = ThreadPoolExecutor()

class cer(tornado.web.RequestHandler):
    _thread_pool = thread_pool

    @gen.coroutine
    def post(self, json_data):
        # temporary json data
        _json = {'file_name': 'tmp_text.txt', 'file_text': 'temp text...'}
        json_data = json.dumps(_json)
        tmp_json = {'file_name': '', 'text': '', 'upload_start_timestamp': '',
                    'upload_end_timestamp': '', 'process_start_timestamp': '',
                    'process_end_timestamp': '', 'response_time_second': '',
                    'status': 'upload_waiting'}
        json_d = json.loads(json_data)
        print(json_d)
        jsonString = yield on_correct_text(json_d, tmp_json)
        self.set_header("Content-Type", "application/json")
        self.write(jsonString)
        self.finish()

def make_app():
    return tornado.web.Application([
        (r"/([a-zA-Z0-9.:_/%{}]+)", cer)
    ])

if __name__ == "__main__":
    app = make_app()
    app.listen(8888)
    tornado.ioloop.IOLoop.instance().start()
And this is json_output.py:
import datetime, time, json
from concurrent.futures import ThreadPoolExecutor
from tornado import gen

thread_pool = ThreadPoolExecutor()
_thread_pool = thread_pool

@gen.coroutine
def on_correct_text(json_data, tmp_json):
    file_up_starttime = datetime.datetime.now()
    tmp_json['upload_start_timestamp'] = str(file_up_starttime)
    tmp_json['status'] = "uploading"
    try:
        tmp_json['text'] = json_data['file_text']
        tmp_json['file_name'] = json_data['file_name']
    except KeyError:
        tmp_json['status'] = "upload_failed"
        print("upload failed")
        jsonString = json.dumps(tmp_json, ensure_ascii=False)
        return jsonString
    start_time = time.time()
    file_up_endtime = datetime.datetime.now()
    tmp_json['upload_end_timestamp'] = str(file_up_endtime)
    process_starttime = datetime.datetime.now()
    tmp_json['process_start_timestamp'] = str(process_starttime)
    tmp_json['status'] = "processing"
    try:
        process_endtime = datetime.datetime.now()
        tmp_json['process_end_timestamp'] = str(process_endtime)
        tmp_json['status'] = "complete"
    except Exception:
        tmp_json['status'] = "process_failed"
        print("process failed")
        jsonString = json.dumps(tmp_json, ensure_ascii=False)
        return jsonString
    response_time = round((time.time() - start_time), 2)
    tmp_json['response_time_second'] = str(response_time)
    jsonString = json.dumps(tmp_json, ensure_ascii=False)
    return jsonString
Please help me. I don't know how to unit test this Tornado server.
There is some excellent documentation on how to set up unit testing in Tornado here:
http://www.tornadoweb.org/en/stable/testing.html
import json
from tornado.testing import AsyncHTTPTestCase
from server import make_app  # assuming the server code above lives in server.py

class cerTester(AsyncHTTPTestCase):
    def get_app(self):
        return make_app()

    def get_url(self, path):
        """Returns an absolute url for the given path on the test server."""
        return '%s://localhost:%s%s' % (self.get_protocol(),
                                        self.get_http_port(), path)

    def test_url(self):
        # This is where your actual test will take place.
        # Tornado requires a body (even an empty one) for POST requests.
        response = self.fetch('/sometest', method="POST", body="")
        assert response.code == 200
        data = json.loads(response.body.decode('utf-8'))
        assert data
        # More asserts
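To run the test, you can drop a standard unittest entry point at the bottom of the test file. AsyncHTTPTestCase starts your application on an unused local port for each test, so you don't need a separately running server:

if __name__ == '__main__':
    import unittest
    unittest.main()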
I am able to use haveibeenpwned to search for a single compromised account. However, I could not find an option to use the API key to search for compromises across all the email accounts on a domain. (For example, if the domain is xyz.com, I want to search for the compromise of abc@xyz.com, peter.charlie@xyz.com, and so on.) I am aware of the notification email that I can sign up for, but that is a lengthy process and I prefer using the API.
So I wrote a script to search haveibeenpwned for all the email addresses of my domain, but it takes very long. I searched through a couple of GitHub projects, but I did not find any such implementation. Has anyone tried this before?
I have added the code below. I am using a multithreading approach, but it still takes very long. Is there any other optimization strategy I can use? Please help. Thank you.
import requests, json
import threading
from time import sleep
import datetime
import splunklib.client as client
import splunklib.results as results
from itertools import islice
import linecache
import sys

date = datetime.datetime.now()

def PrintException():
    exc_type, exc_obj, tb = sys.exc_info()
    f = tb.tb_frame
    lineno = tb.tb_lineno
    filename = f.f_code.co_filename
    linecache.checkcache(filename)
    line = linecache.getline(filename, lineno, f.f_globals)
    print 'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj)

class myThread(threading.Thread):
    def __init__(self, threadID, name, list_emails):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.list_emails = list_emails

    def run(self):
        i = 0
        print "Starting " + self.name
        for email in self.list_emails:
            print i
            i = i + 1
            result = check_pasteaccount(email)
            print email
            print result
        print "Exiting " + self.name

def check_pasteaccount(account):
    account = str(account)
    result = ""
    URL = "https://haveibeenpwned.com/api/v3/pasteaccount/%s?truncateResponse=false" % (account)
    # print(URL)
    headers = {'hibp-api-key': api_key}
    try:
        r = requests.get(url=URL, headers=headers)
        # sleep(2)
        status_code = r.status_code
        if status_code == 200:
            result = []
            for entry in json.loads(r.text):
                if int((date - datetime.datetime.strptime(entry['Date'], '%Y-%m-%dT%H:%M:%SZ')).days) > 120:
                    pass
                else:
                    result.append(['Title: {0}'.format(entry['Title']),
                                   'Source: {0}'.format(entry['Source']),
                                   'Paste ID: {0}'.format(entry['Id'])])
            if len(result) == 0:
                result = "No paste reported for given account and time frame."
            else:
                paste_result = ""
                for entry in result:
                    for item in entry:
                        paste_result += str(item) + "\r\n"
                    paste_result += "\r\n"
                result = paste_result
        elif status_code == 404:
            result = "No paste for the account"
        elif status_code == 429:
            sleep(5)
            # print "Limit exceeded, sleeping"
            result = check_pasteaccount(account)
        else:
            result = "Exception"
            print status_code
    except Exception as e:
        result = "Exception"
        PrintException()
    return result

def split_every(n, iterable):
    iterable = iter(iterable)
    for chunk in iter(lambda: list(islice(iterable, n)), []):
        yield chunk

def main():
    print datetime.datetime.now()
    # Fetching the list of email addresses from Splunk
    list_emails = connect_splunk()
    print datetime.datetime.now()
    i = 0
    list_split = split_every(1000, list_emails)
    threads = []
    for list in list_split:
        i = i + 1
        thread_name = "Thread" + str(i)
        thread = myThread(1, thread_name, list)
        thread.start()
        threads.append(thread)
    # Wait for all the threads to complete
    for t in threads:
        t.join()
    print "Completed Search"
Here's a shorter and maybe more efficient version of your script using the standard multiprocessing library instead of a hand-rolled thread system.
You'll need Python 3.6+ since we're using f-strings.
You'll need to install the tqdm module for fancy progress bars.
You can adjust the number of concurrent requests with the pool size parameter.
Output is written in machine-readable JSON Lines format into a timestamped file.
A single requests session is shared (per-worker), which means less time spent connecting to HIBP.
import datetime
import json
import multiprocessing
import random
import time

import requests
import tqdm

HIBP_PARAMS = {
    "truncateResponse": "false",
}
HIBP_HEADERS = {
    "hibp-api-key": "xxx",
}

sess = requests.Session()

def check_pasteaccount(account):
    while True:
        resp = sess.get(
            url=f"https://haveibeenpwned.com/api/v3/pasteaccount/{account}",
            params=HIBP_PARAMS,
            headers=HIBP_HEADERS,
        )
        if resp.status_code == 429:
            print("Quota exceeded, waiting for a while")
            time.sleep(random.uniform(3, 7))
            continue
        if resp.status_code >= 400:
            return {
                "account": account,
                "status": resp.status_code,
                "result": resp.text,
            }
        return {
            "account": account,
            "status": resp.status_code,
            "result": resp.json(),
        }

def connect_splunk():
    # TODO: return emails
    return []

def main():
    list_emails = [str(account) for account in connect_splunk()]
    datestamp = datetime.datetime.now().isoformat().replace(":", "-")
    output_filename = f"accounts-log-{datestamp}.jsonl"
    print(f"Accounts to look up: {len(list_emails)}")
    print(f"Output filename: {output_filename}")
    with multiprocessing.Pool(processes=16) as p:
        with open(output_filename, "a") as f:
            results_iterable = p.imap_unordered(
                check_pasteaccount, list_emails, chunksize=20
            )
            for result in tqdm.tqdm(
                results_iterable,
                total=len(list_emails),
                unit="acc",
                unit_scale=True,
            ):
                print(json.dumps(result, sort_keys=True), file=f)

if __name__ == "__main__":
    main()
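Since the output is in JSON Lines format, post-processing is one json.loads per line. A quick hedged sketch of reading a run's results back (the filename below is hypothetical; use whatever the run printed):

import json

with open("accounts-log-2021-01-01T00-00-00.jsonl") as f:  # hypothetical filename
    results = [json.loads(line) for line in f]

# e.g. pull out the accounts that came back with an HTTP error status
errors = [r for r in results if r["status"] >= 400]
print(len(errors), "lookups failed")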
I have a Python Flask app that receives webhooks from another application. When it receives the webhook, it carries out a task (looking up someone's availability) and responds back to the web application. I am getting "UnboundLocalError: local variable 'response' referenced before assignment" when sending a response back. It looks like referencing response at that level is causing issues.
Any help would be greatly appreciated.
from flask import Flask
from flask import request
from flask import make_response
import logging
import json
import random
import os
import importlib
import win32com.client
import pywintypes
import datetime
import pythoncom
from gevent.pywsgi import WSGIServer
from gevent import monkey; monkey.patch_all()
import string

pythoncom.CoInitialize()

logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(message)s')

app = Flask(__name__)

@app.route('/webhook', methods=['POST'])
def webhook():
    req = request.get_json(silent=True, force=True)
    logger.info("Incoming request: %s", req)
    intent = get_intent_from_req(req)
    logger.info('Detected intent %s', intent)
    if intent == "Check Schedule Next":
        pythoncom.CoInitialize()
        emailparam = req.get('queryResult').get('parameters').get('email')
        datetime1 = req.get('queryResult').get('parameters').get('date-time').get("date_time")
        datetime2 = datetime1.replace('T', ' ')
        datetime3 = datetime2.replace("-04:00", "")
        print(datetime3)
        pythoncom.CoInitialize()

        class MeetingRoom:
            def __init__(self, inputDate, duration, locationMail):
                self.inputDate = inputDate
                self.oOutlook = win32com.client.Dispatch("Outlook.Application")
                self.bookings = self.oOutlook.CreateItem(1)
                self.bookings.Start = inputDate
                self.bookings.Duration = duration
                self.bookings.Subject = 'Follow Up Meeting'
                self.bookings.MeetingStatus = 1
                self.roomRecipient = self.bookings.Recipients.Add(locationMail)

            def checkRoomAvailability(self):
                bookingDateTime = datetime.datetime.strptime(self.inputDate, '%Y-%m-%d %H:%M:%S')
                self.roomRecipient.resolve
                myDate = bookingDateTime.date()
                pywintypeDate = pywintypes.Time(myDate)
                availabilityInfo = self.roomRecipient.FreeBusy(pywintypeDate, self.bookings.Duration, True)
                timeAvailability = []
                newTime = pywintypeDate
                # print(newTime)
                currentTime = datetime.datetime.now()
                for isAvailable in availabilityInfo:
                    # print(newTime, " :: ", isAvailable)
                    if isAvailable == "0" and newTime > currentTime:
                        timeAvailability.append(newTime)
                    newTime = newTime + datetime.timedelta(minutes=self.bookings.Duration)
                # print(availabilityInfo)
                # for value in timeAvailability:
                #     print(value)
                try:
                    index = timeAvailability.index(bookingDateTime)
                    print(emailparam, "is available")
                    response = {
                        'fulfillmentText': emailparam
                    }
                    # self.bookings.Save()
                    # self.bookings.Send()
                except ValueError:
                    for timestamp in timeAvailability:
                        if bookingDateTime <= timestamp:
                            break
                    print("I dont see availability for", emailparam, "at", bookingDateTime, " but next available time is ", timestamp)
                    x = ("I dont see availability for", emailparam, "at", bookingDateTime, " but next available time is ", timestamp)
                    response = {
                        'fulfillmentText': x
                    }

        # def bookMeetingRoom():
        if __name__ == '__main__':
            meetingRoomObj = MeetingRoom(datetime3, 15, emailparam)
            meetingRoomObj.checkRoomAvailability()
        # response = {
        #     'fulfillmentText': emailparam
        # }
        res = create_response(response)
        return res

def get_intent_from_req(req):
    try:
        intent_name = req['queryResult']['intent']['displayName']
    except KeyError:
        return None
    return intent_name

def get__from_req(req):
    try:
        intent_name = req['queryResult']['intent']['displayName']
    except KeyError:
        return None
    return intent_name

def create_response(response):
    res = json.dumps(response, indent=4)
    logger.info(res)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r

if __name__ == '__main__':
    LISTEN = ('0.0.0.0', 8080)
    http_server = WSGIServer(LISTEN, app)
    http_server.serve_forever()
This line references response before it's initialized:
res = create_response(response)
Perhaps make sure all code paths initialize the response variable?
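A minimal standalone illustration of the same error and the fix, stripped of the Outlook details (function and key names here are made up):

def lookup(is_available):
    if is_available:
        response = {'fulfillmentText': 'available'}
    return response  # UnboundLocalError when is_available is False

def lookup_fixed(is_available):
    response = {'fulfillmentText': 'not available'}  # every path now assigns response
    if is_available:
        response = {'fulfillmentText': 'available'}
    return response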
Solution
It seems you've created your response object in the wrong scope: it is local to the function checkRoomAvailability, so webhook() can't see it.
Inside the function checkRoomAvailability after you've created the response object, return it like so
response = {
    'fulfillmentText': x
}
return response  # ADD THIS LINE
Remove these lines
if __name__ == '__main__':
    meetingRoomObj = MeetingRoom(datetime3, 15, emailparam)
    meetingRoomObj.checkRoomAvailability()
Then add back the object creation and call right before you create your response like so
meetingRoomObj = MeetingRoom(datetime3, 15, emailparam)
response = meetingRoomObj.checkRoomAvailability()
res = create_response(response)
return res
Suggestion
You are lacking some fundamental understanding of how Python scoping works, so I suggest giving this a read, friend:
https://docs.python.org/3.3/reference/executionmodel.html
I've been working on editing a webhook that was originally meant to be used with a weather API so that it can be used with a postcode/zipcode API instead. The original file is here: https://github.com/dialogflow/fulfillment-webhook-weather-python/blob/master/app.py
I can't understand where mine is different. I thought I had solved it when I replaced urlencode with quote, but alas, it wasn't enough.
The problem is very unlikely to be in the source JSON request that collects the postcode in postcodeValue(). The API URL comes out correct when you enter it into a browser and is presented quite simply:
https://api.getaddress.io/find/SW11%201th?api-key=I98umgPiu02GEMmHdmfg3w12959
Is it in the correct format? Maybe I need to convert it to become even more JSON than it already is. This question is essentially an end-of-day brain dump that I'm hoping someone can save me with.
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()

from urllib.parse import urlparse, urlencode, quote
from urllib.request import urlopen, Request
from urllib.error import HTTPError

import json
import os

from flask import Flask
from flask import request
from flask import make_response

# Flask app should start in global layout
app = Flask(__name__)

# this line is just naming conventions I reckon, with a reference to expect to receive data as POST
@app.route('/webhook', methods=['POST'])
def webhook():
    req = request.get_json(silent=True, force=True)
    # who knows where this is getting printed
    print("Request:")
    print(json.dumps(req, indent=4))
    res = processRequest(req)
    res = json.dumps(res, indent=4)
    # print(res)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r

def processRequest(req):
    if req.get("result").get("action") != "yahooWeatherForecast":
        return {}
    baseurl = "https://api.getaddress.io/find/"
    apikey = "?api-key=I98umgPiu02GEMmHdmfg3w12959"
    yql_query = postcodeValue(req)
    if yql_query is None:
        return {}
    # this line is the actual api request
    yql_url = baseurl + quote(yql_query) + apikey
    result = urlopen(yql_url).read()
    data = json.loads(result)
    res = makeWebhookResult(data)
    return res

# this function extracts an individual parameter and turns it into a string
def postcodeValue(req):
    result = req.get("result")
    parameters = result.get("parameters")
    postcode = parameters.get("postcode")
    if postcode is None:
        return None
    return postcode

# def housenoValue(req):
#     result = req.get("result")
#     parameters = result.get("parameters")
#     houseno = parameters.get("houseno")
#     if houseno is None:
#         return None
#     return houseno

def makeWebhookResult(data):
    longitude = data.get("longitude")
    if longitude is None:
        return {}
    # def makeWebhookResult(data):
    #     query = data.get('query')
    #     if query is None:
    #         return {}
    #     result = query.get('results')
    #     if result is None:
    #         return {}
    #     channel = result.get('channel')
    #     if channel is None:
    #         return {}
    #     item = channel.get('item')
    #     location = channel.get('location')
    #     units = channel.get('units')
    #     if (location is None) or (item is None) or (units is None):
    #         return {}
    #     condition = item.get('condition')
    #     if condition is None:
    #         return {}
    #     print(json.dumps(item, indent=4))
    speech = "Sausage face " + longitude
    print("Response:")
    print(speech)
    return {
        "speech": speech,
        "displayText": speech,
        # "data": data,
        # "contextOut": [],
        "source": "apiai-weather-webhook-sample"
    }

# More flask specific stuff
if __name__ == '__main__':
    port = int(os.getenv('PORT', 5000))
    print("Starting app on port %d" % port)
    app.run(debug=False, port=port, host='0.0.0.0')
Here is a bit cleaner version of your code:
from urllib.request import urlopen
import os
from flask import Flask

app = Flask(__name__)

@app.route('/webhook', methods=['GET'])
def webhook():
    res = processRequest()
    return res

def processRequest():
    try:
        result = urlopen("https://api.getaddress.io/find/SW11%201th?api-key=I98umgPiu02GEMmHdmfg3w12959").read()
        return result
    except:
        return "Error fetching data"

if __name__ == '__main__':
    port = int(os.getenv('PORT', 5000))
    print("Starting app on port %d" % port)
    app.run(debug=False, port=port, host='0.0.0.0')
Open your browser and go to http://localhost:5000/webhook and you should see a response.
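If you still want the postcode to come from the request, here is a hedged sketch of the same handler with the original quote() approach plugged back in (route, API key, and parameter names are taken from the question):

from urllib.parse import quote
from urllib.request import urlopen
from flask import Flask, request

app = Flask(__name__)

@app.route('/webhook', methods=['POST'])
def webhook():
    req = request.get_json(silent=True, force=True)
    postcode = req.get("result", {}).get("parameters", {}).get("postcode")
    if postcode is None:
        return "No postcode supplied"
    # quote() percent-encodes the space in e.g. "SW11 1TH"
    url = "https://api.getaddress.io/find/" + quote(postcode) + "?api-key=I98umgPiu02GEMmHdmfg3w12959"
    return urlopen(url).read()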
I am writing a simple producer/consumer app to call multiple URLs asynchronously.
In the following code, if I set conn_count=1 and add 2 items to the Queue, it works fine, as only one consumer is created. But if I make conn_count=2 and add 4 items to the Queue, only 3 requests are made. The other request fails with ClientConnectorError.
Can you please help me debug the reason for the failure with multiple consumers? Thank you.
I am using an echo server I created.
Server:
import os
import logging.config

import yaml
from aiohttp import web
import json

def start():
    setup_logging()
    app = web.Application()
    app.router.add_get('/', do_get)
    app.router.add_post('/', do_post)
    web.run_app(app)

async def do_get(request):
    return web.Response(text='hello')

async def do_post(request):
    data = await request.json()
    return web.Response(text=json.dumps(data))

def setup_logging(
        default_path='logging.yaml',
        default_level=logging.INFO,
        env_key='LOG_CFG'
):
    path = default_path
    value = os.getenv(env_key, None)
    if value:
        path = value
    if os.path.exists(path):
        with open(path, 'rt') as f:
            config = yaml.safe_load(f.read())
        logging.config.dictConfig(config)
    else:
        logging.basicConfig(level=default_level)

if __name__ == '__main__':
    start()
Client:
import asyncio
import collections
import json
import sys

import async_timeout
from aiohttp import ClientSession, TCPConnector

MAX_CONNECTIONS = 100
URL = 'http://localhost:8080'

InventoryAccount = collections.namedtuple("InventoryAccount", "op_co customer_id")

async def produce(queue, num_consumers):
    for i in range(num_consumers * 2):
        await queue.put(InventoryAccount(op_co=i, customer_id=i * 100))
    for j in range(num_consumers):
        await queue.put(None)

async def consumer(n, queue, session, responses):
    print('consumer {}: starting'.format(n))
    while True:
        try:
            account = await queue.get()
            if account is None:
                queue.task_done()
                break
            else:
                print(f"Consumer {n}, Updating cloud prices for account: opCo = {account.op_co!s}, customerId = {account.customer_id!s}")
                params = {'opCo': account.op_co, 'customerId': account.customer_id}
                headers = {'content-type': 'application/json'}
                with async_timeout.timeout(10):
                    print(f"Consumer {n}, session state " + str(session.closed))
                    async with session.post(URL,
                                            headers=headers,
                                            data=json.dumps(params)) as response:
                        assert response.status == 200
                        responses.append(await response.text())
                        queue.task_done()
        except:
            e = sys.exc_info()[0]
            print(f"Consumer {n}, Error updating cloud prices for account: opCo = {account.op_co!s}, customerId = {account.customer_id!s}. {e}")
            queue.task_done()
    print('consumer {}: ending'.format(n))

async def start(loop, session, num_consumers):
    queue = asyncio.Queue(maxsize=num_consumers)
    responses = []
    consumers = [asyncio.ensure_future(loop=loop, coro_or_future=consumer(i, queue, session, responses))
                 for i in range(num_consumers)]
    await produce(queue, num_consumers)
    await queue.join()
    for consumer_future in consumers:
        consumer_future.cancel()
    return responses

async def run(loop, conn_count):
    async with ClientSession(loop=loop, connector=TCPConnector(verify_ssl=False, limit=conn_count)) as session:
        result = await start(loop, session, conn_count)
        print("Result: " + str(result))

if __name__ == '__main__':
    conn_count = 2
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(run(loop, conn_count))
    finally:
        loop.close()
Reference:
https://pymotw.com/3/asyncio/synchronization.html
https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html
https://hackernoon.com/asyncio-for-the-working-python-developer-5c468e6e2e8e
I am trying to upload a large file (say ~1 GB) from a client (using Python requests.post) to a Flask server.
When the client sends the request to the server in chunks of 1024, the server does not read the whole file and saves a 0 KB file.
Can you please help me debug what exactly I am doing wrong here?
Server - Flask Code:
from flask import Flask, request, jsonify
from werkzeug.utils import secure_filename
import os

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'uploads/'

@app.route("/upload/<filename>", methods=["POST", "PUT"])
def upload_process(filename):
    filename = secure_filename(filename)
    fileFullPath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    with open(fileFullPath, "wb") as f:
        chunk_size = 1024
        chunk = request.stream.read(chunk_size)
        f.write(chunk)
    return jsonify({'filename': filename})

if __name__ == '__main__':
    app.run(host="0.0.0.0", port=int("8080"), debug=True)
Client - Request Code:
import os
import requests

def read_in_chunks(file_object, chunk_size=1024):
    while True:
        data = file_object.read(chunk_size)
        if not data:
            break
        yield data

def main(fname, url):
    content_path = os.path.abspath(fname)
    with open(content_path, 'r') as f:
        try:
            r = requests.post(url, data=read_in_chunks(f))
            print "r: {0}".format(r)
        except Exception, e:
            print e

if __name__ == '__main__':
    filename = 'bigfile.zip'  # ~1GB
    url = 'http://localhost:8080/upload/{0}'.format(filename)
    main(filename, url)
Kindly use file.stream.read(chunk_size) instead of request.stream.read(chunk_size). It works for me!
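For context, file.stream is what you get when the client uploads via multipart/form-data (i.e. through request.files) rather than a raw request body. A minimal hedged sketch of that variant (the 'file' field name and upload path are assumptions):

from flask import Flask, request, jsonify
from werkzeug.utils import secure_filename
import os

app = Flask(__name__)

@app.route("/upload", methods=["POST"])
def upload():
    file = request.files['file']  # a werkzeug FileStorage object
    filename = secure_filename(file.filename)
    with open(os.path.join("uploads/", filename), "wb") as f:
        while True:
            chunk = file.stream.read(1024)  # read the uploaded part in chunks
            if not chunk:
                break
            f.write(chunk)
    return jsonify({'filename': filename})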
Old thread, but I was looking for something similar, so I'll post here anyway.
The server opens the file in write mode, which overwrites it at each chunk. Prefer append mode:
with open(fileFullPath, "ab") as f:
The client needs to open the file in binary mode:
with open(content_path, "rb") as f:
Finally, the read_in_chunks generator needs to be consumed in a loop, posting one request per chunk:
def main(fname, url):
    content_path = os.path.abspath(fname)
    with open(content_path, "rb") as f:
        try:
            for data in read_in_chunks(f):
                r = requests.post(url, data=data)
                print("r: {0}".format(r))
        except Exception as e:
            print(e)
Then you have your two files:
Server
from flask import Flask, request, jsonify
from werkzeug.utils import secure_filename
import os

app = Flask(__name__)
app.config["UPLOAD_FOLDER"] = "uploads/"

@app.route("/upload/<filename>", methods=["POST", "PUT"])
def upload_process(filename):
    filename = secure_filename(filename)
    fileFullPath = os.path.join(app.config["UPLOAD_FOLDER"], filename)
    with open(fileFullPath, "ab") as f:
        chunk_size = 1024
        chunk = request.stream.read(chunk_size)
        f.write(chunk)
    return jsonify({"filename": filename})

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=int("8080"), debug=True)
Client
import os
import requests

def read_in_chunks(file_object, chunk_size=1024):
    while True:
        data = file_object.read(chunk_size)
        if not data:
            break
        yield data

def main(fname, url):
    content_path = os.path.abspath(fname)
    with open(content_path, "rb") as f:
        try:
            for data in read_in_chunks(f):
                r = requests.post(url, data=data)
                print("r: {0}".format(r))
        except Exception as e:
            print(e)

if __name__ == "__main__":
    filename = "bigfile.zip"  # ~1GB
    url = "http://localhost:8080/upload/{0}".format(filename)
    main(filename, url)
Note that posting in chunks usually requires the total number of chunks and a hash of the file to validate the upload.
Flask depends on werkzeug to process streams, and werkzeug demands a content length for a stream. There's a thread on this here, but no real solution currently available, other than to take another framework approach.
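One hedged workaround on the client side is to skip chunked transfer encoding altogether: when requests is given an open file object rather than a generator, it can determine the file size and send a Content-Length header, which keeps werkzeug happy:

import requests

# Passing the file object (not a generator) lets requests set Content-Length
# from the file size instead of using chunked transfer encoding.
with open("bigfile.zip", "rb") as f:
    r = requests.post("http://localhost:8080/upload/bigfile.zip", data=f)
    print(r.status_code)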
The example below should work very well for you all. If you use Redis, you can also pub/sub the chunk being processed for a progress bar in another API.
import json
import tempfile
import uuid

from flask import Flask, request, jsonify, copy_current_request_context

app = Flask(__name__)

# settings, db_apn_logging (a Redis client), and user_id come from the rest of my app

@app.route("/submit_vdo", methods=['POST'])
def submit_vdo():
    @copy_current_request_context
    def receive_chunk(stream, full_file_path):
        if full_file_path is None:
            tmpfile = tempfile.NamedTemporaryFile('wb+', prefix=str(uuid.uuid4()) + "_")
            full_file_path = tmpfile.name
        print('Write temp to ', full_file_path)
        with open(full_file_path, "wb") as f:
            max_chunk_size = settings.VIDEO_MAX_SIZE_CHUNK  # config.MAX_UPLOAD_BYTE_LENGHT
            count_chunks = 0
            total_uploaded = 0
            try:
                while True:
                    print('Chunk ', count_chunks)
                    chunk = stream.read(max_chunk_size)
                    if chunk is not None and len(chunk) > 0:
                        total_uploaded += len(chunk)
                        count_chunks += 1
                        f.write(chunk)
                        temp = {}
                        temp['chunk_counts'] = count_chunks
                        temp['total_bytes'] = total_uploaded
                        temp['status'] = 'uploading...'
                        temp['success'] = True
                        db_apn_logging.set(user_id + "#CHUNK_DOWNLOAD", json.dumps(temp), ex=5)
                        print(temp)
                    else:
                        f.close()
                        temp = {}
                        temp['chunk_counts'] = count_chunks
                        temp['total_bytes'] = total_uploaded
                        temp['status'] = 'DONE'
                        temp['success'] = True
                        db_apn_logging.set(user_id + "#CHUNK_DOWNLOAD", json.dumps(temp), ex=5)
                        break
            except Exception as e:
                temp = {}
                temp['chunk_counts'] = count_chunks
                temp['total_bytes'] = total_uploaded
                temp['status'] = e
                temp['success'] = False
                db_apn_logging.set(user_id + "#CHUNK_DOWNLOAD", json.dumps(temp), ex=5)
                return None
        return full_file_path

    stream = request.files['file']
    stream.seek(0)
    full_file_path = receive_chunk(stream, None)
    return "DONE !"