Server: Ubuntu 14.04, 2 cores and 4 GB RAM.
Stack: Gunicorn with gevent workers (-k gevent) and Flask.
gunicorn==20.0.4
Flask==1.1.1
gevent==1.4.0
The service behind Flask does some Redis reads/writes, just small keys and values, using the Python library redis==3.4.1.
The production problem is: when more people use the same API at the same time, API responses become slow and more time is spent in the Redis operations, rising from 10 ms to 100 ms or even higher.
import time
import functools

import redis
from flask import Flask, request, jsonify

app = Flask(__name__)

pool = redis.ConnectionPool(host='127.0.0.1',
                            port=6379,
                            db=6,
                            encoding='utf-8',
                            decode_responses=True)
r = redis.StrictRedis(connection_pool=pool)


def timer(func):
    @functools.wraps(func)
    def decorator(*args, **kwargs):
        s = time.time()
        data = request.json or request.form.to_dict()
        result = func(data, *args, **kwargs)  # renamed so it does not shadow the redis client `r`
        end = time.time()
        print('spend: {}'.format(int(end * 1000 - s * 1000)))
        return result
    return decorator


def get_no():
    z = r.get('test2')
    print('room_no: {}'.format(z))
    if not z:
        create_no()
        return get_no()
    else:
        if player_num() > 100:
            create_no()
            return get_no()
        else:
            return z


def player_num():
    return r.incrby('room_num')


def create_no():
    if r.setnx('lock', 1):
        print('locked!')
        n = r.incrby('test2')
        r.delete('room_num')
        r.delete('lock')
        return n
    else:
        print('sleep!')
        time.sleep(0.05)


@app.route('/test', methods=['POST', 'GET'])
@timer
def test(data):
    # no = get_no()
    # print(no)
    z = r.incrby('incry_4')
    print(z)
    return jsonify(dict(code=200))
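As a side note, the setnx lock in create_no has no expiry, so a worker that dies between setnx and delete leaves the lock stuck forever, and the sleep branch returns None to the caller. A minimal sketch of a safer variant, assuming the same key names, using redis-py's atomic set with nx and ex:

def create_no_safe():
    # Sketch only: NX plus a 5-second TTL in one atomic call, so a crashed
    # worker cannot hold the lock forever (key names as in the original).
    while True:
        if r.set('lock', 1, nx=True, ex=5):
            try:
                n = r.incrby('test2')
                r.delete('room_num')
                return n
            finally:
                r.delete('lock')
        time.sleep(0.05)  # lock held by someone else; retry shortly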
Plus, I ran some tests on a local machine with the wrk tool and found that when using more connections, the API responses take more time. I want to know why, when using -k gevent, the API takes more time.
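One thing worth checking here (my assumption, not a confirmed diagnosis): redis-py's default ConnectionPool grows without limit, so under gevent each concurrent greenlet can open its own Redis connection, and per-request latency then includes connection setup and Redis-side queuing. A sketch that caps the pool with redis-py's BlockingConnectionPool; the sizes are guesses to tune:

import redis

# Sketch: bound the pool so concurrent greenlets wait for a free
# connection instead of each opening a new one (numbers are guesses).
pool = redis.BlockingConnectionPool(host='127.0.0.1',
                                    port=6379,
                                    db=6,
                                    max_connections=50,  # cap per worker process
                                    timeout=5,           # seconds to wait for a free connection
                                    decode_responses=True)
r = redis.StrictRedis(connection_pool=pool)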
Related
I'm trying to deploy a Python app on Heroku, but Heroku returned an H10 error. Here is my Python code:
import os
import random

import requests
from flask import Flask, jsonify, request
from flask_cors import CORS

from system.backend.data import Data
from system.backend.folder import Folder
from system.wallet.data_pool import DataPool
from system.wallet.exchange import Exchange
from system.pubsub import PubSub

app = Flask(__name__)
CORS(app, resources={r'/*': {'origins': 'http://localhost:8080'}})

data = Data()
data_pool = DataPool()
folder = Folder(data)
pubsub = PubSub(data, data_pool)


@app.route('/')
def default():
    return 'Welcome to mypython app'


@app.route('/main')
def route_main():
    return jsonify(data.to_json())


@app.route('/main/range')
def route_main_range():
    start = int(request.args.get('start'))
    end = int(request.args.get('end'))
    return jsonify(data.to_json()[::-1][start:end])


@app.route('/main/datalength')
def route_main_datalength():
    return jsonify(len(data.length))
@app.route('/myapp/data')
def route_myapp_data():
    exchange_data = data_pool.app_data()  # renamed from app_data so the append below works
    exchange_data.append(Exchange.reward_exchange(folder).to_json())
    data.add_data(exchange_data)
    latest_folder = data.length[-1]  # local name; reassigning `folder` would shadow the global
    pubsub.broadcast_folder(latest_folder)
    data_pool.clear_data_exchange(data)
    return jsonify(data.to_json())
@app.route('/folder/exchange', methods=['POST'])
def route_folder_exchange():
    exchange_data = request.get_json()
    exchange = data_pool.existing_exchange(folder.address)
    if exchange:
        exchange.update(
            folder,
            exchange_data['recipient'],
            exchange_data['sender']
        )
    else:
        exchange = Exchange(
            folder,
            exchange_data['recipient'],
            exchange_data['sender']
        )
    pubsub.broadcast_exchange(exchange)
    data_pool.set_exchange(exchange)
    return jsonify(exchange.to_json())


@app.route('/folder/info')
def route_folder_info():
    return jsonify({'address': folder.address, 'data': folder.balance})
@app.route('/known-addresses')
def route_known_addresses():
    known_addresses = set()
    for item in data.length:  # was `main`, which is never defined
        for exchange in item.data:
            known_addresses.update(exchange['output'].keys())
    return jsonify(list(known_addresses))
@app.route('/exchange')
def route_exchanges():
    return jsonify(data_pool.exchange_data())  # was `exchange_pool`, which is never defined
ROOT_PORT = 8080
PORT = ROOT_PORT
if os.environ.get('PEER') == 'True':
    PORT = random.randint(8081, 9000)

    result = requests.get(f'http://localhost:{ROOT_PORT}/main')
    print(f'result.json(): {result.json()}')

    result_main = Data.from_json(result.json())
    try:
        data.replace_length(result_main.length)  # was `result_data`, which is never defined
        print('\n -- Successfully synchronized the local data')
    except Exception as e:
        print(f'\n -- Error synchronizing: {e}')
if os.environ.get('SEED_DATA') == 'True':
    for i in range(10):
        data.add_folder([  # was main.add_folder; `main` is never defined
            Exchange(Folder(), Folder().address, random.randint(500, 1000)).to_json(),
            Exchange(Folder(), Folder().address, random.randint(500, 1000)).to_json()
        ])
    for i in range(3):
        data_pool.set_exchange(
            Exchange(Folder(), Folder().address, random.randint(500, 1000))
        )

app.run(port=PORT)
I also made a worker.py file with the code as follows:
import os

import redis
from rq import Worker, Queue, Connection

listen = ['high', 'default', 'low']

redis_url = os.getenv('REDIS_URL', 'redis://localhost:8080')
conn = redis.from_url(redis_url)

if __name__ == '__main__':
    with Connection(conn):
        worker = Worker(map(Queue, listen))
        worker.work()
The Mac terminal recommended using waitress for Python deployment, but I'm not sure how to integrate waitress into the code shown above.
To use waitress, just do pip install waitress and add this snippet:

if __name__ == '__main__':
    from waitress import serve
    serve(app, host='0.0.0.0', port=8000)
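One caveat worth adding (my note, not part of the original answer): an H10 on Heroku means the dyno crashed at boot, and a common cause is binding to a hardcoded port instead of the one Heroku assigns through the PORT environment variable. A sketch of the same snippet with that change:

import os

if __name__ == '__main__':
    from waitress import serve
    # Heroku injects the port to bind to via the PORT env var;
    # fall back to 8000 for local runs.
    serve(app, host='0.0.0.0', port=int(os.environ.get('PORT', 8000)))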
I deployed a Flask script, shown below, on a CentOS 7 VPS.
from flask import Flask, request
import time

# http server
app = Flask(__name__)
server_dict = dict()
server_status_dict = dict()


@app.route('/<ip>')
def report(ip):
    server_dict[ip] = time.time()
    return "ok" + ":" + ip + ":" + str(time.time())


@app.route('/check')
def check():
    for server in server_dict:
        difference = time.time() - server_dict[server]
        if difference < 120:
            server_status_dict[server] = "ok" + ":" + str(difference)
        elif difference > 172800:
            del server_status_dict[server]
        else:
            server_status_dict[server] = "fail" + ":" + str(difference)
    message = ''
    for item in server_status_dict:
        message = message + f'{item}:{server_status_dict[item]}<br>'
    return message


# main function
if __name__ == '__main__':
    # start server
    app.run(host='0.0.0.0', port=1111)
I run it in two ways:
1. Running the script directly with "python3.7 test.py".
2. Deploying with Gunicorn and Nginx.
And I made a script to test this Flask API, as below:
import requests

for i in range(100):
    requests.get(f'http://example.org/{i}')
print("done 1")

for i in range(100):
    requests.get(f'http://server_IP:1111/{i}')
print("done 2")
With the 1st option, the script runs OK. When I go to "http://server_IP:1111/check", it gives 100 entries:
0:fail:759.2570543289185
1:fail:758.8786942958832
2:fail:758.5069346427917
3:fail:758.1351449489594
4:fail:757.7596881389618
5:fail:757.3863341808319
6:fail:757.010666847229
7:fail:756.6381704807281
8:fail:756.2622804641724
9:fail:755.8862257003784
10:fail:755.5146560668945
11:fail:755.1291973590851
12:fail:754.7365326881409
13:fail:754.356516122818
14:fail:753.981279373169
15:fail:753.6054089069366
16:fail:753.2138450145721
17:fail:752.818380355835
18:fail:752.4382960796356
19:fail:752.0667576789856
20:fail:751.7003827095032
21:fail:751.3132452964783
22:fail:750.9238367080688
23:fail:750.5513446331024
24:fail:750.1771302223206
25:fail:749.7979047298431
26:fail:749.4190459251404
27:fail:749.0481917858124
28:fail:748.6672575473785
29:fail:748.2830848693848
30:fail:747.909416437149
31:fail:747.5357480049133
32:fail:747.1593079566956
33:fail:746.7837409973145
34:fail:746.3994252681732
35:fail:746.0265593528748
36:fail:745.6520500183105
37:fail:745.2793860435486
38:fail:744.904794216156
39:fail:744.5288579463959
40:fail:744.1554877758026
41:fail:743.7802364826202
42:fail:743.4038217067719
43:fail:743.0015366077423
44:fail:742.616055727005
45:fail:742.2241225242615
46:fail:741.8492274284363
47:fail:741.4703538417816
48:fail:741.0822536945343
49:fail:740.7089433670044
50:fail:740.3415608406067
51:fail:739.9651212692261
52:fail:739.5690467357635
53:fail:739.1705968379974
54:fail:738.7934353351593
55:fail:738.4151468276978
56:fail:738.0353343486786
57:fail:737.6413230895996
58:fail:737.2650125026703
59:fail:736.8714530467987
60:fail:736.4966006278992
61:fail:736.1160485744476
62:fail:735.7190825939178
63:fail:735.3462533950806
64:fail:734.9714226722717
65:fail:734.5957586765289
66:fail:734.2199065685272
67:fail:733.8420522212982
68:fail:733.4598708152771
69:fail:733.0775439739227
70:fail:732.6989419460297
71:fail:732.3187139034271
72:fail:731.9392898082733
73:fail:731.5633845329285
74:fail:731.1846008300781
75:fail:730.8096714019775
76:fail:730.4323663711548
77:fail:730.0437717437744
78:fail:729.6707744598389
79:fail:729.2912459373474
80:fail:728.8956272602081
81:fail:728.5194237232208
82:fail:728.1444211006165
83:fail:727.7692551612854
84:fail:727.3844618797302
85:fail:727.0075929164886
86:fail:726.612667798996
87:fail:726.2140853404999
88:fail:725.8366258144379
89:fail:725.4668595790863
90:fail:725.080512046814
91:fail:724.7128283977509
92:fail:724.3402450084686
93:fail:723.9593863487244
94:fail:723.5851843357086
95:fail:723.2059574127197
96:fail:722.802404165268
97:fail:722.40824842453
98:fail:722.0141706466675
99:fail:721.6389377117157
But with the 2nd option, the error happens: when I go to "http://example.org/check" it gives a different result each time.
Sometimes it is as below:
0:fail:780.1568698883057
4:fail:778.6187407970428
6:fail:777.8681375980377
9:fail:776.739280462265
13:fail:775.2384984493256
15:fail:774.4248764514923
19:fail:772.897510766983
22:fail:771.7576985359192
25:fail:770.6321122646332
28:fail:769.4517879486084
31:fail:768.3453030586243
34:fail:767.2020778656006
37:fail:766.0453197956085
40:fail:764.8815402984619
43:fail:763.7402126789093
Another time it is as below:
2:fail:933.5209627151489
5:fail:932.381462097168
8:fail:931.2705476284027
11:fail:930.1319863796234
14:fail:928.9525971412659
17:fail:927.8257281780243
20:fail:926.6680727005005
23:fail:925.52357172966
26:fail:924.3903162479401
29:fail:923.2299783229828
32:fail:922.1214139461517
35:fail:920.9716517925262
38:fail:919.8139469623566
41:fail:918.6542329788208
44:fail:917.4981758594513
49:fail:915.6011772155762
I'm thinking it is a thread-safety issue. Could someone advise me?
Thanks
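One observation, hedged: under Gunicorn there are usually several worker processes, and each one holds its own copy of server_dict, so /check only sees the reports that happened to land on the worker serving that request. That would explain the different subset each time, and it is a multi-process issue rather than a thread-safety one. A minimal sketch of one common fix, keeping the timestamps in a store shared by all workers; Redis here is my assumption, not part of the original setup:

import time

import redis
from flask import Flask

app = Flask(__name__)
# Shared store visible to every Gunicorn worker (location assumed).
store = redis.StrictRedis(host='localhost', port=6379, db=0,
                          decode_responses=True)


@app.route('/<ip>')
def report(ip):
    store.hset('server_dict', ip, time.time())  # one hash shared by all workers
    return "ok" + ":" + ip + ":" + str(time.time())


@app.route('/check')
def check():
    message = ''
    for server, ts in store.hgetall('server_dict').items():
        difference = time.time() - float(ts)
        status = "ok" if difference < 120 else "fail"
        message += f'{server}:{status}:{difference}<br>'
    return message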
I have a script with Twilio:
from twilio.rest import Client


def wa(testo):
    client = Client()
    # this is the Twilio sandbox testing number
    from_whatsapp_number = 'whatsapp:+14155238886'
    to_whatsapp_number = 'whatsapp:+39xxxxxxxxxx'
    ts = 'Anomalia Rapportino ' + str(testo)
    client.messages.create(body=ts,
                           from_=from_whatsapp_number,
                           to=to_whatsapp_number)
I imported this script in a view, and I have this function:
import subprocess

from django.shortcuts import get_object_or_404, render


def grazieeprint(request, pk):
    intermedio = get_object_or_404(IntermProd, pk=pk)
    datilavoro = WhoWork.objects.get(pk=intermedio.work_id)
    try:
        return render(request, 'FBIsystem/thanksandprint.html', {'pkpreso': pk})
    finally:
        try:
            appo = datilavoro.pezziorastima * 2
            if datilavoro.pezziora >= appo:
                testo = datilavoro.pk
                subprocess.Popen([wa(testo)], shell=True)
        except:
            pass
I need to run wa(testo) after Django has loaded the page, because the whole process of sending the message takes approx 15-20 seconds.
I tried with 'try/finally' and with 'subprocess.Popen', but it always sends the message before rendering the page.
Please help.
TY
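A note on why this happens: subprocess.Popen([wa(testo)], shell=True) evaluates wa(testo) first and passes its return value (None) to Popen, so the message is sent synchronously before the response is returned. A minimal sketch of a queue-less alternative, pushing the send onto a background thread (this assumes losing the message if the process exits is acceptable):

import threading


def send_wa_async(testo):
    # Sketch: start the Twilio send in a daemon thread and return
    # immediately, so the view can render without waiting 15-20 s.
    threading.Thread(target=wa, args=(testo,), daemon=True).start()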
EDIT:
I tried:

finally:
    try:
        time.sleep(1)
        appo = datilavoro.pezziorastima * 2
        if datilavoro.pezziora >= appo:
            testo = datilavoro.pk
            subprocess.Popen([wa(testo)], shell=True)

It loads the page fast, but does not send.
EDIT 2:
Trying to use Celery, the script is now:
from twilio.rest import Client
from celery import shared_task, current_task


@shared_task
def wa(testo):
    print('test')
    client = Client()
    # this is the Twilio sandbox testing number
    from_whatsapp_number = 'whatsapp:+14155238886'
    to_whatsapp_number = 'whatsapp:+39xxxxxxxxx'
    ts = 'Anomalia Rapportino ' + str(testo)
    client.messages.create(body=ts,
                           from_=from_whatsapp_number,
                           to=to_whatsapp_number)
But it does not run in parallel...
What is the right way?
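For reference, decorating wa with @shared_task is not enough on its own: calling wa(testo) still runs it synchronously inside the request. Celery only queues the task when it is invoked with .delay() (this assumes a broker and a running Celery worker, which the question does not show). A sketch of the view with that change:

def grazieeprint(request, pk):
    intermedio = get_object_or_404(IntermProd, pk=pk)
    datilavoro = WhoWork.objects.get(pk=intermedio.work_id)
    appo = datilavoro.pezziorastima * 2
    if datilavoro.pezziora >= appo:
        wa.delay(datilavoro.pk)  # enqueue; a Celery worker sends the message
    return render(request, 'FBIsystem/thanksandprint.html', {'pkpreso': pk})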
Setup: Python 2.7.15, Tornado 5.1
I have a web-server machine that handles ~40 /recommend requests per second.
The average response time is 25ms, but there's a big divergence (some requests can take more than 500ms).
Each request generates between 1-8 Elasticsearch queries (HTTP requests) internally.
Each Elasticsearch query can take between 1-150ms.
The Elasticsearch requests are handled synchronously via the elasticsearch-dsl library.
The goal is to reduce the i/o waiting time (queries to Elasticsearch) and handle more requests per second so I can reduce the number of machines.
One thing is unacceptable - I don't want to increase the average handle time (25ms).
I found some tornado-elasticsearch implementations on the web, but since I need to use only one endpoint to Elasticsearch (/_search) I am trying to do that alone.
Below is a stripped-down version of my web server. With the same load (~40 requests per second), the average response time increased to 200 ms!
Digging in, I see that the internal async handle time (queries to Elasticsearch) is not stable: the time each fetch call takes varies, and the total average (in the ab load test) is high.
I'm using ab to simulate the load and measure it internally by printing the current fetch handle time, the average fetch handle time, and the maximum handle time.
When doing one request at a time (concurrency 1):
ab -p es-query-rcom.txt -T application/json -n 1000 -c 1 -k 'http://localhost:5002/recommend'
my prints look like: [avg req_time: 3, dur: 3] [current req_time: 2, dur: 3] [max req_time: 125, dur: 125] reqs: 8000
But when I try to increase the concurrency (up to 8): ab -p es-query-rcom.txt -T application/json -n 1000 -c 8 -k 'http://localhost:5002/recommend'
now my prints look like: [avg req_time: 6, dur: 13] [current req_time: 4, dur: 4] [max req_time: 73, dur: 84] reqs: 8000
The average request is now 2x slower (or 4x by my measurements)!
What am I missing here? Why do I see this degradation?
web_server.py:
import tornado
from tornado.httpclient import AsyncHTTPClient
from tornado.options import define, options
from tornado.httpserver import HTTPServer

from web_handler import WebHandler

SERVICE_NAME = 'web_server'
NUM_OF_PROCESSES = 1


class Statistics(object):
    def __init__(self):
        self.total_requests = 0
        self.total_requests_time = 0
        self.total_duration = 0
        self.max_time = 0
        self.max_duration = 0


class RcomService(object):
    def __init__(self):
        print 'initializing RcomService...'
        AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient",
                                  max_clients=3)
        self.stats = Statistics()

    def start(self, port):
        define("port", default=port, type=int)
        db = self.get_db(self.stats)
        routes = self.generate_routes(db)
        app = tornado.web.Application(routes)
        http_server = HTTPServer(app, xheaders=True)
        http_server.bind(options.port)
        http_server.start(NUM_OF_PROCESSES)
        tornado.ioloop.IOLoop.current().start()

    @staticmethod
    def generate_routes(db):
        return [
            (r"/recommend", WebHandler, dict(db=db))
        ]

    @staticmethod
    def get_db(stats):
        return {
            'stats': stats
        }


def main():
    port = 5002
    print 'starting %s on port %s' % (SERVICE_NAME, port)
    rcom_service = RcomService()
    rcom_service.start(port)


if __name__ == '__main__':
    main()
web_handler.py:
import time

import ujson
from tornado import gen
from tornado.gen import coroutine
from tornado.httpclient import AsyncHTTPClient
from tornado.web import RequestHandler


class WebHandler(RequestHandler):
    def initialize(self, db):
        self.stats = db['stats']

    @coroutine
    def post(self, *args, **kwargs):
        # dummy queries (empty)
        result = yield self.wrapper_innear_loop([{}, {}, {}, {}, {}, {}, {}, {}])
        self.write({
            'res': result
        })

    @coroutine
    def wrapper_innear_loop(self, queries):
        result = []
        for q in queries:  # queries are performed serially
            res = yield self.async_fetch_gen(q)
            result.append(res)
        raise gen.Return(result)

    @coroutine
    def async_fetch_gen(self, query):
        url = 'http://localhost:9200/my_index/_search'
        headers = {
            'Content-Type': 'application/json',
            'Connection': 'keep-alive'
        }
        http_client = AsyncHTTPClient()
        start_time = int(round(time.time() * 1000))
        response = yield http_client.fetch(url, method='POST',
                                           body=ujson.dumps(query), headers=headers)
        end_time = int(round(time.time() * 1000))
        duration = end_time - start_time
        body = ujson.loads(response.body)
        request_time = int(round(response.request_time * 1000))

        self.stats.total_requests += 1
        self.stats.total_requests_time += request_time
        self.stats.total_duration += duration
        if self.stats.max_time < request_time:
            self.stats.max_time = request_time
        if self.stats.max_duration < duration:
            self.stats.max_duration = duration

        duration_avg = self.stats.total_duration / self.stats.total_requests
        time_avg = self.stats.total_requests_time / self.stats.total_requests
        print "[avg req_time: " + str(time_avg) + ", dur: " + str(duration_avg) + \
              "] [current req_time: " + str(request_time) + ", dur: " + str(duration) + "] [max req_time: " + \
              str(self.stats.max_time) + ", dur: " + str(self.stats.max_duration) + "] reqs: " + \
              str(self.stats.total_requests)
        raise gen.Return(body)
I tried to play a bit with the async client class (simple vs. curl) and with the max_clients size, but I don't understand what the best tuning is in my case.
The increased time may be because with concurrency==1 the CPU was under-utilized, while with c==8 it is 100%+ utilized and unable to catch up with all the requests. For example, say an abstract CPU can process 1000 operations/sec, and it takes 50 CPU ops to send a request and 50 CPU ops to read a result. At 5 RPS your CPU is 50% utilized and the average request time is 50 ms (to send) + request time + 50 ms (to read). But at, say, 40 RPS (8 times more), the CPU would be over-utilized by 400% and some finished requests would sit waiting to be parsed, so the average request time becomes 50 ms + request time + CPU wait time + 50 ms.
To sum up, my advice would be to check CPU utilization under both loads and, to be sure, to profile how much time it takes to send a request and parse a response; the CPU may be your bottleneck.
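Beyond CPU, note that the handler above performs its 1-8 Elasticsearch queries one after another. In Tornado's gen coroutines, yielding a list of futures runs them concurrently, which should cut the per-request I/O wait. A sketch against the handler above; the method name is mine:

    @coroutine
    def wrapper_parallel_loop(self, queries):
        # Sketch: yielding a list of coroutine calls makes Tornado run
        # all the fetches concurrently and gather the results in order.
        result = yield [self.async_fetch_gen(q) for q in queries]
        raise gen.Return(result)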
My program collects data from sensors and sends it to a web interface, which shows it in real-time graphs and sends some commands back.
The problem is that the data arrives with a big delay, or old data is sent. What should I change to send data asynchronously in both directions?
I simplified my code. Example 1, where data arrives once every few minutes:
# System stuff
import os
import sys
import serial

# Multiprocessing
from multiprocessing import Process
from threading import Thread  # for same memory
import timeit
from time import sleep

# database
import redis

# Web server
from flask import Flask, render_template
from flask_socketio import SocketIO, emit, send, join_room, leave_room, close_room, rooms, disconnect

# Config
DEBUG = True

db = redis.StrictRedis(host='localhost', port=6379, db=0)

# db vars
db.set('velocity', 0.0)
db.set('distance', 0.0)


def webServer(db):
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'secret!'
    socketio = SocketIO(app)

    @app.route('/')
    def index():
        return render_template('index.html')

    def exchangeData():
        prev_distance = 0.00
        while True:
            distance = round(float(db.get('distance')), 2)
            if distance != prev_distance:
                velocity = round(float(db.get('velocity')), 2)
                distance = round(float(db.get('distance')), 2)
                socketio.emit('exchangeData', {
                    'velocity': velocity,
                    'distance': distance,
                })
                prev_distance = distance
                print("DATA sent: %s m/s" % velocity)
                sleep(0.2)

    @socketio.on('connect')
    def test_connect():
        t_exchangeData = Thread(target=exchangeData).start()

    socketio.run(app, debug=True, host="0.0.0.0")


def newData(db):
    c = 0.00
    while True:
        db.set('velocity', c)
        db.set('distance', c)
        c += 1.00
        sleep(1)


if __name__ == '__main__':
    p_newData = Process(target=newData, args=(db,)).start()
    p_webServer = Process(target=webServer, args=(db,)).start()
    # p_checkConnection = Process(target=checkConnection, args=(db, HOSTNAME, pinglog)).start()
    # p_calcVD = Process(target=calcVD, args=(db,)).start()
In the browser, I get this with a latency of about 2 minutes:
Object {velocity: 218, distance: 218}
// two minutes later
Object {velocity: 306, distance: 306}
// two minutes later
Object {velocity: 306, distance: 306}
Example 2, where I do not use the if statement and sleep:
# System stuff
import os
import sys
import serial

# Multiprocessing
from multiprocessing import Process
from threading import Thread  # for same memory
import timeit
from time import sleep

# database
import redis

# Web server
from flask import Flask, render_template
from flask_socketio import SocketIO, emit, send, join_room, leave_room, close_room, rooms, disconnect

# Config
DEBUG = True

db = redis.StrictRedis(host='localhost', port=6379, db=0)

# db vars
db.set('velocity', 0.0)
db.set('distance', 0.0)


def webServer(db):
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'secret!'
    socketio = SocketIO(app)

    @app.route('/')
    def index():
        return render_template('index.html')

    def exchangeData():
        prev_distance = 0.00
        while True:
            # distance = round(float(db.get('distance')), 2)
            # if(distance != prev_distance):
            velocity = round(float(db.get('velocity')), 2)
            distance = round(float(db.get('distance')), 2)
            socketio.emit('exchangeData', {
                'velocity': velocity,
                'distance': distance,
            })
            prev_distance = distance
            print("DATA sent: %s m/s" % velocity)
            # sleep(0.2)

    @socketio.on('connect')
    def test_connect():
        t_exchangeData = Thread(target=exchangeData).start()

    socketio.run(app, debug=True, host="0.0.0.0")


def newData(db):
    c = 0.00
    while True:
        db.set('velocity', c)
        db.set('distance', c)
        c += 1.00
        sleep(1)


if __name__ == '__main__':
    p_newData = Process(target=newData, args=(db,)).start()
    p_webServer = Process(target=webServer, args=(db,)).start()
    # p_checkConnection = Process(target=checkConnection, args=(db, HOSTNAME, pinglog)).start()
    # p_calcVD = Process(target=calcVD, args=(db,)).start()
In this case, I get data in real time, but it is the same data, and it changes only once every few minutes:
Object {distance: 3, velocity: 3}
Object {distance: 3, velocity: 3}
Object {distance: 3, velocity: 3}
// repeating around 2 minutes
Object {distance: 357, velocity: 357}
// repeating again...
The main problem was that I did not add these two lines:
import eventlet
eventlet.monkey_patch()
Also, while trying to find a solution, I had made a mistake by reading the data from Redis outside the while loop.
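To make the fix concrete, a sketch of where the patch has to sit, assuming the same Flask-SocketIO setup as above: monkey_patch() must run before anything else is imported, so eventlet can swap in cooperative versions of the blocking primitives used by redis, threading, and the socket layer:

# Sketch: these two lines go at the very top of the file, before any
# other import, so eventlet can patch the stdlib for redis/threading.
import eventlet
eventlet.monkey_patch()

import redis
from flask import Flask, render_template
from flask_socketio import SocketIO
# ... rest of the server as above ...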