I have a simple Flask application that calls a FastAPI service on the route /api, and the FastAPI service writes to a PostgreSQL database.
I need to achieve 200 requests/second (insertions through Flask => FastAPI => DB).
Flask
To run it I use gunicorn
gunicorn --worker-class gevent --workers 2 --threads 50 --bind 0.0.0.0:5000 main_api:app
from gevent import monkey
monkey.patch_all()

import json
import logging

import requests
from flask import Flask, request

app = Flask(__name__)


@app.route('/api/mt_retailprice', methods=['POST'])
# @jwt_required()
def mt_retailprice():
    try:
        request_data = request.get_json()
        response = _write_to_database(request_data)
        return response.text  # the original returned an undefined "data"; returning the upstream response text instead
    except Exception as e:
        logging.error("Prediction failed, check that the JSON sent in the body is correct. Exception %s has happened", e)
        return str(e), 500  # returning the exception object itself is not a valid Flask response


def _write_to_database(data):
    api_url = 'http://127.0.0.1:3000/api/'
    return write_sql(api_url, data)


def write_sql(url, data):
    """
    Makes a POST request to a specific URL.
    """
    try:
        headers = {
            'Accept-Language': 'application/json',
            'Content-Type': 'application/json'
        }
        payload = json.dumps(data)
        return requests.post(url, headers=headers, data=payload)
    except Exception as e:
        logging.error(e)


def main():
    configure_logging()  # defined elsewhere; not shown in this snippet


if __name__ == "__main__":
    main()
FastAPI
import asyncpg
import pandas as pd
import uvicorn
from fastapi import FastAPI, Request

# username, password, db_name, host, table and schema are presumably defined
# elsewhere in the original code; they are not shown in this snippet.


class Database:
    async def create_pool(self):
        self.pool = await asyncpg.create_pool(user=username, password=password,
                                              database=db_name, host=host, max_size=1000)


def create_app():
    app = FastAPI()
    db = Database()

    @app.on_event("startup")
    async def startup():
        await db.create_pool()

    @app.post('/api')
    async def insert_db(request: Request):
        data = ...  # parsing of the request body is elided in the original
        df = pd.DataFrame.from_dict(data)
        tuples = [tuple(x) for x in df.values]
        connection = await db.pool.acquire()
        s = await connection.copy_records_to_table(table, schema_name=schema,
                                                   records=tuples, columns=list(df.columns),
                                                   timeout=10)
        return s

    return app


app = create_app()

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=3000)
Here are the logs from Apache Benchmark:
$ ab -n 200 -c 20 -T "application/json" -p body.json http://127.0.0.1:5000/api/mt_retailprice
This is ApacheBench, Version 2.3 <$Revision: 1879490 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/
Benchmarking 127.0.0.1 (be patient)
Completed 100 requests
Completed 200 requests
Finished 200 requests
Server Software: gunicorn
Server Hostname: 127.0.0.1
Server Port: 5000
Document Path: /api/mt_retailprice
Document Length: 60 bytes
Concurrency Level: 20
Time taken for tests: 12.372 seconds
Complete requests: 200
Failed requests: 0
Total transferred: 42600 bytes
Total body sent: 137800
HTML transferred: 12000 bytes
Requests per second: 16.17 [#/sec] (mean)
Time per request: 1237.222 [ms] (mean)
Time per request: 61.861 [ms] (mean, across all concurrent requests)
Transfer rate: 3.36 [Kbytes/sec] received
10.88 kb/s sent
14.24 kb/s total
I get the following error when I make more requests, for example:
ab -n 300 -c 20 -T "application/json" -p body.json http://127.0.0.1:5000/api/mt_retailprice
Does anyone know what this error is about? I can't find anything on it.
INFO: 127.0.0.1:50600 - "POST /api/ HTTP/1.1" 500 Internal Server Error
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py", line 372, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/uvicorn/middleware/proxy_headers.py", line 75, in __call__
return await self.app(scope, receive, send)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/fastapi/applications.py", line 270, in __call__
await super().__call__(scope, receive, send)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/starlette/applications.py", line 124, in __call__
await self.middleware_stack(scope, receive, send)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/starlette/middleware/errors.py", line 184, in __call__
raise exc
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/starlette/middleware/errors.py", line 162, in __call__
await self.app(scope, receive, _send)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/starlette/middleware/exceptions.py", line 75, in __call__
raise exc
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/starlette/middleware/exceptions.py", line 64, in __call__
await self.app(scope, receive, sender)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/fastapi/middleware/asyncexitstack.py", line 21, in __call__
raise e
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/fastapi/middleware/asyncexitstack.py", line 18, in __call__
await self.app(scope, receive, send)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/starlette/routing.py", line 680, in __call__
await route.handle(scope, receive, send)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/starlette/routing.py", line 275, in handle
await self.app(scope, receive, send)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/starlette/routing.py", line 65, in app
response = await func(request)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/fastapi/routing.py", line 232, in app
dependant=dependant, values=values, is_coroutine=is_coroutine
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/fastapi/routing.py", line 160, in run_endpoint_function
return await dependant.call(**values)
File "app_test.py", line 49, in insert_db
connection = await db.pool.acquire()
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/pool.py", line 842, in _acquire
return await _acquire_impl()
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/pool.py", line 827, in _acquire_impl
proxy = await ch.acquire() # type: PoolConnectionProxy
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/pool.py", line 141, in acquire
await self.connect()
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/pool.py", line 133, in connect
self._con = await self._pool._get_new_connection()
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/pool.py", line 532, in _get_new_connection
record_class=self._record_class,
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/connect_utils.py", line 781, in _connect_addr
return await __connect_addr(params, timeout, True, *args)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/connect_utils.py", line 833, in __connect_addr
tr, pr = await compat.wait_for(connector, timeout=timeout)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/compat.py", line 66, in wait_for
return await asyncio.wait_for(fut, timeout)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/asyncio/tasks.py", line 442, in wait_for
return fut.result()
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/connect_utils.py", line 695, in _create_ssl_connection
host, port)
File "uvloop/loop.pyx", line 1978, in create_connection
socket.gaierror: [Errno 8] nodename nor servname provided, or not known
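As an aside on the FastAPI snippet above (an illustration, not a confirmed diagnosis): the traceback shows the failure inside db.pool.acquire(), where asyncpg opens a brand-new connection for each concurrent request, since the pool was created with max_size=1000 and the endpoint never releases the connections it acquires. Exhausting connections or the host resolver under load is one plausible way to end up with that gaierror. Below is a minimal sketch of a more conservative pool setup; it reuses the username/password/db_name/host, table and schema names from the code above and is illustrative rather than a tested fix.
import asyncpg


async def create_bounded_pool():
    # min_size and max_size are standard asyncpg.create_pool() arguments;
    # the values here are illustrative, not tuned for the 200 req/s target.
    return await asyncpg.create_pool(
        user=username, password=password, database=db_name, host=host,
        min_size=5, max_size=20,
    )


async def insert_records(pool, records, columns):
    # "async with pool.acquire()" returns the connection to the pool even if
    # the COPY fails, instead of leaving it checked out as in the endpoint above.
    async with pool.acquire() as connection:
        return await connection.copy_records_to_table(
            table, schema_name=schema, records=records,
            columns=columns, timeout=10,
        )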
Related
I am using FastAPI with Pydantic.
My problem: I need to raise a ValueError using Pydantic.
from fastapi import FastAPI
from pydantic import BaseModel, validator
from fastapi import Depends, HTTPException

app = FastAPI()


class RankInput(BaseModel):
    rank: int

    @validator('rank')
    def check_if_value_in_range(cls, v):
        """
        check if input rank is within range
        """
        if not 0 < v < 1000001:
            raise ValueError("Rank Value Must be within range (0,1000000)")
            # raise HTTPException(status_code=400, detail="Rank Value Error") - this works,
            # but I am looking for a solution using ValueError
        return v


def get_info_by_rank(rank):
    return rank


@app.get('/rank/{rank}')
async def get_rank(value: RankInput = Depends()):
    result = get_info_by_rank(value.rank)
    return result
This piece of code gives an Internal Server Error when a ValueError is raised:
INFO: 127.0.0.1:59427 - "GET /info/?rank=-1 HTTP/1.1" 500 Internal Server Error
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/uvicorn/protocols/http/h11_impl.py", line 396, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/uvicorn/middleware/proxy_headers.py", line 45, in __call__
return await self.app(scope, receive, send)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/fastapi/applications.py", line 199, in __call__
await super().__call__(scope, receive, send)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/starlette/applications.py", line 111, in __call__
await self.middleware_stack(scope, receive, send)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/starlette/middleware/errors.py", line 181, in __call__
raise exc from None
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/starlette/middleware/errors.py", line 159, in __call__
await self.app(scope, receive, _send)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/starlette/exceptions.py", line 82, in __call__
raise exc from None
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/starlette/exceptions.py", line 71, in __call__
await self.app(scope, receive, sender)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/starlette/routing.py", line 566, in __call__
await route.handle(scope, receive, send)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/starlette/routing.py", line 227, in handle
await self.app(scope, receive, send)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/starlette/routing.py", line 41, in app
response = await func(request)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/fastapi/routing.py", line 195, in app
dependency_overrides_provider=dependency_overrides_provider,
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/fastapi/dependencies/utils.py", line 550, in solve_dependencies
solved = await run_in_threadpool(call, **sub_values)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/starlette/concurrency.py", line 34, in run_in_threadpool
return await loop.run_in_executor(None, func, *args)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "pydantic/main.py", line 400, in pydantic.main.BaseModel.__init__
pydantic.error_wrappers.ValidationError: 1 validation error for GetInput
rank
ValueError() takes no keyword arguments (type=type_error)
I also checked https://github.com/tiangolo/fastapi/issues/2180, but I was not able to figure out a solution.
What I need to do is raise a ValueError with a custom status code.
Note: I know I can get the job done by raising HTTPException, but I am looking for a solution using ValueError.
Could you tell me where I am going wrong?
I have also posted this issue on GitHub: https://github.com/tiangolo/fastapi/issues/3761
If you're not raising an HTTPException, then normally any other uncaught exception will generate a 500 response (an Internal Server Error). If your intent is to respond with some other custom error message and HTTP status when raising a particular exception - say, ValueError - then you can add a global exception handler to your app:
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse

app = FastAPI()


@app.exception_handler(ValueError)
async def value_error_exception_handler(request: Request, exc: ValueError):
    return JSONResponse(
        status_code=400,
        content={"message": str(exc)},
    )
This will give a 400 response (or you can change the status code to whatever you like) like this:
{
    "message": "Value Must be within range (0,1000000)"
}
Please note that pydantic expects that validators raise a ValueError, TypeError, or AssertionError (see docs) which pydantic will convert into a ValidationError.
Further, as per FastAPI's documentation:
When a request contains invalid data, FastAPI internally raises a RequestValidationError.
and
RequestValidationError is a sub-class of Pydantic's ValidationError.
The result of this is that a standard Validation error raised during pydantic's Model validation will be translated into a 422 Unprocessable Entity, and the response body will contain details on why the validation failed.
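For illustration, the default 422 body produced for the RankInput model above has roughly this shape (the exact loc, msg and type values depend on the FastAPI and pydantic versions and on where the field comes from, so treat this as an approximation rather than exact output):
{
    "detail": [
        {
            "loc": ["path", "rank"],
            "msg": "Rank Value Must be within range (0,1000000)",
            "type": "value_error"
        }
    ]
}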
(As a side note: pydantic comes with constrained types which allow you to constrain basic datatypes without having to write explicit validators.)
If the above is not satisfactory and you'd like to change the behaviour, here's how I would approach it (see here for details on the ValidationError handling):
from fastapi import Depends, FastAPI, Request, status
from fastapi.exceptions import RequestValidationError
from fastapi.exception_handlers import request_validation_exception_handler
from fastapi.responses import JSONResponse
from pydantic import BaseModel, conint


class RankInput(BaseModel):
    # Constrained integer, must be greater than or equal to 0
    # and less than or equal to 1 million.
    rank: conint(ge=0, le=1_000_000)


async def rank_out_of_bound_handler(request: Request, exc: RequestValidationError):
    validation_errors = exc.errors()
    for err in validation_errors:
        # You could check for other things here as well, e.g. the error type.
        if "rank" in err["loc"]:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"message": "Rank must be in range [0, 1000000]."}
            )
    # Default response in every other case.
    return await request_validation_exception_handler(request, exc)


def get_info_by_rank(rank):
    return rank


app = FastAPI(
    exception_handlers={RequestValidationError: rank_out_of_bound_handler},
)


@app.get('/rank/{rank}')
async def get_rank(value: RankInput = Depends()):
    result = get_info_by_rank(value.rank)
    return result
A call to the endpoint now gives:
$ curl -i "http://127.0.0.1:8000/rank/1"
HTTP/1.1 200 OK
date: Sat, 28 Aug 2021 20:47:58 GMT
server: uvicorn
content-length: 1
content-type: application/json
1
$ curl -i "http://127.0.0.1:8000/rank/-1"
HTTP/1.1 400 Bad Request
date: Sat, 28 Aug 2021 20:48:24 GMT
server: uvicorn
content-length: 49
content-type: application/json
{"message":"Rank must be in range [0, 1000000]."}
$ curl -i "http://127.0.0.1:8000/rank/1000001"
HTTP/1.1 400 Bad Request
date: Sat, 28 Aug 2021 20:48:51 GMT
server: uvicorn
content-length: 49
content-type: application/json
{"message":"Rank must be in range [0, 1000000]."}
If you were to add a different endpoint that uses the same model, the exception handler will automatically take care of this as well, e.g.:
@app.get('/other-rank/{rank}')
async def get_other_rank(value: RankInput = Depends()):
    result = get_info_by_rank(value.rank)
    return result
$ curl -i "http://127.0.0.1:8000/other-rank/-1"
HTTP/1.1 400 Bad Request
date: Sat, 28 Aug 2021 20:54:16 GMT
server: uvicorn
content-length: 49
content-type: application/json
{"message":"Rank must be in range [0, 1000000]."}
If this is not what you're looking for, could you explain why exactly you'd like to raise a ValueError?
I have a FastAPI endpoint that needs to download some files from HDFS to the local server.
I'm trying to use asyncio to run the function that downloads the files in a separate process.
I'm using FastAPI's Depends to create an HDFS client and inject the object into the endpoint execution.
import asyncio
import base64
import json
from concurrent.futures.process import ProcessPoolExecutor

from fastapi import Depends, FastAPI, Request, Response, status
from hdfs import InsecureClient

app = FastAPI()

HDFS_URLS = ['http://hdfs-srv.local:50070']


async def run_in_process(fn, *args):
    loop = asyncio.get_event_loop()
    return await loop.run_in_executor(app.state.executor, fn, *args)  # wait and return result


def connectHDFS():
    client = InsecureClient(HDFS_URLS[0])  # the original used an undefined "url"; HDFS_URLS[0] is presumably intended
    yield client


def fr(id, img, client):
    # my code here
    client.download(id_identifica_foto_dir_hdfs, id_identifica_foto_dir_local, True, n_threads=2)
    # my code here
    return jsonReturn


@app.post("/")
async def main(request: Request, hdfsclient: InsecureClient = Depends(connectHDFS)):
    # Decode the received message
    data = await request.json()
    message = base64.b64decode(data['data']).decode('utf-8').replace("'", '"')
    message = json.loads(message)

    res = await run_in_process(fr, message['id'], message['img'], hdfsclient)

    return {
        "message": res
    }


@app.on_event("startup")
async def on_startup():
    app.state.executor = ProcessPoolExecutor()


@app.on_event("shutdown")
async def on_shutdown():
    app.state.executor.shutdown()
But I'm not able to pass the hdfsclient object along:
res = await run_in_process(fr, message['id'], message['img'], hdfsclient)
I'm getting the following error:
Traceback (most recent call last):
File "/home/kleyson/.virtualenvs/reconhecimentofacial/lib/python3.7/site-packages/uvicorn/protocols/http/h11_impl.py", line 396, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/home/kleyson/.virtualenvs/reconhecimentofacial/lib/python3.7/site-packages/uvicorn/middleware/proxy_headers.py", line 45, in __call__
return await self.app(scope, receive, send)
File "/home/kleyson/.virtualenvs/reconhecimentofacial/lib/python3.7/site-packages/fastapi/applications.py", line 199, in __call__
await super().__call__(scope, receive, send)
File "/home/kleyson/.virtualenvs/reconhecimentofacial/lib/python3.7/site-packages/starlette/applications.py", line 111, in __call__
await self.middleware_stack(scope, receive, send)
File "/home/kleyson/.virtualenvs/reconhecimentofacial/lib/python3.7/site-packages/starlette/middleware/errors.py", line 181, in __call__
raise exc from None
File "/home/kleyson/.virtualenvs/reconhecimentofacial/lib/python3.7/site-packages/starlette/middleware/errors.py", line 159, in __call__
await self.app(scope, receive, _send)
File "/home/kleyson/.virtualenvs/reconhecimentofacial/lib/python3.7/site-packages/starlette/exceptions.py", line 82, in __call__
raise exc from None
File "/home/kleyson/.virtualenvs/reconhecimentofacial/lib/python3.7/site-packages/starlette/exceptions.py", line 71, in __call__
await self.app(scope, receive, sender)
File "/home/kleyson/.virtualenvs/reconhecimentofacial/lib/python3.7/site-packages/starlette/routing.py", line 566, in __call__
await route.handle(scope, receive, send)
File "/home/kleyson/.virtualenvs/reconhecimentofacial/lib/python3.7/site-packages/starlette/routing.py", line 227, in handle
await self.app(scope, receive, send)
File "/home/kleyson/.virtualenvs/reconhecimentofacial/lib/python3.7/site-packages/starlette/routing.py", line 41, in app
response = await func(request)
File "/home/kleyson/.virtualenvs/reconhecimentofacial/lib/python3.7/site-packages/fastapi/routing.py", line 202, in app
dependant=dependant, values=values, is_coroutine=is_coroutine
File "/home/kleyson/.virtualenvs/reconhecimentofacial/lib/python3.7/site-packages/fastapi/routing.py", line 148, in run_endpoint_function
return await dependant.call(**values)
File "./asgi.py", line 86, in main
res = await run_in_process(fr, message['id'], message['img'], hdfsclient)
File "./asgi.py", line 22, in run_in_process
return await loop.run_in_executor(app.state.executor, fn, *args) # wait and return result
File "/usr/lib/python3.7/multiprocessing/queues.py", line 236, in _feed
obj = _ForkingPickler.dumps(obj)
File "/usr/lib/python3.7/multiprocessing/reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
TypeError: can't pickle _thread.lock objects
How can I have the hdfsclient available inside the fr() function without creating a new connection on every request? In other words, how do I create the hdfsclient at application startup and use it inside that function?
The entire point of asyncio is to do what you are trying to achieve in the same process.
The typical example is a web crawler, where you open multiple requests within the same thread/process and then wait for them to finish. This way, you get data from multiple URLs without having to wait for each request to finish before starting the next one.
The same applies in your case: call your async function that downloads the file, do your other work, and then wait for the file download to complete (if it hasn't completed yet). Sharing data between processes is not trivial, and that is why your function is not working (a same-process sketch follows the links below).
I suggest you first understand what async is and how it works before jumping into doing something that you don't understand.
Some tutorials on asyncio
https://www.datacamp.com/community/tutorials/asyncio-introduction
https://realpython.com/lessons/what-asyncio/
https://docs.python.org/3/library/asyncio.html
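To make the same-process suggestion concrete, here is a minimal sketch of keeping everything in one process and pushing only the blocking download into the default thread pool, so a single client created at startup can be reused without any pickling. It is an illustration, not the question's code: it assumes the hdfs InsecureClient can be shared across requests, reuses the HDFS_URLS and fr names from the question, simplifies the request parsing, and uses '/tmp/' as a placeholder local path.
import asyncio

from fastapi import FastAPI
from hdfs import InsecureClient

app = FastAPI()
HDFS_URLS = ['http://hdfs-srv.local:50070']


@app.on_event("startup")
async def on_startup():
    # One client for the whole process; no ProcessPoolExecutor, so nothing has to be pickled.
    app.state.hdfsclient = InsecureClient(HDFS_URLS[0])


def fr(id, img, client):
    # Blocking HDFS work stays in a plain function, as in the question.
    client.download(id, '/tmp/' + id, True, n_threads=2)
    return {"id": id}


@app.post("/")
async def main(id: str, img: str):
    loop = asyncio.get_event_loop()
    # Run the blocking function in the default thread pool so the event loop stays free.
    res = await loop.run_in_executor(None, fr, id, img, app.state.hdfsclient)
    return {"message": res}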
I want to host a PyTorch model in a FastAPI backend. When I run the code with python it works fine; the unpickled model can use the defined class. When the same file is started with uvicorn, it cannot find the class definition.
The source code looks like this:
import uvicorn
import json
from typing import List
from fastapi import Body, FastAPI
from fastapi.encoders import jsonable_encoder
import requests
from pydantic import BaseModel
# from model_ii import Model_II_b
import dill as pickle
import torch as T
import sys

app = FastAPI()

current_model = 'model_v2b_c2_small_ep15.pkl'
verbose_model = False  # for model v2


class Model_II_b(T.nn.Module):
    [...]


@app.post('/function')
def API_call(req_json: dict = Body(...)):
    try:
        # load model...
        model = pickle.load(open('models/' + current_model, 'rb'))
        result = model.dosomething_with(req_json)
        return result
    except Exception as e:
        raise e
        return {"error": str(e)}  # note: unreachable after the raise above


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
When I run this with python main.py it works fine and I get results. When I run it with uvicorn main:app and send a request, I get the following error:
AttributeError: Can't get attribute 'Model_II_b' on <module '__mp_main__' from '/opt/webapp/env/bin/uvicorn'>
Both should be using the same Python env, as I use the uvicorn from within the env.
I hope someone has an idea of what is wrong with my setup or code.
Update Stacktrace:
(model_2) root#machinelearning-01:/opt/apps# uvicorn main:app --env-file /opt/apps/env/pyvenv.cfg --reload
INFO: Loading environment from '/opt/apps/env/pyvenv.cfg'
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
INFO: Started reloader process [164777] using statreload
INFO: Started server process [164779]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: 127.0.0.1:33872 - "POST /ml/v2/predict HTTP/1.1" 500 Internal Server Error
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/opt/apps/env/lib/python3.6/site-packages/uvicorn/protocols/http/httptools_impl.py", line 385, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/opt/apps/env/lib/python3.6/site-packages/uvicorn/middleware/proxy_headers.py", line 45, in __call__
return await self.app(scope, receive, send)
File "/opt/apps/env/lib/python3.6/site-packages/fastapi/applications.py", line 183, in __call__
await super().__call__(scope, receive, send) # pragma: no cover
File "/opt/apps/env/lib/python3.6/site-packages/starlette/applications.py", line 102, in __call__
await self.middleware_stack(scope, receive, send)
File "/opt/apps/env/lib/python3.6/site-packages/starlette/middleware/errors.py", line 181, in __call__
raise exc from None
File "/opt/apps/env/lib/python3.6/site-packages/starlette/middleware/errors.py", line 159, in __call__
await self.app(scope, receive, _send)
File "/opt/apps/env/lib/python3.6/site-packages/starlette/exceptions.py", line 82, in __call__
raise exc from None
File "/opt/apps/env/lib/python3.6/site-packages/starlette/exceptions.py", line 71, in __call__
await self.app(scope, receive, sender)
File "/opt/apps/env/lib/python3.6/site-packages/starlette/routing.py", line 550, in __call__
await route.handle(scope, receive, send)
File "/opt/apps/env/lib/python3.6/site-packages/starlette/routing.py", line 227, in handle
await self.app(scope, receive, send)
File "/opt/apps/env/lib/python3.6/site-packages/starlette/routing.py", line 41, in app
response = await func(request)
File "/opt/apps/env/lib/python3.6/site-packages/fastapi/routing.py", line 197, in app
dependant=dependant, values=values, is_coroutine=is_coroutine
File "/opt/apps/env/lib/python3.6/site-packages/fastapi/routing.py", line 149, in run_endpoint_function
return await run_in_threadpool(dependant.call, **values)
File "/opt/apps/env/lib/python3.6/site-packages/starlette/concurrency.py", line 34, in run_in_threadpool
return await loop.run_in_executor(None, func, *args)
File "/usr/lib/python3.6/concurrent/futures/thread.py", line 56, in run
result = self.fn(*self.args, **self.kwargs)
File "./main.py", line 155, in API_call
raise e
File "./main.py", line 129, in API_call
model = pickle.load(open('models/' + current_model, 'rb'))
File "/opt/apps/env/lib/python3.6/site-packages/dill/_dill.py", line 270, in load
return Unpickler(file, ignore=ignore, **kwds).load()
File "/opt/apps/env/lib/python3.6/site-packages/dill/_dill.py", line 473, in load
obj = StockUnpickler.load(self)
File "/opt/apps/env/lib/python3.6/site-packages/dill/_dill.py", line 463, in find_class
return StockUnpickler.find_class(self, module, name)
AttributeError: Can't get attribute 'Model_II_b' on <module '__mp_main__' from '/opt/apps/env/bin/uvicorn'>
With the help from @lsabi I found the solution here: https://stackoverflow.com/a/51397373/13947506
With the custom unpickler, my problem was solved:
import dill as pickle


class CustomUnpickler(pickle.Unpickler):

    def find_class(self, module, name):
        if name == 'Model_II_b':
            from model_ii_b import Model_II_b
            return Model_II_b
        return super().find_class(module, name)


current_model = 'model_v2b_c2_small_ep24.pkl'

model = CustomUnpickler(open('models/' + current_model, 'rb')).load()
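A follow-up note: another way to avoid the lookup on __mp_main__ is to keep the class definition out of the entry-point script entirely and put it in its own importable module, so pickling by reference records a stable module path that resolves the same way under python main.py and uvicorn main:app. This is only a sketch under the assumption that the model file is re-created after the move (an existing pickle that already references __main__ still needs the custom unpickler above); the module name model_ii_b.py is illustrative.
# model_ii_b.py - hypothetical module holding only the class definition
import torch as T


class Model_II_b(T.nn.Module):
    ...  # layers and forward() as in the original class


# main.py - import the class from the module before unpickling
import dill as pickle
from model_ii_b import Model_II_b

model = pickle.load(open('models/model_v2b_c2_small_ep24.pkl', 'rb'))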
I was learning some async/await in Python and wanted to try it, but I'm getting this error while trying to connect to chatango via websocket and I don't know what it means.
I'm using Python 3.6.1 and aiohttp 2.2.3.
This is my code:
import asyncio
import aiohttp

msgs = []


async def main():
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect("ws://s12.chatango.com:8081/") as ws:
            async for msg in ws:  # the original used a plain "for"; iterating a websocket requires "async for"
                msgs.append(msg)
                print(msg)


loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
Full traceback:
Traceback (most recent call last):
File "C:\Program Files\Python36\lib\site-packages\aiohttp\client_reqrep.py", line 559, in start
(message, payload) = yield from self._protocol.read()
File "C:\Program Files\Python36\lib\site-packages\aiohttp\streams.py", line 509, in read
yield from self._waiter
File "C:\Program Files\Python36\lib\site-packages\aiohttp\client_proto.py", line 165, in data_received
messages, upgraded, tail = self._parser.feed_data(data)
File "aiohttp\_http_parser.pyx", line 274, in aiohttp._http_parser.HttpParser.feed_data (aiohttp/_http_parser.c:4364)
aiohttp.http_exceptions.BadHttpMessage: 400, message='invalid constant string'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:/Users/joseh/Desktop/a.ws.py", line 42, in <module>
loop.run_until_complete(main())
File "C:\Program Files\Python36\lib\asyncio\base_events.py", line 466, in run_until_complete
return future.result()
File "C:/Users/joseh/Desktop/a.ws.py", line 34, in main
async with session.ws_connect("ws://s12.chatango.com:8081/") as ws:
File "C:\Program Files\Python36\lib\site-packages\aiohttp\client.py", line 603, in __aenter__
self._resp = yield from self._coro
File "C:\Program Files\Python36\lib\site-packages\aiohttp\client.py", line 390, in _ws_connect
proxy_auth=proxy_auth)
File "C:\Program Files\Python36\lib\site-packages\aiohttp\helpers.py", line 91, in __iter__
ret = yield from self._coro
File "C:\Program Files\Python36\lib\site-packages\aiohttp\client.py", line 241, in _request
yield from resp.start(conn, read_until_eof)
File "C:\Program Files\Python36\lib\site-packages\aiohttp\client_reqrep.py", line 564, in start
message=exc.message, headers=exc.headers) from exc
aiohttp.client_exceptions.ClientResponseError: 400, message='invalid constant string'
invalid constant string is a custom response from chatango; they probably want a protocol or some kind of auth header.
If you don't know much about how chatango uses websockets, reverse engineering their system is probably not a good task for learning asyncio and aiohttp.
Better to use something like httparrot, which just echoes back the message you send it.
Here's your code, modified to use httparrot: it sends 5 messages, gets 5 responses, then exits.
import asyncio
import aiohttp

msgs = []


async def main():
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect('ws://httparrot.herokuapp.com/websocket') as ws:
            ws.send_str('hello')  # in aiohttp 3.x send_str is a coroutine and must be awaited
            async for msg in ws:
                msgs.append(msg)
                print(msg)
                ws.send_str('hello')
                if len(msgs) >= 5:
                    break


loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()

print(msgs)
I have a client and a server (exchanging JSON messages over a plain TCP connection) that work fine on my standard Ubuntu 16.04.
But when I try to run the client and server inside Docker, or the client outside Docker and the server inside, I get errors.
My docker command:
sudo docker run -p 127.0.0.1:8888:8888 -i -t seo_server
Here are my server and client code and the errors:
server
import asyncio
import json
import aiohttp
import re


async def get_cy(domain):
    return {'result': 'ok', 'value': 10}


async def handle(reader, writer):
    # data = await reader.read(1000)
    data = bytearray()
    while not reader.at_eof():
        chunk = await reader.read(2 ** 12)
        data += chunk
    # print(json.loads(data))
    # https://aiomas.readthedocs.io/en/latest/
    message = data.decode()
    addr = writer.get_extra_info('peername')
    print("Received %r from %r" % (message, addr))

    m = json.loads(message)
    f = m['method']
    del m['method']
    r = await globals()[f](**m)
    r = json.dumps(r)

    # await asyncio.sleep(int(m['time']))
    print("Send: %r" % r)
    writer.write(r.encode())
    await writer.drain()

    print("Close the client socket")
    writer.close()


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    coro = asyncio.start_server(handle, '127.0.0.1', 8888, loop=loop)
    server = loop.run_until_complete(coro)

    # Serve requests until Ctrl+C is pressed
    print('Serving cy microservice on {}'.format(server.sockets[0].getsockname()))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass

    # Close the server
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()
client
import asyncio
import json
from itertools import zip_longest
import time


def to_json(func):
    def wrap(**kwargs):
        message = kwargs
        message['method'] = func.__name__
        print(message)
        return asyncio.ensure_future(tcp_send(json.dumps(message)))
    return wrap


@to_json
def get_cy(domain):
    pass


async def tcp_send(message):
    loop = asyncio.get_event_loop()
    reader, writer = await asyncio.open_connection('127.0.0.1', 8888,
                                                   loop=loop)
    print('Send: %r' % message)
    writer.write(message.encode())
    writer.write_eof()

    data = await reader.read()
    data = data.decode()
    print('Received: %r' % data)

    print('Close the socket')
    writer.close()
    return json.loads(data)


def grouper(iterable, n, fillvalue=None):
    "Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)


def async_map(loop, f, iterable, chunk_size=2):
    for chunk in grouper(iterable, chunk_size):
        future = asyncio.gather(*(f(param) for param in chunk if param))
        loop.run_until_complete(future)


if __name__ == '__main__':
    loop = asyncio.get_event_loop()

    async def cy(site):
        cy = await get_cy(domain=site)
        print(site + " cy =", cy)
        # update site here

    while True:
        sites = ('site1.ru', 'site2.ru', 'site3.ru', 'site4.ru', 'site5.ru')
        async_map(loop, cy, sites)
        time.sleep(100)  # if not sites
Errors when I run the client and server inside Docker:
client error
root#341fdee56d6d:/seo_server# python client.py
{'domain': 'site1.ru', 'method': 'get_cy'}
{'domain': 'site2.ru', 'method': 'get_cy'}
Send: '{"domain": "site2.ru", "method": "get_cy"}'
Send: '{"domain": "site1.ru", "method": "get_cy"}'
Received: ''
Close the socket
Traceback (most recent call last):
File "client.py", line 63, in <module>
async_map(loop, cy, sites)
File "client.py", line 49, in async_map
loop.run_until_complete(future)
File "/usr/local/lib/python3.5/asyncio/base_events.py", line 341, in run_until_complete
return future.result()
File "/usr/local/lib/python3.5/asyncio/futures.py", line 276, in result
raise self._exception
File "/usr/local/lib/python3.5/asyncio/tasks.py", line 236, in _step
result = coro.throw(exc)
File "client.py", line 57, in cy
cy = await get_cy(domain=site)
File "/usr/local/lib/python3.5/asyncio/futures.py", line 387, in __iter__
yield self # This tells Task to wait for completion.
File "/usr/local/lib/python3.5/asyncio/tasks.py", line 287, in _wakeup
value = future.result()
File "/usr/local/lib/python3.5/asyncio/futures.py", line 276, in result
raise self._exception
File "/usr/local/lib/python3.5/asyncio/tasks.py", line 238, in _step
result = coro.send(value)
File "client.py", line 36, in tcp_send
return json.loads(data)
File "/usr/local/lib/python3.5/json/__init__.py", line 319, in loads
return _default_decoder.decode(s)
File "/usr/local/lib/python3.5/json/decoder.py", line 339, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/local/lib/python3.5/json/decoder.py", line 357, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
Task was destroyed but it is pending!
task: <Task pending coro=<tcp_send() running at client.py:30> wait_for=<Future pending cb=[Task._wakeup()]> cb=[Task._wakeup()]>
server error
sudo docker run -p 127.0.0.1:8888:8888 -i -t seo_server
Serving cy microservice on ('127.0.0.1', 8888)
Received '{"domain": "site2.ru", "method": "get_cy"}' from ('127.0.0.1', 47768)
http://bar-navig.yandex.ru/u?ver=2&show=31&url=http://site2.ru
Received '{"domain": "site1.ru", "method": "get_cy"}' from ('127.0.0.1', 47770)
http://bar-navig.yandex.ru/u?ver=2&show=31&url=http://site1.ru
Send: '{"result": "ok", "value": "50"}'
Task exception was never retrieved
future: <Task finished coro=<handle() done, defined at seo_server.py:18> exception=ConnectionResetError('Connection lost',)>
Traceback (most recent call last):
File "/usr/local/lib/python3.5/asyncio/tasks.py", line 238, in _step
result = coro.send(value)
File "seo_server.py", line 40, in handle
await writer.drain()
File "/usr/local/lib/python3.5/asyncio/streams.py", line 304, in drain
yield from self._protocol._drain_helper()
File "/usr/local/lib/python3.5/asyncio/streams.py", line 195, in _drain_helper
raise ConnectionResetError('Connection lost')
ConnectionResetError: Connection lost
Send: '{"result": "ok", "value": "50"}'
Task exception was never retrieved
future: <Task finished coro=<handle() done, defined at seo_server.py:18> exception=ConnectionResetError('Connection lost',)>
Traceback (most recent call last):
File "/usr/local/lib/python3.5/asyncio/tasks.py", line 238, in _step
result = coro.send(value)
File "seo_server.py", line 40, in handle
await writer.drain()
File "/usr/local/lib/python3.5/asyncio/streams.py", line 304, in drain
yield from self._protocol._drain_helper()
File "/usr/local/lib/python3.5/asyncio/streams.py", line 195, in _drain_helper
raise ConnectionResetError('Connection lost')
ConnectionResetError: Connection lost
If I run the client outside Docker, I get this client error and no activity on the server:
outside client error
/usr/bin/python3.5 /home/se7en/examples/python_3.5/seo_server/client.py
{'method': 'get_cy', 'domain': 'site1.ru'}
{'method': 'get_cy', 'domain': 'site2.ru'}
Send: '{"method": "get_cy", "domain": "site1.ru"}'
Send: '{"method": "get_cy", "domain": "site2.ru"}'
Traceback (most recent call last):
File "/home/se7en/examples/python_3.5/seo_server/client.py", line 63, in <module>
async_map(loop, cy, sites)
File "/home/se7en/examples/python_3.5/seo_server/client.py", line 49, in async_map
loop.run_until_complete(future)
File "/usr/lib/python3.5/asyncio/base_events.py", line 373, in run_until_complete
return future.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 274, in result
raise self._exception
File "/usr/lib/python3.5/asyncio/tasks.py", line 242, in _step
result = coro.throw(exc)
File "/home/se7en/examples/python_3.5/seo_server/client.py", line 57, in cy
cy = await get_cy(domain=site)
File "/usr/lib/python3.5/asyncio/futures.py", line 361, in __iter__
yield self # This tells Task to wait for completion.
File "/usr/lib/python3.5/asyncio/tasks.py", line 297, in _wakeup
future.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 274, in result
raise self._exception
File "/usr/lib/python3.5/asyncio/tasks.py", line 240, in _step
result = coro.send(None)
File "/home/se7en/examples/python_3.5/seo_server/client.py", line 28, in tcp_send
writer.write_eof()
File "/usr/lib/python3.5/asyncio/streams.py", line 294, in write_eof
return self._transport.write_eof()
File "/usr/lib/python3.5/asyncio/selector_events.py", line 745, in write_eof
self._sock.shutdown(socket.SHUT_WR)
OSError: [Errno 107] Transport endpoint is not connected
My Dockerfile:
FROM davidjfelix/python3.5
RUN pip3 install aiohttp
ADD . /seo_server
WORKDIR /seo_server
CMD python seo_server.py
docker version:
$ sudo docker version
Client:
Version: 1.11.2
API version: 1.23
Go version: go1.5.4
Git commit: b9f10c9
Built: Wed Jun 1 22:00:43 2016
OS/Arch: linux/amd64
Server:
Version: 1.11.2
API version: 1.23
Go version: go1.5.4
Git commit: b9f10c9
Built: Wed Jun 1 22:00:43 2016
OS/Arch: linux/amd64
Please help me find the problem and fix it.
Because of container isolation, Python processes in different containers don't see each other. So for the asyncio loop you need to start the server and the worker/client in the same container. You can do that via a launch .sh script as described at https://docs.docker.com/config/containers/multi-service_container/,
or do the same via Supervisord.
I had this same issue:
an async server and an async client outside Docker refusing to connect.
This solved my problem: https://forums.docker.com/t/python-asyncio-container-is-not-receiving-socket-connections/34018
You need to change:
coro = asyncio.start_server(handle, '127.0.0.1', 8888, loop=loop)
to
coro = asyncio.start_server(handle, '0.0.0.0', 8888, loop=loop)
Quoting the linked thread: "currently you just bind the local interface."