I have a Flask application that receives a request and tries to take a screenshot of the given URL, which is done with an asyncio function.
What I have done is:
import asyncio
from pyppeteer import launch
from flask import Flask
import base64
from flask import Blueprint, jsonify, request
import jwt

async def main(target):
    browser = await launch(headless=True)
    page = await browser.newPage()
    await page.goto(target)
    await page.screenshot({'path': '/tmp/screen.png', 'fullPage': True})
    await browser.close()

app = Flask(__name__)

@app.route('/heatMapDbConfigSave', methods=['POST'])
def notify():
    token, target, id = map(
        request.form.get, ('token', 'target', 'id'))
    asyncio.get_event_loop().run_until_complete(main(target))

if __name__ == '__main__':
    app.run(host='localhost', port=5002, debug=True)
The problem I have faced is the error RuntimeError: There is no current event loop in thread 'Thread-2'. I have googled and gone through previous posts, but none of them helped or pointed to a clear solution.
What is the solution to this?
Thanks in advance!
You can try something like the below:
from flask import Blueprint
import asyncio

health_check = Blueprint('health_check', __name__)

async def first():
    await asyncio.sleep(20)
    return 'first'

async def second():
    await asyncio.sleep(10)
    return 'second'

async def third():
    await asyncio.sleep(10)
    return 'third'

def ordinary_generator():
    # Flask handles each request in a worker thread, which has no event
    # loop by default, so create and register one for this thread.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    for future in asyncio.as_completed([first(), second(), third()]):
        print('reached')
        yield loop.run_until_complete(future)

@health_check.route('', methods=['GET'])
def healthcheck():
    """
    Retrieves the health of the service.
    """
    for element in ordinary_generator():
        print(element)
    return "Health check passed"
Blueprints are not required; I just used them here. You have to register the blueprint in the main app file like below:
app = Flask(__name__)
app.register_blueprint(health_check, url_prefix='/api/v1/healthcheck')

if __name__ == '__main__':
    app.run()
I have the following code:
from fastapi import FastAPI, Request, Form
import uvicorn
from testphoto.utils.logger import get_log
import datetime
import time
import asyncio

log = get_log()
app = FastAPI()

def process():
    log.info("Sleeping at " + str(datetime.datetime.now()))
    time.sleep(5)
    log.info("Woke up at " + str(datetime.datetime.now()))
    return "Success"

@app.post("/api/photos")
async def root(request: Request, photo: str = Form()):
    process()
    return {"message": "Hello World"}

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8008)
What I want to do is run the function process and return the response while keeping process running. I've read some documentation about asyncio and FastAPI, but I'm still unable to figure this out. Where would you point me in order to make the code do exactly what I want?
What you're looking for, as long as it is not CPU intensive, is called a background task.
You can read more in the docs:
https://fastapi.tiangolo.com/tutorial/background-tasks/
An example from the reference guide on how to use background tasks:
from fastapi import BackgroundTasks, FastAPI

app = FastAPI()

def write_notification(email: str, message=""):
    with open("log.txt", mode="w") as email_file:
        content = f"notification for {email}: {message}"
        email_file.write(content)

@app.post("/send-notification/{email}")
async def send_notification(email: str, background_tasks: BackgroundTasks):
    background_tasks.add_task(write_notification, email, message="some notification")
    return {"message": "Notification sent in the background"}
Inside my FastAPI application, I would like to schedule an HTTP request to be made to check for new results (comparing against the database) every X time interval. What would be the easiest way to accomplish this using httpx?
You can add an async task to the event loop during the startup event. This async task would check (and sleep) and store the result somewhere. In the example below, I've chosen to pass around a shared object using the app.state feature of FastAPI. This should give you enough pointers to implement your exact use case. I have commented out an example of dealing with httpx specifically.
from fastapi import FastAPI
import asyncio

class MySharedObject:
    def __init__(self) -> None:
        self.count = 0

async def timed_checker(obj: MySharedObject):
    while True:
        obj.count += 1
        # async with httpx.AsyncClient() as client:
        #     r = await client.get('https://www.example.com/')
        await asyncio.sleep(3)

app = FastAPI()

@app.on_event("startup")
def startup_function():
    app.state.shared_object = MySharedObject()
    asyncio.create_task(timed_checker(app.state.shared_object))

@app.get("/")
async def root():
    return {"hello": "world"}

@app.get("/count")
async def get_count():
    return app.state.shared_object.count

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
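As an aside, newer FastAPI releases recommend the lifespan context manager over the on_event hooks. A minimal sketch of the same pattern, reusing MySharedObject and timed_checker from above:

from contextlib import asynccontextmanager
from fastapi import FastAPI
import asyncio

@asynccontextmanager
async def lifespan(app: FastAPI):
    # Start the periodic checker when the app starts...
    app.state.shared_object = MySharedObject()
    task = asyncio.create_task(timed_checker(app.state.shared_object))
    yield
    # ...and cancel it on shutdown.
    task.cancel()

app = FastAPI(lifespan=lifespan)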
We have been using Quart for the last 6 months and it has been an awesome journey. Now we have a new requirement: we need to build a request engine with which we can make a request to any URL. We have tried different URLs and it works perfectly, but it fails when requesting the same server. As it has already been running for 3-4 months, we cannot do a full refactor. Is there a better way to handle this scenario?
from quart import Quart, request
import time
import asyncio
import requests

app = Quart(__name__)

@app.route("/", methods=["POST", "GET"])
async def main_route():
    print("Hello from main route")
    await asyncio.sleep(1)
    loop = asyncio.get_event_loop()

    async def handle_future(timeout):
        print("future is here")
        res = requests.get('http://0.0.0.0:5000/test')
        print("Response from test route", res.text)
        await asyncio.sleep(timeout)
        print("I am completed")

    def call_soon():
        asyncio.ensure_future(handle_future(10))

    loop.call_soon_threadsafe(call_soon)
    return "Hello from root"

@app.route("/test", methods=["POST", "GET"])
async def test_route():
    print("Hello from test route")
    await asyncio.sleep(0.5)
    return "Hello from test" + str(time.time())

if __name__ == '__main__':
    from hypercorn.config import Config
    from hypercorn.asyncio import serve

    Config.bind = ["0.0.0.0:5000"]
    asyncio.run(serve(app, Config()))
Output
* Serving Quart app 'index'
* Environment: production
* Please use an ASGI server (e.g. Hypercorn) directly in production
* Debug mode: False
* Running on http://0.0.0.0:5000 (CTRL + C to quit)
[2022-03-22 10:54:09,047] Running on http://0.0.0.0:5000 (CTRL + C to quit)
Hello from main route
[2022-03-22 10:54:13,775] 127.0.0.1:49650 GET / 1.1 200 15 1009876
future is here
It hangs indefinitely until the request times out, and moreover, it does not accept any more requests during this period.
I'd recommend you use a background task to run a coroutine function in the background (via add_background_task). I'd also recommend you switch from requests to httpx, as httpx makes requests without blocking the event loop (you can still use requests via the app.ensure_async function). This is what I suggest:
import asyncio
import time

import httpx
from quart import Quart, request

app = Quart(__name__)

async def _background_task(timeout):
    async with httpx.AsyncClient() as client:
        response = await client.get('http://0.0.0.0:5000/test')
        print("Response from test route", response.text)
        await asyncio.sleep(timeout)
        print("I am completed")

@app.route("/", methods=["POST", "GET"])
async def main_route():
    print("Hello from main route")
    await asyncio.sleep(1)
    app.add_background_task(_background_task, 10)
    return "Hello from root"

@app.route("/test", methods=["POST", "GET"])
async def test_route():
    print("Hello from test route")
    await asyncio.sleep(0.5)
    return "Hello from test" + str(time.time())

if __name__ == '__main__':
    from hypercorn.config import Config
    from hypercorn.asyncio import serve

    Config.bind = ["0.0.0.0:5000"]
    asyncio.run(serve(app, Config()))
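For completeness, a rough sketch of keeping the requests library by wrapping the blocking call with app.ensure_async, as mentioned above (this assumes ensure_async runs the sync callable in an executor so it does not block the event loop):

import requests

async def _background_task_with_requests(timeout):
    # Wrap the blocking requests.get so it runs off the event loop.
    get = app.ensure_async(requests.get)
    response = await get('http://0.0.0.0:5000/test')
    print("Response from test route", response.text)
    await asyncio.sleep(timeout)
    print("I am completed")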
anyio is a part of starlette and, therefore, of FastAPI. I find it quite convenient to use its task groups to perform concurrent requests to external services from inside one of my API servers.
Also, I would like to stream out the results as soon as they are ready. fastapi.StreamingResponse could do the trick, but I still need to be able to keep the task group up and running after returning the StreamingResponse, which sounds like something that goes against the idea of structured concurrency.
Using an asynchronous generator may look like an obvious solution, but yield generally cannot be used in a task group context, according to this: https://trio.readthedocs.io/en/stable/reference-core.html#cancel-scopes-and-nurseries
Here is an example of a FastAPI server that seems to work, though it aggregates the responses before returning them:
import anyio
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

app = FastAPI()

@app.get("/")
async def root():
    # What to put below?
    result = await main()
    return StreamingResponse(iter(result))

async def main():
    send_stream, receive_stream = anyio.create_memory_object_stream()
    result = []
    async with anyio.create_task_group() as tg:
        async with send_stream:
            for num in range(5):
                tg.start_soon(sometask, num, send_stream.clone())
        async with receive_stream:
            async for entry in receive_stream:
                # What to do here???
                result.append(entry)
    return result

async def sometask(num, send_stream):
    await anyio.sleep(1)
    async with send_stream:
        await send_stream.send(f'number {num}\n')

if __name__ == "__main__":
    import uvicorn

    # Debug-only configuration
    uvicorn.run(app)
So, the question is: is there something similar to @trio_util.trio_async_generator in anyio, or is it possible to use @trio_util.trio_async_generator with FastAPI directly?
Maybe there are other solutions?
import anyio
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

app = FastAPI()

@app.get("/")
async def root():
    return StreamingResponse(main())

async def main():
    send_stream, receive_stream = anyio.create_memory_object_stream()
    async with anyio.create_task_group() as tg:
        async with send_stream:
            for num in range(5):
                tg.start_soon(sometask, num, send_stream.clone())
        async with receive_stream:
            async for entry in receive_stream:
                yield entry

async def sometask(num, send_stream):
    async with send_stream:
        for i in range(1000):
            await anyio.sleep(1)
            await send_stream.send(f"number {num}\n")

if __name__ == "__main__":
    import uvicorn

    # Debug-only configuration
    uvicorn.run(app)
Unexpectedly, it works.
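One caveat, per the trio documentation linked in the question: yielding from inside a task group suspends the generator within the task group's cancel scope, so if the client disconnects mid-stream and the generator is abandoned, cleanup and cancellation may not behave as structured concurrency would normally guarantee.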
I created a Dash app to present information that another piece of code is collecting, and I want to run them both concurrently using the asyncio module in Python.
My code uses async functions, and the Dash app (which is based on Flask) blocks anything else from executing while serving.
I'm not sure if this is something that has to involve opening more threads.
Here's my current code, which only runs the main coroutine:
async def main():
    # some code here...
    while True:
        try:
            await client.handle_message()
        except ConnectionClosedError as error:
            logger.error(error)
            for strategy in strategies:
                await asyncio.create_task(...)
    # some code here...

async def run_dashboard():
    app = create_app()
    app.run_server('0.0.0.0', 5000, debug=False)

if __name__ == '__main__':
    # some code here...
    # Currently just runs the main coroutine
    asyncio.run(main())
How can I run main and run_dashboard concurrently?
Frankly speaking, it is not good design to combine Dash (Flask) with async work in one process; consider running Flask and the async activities in different processes (i.e. apps).
Nevertheless, if you still want to run it all in one process, I can give you the following working example. Please follow the comments and ask if you have any questions:
from flask import Flask, jsonify
import asyncio
from threading import Thread

# ** Async Part **

async def some_print_task():
    """Some async function"""
    while True:
        await asyncio.sleep(2)
        print("Some Task")

async def another_task():
    """Another async function"""
    while True:
        await asyncio.sleep(3)
        print("Another Task")

async def async_main():
    """Main async function"""
    await asyncio.gather(some_print_task(), another_task())

def async_main_wrapper():
    """Non-async wrapper around async_main to run it as the target function of a Thread"""
    asyncio.run(async_main())

# *** Flask Part ***

app = Flask(__name__)

@app.route("/", methods=["GET"])
def index():
    """just some function"""
    return jsonify({"hello": "world"})

if __name__ == '__main__':
    # run all async stuff in another thread
    th = Thread(target=async_main_wrapper)
    th.start()
    # run Flask server
    app.run(host="0.0.0.0", port=9999)
    th.join()
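The key point in this design is that asyncio.run creates and manages its own event loop inside the worker thread, so the Flask server (running in the main thread) and the asyncio tasks never compete for a single event loop.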
Here is some code running a Dash app (collecting flight data, courtesy of Jose Portilla's Udemy course) plus the threading to run the Dash app and some tasks in async.
from flask import Flask, jsonify
import asyncio
from threading import Thread

# Dash
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import requests
import plotly.graph_objects as go

# ** Async Part **

async def some_print_task():
    """Some async function"""
    while True:
        await asyncio.sleep(2)
        print("Some Task")

async def another_task():
    """Another async function"""
    while True:
        await asyncio.sleep(3)
        print("Another Task")

async def async_main():
    """Main async function"""
    await asyncio.gather(some_print_task(), another_task())

def async_main_wrapper():
    """Non-async wrapper around async_main to run it as the target function of a Thread"""
    asyncio.run(async_main())

# *** Dash Part ***

app = dash.Dash()

app.layout = html.Div([
    # html.Div([
    #     html.Iframe(src="https://www.flightradar24.com",
    #                 height=500, width=200)
    # ]),
    html.Div([
        html.Pre(id='counter-text', children='Active Flights Worldwide'),
        dcc.Graph(id='live-update-graph', style={'width': 1200}),
        dcc.Interval(id='interval-component',
                     interval=6000,
                     n_intervals=0)
    ])
])

counter_list = []

@app.callback(Output('counter-text', 'children'),
              [Input('interval-component', 'n_intervals')])
def update_layout(n):
    url = "https://data-live.flightradar24.com/zones/fcgi/feed.js?faa=1&mlat=1&flarm=1&adsb=1&gnd=1&air=1&vehicles=1&estimated=1&stats=1"
    # A fake header is necessary to access the site
    res = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
    data = res.json()
    counter = 0
    for element in data["stats"]["total"]:
        counter += data["stats"]["total"][element]
    counter_list.append(counter)
    return "Active flights Worldwide: {}".format(counter)

@app.callback(Output('live-update-graph', 'figure'),
              [Input('interval-component', 'n_intervals')])
def update_graph(n):
    fig = go.Figure(data=[
        go.Scatter(x=list(range(len(counter_list))),
                   y=counter_list,
                   mode='lines+markers')
    ])
    return fig

if __name__ == '__main__':
    # run all async stuff in another thread
    th = Thread(target=async_main_wrapper)
    th.start()
    # run Dash server
    # app.run(host="0.0.0.0", port=9999)
    app.run_server(debug=True)
    th.join()
If you really want it all to run in one process, the following worked for me:
import asyncio
from functools import partial
from threading import Thread

# Run the Dash/Flask server in a separate thread; use_reloader must be
# False because the reloader only works in the main thread.
partial_run = partial(app.run, host="0.0.0.0", port=5000, debug=True, use_reloader=False)
t = Thread(target=partial_run)
t.start()
asyncio.run(main())
Run run_dashboard in a background thread. See the asyncio documentation for details.
async def run():
    await asyncio.gather(
        asyncio.to_thread(run_dashboard),
        main()
    )

asyncio.run(run())
Note that asyncio.to_thread is a new function added in Python 3.9.
For Python versions older than 3.9, copy its implementation from the standard library source file threads.py.
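For reference, a minimal backport sketch along the lines of that stdlib implementation:

import asyncio
import contextvars
import functools

async def to_thread(func, *args, **kwargs):
    """Run a sync function in a worker thread, preserving context variables."""
    loop = asyncio.get_running_loop()
    ctx = contextvars.copy_context()
    func_call = functools.partial(ctx.run, func, *args, **kwargs)
    # Run in the default ThreadPoolExecutor without blocking the event loop.
    return await loop.run_in_executor(None, func_call)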