I am running WSGIServer using gevent but I also want to run a background worker.
from gevent import monkey
from gevent.pywsgi import WSGIServer
from flask import Flask, request, Response
monkey.patch_all()

from threading import Thread

api = Flask(__name__)
my_queue = [1, 2, 3]

@api.route('/add', methods=['GET'])
def add():
    my_queue.append(request.args.get('item'))
    return Response("Added!", 200)

def worker(id):
    while True:
        print(f"{id}-working-{my_queue}")

if __name__ == '__main__':
    server = WSGIServer(('0.0.0.0', 5001), api)
    server_thread = Thread(target=server.start)
    worker1_thread = Thread(target=worker, args=(1,))
    worker2_thread = Thread(target=worker, args=(2,))
    server_thread.start()
    worker1_thread.start()
    worker2_thread.start()
If I run it without monkey.patch_all(), my two workers run in parallel but the API does not respond.
If I use monkey.patch_all(), only the first worker runs and the API still does not respond.
How can I get this to work properly?
Also, I am aware I need a lock for the queue but I don't know how to implement it.
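For context, here is a minimal sketch of one way this could be wired up, assuming everything runs as gevent greenlets rather than OS threads (the worker names and the blocking get() are illustrative, not taken from the original code): call monkey.patch_all() before the other imports, replace the plain list with a queue.Queue (which also addresses the locking concern, since Queue is thread-safe), spawn the workers with gevent.spawn, and let server.serve_forever() block the main greenlet.

# Sketch only: gevent greenlets instead of threads.
from gevent import monkey
monkey.patch_all()  # patch before importing anything else

import gevent
from gevent.pywsgi import WSGIServer
from flask import Flask, request, Response
from queue import Queue  # thread-safe; cooperative once monkey-patched

api = Flask(__name__)
my_queue = Queue()

@api.route('/add', methods=['GET'])
def add():
    my_queue.put(request.args.get('item'))
    return Response("Added!", 200)

def worker(worker_id):
    while True:
        item = my_queue.get()  # blocks cooperatively until an item arrives
        print(f"{worker_id}-working-{item}")

if __name__ == '__main__':
    gevent.spawn(worker, 1)
    gevent.spawn(worker, 2)
    WSGIServer(('0.0.0.0', 5001), api).serve_forever()  # blocks the main greenlet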
I have Python 3.9 and I am using the default development web server provided by Flask, on Windows (a laptop). I need to run the app on a Windows machine and be able to start and stop it on demand.
Here is what I have tried so far. When I run the application in VS Code I need to force-stop the app, and if I run it in a command window I have to kill the Python process from Task Manager.
With the following code changes I can exit the app by sending SIGTERM to the process itself, since neither Flask nor the simple HTTP server provides an API for exiting the process.
from flask import Flask, request, g, make_response
import queue
import time
import os
from threading import Thread
import signal
import ctypes
from multiprocessing import Process
import sys

OUTPUT_EMPTY_LIMIT = 3
DEFAULT_PORT = 9090
SHUTDOWN = False
EXIT_CHARS = "Qq\x11\x18"  # 'Q', 'q', Ctrl-Q, Ctrl-X

print("Name = {}".format(__name__))
app = Flask(__name__)

def shutdown_app():
    global SHUTDOWN
    SHUTDOWN = True

@app.before_request
def before_request():
    if SHUTDOWN:
        return 'Shutting down...'

def run_app(port, host, debug, threaded):
    app.run(port=port, host=host, debug=debug, threaded=threaded)

@app.route("/")
def app_help():
    return "Hello, World from help!"

@app.route("/square/<int:number>")
def square(number):
    return str(number**2)

# This doesn't kill the app either.
@app.route("/stop")
def stop():
    func = request.environ.get('werkzeug.server.shutdown')
    if func is None:
        raise RuntimeError('Not running with the werkzeug Server')
    func()
    return 'Server shutting down...'
    exit(0)

if __name__ == "__main__":
    port = 5000
    host = '0.0.0.0'
    debug = False
    threaded = True

    process = Process(target=run_app, args=(port, host, debug, threaded))
    process.start()

    while True:
        ch = input("Enter 'Q' to stop server and exit: ")
        if (ch == 'Q'):
            break

    shutdown_app()
    process.terminate()
    # process.kill()
    # process.join()
    print("Exiting the app")
    # os.kill(os.getpid(), signal.SIGINT)
    os._exit(0)
I am new to Python and have tried both Google search and ChatGPT to find a way to shut down the Flask app and the web server it starts on a Windows operating system.
I have also tried the other answers on Stack Overflow question 15562446.
I also tried the link Shutdown The Simple Server.
I've found while testing that werkzeug.server.shutdown does nothing, and the server needs to be shut down using sys.exit(0) on my Windows machine.
In your code a value is returned before exit() is run, so the exit is never reached and the server does not shut down. One way to do what you want is to use deferred callbacks.
from flask import after_this_request

@app.get('/stop')
def shutdown():
    @after_this_request
    def response_processor(response):
        @response.call_on_close
        def shutdown():
            import sys
            sys.exit("Stop API Called")
        return response
    return 'Server shutting down...', 200
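For what it's worth, response.call_on_close registers a callback that Werkzeug invokes once the response object is closed, i.e. after the body has been handed back to the server, so the 'Server shutting down...' text should reach the client before sys.exit() fires. A quick way to try it (a hypothetical client snippet, assuming the server is listening on localhost:5000):

# Hypothetical smoke test for the /stop route above.
import urllib.request

reply = urllib.request.urlopen("http://localhost:5000/stop").read()
print(reply)  # expected: b'Server shutting down...'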
I have a schedule script running a job with:
schedule.every(3).seconds.do(jobCheckSmth)

while True:
    schedule.run_pending()
    time.sleep(1)
I wanted to use a web interface to check on its status instead of print() on the CLI, with
run(host='localhost', port=80, debug=True)
But it blocks code execution, so I have to hit Ctrl-C to break the web server loop before the while loop can continue:
Bottle v0.12.18 server starting up (using WSGIRefServer())...
Listening on http://localhost:80/ Hit Ctrl-C to quit.
Here is an example that works really well for me, using gevent to make bottle asynchronous. It's either this, or you have to run schedule in its own thread outside of your bottle app; but honestly, you should be doing this.
import gevent
from gevent import monkey, spawn as gspawn, sleep as gsleep, socket, signal_handler as sig
monkey.patch_all()
import signal
import logging
import schedule
import arrow
import bottle
from bottle import Bottle, static_file, get, post, request, response, template, redirect, hook, route, abort
from gevent.pywsgi import WSGIServer
from geventwebsocket.handler import WebSocketHandler

logger = logging.getLogger(__name__)
port = 8080  # choose the port you want to serve on

def start():
    def start_thread():
        set()
        while 1:
            try:
                schedule.run_pending()
            except:
                logger.exception('Scheduler Exception')
            gsleep(5)  # This is the polling cycle for pending jobs
    print('Scheduler Started...')
    gspawn(start_thread)

def sample():
    gspawn(jobCheckSmth)  # jobCheckSmth is the job from the question

def set():
    # schedule.every(180).seconds.do(func)
    schedule.every().day.at("00:00").do(sample)
    logger.info('Started Schedule at {}'.format(arrow.now()))

@get('/')
def app():
    return 'Hello World!'

if __name__ == '__main__':
    start()  # spawn the scheduler greenlet
    botapp = bottle.app()
    server = WSGIServer(("0.0.0.0", int(port)), botapp, handler_class=WebSocketHandler)

    def shutdown():
        print('Shutting down ...')
        server.stop(timeout=60)
        exit(signal.SIGTERM)

    sig(signal.SIGTERM, shutdown)
    sig(signal.SIGINT, shutdown)
    server.serve_forever()
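The key design choice here is that the scheduler loop runs as a greenlet (gspawn) rather than an OS thread, so it shares a single event loop with the WSGIServer; gsleep(5) yields control back to the server between polls, which is why the web interface stays responsive while the scheduled jobs keep running.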
I'm trying to build a Flask app with Gunicorn to serve concurrent requests. For what it's worth, the context is a bring-your-own-container SageMaker application.
The issue is that I need the application to periodically check for updates, so I thought I would implement a thread for this. Here is a minimal example of some Flask code with an update thread.
server.py
from flask import Flask
import time, threading

app = Flask(__name__)

message = True

def update():
    global message
    while True:
        message = not message
        time.sleep(10)

@app.route("/")
def hello():
    global message
    return str(message)

update_thread = threading.Thread(target=update)

if __name__ == "__main__":
    update_thread.start()
    app.run()
    update_thread.join()
I then launch with gunicorn:
gunicorn -k gevent -b unix:/tmp/gunicorn.sock -w 4 server:app
Perhaps unsurprisingly, the update thread doesn't start, since the __main__ section is never executed.
Question: How can one use an update thread (or similar construct) in a Flask app with Gunicorn?
It looks like this can be accomplished using APScheduler's BackgroundScheduler as follows:
pip install apscheduler
server.py
from flask import Flask
from apscheduler.schedulers.background import BackgroundScheduler
import atexit

app = Flask(__name__)

message = True

def update():
    global message
    message = not message

scheduler = BackgroundScheduler()
scheduler.add_job(func=update, trigger="interval", seconds=10)
scheduler.start()

# shut down the scheduler when exiting the app
atexit.register(scheduler.shutdown)

@app.route("/")
def hello():
    global message
    return str(message)

if __name__ == "__main__":
    app.run()
Then launching as usual with
gunicorn -k gevent -b unix:/tmp/gunicorn.sock -w 4 server:app
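One thing to keep in mind with this layout: because the scheduler is created at import time, each of the four Gunicorn workers (-w 4) imports server.py and starts its own BackgroundScheduler, so the update job runs independently in every worker process.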
I am creating a Flask-SocketIO app in which I want to emit a socket event when an OSC message is received, so that it can be picked up in a web app via socketio.js.
from flask import Flask, render_template, copy_current_request_context, current_app
from flask_socketio import SocketIO, emit, send
from OSC import OSCClient, OSCMessage, OSCServer
import time, threading

app = Flask(__name__)
socketio = SocketIO(app)

client = OSCClient()
client.connect(("localhost", 62345))

receive_address = ('localhost', 62346)
server = OSCServer(receive_address)

# simple callback functions
def answer_handler(addr, tags, stuff, source):
    with app.app_context():
        socketio.emit('tempo', 1)

# adding callback functions to listener
server.addMsgHandler("/puredata", answer_handler)

if __name__ == '__main__':
    # Start OSCServer in extra thread
    st = threading.Thread(target=server.serve_forever)
    st.daemon = True
    st.start()
    print('OSC server is started')

    socketio.run(app, host='0.0.0.0')
Even though I don't get an error message, the socket event is not received on the JavaScript side, because the emit function is called in another thread and there are conflicts with the request context.
I tried several things suggested in other Stack Overflow questions:
socketio = SocketIO(app, async_mode='threading')
Adding this line before the handler function:
@copy_current_request_context
Or recreating a socketio instance inside the callback:
def answer_handler(addr, tags, stuff, source):
    socketio_connection = SocketIO(app)
    with app.app_context():
        socketio_connection.emit('tempo', 1)
But none of these solutions works, and I need to solve this 'context' problem inside my thread.
Note that:
socketio.emit('tempo', 1)
works fine outside of this thread and is received on the JavaScript side.
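One approach that might help (a sketch, not verified against this exact setup): let Flask-SocketIO own the background work by starting the OSC server with socketio.start_background_task() instead of a manual threading.Thread. The handler then runs under the same async model Flask-SocketIO picked, and socketio.emit() called outside a request context simply broadcasts to all connected clients, so no app/request context juggling should be needed.

# Sketch only: hand the OSC server loop to Flask-SocketIO's background task helper.
from flask import Flask
from flask_socketio import SocketIO
from OSC import OSCClient, OSCMessage, OSCServer

app = Flask(__name__)
socketio = SocketIO(app)

server = OSCServer(('localhost', 62346))

def answer_handler(addr, tags, stuff, source):
    # Emitting outside a request context broadcasts to every connected client.
    socketio.emit('tempo', 1)

server.addMsgHandler("/puredata", answer_handler)

if __name__ == '__main__':
    socketio.start_background_task(server.serve_forever)
    socketio.run(app, host='0.0.0.0')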
In a project I'm working on I need to cover a Tornado service with Behave, so I want to start an instance of my Tornado service before running each scenario.
Naively trying to run the loop in before_all seems to lock up execution:
from tornado import ioloop
from tornadoadapter.applications import APPLICATION

def before_all(context):
    print("Service running on port 8000")
    APPLICATION.listen(8000)
    ioloop.IOLoop.instance().start()
So it's probably not what I need.
Your IOLoop is running in the main thread, so it's blocking. You could do it in a separate thread or process.
from multiprocessing import Process
from tornado import ioloop
from tornadoadapter.applications import APPLICATION

def run_server():
    print("Service running on port 8000")
    APPLICATION.listen(8000)
    ioloop.IOLoop.instance().start()

def before_all(context):
    context.server_thread = Process(target=run_server)
    context.server_thread.daemon = True
    context.server_thread.start()
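A follow-up you will probably also want (a sketch under the same assumptions): give the server a moment to bind before scenarios run, and tear the process down in after_all so the Behave run exits cleanly.

# Sketch: complements the before_all above; time.sleep is a crude readiness wait.
import time

def before_all(context):
    context.server_thread = Process(target=run_server)
    context.server_thread.daemon = True
    context.server_thread.start()
    time.sleep(1)  # crude wait for the server to start listening

def after_all(context):
    if context.server_thread.is_alive():
        context.server_thread.terminate()
        context.server_thread.join()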