Error in Celery while requesting API endpoint (Flask app) - Python

Basically, I have a Flask app hosted on an Azure instance. When I POST some data to an API endpoint, Celery starts a process in the background and the API sends a response to the client immediately.
Here is a basic tasks.py sample:
from celery import Celery

app = Celery('tasks', broker='amqp://localhost//')

@app.task
def reverse(main):
    return main[::-1]
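(For reference, a worker for this module would typically be started with the standard Celery CLI, assuming the file is saved as tasks.py:
celery -A tasks worker --loglevel=info)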
Error:
Basic Flask example:
from flask import Flask
from flask import request
from tasks import *

app = Flask(__name__)

@app.route('/params', methods=['POST'])
def get_url():
    main = request.args.get('main')
    reverse.delay(main)
    return main

if __name__ == "__main__":
    app.run()
Again, the Flask app is running on an Azure instance. Do I have to change localhost to an IP in tasks.py?
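Both the Flask app (which calls reverse.delay) and the Celery worker need to reach the broker, so amqp://localhost// only works if RabbitMQ runs on the same machine as each of them; otherwise the URL has to point at the broker's reachable host or IP. A minimal sketch, assuming the address is read from a hypothetical BROKER_URL environment variable:

import os
from celery import Celery

# BROKER_URL and its default are illustrative; adjust to wherever RabbitMQ runs.
broker_url = os.environ.get('BROKER_URL', 'amqp://localhost//')
app = Celery('tasks', broker=broker_url)

@app.task
def reverse(main):
    return main[::-1]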

Related

Deploy Flask app with celery worker on Google Cloud

I have a very simple Flask app example which uses a celery worker to process a task asynchronously:
app.py
import os
from flask import Flask

app = Flask(__name__)
app.config['CELERY_BROKER_URL'] = os.environ.get('REDISCLOUD_URL', 'redis://localhost:6379')
app.config['CELERY_RESULT_BACKEND'] = os.environ.get('REDISCLOUD_URL', 'redis://localhost:6379')
app.config['SQLALCHEMY_DATABASE_URI'] = conn_str  # conn_str defined elsewhere in the project

celery = make_celery(app)  # make_celery not shown in the question; see the sketch below
db.init_app(app)           # db is a SQLAlchemy() instance defined elsewhere

@app.route('/')
def index():
    return "Working"

@app.route('/test')
def test():
    task = reverse.delay("hello")
    return task.id

@celery.task(name='app.reverse')
def reverse(string):
    return string[::-1]

if __name__ == "__main__":
    app.run()
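The make_celery helper isn't shown in the question; a common implementation, sketched here from the usual Flask/Celery integration pattern (not necessarily what the author used), looks like this:

from celery import Celery

def make_celery(app):
    # Build a Celery instance from the Flask app's config and make every task
    # run inside the Flask application context.
    celery = Celery(
        app.import_name,
        broker=app.config['CELERY_BROKER_URL'],
        backend=app.config['CELERY_RESULT_BACKEND'],
    )
    celery.conf.update(app.config)

    class ContextTask(celery.Task):
        def __call__(self, *args, **kwargs):
            with app.app_context():
                return self.run(*args, **kwargs)

    celery.Task = ContextTask
    return celery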
To run it locally, I run celery -A app.celery worker --loglevel=INFO
in one terminal, and python app.py in another terminal.
I'm wondering how I can deploy this application on Google Cloud. I don't want to use Task Queues since they are only compatible with Python 2. Is there good documentation available for doing something like this? Thanks.
App Engine Task Queues is the previous version of Google Cloud Tasks, which has full support for App Engine Flexible/Standard and Python 3.x runtimes.
You need to create a Cloud Tasks queue and an App Engine service to handle the tasks.
Gcloud command to create a queue
gcloud tasks queues create [QUEUE_ID]
Task handler code
from flask import Flask, request

app = Flask(__name__)

@app.route('/example_task_handler', methods=['POST'])
def example_task_handler():
    """Log the request payload."""
    payload = request.get_data(as_text=True) or '(empty payload)'
    print('Received task with payload: {}'.format(payload))
    return 'Printed task payload: {}'.format(payload)
Code to push a task
"""Create a task for a given queue with an arbitrary payload."""
from google.cloud import tasks_v2
client = tasks_v2.CloudTasksClient()
# replace with your values.
# project = 'my-project-id'
# queue = 'my-appengine-queue'
# location = 'us-central1'
# payload = 'hello'
parent = client.queue_path(project, location, queue)
# Construct the request body.
task = {
'app_engine_http_request': { # Specify the type of request.
'http_method': tasks_v2.HttpMethod.POST,
'relative_uri': '/example_task_handler'
}
}
if payload is not None:
# The API expects a payload of type bytes.
converted_payload = payload.encode()
# Add the payload to the request.
task['app_engine_http_request']['body'] = converted_payload
if in_seconds is not None:
timestamp = datetime.datetime.utcnow() + datetime.timedelta(seconds=in_seconds)
# Add the timestamp to the tasks.
task['schedule_time'] = timestamp
# Use the client to build and send the task.
response = client.create_task(parent=parent, task=task)
print('Created task {}'.format(response.name))
return response
requirements.txt
Flask==1.1.2
gunicorn==20.0.4
google-cloud-tasks==2.0.0
You can check the full example on the GCP Python examples GitHub page.
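If the push-task code is wrapped in a function as above, e.g. create_task(project, queue, location, payload=None, in_seconds=None), it can be invoked like this sketch (all values are placeholders):

create_task(
    project='my-project-id',
    queue='my-appengine-queue',
    location='us-central1',
    payload='hello',
)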

What is the best way for a Python script to communicate with a Python Flask server that transports content to the client?

The following scenario:
I have a Raspberry Pi running as a server. Currently I am using a Python script with Flask, and I can access the Raspberry Pi from my PC. (The Flask server serves a React app.)
But the functionality should be extended. It should look like the following:
A second Python script runs all the time. This script fetches data from an external API every second and processes it. If certain conditions are met, the data should be processed and then communicated to the Flask server, and the Flask server then forwards the data to the website running on the computer.
Which method is best for programming this interprocess communication, and are there any libraries for it? I tried Celery, but it breaks my second Python script whenever I want to access the external API, so I don't know if it is the right choice.
What else would be the best approach? Threading? Direct interprocess communication?
If it matters, this is how my server application looks so far:
from gevent import monkey
from flask import Flask, render_template
from flask_socketio import SocketIO

monkey.patch_all()

app = Flask(__name__, template_folder='./build', static_folder='./build/static')
socket_io = SocketIO(app)

@app.route('/')
def main():
    return render_template('index.html')

@socket_io.on('fromFrontend')
def handleInput(input):
    print('Input from Frontend: ' + input)
    send_time()

@socket_io.on('time')
def send_time():
    socket_io.emit('time', {'returnTime': "some time"})

if __name__ == '__main__':
    socket_io.run(app, host='0.0.0.0', port=5000, debug=True)
Well, I found a solution for my specific problem; I implemented it with a thread as follows:
import gevent.monkey
gevent.monkey.patch_all()

from flask import Flask, render_template
from flask_socketio import SocketIO
import time
import requests
from threading import Thread

app = Flask(__name__, template_folder='./build', static_folder='./build/static')
socket_io = SocketIO(app)

@app.route('/')
def main():
    thread = Thread(target=backgroundTask)
    thread.daemon = True
    thread.start()
    return render_template('index.html')

@socket_io.on('fromFrontend')
def handleInput(input):
    print('Input from Frontend: ' + input)

@socket_io.on('time')
def send_time():
    socket_io.emit('time', {'returnTime': 'hi frontend'})

def backgroundTask():
    # do something here
    # access socket to push some data
    socket_io.emit('time', {'returnTime': "some time"})

if __name__ == '__main__':
    socket_io.run(app, host='0.0.0.0', port=5000, debug=True)
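An alternative to starting the polling loop inside the Flask process is to keep the second script separate and have it push data to the server over plain HTTP, with the server re-emitting over the websocket. A rough sketch, where /api/data, fetch_and_process, and the URLs are hypothetical placeholders:

# In the Flask/SocketIO app: accept pushed data and forward it to the browser.
from flask import request

@app.route('/api/data', methods=['POST'])
def receive_data():
    data = request.get_json()
    socket_io.emit('time', {'returnTime': data.get('value')})
    return '', 204

# In the second script: poll the external API and push interesting results.
import time
import requests

def fetch_and_process():
    # Hypothetical: call the external API and return a value when conditions are met.
    resp = requests.get('https://example.com/api')  # placeholder external API
    return resp.json().get('value')

while True:
    result = fetch_and_process()
    if result is not None:
        # Replace with the Raspberry Pi's address.
        requests.post('http://raspberrypi.local:5000/api/data', json={'value': result})
    time.sleep(1)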

Flask application on AWS EB always gives me a 500 Internal Server Error

I can run my application on localhost without an issue, but as soon as I upload it to AWS EB, I get the 500 Internal Server Error.
Here is my app/__init__.py file:
from flask import Flask
app = Flask(__name__)
And my application.py file:
from app import app

if __name__ == '__main__':
    app.run(host='0.0.0.0', port='8080', use_reloader=True, debug=True)
Is there a misconfiguration?
The issue is that AWS EB expects the WSGI entry point to be a module-level callable named application:
application = Flask(__name__)
Solution:
application = app = Flask(__name__)
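In other words, Elastic Beanstalk's default WSGI configuration imports application.py and looks for a callable named application. A single-file sketch of what it expects (the route and message are illustrative):

from flask import Flask

# EB's default WSGI config looks for a module-level callable named "application".
application = app = Flask(__name__)

@app.route('/')
def index():
    return 'Hello from Elastic Beanstalk'

if __name__ == '__main__':
    application.run(host='0.0.0.0', port=8080, debug=True)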

Flask APScheduler + Gunicorn workers - Still running task twice after socket fix

I have a Flask app where I use Flask-APScheduler to run a scheduled query on my database and send an email via a cron job.
I'm running my app via Gunicorn with the following config, controlled via supervisor:
[program:myapp]
command=/home/path/to/venv/bin/gunicorn -b localhost:8000 -w 4 myapp:app --preload
directory=/home/path/here
user=myuser
autostart=true
autorestart=true
stopasgroup=true
killasgroup=true
The job details are stored in my config.py:
# ...config stuff
JOBS = [
    {
        'id': 'sched_email',
        'func': 'app.tasks:sched_email',
        'trigger': 'cron',
        'hour': 9,
    },
]

SCHEDULER_API_ENABLED = True
Originally the email was being sent 4 times due to the 4 workers initialising the app and the scheduler. I found a similar article which suggested opening a socket when the app is initialised so the other workers can't grab the job.
My __init__.py:
# Third-party imports
import logging
from logging.handlers import SMTPHandler, RotatingFileHandler
import os
from flask import Flask
from flask_mail import Mail, Message
from flask_sqlalchemy import SQLAlchemy
from flask_apscheduler import APScheduler
from flask_migrate import Migrate
from flask_login import LoginManager
import sys, socket

# Local imports
from config import app_config

# Create initial instances of extensions
mail = Mail()
db = SQLAlchemy()
scheduler = APScheduler()
migrate = Migrate()
login_manager = LoginManager()

# Construct the Flask app instance
def create_app(config_name):
    app = Flask(__name__)
    app.config.from_object(app_config[config_name])
    app_config[config_name].init_app(app)

    migrate.init_app(app, db)
    mail.init_app(app)
    db.init_app(app)

    # Fix to ensure only one Gunicorn worker grabs the scheduled task
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(("127.0.0.1", 47200))
    except socket.error:
        pass
    else:
        scheduler.init_app(app)
        scheduler.start()

    login_manager.init_app(app)
    login_manager.login_message = "You must be logged in to access this page."
    login_manager.login_message_category = 'danger'
    login_manager.login_view = "admin.login"

    # Initialize blueprints
    from .errors import errors as errors_blueprint
    app.register_blueprint(errors_blueprint)

    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    from .admin import admin as admin_blueprint
    app.register_blueprint(admin_blueprint)

    # Setup logging when not in debug mode
    if not app.debug:
        ...  # logging email config
        ...  # log file config

    return app
Now the email is being sent twice!
Can anyone suggest why this is happening? Are there any logs I can dive into to work out what's going on?
I also read about using the @app.before_first_request decorator, but as I'm using the app factory pattern I'm not sure how to incorporate this (see the sketch below).
Thanks!
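On the before_first_request side question: with the application factory pattern the decorator can be applied inside create_app, where app is in scope. A minimal sketch, assuming a Flask version where before_first_request is still available (it is deprecated in newer releases):

def create_app(config_name):
    app = Flask(__name__)
    # ... extension setup as above ...

    @app.before_first_request
    def run_once_per_worker():
        # Runs once per worker process, before the first request that worker
        # handles, so the socket trick above is still needed to limit the
        # scheduler to a single Gunicorn worker.
        pass

    return app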
So it turns out the issue was a silly mistake I made.
I didn't configure supervisor correctly, so my --preload flag was not actually being applied.
After I fixed supervisor and reloaded, my task now runs correctly and I'm receiving one email.
I hope my setup above helps others; being a beginner, this took me a long time to get working.
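For anyone tidying up the same setup: supervisor only picks up edited program definitions after being told to re-read them, so a typical sequence (assuming the program name myapp from the config above) is:

sudo supervisorctl reread
sudo supervisorctl update
sudo supervisorctl restart myapp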

Flask-SocketIO and Celery task, socketio.emit() not emitting to client after task

I'm having trouble emitting to all connected clients with Flask-SocketIO after successfully executing a Celery task.
The idea is the following:
Call a Flask endpoint -> it triggers a Celery task on the worker
# api.py
@app.route('/api/task', methods=['GET'])
def trigger_celery_task():
    celery_task.delay()
    return 'task queued'  # the view needs to return a response
Before finishing the task, the Celery worker calls another Flask API endpoint, which triggers the socket server to emit an event:
# tasks.py
from server.app import create_celery_app  # see below

celery = create_celery_app()

@celery.task
def celery_task():
    # exec some long task
    import requests
    url = 'http://backend:5000/api/update_data'
    requests.get(url)
    return None
# api.py
@app.route('/api/update_data', methods=['GET'])
def trigger_update():
    from server.app import socketio
    socketio.emit('new_data_available', {'message': 'update'})
    return 'ok'  # the view needs to return a response
My flask application gives me the following error message (when 2 clients are connected):
"GET /api/update_data HTTP/1.1" 200
Cannot send to sid 85d...
Cannot send to sid 52c...
However, if I manually call the endpoint /api/update_data the socket server emits the event correctly and all clients receive it:
85d...: Sending packet MESSAGE data 2["new_data_available",{"message":"update"}]
52c...: Sending packet MESSAGE data 2["new_data_available",{"message":"update"}]
"GET /api/update_data HTTP/1.1" 200
My configuration is as follows:
# app.py
# initialize sql-alchemy
db = SQLAlchemy()

# initialize socketIO server
socketio = SocketIO(path='/api/socket-io', engineio_logger=True)

def create_app(config_name):
    # initialize flask app
    app = FlaskAPI(__name__, instance_relative_config=True)

    # configuration
    app.config.from_object(app_config[config_name])
    CORS(app)
    db.init_app(app)
    socketio.init_app(app)

    # add blueprints/routes
    # ...
    return app
def create_celery_app(app=None):
    config_name = os.getenv('APP_SETTINGS')
    app = app or create_app(config_name)
    celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'])
    celery.conf.update(app.config)
    TaskBase = celery.Task

    class ContextTask(TaskBase):
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    return celery
And finally the run config:
# run.py
import os

from server.app import create_app

config_name = os.getenv('APP_SETTINGS')  # config_name = "development"
app = create_app(config_name)

if __name__ == '__main__':
    # app.run(host='0.0.0.0', port=5000, threaded=True)
    from server.app import socketio
    socketio.run(app, host='0.0.0.0', port=5000)
Note that I'm running the complete application in docker containers which are orchestrated by docker-compose, just in case it matters.
Thank you very much, I appreciate any help.
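One approach worth noting for this kind of setup is Flask-SocketIO's message-queue support, which is designed to let external processes such as a Celery worker emit events that the web server then relays to connected clients. A rough sketch, assuming a Redis instance reachable at a placeholder URL:

# app.py (web server side): bind the SocketIO server to a shared message queue.
socketio = SocketIO(path='/api/socket-io', engineio_logger=True,
                    message_queue='redis://redis:6379/0')

# tasks.py (Celery worker side): a write-only SocketIO handle on the same queue.
from flask_socketio import SocketIO

external_socketio = SocketIO(message_queue='redis://redis:6379/0')

@celery.task
def celery_task():
    # ... long-running work ...
    external_socketio.emit('new_data_available', {'message': 'update'})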
