Flask-SQLAlchemy TimeoutError - python

Flask
Python 2.7
Postgres 9
Ubuntu 14.04
I'm using Flask and SQLAlchemy. After 15 consecutive HTTP requests I get:
QueuePool limit of size 5 overflow 10 reached, connection timed out, timeout 30
Then the server stops responding.
This is very similar to:
Flask-SQLAlchemy TimeoutError
I don't have a session engine; as I understand it, Flask-SQLAlchemy should take care of that.
What do I need to configure to support more sessions and to clean up existing ones periodically?
app.py
import models

api_app = Flask(__name__)
api_app.config.from_pyfile(settings.celery_config)
db = SQLAlchemy(api_app)

@api_app.teardown_appcontext
def shutdown_session(exception=None):
    try:
        db.session.close()
    except AttributeError, e:
        print str(e)
    except Exception, e:
        print api_app.name
        print str(e)
@api_app.route('/api/1.0/job/<string:ref>/')
def get_job_by_id(ref):
    """
    :param ref:
    :return:
    """
    try:
        if request.method == 'GET':
            job = models.Job.query.filter(models.Job.reference == ref)
            if job:
                job_ = job.all()
                if len(job_) == 1:
                    return jsonify(job_[0].serialize())
            resp = Response(status=404, mimetype='application/json')
            return resp
        else:
            resp = Response(status=405, mimetype='application/json')
            return resp
    except Exception, e:
        print str(e)
        resp = Response(status=500, mimetype='application/json')
        return resp
models.py
from api_app import db

class Job(db.Model, AutoSerialize, Serializer):
    __tablename__ = 'job'
    __public__ = ('status', 'description', 'reference')

    id = Column(Integer, primary_key=True, server_default=text("nextval('job_id_seq'::regclass)"))
    status = Column(String(40), nullable=False)
    description = Column(String(200))
    reference = Column(String(50))

    def serialize(self):
        d = Serializer.serialize(self)
        del d['id']
        return d

Based on @spicyramen's comment:
increase SQLALCHEMY_POOL_SIZE, the size of the database pool.
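A minimal sketch of what that looks like, together with the companion settings (these are the classic Flask-SQLAlchemy 2.x config keys; newer releases expect them under SQLALCHEMY_ENGINE_OPTIONS, and the values below are illustrative, not prescriptive):

# Pool tuning for Flask-SQLAlchemy 2.x; numbers are illustrative
api_app.config['SQLALCHEMY_POOL_SIZE'] = 20       # connections kept in the pool (default 5)
api_app.config['SQLALCHEMY_MAX_OVERFLOW'] = 20    # extra connections allowed on bursts (default 10)
api_app.config['SQLALCHEMY_POOL_TIMEOUT'] = 30    # seconds to wait for a free connection
api_app.config['SQLALCHEMY_POOL_RECYCLE'] = 3600  # drop and replace connections older than 1h

Note that a bigger pool only hides a leak: if sessions are never returned, any pool size will eventually be exhausted. In the teardown handler, db.session.remove() (rather than close()) resets the scoped session and returns its connection to the pool.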

Related

How to solve flask sqlalchemy connection time out error (queue pool overflow limit)?

I have made a POST API named "/get_report" which takes some input from the user and returns data according to those inputs by doing search queries on the database. If I keep hitting the same API multiple times, say 7-8 times, then on the 9th hit it throws the error "sqlalchemy.exc.TimeoutError: QueuePool limit of size 5 overflow 10 reached, connection timed out, timeout 30.00 (Background on this error at: https://sqlalche.me/e/14/3o7r)".
Here is my main.py:
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, jsonify, request
from contextlib import contextmanager
from sqlalchemy import create_engine, and_, or_
from sqlalchemy.orm import sessionmaker
from helper import *
from models.tx import *

app = Flask(__name__)
db = SQLAlchemy()
DB_URL = 'postgresql://postgres:postgres@localhost/test_db'
engine = create_engine(DB_URL)
Session = sessionmaker(engine)
app.config['SQLALCHEMY_DATABASE_URI'] = DB_URL
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)

@contextmanager
def session_manager():
    s = Session()
    try:
        yield s
    except:
        s.rollback()
        raise
    finally:
        s.close()

@app.route('/get_report', methods=['POST'])
def get_report():
    try:
        vendor = request.form['vendor']
        circle = request.form['circle']
        c_name = request.form['c_name']
        c_port = request.form['c_port']
        all_ssr = request.form['all_ssr']
        data_list = []
        table_name = get_table_by_vendor(vendor, circle)
        if table_name != None:
            with session_manager() as s:
                if all_ssr == 'SSR':
                    result = s.query(table_name).distinct(table_name.label, table_name.c_node,
                                                          table_name.c_port, table_name.z_port, table_name.z_node) \
                        .filter(and_((or_(and_(table_name.c_node == c_name, table_name.c_port == c_port),
                                          and_(table_name.z_node == c_name, table_name.z_port == c_port))),
                                     (table_name.__table__.c[field_name].like('SSR%')))).all()
                elif all_ssr == 'ALL':
                    # Get all the records
                    result = s.query(table_name).distinct(table_name.label, table_name.c_node,
                                                          table_name.c_port, table_name.z_port, table_name.z_node) \
                        .filter(or_(and_(table_name.c_node == c_name, table_name.c_port == c_port),
                                    and_(table_name.z_node == c_name, table_name.z_port == c_port))).all()
                else:
                    result = []
                # Preparing JSON data to send
                for item in result:
                    port = c_port if c_port != '' else (item.c_port if item.c_node == c_name else item.z_port)
                    data_dict = {'user': item.user, 'port': item.port, 'rate': item.rate, 'down': item.down}
                    data_list.append(data_dict)
        response = {"status": "Success", "message": "Successfully Fetched", "data": data_list}
        return jsonify(response)
    except Exception as e:
        # the except clause was cut off in the snippet as posted; restored minimally
        return jsonify({"status": "Error", "message": str(e), "data": []})

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True)
Here is my models/tx.py:
class C_Segment(db.Model):
    __tablename__ = 'c_segment'

    id = db.Column(db.Integer, primary_key=True)
    c_node = db.Column(db.String(350), nullable=False)
    c_port = db.Column(db.String(200), nullable=False)
    label = db.Column(db.String(350), nullable=False)
    z_port = db.Column(db.String(200), nullable=False)
    z_node = db.Column(db.String(200), nullable=False)
    user = db.Column(db.String(200), nullable=False)
    down = db.Column(db.String(200), nullable=False)
    port = db.Column(db.String(200), nullable=False)
    rate = db.Column(db.String(200), nullable=False)

    def __repr__(self):
        return '<id {}>'.format(self.id)
I have searched a lot and found plenty of related content on Google, but none of it worked for me. I have tried increasing the pool size and the overflow size as well, but nothing changed.
I am not able to understand where the exact issue is. I have been stuck on this for the last two days and have gone through many Stack Overflow posts and the Flask-SQLAlchemy session documentation.
You have a fixed set of connections in the pool, e.g. 15 (5 in the pool plus 10 of possible overflow). If processing a request takes longer than the connection checkout timeout (how long you will wait for an available connection, 30 s by default), you get this error: all 15 connections are busy, so your request waits for an available one, but only for 30 s; after that the TimeoutError is raised.
Have you thought about query optimization? How many records do you have in the table? How long does a request take? You can profile your SQL query using:
EXPLAIN ANALYZE <query>
You can of course increase the pool timeout, e.g. to 300 s (5 min):
app.config['SQLALCHEMY_POOL_TIMEOUT'] = 300
or make the pool size bigger, but neither would genuinely solve your problem. Such a long response time is really bad UX and limits concurrent access to your application.
So I recommend you make your query faster.
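If you do keep your own create_engine() call, as in the question, the same knobs are available as keyword arguments of the default QueuePool; a minimal sketch (the values are illustrative):

from sqlalchemy import create_engine

engine = create_engine(
    DB_URL,
    pool_size=10,        # connections held open in the pool (default 5)
    max_overflow=20,     # temporary extra connections above pool_size (default 10)
    pool_timeout=60,     # seconds to wait for a checkout before TimeoutError (default 30)
    pool_pre_ping=True,  # validate a connection before handing it out
)

But again: treat this as a band-aid; the durable fix is a faster query.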

raise NotImplementedError NotImplementedError

I'm using PyCharm to write a Python 3 web app project using the Tornado web framework.
The listing service has been built already. I need to build the remaining two components: the user service and the public API layer. The implementation of the listing service can serve as a good starting point for learning how to structure a web application using the Tornado web framework.
I am required to use Tornado's built-in framework for HTTP requests.
The error occurs at listening (app.listen(options.port)) when I try to run the program:
Traceback (most recent call last):
File "D:/Bill/python/Tornado/99-python-exercise-master/listing_service.py", line 203, in <module>
app.listen(options.port)
File "C:\Program Files\Python38\lib\site-packages\tornado\web.py", line 2116, in listen
server.listen(port, address)
File "C:\Program Files\Python38\lib\site-packages\tornado\tcpserver.py", line 152, in listen
self.add_sockets(sockets)
File "C:\Program Files\Python38\lib\site-packages\tornado\tcpserver.py", line 165, in add_sockets
self._handlers[sock.fileno()] = add_accept_handler(
File "C:\Program Files\Python38\lib\site-packages\tornado\netutil.py", line 279, in add_accept_handler
io_loop.add_handler(sock, accept_handler, IOLoop.READ)
File "C:\Program Files\Python38\lib\site-packages\tornado\platform\asyncio.py", line 100, in add_handler
self.asyncio_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ)
File "C:\Program Files\Python38\lib\asyncio\events.py", line 501, in add_reader
raise NotImplementedError
NotImplementedError
code:
import tornado.gen
import tornado.ioloop
import tornado.web
import tornado.log
import tornado.options
import sqlite3
import logging
import json
import time


class App(tornado.web.Application):
    def __init__(self, handlers, **kwargs):
        super().__init__(handlers, **kwargs)
        # Initialising db connection
        self.db = sqlite3.connect("listings.db")
        self.db.row_factory = sqlite3.Row
        self.init_db()

    def init_db(self):
        cursor = self.db.cursor()
        # Create table
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS 'listings' ("
            + "id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,"
            + "user_id INTEGER NOT NULL,"
            + "listing_type TEXT NOT NULL,"
            + "price INTEGER NOT NULL,"
            + "created_at INTEGER NOT NULL,"
            + "updated_at INTEGER NOT NULL"
            + ");"
        )
        self.db.commit()


class BaseHandler(tornado.web.RequestHandler):
    def write_json(self, obj, status_code=200):
        self.set_header("Content-Type", "application/json")
        self.set_status(status_code)
        self.write(json.dumps(obj))


# /listings
class ListingsHandler(BaseHandler):
    @tornado.gen.coroutine
    def get(self):
        # Parsing pagination params
        page_num = self.get_argument("page_num", 1)
        page_size = self.get_argument("page_size", 10)
        try:
            page_num = int(page_num)
        except:
            logging.exception("Error while parsing page_num: {}".format(page_num))
            self.write_json({"result": False, "errors": "invalid page_num"}, status_code=400)
            return

        try:
            page_size = int(page_size)
        except:
            logging.exception("Error while parsing page_size: {}".format(page_size))
            self.write_json({"result": False, "errors": "invalid page_size"}, status_code=400)
            return

        # Parsing user_id param
        user_id = self.get_argument("user_id", None)
        if user_id is not None:
            try:
                user_id = int(user_id)
            except:
                self.write_json({"result": False, "errors": "invalid user_id"}, status_code=400)
                return

        # Building select statement
        select_stmt = "SELECT * FROM listings"
        # Adding user_id filter clause if param is specified
        if user_id is not None:
            select_stmt += " WHERE user_id=?"
        # Order by and pagination
        limit = page_size
        offset = (page_num - 1) * page_size
        select_stmt += " ORDER BY created_at DESC LIMIT ? OFFSET ?"

        # Fetching listings from db
        if user_id is not None:
            args = (user_id, limit, offset)
        else:
            args = (limit, offset)
        cursor = self.application.db.cursor()
        results = cursor.execute(select_stmt, args)

        listings = []
        for row in results:
            fields = ["id", "user_id", "listing_type", "price", "created_at", "updated_at"]
            listing = {field: row[field] for field in fields}
            listings.append(listing)

        self.write_json({"result": True, "listings": listings})

    @tornado.gen.coroutine
    def post(self):
        # Collecting required params
        user_id = self.get_argument("user_id")
        listing_type = self.get_argument("listing_type")
        price = self.get_argument("price")

        # Validating inputs
        errors = []
        user_id_val = self._validate_user_id(user_id, errors)
        listing_type_val = self._validate_listing_type(listing_type, errors)
        price_val = self._validate_price(price, errors)
        time_now = int(time.time() * 1e6)  # Converting current time to microseconds

        # End if we have any validation errors
        if len(errors) > 0:
            self.write_json({"result": False, "errors": errors}, status_code=400)
            return

        # Proceed to store the listing in our db
        cursor = self.application.db.cursor()
        cursor.execute(
            "INSERT INTO 'listings' "
            + "('user_id', 'listing_type', 'price', 'created_at', 'updated_at') "
            + "VALUES (?, ?, ?, ?, ?)",
            (user_id_val, listing_type_val, price_val, time_now, time_now)
        )
        self.application.db.commit()

        # Error out if we fail to retrieve the newly created listing
        if cursor.lastrowid is None:
            self.write_json({"result": False, "errors": ["Error while adding listing to db"]}, status_code=500)
            return

        listing = dict(
            id=cursor.lastrowid,
            user_id=user_id_val,
            listing_type=listing_type_val,
            price=price_val,
            created_at=time_now,
            updated_at=time_now
        )
        self.write_json({"result": True, "listing": listing})

    def _validate_user_id(self, user_id, errors):
        try:
            user_id = int(user_id)
            return user_id
        except Exception as e:
            logging.exception("Error while converting user_id to int: {}".format(user_id))
            errors.append("invalid user_id")
            return None

    def _validate_listing_type(self, listing_type, errors):
        if listing_type not in {"rent", "sale"}:
            errors.append("invalid listing_type. Supported values: 'rent', 'sale'")
            return None
        else:
            return listing_type

    def _validate_price(self, price, errors):
        # Convert string to int
        try:
            price = int(price)
        except Exception as e:
            logging.exception("Error while converting price to int: {}".format(price))
            errors.append("invalid price. Must be an integer")
            return None
        if price < 1:
            errors.append("price must be greater than 0")
            return None
        else:
            return price


# /listings/ping
class PingHandler(tornado.web.RequestHandler):
    @tornado.gen.coroutine
    def get(self):
        self.write("pong!")


def make_app(options):
    return App([
        (r"/listings/ping", PingHandler),
        (r"/listings", ListingsHandler),
    ], debug=options.debug)


if __name__ == "__main__":
    # Define settings/options for the web app
    # Specify the port number to start the web app on (default value is port 6000)
    tornado.options.define("port", default=6000)
    # Specify whether the app should run in debug mode
    # Debug mode restarts the app automatically on file changes
    tornado.options.define("debug", default=True)

    # Read settings/options from command line
    tornado.options.parse_command_line()

    # Access the settings defined
    options = tornado.options.options

    # Create web app
    app = make_app(options)
    app.listen(options.port)
    logging.info("Starting listing service. PORT: {}, DEBUG: {}".format(options.port, options.debug))

    # Start event loop
    tornado.ioloop.IOLoop.instance().start()
How to fix this problem?
Python 3.8 made a backwards-incompatible change to the asyncio package used by Tornado. Applications that use Tornado on Windows with Python 3.8 must call asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) at the beginning of their main file/function. (as documented on the home page of tornadoweb.org)
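In practice that means something like this at the very top of the main file, before any Tornado or asyncio machinery is touched (the platform guard is an addition for portability; a Windows-only script can drop it):

import asyncio
import sys

if sys.platform == "win32":
    # Python 3.8 made ProactorEventLoop the Windows default, and it does not
    # implement add_reader(), which Tornado relies on; switch to the
    # selector-based loop instead.
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())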

How to set a request limit on a JWT token

I'm working on an API with Python and Flask, implementing JWT with a timeout for expiration, but I'd also like to set a request limit, so the token becomes invalid if the time runs out or if the token has been used in five requests.
I have the expiration timeout working, but I can't find how to implement expiration after five requests. Thanks for the help.
The code so far:
from flask import *
import jwt
import datetime
from flask_pymongo import PyMongo
from functools import wraps
import hashlib

app = Flask(__name__)

app.config['MONGO_DBNAME'] = 'MONGOCONEX'
app.config['MONGO_URI'] = 'mongodb://localhost:27017/MONGOCONEX'
app.config['log_log_1'] = 'LOGKEYCONNECT'
app.config['key1'] = 'SECRECTKEY'
app.config['key2'] = 'PASSKEY'

mongo = PyMongo(app)

def token_required(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        token = request.args.get('token')
        if not token:
            return jsonify({'error': 402, 'message': 'Token is missing'})
        try:
            data = jwt.decode(token, app.config['key1'])
        except:
            return jsonify({'error': 403, 'message': 'Token Invalid'})
        return f(*args, **kwargs)
    return decorated

@app.route('/results', methods=['GET'])
@token_required
def get_all_stars():
    results = mongo.db.resultados
    output = []
    date_start = datetime.datetime.now() - datetime.timedelta(days=1*365)
    date_end = datetime.datetime.now() + datetime.timedelta(days=1*365)
    for s in results.find():
        # print(s)
        if date_start <= s['day'] <= date_end:
            output.append({'day': s['day'], 'monthly_prediction': s['monthly_prediction'], 'percent_prediction': s['percent_prediction']})
    return jsonify({'result': output})

@app.route('/login', methods=['GET'])
def login():
    log_key = request.args.get('l_k')
    password_k = request.args.get('p_k')
    md5_hash = hashlib.md5()
    md5_hash.update(b""+app.config['key2']+"")
    encoded_pass_key = md5_hash.hexdigest()
    if (log_key == app.config['log_log_1']) and (password_k == encoded_pass_key):
        token = jwt.encode({'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=2)}, app.config['key1'])
        return jsonify({'token': token.decode('UTF-8')})
    return jsonify({'error': 401, 'description': 'Not verified', 'Wrong Auth': 'Auth Required'})

if __name__ == '__main__':
    try:
        app.run(debug=True)
    except Exception as e:
        print('Error: ' + str(e))
I see you're using Mongo. The workflow is: store a counter along with the token in the Mongo database and count how many times the token has been used, then add logic to compare which comes first, the time limit or the usage limit. If the token has been used five times, you can revoke it and generate a new one, or follow whatever other workflow you want. Here's a further reference on revoking/blacklisting the token after it has been accessed five times: https://flask-jwt-extended.readthedocs.io/en/stable/blacklist_and_token_revoking/
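A rough sketch of that idea against the code in the question (the token_usage collection name and the five-use limit are illustrative choices, not an established API):

from functools import wraps
from pymongo import ReturnDocument

def token_required(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        token = request.args.get('token')
        if not token:
            return jsonify({'error': 402, 'message': 'Token is missing'})
        try:
            jwt.decode(token, app.config['key1'])
        except:
            return jsonify({'error': 403, 'message': 'Token Invalid'})
        # Atomically count this use of the token; upsert creates the
        # counter document on the first use.
        usage = mongo.db.token_usage.find_one_and_update(
            {'_id': token},
            {'$inc': {'uses': 1}},
            upsert=True,
            return_document=ReturnDocument.AFTER,
        )
        if usage['uses'] > 5:
            return jsonify({'error': 403, 'message': 'Token request limit reached'})
        return f(*args, **kwargs)
    return decorated

Expired tokens already fail at jwt.decode(), so their counter documents can be cleaned up lazily or with a Mongo TTL index.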

FLASK REST API returns 400 on POST

I'm building a REST API for a simple Todo application using Flask, with SQLAlchemy as my ORM. I am testing my API using Postman, on a Windows 10 64-bit machine.
A GET request works and returns the data that I've entered into my database using Python.
I'd like to try adding a task now, but when I POST my request, I receive an error.
My route in Flask looks like this:
# add task
@app.route('/todo/api/v1.0/tasks', methods=['POST'])
def create_task():
    if not request.json or not 'title' in request.json:
        raise InvalidUsage('Not a valid task!', status_code=400)
    task = {
        'title': request.json['title'],
        'description': request.json['description'],
        'done': False
    }
    Todo.add_todo(task)
    return jsonify({'task': task}), 201
And the method it's calling on the Todo object looks like this:
def add_todo(_title, _description):
    new_todo = Todo(title=_title, description=_description, completed=0)
    db.session.add(new_todo)
    db.session.commit()
What I've tried
I thought that maybe the ' characters in my Postman params were causing an issue, so I removed them, but I still get the same error.
Then I thought that maybe the way Postman was sending the POST was incorrect, so I checked that the Content-Type header was correct. It is set to application/json.
Finally, to confirm that the issue was that Flask didn't like the request, I removed the check in the add-task route that makes sure the request has a title, so it looks like this:
if not request.json:
And I get the same error. So I think the problem must be with how I'm actually sending the POST rather than some kind of formatting issue.
My entire code looks like this.
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import json
from flask import jsonify
from flask import request

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todo.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)

class Todo(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(300), unique=False, nullable=False)
    description = db.Column(db.String(), unique=False, nullable=False)
    completed = db.Column(db.Boolean, nullable=False)

    def json(self):
        return {'id': self.id, 'title': self.title, 'description': self.description, 'completed': self.completed}

    def add_todo(_title, _description):
        new_todo = Todo(title=_title, description=_description, completed=0)
        db.session.add(new_todo)
        db.session.commit()

    def get_all_tasks():
        return [Todo.json(todo) for todo in Todo.query.all()]

    def get_task(_id):
        task = Todo.query.filter_by(id=_id).first()
        if task is not None:
            return Todo.json(task)
        else:
            raise InvalidUsage('No task found', status_code=400)

    def __repr__(self):
        return f"Todo('{self.title}')"

class InvalidUsage(Exception):
    status_code = 400

    def __init__(self, message, status_code=None, payload=None):
        Exception.__init__(self)
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        rv = dict(self.payload or ())
        rv['message'] = self.message
        return rv

@app.route('/')
def hello_world():
    return 'Hello to the World of Flask!'

# get all tasks
@app.route('/todo/api/v1.0/tasks', methods=['GET'])
def get_tasks():
    return_value = Todo.get_all_tasks()
    return jsonify({'tasks': return_value})

# get specific task
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])
def get_task(task_id):
    task = Todo.get_task(task_id)
    # if len(task) == 0:
    #     raise InvalidUsage('No such task', status_code=404)
    return jsonify({'task': task})

# add task
@app.route('/todo/api/v1.0/tasks', methods=['POST'])
def create_task():
    if not request.json or not 'title' in request.json:
        raise InvalidUsage('Not a valid task!', status_code=400)
    task = {
        'title': request.json['title'],
        'description': request.json['description'],
        'done': False
    }
    Todo.add_todo(task)
    return jsonify({'task': task}), 201

# update task
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['PUT'])
def update_task(task_id):
    task = [task for task in tasks if task['id'] == task_id]
    if len(task) == 0:
        raise InvalidUsage('No provided updated', status_code=400)
    if not request.json:
        raise InvalidUsage('request not valid json', status_code=400)
    if 'title' in request.json and type(request.json['title']) != unicode:
        raise InvalidUsage('title not unicode', status_code=400)
    if 'description' in request.json and type(request.json['description']) != unicode:
        raise InvalidUsage('description not unicode', status_code=400)
    if 'done' in request.json and type(request.json['done']) is not bool:
        raise InvalidUsage('done not boolean', status_code=400)
    task[0]['title'] = request.json.get('title', task[0]['title'])
    task[0]['description'] = request.json.get('description', task[0]['description'])
    task[0]['done'] = request.json.get('done', task[0]['done'])
    return jsonify({'task': task[0]})

# delete task
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['DELETE'])
def delete_task(task_id):
    task = [task for task in tasks if task['id'] == task_id]
    if len(task) == 0:
        raise InvalidUsage('No task to delete', status_code=400)
    tasks.remove(task[0])
    return jsonify({'result': True})

@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response

if __name__ == '__main__':
    app.run(debug=True)
EDIT:
Turns out I wasn't setting the request type in Postman correctly. I've updated the header to 'application/json'. Now I'm receiving a different error:
Bad Request Failed to decode JSON object: Expecting value: line 1
column 1 (char 0)
I've tried all the previous steps as before, but I continue to get this error.
EDIT 2:
Per a response below, I tried putting the values into the body of the POST, but I still get back a 400 response.
From the image [second postman screenshot] it looks like you pass the data in the query string, but create_task() expects it in the request body.
Either replace all occurrences of request.json with request.args in create_task() (to make it work with query params), or leave it as it is and send the data in the request body:
curl -X POST http://localhost:5000/todo/api/v1.0/tasks \
-H "Content-Type: application/json" \
-d '{"title":"Learn more flask","description":"its supper fun"}'
Also, take a look at Get the data received in a Flask request.
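If it still isn't clear what the server receives, a throwaway debug route (hypothetical, not part of the final API) can log the raw body before any JSON parsing happens:

@app.route('/todo/api/v1.0/tasks/debug', methods=['POST'])
def debug_task():
    print('Content-Type:', request.content_type)
    print('Raw body:', request.get_data())                # raw request bytes
    print('Parsed JSON:', request.get_json(silent=True))  # None instead of 400 on failure
    return jsonify({'ok': True})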
EDITED
Update your add_todo to something like:
@classmethod
def add_todo(cls, task):
    new_todo = cls(title=task["title"], description=task["description"], completed=0)
    db.session.add(new_todo)
    db.session.commit()
Related: generalised insert into sqlalchemy using dictionary.

Flask-SQLAlchemy update record automatically after specific time

I have a db model like this:
class Payment(db.Model):
    id = db.Column(db.Integer(), primary_key=True)
    user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))
    ticket_status = db.Column(db.Enum(TicketStatus, name='ticket_status', default=TicketStatus.UNUSED))
    departure_time = db.Column(db.Date)
I want to change the value of every ticket_status once datetime.utcnow() passes the date in departure_time.
I tried code like this:
class TicketStatus(enum.Enum):
    UNUSED = 'UNUSED'
    USED = 'USED'
    EXPIRED = 'EXPIRED'

    def __repr__(self):
        return str(self.value)

class Payment(db.Model):
    id = db.Column(db.Integer(), primary_key=True)
    user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))
    ticket_status = db.Column(db.Enum(TicketStatus, name='ticket_status', default=TicketStatus.UNUSED))
    departure_time = db.Column(db.Date)

    # TODO | set ticket expirations time
    def __init__(self):
        if datetime.utcnow() > self.departure_time:
            self.ticket_status = TicketStatus.EXPIRED.value
            try:
                db.session.add(self)
                db.session.commit()
            except Exception as e:
                db.session.rollback()
I also tried this:
def ticket_expiration(self, payment_id):
    now = datetime.utcnow().strftime('%Y-%m-%d')
    payment = Payment.query.filter_by(id=payment_id).first()
    if payment.ticket_status.value == TicketStatus.USED.value:
        pass
    elif payment and str(payment.departure_time) < now:
        payment.ticket_status = TicketStatus.EXPIRED.value
    elif payment and str(payment.departure_time) >= now:
        payment.ticket_status = TicketStatus.UNUSED.value
    try:
        db.session.commit()
    except Exception as e:
        db.session.rollback()
    return str('ok')
But it seems to have no effect once datetime.utcnow() passes the date in departure_time.
So the point of my question is: how do I change the value of a row automatically after a set amount of time?
Finally I figured this out using flask_apscheduler. Here are the snippets of my code that solved it:
Install flask_apscheduler:
pip3 install flask_apscheduler
Create a new module tasks.py:
from datetime import datetime
from flask_apscheduler import APScheduler
from app import db
from app.models import Payment, TicketStatus

scheduler = APScheduler()

def ticket_expiration():
    utc_now = datetime.utcnow().strftime('%Y-%m-%d')
    app = scheduler.app
    with app.app_context():
        payment = Payment.query.all()
        for data in payment:
            try:
                if data.ticket_status.value == TicketStatus.USED.value:
                    pass
                elif str(data.departure_time) < utc_now:
                    data.ticket_status = TicketStatus.EXPIRED.value
                elif str(data.departure_time) >= utc_now:
                    data.ticket_status = TicketStatus.UNUSED.value
            except Exception as e:
                print(str(e))
        try:
            db.session.commit()
        except Exception as e:
            db.session.rollback()
    return str('ok')
Then register the scheduler with the Flask app in __init__.py:
def create_app(config_class=Config):
    app = Flask(__name__)
    app.config.from_object(Config)

    # The other packages...
    # The other packages...
    scheduler.init_app(app)
    scheduler.start()
    return app

# import from other_module...
# To avoid a SQLAlchemy circular import, do the import at the bottom.
from app.tasks import scheduler
And here is config.py:
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore

class Config(object):
    # The others config...
    # The others config...

    # Flask-apscheduler
    JOBS = [
        {
            'id': 'ticket_expiration',
            'func': 'app.tasks:ticket_expiration',
            'trigger': 'interval',
            'hours': 1,  # call the task function every 1 hour
            'replace_existing': True
        }
    ]
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url='sqlite:///flask_context.db')
    }
    SCHEDULER_API_ENABLED = True
In the config above, the task function is called every 1 hour; depending on your case you can use seconds or other intervals instead. For more information about setting the interval time, see the flask_apscheduler documentation.
I hope this answer helps someone facing this in the future.
You may replace your status column with just a used column containing a Boolean value, and make a hybrid attribute for the state: https://docs.sqlalchemy.org/en/13/orm/extensions/hybrid.html
from datetime import datetime
from sqlalchemy.ext.hybrid import hybrid_property

class Payment(db.Model):
    id = db.Column(db.Integer(), primary_key=True)
    user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))
    used = db.Column(db.Boolean(), default=False)
    departure_time = db.Column(db.Date)

    @hybrid_property
    def status(self):
        # departure_time is a Date, so compare against today's date
        if datetime.utcnow().date() > self.departure_time:
            return "EXPIRED"
        elif self.used:
            return "USED"
        return "UNUSED"
