Flask-SQLAlchemy auto-commits an UPDATE while inserting multiple instances? - python

I'm trying to save some columns (e.g. tags, models) as JSON-encoded strings, and I'd like to keep them decoded whenever they are in use.
I have read some references that suggest adding configuration to disable autocommit and autoflush, but it doesn't work.
When an instance has been added to db.session and a value on it is then changed, the ORM still tries to emit an UPDATE operation and then raises a TypeError.
Here is my code.
```python
import json

from sqlalchemy import orm
from flask_sqlalchemy import SQLAlchemy

session_options = dict(
    bind=None,
    autoflush=False,
    autocommit=False,
    expire_on_commit=False,
)
db = SQLAlchemy(session_options=session_options)


class Sample(db.Model):
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # tags, models: strings produced by json.dumps(array)
    tags = db.Column(db.String(128), default='')
    models = db.Column(db.String(128), default='')

    def __init__(self, **kwargs):
        cls = self.__class__
        super(cls, self).__init__(**kwargs)
        self.formatting()

    @orm.reconstructor
    def init_on_load(self):
        self.formatting()

    def formatting(self):
        self.tags = json.loads(self.tags)
        self.models = json.loads(self.models)

    def save(self):
        self.tags = json.dumps(self.tags)
        self.models = json.dumps(self.models)
        db.session.add(self)
        db.session.commit()
        self.formatting()
        ## fixme !!!
        ## calling formatting() after save dirties the instance again,
        ## which triggers an auto-flushed UPDATE and raises TypeError
```
Thank you :)
P.S. Flask-SQLAlchemy==2.3.2

This error was caused by not calling db.session.close() after db.session.commit().
I had been told that db.session.close() is called automatically inside db.session.commit(), but reality contradicted that belief.
So I dug into the SQLAlchemy source code and found that db.session is an instance of sqlalchemy.orm.scoping.scoped_session, NOT of sqlalchemy.orm.SessionTransaction.
Here is the source code of sqlalchemy.orm.SessionTransaction.commit():
```python
def commit(self):
    self._assert_active(prepared_ok=True)
    if self._state is not PREPARED:
        self._prepare_impl()

    if self._parent is None or self.nested:
        for t in set(self._connections.values()):
            t[1].commit()

        self._state = COMMITTED
        self.session.dispatch.after_commit(self.session)

        if self.session._enable_transaction_accounting:
            self._remove_snapshot()

    self.close()
    return self._parent
```
It's really confusing: SessionTransaction.commit() does call self.close(), but that closes the transaction object, not the Session itself, so committed instances stay attached to the session and later changes to them are flushed as an UPDATE.
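Given that, a save() that detaches its instance after committing avoids the stray UPDATE. A minimal sketch of the corrected method (my reading of the fix, not tested against every Flask-SQLAlchemy version):

```python
def save(self):
    self.tags = json.dumps(self.tags)
    self.models = json.dumps(self.models)
    db.session.add(self)
    db.session.commit()
    # Session.close() expunges all instances, so the formatting() call
    # below mutates a detached object and no UPDATE is auto-flushed.
    db.session.close()
    self.formatting()
```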
If you want to reproduce this error, here is the test code:
"""
# snippet for testing <class:Sample>
"""
from flask import Flask
app = Flask(__name__)
app.config.from_mapping(
SQLALCHEMY_ECHO=True,
SQLALCHEMY_TRACK_MODIFICATIONS=False,
SQLALCHEMY_DATABASE_URI='sqlite:///test_orm.sqlite.db',
)
db.init_app(app=app)
db.app = app
db.create_all()
d1 = dict(
tags='["python2","flask"]',
models='["m1"]'
)
m1 = Sample(**d1)
print(1111, type(m1.tags), m1.tags)
m1.save()
print(1112, type(m1.tags), m1.tags)
dm1 = Sample.query.filter(Sample.id == m1.id).all()[0]
print(1113, dm1, type(dm1.tags), dm1.tags)
## fixme[Q1] !!!
## if not continue with $d2, it won't raise error of UPDATE $d1
d2 = dict(
tags='["python3","flask"]',
models='["m2", "m3"]'
)
m2 = Sample(**d2)
print(2221, type(m2.tags), m2.tags)
## fixme[Q1] !!!
# db.session.close()
## If session was not closed, error raise here.
m2.save()
print(2222, type(m2.tags), m2.tags)
dm2 = Sample.query.filter(Sample.id == m2.id).all()[0]
print(2223, dm2, type(dm2.tags), dm2.tags)
Thanks for reading; I hope this resolves the same confusion for you.
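As an alternative to the manual dumps/loads dance entirely, SQLAlchemy's TypeDecorator can keep the column decoded in Python while storing JSON text in the database. A minimal sketch (the JSONEncodedList name is mine, not from the question):

```python
import json

from sqlalchemy.types import TypeDecorator, String

class JSONEncodedList(TypeDecorator):
    """Stores a Python list as a JSON string and decodes it on load."""
    impl = String(128)

    def process_bind_param(self, value, dialect):
        # Called when the value is written to the database.
        return json.dumps(value if value is not None else [])

    def process_result_value(self, value, dialect):
        # Called when the value is loaded from the database.
        return json.loads(value) if value else []

# Hypothetical usage in the model above, replacing the String columns:
# tags = db.Column(JSONEncodedList, default=list)
```

With this, no formatting()/save() juggling is needed and the session never sees list values in a plain String column.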

Related

"Does not link from element" error when eager loading a relationship from an aliased table

When querying a table, I'd like to eager load a set of columns. Prior to 1.3.x I could do it with the code below, but now I'm getting:

```
sqlalchemy.exc.ArgumentError: Attribute "AliasedClass_Blueprint.engineer" does not link from element "mapped class
Blueprint->blueprints". Did you mean to use Building.blueprint.of_type(AliasedClass_Blueprint)?
```
The query in question is set up as follows:
```python
def doQuery():
    building = aliased(Building)
    blueprint = aliased(Blueprint, name="blueprint")
    engineer = aliased(Engineer, name="engineer")

    with sessionScope() as session:
        return session.query(building)\
            .join(blueprint, blueprint.id==building.blueprintId)\
            .outerjoin(engineer, blueprint.engineerId==engineer.id)\
            .options(contains_eager(building.blueprint, alias=blueprint))\
            .options(contains_eager(building.blueprint, alias=blueprint)
                     # The error shows up here
                     .contains_eager(blueprint.engineer, alias=engineer))
```
The error suggests using building.blueprint.of_type(blueprint), which seems to work properly, but it appears to accomplish something similar to what alias=blueprint does. Does of_type(x) replace the alias=x parameter?
Below is a functioning example with both a working function and one that reproduces the error:
```python
from contextlib import contextmanager

from sqlalchemy import create_engine, inspect, Column, Integer, String, DateTime, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship, aliased, contains_eager
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Engineer(Base):
    __tablename__ = "engineers"
    id = Column(Integer, primary_key=True)
    name = Column(String)

    def __repr__(self):
        return self.name

class Blueprint(Base):
    __tablename__ = "blueprints"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    engineerId = Column(Integer, ForeignKey('engineers.id'))
    engineer = relationship(Engineer, foreign_keys=[engineerId], backref="outputBlueprints")

    def __repr__(self):
        return self.name

class Building(Base):
    __tablename__ = "buildings"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    blueprintId = Column(Integer, ForeignKey('blueprints.id'))
    blueprint = relationship(Blueprint, foreign_keys=[blueprintId], remote_side=[Blueprint.id], backref='buildings')

    def __repr__(self):
        return self.name

engine = create_engine('sqlite:///:memory:', echo=True)
Base.metadata.create_all(engine)
sessionFactory = sessionmaker(bind=engine, expire_on_commit=False)

@contextmanager
def sessionScope():
    try:
        session = sessionFactory()
        yield session
        session.commit()
    except Exception as e:
        session.rollback()
        raise
    finally:
        session.close()

def doQueryWorking():
    building = aliased(Building)
    blueprint = aliased(Blueprint, name="blueprint")
    engineer = aliased(Engineer, name="engineer")

    with sessionScope() as session:
        return session.query(building)\
            .join(blueprint, blueprint.id==building.blueprintId)\
            .outerjoin(engineer, blueprint.engineerId==engineer.id)\
            .options(contains_eager(building.blueprint.of_type(blueprint)))\
            .options(contains_eager(building.blueprint.of_type(blueprint))
                     .contains_eager(blueprint.engineer, alias=engineer))

def doQueryError():
    building = aliased(Building)
    blueprint = aliased(Blueprint, name="blueprint")
    engineer = aliased(Engineer, name="engineer")

    with sessionScope() as session:
        return session.query(building)\
            .join(blueprint, blueprint.id==building.blueprintId)\
            .outerjoin(engineer, blueprint.engineerId==engineer.id)\
            .options(contains_eager(building.blueprint, alias=blueprint))\
            .options(contains_eager(building.blueprint, alias=blueprint)
                     .contains_eager(blueprint.engineer, alias=engineer))

buildings = doQueryError()
```
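To the actual question: as far as I can tell, yes; newer SQLAlchemy releases deprecate the alias parameter of contains_eager() in favor of of_type(). A sketch of the query with both options expressed that way (same aliases as above, untested):

```python
# Sketch: both contains_eager() options via of_type(), no alias= parameter.
query = (
    session.query(building)
    .join(blueprint, blueprint.id == building.blueprintId)
    .outerjoin(engineer, blueprint.engineerId == engineer.id)
    .options(
        contains_eager(building.blueprint.of_type(blueprint))
        .contains_eager(blueprint.engineer.of_type(engineer))
    )
)
```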

FastApi/Sqlalchemy "class is not mapped" error

I'm getting this error when I try to POST and create a new record using FastAPI and SQLAlchemy:

```
Class 'endpoints.resultados.ResultadoPruebaSerializer.InsertTResultadoRegla' is not mapped
```

Here is my code, thanks for your help.
SQLAlchemy models
```python
from sqlalchemy import Column, Integer, String, Date, ForeignKey, inspect
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import as_declarative

@as_declarative()
class Base:
    def as_dict(self) -> dict:
        return {c.key: getattr(self, c.key) for c in inspect(self).mapper.column_attrs}

class TResultadoRegla(Base):
    __tablename__ = 'T_resultadoReglas'

    idresultadoReglas = Column(Integer, primary_key=True)
    idtareas = Column(ForeignKey('T_tareas.idtareas', ondelete='RESTRICT', onupdate='RESTRICT'), nullable=False, index=True)
    idreglas = Column(ForeignKey('M_reglas.idreglas', ondelete='RESTRICT', onupdate='RESTRICT'), index=True)
    fecCreacion = Column(Date)
    resultado = Column(String(30))

    M_regla = relationship('MRegla', backref="resultados")
    T_tarea = relationship('TTarea')
```
Pydantic CRUD and schemas

```python
import datetime
from datetime import date
from typing import Dict

from pydantic import BaseModel
from sqlalchemy.orm import Session

class InsertTResultadoRegla(BaseModel):
    idtareas: int
    idreglas: int
    fecCreacion: date
    resultado: str

    class Config:
        orm_mode = True

def get_regla(db: Session, skip: int = 0, limit: int = 100):
    return db.query(Regla).offset(skip).limit(limit).all()

def check_resultados(db: Session, user_resultados: Dict):
    db_item = None
    for resultado in user_resultados:
        regla = get_regla_per_id(db, resultado["idreglas"])
        regla = regla.as_dict()
        if resultado["resultado"] >= int(regla["minimo"]) and resultado["resultado"] <= int(regla["maximo"]):
            db_item = InsertTResultadoRegla(idtareas=1, idreglas=regla["idreglas"], fecCreacion=datetime.date.today(), resultado="CUMPLE")
            db.add(db_item)
            db.commit()
            db.refresh(db_item)
        else:
            db_item = InsertTResultadoRegla(idtareas=1, idreglas=regla["idreglas"], fecCreacion=datetime.date.today(), resultado="NO CUMPLE")
            db.add(db_item)
            db.commit()
            db.refresh(db_item)
    return db_item
```
The issue is where you add the item to the db, specifically:

```python
db.add(db_item)
```

db_item here is the Pydantic schema object, not a mapped SQLAlchemy model instance. You have to add it as shown below:
```python
db_item = InsertTResultadoRegla(idtareas=1, idreglas=regla["idreglas"], fecCreacion=datetime.date.today(), resultado="CUMPLE")
actual_db_item = models.TableName(**db_item.dict())
db.add(actual_db_item)
db.commit()
```
Before adding the data to the db you have to convert it into a proper "db_item", i.e. an instance of the mapped model rather than the Pydantic schema. I hope this solves your issue.
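In this question's terms, that would presumably mean instantiating the mapped TResultadoRegla from the validated schema (a sketch following the answer's pattern):

```python
# Sketch: validate with the Pydantic schema, persist the mapped model.
schema = InsertTResultadoRegla(
    idtareas=1,
    idreglas=regla["idreglas"],
    fecCreacion=datetime.date.today(),
    resultado="CUMPLE",
)
db_item = TResultadoRegla(**schema.dict())  # the mapped model defined above
db.add(db_item)
db.commit()
db.refresh(db_item)
```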

Homework project. Can't figure out how to sum values via flask_sqlalchemy. Flask tossing 'AttributeError: 'BaseQuery' object has no attribute 'sum''

This is the problem: I can do count() (the count of this query is 1617) but can't figure out how to do a sum. FWIW, this is from a job satisfaction survey; lots of 1s and 0s depending on whether respondents provided a response to a specific question.
This works:

```python
@app.route('/list')
def list_respondents():
    all_working = Jf_Q1.query.filter((Jf_Q1.working==1) & (Jf_Q1.good_job==1)).count()
    return render_template('list.html', all_working=all_working)
```
The code above works, but what I need is to replicate this Postgres query:

```sql
select sum(moderatewellbeing)/sum(good_job) from jf_q1
where working=1
and good_job=1;
```
I've tried:

```python
all_working = Jf_Q1.query.filter(Jf_Q1.working==1).sum()
return render_template('list.html', all_working=all_working)
```
But Flask tosses me:

```
AttributeError: 'BaseQuery' object has no attribute 'sum'
```
Here is all my code:

```python
from flask import Flask, render_template, url_for, redirect
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
from flask_migrate import Migrate

######################################
#### SET UP OUR SQLite DATABASE #####
####################################
app = Flask(__name__)
app.config['SECRET_KEY'] = 'mysecretkey'

# Connects our Flask App to our Database
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:M1keD8nJ0e@localhost:5432/project2'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

db = SQLAlchemy(app)
Migrate(app, db)

# Let's create our first model!
# We inherit from db.Model class
class Jf_Q1(db.Model):
    __tablename__ = 'jf_q1'

    ## CREATE THE COLUMNS FOR THE TABLE ##
    # Primary Key column, unique id for each row
    id = db.Column(db.Integer, primary_key=True)
    respondent_id = db.Column(db.Text)
    good_job = db.Column(db.Numeric)
    mediocre_job = db.Column(db.Numeric)
    bad_job = db.Column(db.Numeric)
    highwellbeing = db.Column(db.Numeric)
    moderatewellbeing = db.Column(db.Numeric)
    lowwellbeing = db.Column(db.Numeric)
    working = db.Column(db.Numeric)

    # This sets what an instance in this table will have
    # Note the id will be auto-created for us later, so we don't add it here!
    def __init__(self, respondent_id, good_job, mediocre_job, bad_job, highwellbeing, moderatewellbeing, lowwellbeing, working):
        self.respondent_id = respondent_id
        self.good_job = good_job
        self.mediocre_job = mediocre_job
        self.bad_job = bad_job
        self.highwellbeing = highwellbeing
        self.moderatewellbeing = moderatewellbeing
        self.lowwellbeing = lowwellbeing
        self.working = working

    # def __repr__(self):

@app.route('/')
def index():
    return render_template('home.html')

@app.route('/list')
def list_respondents():
    # all_working = Jf_Q1.query.filter((Jf_Q1.working==1) & (Jf_Q1.good_job==1)).count()
    # return render_template('list.html', all_working=all_working)
    all_working = Jf_Q1.query.filter(Jf_Q1.working==1).sum()
    return render_template('list.html', all_working=all_working)
    # all_working = select([func.sum(Jf_Q1.working)]).\
    #     where(Jf_Q1.working==1)
    # return render_template('list.html', all_working=all_working)

if __name__ == '__main__':
    app.run(debug=True)
```
You can try this:

```python
from sqlalchemy.sql import func

all_working = db.session.query(func.sum(Jf_Q1.working)).filter(Jf_Q1.working==1)
```

or you can also use with_entities:

```python
all_working = Jf_Q1.query.with_entities(func.sum(Jf_Q1.working)).filter(Jf_Q1.working==1)
```
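Neither snippet alone reproduces the original SQL, which is a ratio of two sums. A sketch of that exact query in Flask-SQLAlchemy terms (untested, assuming the Jf_Q1 model above):

```python
from sqlalchemy import func

# sum(moderatewellbeing)/sum(good_job) where working=1 and good_job=1
ratio = db.session.query(
    func.sum(Jf_Q1.moderatewellbeing) / func.sum(Jf_Q1.good_job)
).filter(
    Jf_Q1.working == 1,
    Jf_Q1.good_job == 1,
).scalar()
```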

Flask-SQLAlchemy update record automatically after a specific time

I have a db model like this:
```python
class Payment(db.Model):
    id = db.Column(db.Integer(), primary_key=True)
    user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))
    ticket_status = db.Column(db.Enum(TicketStatus, name='ticket_status', default=TicketStatus.UNUSED))
    departure_time = db.Column(db.Date)
```
I want to change the value of ticket_status on all rows once datetime.utcnow() has passed the date in departure_time. I tried code like this:
```python
class TicketStatus(enum.Enum):
    UNUSED = 'UNUSED'
    USED = 'USED'
    EXPIRED = 'EXPIRED'

    def __repr__(self):
        return str(self.value)

class Payment(db.Model):
    id = db.Column(db.Integer(), primary_key=True)
    user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))
    ticket_status = db.Column(db.Enum(TicketStatus, name='ticket_status', default=TicketStatus.UNUSED))
    departure_time = db.Column(db.Date)

    # TODO | set ticket expiration time
    def __init__(self):
        if datetime.utcnow() > self.departure_time:
            self.ticket_status = TicketStatus.EXPIRED.value
            try:
                db.session.add(self)
                db.session.commit()
            except Exception as e:
                db.session.rollback()
```
I also tried this:
```python
def ticket_expiration(self, payment_id):
    now = datetime.utcnow().strftime('%Y-%m-%d')
    payment = Payment.query.filter_by(id=payment_id).first()

    if payment.ticket_status.value == TicketStatus.USED.value:
        pass
    elif payment and str(payment.departure_time) < now:
        payment.ticket_status = TicketStatus.EXPIRED.value
    elif payment and str(payment.departure_time) >= now:
        payment.ticket_status = TicketStatus.UNUSED.value

    try:
        db.session.commit()
    except Exception as e:
        db.session.rollback()
    return str('ok')
```
But it seems to have no effect once datetime.utcnow() passes the date in departure_time.
So the point of my question is: how do I change the value of a row automatically after a set amount of time?
Finally I figured this out using flask_apscheduler. Here is the snippet of my code that solves the question:

Install flask_apscheduler:

```
pip3 install flask_apscheduler
```

Create a new module tasks.py:
```python
from datetime import datetime

from flask_apscheduler import APScheduler

from app import db
from app.models import Payment, TicketStatus

scheduler = APScheduler()

def ticket_expiration():
    utc_now = datetime.utcnow().strftime('%Y-%m-%d')
    app = scheduler.app
    with app.app_context():
        payment = Payment.query.all()
        for data in payment:
            try:
                if data.ticket_status.value == TicketStatus.USED.value:
                    pass
                elif str(data.departure_time) < utc_now:
                    data.ticket_status = TicketStatus.EXPIRED.value
                elif str(data.departure_time) >= utc_now:
                    data.ticket_status = TicketStatus.UNUSED.value
            except Exception as e:
                print(str(e))
        try:
            db.session.commit()
        except Exception as e:
            db.session.rollback()
    return str('ok')
```
Then register the package with the Flask app in __init__.py:
```python
def create_app(config_class=Config):
    app = Flask(__name__)
    app.config.from_object(Config)

    # The other packages...

    scheduler.init_app(app)
    scheduler.start()
    return app

# import from other_module...
# To avoid a SQLAlchemy circular import, do the import at the bottom.
from app.tasks import scheduler
```
And here is config.py:

```python
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore

class Config(object):
    # The other config...

    # Flask-APScheduler
    JOBS = [
        {
            'id': 'ticket_expiration',
            'func': 'app.tasks:ticket_expiration',
            'trigger': 'interval',
            'hours': 1,  # call the task function every hour
            'replace_existing': True
        }
    ]
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url='sqlite:///flask_context.db')
    }
    SCHEDULER_API_ENABLED = True
```
In the config above the job runs every hour; the interval can instead be given in seconds or other units, depending on your case. For more information on setting the interval, see the flask_apscheduler documentation.
I hope this answer helps someone facing this in the future.
You may replace your status column with just a "used" column holding a Boolean value, and make a hybrid attribute for the status: https://docs.sqlalchemy.org/en/13/orm/extensions/hybrid.html
```python
class Payment(db.Model):
    id = db.Column(db.Integer(), primary_key=True)
    user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))
    used = db.Column(db.Boolean(), default=False)
    departure_time = db.Column(db.Date)

    @hybrid_property
    def status(self):
        # departure_time is a Date column, so compare date to date
        if datetime.utcnow().date() > self.departure_time:
            return "EXPIRED"
        elif self.used:
            return "USED"
        return "UNUSED"
```

python logging to database

I'm seeking a way to make the Python logging module log to a database, falling back to the file system when the db is down.
So basically two things: how to make the logger log to a database, and how to make it fall back to file logging when the db is down.
I recently managed to write my own database logger in Python. Since I couldn't find any example, I thought I'd post mine here. It works with MS SQL.
The database table could look like this:
```sql
CREATE TABLE [db_name].[log](
    [id] [bigint] IDENTITY(1,1) NOT NULL,
    [log_level] [int] NULL,
    [log_levelname] [char](32) NULL,
    [log] [char](2048) NOT NULL,
    [created_at] [datetime2](7) NOT NULL,
    [created_by] [char](32) NOT NULL,
) ON [PRIMARY]
```
The class itself:
```python
class LogDBHandler(logging.Handler):
    '''
    Customized logging handler that puts logs to the database.
    pymssql required.
    '''
    def __init__(self, sql_conn, sql_cursor, db_tbl_log):
        logging.Handler.__init__(self)
        self.sql_cursor = sql_cursor
        self.sql_conn = sql_conn
        self.db_tbl_log = db_tbl_log

    def emit(self, record):
        # Set current time
        tm = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(record.created))
        # Clean the log message so it can be put to db via sql (escape quotes)
        self.log_msg = record.msg
        self.log_msg = self.log_msg.strip()
        self.log_msg = self.log_msg.replace('\'', '\'\'')
        # Make the SQL insert
        sql = 'INSERT INTO ' + self.db_tbl_log + ' (log_level, ' + \
              'log_levelname, log, created_at, created_by) ' + \
              'VALUES (' + \
              '' + str(record.levelno) + ', ' + \
              '\'' + str(record.levelname) + '\', ' + \
              '\'' + str(self.log_msg) + '\', ' + \
              '(convert(datetime2(7), \'' + tm + '\')), ' + \
              '\'' + str(record.name) + '\')'
        try:
            self.sql_cursor.execute(sql)
            self.sql_conn.commit()
        # If an error occurs, print it to the screen. Since the DB is not
        # working, there's no point logging the failure to the database :)
        except pymssql.Error:
            print(sql)
            print('CRITICAL DB ERROR! Logging to database not possible!')
```
And a usage example:
```python
import pymssql
import time
import logging

db_server = 'servername'
db_user = 'db_user'
db_password = 'db_pass'
db_dbname = 'db_name'
db_tbl_log = 'log'
log_file_path = 'C:\\Users\\Yourname\\Desktop\\test_log.txt'
log_error_level = 'DEBUG'   # LOG error level (file)
log_to_db = True            # LOG to database?

class LogDBHandler(logging.Handler):
    [...]

# Main settings for the database logging use
if log_to_db:
    # Make the connection to database for the logger
    log_conn = pymssql.connect(db_server, db_user, db_password, db_dbname, 30)
    log_cursor = log_conn.cursor()
    logdb = LogDBHandler(log_conn, log_cursor, db_tbl_log)

# Set logger
logging.basicConfig(filename=log_file_path)

# Set db handler for root logger
if log_to_db:
    logging.getLogger('').addHandler(logdb)

# Register MY_LOGGER
log = logging.getLogger('MY_LOGGER')
log.setLevel(log_error_level)

# Example variable
test_var = 'This is test message'
# Log the variable contents as an error
log.error('This error occurred: %s' % test_var)
```
The above will log both to the database and to the file. If the file is not needed, skip the logging.basicConfig(filename=log_file_path) line. Everything logged via log will be recorded as MY_LOGGER. If some external error appears (e.g. in an imported module), it will appear as root, since the root logger is also active and uses the database handler.
Write yourself a handler that directs the logs to the database in question. When it fails, you can remove it from the logger's handler list. There are many ways to deal with the failure modes.
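A minimal sketch of that self-removing idea (the db_insert callable is hypothetical; any DB-writing routine such as the emit() above would do):

```python
import logging

class FallbackDBHandler(logging.Handler):
    """Writes records to a database; on failure it removes itself from the
    logger and re-emits the record through a fallback handler."""

    def __init__(self, db_insert, fallback_handler):
        super().__init__()
        self.db_insert = db_insert        # hypothetical callable: db_insert(record)
        self.fallback = fallback_handler  # e.g. logging.FileHandler('fallback.log')

    def emit(self, record):
        try:
            self.db_insert(record)
        except Exception:
            # DB is down: stop routing future records here and fall back.
            logger = logging.getLogger(record.name)
            logger.removeHandler(self)
            logger.addHandler(self.fallback)
            self.fallback.emit(record)
```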
Python logging to a database with a backup logger
Problem
I had the same problem when I ran a Django project on a server, since sometimes you need to check the logs remotely.
Solution
First, a handler is needed so the logger can insert logs into the database. Before that, and since my SQL is not good, an ORM is needed; I chose SQLAlchemy.
The model:
```python
# models.py
import datetime

from sqlalchemy import Column, Integer, String, DateTime, Text
from sqlalchemy.ext.declarative import declarative_base

base = declarative_base()

class Log(base):
    __tablename__ = "log"

    id = Column(Integer, primary_key=True, autoincrement=True)
    time = Column(DateTime, nullable=False, default=datetime.datetime.now)
    level_name = Column(String(10), nullable=True)
    module = Column(String(200), nullable=True)
    thread_name = Column(String(200), nullable=True)
    file_name = Column(String(200), nullable=True)
    func_name = Column(String(200), nullable=True)
    line_no = Column(Integer, nullable=True)
    process_name = Column(String(200), nullable=True)
    message = Column(Text)
    last_line = Column(Text)
```
This is the CRUD module for insertion into the database:
```python
# crud.py
import sqlalchemy
import sqlalchemy.orm
from traceback import print_exc

from .models import base

class Crud:
    def __init__(self, connection_string='sqlite:///log_db.sqlite3',
                 encoding='utf-8',
                 pool_size=10,
                 max_overflow=20,
                 pool_recycle=3600):
        self.connection_string = connection_string
        self.encoding = encoding
        self.pool_size = pool_size
        self.max_overflow = max_overflow
        self.pool_recycle = pool_recycle
        self.engine = None
        self.session = None

    def initiate(self):
        self.create_engine()
        self.create_session()
        self.create_tables()

    def create_engine(self):
        self.engine = sqlalchemy.create_engine(self.connection_string)

    def create_session(self):
        self.session = sqlalchemy.orm.Session(bind=self.engine)

    def create_tables(self):
        base.metadata.create_all(self.engine)

    def insert(self, instances):
        try:
            self.session.add(instances)
            self.session.commit()
            self.session.flush()
        except:
            self.session.rollback()
            raise

    def __del__(self):
        self.close_session()
        self.close_all_connections()

    def close_session(self):
        try:
            self.session.close()
        except:
            print_exc()
        else:
            self.session = None

    def close_all_connections(self):
        try:
            self.engine.dispose()
        except:
            print_exc()
        else:
            self.engine = None
```
The handler:
```python
# handler.py
from logging import Handler, getLogger
from traceback import print_exc

from .crud import Crud
from .models import Log

my_crud = Crud(
    connection_string=<connection string to reach your db>,
    encoding='utf-8',
    pool_size=10,
    max_overflow=20,
    pool_recycle=3600)
my_crud.initiate()

class DBHandler(Handler):
    backup_logger = None

    def __init__(self, level=0, backup_logger_name=None):
        super().__init__(level)
        if backup_logger_name:
            self.backup_logger = getLogger(backup_logger_name)

    def emit(self, record):
        try:
            message = self.format(record)
            try:
                last_line = message.rsplit('\n', 1)[-1]
            except:
                last_line = None
            try:
                new_log = Log(module=record.module,
                              thread_name=record.threadName,
                              file_name=record.filename,
                              func_name=record.funcName,
                              level_name=record.levelname,
                              line_no=record.lineno,
                              process_name=record.processName,
                              message=message,
                              last_line=last_line)
                # raise
                my_crud.insert(instances=new_log)
            except:
                if self.backup_logger:
                    try:
                        getattr(self.backup_logger, record.levelname.lower())(record.message)
                    except:
                        print_exc()
                else:
                    print_exc()
        except:
            print_exc()
```
A test to check the logger:
```python
# test.py
from logging import basicConfig, getLogger, DEBUG, FileHandler, Formatter

from .handlers import DBHandler

basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            datefmt='%d-%b-%y %H:%M:%S',
            level=DEBUG)

format = Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

backup_logger = getLogger('backup_logger')
file_handler = FileHandler('file.log')
file_handler.setLevel(DEBUG)
file_handler.setFormatter(format)
backup_logger.addHandler(file_handler)

db_logger = getLogger('logger')
db_handler = DBHandler(backup_logger_name='backup_logger')
db_handler.setLevel(DEBUG)
db_handler.setFormatter(format)
db_logger.addHandler(db_handler)

if __name__ == "__main__":
    db_logger.debug('debug: hello world!')
    db_logger.info('info: hello world!')
    db_logger.warning('warning: hello world!')
    db_logger.error('error: hello world!')
    db_logger.critical('critical: hello world!!!!')
```
As you can see, the handler accepts a backup logger that it uses when the database insertion fails.
A good improvement would be to do the database logging on a separate thread.
I am digging this out again.
There is a solution with SQLAlchemy (Pyramid is NOT required for this recipe):
https://docs.pylonsproject.org/projects/pyramid-cookbook/en/latest/logging/sqlalchemy_logger.html
And you could improve the logging by adding extra fields; here is a guide: https://stackoverflow.com/a/17558764/1115187
Fallback to FS
Not sure that this is 100% correct, but you could have two handlers:

- a database handler (writes to the DB)
- a file handler (writes to a file or stream)

Just wrap the DB commit in a try-except. But be aware: the file will then contain ALL log entries, not only the entries whose DB save failed.
Old question, but dropping this for others. If you want to use Python logging, you can add two handlers. One writes to file, a rotating file handler; this is robust and works regardless of whether the dB is up or not.
The other one can write to another service/module, like a pymongo integration.
Look up logging.config for how to set up your handlers from code or JSON.
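A minimal dictConfig sketch of that two-handler setup (the db handler entry is hypothetical; a RotatingFileHandler covers the file side):

```python
import logging
import logging.config

LOGGING = {
    'version': 1,
    'handlers': {
        'file': {
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'app.log',
            'maxBytes': 1_000_000,   # rotate after ~1 MB
            'backupCount': 3,
        },
        # 'db': {'class': 'myapp.handlers.MongoHandler'},  # hypothetical custom handler
    },
    'root': {'level': 'INFO', 'handlers': ['file']},  # add 'db' once it exists
}

logging.config.dictConfig(LOGGING)
logging.getLogger(__name__).info('configured via dictConfig')
```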
