I have a SQLite table defined like so:
create table if not exists KeyValuePair (
key CHAR(255) primary key not null,
val text not null,
fup timestamp default current_timestamp not null, -- time of first upload
lup timestamp default current_timestamp not null -- time of last upload
);
create trigger if not exists entry_first_insert after insert
on KeyValuePair
begin
update KeyValuePair set lup = current_timestamp where key = new.key;
end;
create trigger if not exists entry_last_updated after update of val
on KeyValuePair
begin
update KeyValuePair set lup = current_timestamp where key = old.key;
end;
I'm trying to write a peewee.Model for this table in Python. This is what I have so far:
import datetime
import peewee as pw
db = pw.SqliteDatabase('dhm.db')
class BaseModel(pw.Model):
class Meta:
database = db
class KeyValuePair(BaseModel):
key = pw.FixedCharField(primary_key=True, max_length=255)
val = pw.TextField(null=False)
fup = pw.DateTimeField(
verbose_name='first_updated', null=False, default=datetime.datetime.now)
lup = pw.DateTimeField(
verbose_name='last_updated', null=False, default=datetime.datetime.now)
db.connect()
db.create_tables([KeyValuePair])
When I inspect the SQL produced by the last line I get:
CREATE TABLE "keyvaluepair" (
"key" CHAR(255) NOT NULL PRIMARY KEY,
"val" TEXT NOT NULL,
"fup" DATETIME NOT NULL,
"lup" DATETIME NOT NULL
);
So I have two questions at this point:
I've been unable to find a way to achieve the behavior of the entry_first_insert and entry_last_updated triggers. Does peewee support triggers? If not, is there a way to just create a table from a .sql file rather than the Model class definition?
Is there a way to make the default for fup and lup propagate to the SQL definitions?
I've figured out a proper answer to both questions. This solution actually enforces the desired triggers and default timestamps in the SQL DDL.
First we define a convenience class to wrap up the SQL for a trigger. There is a more robust way to do this with peewee's Node objects, but I didn't have time to delve into that for this project. This Trigger class simply uses string formatting to emit proper SQL for trigger creation.
class Trigger(object):
"""Trigger template wrapper for use with peewee ORM."""
_template = """
{create} {name} {when} {trigger_op}
on {tablename}
begin
{op} {tablename} {sql} where {pk} = {old_new}.{pk};
end;
"""
def __init__(self, table, name, when, trigger_op, op, sql, safe=True):
self.create = 'create trigger' + (' if not exists' if safe else '')
self.tablename = table._meta.name
self.pk = table._meta.primary_key.name
self.name = name
self.when = when
self.trigger_op = trigger_op
self.op = op
self.sql = sql
self.old_new = 'new' if trigger_op.lower() == 'insert' else 'old'
def __str__(self):
return self._template.format(**self.__dict__)
Next we define a class TriggerTable that inherits from BaseModel. This class overrides the default create_table to follow table creation with trigger creation. If any trigger fails to create, the table is dropped and the exception re-raised.
class TriggerTable(BaseModel):
"""Table with triggers."""
@classmethod
def triggers(cls):
"""Return an iterable of `Trigger` objects to create upon table creation."""
return tuple()
@classmethod
def new_trigger(cls, name, when, trigger_op, op, sql):
"""Create a new trigger for this class's table."""
return Trigger(cls, name, when, trigger_op, op, sql)
@classmethod
def create_table(cls, fail_silently=False):
"""Create this table in the underlying database."""
super(TriggerTable, cls).create_table(fail_silently)
for trigger in cls.triggers():
try:
cls._meta.database.execute_sql(str(trigger))
except:
cls._meta.database.drop_table(cls, fail_silently)
raise
The next step is to create a class BetterDateTimeField. This Field object overrides the default __ddl__ to append a "DEFAULT current_timestamp" string if the default instance variable is set to the datetime.datetime.now function. There are certainly better ways to do this, but this one captures the basic use case.
class BetterDateTimeField(pw.DateTimeField):
"""Propogate defaults to database layer."""
def __ddl__(self, column_type):
"""Return a list of Node instances that defines the column."""
ddl = super(BetterDateTimeField, self).__ddl__(column_type)
if self.default == datetime.datetime.now:
ddl.append(pw.SQL('DEFAULT current_timestamp'))
return ddl
Finally, we define the new and improved KeyValuePair Model, incorporating our trigger and datetime field improvements. We conclude the Python code by creating the table.
class KeyValuePair(TriggerTable):
"""DurableHashMap entries are key-value pairs."""
key = pw.FixedCharField(primary_key=True, max_length=255)
val = pw.TextField(null=False)
fup = BetterDateTimeField(
verbose_name='first_updated', null=False, default=datetime.datetime.now)
lup = BetterDateTimeField(
verbose_name='last_updated', null=False, default=datetime.datetime.now)
@classmethod
def triggers(cls):
return (
cls.new_trigger(
'kvp_first_insert', 'after', 'insert', 'update',
'set lup = current_timestamp'),
cls.new_trigger(
'kvp_last_updated', 'after', 'update', 'update',
'set lup = current_timestamp')
)
KeyValuePair.create_table()
Now the schema is created properly:
sqlite> .schema keyvaluepair
CREATE TABLE "keyvaluepair" ("key" CHAR(255) NOT NULL PRIMARY KEY, "val" TEXT NOT NULL, "fup" DATETIME NOT NULL DEFAULT current_timestamp, "lup" DATETIME NOT NULL DEFAULT current_timestamp);
CREATE TRIGGER kvp_first_insert after insert
on keyvaluepair
begin
update keyvaluepair set lup = current_timestamp where key = new.key;
end;
CREATE TRIGGER kvp_last_updated after update
on keyvaluepair
begin
update keyvaluepair set lup = current_timestamp where key = old.key;
end;
sqlite> insert into keyvaluepair (key, val) values ('test', 'test-value');
sqlite> select * from keyvaluepair;
test|test-value|2015-12-07 21:58:05|2015-12-07 21:58:05
sqlite> update keyvaluepair set val = 'test-value-two' where key = 'test';
sqlite> select * from keyvaluepair;
test|test-value-two|2015-12-07 21:58:05|2015-12-07 21:58:22
You can override the model's save function and set the timestamps there. See TimeStampModel for an example.
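For reference, here is a minimal sketch of that approach built on the BaseModel from the question (the class name is made up; bulk Model.update() queries and raw SQL bypass save(), so they would not refresh the timestamp, which is exactly what the trigger-based answers avoid):
import datetime

import peewee as pw

class TimestampedModel(BaseModel):
    """Maintain fup/lup in Python instead of with database triggers."""
    fup = pw.DateTimeField(default=datetime.datetime.now)
    lup = pw.DateTimeField(default=datetime.datetime.now)

    def save(self, *args, **kwargs):
        # Refresh the last-updated timestamp on every save();
        # fup keeps the default it got when the row was first created.
        self.lup = datetime.datetime.now()
        return super(TimestampedModel, self).save(*args, **kwargs)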
I stumbled across exactly this issue a while ago, and spent some time coming up with an optimal design to support Triggers in PeeWee (inspired by the above answer). I am quite happy with how we ended up implementing it, and wanted to share this. At some point I will do a PR into Peewee for this.
Creating Triggers & TriggerListeners in PeeWee
Objective
This document describes how to do this in two parts:
How to add a Trigger to a model in the database.
How to create a ListenThread that will have a callback function that is notified each time the table is updated.
How-To Implementation
The beauty of this design is you only need one item: the TriggerModelMixin Model. Then it is easy to create listeners to subscribe/have callback methods.
The TriggerModelMixin can be copy-pasted as:
import logging
import select
import threading

from peewee import Model

logger = logging.getLogger(__name__)

class TriggerModelMixin(Model):
""" PeeWee Model with support for triggers.
This will create a trigger that on all table updates will send
a NOTIFY to {tablename}_updates.
Note that it will also take care of updating the triggers as
appropriate/necessary.
"""
_template = """
CREATE OR REPLACE FUNCTION {function_name}()
RETURNS trigger AS
$BODY$
BEGIN
PERFORM pg_notify(
CAST('{notify_channel_name}' AS text),
row_to_json(NEW)::text);
RETURN NEW;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;
ALTER FUNCTION {function_name}() OWNER TO postgres;
DROP TRIGGER IF EXISTS {trigger_name} ON "{tablename}";
CREATE TRIGGER {trigger_name}
AFTER INSERT OR UPDATE OR DELETE
ON "{tablename}"
{frequency}
EXECUTE PROCEDURE {function_name}();
"""
function_name_template = "{table_name}updatesfunction"
trigger_name_template = "{table_name}updatestrigger"
notify_channel_name_template = "{table_name}updates"
frequency = "FOR EACH ROW"
@classmethod
def get_notify_channel(cls):
table_name = cls._meta.table_name
return cls.notify_channel_name_template.format(**{"table_name": table_name})
@classmethod
def create_table(cls, fail_silently=False):
""" Create table and triggers """
super(TriggerModelMixin, cls).create_table()
table_name = cls._meta.table_name
notify_channel = cls.get_notify_channel()
function_name = cls.function_name_template.format(**{"table_name": table_name})
trigger_name = cls.trigger_name_template.format(**{"table_name": table_name})
trigger = cls._template.format(**{
"function_name": function_name,
"trigger_name": trigger_name,
"notify_channel_name": notify_channel,
"tablename": table_name,
"frequency": cls.frequency
}
)
logger.info(f"Creating Triggers for {cls}")
cls._meta.database.execute_sql(str(trigger))
@classmethod
def create_db_listener(cls):
''' Returns an object that will listen to the database notify channel
and call a specified callback function if triggered.
'''
class Trigger_Listener:
def __init__(self, db_model):
self.db_model = db_model
self.running = True
self.test_mode = False
self.channel_name = ""
def stop(self):
self.running = False
def listen_and_call(self, f, *args, timeout: int = 5, sync=False):
''' Start listening and call the callback method `f` if a
trigger notify is received.
This has two styles: sync (blocking) and async (non-blocking)
Note that `f` must have `record` as a keyword parameter - this
will be the record that sent the notification.
'''
if sync:
return self.listen_and_call_sync(f, *args, timeout=timeout)
else:
t = threading.Thread(
target=self.listen_and_call_sync,
args=(f, *args),
kwargs={'timeout': timeout}
)
t.start()
def listen_and_call_sync(self, f, *args, timeout: int = 5):
''' Call callback function `f` when the channel is notified. '''
self.channel_name = self.db_model.get_notify_channel()
db = self.db_model._meta.database
db.execute_sql(f"LISTEN {self.channel_name};")
conn = db.connection()
while self.running:
# Check whether select() reported the connection as readable
if not select.select([conn], [], [], timeout) == ([], [], []):
# Wait for the bytes to become fully available in the buffer
conn.poll()
while conn.notifies:
record = conn.notifies.pop(0)
logger.info(f"Trigger recieved with record {record}")
f(*args, record=record)
if self.test_mode:
break
return Trigger_Listener(cls)
Example Implementation:
db_listener = FPGExchangeOrder.create_db_listener()
def callback_method(record=None):
# Callback method to handle the record.
logger.info(f"DB update on record: {record}")
# Handle the update here
db_listener.listen_and_call(callback_method)
How to use this
1. Add a Trigger to a model in the database
This is very easy. Just add the TriggerModelMixin mixin to the model that you want to add support to. The mixin handles creating the triggers and provides the listener factory that is notified when the triggers fire.
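A minimal sketch, reusing the FPGExchangeOrder name from the example above (the fields and the db object are made up; the mixin is PostgreSQL-specific because it relies on pg_notify):
from peewee import CharField, IntegerField, PostgresqlDatabase

db = PostgresqlDatabase('mydb', user='postgres')  # assumed connection details

class FPGExchangeOrder(TriggerModelMixin):
    # Any fields will do; the mixin only needs a table to attach its trigger to.
    order_id = CharField()
    amount = IntegerField()

    class Meta:
        database = db

# Creates the table, the plpgsql function and the trigger in one call.
FPGExchangeOrder.create_table()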
2. Create a ListenThread to have a Callback
We have two modes for the listener: async (non-blocking) and sync (blocking). By default it is non-blocking; pass sync=True if you want it to block.
To use it (in either case), create a callback method. Note that this callback blocks while updates are received (records are processed serially), so avoid heavy work or I/O in it. The only requirement is a keyword parameter named record, which receives the record from the database as a dictionary.
From this, just create the listener, then call listen_and_call.
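For the blocking variant the call is the same, just with sync=True, for example db_listener.listen_and_call(callback_method, sync=True, timeout=10).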
As the title says, I'm having problems retrieving data from a SQLite DB when using a WHERE clause.
Here is the piece of code that tries to get a row where an ID is given:
def check_attendance(self, cred):
query = """SELECT * FROM clients WHERE dni=?"""
self.conn.cursor().execute(query, (cred,))
record = self.conn.cursor().fetchone()
The variable cred is already passed inside a tuple, as the Python sqlite3 API requires. Sadly, the query returns None when executed here.
If I do the same in sqlite.exe, I do get the right row back. In fact, this is the only query I cannot execute properly from my Python script; everything else returns rows normally.
Here it is executing from the Python script
And here is in sqlite.exe
Here is the piece that stores values in the DB:
def new_client(self, *args):
success = False
# Check if all inputs are filled
if self.dialog.content_cls.ids.user_name.text and self.dialog.content_cls.ids.user_surname.text and len(self.dialog.content_cls.ids.user_dni.text) == 8 and self.dialog.content_cls.ids.user_date.text:
# Convert str date to a datetime obj in order to use it with timedelta
paid_date = datetime.strptime(self.dialog.content_cls.ids.user_date.text, "%d-%m-%Y")
# paid_date is now in YYYY-MM-DD HH:MM:SS format
# Add 30 days to paid_date
exp_date = paid_date + timedelta(days=30)
# Convert YYYY-MM-DD HH:MM:SS to a YYYY-MM-DD string as we don't need the clock
paid_date = datetime.strptime(str(paid_date), "%Y-%m-%d %H:%M:%S").strftime("%Y-%m-%d")
exp_date = datetime.strptime(str(exp_date), "%Y-%m-%d %H:%M:%S").strftime("%Y-%m-%d")
# Create query blueprint and try executing
query = """INSERT INTO clients (name, surname, dni, membership_date, expiration_date) VALUES (?,?,?,?,?)"""
try:
self.conn.execute(query, (self.dialog.content_cls.ids.user_name.text,
self.dialog.content_cls.ids.user_surname.text,
self.dialog.content_cls.ids.user_dni.text,
paid_date,
exp_date
)
)
success = True
except sqlite3.IntegrityError:
pass
if success:
self.conn.commit()
The try/except was used for other reasons. Adding to the database from the Python script works fine as shown in the second screenshot.
And the table clients is as follows:
c.execute(''' CREATE TABLE clients (id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
surname TEXT NOT NULL,
dni INTEGER NOT NULL UNIQUE,
membership_date date NOT NULL,
expiration_date date NOT NULL); ''')
Using Python v3.7.7 32bit.
Thanks!
In your code, two separate cursors are created by calling cursor() twice.
You should either get the results from the same cursor you used to execute the SELECT statement:
def check_attendance(self, cred):
query = """SELECT * FROM clients WHERE dni=?"""
cur = self.conn.cursor()
cur.execute(query, (cred,))
record = cur.fetchone()
...or you can avoid creating the cursor explicitly by using the execute method directly on the Connection object:
def check_attendance(self, cred):
query = """SELECT * FROM clients WHERE dni=?"""
record = self.conn.execute(query, (cred,)).fetchone()
You can read more about this approach in the documentation (https://docs.python.org/3/library/sqlite3.html#using-sqlite3-efficiently):
Using the nonstandard execute(), executemany() and executescript() methods of the Connection object, your code can be written more concisely because you don’t have to create the (often superfluous) Cursor objects explicitly. Instead, the Cursor objects are created implicitly and these shortcut methods return the cursor objects.
I am developing an extension to an existing app which uses sqlalchemy 0.6.
The app has sqlalchemy tables created the non-declarative way. I am trying to create in my extension a new table with a foreign key column pointing at the primary key of the main table in the application database and I am creating it declaratively.
This all works fine, with the table created once the extension is loaded, and with no complaints at all. My table prints out and demonstrates that new rows have been added ok.
What I want, and think is possible (but don't know, as I have never used SQL or any other database before), is for the corresponding row in my table to be deleted when the row in the app's main table with the corresponding foreign key is deleted.
So far, after many permutations, nothing has worked. I thought that with a backref set and a relation defined with delete cascaded, there shouldn't be a problem. Because the new table is defined in an extension which should just plug in, I don't want to edit the code in the main app at all; at least that is my goal. One of the problems I have, though, is that the main app table I want to reference has no member variables defined in its class, does not declare its primary key in its mapper, and only declares the primary key in the table. This makes it difficult to create a relation(ship) clause, the first argument of which must be a class or mapper (in this case neither of which has the primary key declared).
Is there any way of achieving this?
ps - here is some of the code that I am using. LocalFile is the declarative class. All the connection details are taken care of by the main application.
if not self.LocalFile.__table__.exists(bind=Engine):
    self.LocalFile.__table__.create(bind=Engine)
Here is the LocalFile class - Base is a declarative base class with bind=Engine passed in the constructor:
class LocalFile(Base):
__tablename__ = 'local_file'
_id = Column(Integer, Sequence('local_file_sequence', start=1, increment=1), primary_key=True)
_filename = Column(String(50), nullable=False)
_filepath = Column(String(128), nullable=False)
_movieid = Column(Integer, ForeignKey(db.tables.movies.c.movie_id, onupdate='CASCADE', ondelete='CASCADE'))
#movies = relation(db.Movie, backref="local_file", cascade="all")
@property
def filename(self):
return self._filename
@filename.setter
def filename(self, filename):
self._filename = filename
@property
def filepath(self):
return self._filepath
@filepath.setter
def filepath(self, filepath):
self._filepath = filepath
@property
def movieid(self):
return self._movieid
@movieid.setter
def movieid(self, movieid):
self._movieid = movieid
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
filename = synonym('_filename', descriptor=filename)
movieid = synonym('_movieid', descriptor=movieid)
filepath = synonym('_filepath', descriptor=filepath)
id = synonym('_id', descriptor=id)
def __init__(self, filename, filepath, movieid):
self._filename = filename
self._filepath = filepath
self._movieid = movieid
def __repr__(self):
return "<User('%s','%s', '%s')>" % (self.filename, self.filepath, self.movieid)
Edit:
The backend is sqlite3. Below is the log from the table creation, produced by turning on echo (thanks for pointing that out, it's very useful; already I suspect the existing application generates far more SQL than necessary).
Following the reported table creation is the SQL generated when a row is removed. I personally can't see any statement that references deleting a row in the local_file table, but I know very little SQL currently. Thanks.
2011-12-29 16:29:18,530 INFO sqlalchemy.engine.base.Engine.0x...0650
CREATE TABLE local_file (
_id INTEGER NOT NULL,
_filename VARCHAR(50) NOT NULL,
_filepath VARCHAR(128) NOT NULL,
_movieid INTEGER,
PRIMARY KEY (_id),
FOREIGN KEY(_movieid) REFERENCES movies (movie_id) ON DELETE CASCADE ON UPDATE CASCADE
)
2011-12-29T16:29:18: I: sqlalchemy.engine.base.Engine.0x...0650(base:1387):
CREATE TABLE local_file (
_id INTEGER NOT NULL,
_filename VARCHAR(50) NOT NULL,
_filepath VARCHAR(128) NOT NULL,
_movieid INTEGER,
PRIMARY KEY (_id),
FOREIGN KEY(_movieid) REFERENCES movies (movie_id) ON DELETE CASCADE ON UPDATE CASCADE
)
2011-12-29 16:29:18,534 INFO sqlalchemy.engine.base.Engine.0x...0650 ()
2011-12-29T16:29:18: I: sqlalchemy.engine.base.Engine.0x...0650(base:1388): ()
2011-12-29 16:29:18,643 INFO sqlalchemy.engine.base.Engine.0x...0650 COMMIT
2011-12-29T16:29:18: I: sqlalchemy.engine.base.Engine.0x...0650(base:1095): COMMIT
for row in table produces the following for the two tables:
the local file table:
(, u' 310 To Yuma')
(, u' Ravenous')
the movie table in the existing app:
(, u'IMDb - 3:10 to Yuma')
(, u'Ravenous')
The code when deleting a row is so long that I cannot include it here (200 lines or so; isn't that a little too many for deleting one row?), but it makes no reference to deleting a row in the local_file table. There are statements like:
2011-12-29 17:09:17,141 INFO sqlalchemy.engine.base.Engine.0x...0650 UPDATE movies SET poster_md5=?, updated=? WHERE movies.movie_id = ?
2011-12-29T17:09:17: I: sqlalchemy.engine.base.Engine.0x...0650(base:1387): UPDATE movies SET poster_md5=?, updated=? WHERE movies.movie_id = ?
2011-12-29 17:09:17,142 INFO sqlalchemy.engine.base.Engine.0x...0650 (None, '2011-12-29 17:09:17.141019', 2)
2011-12-29T17:09:17: I: sqlalchemy.engine.base.Engine.0x...0650(base:1388): (None, '2011-12-29 17:09:17.141019', 2)
2011-12-29 17:09:17,150 INFO sqlalchemy.engine.base.Engine.0x...0650 DELETE FROM posters WHERE posters.md5sum = ?
2011-12-29T17:09:17: I: sqlalchemy.engine.base.Engine.0x...0650(base:1387): DELETE FROM posters WHERE posters.md5sum = ?
2011-12-29 17:09:17,157 INFO sqlalchemy.engine.base.Engine.0x...0650 (u'083841e14b8bb9ea166ea4b2b976f03d',)
In SQLite you must turn on support for foreign keys explicitly or it just ignores any SQL related to foreign keys.
from sqlalchemy import create_engine, event

engine = create_engine(database_url)

def on_connect(conn, record):
    conn.execute('pragma foreign_keys=ON')

event.listen(engine, 'connect', on_connect)
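Since the question mentions SQLAlchemy 0.6, where the event API is not available yet, the same thing can be done with a pool listener; a rough sketch (database_url as above):
from sqlalchemy import create_engine
from sqlalchemy.interfaces import PoolListener

class ForeignKeysListener(PoolListener):
    def connect(self, dbapi_con, con_record):
        # Runs for every new DBAPI connection handed out by the pool.
        dbapi_con.execute('pragma foreign_keys=ON')

engine = create_engine(database_url, listeners=[ForeignKeysListener()])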
I have a use case in which I have to read rows having status = 0 from MySQL.
Table schema:
CREATE TABLE IF NOT EXISTS in_out_analytics(
id INT AUTO_INCREMENT PRIMARY KEY,
file_name VARCHAR(255),
start_time BIGINT,
end_time BIGINT,
duration INT,
in_count INT,
out_count INT,
status INT
)
I am using the code below to read data from MySQL.
persistance.py
import mysql
import mysql.connector
import conf
class DatabaseManager(object):
# class-level variable storing the db connection
connection = None
def __init__(self):
self.ip = conf.db_ip
self.user_name = conf.db_user
self.password = conf.db_password
self.db_name = conf.db_name
# Initialize database only one time in application
if not DatabaseManager.connection:
self.connect()
self.cursor = DatabaseManager.connection.cursor()
self.create_schema()
def connect(self):
try:
DatabaseManager.connection = mysql.connector.connect(
host= self.ip,
database = self.db_name,
user = self.user_name,
password = self.password
)
print(f"Successfully connected to { self.ip } ")
except mysql.connector.Error as e:
print(str(e))
def create_schema(self):
# Create database
# sql = f"CREATE DATABASE { self.db_name} IF NOT EXIST"
# self.cursor.execute(sql)
# Create table
sql = """
CREATE TABLE IF NOT EXISTS in_out_analytics(
id INT AUTO_INCREMENT PRIMARY KEY,
file_name VARCHAR(255),
start_time BIGINT,
end_time BIGINT,
duration INT,
in_count INT,
out_count INT,
status INT
)"""
self.cursor.execute(sql)
def read_unprocessed_rows(self):
sql = "SELECT id, start_time, end_time FROM in_out_analytics WHERE status=0;"
self.cursor.execute(sql)
result_set = self.cursor.fetchall()
rows = []
for row in result_set:
id = row[0]
start_time = row[1]
end_time = row[2]
details = {
'id' : id,
'start_time' : start_time,
'end_time' : end_time
}
rows.append(details)
return rows
test.py
import time
from persistance import DatabaseManager
if __name__ == "__main__":
# Rows which are inserted after application is started do not get processed if
# 'DatabaseManager' is defined here
# dm = DatabaseManager()
while True:
# Rows which are inserted after application is started do get processed if
# 'DatabaseManager' is defined here
dm = DatabaseManager()
unprocessed_rows = dm.read_unprocessed_rows()
print(f"unprocessed_rows: { unprocessed_rows }")
time.sleep(2)
Problem:
The problem is: when I create the database object dm = DatabaseManager() above the while loop, rows inserted after the application has started never get processed, but when I create dm = DatabaseManager() inside the while loop, rows inserted after the application has started do get processed.
What is the problem with the above code?
Ideally we should create only one DatabaseManager object, since this class opens a connection to MySQL, and a single shared connection should be the normal case.
Making an assumption here, as I cannot test it myself.
tl;dr: Add DatabaseManager.connection.commit() to your read_unprocessed_rows
When you execute your SELECT statement, a transaction is created implicitly, using the default isolation level REPEATABLE READ. That creates a snapshot of the database at that point in time and all consecutive reads in that transaction will read from the snapshot established during the first read. The effects of different isolation levels are described here. To refresh the snapshot in REPEATABLE READ, you can commit your current transaction before executing the next statement.
So, when you instantiate your DatabaseManager inside your loop, each SELECT starts a new transaction on a new connection and therefore sees a fresh snapshot every time. When instantiating your DatabaseManager outside the loop, the transaction created by the first SELECT keeps the same snapshot for all consecutive SELECTs, and updates from outside that transaction remain invisible.
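A sketch of the fix applied to the method from the question (everything else in DatabaseManager stays the same):
def read_unprocessed_rows(self):
    sql = "SELECT id, start_time, end_time FROM in_out_analytics WHERE status=0;"
    self.cursor.execute(sql)
    result_set = self.cursor.fetchall()
    # End the implicit REPEATABLE READ transaction so the next call
    # gets a fresh snapshot that includes newly inserted rows.
    DatabaseManager.connection.commit()
    return [
        {'id': row[0], 'start_time': row[1], 'end_time': row[2]}
        for row in result_set
    ]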
I am using TxPostgres to insert a row into PostgreSQL; my stored procedure is:
CREATE OR REPLACE FUNCTION gps_open_connection(
_ip character varying(15),
_port integer
) RETURNS integer AS $$
DECLARE
log_id integer;
BEGIN
INSERT INTO gpstracking_device_logs (gpstracking_device_logs.id, gpstracking_device_logs.ip, gpstracking_device_logs.port, gpstracking_device_logs.status, gpstracking_device_logs.created, gpstracking_device_logs.updated) VALUES (DEFAULT, _ip, _port, TRUE, NOW(), NOW()) RETURNING id INTO log_id;
END
$$
LANGUAGE plpgsql VOLATILE SECURITY DEFINER;
and this stored procedure is called from a method in a Twisted class; my method is:
def openConnection (self, ip, port):
self.connection['ip'] = ip
self.connection['port'] = port
self.connection['status'] = True
self._d.addCallback(lambda _: self._conn.runQuery("select gps_open_connection('%s', '%s')" % (ip, port)))
self.id ?
My issue is that I don't know how to populate self.id. I hope you can help with this.
self._conn.runQuery returns a Deferred that will contain the query result.
Because you return this Deferred from the callback, the next callback in the chain will receive the txpostgres Deferred's value. So you can write, just after the previous callback:
def setId(val):
self.id = val[0]['id']
self._d.addCallback(setId)
So in my postgres DB I have the following custom type:
create type my_pg_type as (
string_id varchar(32),
time_diff interval,
multiplier integer
);
To further complicate things, this is being used as an array:
alter table my_table add column my_keys my_pg_type [];
I'd like to map this with SQLAlchemy (0.6.4) !!
(apologies for elixir)
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.types import Enum
from elixir import Entity, Field
class MyTable(Entity):
# -- snip --
my_keys = Field(ARRAY(Enum))
I know 'Enum' is incorrect in the above.
For an example of a value coming back from the database for that array column, I've shown below the value in ARRAY.result_processor(self, dialect, coltype):
class ARRAY(sqltypes.MutableType, sqltypes.Concatenable, sqltypes.TypeEngine):
# -- snip --
def result_processor(self, dialect, coltype):
item_proc = self.item_type.result_processor(dialect, coltype)
if item_proc:
def convert_item(item):
if isinstance(item, list):
return [convert_item(child) for child in item]
else:
return item_proc(item)
else:
def convert_item(item):
if isinstance(item, list):
return [convert_item(child) for child in item]
else:
return item
def process(value):
if value is None:
return value
"""
# sample value:
>>> value
'{"(key_1,07:23:00,0)","(key_2,01:00:00,20)"}'
"""
return [convert_item(item) for item in value]
return process
So the above process function incorrectly splits the string, assuming it's already a list.
So far, I've successfully subclassed ARRAY to properly split the string, and instead of Enum, I've tried to write my own type (implementing Unicode) to recreate the (string, timedelta, integer) tuple, but have run into a lot of difficulties, specifically the proper conversion of the interval to the Python timedelta.
I'm posting here in case I'm missing an obvious precedent way of doing this?
UPDATE See the recipe at the bottom for a workaround
I worked up some example code to see what psycopg2 is doing here, and this is well within their realm - psycopg2 is not interpreting the value as an array at all. psycopg2 needs to be able to parse out the ARRAY when it comes back, as SQLAlchemy's ARRAY type assumes at least that much has been done. You can of course hack around SQLAlchemy's ARRAY, which here would mean basically not using it at all, in favor of something that parses out this particular string value psycopg2 is giving us back.
But what's also happening here is that we aren't even getting at psycopg2's mechanics for converting timedeltas either, something SQLAlchemy normally doesn't have to worry about. In this case I feel like the facilities of the DBAPI are being under-utilized and psycopg2 is a very capable DBAPI.
So I'd advise you work with psycopg2's custom type mechanics over at http://initd.org/psycopg/docs/extensions.html#database-types-casting-functions.
If you want to mail their mailing list, here's a test case:
import psycopg2
conn = psycopg2.connect(host="localhost", database="test", user="scott", password="tiger")
cursor = conn.cursor()
cursor.execute("""
create type my_pg_type as (
string_id varchar(32),
time_diff interval,
multiplier integer
)
""")
cursor.execute("""
CREATE TABLE my_table (
data my_pg_type[]
)
""")
cursor.execute("insert into my_table (data) "
"values (CAST(%(data)s AS my_pg_type[]))",
{'data':[("xyz", "'1 day 01:00:00'", 5), ("pqr", "'1 day 01:00:00'", 5)]})
cursor.execute("SELECT * from my_table")
row = cursor.fetchone()
assert isinstance(row[0], (tuple, list)), repr(row[0])
PG's type registration supports global registration. You can also register the types on a per-connection basis within SQLAlchemy using the pool listener in 0.6 or connect event in 0.7 and further.
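For instance, psycopg2 ships register_composite, which can be hooked up on each new connection; a sketch using the 0.7-style connect event (the DSN is the one from the test case above):
import psycopg2.extras
from sqlalchemy import create_engine, event

engine = create_engine("postgresql://scott:tiger@localhost/test")

@event.listens_for(engine, "connect")
def register_composites(dbapi_conn, connection_record):
    # Teach this connection to parse my_pg_type values into named tuples
    # instead of returning the raw '(...)' string.
    psycopg2.extras.register_composite("my_pg_type", dbapi_conn)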
UPDATE - due to https://bitbucket.org/zzzeek/sqlalchemy/issue/3467/array-of-enums-does-not-allow-assigning I'm probably going to recommend people use this workaround type for now, until psycopg2 adds more built-in support for this:
import re

import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import ARRAY

class ArrayOfEnum(ARRAY):
def bind_expression(self, bindvalue):
return sa.cast(bindvalue, self)
def result_processor(self, dialect, coltype):
super_rp = super(ArrayOfEnum, self).result_processor(dialect, coltype)
def handle_raw_string(value):
inner = re.match(r"^{(.*)}$", value).group(1)
return inner.split(",")
def process(value):
return super_rp(handle_raw_string(value))
return process
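If it helps, usage of the workaround would look roughly like this (the table, enum and column names are made up; Base is a declarative base):
from sqlalchemy.dialects.postgresql import ENUM

class MyModel(Base):
    __tablename__ = 'my_model'
    id = sa.Column(sa.Integer, primary_key=True)
    # bind_expression adds the CAST on the way in; result_processor
    # re-parses the raw '{a,b}' string psycopg2 hands back.
    my_keys = sa.Column(ArrayOfEnum(ENUM('key_1', 'key_2', name='my_enum')))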
Check out the sqlalchemy_utils documentation:
CompositeType provides means to interact with
`PostgreSQL composite types`_. Currently this type features:
* Easy attribute access to composite type fields
* Supports SQLAlchemy TypeDecorator types
* Ability to include composite types as part of PostgreSQL arrays
* Type creation and dropping
Usage:
from collections import OrderedDict
import sqlalchemy as sa
from sqlalchemy_utils import CompositeType, CurrencyType
class Account(Base):
__tablename__ = 'account'
id = sa.Column(sa.Integer, primary_key=True)
balance = sa.Column(
CompositeType(
'money_type',
[
sa.Column('currency', CurrencyType),
sa.Column('amount', sa.Integer)
]
)
)
Array Of Composites:
from sqlalchemy_utils import CompositeArray
class Account(Base):
__tablename__ = 'account'
id = sa.Column(sa.Integer, primary_key=True)
balances = sa.Column(
CompositeArray(
CompositeType(
'money_type',
[
sa.Column('currency', CurrencyType),
sa.Column('amount', sa.Integer)
]
)
)
)