Run UPDATE SET parameterized sql statement in Connection.execute sqlalchemy - python

I want to run a parametrized mysql UPDATE SET statement in flask_sqlalchemy. Since I do not know which columns should be updated I wrote a helper function to help in writing the statement.
My model
class User(db.Model):
    """Flask-SQLAlchemy model: auto-increment integer PK plus required
    username/email columns."""

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String(80), nullable=False)
    email = db.Column(db.String(120), nullable=False)

    def __repr__(self):
        return '<User %r>' % self.username
Helper function
def run_sql(params):
    """Build a parameterized MySQL UPDATE statement for the User table.

    params: mapping of column names to new values; must contain "id",
        which selects the row to update (e.g.
        {'username': "testing", "email": "testing#testing.ts", "id": 1}).

    Returns the SQL string, e.g.
        UPDATE User SET username=:username, email=:email WHERE id=:id LIMIT 1

    The bind values stay in ``params``, which is left unmodified (the
    original version popped and re-inserted "id", mutating the caller's
    dict and shadowing the ``id`` builtin).
    """
    if "id" not in params:
        # Preserve the original contract: a missing "id" raises KeyError.
        raise KeyError("id")
    # Every key except "id" becomes a "col=:col" SET assignment, in the
    # dict's insertion order.
    assignments = ", ".join(f"{col}=:{col}" for col in params if col != "id")
    return f"""UPDATE User SET {assignments} WHERE id=:id LIMIT 1"""
Calling helper function
if __name__ == "__main__":
    connection = engine.connect()
    params = {'username': "testing", "email": "testing#testing.ts", "id": 1}
    # Executing the raw string directly (without text()) is what produces
    # the ProgrammingError shown in the traceback below.
    connection.execute(run_sql(params), params)
Running the previous code generates the following exception
"sqlalchemy.exc.ProgrammingError: (pymysql.err.ProgrammingError) (1064, "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ':username, email=:email WHERE id=:id LIMIT 1' at line 1") [SQL: 'UPDATE User SET username=:username, email=:email WHERE id=:id LIMIT 1'] [parameters: {'username': 'testing', 'email': 'testing#testing.ts', 'id': 1}] (Background on this error at: http://sqlalche.me/e/f405)"
The SQL statement looks fine to me, and so do the parameters. Am I missing something?

The bind params aren't MySQL style, and as you're passing in plain text to engine.execute(), SQLAlchemy isn't applying a dialect to the query before executing it.
Try this:
engine.execute("SELECT :val", {"val": 1}) # will fail, same as your query
...and then this:
engine.execute("SELECT %(val)s", {"val": 1}) # will execute
Wrapping the query with text() will let SQLAlchemy handle the proper bind style:
from sqlalchemy import text # or you can use db.text w/ flask-sqlalchemy
engine.execute(text("SELECT :val"), {"val": 1})
One other thing to note is that SQLAlchemy will automatically handle construction of the UPDATE query for you, respecting the values in the parameter dict, e.g.:
id_ = 1
params = {'username': "testing", "email": "testing#testing.ts"}
# Update.where() takes a SQL expression, not keyword arguments, so compare
# against the column object (matches the emitted "WHERE user.id = %(id_1)s").
User.__table__.update().values(params).where(User.__table__.c.id == id_)
# UPDATE user SET username=%(username)s, email=%(email)s WHERE user.id = %(id_1)s
params = {'username': "testing"}
User.__table__.update().values(params).where(User.__table__.c.id == id_)
# UPDATE user SET username=%(username)s WHERE user.id = %(id_1)s
params = {'username': "testing", "unexpected": "value"}
User.__table__.update().values(params).where(User.__table__.c.id == id_)
# sqlalchemy.exc.CompileError: Unconsumed column names: unexpected

If you want to use the :named_parameter form for your SQL statement you'll need to use SQLAlchemy's text method and then call the execute method of a Connection object:
import sqlalchemy as sa
# ...
with engine.begin() as conn:
    conn.exec_driver_sql("CREATE TEMPORARY TABLE temp (id varchar(10) PRIMARY KEY)")
    # text() lets SQLAlchemy translate :named parameters to the driver's style.
    insert_stmt = sa.text("INSERT INTO temp (id) VALUES (:id)")
    conn.execute(insert_stmt, {'id': 'foo'})
    rows = conn.execute(sa.text("SELECT * FROM temp")).fetchall()
    print(rows)  # [('foo',)]

Related

Using sqlalchemy with psycopg

I am in need of combining the results of a SQLAlchemy query and a psycopg query.
Currently I use psycopg to do most of my SQL selects in my code. This is done using a cursor and fetchall().
However, I have a separate microservice that returns some extra WHERE clauses I need for my statement, based on some variables. This is returned as a SQLAlchemy SELECT object. This is out of my control.
Example return:
select * from users where name = 'bar';
My current solution for this is to hardcode the results of the microservice (just the WHERE clauses) into an enum and add them into the psycopg statement using an f-string. This is a temporary solution.
Simplified example:
user_name = "bar"
# Hard-coded WHERE fragments keyed by user name (a comma was missing
# between the two entries, which made the dict literal a syntax error).
sql_enum = {
    "foo": "name = 'foo'",
    "bar": "name = 'bar'",
}
with conn.cursor() as cur:
    # NOTE(review): interpolating SQL fragments via f-string is only safe
    # because the fragments are hard-coded here, as the question states.
    cur.execute(f"select * from users where location = 'FOOBAR' and {sql_enum[user_name]}")
I am looking for a way to better join these two statements. Any suggestions are greatly appreciated!
Rather than mess with dynamic SQL (f-strings, etc.), I would just start with a SQLAlchemy Core select() statement and then add the whereclause from the statement returned by the microservice:
import sqlalchemy as sa

# "user:password@host" — the "#" in the original URL was the usual
# Stack Overflow mangling of "@" and made the URL invalid.
engine = sa.create_engine("postgresql://scott:tiger@192.168.0.199/test")

users = sa.Table(
    "users",
    sa.MetaData(),
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("name", sa.String(50)),
    sa.Column("location", sa.String(50)),
)
users.drop(engine, checkfirst=True)
users.create(engine)

# mock return from microservice
from_ms = sa.select(sa.text("*")).select_from(users).where(users.c.name == "bar")

base_query = sa.select(users).where(users.c.location == "FOOBAR")
# Graft the microservice statement's WHERE clause onto our own query.
full_query = base_query.where(from_ms.whereclause)

engine.echo = True
with engine.begin() as conn:
    result = conn.execute(full_query)
"""SQL emitted:
SELECT users.id, users.name, users.location
FROM users
WHERE users.location = %(location_1)s AND users.name = %(name_1)s
[generated in 0.00077s] {'location_1': 'FOOBAR', 'name_1': 'bar'}
"""

How can I get data from the DB with the column names as keys?

I am using SQLAlchemy to pull data from my database. More specifically, I use the db.select method. So I manage to pull out only the values from the columns or only the names of the columns, but I need to pull out in the format NAME: VALUE. Help how to do this?
connection = engine.connect()
metadata = db.MetaData()
# Reflect the existing "report" table's columns from the live database
# instead of declaring them by hand (1.x-style reflection arguments).
report = db.Table('report', metadata, autoload=True, autoload_with=engine)
# SELECT every column of "report" (1.x-style list argument to select()).
query = db.select([report])
ResultProxy = connection.execute(query)
# fetchall() returns a list of row objects (tuple-like, see below for
# dictionary-style access).
ResultSet = ResultProxy.fetchall()
With SQLAlchemy 1.4+ we can use .mappings() to return results in a dictionary-like format:
import sqlalchemy as sa
from pprint import pprint  # used below but missing from the original snippet

# …
t = sa.Table(
    "t",
    sa.MetaData(),
    sa.Column("id", sa.Integer, primary_key=True, autoincrement=False),
    sa.Column("txt", sa.String),
)
t.create(engine)

# insert some sample data
with engine.begin() as conn:
    conn.exec_driver_sql(
        "INSERT INTO t (id, txt) VALUES (1, 'foo'), (2, 'bar')"
    )

# test code
with engine.begin() as conn:
    # sa.select — the original used a bare `select` that was never imported
    # under the visible imports. .mappings() yields dict-like rows.
    results = conn.execute(sa.select(t)).mappings().fetchall()
    pprint(results)
    # [{'id': 1, 'txt': 'foo'}, {'id': 2, 'txt': 'bar'}]
As the docs state, ResultProxy.fetchall() returns a list of RowProxy objects. These behave like namedtuples, but can also be used like dictionaries:
>>> ResultSet[0]['column_name']
column_value
For more info, see https://docs.sqlalchemy.org/en/13/core/tutorial.html#coretutorial-selecting

How to insert a nil_uuid in Postgres, using SQLAlchemy?

Using Python and SQLAlchemy, is it possible to insert None / NIL_UUID / NULL value in a Postgresql foreign key column that links to a primary key, both stored as UUID ?
Using None results in the error column "none" does not exist:
statement = "INSERT INTO tb_person (pk_person, first_name, last_name, fk_person_parent) VALUES ('9ce131...985
fea06', 'John', 'Doe', None)"
parameters = {}, context = <sqlalchemy.dialects.postgresql.psycopg2.PGExecutionContext_psycopg2 object at 0x7fbff5ea2730>
def do_execute(self, cursor, statement, parameters, context=None):
> cursor.execute(statement, parameters)
E psycopg2.errors.UndefinedColumn: column "none" does not exist
E LINE 1: '9ce131...985','John', 'Doe', None)
E ^
E HINT: Perhaps you meant to reference the column "tb_person.last_name".
../../.local/share/virtualenvs/project/lib/python3.8/site-packages/sqlalchemy/engine/default.py:593: UndefinedColumn
a NIL_UUID (i.e. a valid UUID formed with 0s) returns psycopg2.errors.ForeignKeyViolation:
E psycopg2.errors.ForeignKeyViolation: insert or update on table "tb_person" violates foreign key constrain
t "tb_person_fk_person_parent_fkey"
E DETAIL: Key (fk_person_parent)=(00000000-0000-0000-0000-000000000000) is not present in table "tb_person
".
MORE DETAILS
I use SQLAlchemy classical mapping (SQLAlchemy Core), my table is defined like this :
tb_person = Table(
    "tb_person",
    metadata,
    # Primary-key-style column: unique, non-nullable, client-generated UUID.
    Column(
        "pk_person",
        UUID(as_uuid=True),
        default=uuid.uuid4,
        unique=True,
        nullable=False,
    ),
    Column("first_name", String(255)),
    Column("last_name", String(255)),
    # Self-referential foreign key to the parent person; NULL = no parent.
    Column(
        "fk_person_parent",
        UUID(as_uuid=True),
        ForeignKey("tb_person.pk_person"),
        nullable=True,
    ),
)
The mapper is defined like this :
# Classical (imperative) mapping: bind the domain class to tb_person and
# declare a self-referential relationship for child persons.
client_mapper = mapper(
domain.model.Person,
tb_person,
properties={
"child": relationship(domain.model.Person),
},
)
The unit test works well when inserting a UUID that already exists in the database in the pk_person field.
I was using raw SQL, and as suggested by @AdrianKlaver it is much preferable to use params - it fixes the problem.
# In the context of this unit test,
# we have to handle UUID generation here
# In the context of this unit test,
# we have to handle UUID generation here
uuid_1 = uuid.uuid4()
uuid_2 = uuid.uuid4()
uuid_3 = uuid.uuid4()
# Fixes vs. the original snippet: comma added between the 2nd and 3rd VALUES
# rows; column renamed fk_parent_person -> fk_person_parent to match the
# table definition (and the FK error message); parameter dict made valid
# ("uuid_2" had no value and ":uuid_none" had a stray colon in its key).
session.execute(
    """
    INSERT INTO tb_person
    (pk_person, first_name, last_name, fk_person_parent)
    VALUES
    (:uuid_1, 'John', 'Doe', :uuid_none),
    (:uuid_2, 'Jean', 'Dupont', :uuid_none),
    (:uuid_3, 'Baby', 'Doe', :uuid_1)
    """,
    {"uuid_1": uuid_1, "uuid_2": uuid_2, "uuid_3": uuid_3, "uuid_none": None},
)
It effectively translates to NULL in the query.

PyMongo .find(data) to return all objs with key and value

I have a database with orders, I want to search for data = {"user": "Bob"} and return all documents with data.
I tried doing
data = {"user": "Bob"}
order_list = orders.find(data)
for order in order_list:
print(order)
but it just gives back an empty cursor, when I do list(orders.find(data)) it's empty.
Make sure you are connecting to the right database. If you don't specify a database, the default database of test will be used. I would recommend always declaring a db variable with the specific database selected, as per mydatabase in this example:
from pymongo import MongoClient

# Select the target database explicitly instead of relying on the default.
db = MongoClient()['mydatabase']
orders = db.orders

data = {"user": "Bob"}
orders.insert_one(data)

# The same filter now matches the freshly inserted document.
cursor = orders.find(data)
for order in cursor:
    print(order)
prints:
{'_id': ObjectId('5fba78111e5694dd7fddbe3d'), 'user': 'Bob'}

Python MySql query silently failing

I've got a MySql database (running using the stock Docker image) and it contains a table defined like this:
-- Edit-audit table: composite primary key over (id, context, field).
CREATE TABLE `Edits` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`context` varchar(255) NOT NULL,
`field` varchar(255) NOT NULL,
`value` text,
`username` varchar(255) DEFAULT NULL,
`requested` datetime DEFAULT NULL,
PRIMARY KEY (`id`,`context`,`field`)
) ENGINE=InnoDB AUTO_INCREMENT=15 DEFAULT CHARSET=latin1
I'm connecting to it in a WSGI application, and my connection code looks like this:
import contextlib
import MySQLdb

# Single module-level connection shared by the whole WSGI app.
mysql = MySQLdb.connect(host='host', db='db')

def _cursor():
    """Return a self-closing cursor, transparently reconnecting first.

    ping(True) asks the driver to reopen the connection if it has dropped;
    contextlib.closing makes the cursor usable in a ``with`` statement.
    """
    mysql.ping(True)
    return contextlib.closing(mysql.cursor())
def _exec(sql, params=None):
    """Execute *sql* with optional bind *params* on a short-lived cursor.

    NOTE: no commit happens here — this is the version from the question,
    which leaves the transaction open (see the fix further down).
    """
    with _cursor() as cur:
        cur.execute(sql, params)
def save_edits(id, context, field, value, username):
    """Upsert one edit row; REPLACE removes any row with the same PK first.

    The timestamp is taken server-side via UTC_TIMESTAMP().
    """
    sql = (
        'REPLACE INTO Edits SET id = %(id)s, context = %(context)s, '
        'field = %(field)s, value = %(value)s, username = %(username)s, '
        'requested = UTC_TIMESTAMP()'
    )
    return _exec(sql, {
        'id': id,
        'context': context,
        'field': field,
        'value': value,
        'username': username,
    })
When I call the save_edits function, it doesn't throw an exception, but it fails to update the database. Furthermore, attempting to run the query REPLACE INTO Edits SET id = 1, context = 'x', field = 'y', value = 'z', username = 'foo', requested = UTC_TIMESTAMP(); through a mysql shell afterwards fails with an ERROR 1205 (HY000): Lock wait timeout exceeded; try restarting transaction error. If I wait about 5 minutes, however, that error goes away and I can run the REPLACE INTO query against the MySql database again.
What's going on? How can I fix this error?
It turns out that autocommit is false by default, and closing the cursor without closing the connection will keep the transaction open. I changed the method to:
def _exec(sql, params=None):
    """Execute *sql* and commit, so the change is actually persisted.

    The commit is the fix: with autocommit disabled, closing only the
    cursor left the transaction (and its row locks) open.
    """
    with _cursor() as cur:
        cur.execute(sql, params)
    mysql.commit()
And that fixed the problem.

Categories