import asyncio
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.future import select
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import sessionmaker
engine = create_async_engine(
    "postgresql+asyncpg://user:pass@localhost/db",
    echo=True,
)
# expire_on_commit=False will prevent attributes from being expired
# after commit.
async_session = sessionmaker(
    engine, expire_on_commit=False, class_=AsyncSession
)
Base = declarative_base()
class A(Base):
    __tablename__ = "a"
    id = Column(Integer, primary_key=True)
    name = Column(String, unique=True)
    data = Column(String)
    create_date = Column(DateTime, server_default=func.now())
    bs = relationship("B")

    # required in order to access columns with server defaults
    # or SQL expression defaults, subsequent to a flush, without
    # triggering an expired load
    __mapper_args__ = {"eager_defaults": True}
class B(Base):
    __tablename__ = "b"
    id = Column(Integer, primary_key=True)
    a_id = Column(ForeignKey("a.id"))
    data = Column(String)
async def async_main():
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.drop_all)
        await conn.run_sync(Base.metadata.create_all)

    async with async_session() as session:
        async with session.begin():
            session.add_all(
                [
                    A(bs=[B(), B()], data="a1"),
                    A(bs=[B()], data="a2"),
                ]
            )

    async with async_session() as session:
        result = await session.execute(select(A).order_by(A.id))
        a1 = result.scalars().first()
        # no issue:
        print(a1.name, a1.data)
        # throws error:
        print(a1.bs)

asyncio.run(async_main())
Trying to access a1.bs raises this error:

     59     current = greenlet.getcurrent()
     60     if not isinstance(current, _AsyncIoGreenlet):
---> 61         raise exc.MissingGreenlet(
     62             "greenlet_spawn has not been called; can't call await_() here. "
     63             "Was IO attempted in an unexpected place?"

MissingGreenlet: greenlet_spawn has not been called; can't call await_() here. Was IO attempted in an unexpected place? (Background on this error at: https://sqlalche.me/e/14/xd2s)
Here is how to fix it:
from sqlalchemy.orm import selectinload

async with async_session() as session:
    result = await session.execute(
        select(A).order_by(A.id).options(selectinload(A.bs))
    )
    a = result.scalars().first()
    print(a.bs)
The key is using the selectinload option to prevent implicit IO.
UPDATE
There are a few alternatives to selectinload, such as joinedload and lazyload; I am still trying to understand the differences.
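For context, lazy loading (lazyload) is the default relationship loading style and is exactly what raises MissingGreenlet under asyncio, while joinedload fetches the collection in the same SELECT via a LEFT OUTER JOIN instead of selectinload's second SELECT. A minimal sketch using joinedload, assuming the same A/B models and async_session as above (unique() is required when joined-eager-loading a collection):

from sqlalchemy.orm import joinedload

async with async_session() as session:
    result = await session.execute(
        select(A).order_by(A.id).options(joinedload(A.bs))
    )
    # unique() de-duplicates parent rows multiplied by the JOIN
    a1 = result.scalars().unique().first()
    print(a1.bs)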
From the docs, there are two key points:
1. Use selectinload
stmt = select(A).options(selectinload(A.bs))
2. Set expire_on_commit=False when creating the async session
# create AsyncSession with expire_on_commit=False
async_session = AsyncSession(engine, expire_on_commit=False)
# sessionmaker version
async_session = sessionmaker(
    engine, expire_on_commit=False, class_=AsyncSession
)
async with async_session() as session:
    result = await session.execute(select(A).order_by(A.id))
    a1 = result.scalars().first()
    # commit would normally expire all attributes
    await session.commit()
    # access attribute subsequent to commit; this is what
    # expire_on_commit=False allows
    print(a1.data)
Related
I am using SQLAlchemy and Python, without Flask. I run an infinite loop and invoke the select method that SQLAlchemy offers; it returns the values from my table, but when I change the value of a specific column from phpMyAdmin, the change is not reflected in the Python script. Could someone give me some advice or help? Thanks in advance.
PS: Here is the code for you to analyze:
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, create_engine, Float, DateTime, update, Date, Time
from sqlalchemy.sql import select
import time
import os

metadata = MetaData()

# (the 'configuracion' table referenced below is defined elsewhere,
# similarly to 'ahora')
ahora = Table('ahora', metadata,
              Column('id', Integer, primary_key=True),
              Column('temperatura', Float()),
              Column('humedad', Float()),
              Column('canal1', Float()),
              Column('canal2', Float()),
              Column('canal3', Float()),
              Column('canal4', Float()),
              Column('hora', Time()),
              )

engine = create_engine('mysql+pymysql://javi:javiersolis12@10.0.0.20/Tuti')
connection = engine.connect()

while True:
    # selects the single row in the Configuracion table
    query = select([configuracion])
    confi_actual = connection.execute(query).fetchone()
    query_aux = select([ahora])
    datos_actuales = connection.execute(query_aux).fetchone()
    print(datos_actuales)
    time.sleep(8)
You can specify the components you need to select in a Query on your Session, and then use, for example, all() to get all the rows. See the following example:
import sqlalchemy as sa
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()
...

class Ahora(Base):
    __tablename__ = 'ahora'
    id = sa.Column(sa.Integer, primary_key=True)
    temperatura: float = sa.Column(sa.Float)
    ...

...
engine = create_engine('mysql+pymysql://javi:javiersolis12@10.0.0.20/Tuti')
session = Session(engine)
query = session.query(Ahora)
# some actions with query, for example filter(), limit(), etc.
ahora = query.all()
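Separately, the stale reads in the question are likely caused by transaction isolation: InnoDB's default REPEATABLE READ level gives a long-lived transaction a consistent snapshot, so rows changed from phpMyAdmin stay invisible until the transaction ends. A sketch of one workaround, assuming the Session and Ahora model from the answer above:

import time

while True:
    datos = session.query(Ahora).all()
    print(datos)
    # end the current transaction so the next query starts a new
    # snapshot and sees data committed from phpMyAdmin
    session.commit()  # session.rollback() also works here
    time.sleep(8)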
I have a simple database storing an attachment as a blob.
CREATE TABLE public.attachment
(
    id integer NOT NULL,
    attachdata oid,
    CONSTRAINT attachment_pkey PRIMARY KEY (id)
)

-- Import a file.
INSERT INTO attachment (id, attachdata) VALUES (1, lo_import('C:\temp\blob_import.txt'))

-- Export it back as a file.
SELECT lo_export(attachdata, 'C:\temp\blob_export_postgres.txt') FROM attachment WHERE id = 1
I'm able to read this file back using psycopg2 directly.
from psycopg2 import connect
con = connect(dbname="blobtest", user="postgres", password="postgres", host="localhost")
cur = con.cursor()
cur.execute("SELECT attachdata FROM attachment WHERE id = 1")
oid = cur.fetchone()[0]
obj = con.lobject(oid)
obj.export('C:\\temp\\blob_export_psycopg.txt')
When I try the same using SQLAlchemy, attachdata is a bytestring of zeros.
I've tested the following code with types like BLOB, LargeBinary and BINARY.
The size of the attachdata bytestring seems to be the OID's value.
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, Binary
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()
Session = sessionmaker()
engine = create_engine('postgresql://postgres:postgres@localhost:5432/blobtest', echo=True)
Base.metadata.create_all(engine)
Session.configure(bind=engine)

class Attachment(Base):
    __tablename__ = "attachment"
    id = Column(Integer, primary_key=True)
    attachdata = Column(Binary)

session = Session()
attachment = session.query(Attachment).get(1)

with open('C:\\temp\\blob_export_sqlalchemy.txt', 'wb') as f:
    f.write(attachment.attachdata)
I've searched the SQLAlchemy documentation and various sources and couldn't find a solution for how to export the binary data using SQLAlchemy.
I had the same problem. There seems to be no way to get the large object data via the ORM. So I combined the ORM and the psycopg2 engine like this:
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.dialects.postgresql import OID

Base = declarative_base()
session_factory = sessionmaker()
engine = create_engine('postgresql+psycopg2://postgres:postgres@localhost:5432/postgres', echo=True)
Base.metadata.create_all(engine)
session_factory.configure(bind=engine)
Session = scoped_session(session_factory)

class Attachment(Base):
    __tablename__ = "attachment"
    id = Column(Integer, primary_key=True)
    oid = Column(OID)

    @classmethod
    def insert_file(cls, filename):
        conn = engine.raw_connection()
        l_obj = conn.lobject(0, 'wb', 0)
        with open(filename, 'rb') as f:
            l_obj.write(f.read())
        conn.commit()
        conn.close()
        session = Session()
        attachment = cls(oid=l_obj.oid)
        session.add(attachment)
        session.commit()
        return attachment.id

    @classmethod
    def get_file(cls, attachment_id, filename):
        session = Session()
        attachment = session.query(Attachment).get(attachment_id)
        conn = engine.raw_connection()
        l_obj = conn.lobject(attachment.oid, 'rb')
        with open(filename, 'wb') as f:
            f.write(l_obj.read())
        conn.close()

if __name__ == '__main__':
    my_id = Attachment.insert_file(r'C:\path\to\file')
    Attachment.get_file(my_id, r'C:\path\to\file_out')
Not very elegant but it seems to work.
Update: I am using events now.
from sqlalchemy import create_engine, event
from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session, object_session
from sqlalchemy.dialects.postgresql import OID

Base = declarative_base()
session_factory = sessionmaker()
engine = create_engine('postgresql+psycopg2://postgres:postgres@localhost:5432/postgres', echo=True)
Base.metadata.create_all(engine)
session_factory.configure(bind=engine)
Session = scoped_session(session_factory)

class Data(Base):
    __tablename__ = "attachment"
    id = Column(Integer, primary_key=True)
    oid = Column(OID)

@event.listens_for(Data, 'after_delete')
def remove_large_object_after_delete(_, connection, target):
    raw_connection = connection.connection
    l_obj = raw_connection.lobject(target.oid, 'n')
    l_obj.unlink()
    raw_connection.commit()

@event.listens_for(Data, 'before_insert')
def add_large_object_before_insert(_, connection, target):
    raw_connection = connection.connection
    l_obj = raw_connection.lobject(0, 'wb', 0)
    target.oid = l_obj.oid
    l_obj.write(target.ldata)
    raw_connection.commit()

@event.listens_for(Data, 'load')
def inject_large_object_after_load(target, _):
    session = object_session(target)
    conn = session.get_bind().raw_connection()
    l_obj = conn.lobject(target.oid, 'rb')
    target.ldata = l_obj.read()
if __name__ == '__main__':
    session = Session()
    # Put
    data = Data()
    data.ldata = b'your large data'  # binary-mode large objects expect bytes
    session.add(data)
    session.commit()
    id = data.id
    # Get (returns the same identity-mapped instance as data)
    data2 = session.query(Data).get(id)
    print(data2.ldata)  # your large data is here
    # Delete
    session.delete(data2)
    session.commit()
    session.close()
Works well so far.
I don't understand why Postgres large objects are so neglected these days. I use them a ton, or let's say I want to, but it's challenging, especially with asyncio...
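For what it's worth, the server-side large-object functions work over any driver, including async ones that lack psycopg2's lobject API. A sketch using asyncpg directly, assuming the attachment table from the events example above and a local database (lo_from_bytea() and lo_get() are built-in PostgreSQL functions):

import asyncio
import asyncpg

async def main():
    conn = await asyncpg.connect('postgresql://postgres:postgres@localhost/postgres')
    # create a large object from bytes; the server assigns and returns its OID
    oid = await conn.fetchval("SELECT lo_from_bytea(0, $1)", b'your large data')
    await conn.execute("INSERT INTO attachment (oid) VALUES ($1)", oid)
    # read the large object back in one round trip
    data = await conn.fetchval("SELECT lo_get($1::oid)", oid)
    print(data)
    await conn.close()

asyncio.run(main())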
I have the following blocking code:
import json

from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)

def find_places():
    query = "Select ... From ... Where ..."
    result = db.session.execute(query)
    return json.dumps([dict(r) for r in result])

@app.route('/')
def videos():
    return find_places()

if __name__ == '__main__':
    db = SQLAlchemy(app)
    app.run()
How can I make this code asynchronous?
Take a look at aiopg, it's the best (and possibly the only) asynchronous Postgres library for Python.
They also have optional SQLAlchemy integration. I'll just copy from their README:
import asyncio
from aiopg.sa import create_engine
import sqlalchemy as sa

metadata = sa.MetaData()

tbl = sa.Table('tbl', metadata,
               sa.Column('id', sa.Integer, primary_key=True),
               sa.Column('val', sa.String(255)))

async def create_table(engine):
    async with engine.acquire() as conn:
        await conn.execute('DROP TABLE IF EXISTS tbl')
        await conn.execute('''CREATE TABLE tbl (
                                  id serial PRIMARY KEY,
                                  val varchar(255))''')

async def go():
    async with create_engine(user='aiopg',
                             database='aiopg',
                             host='127.0.0.1',
                             password='passwd') as engine:
        async with engine.acquire() as conn:
            await conn.execute(tbl.insert().values(val='abc'))
            async for row in conn.execute(tbl.select()):
                print(row.id, row.val)

loop = asyncio.get_event_loop()
loop.run_until_complete(go())
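Note that since version 1.4, SQLAlchemy itself ships native asyncio support (it is what the first question on this page uses), which may be a better fit today than aiopg's wrapper. A minimal sketch of the same tbl example, assuming the asyncpg driver and the same credentials:

import asyncio
import sqlalchemy as sa
from sqlalchemy.ext.asyncio import create_async_engine

metadata = sa.MetaData()
tbl = sa.Table('tbl', metadata,
               sa.Column('id', sa.Integer, primary_key=True),
               sa.Column('val', sa.String(255)))

async def go():
    engine = create_async_engine('postgresql+asyncpg://aiopg:passwd@127.0.0.1/aiopg')
    async with engine.begin() as conn:
        # DDL runs through run_sync, since metadata.create_all is blocking
        await conn.run_sync(metadata.create_all)
        await conn.execute(tbl.insert().values(val='abc'))
        result = await conn.execute(tbl.select())
        for row in result:
            print(row.id, row.val)
    await engine.dispose()

asyncio.run(go())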
My goal is to read data off of an Excel sheet and create a database on a SQL server. I am trying to write sample code using SQLAlchemy and I am new to it. The code that I have so far is:
import time
from sqlalchemy import create_engine, Column, Integer, Date, String, Table, MetaData
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite:///:memory:', echo=False)
Base = declarative_base()

class blc(Base):
    __tablename__ = 'BLC_Databse'
    date = Column(String, primary_key=True)
    RES = Column(String)
    BTTLCOLUMN = Column(String)
    CS_HR = Column(Integer)

Base.metadata.create_all(engine)
sample = blc(date=time.strftime("%m/%d/%y"), RES='BDY_21', BTTLCOLUMN='2075', CS_HR=563)
Session = sessionmaker(bind=engine)
session = Session()
sample2 = blc(date=time.strftime("%m/%d/%y"), RES='BDY_21', BTTLCOLUMN='2076', CS_HR=375)
session.add(sample2)
session.commit()

with session.no_autoflush:
    result = session.query(blc).filter_by(RES='BDY_21').first()
    print(result)
When I perform a filter query (which I assume is similar to a WHERE clause in SQL), it prints <__main__.blc object at 0x00705770> instead of the row's values.
Eventually, I plan to have the insert clause in a loop that reads data from an Excel sheet.
result is an instance of the class blc. To get the desired column, I had to do result.ColName, for example result.RES.
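If you want print(result) itself to show the row's values, you can also give the model a __repr__; otherwise printing an ORM object falls back to the default object repr seen in the question. A small sketch (the format shown is just illustrative):

class blc(Base):
    __tablename__ = 'BLC_Databse'
    date = Column(String, primary_key=True)
    RES = Column(String)
    BTTLCOLUMN = Column(String)
    CS_HR = Column(Integer)

    def __repr__(self):
        # shown by print() instead of <__main__.blc object at 0x...>
        return (f"blc(date={self.date!r}, RES={self.RES!r}, "
                f"BTTLCOLUMN={self.BTTLCOLUMN!r}, CS_HR={self.CS_HR})")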
table:
id(integer primary key)
data(blob)
I use MySQL and SQLAlchemy.
To insert data I use:
o = Demo()
o.data = mydata
session.add(o)
session.commit()
I would like the insert to work like this:
INSERT INTO table(data) VALUES(COMPRESS(mydata))
How can I do this using sqlalchemy?
You can assign a SQL function to the attribute:
from sqlalchemy import func

o.data = func.compress(mydata)
session.add(o)
session.commit()
Here's an example using a more DB-agnostic lower() function:
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class A(Base):
    __tablename__ = "a"
    id = Column(Integer, primary_key=True)
    data = Column(String)

e = create_engine('sqlite://', echo=True)
Base.metadata.create_all(e)

s = Session(e)
a1 = A()
a1.data = func.lower("SomeData")
s.add(a1)
s.commit()
assert a1.data == "somedata"
You can make it automatic with @validates:
from sqlalchemy.orm import validates

class MyClass(Base):
    # ...

    data = Column(BLOB)

    @validates("data")
    def _set_data(self, key, value):
        return func.compress(value)
If you want it readable in Python before the flush, you'd need to memoize it locally and use a descriptor to access it; a sketch of that idea follows.
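A minimal sketch of that memoization idea, building on the MyClass model above; the _raw_data attribute and raw_data property are illustrative names, not SQLAlchemy API, and a plain property stands in for the descriptor:

from sqlalchemy import func
from sqlalchemy.orm import validates

class MyClass(Base):
    # ...

    data = Column(BLOB)

    @validates("data")
    def _set_data(self, key, value):
        self._raw_data = value       # memoize the uncompressed value locally
        return func.compress(value)  # the SQL expression that gets flushed

    @property
    def raw_data(self):
        # readable in Python before the flush; after a reload you would
        # have to SELECT UNCOMPRESS(data) from the database instead
        return getattr(self, "_raw_data", None)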