MySQL COMPRESS() with SQLAlchemy - Python

Table:

    id (integer, primary key)
    data (blob)

I use MySQL and SQLAlchemy. To insert data I use:

    o = Demo()
    o.data = mydata
    session.add(o)
    session.commit()

I would like the insert to work like this:

    INSERT INTO table(data) VALUES(COMPRESS(mydata))

How can I do this using SQLAlchemy?

You can assign a SQL function to the attribute:

    from sqlalchemy import func

    o = Demo()
    o.data = func.compress(mydata)
    session.add(o)
    session.commit()
Here's an example using a more DB-agnostic lower() function:

    from sqlalchemy import *
    from sqlalchemy.orm import *
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class A(Base):
        __tablename__ = "a"
        id = Column(Integer, primary_key=True)
        data = Column(String)

    e = create_engine('sqlite://', echo=True)
    Base.metadata.create_all(e)

    s = Session(e)
    a1 = A()
    a1.data = func.lower("SomeData")
    s.add(a1)
    s.commit()
    assert a1.data == "somedata"
You can make it automatic with @validates:

    from sqlalchemy.orm import validates

    class MyClass(Base):
        # ...
        data = Column(BLOB)

        @validates("data")
        def _set_data(self, key, value):
            return func.compress(value)
If you want it readable in Python before the flush, you'd need to memoize it locally and use a descriptor to access it; a rough sketch of that idea follows.
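Here is a minimal sketch of that memoization idea, assuming the Demo model from the question; the _data and _plain names are made up for illustration, not any SQLAlchemy API:

    from sqlalchemy import func, Column, Integer, BLOB
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Demo(Base):
        __tablename__ = "demo"
        id = Column(Integer, primary_key=True)
        _data = Column("data", BLOB)

        @property
        def data(self):
            # prefer the locally memoized plain value if we have one
            return self._plain if hasattr(self, "_plain") else self._data

        @data.setter
        def data(self, value):
            self._plain = value                # keep a readable copy locally
            self._data = func.compress(value)  # emits COMPRESS(...) on flush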

Related

How to test if a class object was created using Pytest

I wrote a habit tracker app and used SQLAlchemy to store the data in an SQLite3 database. Now I'm writing unit tests with Pytest for all the functions I wrote. Besides functions that return values, there are functions that create entries in the database by creating objects. Here's my object-relational mapper setup and the two main classes:
    import datetime

    from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, Date
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    # Setting up SQLAlchemy to connect to the local SQLite3 database
    Base = declarative_base()
    engine = create_engine('sqlite:///:main:', echo=True)
    Base.metadata.create_all(bind=engine)
    Session = sessionmaker(bind=engine)
    session = Session()

    class Habit(Base):
        __tablename__ = 'habit'
        habit_id = Column('habit_id', Integer, primary_key=True)
        name = Column('name', String, unique=True)
        periodicity = Column('periodicity', String)
        start_date = Column('start_date', Date)

    class HabitEvent(Base):
        __tablename__ = 'habit_event'
        event_id = Column('event_id', Integer, primary_key=True)
        date = Column('date', Date)
        habit_id = Column('fk_habit_id', Integer, ForeignKey(Habit.habit_id))
One of the creating functions is the following:
    def add_habit(name, periodicity):
        if str(periodicity) not in ['d', 'w']:
            print('Wrong periodicity. \nUse d for daily or w for weekly.')
        else:
            h = Habit()
            h.name = str(name)
            if str(periodicity) == 'd':
                h.periodicity = 'Daily'
            if str(periodicity) == 'w':
                h.periodicity = 'Weekly'
            h.start_date = datetime.date.today()
            session.add(h)
            session.commit()
            print('Habit added.')
Here's my question: since this function doesn't return a value that can be matched against an expected result, I don't know how to test whether the object was created. The same problem occurs when I want to check whether all objects were deleted using the following function:
    def delete_habit(habitID):
        id_list = []
        id_query = session.query(Habit).all()
        for i in id_query:
            id_list.append(i.habit_id)
        if habitID in id_list:
            delete_id = int(habitID)
            session.query(HabitEvent).filter(
                HabitEvent.habit_id == delete_id).delete()
            session.query(Habit).filter(Habit.habit_id == delete_id).delete()
            session.commit()
            print('Habit deleted.')
        else:
            print('Non existing Habit ID.')
If I understand correctly, you can utilize the get_habits function as part of the test for add_habit.
    def test_add_habit():
        name = 'test_add_habit'
        periodicity = 'd'
        add_habit(name, periodicity)
        # not sure of the input or output from get_habits, but possibly:
        results = get_habits(name)
        assert name in results['name']
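The same idea works for delete_habit: create a habit, delete it, then assert it is gone by querying directly. A sketch, assuming the module-level session, Habit, add_habit and delete_habit from the question are importable in the test module:

    def test_delete_habit():
        # create a habit we can delete again
        add_habit('test_delete_habit', 'd')
        habit = session.query(Habit).filter_by(name='test_delete_habit').one()
        delete_habit(habit.habit_id)
        # the row should no longer exist
        assert session.query(Habit).filter_by(
            name='test_delete_habit').first() is None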

How to have a common module for select() in Python SQLAlchemy?

I have 3 SQLAlchemy model classes for my tables: Table1, Table2 and Table3.
    from sqlalchemy import create_engine, text, select, MetaData, Table, func, Column, String, Integer
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.ext.declarative import declarative_base
    import sqlalchemy
    from settings import DATABASE_URI

    engine = create_engine(DATABASE_URI)
    Base = declarative_base()
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    metadata = MetaData(bind=None)
    session = Session()

    class Table1(Base):
        __tablename__ = 'table1'
        id = Column(u'id', Integer(), primary_key=True)
        name1 = Column(u'name1', String(50))

    class Table2(Base):
        __tablename__ = 'table2'
        id = Column(u'id', Integer(), primary_key=True)
        name2 = Column(u'name2', String(50))

    class Table3(Base):
        __tablename__ = 'table3'
        id = Column(u'id', Integer(), primary_key=True)
        name3 = Column(u'name3', String(50))

    class connectionTest():
        def wrapper_connection(self, table, column, value):
            # SELECT column FROM table1 WHERE column = value
            query = session.query(table)
            q = query.filter_by(column=value)
            session.execute(q)

    def main():
        ct = connectionTest()
        t1 = Table1()
        t2 = Table2()
        t3 = Table3()
        ct.wrapper_connection(t1, t1.name1, "Table1_Value_Information")
        ct.wrapper_connection(t2, t2.name2, "Table2_Value_Information")
        ct.wrapper_connection(t3, t3.name3, "Table3_Value_Information")

    if __name__ == '__main__':
        main()
I want wrapper_connection to handle all three tables with their different columns; basically, to make it as generalized as possible so it can handle any condition of the form (SELECT column FROM table WHERE column = value) through the SQLAlchemy ORM or Core library.

The issue I am facing is in this line:

    q = query.filter_by(column=value)

where I am trying to pass the column information in through the function argument t1.name1:

    ct.wrapper_connection(t1, t1.name1, "Table1_Value_Information")
Error I am facing:

    Traceback (most recent call last):
      File "C:\Users\<username>\AppData\Local\Programs\Python\Python37\lib\site-packages\sqlalchemy\orm\base.py", line 406, in _entity_descriptor
        return getattr(entity, key)
    AttributeError: type object 'Table1' has no attribute 'column'
The code in the question needs three changes:

1. In the wrapper_connection method, use Query.filter instead of Query.filter_by, because filter accepts a column object directly.
2. Don't call Base.metadata.create_all() until after the model classes have been declared.
3. t1 = Table1() creates a new instance of the Table1 class, i.e. a row. You want to query against the table, so use the model classes directly instead.
    class connectionTest:
        def wrapper_connection(self, table, column, value):
            # SELECT column FROM table1 WHERE column = value
            query = session.query(table)
            # We have the column object, so use filter
            # instead of filter_by
            q = query.filter(column == value)
            session.execute(q)

    # Create the tables after the model classes have been declared.
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()

    def main():
        ct = connectionTest()
        # Use the model classes directly.
        ct.wrapper_connection(Table1, Table1.name1, "Table1_Value_Information")
        ct.wrapper_connection(Table2, Table2.name2, "Table2_Value_Information")
        ct.wrapper_connection(Table3, Table3.name3, "Table3_Value_Information")
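If you are on SQLAlchemy 1.4 or later, the same wrapper could also be written with the newer select() construct instead of Query; a sketch under that assumption:

    from sqlalchemy import select

    class connectionTest:
        def wrapper_connection(self, table, column, value):
            # SELECT * FROM <table> WHERE <column> = <value>
            stmt = select(table).where(column == value)
            # scalars() unwraps the rows into ORM objects
            return session.execute(stmt).scalars().all()

Called the same way, e.g. ct.wrapper_connection(Table1, Table1.name1, "Table1_Value_Information"), this returns a list of Table1 instances.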

Query binary data using sqlalchemy with PostgreSQL

I have a simple database storing an attachment as a blob.
    CREATE TABLE public.attachment
    (
        id integer NOT NULL,
        attachdata oid,
        CONSTRAINT attachment_pkey PRIMARY KEY (id)
    )

    -- Import a file
    INSERT INTO attachment (id, attachdata) VALUES (1, lo_import('C:\temp\blob_import.txt'))

    -- Export back as file.
    SELECT lo_export(attachdata, 'C:\temp\blob_export_postgres.txt') FROM attachment WHERE id = 1
I'm able to read this file back using psycopg2 directly:

    from psycopg2 import connect

    con = connect(dbname="blobtest", user="postgres", password="postgres", host="localhost")
    cur = con.cursor()
    cur.execute("SELECT attachdata FROM attachment WHERE id = 1")
    oid = cur.fetchone()[0]
    obj = con.lobject(oid)
    obj.export('C:\\temp\\blob_export_psycopg.txt')
When I try the same using SQLAlchemy, attachdata is a bytestring of zeros. I've tested the following code with types like BLOB, LargeBinary and BINARY. The size of the attachdata bytestring seems to be the OID value.
    from sqlalchemy import create_engine
    from sqlalchemy import Column, Integer, Binary
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()
    Session = sessionmaker()
    engine = create_engine('postgresql://postgres:postgres@localhost:5432/blobtest', echo=True)
    Base.metadata.create_all(engine)
    Session.configure(bind=engine)

    class Attachment(Base):
        __tablename__ = "attachment"
        id = Column(Integer, primary_key=True)
        attachdata = Column(Binary)

    session = Session()
    attachment = session.query(Attachment).get(1)
    with open('C:\\temp\\blob_export_sqlalchemy.txt', 'wb') as f:
        f.write(attachment.attachdata)
I've searched the SQLAlchemy documentation and various other sources, but couldn't find a way to export the binary data using SQLAlchemy.
I had the same problem. There seems to be no way to get the large object data via the ORM. So I combined the ORM and the psycopg2 engine like this:
    from sqlalchemy import create_engine
    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker, scoped_session
    from sqlalchemy.dialects.postgresql import OID

    Base = declarative_base()
    session_factory = sessionmaker()
    engine = create_engine('postgresql+psycopg2://postgres:postgres@localhost:5432/postgres', echo=True)
    Base.metadata.create_all(engine)
    session_factory.configure(bind=engine)
    Session = scoped_session(session_factory)

    class Attachment(Base):
        __tablename__ = "attachment"
        id = Column(Integer, primary_key=True)
        oid = Column(OID)

        @classmethod
        def insert_file(cls, filename):
            conn = engine.raw_connection()
            l_obj = conn.lobject(0, 'wb', 0)
            with open(filename, 'rb') as f:
                l_obj.write(f.read())
            conn.commit()
            conn.close()
            session = Session()
            attachment = cls(oid=l_obj.oid)
            session.add(attachment)
            session.commit()
            return attachment.id

        @classmethod
        def get_file(cls, attachment_id, filename):
            session = Session()
            attachment = session.query(Attachment).get(attachment_id)
            conn = engine.raw_connection()
            l_obj = conn.lobject(attachment.oid, 'rb')
            with open(filename, 'wb') as f:
                f.write(l_obj.read())
            conn.close()

    if __name__ == '__main__':
        my_id = Attachment.insert_file(r'C:\path\to\file')
        Attachment.get_file(my_id, r'C:\path\to\file_out')
Not very elegant, but it seems to work.
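One caveat with get_file above: l_obj.read() pulls the whole large object into memory. psycopg2's lobject.read() accepts a size argument, so a hypothetical streaming variant (export_lobject and CHUNK_SIZE are made-up names) could copy it in chunks instead:

    CHUNK_SIZE = 1024 * 1024  # 1 MiB per read; arbitrary choice

    def export_lobject(raw_connection, oid, filename):
        # stream a PostgreSQL large object to disk chunk by chunk
        l_obj = raw_connection.lobject(oid, 'rb')
        with open(filename, 'wb') as f:
            while True:
                chunk = l_obj.read(CHUNK_SIZE)
                if not chunk:
                    break
                f.write(chunk)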
Update:
I am using events now
    from sqlalchemy import create_engine, event
    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker, scoped_session, object_session
    from sqlalchemy.dialects.postgresql import OID

    Base = declarative_base()
    session_factory = sessionmaker()
    engine = create_engine('postgresql+psycopg2://postgres:postgres@localhost:5432/postgres', echo=True)
    Base.metadata.create_all(engine)
    session_factory.configure(bind=engine)
    Session = scoped_session(session_factory)

    class Data(Base):
        __tablename__ = "attachment"
        id = Column(Integer, primary_key=True)
        oid = Column(OID)

    @event.listens_for(Data, 'after_delete')
    def remove_large_object_after_delete(_, connection, target):
        raw_connection = connection.connection
        l_obj = raw_connection.lobject(target.oid, 'n')
        l_obj.unlink()
        raw_connection.commit()

    @event.listens_for(Data, 'before_insert')
    def add_large_object_before_insert(_, connection, target):
        raw_connection = connection.connection
        l_obj = raw_connection.lobject(0, 'wb', 0)
        target.oid = l_obj.oid
        l_obj.write(target.ldata)
        raw_connection.commit()

    @event.listens_for(Data, 'load')
    def inject_large_object_after_load(target, _):
        session = object_session(target)
        conn = session.get_bind().raw_connection()
        l_obj = conn.lobject(target.oid, 'rb')
        target.ldata = l_obj.read()

    if __name__ == '__main__':
        session = Session()
        # Put
        data = Data()
        data.ldata = 'your large data'
        session.add(data)
        session.commit()
        id = data.id
        # Get
        data2 = session.query(Data).get(id)
        print(data.ldata)  # Your large data is here
        # Delete
        session.delete(data)
        session.delete(data2)
        session.commit()
        session.flush()
        session.close()
Works well so far.
I don't understand why Postgres large objects are so neglected these days. I use them a ton. Or let's say I want to, but it's challenging, especially with asyncio....

Autoflush error and filter_by() query giving unexpected result

My goal is to read data off of an Excel sheet and create a database on a SQL server. I am trying to write sample code using SQLAlchemy, which I am new to. The code that I have so far is:
    import time
    from sqlalchemy import create_engine, Column, Integer, Date, String, Table, MetaData, table
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('sqlite:///:memory:', echo=False)
    Base = declarative_base()

    class blc(Base):
        __tablename__ = 'BLC_Databse'
        date = Column(String, primary_key=True)
        RES = Column(String)
        BTTLCOLUMN = Column(String)
        CS_HR = Column(Integer)

    Base.metadata.create_all(engine)
    sample = blc(date=time.strftime("%m/%d/%y"), RES='BDY_21', BTTLCOLUMN='2075', CS_HR=563)
    Session = sessionmaker(bind=engine)
    session = Session()
    sample2 = blc(date=time.strftime("%m/%d/%y"), RES='BDY_21', BTTLCOLUMN='2076', CS_HR=375)
    session.add(sample2)
    session.commit()

    with session.no_autoflush:
        result = session.query(blc).filter_by(RES='BDY_21').first()
        print(result)
When I perform a filter query (which I assume is similar to a WHERE clause in SQL), it gives <__main__.blc object at 0x00705770> instead of the data. Eventually, I plan to have the insert clause in a loop that reads the data from an Excel sheet.
result is an instance of the class blc. To get the desired column, I had to use result.ColName.
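To make print(result) show something readable instead of the default object repr, a common convenience is to give the model a __repr__ method; a sketch for the blc class above:

    class blc(Base):
        __tablename__ = 'BLC_Databse'
        date = Column(String, primary_key=True)
        RES = Column(String)
        BTTLCOLUMN = Column(String)
        CS_HR = Column(Integer)

        def __repr__(self):
            # shown whenever the object is printed or echoed
            return '<blc date={} RES={} BTTLCOLUMN={} CS_HR={}>'.format(
                self.date, self.RES, self.BTTLCOLUMN, self.CS_HR)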

Converting SQL commands to Python's ORM

How would you convert the following code to a Python ORM such as SQLAlchemy?
    # 1. Putting data into Pg
    import os, pg, sys, re, psycopg2

    #conn = psycopg2.connect("dbname='tkk' host='localhost' port='5432' user='noa' password='123'")
    conn = psycopg2.connect("dbname=tk user=naa password=123")
    cur = conn.cursor()
    cur.execute("""INSERT INTO courses (course_nro)
                   VALUES ( %(course_nro)s )""", dict(course_nro='abcd'))
    conn.commit()

    # 2. Fetching
    cur.execute("SELECT * FROM courses")
    print cur.fetchall()
Examples of the two commands in SQLAlchemy:

insert:

    sqlalchemy.sql.expression.insert(table, values=None, inline=False, **kwargs)

select:

    sqlalchemy.sql.expression.select(columns=None, whereclause=None, from_obj=[], **kwargs)
After the initial declarations, you can do something like this:

    o = Course(course_nro='abcd')
    session.add(o)
    session.commit()

and

    print session.query(Course).all()
The declarations could look something like this:
    from sqlalchemy import *
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    # create an engine, and a base class
    engine = create_engine('postgresql://naa:123@localhost/tk')
    DeclarativeBase = declarative_base(bind=engine)
    metadata = DeclarativeBase.metadata

    # create a session
    Session = sessionmaker(bind=engine)
    session = Session()

    # declare the models
    class Course(DeclarativeBase):
        __tablename__ = 'courses'
        course_nro = Column('course_nro', CHAR(12), primary_key=True)
This declarative method is just one way of using SQLAlchemy; the Core expression language sketched below is another.
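Since the question cites the insert() and select() expression constructs, the same two commands could also be done without the ORM, using Core directly. A sketch with the same connection details, assuming SQLAlchemy 1.x, where execute() autocommits DML:

    from sqlalchemy import create_engine, MetaData, Table, Column, CHAR

    engine = create_engine('postgresql://naa:123@localhost/tk')
    metadata = MetaData()
    courses = Table('courses', metadata,
                    Column('course_nro', CHAR(12), primary_key=True))

    conn = engine.connect()
    # INSERT INTO courses (course_nro) VALUES ('abcd')
    conn.execute(courses.insert().values(course_nro='abcd'))
    # SELECT * FROM courses
    print(conn.execute(courses.select()).fetchall())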
Even though this is old, more examples can't hurt, right? I thought I'd demonstrate how to do this with PyORMish.
    from pyormish import Model

    class Course(Model):
        _TABLE_NAME = 'courses'
        _PRIMARY_FIELD = 'id'  # or whatever your primary field is
        _SELECT_FIELDS = ('id', 'course_nro')
        _COMMIT_FIELDS = ('course_nro',)

    Model.db_config = dict(
        DB_TYPE='postgres',
        DB_CONN_STRING='postgre://naa:123@localhost/tk'
    )
To create:

    new_course = Course().create(course_nro='abcd')

To select:

    # return the first row WHERE course_nro='abcd'
    new_course = Course().get_by_fields(course_nro='abcd')
