Hmm, is there any reason why SA tries to assign None to varchar columns that have defaults set in the database schema? It doesn't do that for floats or ints (I'm using reflection).
So when I try to add a new row, like:
u = User()
u.foo = 'a'
u.bar = 'b'
SA issues a query with a lot more columns, with None assigned to them, and the DB obviously barfs and doesn't perform default substitution.
What version do you use, and what is the actual code? Below is sample code showing that the server_default parameter works fine for string fields:
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
metadata = MetaData()
Base = declarative_base(metadata=metadata)
class Item(Base):
    __tablename__ = "items"
    id = Column(String, primary_key=True)
    int_val = Column(Integer, nullable=False, server_default='123')
    str_val = Column(String, nullable=False, server_default='abc')
engine = create_engine('sqlite://', echo=True)
metadata.create_all(engine)
session = sessionmaker(engine)()
item = Item(id='foo')
session.add(item)
session.commit()
print item.int_val, item.str_val
The output is:
<...>
<...> INSERT INTO items (id) VALUES (?)
<...> ['foo']
<...>
123 abc
I've found it's a bug in SA; this happens only for string fields, which don't get the server_default property for some unknown reason. I've already filed a ticket for this.
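Until the ticket is resolved, a possible workaround sketch (the table and column names here are hypothetical, and an engine is assumed to exist already): override the affected string column while reflecting, so SA knows the server supplies the value and leaves the column out of its INSERT:
from sqlalchemy import MetaData, Table, Column, String
from sqlalchemy.schema import FetchedValue

metadata = MetaData()
# an explicit Column overrides the reflected one; FetchedValue marks it
# as having a server-side default without restating the default itself
users = Table('users', metadata,
              Column('foo', String, server_default=FetchedValue()),
              autoload=True, autoload_with=engine)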
Related
I'm trying to do a large number of inserts with one call, and the way someone here recommended was by giving .insert a list of dictionaries. This is using SQLAlchemy Core.
As an example:
import sys
import sqlalchemy as db
from sqlalchemy import exc

try:
    engine = db.create_engine(f"postgres://user:pass@myip/addressbook", connect_args={'connect_timeout': 5})
    connection = engine.connect()
    metadata = db.MetaData()
except exc.OperationalError:
    print_error(f":: Could not connect to myip!")
    sys.exit()

table_addressbook = db.Table('addressbook', metadata, autoload=True, autoload_with=engine)

rows = []
rows.append({'firstname': "John", 'lastname': "Doe"})
rows.append({'firstname': "Jane", 'lastname': "Doe"})

query = db.insert(table_addressbook).values(rows)
connection.execute(query)
But I'm getting an error saying the column id violates a non-null constraint. This is because insert normally auto-generates the primary-key id. How do I use this method but specify that id should be auto-generated? Or is there a different method I should use?
edit
Table name is addressbook.
Column id is type integer with default sequence 'untitled_table_id_seq', constraints are PRIMARY_KEY. This was autogenerated by Postico for Mac, but I've always been able to insert without including id and it auto increments from the last inserted ID.
Columns firstname and lastname are type text, no default, no constraints.
Without any information on your model and/or connection it is a bit difficult to answer your question. Please find below a piece of code which uses insert without throwing non-null constraint errors. Hopefully it helps you.
from sqlalchemy import create_engine, Column, Integer, String, Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import insert
engine = create_engine('sqlite:///:memory:', echo=True)
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    firstname = Column(String)
    lastname = Column(String)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
Session.configure(bind=engine) # once engine is available
session = Session()
new_users = []
new_users.append({'firstname': "John", 'lastname': "Doe"})
new_users.append({'firstname': "Jane", 'lastname': "Doe"})
i = insert(User).values(new_users)
session.execute(i)
PS: most of this is coming from the tutorial on: https://docs.sqlalchemy.org/en/13/orm/tutorial.html
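One small note on the snippet above: since it executes through a Session, you would typically commit afterwards to persist the rows:
session.commit()  # persist the INSERT executed above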
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
engine = create_engine('sqlite:///:memory:', echo=True)
Base = declarative_base()
# Example Model definition for the illustration
class Customer(Base):
    __tablename__ = "customer"
    id = Column(Integer, primary_key=True)
    name = Column(String(255))
    description = Column(String(255))
Base.metadata.create_all(engine)
######################################################
# Bulk insert using dictionaries.
######################################################
# Insert test records into the `customer` table.
def bulk_insert_customers(n):
    session = Session(bind=engine)
    session.bulk_insert_mappings(
        Customer,
        [
            dict(
                name="customer name %d" % i,
                description="customer description %d" % i,
            )
            for i in range(n)
        ],
    )
    session.commit()
Refer to these for more examples of how to do bulk inserts in different ways:
https://docs.sqlalchemy.org/en/13/_modules/examples/performance/bulk_inserts.html
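Going back to the Core-only setup from the question, a hedged sketch (names taken from the question): passing the list of dictionaries as the second argument to connection.execute issues an executemany-style INSERT that only includes the dictionary keys, so id is omitted and the database default fills it in.
rows = [
    {'firstname': "John", 'lastname': "Doe"},
    {'firstname': "Jane", 'lastname': "Doe"},
]
# executemany form: only firstname and lastname appear in the INSERT,
# so id is absent from the statement and the sequence default applies
connection.execute(table_addressbook.insert(), rows)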
I created a table with a primary key and a sequence, but per the debug output, and later looking at the table design, the sequence isn't applied, just created.
from sqlalchemy import create_engine, MetaData, Table, Column,Integer,String,Boolean,Sequence
from sqlalchemy.orm import mapper, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import json
class Bookmarks(object):
    pass

#----------------------------------------------------------------------
engine = create_engine('postgresql://iser:p@host/sconf', echo=True)
Base = declarative_base()

class Tramo(Base):
    __tablename__ = 'tramos'
    __mapper_args__ = {'column_prefix': 'tramos'}
    id = Column(Integer, Sequence('seq_tramos_id', start=1, increment=1), primary_key=True)
    nombre = Column(String)
    tramo_data = Column(String)
    estado = Column(Boolean, default=True)

    def __init__(self, nombre, tramo_data):
        self.nombre = nombre
        self.tramo_data = tramo_data

    def __repr__(self):
        return '[id:%d][nombre:%s][tramo:%s]' % (self.id, self.nombre, self.tramo_data)
Session = sessionmaker(bind=engine)
session = Session()
tabla = Tramo.__table__
metadata = Base.metadata
metadata.create_all(engine)
The table is just created like this:
CREATE TABLE tramos (
    id INTEGER NOT NULL,
    nombre VARCHAR,
    tramo_data VARCHAR,
    estado BOOLEAN,
    PRIMARY KEY (id)
)
I was hoping to see the declaration of the default nextval of the sequence, but it isn't there.
I also used __mapper_args__, but it looks like it's being ignored.
Am I missing something?
I realize this is an old thread, but I stumbled on it with the same problem and was unable to find a solution anywhere else.
After some experimenting I was able to solve this with the following code:
TABLE_ID = Sequence('table_id_seq', start=1000)

class Table(Base):
    __tablename__ = 'table'
    id = Column(Integer, TABLE_ID, primary_key=True, server_default=TABLE_ID.next_value())
This way the sequence is created and is used as the default value for column id, with the same behavior as if created implicitly by SQLAlchemy.
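For reference, on PostgreSQL the resulting DDL should then look roughly like this (sketched from the snippet above, not copied from actual output):
CREATE SEQUENCE table_id_seq START WITH 1000

CREATE TABLE "table" (
    id INTEGER DEFAULT nextval('table_id_seq') NOT NULL,
    PRIMARY KEY (id)
)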
I ran into a similar issue with composite multi-column primary keys. SERIAL is only implicitly applied to a single-column primary key. However, this behaviour can be controlled via the autoincrement argument (which defaults to "auto"):
id = Column(Integer, primary_key=True, autoincrement=True)
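For illustration, a hedged sketch (the model here is invented, not from the question) of a composite primary key where exactly one column should autoincrement:
class DocVersion(Base):
    __tablename__ = 'doc_versions'
    # composite PK: mark explicitly which column autoincrements, since
    # "auto" cannot pick one on a multi-column primary key by itself
    doc_id = Column(Integer, primary_key=True, autoincrement=True)
    version = Column(Integer, primary_key=True, autoincrement=False)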
You specified an explicit Sequence() object with a name. If you were to omit that, SERIAL would be added to the id primary key specification:
CREATE TABLE tramos (
    id SERIAL NOT NULL,
    nombre VARCHAR,
    tramo_data VARCHAR,
    estado BOOLEAN,
    PRIMARY KEY (id)
)
A DEFAULT is only generated if the column is not a primary key.
When inserting, SQLAlchemy will issue a select nextval(..) as needed to create a next value. See the PostgreSQL documentation for details.
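If you ever need the next value by hand, a hedged sketch in 1.x-style Core (assuming the engine and the seq_tramos_id sequence from the question):
from sqlalchemy import Sequence, select

seq = Sequence('seq_tramos_id')
# renders SELECT nextval('seq_tramos_id') on PostgreSQL
next_id = engine.execute(select([seq.next_value()])).scalar()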
I went searching, without result, for a way to get the integer or boolean value from an object model created via SQLAlchemy.
I mean, I can add it and it works flawlessly, but I can't get the integer or boolean value; all I get when I try to print it is the column object's name:
from sqlalchemy import create_engine, MetaData, Table, Column,Integer,String,Boolean,Sequence
from sqlalchemy.orm import mapper, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import json
class Bookmarks(object):
    pass

#----------------------------------------------------------------------
engine = create_engine('postgresql://u:p@localghost/asd', echo=True)
Base = declarative_base()

class Tramo(Base):
    __tablename__ = 'tramos'
    __mapper_args__ = {'column_prefix': 'tramos'}
    id = Column(Integer, primary_key=True)
    nombre = Column(String)
    tramo_data = Column(String)
    estado = Column(Boolean, default=True)

    def __init__(self, nombre, tramo_data):
        self.nombre = nombre
        self.tramo_data = tramo_data

    def __repr__(self):
        return "[id:%s][nombre:%s][tramo:%s]" % (getattr(self, 'id'), self.nombre, self.tramo_data)
Session = sessionmaker(bind=engine)
session = Session()
tabla = Tramo.__table__
metadata = Base.metadata
metadata.create_all(engine)
b=Tramo('tramo1','adadas')
session.add(b)
session.commit()
print b
print b.id
It prints:
[id:tramos.id][nombre:tramo1][tramo:adadas]
tramos.id
I can't get it to print the id value; it looks like the column object is in there, but it doesn't return the value of the property.
I even used
session.refresh(b)
after the add, but the result is the same.
According to the documentation Naming All Columns with a Prefix:
...prefix to the mapped attribute names relative to the
(table) column name ...
Since you define the mapped attributes in your class, I do not think it does what you desire.
Solution-1: remove the 'column_prefix':'tramos' from your __mapper_args__
Solution-2: print b.tramosid will print its id. You would need to change the __repr__ accordingly:
def __repr__(self):
    return "[id:%s][nombre:%s][tramo:%s]" % (getattr(self, 'tramosid'), self.nombre, self.tramo_data)
I'm making a WebService that sends specific tables in JSON.
I use SQLAlchemy to communicate with the database.
I want to retrieve just the columns the user has the right to see.
Is there a way to tell SQLAlchemy not to retrieve some columns?
It's not valid SQL, but something like this:
SELECT * EXCEPT column1 FROM table.
I know it is possible to specify just some columns in the SELECT statement, but that's not exactly what I want, because I don't know all the table's columns. I just want all the columns except a few.
I also tried to get all the columns and then delete the attribute I don't want, like this:
result = db_session.query(Table).all()
for row in result:
    row.__delattr__('column1')
but it seems SQLAlchemy doesn't allow this.
I get the warning:
Warning: Column 'column1' cannot be null
cursor.execute(statement, parameters)
What would be the most optimized way to do it?
Thank you
You can pass in all columns in the table, except the ones you don't want, to the query method.
session.query(*[c for c in User.__table__.c if c.name != 'password'])
Here is a runnable example:
#!/usr/bin/env python
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import Session
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    fullname = Column(String)
    password = Column(String)

    def __init__(self, name, fullname, password):
        self.name = name
        self.fullname = fullname
        self.password = password

    def __repr__(self):
        return "<User('%s','%s', '%s')>" % (self.name, self.fullname, self.password)
engine = create_engine('sqlite:///:memory:', echo=True)
Base.metadata.create_all(engine)
session = Session(bind=engine)
ed_user = User('ed', 'Ed Jones', 'edspassword')
session.add(ed_user)
session.commit()
result = session.query(*[c for c in User.__table__.c if c.name != 'password']).all()
print(result)
You can make the column a deferred column. This feature allows particular columns of a table to be loaded only upon direct access, instead of when the entity is queried using Query.
See Deferred Column Loading
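A minimal sketch of that approach, reusing the shape of the User model from the example above (shown standalone):
from sqlalchemy.orm import deferred

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    fullname = Column(String)
    # deferred: omitted from the usual SELECT and loaded with a separate
    # query only when user.password is actually accessed
    password = deferred(Column(String))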
This worked for me
users = db.query(models.User).filter(models.User.email != current_user.email).all()
return users
I have a Test model/table and a TestAuditLog model/table, using SQLAlchemy and SQL Server 2008. The relationship between the two is Test.id == TestAuditLog.entityId, with one test having many audit logs. TestAuditLog is intended to keep a history of changes to rows in the Test table. I want to track when a Test is deleted, also, but I'm having trouble with this. In SQL Server Management Studio, I set the FK_TEST_AUDIT_LOG_TEST relationship's "Enforce Foreign Key Constraint" property to "No", thinking that would allow a TestAuditLog row to exist with an entityId that no longer connects to any Test.id because the Test has been deleted. However, when I try to create a TestAuditLog with SQLAlchemy and then delete the Test, I get an error:
(IntegrityError) ('23000', "[23000] [Microsoft][ODBC SQL Server Driver][SQL Server]Cannot insert the value NULL into column 'AL_TEST_ID', table 'TEST_AUDIT_LOG'; column does not allow nulls. UPDATE fails. (515) (SQLExecDirectW); [01000] [Microsoft][ODBC SQL Server Driver][SQL Server]The statement has been terminated. (3621)") u'UPDATE [TEST_AUDIT_LOG] SET [AL_TEST_ID]=? WHERE [TEST_AUDIT_LOG].[AL_ID] = ?' (None, 8)
I think because of the foreign-key relationship between Test and TestAuditLog, after I delete the Test row, SQLAlchemy is trying to update all that test's audit logs to have a NULL entityId. I don't want it to do this; I want SQLAlchemy to leave the audit logs alone. How can I tell SQLAlchemy to allow audit logs to exist whose entityId does not connect with any Test.id?
I tried just removing the ForeignKey from my tables, but I'd like to still be able to say myTest.audits and get all of a test's audit logs, and SQLAlchemy complained about not knowing how to join Test and TestAuditLog. When I then specified a primaryjoin on the relationship, it grumbled about not having a ForeignKey or ForeignKeyConstraint with the columns.
Here are my models:
class TestAuditLog(Base, Common):
    __tablename__ = u'TEST_AUDIT_LOG'

    entityId = Column(u'AL_TEST_ID', INTEGER(), ForeignKey(u'TEST.TS_TEST_ID'),
                      nullable=False)
    ...

class Test(Base, Common):
    __tablename__ = u'TEST'

    id = Column(u'TS_TEST_ID', INTEGER(), primary_key=True, nullable=False)
    audits = relationship(TestAuditLog, backref="test")
    ...
And here's how I'm trying to delete a test while keeping its audit logs, their entityId intact:
test = Session.query(Test).first()
Session.begin()
try:
    Session.add(TestAuditLog(entityId=test.id))
    Session.flush()
    Session.delete(test)
    Session.commit()
except:
    Session.rollback()
    raise
You can solve this by:
POINT-1: not having a ForeignKey at either the RDBMS level or the SA level
POINT-2: explicitly specify join conditions for the relationship
POINT-3: mark relationship cascades to rely on passive_deletes flag
Fully working code snippet below should give you an idea (points are highlighted in the code):
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.orm import scoped_session, sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
engine = create_engine('sqlite:///:memory:', echo=False)
Session = sessionmaker(bind=engine)
class TestAuditLog(Base):
    __tablename__ = 'TEST_AUDIT_LOG'
    id = Column(Integer, primary_key=True)
    comment = Column(String)
    entityId = Column('TEST_AUDIT_LOG', Integer, nullable=False,
                      # POINT-1
                      #ForeignKey('TEST.TS_TEST_ID', ondelete="CASCADE"),
                      )

    def __init__(self, comment):
        self.comment = comment

    def __repr__(self):
        return "<TestAuditLog(id=%s entityId=%s, comment=%s)>" % (self.id, self.entityId, self.comment)

class Test(Base):
    __tablename__ = 'TEST'
    id = Column('TS_TEST_ID', Integer, primary_key=True)
    name = Column(String)
    audits = relationship(TestAuditLog, backref='test',
                          # POINT-2
                          primaryjoin="Test.id==TestAuditLog.entityId",
                          foreign_keys=[TestAuditLog.__table__.c.TEST_AUDIT_LOG],
                          # POINT-3
                          passive_deletes='all',
                          )

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "<Test(id=%s, name=%s)>" % (self.id, self.name)
Base.metadata.create_all(engine)
###################
## tests
session = Session()
# create test data
tests = [Test("test-" + str(i)) for i in range(3)]
_cnt = 0
for _t in tests:
    for __ in range(2):
        _t.audits.append(TestAuditLog("comment-" + str(_cnt)))
        _cnt += 1
session.add_all(tests)
session.commit()
session.expunge_all()
print '-'*80
# check test data, delete one Test
t1 = session.query(Test).get(1)
print "t: ", t1
print "t.a: ", t1.audits
session.delete(t1)
session.commit()
session.expunge_all()
print '-'*80
# check that audits are still in the DB for deleted Test
t1 = session.query(Test).get(1)
assert t1 is None
_q = session.query(TestAuditLog).filter(TestAuditLog.entityId == 1)
_r = _q.all()
assert len(_r) == 2
for _a in _r:
    print _a
Another option would be to duplicate the column used in the FK, and make the FK column nullable with the ON DELETE SET NULL option. This way you can still check the audit trail of deleted objects using the duplicated column.
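A hedged sketch of that alternative (the test_id name and the V2 table name are invented for illustration; the FK relies on the database's ON DELETE SET NULL, so the relationship on the Test side should still use passive_deletes as above):
class TestAuditLogV2(Base):
    __tablename__ = 'TEST_AUDIT_LOG_V2'
    id = Column(Integer, primary_key=True)
    # real FK: the database sets this to NULL when the Test row is deleted
    test_id = Column(Integer,
                     ForeignKey('TEST.TS_TEST_ID', ondelete='SET NULL'),
                     nullable=True)
    # plain duplicate of the Test id: survives deletion, so the audit
    # trail of deleted objects can still be queried through this column
    entityId = Column(Integer, nullable=False)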