marshmallow `validates_schema` to reject unknown fields with `pass_many=True` - python

I struggle to understand how to handle unknown fields when the Schema is passed a list of objects for validation. This is how far I got:
class MySchema(Schema):
    # fields ...

    @marshmallow_decorators.validates_schema(pass_original=True)
    def check_unknown_fields(self, data, original_data):
        if isinstance(original_data, list):
            for dct in original_data:
                self._assert_no_unknown_field(dct)
        else:
            self._assert_no_unknown_field(original_data)

    def _assert_no_unknown_field(self, dct):
        unknown = set(dct.keys()) - set(self.fields)
        if unknown:
            raise MarshmallowValidationError('Unknown field', unknown)
But that obviously doesn't work, as the validator is run for all items in the list every time. Therefore the first error will be caught and returned for all items:
items = [
{'a': 1, 'b': 2, 'unknown1': 3},
{'a': 4, 'b': 5, 'unknown2': 6},
]
errors = MySchema(many=True).validate(items)
# {0: {'unknown1': ['Unknown field']}, 1: {'unknown1': ['Unknown field']}}
I was trying to think of a way to get only the single item from original_data corresponding to the data argument and validate only that one, but I can't really do that, as items have no id, or field that would make them searchable ...
Am I missing something? Is there a solution to this?

This is a workaround I came up with ... I wish it was simpler, but here it is :
from marshmallow import Schema, ValidationError as MarshmallowValidationError, fields

UNKNOWN_MESSAGE = 'unknown field'


class _RejectUnknownMixin(object):
    def _collect_unknown_fields_errors(self, schema, data):
        """
        Checks `data` against `schema` and returns a dictionary `{<field>: <error>}`
        if unknown fields are detected, or `{0: {<field>: <error>}, ..., N: {<field>: <error>}}`
        if `data` is a list.
        """
        if isinstance(data, list):
            validation_errors = {}
            for i, datum in enumerate(data):
                datum_validation_errors = self._collect_unknown_fields_errors(schema, datum)
                if datum_validation_errors:
                    validation_errors[i] = datum_validation_errors
            return validation_errors
        else:
            unknown = set(data.keys()) - set(schema.fields)
            return {name: [UNKNOWN_MESSAGE] for name in unknown}


class NestedRejectUnknown(fields.Nested, _RejectUnknownMixin):
    """
    Nested field that returns validation errors if unknown fields are detected.
    """
    def _deserialize(self, value, attr, data):
        validation_errors = {}
        try:
            result = super(NestedRejectUnknown, self)._deserialize(value, attr, data)
        except MarshmallowValidationError as err:
            validation_errors = err.normalized_messages()
        # Merge with unknown field errors
        validation_errors = _merge_dicts(
            self._collect_unknown_fields_errors(self.schema, value), validation_errors)
        if validation_errors:
            raise MarshmallowValidationError(validation_errors)
        return result


class SchemaRejectUnknown(Schema, _RejectUnknownMixin):
    """
    Schema that returns validation errors if unknown fields are detected.
    """
    def validate(self, data, **kwargs):
        validation_errors = super(SchemaRejectUnknown, self).validate(data, **kwargs)
        return _merge_dicts(
            self._collect_unknown_fields_errors(self, data), validation_errors)


def _merge_dicts(a, b, path=None):
    """
    Ref: https://stackoverflow.com/questions/7204805/dictionaries-of-dictionaries-merge
    Merges b into a.
    """
    if path is None:
        path = []
    for key in b:
        if key in a:
            if isinstance(a[key], dict) and isinstance(b[key], dict):
                _merge_dicts(a[key], b[key], path + [str(key)])
            elif a[key] == b[key]:
                # same leaf value
                pass
            else:
                raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
        else:
            a[key] = b[key]
    return a

In marshmallow 3.0+ there is the unknown option in Meta, i.e.:
def test_validate(self):
    class ModelSchema(Schema):
        class Meta:
            unknown = RAISE

        name = fields.String()

    schema = ModelSchema()
    data = dict(name='jfaleiro', xyz=2)
    schema.validate(data)  # passes
    schema.load(data)  # fails (as intended)
It is a bit contradictory that it passes validate() but fails on load(), though.
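If what you want back is the error dictionary rather than an exception, one option (a minimal sketch, assuming marshmallow 3) is to call load() yourself and catch the ValidationError it raises for unknown fields:
from marshmallow import RAISE, Schema, ValidationError, fields

class ModelSchema(Schema):
    class Meta:
        unknown = RAISE

    name = fields.String()

try:
    ModelSchema().load({'name': 'jfaleiro', 'xyz': 2})
except ValidationError as err:
    print(err.messages)  # something like {'xyz': ['Unknown field.']}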

Related

finds key but not KEY.lower() in dictionary Python

So I need to write this program where I create a class, and an object of that class is a dictionary with categories as keys and the words included in such categories as values (example: {'name': {'patrick', 'jane'}, 'discipline': {'geography', ...}, ...}).
At some point in the program (in that class) I have to create a method which takes the name of a category as an argument. I then have to pick a random word out of that category. In the dictionary all keys (categories) need to be lowercase, but when I give a category to choose a word from, that shouldn't matter.
Here is my code first (part of it):
import random

class MadLibs:
    def __init__(self, woordenschat={}):
        self.woordenschat = woordenschat

    def suggereren(self, categorie):
        assert categorie.lower() in self.woordenschat, 'onbekende categorie'
        randwoord = random.choice(list(self.woordenschat[categorie.lower()]))
        if categorie.isupper():
            return randwoord.upper()
        elif categorie.islower():
            return randwoord
        else:
            return randwoord.capitalize()
So say I have a category 'name' as a key in my dictionary with a sequence of words. When I then use the method suggereren and give 'name' as the argument it works, but when I give 'NAME' then self.woordenschat[categorie.lower()] returns an empty list (see the line where I initialize randwoord).
Would somebody be able to tell me why this happens?
UPDATE:
this is how you add the words in the dictionary, categorie is where you give the category, and woorden is where you give new words that belong to that category
def leren(self, categorie, woorden):
    if isinstance(woorden, (tuple, list, set)):
        woorden = set(woorden)
    else:
        woorden = {woorden}
    if categorie in self.woordenschat:
        self.woordenschat[categorie.lower()].add(woord.lower() for woord in woorden)
    else:
        self.woordenschat[categorie.lower()] = (woord.lower() for woord in woorden)
    return None
UPDATE:
It seems the way I added the words in leren was the problem; I got an error like: 'generator' object has no attribute 'add'.
Here's my new code:
def leren(self, categorie, woorden):
    if isinstance(woorden, (tuple, list, set)):
        woorden = set(woorden)
    else:
        woorden = {woorden}
    set_to_add = {woord.lower() for woord in woorden}
    if categorie in self.woordenschat:
        self.woordenschat[categorie.lower()].union(set_to_add)
    else:
        self.woordenschat[categorie.lower()] = set_to_add
    return None
Now the only problem left is that my object doesn't really get updated when I add new words to an existing category. I'll try to find it first, but if I don't I'll just ask a new question.
Update: never mind, found it, it was a stupid mistake.
Actually, in the requests source code there is a solution: the case-insensitive dict object there may satisfy your need.
import collections

class CaseInsensitiveDict(collections.MutableMapping):
    def __init__(self, data=None, **kwargs):
        self._store = dict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key, value):
        # Use the lowercased key for lookups, but store the actual
        # key alongside the value.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return (
            (lowerkey, keyval[1])
            for (lowerkey, keyval)
            in self._store.items()
        )

    def __eq__(self, other):
        if isinstance(other, collections.Mapping):
            other = CaseInsensitiveDict(other)
        else:
            return NotImplemented
        # Compare insensitively
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required
    def copy(self):
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))

    @property
    def keys(self):
        return [i for i in self]

    @property
    def values(self):
        return [self[i] for i in self]

How do I use dictionary key,value pair to set class instance attributes "pythonic"ly?

I have created some Python classes to use as multivariate data structures, which are then used for various tasks. In some instances, I like to populate the classes with various value sets. The default parameter filename "ho2.defaults" would look something like this:
name = 'ho2'
mass_option = 'h1o16'
permutation = 'odd'
parity = 'odd'
j_total = 10
lr = 40
br = 60
jmax = 60
mass_lr = 14578.471659
mass_br = 1781.041591
length_lr = ( 1.0, 11.0, 2.65 )
length_br = ( 0.0, 11.0, 2.46 )
use_spline = True
energy_units = 'au'
pes_zpe = -7.407998138300982E-2
pes_cutoff = 0.293994
Currently, I create a dictionary from reading the desired key,value pairs from file, and now I'd like a "pythonic" way of making those dictionary keys be class instance variable names, i.e.
# Instantiate Molecule Class
molecule = Molecule()
# Create Dictionary of default values
default_dict = read_dict_from_file(filename)
# Set populate class instance variables with dictionary values
for key,value in default_dict:
molecule.key = value
So the class's instance variable "molecule.name" could be set with the dictionary key/value pair. I could do this by hand, but I'm sure there is a better way to loop through it. In actuality, the dictionary could be large, and I'd rather allow the user to choose which values they want to populate, so the dictionary could change. What am I missing here?
You would use setattr: setattr(molecule, key, value)
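For completeness, a minimal sketch of that applied to the loop from the question (Molecule and read_dict_from_file are the names used above):
molecule = Molecule()
default_dict = read_dict_from_file(filename)

# setattr lets the attribute name come from the dictionary key at runtime
for key, value in default_dict.items():
    setattr(molecule, key, value)

print(molecule.name)  # 'ho2', given the defaults file shown above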
The simple way is:
vars(molecule).update(default_dict)
This will clobber any pre-existing attributes though. For a more delicate approach try:
for name, value in default_dict.items():
    if not hasattr(molecule, name):
        setattr(molecule, name, value)
I'd invert the logic so that the object dynamically answers questions:
class Settings(object):
    ATTRS = {'foo', 'bar'}

    def __init__(self, defaults):
        self.__dict__['data'] = defaults.copy()

    def __getattr__(self, key):
        if key not in self.ATTRS or key not in self.data:
            raise AttributeError("'{}' object has no attribute '{}'".format(
                self.__class__.__name__, key))
        return self.data[key]

    def __setattr__(self, key, value):
        self.data[key] = value

s = Settings({'a': 'b', 'foo': 'foo!', 'spam': 'eggs'})
print s.foo
try:
    print s.spam
except AttributeError:
    pass
else:
    raise AssertionError("That should have failed because 'spam' isn't in Settings.ATTRS")
try:
    print s.bar
except AttributeError:
    pass
else:
    raise AssertionError("That should have failed because 'bar' wasn't passed in")

class Molecule(Settings):
    ATTRS = {'name', 'mass_option', ...}

molecule = Molecule(default_dict)

Python StructuredProperty to dictionary

My models all have a method which converts the model to a dictionary:
def to_dict(model):
    output = {}
    SIMPLE_TYPES = (int, long, float, bool, dict, basestring, list)
    for key, prop in model._properties.iteritems():
        value = getattr(model, key)
        if value is None:
            continue
        if isinstance(value, SIMPLE_TYPES):
            output[key] = value
        elif isinstance(value, datetime.date):
            dateString = value.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
            output[key] = dateString
        elif isinstance(value, ndb.Model):
            output[key] = to_dict(value)
        else:
            raise ValueError('cannot encode ' + repr(prop))
    return output
Now, one of my models, X, has a LocalStructuredProperty:
metaData = ndb.LocalStructuredProperty(MetaData, repeated=True)
So, repeated=True means this will be a list of MetaData objects. MetaData is another model, and it also has the same to_dict method.
However, when I call json.dumps(xInstance.to_dict()), I get an exception:
raise TypeError(repr(o) + " is not JSON serializable")
TypeError: MetaData(count=0, date=datetime.datetime(2012, 9, 19, 2, 46, 56, 660000), unique_id=u'8E2C3B07A06547C78AB00DD73B574B8C') is not JSON serializable
How can I handle this?
If you want to handle this in to_dict() and before the level of serializing to JSON, you'll just need a few more cases in your to_dict(). Firstly, you said the to_dict definition above is a method. I would have it delegate to a function or staticmethod so you have something you can call on ints and such without checking the type first. The code will just come out better that way.
def coerce(value):
    SIMPLE_TYPES = (int, long, float, bool, basestring)
    if value is None or isinstance(value, SIMPLE_TYPES):
        return value
    elif isinstance(value, datetime.date):
        return value.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
    elif hasattr(value, 'to_dict'):  # hooray for duck typing!
        return value.to_dict()
    elif isinstance(value, dict):
        return dict((coerce(k), coerce(v)) for (k, v) in value.items())
    elif hasattr(value, '__iter__'):  # iterable, not string
        return map(coerce, value)
    else:
        raise ValueError('cannot encode %r' % value)
Then just plug that into your to_dict method itself:
def to_dict(model):
    output = {}
    for key, prop in model._properties.iteritems():
        value = coerce(getattr(model, key))
        if value is not None:
            output[key] = value
    return output
All you need to do to serialize is to implement a function
def default_encode(obj):
    return obj.to_dict()
and then encode your JSON with
json.dumps(X.to_dict(), default=default_encode)
I figured out how to solve the issue: in the X class, add this to the to_dict() method:
...
if value is None:
    continue
if key == 'metaData':
    array = list()
    for data in value:
        array.append(data.to_dict())
    output[key] = array
elif isinstance(value, SIMPLE_TYPES):
    output[key] = value
...
Though I'm not really sure how to automate this so it isn't based on the key, but instead, whenever it encounters a list of custom objects, it converts each object in the list with to_dict() first.
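One hedged way to automate it, along the lines of the coerce() helper shown earlier: a small recursive converter that handles any list of objects exposing to_dict(), so the branch no longer has to test for the 'metaData' key (the helper name is mine):
def convert_value(value):
    """Convert one property value, recursing into nested models and lists of them."""
    if hasattr(value, 'to_dict'):   # nested model such as MetaData
        return value.to_dict()
    if isinstance(value, list):     # repeated=True properties arrive as lists
        return [convert_value(item) for item in value]
    return value

# inside to_dict(): output[key] = convert_value(value)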

How to serialize SqlAlchemy result to JSON?

Django has some good automatic serialization of ORM models returned from DB to JSON format.
How do I serialize a SQLAlchemy query result to JSON format?
I tried jsonpickle.encode, but it encodes the query object itself.
I tried json.dumps(items) but it returns
TypeError: <Product('3', 'some name', 'some desc')> is not JSON serializable
Is it really so hard to serialize SQLAlchemy ORM objects to JSON/XML? Isn't there any default serializer for it? It's a very common task to serialize ORM query results nowadays.
What I need is just to return a JSON or XML data representation of the SQLAlchemy query result.
The SQLAlchemy query result in JSON/XML format is needed so it can be used in a JavaScript datagrid (JQGrid http://www.trirand.com/blog/).
You could just output your object as a dictionary:
class User:
    def as_dict(self):
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
And then you use User.as_dict() to serialize your object.
As explained in Convert sqlalchemy row object to python dict
A flat implementation
You could use something like this:
from sqlalchemy.ext.declarative import DeclarativeMeta

class AlchemyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj.__class__, DeclarativeMeta):
            # an SQLAlchemy class
            fields = {}
            for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
                data = obj.__getattribute__(field)
                try:
                    json.dumps(data)  # this will fail on non-encodable values, like other classes
                    fields[field] = data
                except TypeError:
                    fields[field] = None
            # a json-encodable dict
            return fields
        return json.JSONEncoder.default(self, obj)
and then convert to JSON using:
c = YourAlchemyClass()
print json.dumps(c, cls=AlchemyEncoder)
It will ignore fields that are not encodable (set them to 'None').
It doesn't auto-expand relations (since this could lead to self-references, and loop forever).
A recursive, non-circular implementation
If, however, you'd rather loop forever, you could use:
from sqlalchemy.ext.declarative import DeclarativeMeta

def new_alchemy_encoder():
    _visited_objs = []

    class AlchemyEncoder(json.JSONEncoder):
        def default(self, obj):
            if isinstance(obj.__class__, DeclarativeMeta):
                # don't re-visit self
                if obj in _visited_objs:
                    return None
                _visited_objs.append(obj)
                # an SQLAlchemy class
                fields = {}
                for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
                    fields[field] = obj.__getattribute__(field)
                # a json-encodable dict
                return fields
            return json.JSONEncoder.default(self, obj)

    return AlchemyEncoder
And then encode objects using:
print json.dumps(e, cls=new_alchemy_encoder(), check_circular=False)
This would encode all children, and all their children, and all their children... potentially encoding your entire database, basically. When it reaches something it has encoded before, it will encode it as 'None'.
A recursive, possibly-circular, selective implementation
Another alternative, probably better, is to be able to specify the fields you want to expand:
def new_alchemy_encoder(revisit_self=False, fields_to_expand=[]):
    _visited_objs = []

    class AlchemyEncoder(json.JSONEncoder):
        def default(self, obj):
            if isinstance(obj.__class__, DeclarativeMeta):
                # don't re-visit self
                if revisit_self:
                    if obj in _visited_objs:
                        return None
                    _visited_objs.append(obj)
                # go through each field in this SQLalchemy class
                fields = {}
                for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
                    val = obj.__getattribute__(field)
                    # is this field another SQLalchemy object, or a list of SQLalchemy objects?
                    if isinstance(val.__class__, DeclarativeMeta) or (isinstance(val, list) and len(val) > 0 and isinstance(val[0].__class__, DeclarativeMeta)):
                        # unless we're expanding this field, stop here
                        if field not in fields_to_expand:
                            # not expanding this field: set it to None and continue
                            fields[field] = None
                            continue
                    fields[field] = val
                # a json-encodable dict
                return fields
            return json.JSONEncoder.default(self, obj)

    return AlchemyEncoder
You can now call it with:
print json.dumps(e, cls=new_alchemy_encoder(False, ['parents']), check_circular=False)
To only expand SQLAlchemy fields called 'parents', for example.
Python 3.7+ and Flask 1.1+ can use the built-in dataclasses package
from dataclasses import dataclass
from datetime import datetime
from flask import Flask, jsonify
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
db = SQLAlchemy(app)

@dataclass
class User(db.Model):
    id: int
    email: str

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    email = db.Column(db.String(200), unique=True)

@app.route('/users/')
def users():
    users = User.query.all()
    return jsonify(users)

if __name__ == "__main__":
    users = User(email="user1@gmail.com"), User(email="user2@gmail.com")
    db.create_all()
    db.session.add_all(users)
    db.session.commit()
    app.run()
The /users/ route will now return a list of users.
[
  {"email": "user1@gmail.com", "id": 1},
  {"email": "user2@gmail.com", "id": 2}
]
Auto-serialize related models
@dataclass
class Account(db.Model):
    id: int
    users: User

    id = db.Column(db.Integer)
    users = db.relationship(User)  # User model would need a db.ForeignKey field
The response from jsonify(account) would be this.
{
  "id": 1,
  "users": [
    {
      "email": "user1@gmail.com",
      "id": 1
    },
    {
      "email": "user2@gmail.com",
      "id": 2
    }
  ]
}
Override the default JSON encoder
from flask.json import JSONEncoder

class CustomJSONEncoder(JSONEncoder):
    "Add support for serializing timedeltas"

    def default(self, o):
        if type(o) == datetime.timedelta:
            return str(o)
        if type(o) == datetime.datetime:
            return o.isoformat()
        return super().default(o)

app.json_encoder = CustomJSONEncoder
You can convert a RowProxy to a dict like this:
d = dict(row.items())
Then serialize that to JSON ( you will have to specify an encoder for things like datetime values )
It's not that hard if you just want one record ( and not a full hierarchy of related records ).
json.dumps([(dict(row.items())) for row in rs])
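A sketch of what such an encoder could look like (the helper name is mine); it covers datetime and Decimal values, which are the usual offenders in row data:
import datetime
import decimal
import json

def encode_default(value):
    # Fallback used by json.dumps for types it cannot handle natively.
    if isinstance(value, (datetime.datetime, datetime.date)):
        return value.isoformat()
    if isinstance(value, decimal.Decimal):
        return float(value)
    raise TypeError('%r is not JSON serializable' % value)

json.dumps([dict(row.items()) for row in rs], default=encode_default)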
I recommend using marshmallow. It allows you to create serializers to represent your model instances with support to relations and nested objects.
Here is a truncated example from their docs. Take the ORM model, Author:
class Author(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    first = db.Column(db.String(80))
    last = db.Column(db.String(80))
A marshmallow schema for that class is constructed like this:
class AuthorSchema(Schema):
    id = fields.Int(dump_only=True)
    first = fields.Str()
    last = fields.Str()
    formatted_name = fields.Method("format_name", dump_only=True)

    def format_name(self, author):
        return "{}, {}".format(author.last, author.first)
...and used like this:
author_schema = AuthorSchema()
author_schema.dump(Author.query.first())
...would produce an output like this:
{
"first": "Tim",
"formatted_name": "Peters, Tim",
"id": 1,
"last": "Peters"
}
Have a look at their full Flask-SQLAlchemy Example.
A library called marshmallow-sqlalchemy specifically integrates SQLAlchemy and marshmallow. In that library, the schema for the Author model described above looks like this:
class AuthorSchema(ModelSchema):
    class Meta:
        model = Author
The integration allows the field types to be inferred from the SQLAlchemy Column types.
More on marshmallow-sqlalchemy can be found in its documentation.
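Usage is the same as with the hand-written schema above; a small sketch (in older marshmallow versions dump() returns a (data, errors) result rather than the dict itself):
author_schema = AuthorSchema()
author_schema.dump(Author.query.first())
# -> roughly {'id': 1, 'first': 'Tim', 'last': 'Peters'}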
You can use SQLAlchemy's introspection, like this:
mysql = SQLAlchemy()

from sqlalchemy import inspect

class Contacts(mysql.Model):
    __tablename__ = 'CONTACTS'
    id = mysql.Column(mysql.Integer, primary_key=True)
    first_name = mysql.Column(mysql.String(128), nullable=False)
    last_name = mysql.Column(mysql.String(128), nullable=False)
    phone = mysql.Column(mysql.String(128), nullable=False)
    email = mysql.Column(mysql.String(128), nullable=False)
    street = mysql.Column(mysql.String(128), nullable=False)
    zip_code = mysql.Column(mysql.String(128), nullable=False)
    city = mysql.Column(mysql.String(128), nullable=False)

    def toDict(self):
        return {c.key: getattr(self, c.key) for c in inspect(self).mapper.column_attrs}

@app.route('/contacts', methods=['GET'])
def getContacts():
    contacts = Contacts.query.all()
    contactsArr = []
    for contact in contacts:
        contactsArr.append(contact.toDict())
    return jsonify(contactsArr)

@app.route('/contacts/<int:id>', methods=['GET'])
def getContact(id):
    contact = Contacts.query.get(id)
    return jsonify(contact.toDict())
Get inspired from an answer here :
Convert sqlalchemy row object to python dict
Flask-JsonTools package has an implementation of JsonSerializableBase Base class for your models.
Usage:
from sqlalchemy.ext.declarative import declarative_base
from flask.ext.jsontools import JsonSerializableBase

Base = declarative_base(cls=(JsonSerializableBase,))

class User(Base):
    # ...
Now the User model is magically serializable.
If your framework is not Flask, you can just grab the code
For security reasons you should never return all the model's fields. I prefer to selectively choose them.
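One hedged way to do that selective exposure is an explicit whitelist on the model plus a tiny helper that only reads those columns (the model and field names here are made up for illustration):
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    email = Column(String(200))
    password_hash = Column(String(200))  # never exposed

    # explicit whitelist of serializable columns
    _public_fields = ('id', 'email')

    def to_public_dict(self):
        return {name: getattr(self, name) for name in self._public_fields}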
Flask's json encoding now supports UUID, datetime and relationships (and added query and query_class for flask_sqlalchemy db.Model class). I've updated the encoder as follows:
app/json_encoder.py
from sqlalchemy.ext.declarative import DeclarativeMeta
from flask import json

class AlchemyEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o.__class__, DeclarativeMeta):
            data = {}
            fields = o.__json__() if hasattr(o, '__json__') else dir(o)
            for field in [f for f in fields if not f.startswith('_') and f not in ['metadata', 'query', 'query_class']]:
                value = o.__getattribute__(field)
                try:
                    json.dumps(value)
                    data[field] = value
                except TypeError:
                    data[field] = None
            return data
        return json.JSONEncoder.default(self, o)
app/__init__.py
# json encoding
from app.json_encoder import AlchemyEncoder
app.json_encoder = AlchemyEncoder
With this I can optionally add a __json__ property that returns the list of fields I wish to encode:
app/models.py
class Queue(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    song_id = db.Column(db.Integer, db.ForeignKey('song.id'), unique=True, nullable=False)
    song = db.relationship('Song', lazy='joined')
    type = db.Column(db.String(20), server_default=u'audio/mpeg')
    src = db.Column(db.String(255), nullable=False)
    created_at = db.Column(db.DateTime, server_default=db.func.now())
    updated_at = db.Column(db.DateTime, server_default=db.func.now(), onupdate=db.func.now())

    def __init__(self, song):
        self.song = song
        self.src = song.full_path

    def __json__(self):
        return ['song', 'src', 'type', 'created_at']
I add @jsonapi to my view, return the result list, and then my output is as follows:
[
{
"created_at": "Thu, 23 Jul 2015 11:36:53 GMT",
"song":
{
"full_path": "/static/music/Audioslave/Audioslave [2002]/1 Cochise.mp3",
"id": 2,
"path_name": "Audioslave/Audioslave [2002]/1 Cochise.mp3"
},
"src": "/static/music/Audioslave/Audioslave [2002]/1 Cochise.mp3",
"type": "audio/mpeg"
}
]
A more detailed explanation.
In your model, add:
def as_dict(self):
    return {c.name: str(getattr(self, c.name)) for c in self.__table__.columns}
The str() is for Python 3, so if using Python 2 use unicode(). It should help with serializing dates. You can remove it if you're not dealing with those.
You can now query the database like this
some_result = User.query.filter_by(id=current_user.id).first().as_dict()
.first() is needed to avoid weird errors. as_dict() will now convert the result to a dict. After that, it is ready to be turned into JSON:
jsonify(some_result)
While the original question goes back awhile, the number of answers here (and my own experiences) suggest it's a non-trivial question with a lot of different approaches of varying complexity with different trade-offs.
That's why I built the SQLAthanor library that extends SQLAlchemy's declarative ORM with configurable serialization/de-serialization support that you might want to take a look at.
The library supports:
Python 2.7, 3.4, 3.5, and 3.6.
SQLAlchemy versions 0.9 and higher
serialization/de-serialization to/from JSON, CSV, YAML, and Python dict
serialization/de-serialization of columns/attributes, relationships, hybrid properties, and association proxies
enabling and disabling of serialization for particular formats and columns/relationships/attributes (e.g. you want to support an inbound password value, but never include an outbound one)
pre-serialization and post-deserialization value processing (for validation or type coercion)
a pretty straightforward syntax that is both Pythonic and seamlessly consistent with SQLAlchemy's own approach
You can check out the (I hope!) comprehensive docs here: https://sqlathanor.readthedocs.io/en/latest
Hope this helps!
Custom serialization and deserialization.
"from_json" (class method) builds a Model object based on json data.
"deserialize" could be called only on instance, and merge all data from json into Model instance.
"serialize" - recursive serialization
__write_only__ property is needed to define write only properties ("password_hash" for example).
class Serializable(object):
__exclude__ = ('id',)
__include__ = ()
__write_only__ = ()
@classmethod
def from_json(cls, json, selfObj=None):
if selfObj is None:
self = cls()
else:
self = selfObj
exclude = (cls.__exclude__ or ()) + Serializable.__exclude__
include = cls.__include__ or ()
if json:
for prop, value in json.iteritems():
# ignore all non user data, e.g. only
if (not (prop in exclude) | (prop in include)) and isinstance(
getattr(cls, prop, None), QueryableAttribute):
setattr(self, prop, value)
return self
def deserialize(self, json):
if not json:
return None
return self.__class__.from_json(json, selfObj=self)
@classmethod
def serialize_list(cls, object_list=[]):
output = []
for li in object_list:
if isinstance(li, Serializable):
output.append(li.serialize())
else:
output.append(li)
return output
def serialize(self, **kwargs):
# init write only props
if len(getattr(self.__class__, '__write_only__', ())) == 0:
self.__class__.__write_only__ = ()
dictionary = {}
expand = kwargs.get('expand', ()) or ()
prop = 'props'
if expand:
# expand all the fields
for key in expand:
getattr(self, key)
iterable = self.__dict__.items()
is_custom_property_set = False
# include only properties passed as parameter
if (prop in kwargs) and (kwargs.get(prop, None) is not None):
is_custom_property_set = True
iterable = kwargs.get(prop, None)
# loop trough all accessible properties
for key in iterable:
accessor = key
if isinstance(key, tuple):
accessor = key[0]
if not (accessor in self.__class__.__write_only__) and not accessor.startswith('_'):
# force select from db to be able get relationships
if is_custom_property_set:
getattr(self, accessor, None)
if isinstance(self.__dict__.get(accessor), list):
dictionary[accessor] = self.__class__.serialize_list(object_list=self.__dict__.get(accessor))
# check if those properties are read only
elif isinstance(self.__dict__.get(accessor), Serializable):
dictionary[accessor] = self.__dict__.get(accessor).serialize()
else:
dictionary[accessor] = self.__dict__.get(accessor)
return dictionary
Here is a solution that lets you select the relations you want to include in your output as deep as you would like to go.
NOTE: This is a complete re-write taking a dict/str as an arg rather than a list. fixes some stuff..
def deep_dict(self, relations={}):
"""Output a dict of an SA object recursing as deep as you want.
Takes one argument, relations which is a dictionary of relations we'd
like to pull out. The relations dict items can be a single relation
name or deeper relation names connected by sub dicts
Example:
Say we have a Person object with a family relationship
person.deep_dict(relations={'family':None})
Say the family object has homes as a relation then we can do
person.deep_dict(relations={'family':{'homes':None}})
OR
person.deep_dict(relations={'family':'homes'})
Say homes has a relation like rooms you can do
person.deep_dict(relations={'family':{'homes':'rooms'}})
and so on...
"""
mydict = dict((c, str(a)) for c, a in
self.__dict__.items() if c != '_sa_instance_state')
if not relations:
# just return ourselves
return mydict
# otherwise we need to go deeper
if not isinstance(relations, dict) and not isinstance(relations, str):
raise Exception("relations should be a dict, it is of type {}".format(type(relations)))
# got here so check and handle if we were passed a dict
if isinstance(relations, dict):
# we were passed deeper info
for left, right in relations.items():
myrel = getattr(self, left)
if isinstance(myrel, list):
mydict[left] = [rel.deep_dict(relations=right) for rel in myrel]
else:
mydict[left] = myrel.deep_dict(relations=right)
# if we get here check and handle if we were passed a string
elif isinstance(relations, str):
# passed a single item
myrel = getattr(self, relations)
left = relations
if isinstance(myrel, list):
mydict[left] = [rel.deep_dict(relations=None)
for rel in myrel]
else:
mydict[left] = myrel.deep_dict(relations=None)
return mydict
so for an example using person/family/homes/rooms... turning it into json all you need is
json.dumps(person.deep_dict(relations={'family':{'homes':'rooms'}}))
Step 1:
class CNAME:
    ...
    def as_dict(self):
        return {item.name: getattr(self, item.name) for item in self.__table__.columns}
Step 2:
results = []
for data in session.query(CNAME).all():
    results.append(data.as_dict())
Step 3:
return jsonify(results)
Even though it's an old post, and maybe I didn't answer the question above, I want to talk about my serialization; at least it works for me.
I use FastAPI, SQLAlchemy and MySQL, but I don't use the ORM model;
# from sqlalchemy import create_engine
# from sqlalchemy.orm import sessionmaker
# engine = create_engine(config.SQLALCHEMY_DATABASE_URL, pool_pre_ping=True)
# SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Serialization code
import decimal
import datetime
def alchemy_encoder(obj):
    """JSON encoder function for SQLAlchemy special classes."""
    if isinstance(obj, datetime.date):
        return obj.strftime("%Y-%m-%d %H:%M:%S")
    elif isinstance(obj, decimal.Decimal):
        return float(obj)
import json
from sqlalchemy import text
# db is SessionLocal() object
app_sql = 'SELECT * FROM app_info ORDER BY app_id LIMIT :page,:page_size'
# The next two are the parameters passed in
page = 1
page_size = 10
# execute sql and return a <class 'sqlalchemy.engine.result.ResultProxy'> object
app_list = db.execute(text(app_sql), {'page': page, 'page_size': page_size})
# serialize
res = json.loads(json.dumps([dict(r) for r in app_list], default=alchemy_encoder))
If it doesn't work, please ignore my answer. I refer to it here
https://codeandlife.com/2014/12/07/sqlalchemy-results-to-json-the-easy-way/
Install simplejson by
pip install simplejson and then create a class
class Serialise(object):
    def _asdict(self):
        """
        Serialization logic for converting entities using flask's jsonify
        :return: An ordered dictionary
        :rtype: :class:`collections.OrderedDict`
        """
        result = OrderedDict()
        # Get the columns
        for key in self.__mapper__.c.keys():
            if isinstance(getattr(self, key), datetime):
                result["x"] = getattr(self, key).timestamp() * 1000
                result["timestamp"] = result["x"]
            else:
                result[key] = getattr(self, key)
        return result
and inherit this class in every ORM class, so that this _asdict function gets registered on every ORM class, and boom.
Then use jsonify anywhere.
It is not so straightforward. I wrote some code to do this. I'm still working on it, and it uses the MochiKit framework. It basically translates compound objects between Python and JavaScript using a proxy and registered JSON converters.
Browser side for database objects is db.js
It needs the basic Python proxy source in proxy.js.
On the Python side there is the base proxy module.
Then finally the SqlAlchemy object encoder in webserver.py.
It also depends on metadata extractors found in the models.py file.
def alc2json(row):
    return dict([(col, str(getattr(row, col))) for col in row.__table__.columns.keys()])
I thought I'd play a little code golf with this one.
FYI: I am using automap_base since we have a separately designed schema according to business requirements. I just started using SQLAlchemy today but the documentation states that automap_base is an extension to declarative_base which seems to be the typical paradigm in the SQLAlchemy ORM so I believe this should work.
It does not get fancy with following foreign keys per Tjorriemorrie's solution, but it simply matches columns to values and handles Python types by str()-ing the column values. Our values consist of Python datetime.time and decimal.Decimal class type results, so it gets the job done.
Hope this helps any passers-by!
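A hypothetical usage sketch of the one-liner above (session and SomeModel are assumed to exist):
import json

rows = session.query(SomeModel).all()
payload = json.dumps([alc2json(row) for row in rows])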
I know this is quite an old post. I took the solution given by @SashaB and modified it as per my need.
I added following things to it:
Field ignore list: A list of fields to be ignored while serializing
Field replace list: A dictionary containing field names to be replaced by values while serializing.
Removed methods and BaseQuery getting serialized
My code is as follows:
def alchemy_json_encoder(revisit_self = False, fields_to_expand = [], fields_to_ignore = [], fields_to_replace = {}):
"""
Serialize SQLAlchemy result into JSon
:param revisit_self: True / False
:param fields_to_expand: Fields which are to be expanded for including their children and all
:param fields_to_ignore: Fields to be ignored while encoding
:param fields_to_replace: Field keys to be replaced by values assigned in dictionary
:return: Json serialized SQLAlchemy object
"""
_visited_objs = []
class AlchemyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj.__class__, DeclarativeMeta):
# don't re-visit self
if revisit_self:
if obj in _visited_objs:
return None
_visited_objs.append(obj)
# go through each field in this SQLalchemy class
fields = {}
for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata' and x not in fields_to_ignore]:
val = obj.__getattribute__(field)
# is this field a method definition, or an SQLalchemy object?
if not hasattr(val, "__call__") and not isinstance(val, BaseQuery):
field_name = fields_to_replace[field] if field in fields_to_replace else field
# is this field another SQLalchemy object, or a list of SQLalchemy objects?
if isinstance(val.__class__, DeclarativeMeta) or \
(isinstance(val, list) and len(val) > 0 and isinstance(val[0].__class__, DeclarativeMeta)):
# unless we're expanding this field, stop here
if field not in fields_to_expand:
# not expanding this field: set it to None and continue
fields[field_name] = None
continue
fields[field_name] = val
# a json-encodable dict
return fields
return json.JSONEncoder.default(self, obj)
return AlchemyEncoder
Hope it helps someone!
Use the built-in serializer in SQLAlchemy:
from sqlalchemy.ext.serializer import loads, dumps
obj = MyAlchemyObject()
# serialize object
serialized_obj = dumps(obj)
# deserialize object
obj = loads(serialized_obj)
If you're transferring the object between sessions, remember to detach the object from the current session using session.expunge(obj).
To attach it again, just do session.add(obj).
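Putting those pieces together, a rough sketch of moving an object from one session to another (the session names are assumptions; depending on your setup, loads() may need metadata/session arguments to rebind the instance):
from sqlalchemy.ext.serializer import dumps, loads

payload = dumps(obj)          # bytes you can store or send elsewhere
session_a.expunge(obj)        # detach from the originating session

restored = loads(payload)     # see note above about extra arguments
session_b.add(restored)       # attach the deserialized instance to the target session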
Under Flask, this works and handles datetime fields, transforming a field of type
'time': datetime.datetime(2018, 3, 22, 15, 40) into
"time": "2018-03-22 15:40:00":
obj = {c.name: str(getattr(self, c.name)) for c in self.__table__.columns}
# This to get the JSON body
return json.dumps(obj)
# Or this to get a response object
return jsonify(obj)
The following code will serialize a SQLAlchemy result to JSON.
import json
from collections import OrderedDict

def asdict(self):
    result = OrderedDict()
    for key in self.__mapper__.c.keys():
        if getattr(self, key) is not None:
            result[key] = str(getattr(self, key))
        else:
            result[key] = getattr(self, key)
    return result

def to_array(all_vendors):
    v = [ven.asdict() for ven in all_vendors]
    return json.dumps(v)
Calling function:
def all_products():
    all_products = Products.query.all()
    return to_array(all_products)
The AlchemyEncoder is wonderful but sometimes fails with Decimal values. Here is an improved encoder that solves the decimal problem -
class AlchemyEncoder(json.JSONEncoder):
    # To serialize SQLalchemy objects
    def default(self, obj):
        if isinstance(obj.__class__, DeclarativeMeta):
            model_fields = {}
            for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
                data = obj.__getattribute__(field)
                print data
                try:
                    json.dumps(data)  # this will fail on non-encodable values, like other classes
                    model_fields[field] = data
                except TypeError:
                    model_fields[field] = None
            return model_fields
        if isinstance(obj, Decimal):
            return float(obj)
        return json.JSONEncoder.default(self, obj)
When using SQLAlchemy to connect to a db, this is a simple solution which is highly configurable: use pandas.
import pandas as pd
import sqlalchemy

# sqlalchemy engine configuration
engine = sqlalchemy.create_engine....

def my_function():
    # read in from sql directly into a pandas dataframe
    # check the pandas documentation for additional config options
    sql_DF = pd.read_sql_table("table_name", con=engine)
    # "orient" is optional here but allows you to specify the json formatting you require
    sql_json = sql_DF.to_json(orient="index")
    return sql_json
(Tiny tweak on Sasha B's really excellent answer)
This specifically converts datetime objects to strings which in the original answer would be converted to None:
# Standard library imports
from datetime import datetime
import json
# 3rd party imports
from sqlalchemy.ext.declarative import DeclarativeMeta
class JsonEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj.__class__, DeclarativeMeta):
            dict = {}
            # Remove invalid fields and just get the column attributes
            columns = [x for x in dir(obj) if not x.startswith("_") and x != "metadata"]
            for column in columns:
                value = obj.__getattribute__(column)
                try:
                    json.dumps(value)
                    dict[column] = value
                except TypeError:
                    if isinstance(value, datetime):
                        dict[column] = value.__str__()
                    else:
                        dict[column] = None
            return dict
        return json.JSONEncoder.default(self, obj)
from datetime import datetime
from typing import List

class SqlToDict:
    def __init__(self, data) -> None:
        self.data = data

    def to_timestamp(self, date):
        if isinstance(date, datetime):
            return int(datetime.timestamp(date))
        else:
            return date

    def to_dict(self) -> List:
        arr = []
        for i in self.data:
            keys = [*i.keys()]
            values = [*i]
            values = [self.to_timestamp(d) for d in values]
            arr.append(dict(zip(keys, values)))
        return arr
For example:
SqlToDict(data).to_dict()
Very late 2023
My implementation
def obj_to_dict(obj, remove=['_sa_instance_state'], debug=False):
    result = {}
    if type(obj).__name__ == "Row":
        return dict(obj)
    obj = obj.__dict__
    for key in obj:
        if key in remove:
            continue
        result[key] = obj[key]
    if debug:
        print(result)
    return result
The built-in serializer chokes with "utf-8 cannot decode invalid start byte" for some inputs. Instead, I went with:
def row_to_dict(row):
    temp = row.__dict__
    temp.pop('_sa_instance_state', None)
    return temp

def rows_to_list(rows):
    ret_rows = []
    for row in rows:
        ret_rows.append(row_to_dict(row))
    return ret_rows

@website_blueprint.route('/api/v1/some/endpoint', methods=['GET'])
def some_api():
    '''
    /some_endpoint
    '''
    rows = rows_to_list(SomeModel.query.all())
    response = app.response_class(
        response=jsonplus.dumps(rows),
        status=200,
        mimetype='application/json'
    )
    return response
Maybe you can use a class like this
from typing import Any, Dict

from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy import Table

class Custom:
    """Some custom logic here!"""

    __table__: Table  # def for mypy

    @declared_attr
    def __tablename__(cls):  # pylint: disable=no-self-argument
        return cls.__name__  # pylint: disable=no-member

    def to_dict(self) -> Dict[str, Any]:
        """Serializes only column data."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}

Base = declarative_base(cls=Custom)

class MyOwnTable(Base):
    # COLUMNS!
With that, all objects have the to_dict method.
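A hypothetical usage sketch, assuming MyOwnTable defines, say, id and name columns and a session exists:
row = session.query(MyOwnTable).first()
print(row.to_dict())  # e.g. {'id': 1, 'name': 'example'}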
While using some raw sql and undefined objects, using cursor.description appeared to get what I was looking for:
with connection.cursor() as cur:
    print(query)
    cur.execute(query)
    for item in cur.fetchall():
        row = {column.name: item[i] for i, column in enumerate(cur.description)}
        print(row)
This is a JSONEncoder version that preserves model column order and only keeps recursively defined column and relationship fields. It also formats most JSON unserializable types:
import json
from datetime import datetime
from decimal import Decimal
import arrow
from sqlalchemy.ext.declarative import DeclarativeMeta
class SQLAlchemyJSONEncoder(json.JSONEncoder):
"""
SQLAlchemy ORM JSON Encoder
If you have a "backref" relationship defined in your SQLAlchemy model,
this encoder raises a ValueError to stop an infinite loop.
"""
def default(self, obj):
if isinstance(obj, datetime):
return arrow.get(obj).isoformat()
elif isinstance(obj, Decimal):
return float(obj)
elif isinstance(obj, set):
return sorted(obj)
elif isinstance(obj.__class__, DeclarativeMeta):
for attribute, relationship in obj.__mapper__.relationships.items():
if isinstance(relationship.__getattribute__("backref"), tuple):
raise ValueError(
f'{obj.__class__} object has a "backref" relationship '
"that would cause an infinite loop!"
)
dictionary = {}
column_names = [column.name for column in obj.__table__.columns]
for key in column_names:
value = obj.__getattribute__(key)
if isinstance(value, datetime):
value = arrow.get(value).isoformat()
elif isinstance(value, Decimal):
value = float(value)
elif isinstance(value, set):
value = sorted(value)
dictionary[key] = value
for key in [
attribute
for attribute in dir(obj)
if not attribute.startswith("_")
and attribute != "metadata"
and attribute not in column_names
]:
value = obj.__getattribute__(key)
dictionary[key] = value
return dictionary
return super().default(obj)

Python serializable objects json [duplicate]

class gpagelet:
    """
    Holds 1) the pagelet xpath, which is a string
          2) the list of pagelet shingles, list
    """
    def __init__(self, parent):
        if not isinstance(parent, gwebpage):
            raise Exception("Parent must be an instance of gwebpage")
        self.parent = parent  # This must be a gwebpage instance
        self.xpath = None  # String
        self.visibleShingles = []  # list of tuples
        self.invisibleShingles = []  # list of tuples
        self.urls = []  # list of string

class gwebpage:
    """
    Holds all the datastructure after the results have been parsed
    holds: 1) lists of gpagelets
           2) loc, string, location of the file that represents it
    """
    def __init__(self, url):
        self.url = url  # Str
        self.netloc = False  # Str
        self.gpagelets = []  # gpagelets instance
        self.page_key = ""  # str
Is there a way for me to make my class JSON serializable? The thing that I am worried about is the recursive reference.
Write your own encoder and decoder, which can be very simple, e.g. just returning __dict__.
For example, here is an encoder to dump a totally recursive tree structure; you can enhance it or use it as is for your own purpose:
import json

class Tree(object):
    def __init__(self, name, childTrees=None):
        self.name = name
        if childTrees is None:
            childTrees = []
        self.childTrees = childTrees

class MyEncoder(json.JSONEncoder):
    def default(self, obj):
        if not isinstance(obj, Tree):
            return super(MyEncoder, self).default(obj)
        return obj.__dict__

c1 = Tree("c1")
c2 = Tree("c2")
t = Tree("t", [c1, c2])
print json.dumps(t, cls=MyEncoder)
it prints
{"childTrees": [{"childTrees": [], "name": "c1"}, {"childTrees": [], "name": "c2"}], "name": "t"}
You can similarly write a decoder, but there you will somehow need to identify whether a dict is your object or not, so you may need to put a type in the output too if needed.
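A minimal sketch of that decoder half, building on the Tree example above; the __type__ tag is my addition (the encoder has to emit it too, as shown here), not part of the original snippet:
import json

class TaggedTreeEncoder(json.JSONEncoder):
    def default(self, obj):
        if not isinstance(obj, Tree):
            return super(TaggedTreeEncoder, self).default(obj)
        # tag the dict so the decoder can recognise Tree objects
        return dict(obj.__dict__, __type__='Tree')

def tree_hook(d):
    """object_hook for json.loads: rebuild Tree instances, leave other dicts alone."""
    if d.get('__type__') == 'Tree':
        return Tree(d['name'], d['childTrees'])
    return d

t = Tree("t", [Tree("c1"), Tree("c2")])
roundtripped = json.loads(json.dumps(t, cls=TaggedTreeEncoder), object_hook=tree_hook)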
Indirect answer: instead of using JSON, you could use YAML, which has no problem doing what you want. (JSON is essentially a subset of YAML.)
Example:
import yaml
o1 = gwebpage("url")
o2 = gpagelet(o1)
o1.gpagelets = [o2]
print yaml.dump(o1)
In fact, YAML nicely handles cyclic references for you.
I implemented a very simple todict method with the help of https://stackoverflow.com/a/11637457/1766716
Iterate over properties that do not start with __
Eliminate methods
Eliminate some properties manually which are not necessary (for my case, coming from SQLAlchemy)
And use getattr to build the dictionary.
class User(Base):
    id = Column(Integer, primary_key=True)
    firstname = Column(String(50))
    lastname = Column(String(50))
    password = Column(String(20))

    def props(self):
        return filter(
            lambda a:
                not a.startswith('__')
                and a not in ['_decl_class_registry', '_sa_instance_state', '_sa_class_manager', 'metadata']
                and not callable(getattr(self, a)),
            dir(self))

    def todict(self):
        return {k: self.__getattribute__(k) for k in self.props()}
My solution for this was to extend the 'dict' class and perform checks around required/allowed attributes by overriding init, update, and set class methods.
class StrictDict(dict):
required=set()
at_least_one_required=set()
cannot_coexist=set()
allowed=set()
def __init__(self, iterable={}, **kwargs):
super(StrictDict, self).__init__({})
keys = set(iterable.keys()).union(set(kwargs.keys()))
if not keys.issuperset(self.required):
msg = str(self.__class__.__name__) + " requires: " + str([str(key) for key in self.required])
raise AttributeError(msg)
if len(list(self.at_least_one_required)) and len(list(keys.intersection(self.at_least_one_required))) < 1:
msg = str(self.__class__.__name__) + " requires at least one: " + str([str(key) for key in self.at_least_one_required])
raise AttributeError(msg)
for key, val in iterable.iteritems():
self.__setitem__(key, val)
for key, val in kwargs.iteritems():
self.__setitem__(key, val)
def update(self, E=None, **F):
for key, val in E.iteritems():
self.__setitem__(key, val)
for key, val in F.iteritems():
self.__setitem__(key, val)
super(StrictDict, self).update({})
def __setitem__(self, key, value):
all_allowed = self.allowed.union(self.required).union(self.at_least_one_required).union(self.cannot_coexist)
if key not in list(all_allowed):
msg = str(self.__class__.__name__) + " does not allow member '" + key + "'"
raise AttributeError(msg)
if key in list(self.cannot_coexist):
for item in list(self.cannot_coexist):
if key != item and item in self.keys():
msg = str(self.__class__.__name__) + "does not allow members '" + key + "' and '" + item + "' to coexist'"
raise AttributeError(msg)
super(StrictDict, self).__setitem__(key, value)
Example usage:
class JSONDoc(StrictDict):
"""
Class corresponding to JSON API top-level document structure
http://jsonapi.org/format/#document-top-level
"""
at_least_one_required={'data', 'errors', 'meta'}
allowed={"jsonapi", "links", "included"}
cannot_coexist={"data", "errors"}
def __setitem__(self, key, value):
if key == "included" and "data" not in self.keys():
msg = str(self.__class__.__name__) + " does not allow 'included' member if 'data' member is not present"
raise AttributeError(msg)
super(JSONDoc, self).__setitem__(key, value)
json_doc = JSONDoc(
data={
"id": 5,
"type": "movies"
},
links={
"self": "http://url.com"
}
)
