Python XML dot name constructor - python

I have a Solace JMS Messaging Box which I am automating the administration of, and the device uses lots of very small XML posts to configure. Since the device has so very many commands in its XML spec, I need a way to create arbitrary XML requests.
The XML looks like:
<rpc semp-version="soltr/5_5">
<create>
<message-vpn>
<vpn-name>developer.testvpn</vpn-name>
</message-vpn>
</create>
</rpc>
And a second call to change a setting might look like this:
<rpc semp-version="soltr/5_5">
<message-vpn>
<vpn-name>developer.testvpn</vpn-name>
<export-policy>
<no>
<export-subscriptions/>
</no>
</export-policy>
</message-vpn>
</rpc>
Since there are many commands in the XML spec, I'm looking for a way to create them freely from something like dot name space. eg:
mycall = SolaceXML()
mycall.create.message_vpn.vpn_name="developer.testvpn"
mycall.message_vpn.vpn_name='developer.testvpn'
mycall.message_vpn.export_policy.no.export_subscription
UPDATE
I have posted my solution below. Its not as small as I'd like it but it works for me.
K

I have found a solution in the mean time. Hopefully this helps someone else. This solution builds nested dictionary objects from dot name space calls, and then converts it to XML.
from xml.dom.minidom import Document
import copy
import re
from collections import OrderedDict
class d2x:
''' Converts Dictionary to XML '''
def __init__(self, structure):
self.doc = Document()
if len(structure) == 1:
rootName = str(structure.keys()[0])
self.root = self.doc.createElement(rootName)
self.doc.appendChild(self.root)
self.build(self.root, structure[rootName])
def build(self, father, structure):
if type(structure) == dict:
for k in structure:
tag = self.doc.createElement(k)
father.appendChild(tag)
self.build(tag, structure[k])
elif type(structure) == OrderedDict:
for k in structure:
tag = self.doc.createElement(k)
father.appendChild(tag)
self.build(tag, structure[k])
elif type(structure) == list:
grandFather = father.parentNode
tagName = father.tagName
grandFather.removeChild(father)
for l in structure:
tag = self.doc.createElement(tagName)
self.build(tag, l)
grandFather.appendChild(tag)
else:
data = str(structure)
tag = self.doc.createTextNode(data)
father.appendChild(tag)
def display(self):
# I render from the root instead of doc to get rid of the XML header
#return self.root.toprettyxml(indent=" ")
return self.root.toxml()
class SolaceNode:
''' a sub dictionary builder '''
def __init__(self):
self.__dict__ = OrderedDict()
def __getattr__(self, name):
name = re.sub("_", "-", name)
try:
return self.__dict__[name]
except:
self.__dict__[name] = SolaceNode()
return self.__dict__[name]
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return str(self.__dict__)
def __call__(self, *args, **kwargs):
return self.__dict__
def __setattr__(self, name, value):
name = re.sub("_", "-", name)
self.__dict__[name] = value
class SolaceXMLBuilder(object):
''' main dictionary builder
Any dot-name-space like calling of a instance of SolaceXMLBuilder will create
nested dictionary keys. These are converted to XML whenever the instance
representation is called ( __repr__ )
Example
a=SolaceXMLBuilder()
a.foo.bar.baz=2
str(a)
'<rpc semp-version="soltr/5_5">\n<foo><bar><baz>2</baz></bar></foo></rpc>'
'''
def __init__(self):
self.__dict__ = OrderedDict()
self.__setattr__ = None
def __getattr__(self, name):
name = re.sub("_", "-", name)
try:
return self.__dict__[name]
except:
self.__dict__[name] = SolaceNode()
return self.__dict__[name]
def __repr__(self):
# Show XML
myxml = d2x(eval(str(self.__dict__)))
# I had to conjur up my own header cause solace doesnt like </rpc> to have attribs
#return str('<rpc semp-version="soltr/5_5">\n%s</rpc>' % myxml.display())
return str('<rpc semp-version="soltr/5_5">\n%s</rpc>' % myxml.display())
def __call__(self, *args, **kwargs):
return self.__dict__
# def __setattr__(self, name, value):
# raise Exception('no you cant create assignments here, only on sub-elements')
if __name__ == '__main__':
x = SolaceXMLBuilder()
x.create.message_vpn.vpn_name='NEWVPN'
print(x)
# <?xml version="1.0" ?>
# <rpc semp-version="soltr/5_5">
# <create>
# <message-vpn>
# <vpn-name>
# NEWVPN
# </vpn-name>
# </message-vpn>
# </create>
# </rpc semp-version="soltr/5_5">
x=SolaceXMLBuilder()
x.message_vpn.vpn_name='NEWVPN'
x.message_vpn.no.feature_X
print(x)
# <?xml version="1.0" ?>
# <rpc semp-version="soltr/5_5">
# <message-vpn>
# <vpn-name>
# NEWVPN
# </vpn-name>
# <no>
# <feature-X/>
# </no>
# </message-vpn>
# </rpc semp-version="soltr/5_5">
# >>> client = SolaceAPI('pt1')
# >>> xml = SolaceXMLBuilder()
# >>> xml.create.message_vpn.vpn_name='NEWVPN'
# >>> client.rpc(str(xml))
# {u'rpc-reply': {u'execute-result': {u'#code': u'ok'}, u'#semp-version': u'soltr/5_5'}}

Related

Decorator: Maintain state

I need to compose information regarding the given information like what parameter the given function takes etc. The example what I would like to do is
#author("Joey")
#parameter("name", type=str)
#parameter("id", type=int)
#returns("Employee", desc="Returns employee with given details", type="Employee")
def get_employee(name, id):
//
// Some logic to return employee
//
Skeleton of decorator could be as follows:
json = {}
def author(author):
def wrapper(func):
def internal(*args, **kwargs):
json["author"] = name
func(args, kwargs)
return internal
return wrapepr
Similarly, parameter decorator could be written as follows:
def parameter(name, type=None):
def wrapper(func):
def internal(*args, **kwargs):
para = {}
para["name"] = name
para["type"] = type
json["parameters"].append = para
func(args, kwargs)
return internal
return wrapepr
Similarly, other handlers could be written. At the end, I can just call one function which would get all formed JSONs for each function.
End output could be
[
{fun_name, "get_employee", author: "Joey", parameters : [{para_name : Name, type: str}, ... ], returns: {type: Employee, desc: "..."}
{fun_name, "search_employee", author: "Bob", parameters : [{para_name : age, type: int}, ... ], returns: {type: Employee, desc: "..."}
...
}
]
I'm not sure how I can maintain the state and know to consolidate the data regarding one function should be handled together.
How can I achieve this?
I don't know if I fully get your use case, but wouldn't it work to add author to your current functions as:
func_list = []
def func(var):
return var
json = {}
json['author'] = 'JohanL'
json['func'] = func.func_name
func.json = json
func_list.append(func.json)
def func2(var):
return var
json = {}
json['author'] = 'Ganesh'
func2.json = json
func_list.append(func2.json)
This can be automated using a decorator as follows:
def author(author):
json = {}
def author_decorator(func):
json['func'] = func.func_name
json['author'] = author
func.json = json
return func
return author_decorator
def append(func_list):
def append_decorator(func):
func_list.append(func.json)
return func
return append_decorator
func_list = []
#append(func_list)
#author('JohanL')
def func(var):
return var
#append(func_list)
#author('Ganesh')
def func2(var):
return var
Then you can access the json dict as func.json and func2.json or find the functions in the func_list. Note that for the decorators to work, you have to add them in the order I have put them and I have not added any error handling.
Also, if you prefer the func_list to not be explicitly passed, but instead use a globaly defined list with an explicit name, the code can be somewhat simplified to:
func_list = []
def author(author):
json = {}
def author_decorator(func):
json['func'] = func.func_name
json['author'] = author
func.json = json
return func
return author_decorator
def append(func):
global func_list
func_list.append(func.json)
return func
#append
#author('JohanL')
def func(var):
return var
#append
#author('Ganesh')
def func2(var):
return var
Maybe this is sufficient for you?

Converting Nested Json into Python object

I have nested json as below
{
"product" : "name",
"protocol" : "scp",
"read_logs" : {
"log_type" : "failure",
"log_url" : "htttp:url"
}
}
I am trying to create Python class object with the below code.
import json
class Config (object):
"""
Argument: JSON Object from the configuration file.
"""
def __init__(self, attrs):
if 'log_type' in attrs:
self.log_type = attrs['log_type']
self.log_url = attrs['log_url']
else:
self.product = attrs["product"]
self.protocol = attrs["protocol"]
def __str__(self):
return "%s;%s" %(self.product, self.log_type)
def get_product(self):
return self.product
def get_logurl(self):
return self.log_url
class ConfigLoader (object):
'''
Create a confiuration loaded which can read JSON config files
'''
def load_config (self, attrs):
with open (attrs) as data_file:
config = json.load(data_file, object_hook=load_json)
return config
def load_json (json_object):
return Config (json_object)
loader = ConfigLoader()
config = loader.load_config('../config/product_config.json')
print config.get_protocol()
But, the object_hook is invoking the load_json recursively and the Class Config init is being called twice. So the final object that I created does not contain the nested JSON data.
Is there any way to read the entire nested JSON object into a single Python class ?
Thanks
A variation on Pankaj Singhal's idea, but using a "generic" namespace class instead of namedtuples:
import json
class Generic:
#classmethod
def from_dict(cls, dict):
obj = cls()
obj.__dict__.update(dict)
return obj
data = '{"product": "name", "read_logs": {"log_type": "failure", "log_url": "123"}}'
x = json.loads(data, object_hook=Generic.from_dict)
print(x.product, x.read_logs.log_type, x.read_logs.log_url)
namedtuple & object_hook can help create a one-liner:
# Create an object with attributes corresponding to JSON keys.
def json_to_obj(data): return json.loads(data, object_hook=lambda converted_dict: namedtuple('X', converted_dict.keys())(*converted_dict.values()))
OR Create a more readable function like below:
def _object_hook(converted_dict): return namedtuple('X', converted_dict.keys())(*converted_dict.values())
def json_to_obj(data): return json.loads(data, object_hook=_object_hook)
Below is the code snippet to use it:
import json
from collections import namedtuple
data = '{"product": "name", "read_logs": {"log_type": "failure", "log_url": htttp:url}}'
x = json_to_obj(data)
print x.product, x.read_logs.log_type, x.read_logs.log_url
NOTE: Check out namedtuple's rename parameter.
I wrote a simple DFS algorithm to do this job.
Convert nested item as a flat dictionary. In my case, I joined the keys of json item with a dash.
For example, nested item { "a":[{"b": "c"}, {"d":"e"}] } will be transformed as {'a-0-b': 'c', 'a-1-d': 'e'}.
def DFS(item, headItem, heads, values):
if type(item) == type({}):
for k in item.keys():
DFS(item[k], headItem + [k], heads, values)
elif type(item) == type([]):
for i in range(len(item)):
DFS(item[i], headItem + [str(i)], heads, values)
else:
headItemStr = '-'.join(headItem)
heads.append(headItemStr)
values.append(item)
return
def reduce(jsonItem):
heads, values = [], []
DFS(jsonItem, [], heads, values)
return heads, values
def json2dict(jsonItem):
head, value = reduce(jsonItem)
dictHeadValue = { head[i] : value[i] for i in range(len(head))}
return dictHeadValue

How to get a list of all non imported names in a Python module?

Given a module containing :
import stuff
from foo import Foo
from bar import *
CST = True
def func(): pass
How can I define a function get_defined_objects so that I can do:
print(get_defined_objects('path.to.module'))
{'CST': True, 'func', <function path.to.module.func>}
Right now the only solution I can imagine is to read the original module file, extract defined names with re.search(r'^(?:def|class )?(\w+)(?:\s*=)?' then import the module, and find the intersection with __dict__.
Is there something cleaner ?
Here is something for you to start with using ast. Note that this code does not cover all possible cases, although it should handle e.g. multiple assignment properly. Consider investigating ast's data structures and API more closely if you would like to get access to compiled code, for example.
import ast
with open('module.py') as f:
data = f.read()
tree = ast.parse(data)
elements = [el for el in tree.body if type(el) in (ast.Assign, ast.FunctionDef, ast.ClassDef)]
result = {}
for el in elements:
if type(el) == ast.Assign:
for t in el.targets:
if type(el.value) == ast.Call:
result[t.id] = el.value.func.id + '()'
else:
for attr in ['id', 'i', 's']:
try:
result[t.id] = getattr(el.value, attr)
break
except Exception as e:
pass
elif type(el) == ast.FunctionDef:
result[el.name] = '<function %s>' % el.name
else:
result[el.name] = '<class %s>' % el.name
print result
#
mod = "foo"
import ast, inspect
import importlib
mod = importlib.import_module(mod)
p = ast.parse(inspect.getsource(mod))
from collections import defaultdict
data = defaultdict(defaultdict)
for node in p.body:
if isinstance(node, (ast.ImportFrom, ast.Import)):
continue
if isinstance(node, (ast.ClassDef, ast.FunctionDef)):
data["classes"][node.name] = mod.__dict__[node.name]
elif isinstance(node, ast.Assign):
for trg in node.targets:
if isinstance(node.value, ast.Num):
data["assignments"][trg.id] = node.value.n
elif isinstance(node.value, ast.Str):
data["assignments"][trg.id] = node.value.s
else:
data["assignments"][trg.id] = mod.__dict__[trg.id]
Output:
There is a nice explanation here that lists what the different types do and their attributes which this is based on:
class Nodes(ast.NodeVisitor):
def __init__(self):
self.data = defaultdict()
super(Nodes, self).__init__()
def visit_FunctionDef(self, node):
self.data[node.name] = mod.__dict__[node.name]
print("In FunctionDef with funcion {}".format(node.name))
def visit_ClassDef(self, node):
self.data[node.name] = mod.__dict__[node.name]
def visit_Assign(self, node):
for trg in node.targets:
if isinstance(node.value, (ast.Str, ast.Num, ast.Dict, ast.List, ast.ListComp, ast.NameConstant)):
self.data[trg.id] = mod.__dict__[trg.id]
self.generic_visit(node)
def visit_Name(self, node):
"""
class Name(idctx)
A variable name. id holds the name as a string
and ctx is either class Load class Store class Del.
"""
print("In Name with {}\n".format(node.id))
#
def visit_Dict(self, node):
"""
class Dict(keys, values)
A dictionary. keys and values
hold lists of nodes with matching order
"""
print("In Dict keys = {}, values = {}\n".format(node.keys,node.values))
def visit_Set(self,node):
"""
class Set(elts)
A set. elts holds a list of
nodes representing the elements.
"""
print("In Set elts = {}\n".format(node.elts))
def visit_List(self, node):
"""
class List(eltsctx)
lts holds a list of nodes representing the elements.
ctx is Store if the container
is an assignment target
(i.e. (x,y)=pt), and Load otherwise.
"""
print("In List elts = {}\nctx = {}\n".format(node.elts,node.ctx))
def visit_Tuple(self, node):
"""
class Tuple(eltsctx)
lts holds a list of nodes representing the elements.
ctx is Store if the container
is an assignment target
(i.e. (x,y)=pt), and Load otherwise.
"""
print("In Tuple elts = {}\nctx = {}\n".format(node.elts,node.ctx))
def visit_NameConstant(self, node):
"""
class NameConstant(value)
True, False or None. "value" holds one of those constants.
"""
print("In NameConstant getting value {}\n".format(node.value))
def visit_Load(self, node):
print("In Load with node {}\n".format(node.func))
def visit_Call(self, node):
"""
class Call(func, args, keywords, starargs, kwargs)
A function call. func is the function,
which will often be a Name or Attribute object. Of the arguments:
args holds a list of the arguments passed by position.
keywords holds a list of keyword objects representing arguments
passed by keyword.starargs and kwargs each hold a single node,
for arguments passed as *args and **kwargs.
"""
print("In Call with node {}\n".format(node.func))
def visit_Num(self, node):
print("In Num getting value {}\n".format(node.n))
def visit_Str(self, node):
print("In Str getting value {}\n".format(node.s))
f = Nodes()
f.visit(p)
print(f.data)
A bytecode hack for Python 3.4+. Possible due to dis.get_instructions.
import dis
import importlib
from itertools import islice
import marshal
import os
def consume_iterator(it, n=1):
next(islice(it, n, n), None)
def get_defined_names(module_path):
path, module_name = os.path.split(module_path)
module_name = module_name[:-3]
module_object = importlib.import_module(module_name)
pyc_name = '{}.cpython-34.pyc'.format(module_name)
pyc_path = os.path.join(path, '__pycache__/', pyc_name)
with open(pyc_path, 'rb') as f:
f.read(12) # drop the first 12 bytes
code = marshal.load(f)
# dis.disassemble(code) # see the byte code
instructions = dis.get_instructions(code)
objects = {}
for instruction in instructions:
if instruction.opname == 'STORE_NAME':
objects[instruction.argval] = getattr(module_object,
instruction.argval)
elif instruction.opname == 'IMPORT_NAME':
consume_iterator(instructions, 2)
elif instruction.opname == 'IMPORT_FROM':
consume_iterator(instructions, 1)
return objects
print(get_defined_names('/Users/ashwini/py/so.py'))
For a file like:
#/Users/ashwini/py/so.py
import os
from sys import argv, modules
from math import *
from itertools import product
CST = True
from itertools import permutations, combinations
from itertools import chain
E = 100
from itertools import starmap
def func(): pass
for x in range(10):
pass
class C:
a = 100
d = 1
The output will be:
{'d': 1, 'E': 100, 'CST': True, 'x': 9, 'func': <function func at 0x10efd0510>, 'C': <class 'so.C'>}
A much more better way as someone already mentioned in comments will be to parse the source code using ast module and find out the variable names from there.
While I accepted an answer, it can't hurt to post the solution I ended up using. It's a mix between the other proposals :
import ast
import inspect
import importlib
from types import ModuleType
def extract_definitions(module):
""" Returns the name and value of objects defined at the top level of the given module.
:param module: A module object or the name of the module to import.
:return: A dict {'classes': {}, 'functions': {}, 'assignments': {}} containing defined objects in the module.
"""
if not isinstance(module, ModuleType):
module = importlib.import_module(module)
tree = ast.parse(inspect.getsource(module))
definitions = {'classes': {}, 'functions': {}, 'assignments': {}}
for node in tree.body:
if isinstance(node, ast.ClassDef):
definitions["classes"][node.name] = getattr(module, node.name)
elif isinstance(node, ast.FunctionDef):
definitions["functions"][node.name] = getattr(module, node.name)
elif isinstance(node, ast.Assign):
# for unpacking, you need to loop on all names
for target in node.targets:
definitions["assignments"][target.id] = getattr(module, target.id)
return definitions
I added the ability to import from a string or a module object, then removed the parsing of values and replaced it by a simple getattr from the original module.
Untested
def unexported_names (module):
try:
return [name for name in module.__dict__ if name not in module.__all__]
except AttributeError:
return [name for name in module.__dict__ if name.startswith('_')]

Python: ambiguities when (un-)pickling a dict-based tree

I have a simple tree structure where each node can have several children which are accessed through keys (+ some payload stored in value):
class NodeDict(dict):
def __init__(self, parent):
self.parent = parent
self.value = None
def AddChild(self, label):
self[label] = NodeDict(self)
return self[label]
class TreeDict:
def __init__(self):
self.root = NodeDict(None)
def ToString(self, level, node):
res = ":" + str(node.value) + "\n"
for k, v in node.items():
res += " "*level + str(k) + self.ToString(level + 1, v)
return res
def __str__(self):
return self.ToString(1, self.root)
When I try to pickle such a tree I have the problem that it is not correctly unpickled as the following example shows:
class Obj:
def __init__(self, v):
self.v = v
def __str__(self):
return str(self.v)
t = TreeDict()
a = t.root.AddChild(Obj("A"))
b = a.AddChild(Obj("B"))
c = b.AddChild(Obj("C"))
d = b.AddChild(Obj("D"))
e = a.AddChild(Obj("E"))
print t
import cPickle
cPickle.dump(t, open("test.dat", "w"))
t = cPickle.load(open("test.dat", "r"))
print t
The tree looks like this before
:None
A:None
E:None
B:None
C:None
D:None
and like this after pickling and unpickling:
:None
A:None
B:None
B:None
D:None
C:None
E:None
E:None
The problem arises from the fact that I am using Obj as labels in the tree (i.e. as keys in the dict). (It also fails when using e.g. strings instead of Obj if they are not all mutually different.)
How could this be changed to work as intended?
When subclassing builtin types like dict or using new style classes you should use at least the pickle protocol 2. The default for python2 is still 0, which can have problems with these.
When using dump(t, open("test.dat", "wb"), protocol=2) your code works with cPickle and with pickle alike.
And remember the remark about opening the files in binary mode.
If this is still not working (see Aya's comment), is using another tree structure an option for you? If so, here is a script to pickle DOM tree structure.

Python serializable objects json [duplicate]

This question already has answers here:
How to make a class JSON serializable
(41 answers)
Closed 6 months ago.
class gpagelet:
"""
Holds 1) the pagelet xpath, which is a string
2) the list of pagelet shingles, list
"""
def __init__(self, parent):
if not isinstance( parent, gwebpage):
raise Exception("Parent must be an instance of gwebpage")
self.parent = parent # This must be a gwebpage instance
self.xpath = None # String
self.visibleShingles = [] # list of tuples
self.invisibleShingles = [] # list of tuples
self.urls = [] # list of string
class gwebpage:
"""
Holds all the datastructure after the results have been parsed
holds: 1) lists of gpagelets
2) loc, string, location of the file that represents it
"""
def __init__(self, url):
self.url = url # Str
self.netloc = False # Str
self.gpagelets = [] # gpagelets instance
self.page_key = "" # str
Is there a way for me to make my class json serializable? The thing that I am worried is the recursive reference.
Write your own encoder and decoder, which can be very simple like return __dict__
e.g. here is a encoder to dump totally recursive tree structure, you can enhance it or use as it is for your own purpose
import json
class Tree(object):
def __init__(self, name, childTrees=None):
self.name = name
if childTrees is None:
childTrees = []
self.childTrees = childTrees
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, Tree):
return super(MyEncoder, self).default(obj)
return obj.__dict__
c1 = Tree("c1")
c2 = Tree("c2")
t = Tree("t",[c1,c2])
print json.dumps(t, cls=MyEncoder)
it prints
{"childTrees": [{"childTrees": [], "name": "c1"}, {"childTrees": [], "name": "c2"}], "name": "t"}
you can similarly write a decoder but there you will somehow need to identify is it is your object or not, so may be you can put a type too if needed.
Indirect answer: instead of using JSON, you could use YAML, which has no problem doing what you want. (JSON is essentially a subset of YAML.)
Example:
import yaml
o1 = gwebpage("url")
o2 = gpagelet(o1)
o1.gpagelets = [o2]
print yaml.dump(o1)
In fact, YAML nicely handles cyclic references for you.
I implemented a very simple todict method with the help of https://stackoverflow.com/a/11637457/1766716
Iterate over properties that is not starts with __
Eliminate methods
Eliminate some properties manually which is not necessary (for my case, coming from sqlalcemy)
And used getattr to build dictionary.
class User(Base):
id = Column(Integer, primary_key=True)
firstname = Column(String(50))
lastname = Column(String(50))
password = Column(String(20))
def props(self):
return filter(
lambda a:
not a.startswith('__')
and a not in ['_decl_class_registry', '_sa_instance_state', '_sa_class_manager', 'metadata']
and not callable(getattr(self, a)),
dir(self))
def todict(self):
return {k: self.__getattribute__(k) for k in self.props()}
My solution for this was to extend the 'dict' class and perform checks around required/allowed attributes by overriding init, update, and set class methods.
class StrictDict(dict):
required=set()
at_least_one_required=set()
cannot_coexist=set()
allowed=set()
def __init__(self, iterable={}, **kwargs):
super(StrictDict, self).__init__({})
keys = set(iterable.keys()).union(set(kwargs.keys()))
if not keys.issuperset(self.required):
msg = str(self.__class__.__name__) + " requires: " + str([str(key) for key in self.required])
raise AttributeError(msg)
if len(list(self.at_least_one_required)) and len(list(keys.intersection(self.at_least_one_required))) < 1:
msg = str(self.__class__.__name__) + " requires at least one: " + str([str(key) for key in self.at_least_one_required])
raise AttributeError(msg)
for key, val in iterable.iteritems():
self.__setitem__(key, val)
for key, val in kwargs.iteritems():
self.__setitem__(key, val)
def update(self, E=None, **F):
for key, val in E.iteritems():
self.__setitem__(key, val)
for key, val in F.iteritems():
self.__setitem__(key, val)
super(StrictDict, self).update({})
def __setitem__(self, key, value):
all_allowed = self.allowed.union(self.required).union(self.at_least_one_required).union(self.cannot_coexist)
if key not in list(all_allowed):
msg = str(self.__class__.__name__) + " does not allow member '" + key + "'"
raise AttributeError(msg)
if key in list(self.cannot_coexist):
for item in list(self.cannot_coexist):
if key != item and item in self.keys():
msg = str(self.__class__.__name__) + "does not allow members '" + key + "' and '" + item + "' to coexist'"
raise AttributeError(msg)
super(StrictDict, self).__setitem__(key, value)
Example usage:
class JSONDoc(StrictDict):
"""
Class corresponding to JSON API top-level document structure
http://jsonapi.org/format/#document-top-level
"""
at_least_one_required={'data', 'errors', 'meta'}
allowed={"jsonapi", "links", "included"}
cannot_coexist={"data", "errors"}
def __setitem__(self, key, value):
if key == "included" and "data" not in self.keys():
msg = str(self.__class__.__name__) + " does not allow 'included' member if 'data' member is not present"
raise AttributeError(msg)
super(JSONDoc, self).__setitem__(key, value)
json_doc = JSONDoc(
data={
"id": 5,
"type": "movies"
},
links={
"self": "http://url.com"
}
)

Categories