I have a class like this:
class User:
    # Question code: fetch a user record from a remote passport API.
    # NOTE(review): the JSON is only bound to a local variable; the
    # question asks how to turn its keys into instance attributes.
    # Assumes `json`, `urlopen` and `passportapi` are defined elsewhere.
    def __init__(self, uid):
        userinfo = json.load(urlopen(passportapi + 'user/' + uid))
This class would load user information from a remote api and set corresponding attributes for this class so I can access these attributes by:
print user.username
print user.group_id
Is there any way to implement this?
Thanks
import json
# Sample API payload used by the tests below.
api_result = '{"username":"wawa","age":20}'


class User(object):
    """Wrap a JSON API result, exposing every key as an attribute."""

    def __init__(self, api_result):
        # Decode the payload, then merge each key/value pair straight
        # into the instance namespace.
        parsed = json.loads(api_result)
        self.__dict__.update(parsed)
import unittest
class DefaultTestCase(unittest.TestCase):
    """Check that keys of the JSON payload become attributes on User."""

    def test_default(self):
        user = User(api_result)
        self.assertEqual(user.username, 'wawa')
        self.assertEqual(user.age, 20)

# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
You can do this sort of thing using the setattr function:
>>> class A(object):
pass
>>> a = A()
>>> setattr(a, 'val', 4)
>>> a.val
4
In your case, assuming your parsed json file provides some sort of key-value pair (like a dict), you can just iterate through those, and call setattr on self; something like this (assuming userinfo is a dict):
class User:
    """Fetch a user record from the passport API and expose every
    returned field as an instance attribute via setattr.

    Assumes `json`, `urlopen` and `passportapi` are in scope (as in
    the question's snippet).
    """

    def __init__(self, uid):
        userinfo = json.load(urlopen(passportapi + 'user/' + uid))
        # items() works on Python 2 and 3; dict.iteritems() was
        # removed in Python 3, so the original breaks there.
        for key, value in userinfo.items():
            setattr(self, key, value)
Assuming your JSON request returns a Python dictionary:
class User:
    """Populate the instance directly from the API's JSON response."""

    def __init__(self, uid):
        # Every key of the decoded response becomes an attribute.
        response = json.load(urlopen(passportapi + 'user/' + uid))
        self.__dict__.update(response)
Sometimes I like to have a little more encapsulation and control than what 'update' would offer. I would probably accomplish what you are trying to do like this:
class User(object):
    """Hold the parsed API result and expose its keys as attributes.

    Keeping the data in one dict (instead of splatting it into
    __dict__) allows filtering keys or raising custom errors later.
    """

    def __init__(self, api_result):
        self.userinfo = json.loads(api_result)

    def __getattr__(self, name):
        # Go through __dict__ so a lookup that fires before __init__
        # has set 'userinfo' (e.g. copy/pickle) cannot recurse
        # infinitely back into __getattr__.
        userinfo = self.__dict__.get('userinfo', {})
        if name in userinfo:
            return userinfo[name]
        # Include the attribute name for a useful error message.
        raise AttributeError(name)
I think this method will allow you to do other things like filter certain keywords and raise custom exceptions for accessing your data.
Related
Suppose I have class hierarchy like this:
class SerializableWidget(object):
    """Base class for widgets that should round-trip through JSON."""
    # some code


class WidgetA(SerializableWidget):  # base name fixed: was misspelled 'SerilizableWidget' (NameError)
    """A concrete widget type."""
    # some code


class WidgetB(SerializableWidget):  # base name fixed: was misspelled 'SerilizableWidget' (NameError)
    """Another concrete widget type."""
    # some code
I want to be able to serialize instances of WidgetA and WidgetB (and potentially other widgets) to text files as json. Then, I want to be able to deserialize those, without knowing beforehand their specific class:
some_widget = deserialize_from_file(file_path) # pseudocode, doesn't have to be exactly a method like this
and some_widget needs to be constructed from the precise subclass of SerializableWidget. How do I do this? What methods exactly do I need to override/implement in each of the classes of my hierarchy?
Assume all fields of the above classes are primitive types. How do I override some __to_json__ and __from_json__ methods, something like that?
You can solve this with many methods. One example is to use the object_hook and default parameters to json.load and json.dump respectively.
All you need is to store the class together with the serialized version of the object, then when loading you have to use a mapping of which class goes with which name.
The example below uses a dispatcher class decorator to store the class name and object when serializing, and look it up later when deserializing. All you need is a _as_dict method on each class to convert the data to a dict:
import json
@dispatcher  # '@' restored: transcription turned the decorator into a '#' comment, so the class was never registered
class Parent(object):
    """Base class; registers with the dispatcher for JSON round-trips."""

    def __init__(self, name):
        self.name = name

    def _as_dict(self):
        # Serializable view of this object's state.
        return {'name': self.name}
@dispatcher  # '@' restored: transcription turned the decorator into a '#' comment, so the class was never registered
class Child1(Parent):
    """Adds an integer field 'n' on top of Parent."""

    def __init__(self, name, n=0):
        super().__init__(name)
        self.n = n

    def _as_dict(self):
        # Extend the parent's dict with this class's own field.
        d = super()._as_dict()
        d['n'] = self.n
        return d
@dispatcher  # '@' restored: transcription turned the decorator into a '#' comment, so the class was never registered
class Child2(Parent):
    """Adds a string field 'k' on top of Parent."""

    def __init__(self, name, k='ok'):
        super().__init__(name)
        self.k = k

    def _as_dict(self):
        # Extend the parent's dict with this class's own field.
        d = super()._as_dict()
        d['k'] = self.k
        return d
Now for the tests. First lets create a list with 3 objects of different types.
>>> obj = [Parent('foo'), Child1('bar', 15), Child2('baz', 'works')]
Serializing it will yield the data with the class name in each object:
>>> s = json.dumps(obj, default=dispatcher.encoder_default)
>>> print(s)
[
{"__class__": "Parent", "name": "foo"},
{"__class__": "Child1", "name": "bar", "n": 15},
{"__class__": "Child2", "name": "baz", "k": "works"}
]
And loading it back generates the correct objects:
obj2 = json.loads(s, object_hook=dispatcher.decoder_hook)
print(obj2)
[
<__main__.Parent object at 0x7fb6cd561cf8>,
<__main__.Child1 object at 0x7fb6cd561d68>,
<__main__.Child2 object at 0x7fb6cd561e10>
]
Finally, here's the implementation of dispatcher:
class _Dispatcher:
def __init__(self, classname_key='__class__'):
self._key = classname_key
self._classes = {} # to keep a reference to the classes used
def __call__(self, class_): # decorate a class
self._classes[class_.__name__] = class_
return class_
def decoder_hook(self, d):
classname = d.pop(self._key, None)
if classname:
return self._classes[classname](**d)
return d
def encoder_default(self, obj):
d = obj._as_dict()
d[self._key] = type(obj).__name__
return d
dispatcher = _Dispatcher()
I really liked @nosklo's answer, but I wanted to customize what the property value was for how the model type got saved, so I extended his code a little to add a sub-annotation.
(I know this isn't directly related to the question, but you can use this to serialize to json too since it produces dict objects. Note that your base class must use the @dataclass decorator to serialize correctly - otherwise you could adjust this code to define the _as_dict method like @nosklo's answer does)
data.csv:
model_type, prop1
sub1, testfor1
sub2, testfor2
test.py:
import csv
from abc import ABC
from dataclasses import dataclass
from polymorphic import polymorphic
@polymorphic(keyname="model_type")  # '@' restored: transcription garbled the decorators into '#' comments
@dataclass
class BaseModel(ABC):
    """Abstract row base; the 'model_type' CSV column selects the
    concrete subclass at deserialization time."""
    prop1: str
@polymorphic.subtype_when_(keyval="sub1")  # '@' restored: transcription garbled the decorator into a '#' comment
class SubModel1(BaseModel):
    """Concrete row type selected when model_type == 'sub1'."""
@polymorphic.subtype_when_(keyval="sub2")  # '@' restored: transcription garbled the decorator into a '#' comment
class SubModel2(BaseModel):
    """Concrete row type selected when model_type == 'sub2'."""
# Read each CSV row as a dict; BaseModel.deserialize dispatches on the
# 'model_type' column to build the right subclass, then we round-trip
# the object back through serialize().
with open('data.csv') as csvfile:
    reader = csv.DictReader(csvfile, skipinitialspace=True)
    for row_data_dict in reader:
        price_req = BaseModel.deserialize(row_data_dict)
        print(price_req, '\n\tre-serialized: ', price_req.serialize())
polymorphic.py:
import dataclasses
import functools
from abc import ABC
from typing import Type
# https://stackoverflow.com/a/51976115
class _Polymorphic:
def __init__(self, keyname='__class__'):
self._key = keyname
self._class_mapping = {}
def __call__(self, abc: Type[ABC]):
functools.update_wrapper(self, abc)
setattr(abc, '_register_subtype', self._register_subtype)
setattr(abc, 'serialize', lambda self_subclass: self.serialize(self_subclass))
setattr(abc, 'deserialize', self.deserialize)
return abc
def _register_subtype(self, keyval, cls):
self._class_mapping[keyval] = cls
def serialize(self, self_subclass) -> dict:
my_dict = dataclasses.asdict(self_subclass)
my_dict[self._key] = next(keyval for keyval, cls in self._class_mapping.items() if cls == type(self_subclass))
return my_dict
def deserialize(self, data: dict):
classname = data.pop(self._key, None)
if classname:
return self._class_mapping[classname](**data)
raise ValueError(f'Invalid data: {self._key} was not found or it referred to an unrecognized class')
#staticmethod
def subtype_when_(*, keyval: str):
def register_subtype_for(_cls: _Polymorphic):
nonlocal keyval
if not keyval:
keyval = _cls.__name__
_cls._register_subtype(keyval, _cls)
#functools.wraps(_cls)
def construct_original_subclass(*args, **kwargs):
return _cls(*args, **kwargs)
return construct_original_subclass
return register_subtype_for
polymorphic = _Polymorphic
Sample console output:
SubModel1(prop1='testfor1')
re-serialized: {'prop1': 'testfor1', 'model_type': 'sub1'}
SubModel2(prop1='testfor2')
re-serialized: {'prop1': 'testfor2', 'model_type': 'sub2'}
This is a sample python object that I am working with.
class DataObj(object):
    """Sample record composing a PROF instance alongside scalar fields."""

    def __init__(self, cvid, cvname, address, get_info):
        # Bind the scalar fields in one go.
        self.cvid, self.cvname, self.address = cvid, cvname, address
        # Hard-coded profile, exactly as in the question.
        self.prof = PROF("Honda", "Jason Jones")
class PROF(object):
    """Profile record: an organization, its manager, and projects."""

    def __init__(self, organization, manager_name):
        self.organization = organization
        self.manager_name = manager_name
        # Two hard-coded sample projects (name == type), as in the question.
        self.project_list = [Proj(label, label) for label in ("asd", "asdsd")]
class Proj(object):
    """A single project: its name and its type."""

    def __init__(self, projectname, projecttype):
        self.projectname, self.projecttype = projectname, projecttype
I need to write a function that takes a list of fields and extract all the fields as key value pair from the DataObj. The trick is it should also look for attributes of object composed inside DataObj class. for example if list of fields is ["cvid", "organization", "projectname"], it should return something like this in the following format
{'cvid' : 'value', 'organization' : 'Honda', Proj :[{'projectname' : 'asd'}, {'projectname' : 'asdsd'}]
Where should I write this function, so my code is more modular? I was thinking about writing it inside DataObj but I wouldn't know what are the attributes of object composed inside DataObj. How to achieve what I am trying to do in more object oriented way?
All I did was simply add __iter__ which basically says hey, you can iterate over me, if you cast the object to an iterable container type.
class Proj(object):
    """Project that iterates as ('projectname', value) pairs, so
    dict(proj) yields {'projectname': ...}."""

    def __init__(self, projectname, projecttype):
        self.projectname, self.projecttype = projectname, projecttype

    def __iter__(self):
        # Only the project name is exposed when casting to dict().
        yield ("projectname", self.projectname)
class PROF(object):
    """Profile whose iteration yields each contained project as a dict."""

    def __init__(self, organization, manager_name):
        self.organization = organization
        self.manager_name = manager_name
        self.project_list = [Proj(label, label) for label in ("asd", "asdsd")]

    def __iter__(self):
        # Each project renders itself via its own __iter__.
        for project in self.project_list:
            yield dict(project)
class DataObj(object):
    """Top-level record; iterating produces exactly the (key, value)
    pairs that dict(DataObj(...)) should contain."""

    def __init__(self, cvid, cvname, address):
        self.cvid, self.cvname, self.address = cvid, cvname, address
        self.prof = PROF("Honda", "Jason Jones")

    def __iter__(self):
        # Selected fields only, including data pulled out of the
        # composed PROF instance.
        yield ('cvid', self.cvid)
        yield ('organization', self.prof.organization)
        yield ("Proj", list(self.prof))
# Build a sample record and render it as a dict (Python 2 print syntax).
do = DataObj("1", "heinst", "A Street, Somewhere, USA")
print dict(do)
Between __getattr__ and operator.attrgetter, you could make this work fairly easily:
class DataObj(object):
    """Record that transparently delegates unknown attribute lookups
    to the objects it is composed of."""

    def __init__(self, cvid, cvname, address, get_info):
        self.cvid = cvid
        self.cvname = cvname
        self.address = address
        self.prof = PROF("Honda", "Jason Jones")

    def __getattr__(self, name):
        # Only reached when normal lookup on DataObj itself fails.
        # Try each composed member in turn; shrink this tuple to limit
        # which members may be delegated to, or drop the loop entirely
        # if only self.prof should be checked.
        for member in (self.cvid, self.cvname, self.address, self.prof):
            try:
                return getattr(member, name)
            except AttributeError:
                continue
        raise AttributeError(name)
    # If only self.prof should be checked, the function could simplify to:
    # return getattr(self.prof, name)
Then you can make a simple utility function that runs against a DataObj to get an arbitrary set of key value pairs from it:
from operator import attrgetter

def extractdata(dataobj, *names):
    """Return {name: value} for each (possibly dotted) attribute name.

    Fix: attrgetter('a') returns the bare value while
    attrgetter('a', 'b') returns a tuple, so the original
    dict(zip(names, ...)) was wrong for a single name (it would zip
    the characters of a string value, or raise for a non-iterable).
    An empty name list now yields {} instead of a TypeError.
    """
    if not names:
        return {}
    values = attrgetter(*names)(dataobj)
    if len(names) == 1:
        return {names[0]: values}
    return dict(zip(names, values))
Or as a member of DataObj, just name the first param self to match convention:
def extractdata(self, *names):
    """Method form: extract the named attributes of self as a dict.

    Fix: handles the single-name case, where attrgetter returns the
    bare value rather than a 1-tuple, so zip() would misbehave.
    """
    if not names:
        return {}
    values = attrgetter(*names)(self)
    if len(names) == 1:
        return {names[0]: values}
    return dict(zip(names, values))
__getattr__ allows delegation of attribute lookup to contained objects, and attrgetter allows you to retrieve a set of arbitrary attributes in a simple way.
I have some classes to access my collections in mongoDB. I've created a lot of methods for each class. Now I want to know if there is any way to implement these methods once and then my mongoDB classes only contains fields? look at this example:
#mongoBase.py
class MongoBase():
    """Question sketch: base class meant to hold the shared persistence
    methods once, so collection subclasses only declare fields.

    Fix: as transcribed, `def insert()` / `def update()` were missing
    the colon (a syntax error) and the `self` parameter.
    """

    def insert(self):
        # Placeholder: the question asks how to implement this generically.
        pass

    def update(self):
        pass
#user.py
class User(MongoBase):
    """Question sketch: a collection class that should contain only
    field declarations, inheriting insert()/update() from MongoBase."""
    # Here should be only fields declaration and when I call .insert(), the fields should be inserted.
I did it in Java using java reflection. but I can't find something like that in python.
I'm pretty sure you can achieve what you're trying to do by simply referring to self in the parent class.
Here's the code I used:
class MongoBase(object):
    """Shared persistence helpers: subclasses only carry fields, and
    these methods operate on whatever instance they are called on."""

    def insert(self, field, value):
        # Store the value under the given field name on the instance.
        setattr(self, field, value)

    def update(self, field, value):
        # Same mechanics as insert: rebind the named attribute.
        setattr(self, field, value)


class User(MongoBase):
    """Example collection class: just a field, behavior inherited."""

    def __init__(self, name):
        self.name = name
And here's how it works:
>>> user = User('Bob')
>>> user.name
'Bob'
>>> user.update('name', 'Rob')
>>> user.name
'Rob'
>>> user.insert('age', 12)
>>> user.age
12
I have a dict of different types for which I want to add a simple getter based on the name of the actual parameter.
For example, for three storage parameters, let's say:
self.storage = {'total':100,'used':88,'free':1}
I am looking now for a way (if possible?) to generate a function on the fly with some meta-programming magic.
Instead of
class spaceObj(object):
    # Question sketch: one generic getter keyed by name.
    # NOTE(review): as written this is missing 'self' and refers to a
    # module-level 'storage' — it is illustrative, not runnable.
    def getSize(what='total'):
        return storage[what]
or hard coding
# Hard-coded variant from the question ('@' restored: transcription
# garbled the decorator into the comment '#property'); still a sketch —
# it lacks 'self' and refers to a module-level 'storage'.
@property
def getSizeTotal():
    return storage['total']
but
class spaceObj(object):
# manipulting the object's index and magic
@property
def getSize:
return ???
so that calling mySpaceObj.getSizeFree would be derived - with getSize only defined once in the object and related functions derived from it by manipulating the objects function list.
Is something like that possible?
While certainly possible to get an unknown attribute from a class as a property, this is not a pythonic approach (__getattr__ magic methods are rather rubyist)
class spaceObj(object):
    """Expose storage metrics as synthesized attributes: any attribute
    named getSize<Key> resolves to storage[<key>]."""

    storage = None

    def __init__(self):  # this is for testing only
        self.storage = {'total':100,'used':88,'free':1}

    def __getattr__(self, item):
        # Only synthesize attributes carrying the getSize prefix.
        if item[:7] == 'getSize':
            return self.getSize(item[7:])
        # Fix: the original fell through and implicitly returned None
        # for every other missing attribute, which makes hasattr() lie
        # and breaks protocols such as copy/pickle.
        raise AttributeError(item)

    def getSize(self, what='total'):
        return self.storage[what.lower()]

print (spaceObj().getSizeTotal) # 100
You can put the values into the object as properties:
class SpaceObj(object):
    """Promote every keyword argument to an instance attribute."""

    def __init__(self, **kwargs):
        # Merge each keyword argument into the instance namespace.
        for key in kwargs:
            setattr(self, key, kwargs[key])
# Build the instance from an existing dict (Python 2 print syntax).
storage = {'total':100,'used':88,'free':1}
o = SpaceObj(**storage)
print o.total
or
# Or pass the values directly as keyword arguments.
o = SpaceObj(total=100, used=88, free=1)
print o.total
or using __getattr__:
class SpaceObj(object):
    """Keep the values in one dict and serve them via attribute lookup.

    Safer than splatting kwargs into __dict__: existing methods cannot
    be shadowed by constructor arguments.
    """

    def __init__(self, **kwargs):
        self.storage = kwargs

    def __getattr__(self, name):
        # Go through __dict__ so a lookup before __init__ has run
        # (e.g. copy/pickle) cannot recurse infinitely, and translate
        # a missing key into AttributeError as the attribute protocol
        # requires — the original leaked a raw KeyError, which breaks
        # hasattr() and getattr()-with-default.
        try:
            return self.__dict__['storage'][name]
        except KeyError:
            raise AttributeError(name)
# Same construction as before (Python 2 print syntax).
o = SpaceObj(total=100, used=88, free=1)
print o.total
The latter approach takes a bit more code but it's more safe; if you have a method foo and someone create the instance with SpaceObj(foo=1), then the method will be overwritten with the first approach.
>>> import new
>>> funcstr = "def wat(): print \"wat\";return;"
>>> funcbin = compile(funcstr,'','exec')
>>> ns = {}
>>> exec funcbin in ns
>>> watfunction = new.function(ns["wat"].func_code,globals(),"wat")
>>> globals()["wat"]=watfunction
>>> wat()
wat
I have a model where I want to use a class method to set the default of for a property:
class Organisation(db.Model):
    name=db.StringProperty()
    # BUG (the subject of the question, deliberately kept): the call
    # below runs at class-definition time, before generate_code exists
    # in this scope -> NameError: name 'generate_code' is not defined.
    code=db.StringProperty(default=generate_code())

    # '@' restored: transcription garbled the decorator into '#classmethod'.
    @classmethod
    def generate_code(cls):
        """Generate a 4-char code unique among Organisation entities."""
        import random
        codeChars='ABCDEF0123456789'
        while True: # Make sure code is unique
            code=random.choice(codeChars)+random.choice(codeChars)+\
                random.choice(codeChars)+random.choice(codeChars)
            if not cls.all().filter('code = ',code).get(keys_only=True):
                return code
But I get a NameError:
NameError: name 'generate_code' is not defined
How can I access generate_code()?
As I said in a comment, I would use a classmethod to act as a factory and always create you entity through there. It keeps things simpler and no nasty hooks to get the behaviour you want.
Here is a quick example.
class Organisation(db.Model):
    name=db.StringProperty()
    code=db.StringProperty()

    # '@' restored on both decorators: transcription garbled them into
    # '#classmethod' comments, leaving plain instance methods.
    @classmethod
    def generate_code(cls):
        """Return a 4-char code not used by any existing entity."""
        import random
        codeChars='ABCDEF0123456789'
        while True: # Make sure code is unique
            code=random.choice(codeChars)+random.choice(codeChars)+\
                random.choice(codeChars)+random.choice(codeChars)
            if not cls.all().filter('code = ',code).get(keys_only=True):
                return code

    @classmethod
    def make_organisation(cls,*args,**kwargs):
        """Factory: build an Organisation and assign it a unique code."""
        new_org = cls(*args,**kwargs)
        new_org.code = cls.generate_code()
        return new_org
import random
class Test(object):
    """Demo: compute an attribute via a classmethod on instantiation."""

    def __new__(cls):
        # Runs before __init__. Note this assigns a *class* attribute,
        # so every existing instance sees the latest value.
        cls.my_attr = cls.get_code()
        return super(Test, cls).__new__(cls)

    # '@' restored: transcription garbled the decorator into '#classmethod'.
    @classmethod
    def get_code(cls):
        # Random digit in [0, 10).
        return random.randrange(10)
# Instantiate and show the generated attribute (Python 2 print syntax).
t = Test()
print t.my_attr
You need to specify the class name: Organisation.generate_code()