Let's say I have the following code
class MyClass(object):
    """Simple two-value holder used by MySecondClass and the tests."""

    def __init__(self, val1, val2):
        # Expose both constructor arguments as public attributes.
        self.attr1, self.attr2 = val1, val2
class MySecondClass(object):
    """Maintains a growing list of MyClass instances."""

    def __init__(self):
        # Container for the MyClass objects built by add_to_list().
        self.my_list = []

    def add_to_list(self, val1, val2):
        """Wrap the two values in a MyClass and append it to my_list."""
        self.my_list += [MyClass(val1, val2)]
and the following unit-testing code
import unittest
def myclass_equality(inst1, inst2, msg=None):
    """Equality assertion for MyClass, suitable for addTypeEqualityFunc().

    Raises unittest's failureException (AssertionError) with *msg* when the
    two instances differ on either attribute; returns None when they match.
    """
    # Bug fix: MyClass stores its values as attr1/attr2, not val1/val2 --
    # the original accessed attributes that do not exist.
    attr1_matches = inst1.attr1 == inst2.attr1
    attr2_matches = inst1.attr2 == inst2.attr2
    if not attr1_matches or not attr2_matches:
        raise unittest.TestCase.failureException(msg)
class ListTests(unittest.TestCase):
    """Tests MySecondClass.add_to_list using a custom MyClass comparer."""

    def __init__(self, *args, **kwargs):
        super(ListTests, self).__init__(*args, **kwargs)
        # Teach assertEqual how to compare MyClass instances without
        # having to define MyClass.__eq__.
        self.addTypeEqualityFunc(MyClass, myclass_equality)

    def setUp(self):
        # Fresh container for every test case.
        self.second = MySecondClass()

    def test_adding_to_list(self):
        x, y = 1, 2
        self.second.add_to_list(x, y)
        expected = [MyClass(x, y)]
        self.assertListEqual(self.second.my_list, expected)
However, the assertion fails as the assertListEqual method compares objects with '==' as can be seen here and not with the method I have defined, which is called if I call assertEqual on two MyClass instances.
Are there any workarounds? I neither need nor want to define an __eq__ method for MyClass.
And why does assertListEqual compare with '==' when assertEqual does not?
Can you just do it in a loop or create another loop wrapper for your myclass_equality function? I feel like I am missing something.
E.g.:
def test_adding_to_list(self):
    """Compare element-wise via myclass_equality instead of '=='."""
    val1, val2 = 1, 2
    self.second.add_to_list(val1, val2)
    my_other_list = [MyClass(val1, val2)]
    # Robustness: zip() silently truncates, so check lengths explicitly first.
    self.assertEqual(len(self.second.my_list), len(my_other_list))
    for i, (a, b) in enumerate(zip(self.second.my_list, my_other_list)):
        # Bug fix: the original line was missing its closing parenthesis.
        myclass_equality(
            a, b, msg='Adding to list failed on element {}:{}!={}'.format(i, a, b))
Related
I want to pass a variable(iterable )between instances of different classes. I have a structure similar with the one below.
Each class has its own module(so no globals) and needs to work in python 3 and 2.
class O:
    """Root of the example hierarchy."""
    pass


class A(O):
    """Branch that does not participate in caching."""
    pass


class B(O):
    """Branch whose subclasses share the injected cache object."""

    def __init__(self, cache):
        # The cache is dependency-injected; subclasses read/write through it.
        self.cache = cache


class B1(B):
    def p(self):
        # Writes a fixed entry into the shared cache.
        self.cache.add_to_cache("32", "something something")


class B2(B):
    def p(self):
        # Reads from the shared cache.
        self.cache.get_from_cache("52", "something else")
For B and its sub-classes I want to create a cache. All instances of this classes(B, B1, B2) to use the same cache.
To keep it simple, let's say that the cache is just a dict.
c = {}
b1 = B1(c)  # needs c
b1.p()
b2 = B2(c)  # needs c
b2.p()
print(c)
Of course the example above is wrong, because the cache is different for each instance.
The cache should be:
{
    "32": "something something",
    "52": "something else"
}
Another approach to this is using CacheService as an injectable Singleton service, which I consider a better practice.
Read this first for a code/syntax solution to your direct question, or continue reading for a solution with better design.
class O(object):
    pass


class CacheService(object):
    """Per-owner singleton cache registry.

    getinstance() hands out exactly one CacheService per owner_id, so every
    object constructed with the same owner id shares the same cache.
    """

    __instances = {}

    @staticmethod  # bug fix: was '#staticmethod', a markdown artifact
    def getinstance(owner_id):
        """Return (creating on first use) the singleton for *owner_id*."""
        if owner_id not in CacheService.__instances:
            CacheService.__instances[owner_id] = CacheService(owner_id)
        return CacheService.__instances[owner_id]

    def __init__(self, owner_id):
        self._owner_id = owner_id
        # Keys are namespaced with the owner id so distinct owners never
        # collide even if instances end up sharing a dict.
        self._owner_query = CacheService.__name__ + self._owner_id
        self._cache = {}

    def put_in_cache(self, key, value):
        self._cache[self._owner_query + str(key)] = value

    def get_from_cache(self, key):
        # "the_default" mirrors the original fallback value.
        return self._cache.get(self._owner_query + str(key), "the_default")
class B(O):
    """Base that wires every instance to the shared per-class cache."""

    def __init__(self):
        # All B subclasses request the cache registered under B's own name,
        # so B, B1 and B2 end up sharing a single CacheService instance.
        self._cache = CacheService.getinstance(B.__name__)


class B1(B):
    def __init__(self):
        super(B1, self).__init__()

    def p(self):
        # Reader side: fetch and display whatever was stored under "a".
        print(self._cache.get_from_cache("a"))


class B2(B):
    def __init__(self):
        super(B2, self).__init__()

    def p(self):
        # Writer side: publish a value for B1 to read.
        self._cache.put_in_cache("a", 2)
if __name__ == "__main__":
    # Order matters: B2.p() writes to the shared cache before B1.p() reads it.
    b1 = B1()
    b2 = B2()
    b2.p()
    b1.p()
out:
2
This still uses a class variable, but hides it from your "everyday code", and moves it to the "infrastructure level".
I see this as cleaner, as now your class hierarchy shouldn't handle its own global variables.
To directly answer the programming question, Use class variables.
As a side note, it would be much better to use some kind of "CacheService" and inject that to the constructor, rather than use inheritance and class variables.
For this, see my other answer.
Code for using class variables follows:
class O(object):
    pass


class B(O):
    # use your cache class if you want, I am using dict just for show
    # Class variable: one dict shared by B and every subclass instance.
    __cache = {}

    def __init__(self):
        pass

    def _get_from_cache(self, key):
        # Bug fix: the original read self._cache, which does not exist.
        # The class variable is name-mangled to _B__cache and must be
        # reached as self.__cache from inside this class body.
        return self.__cache.get(key, "default1")

    def _put_in_cache(self, key, value):
        self.__cache[key] = value
class B1(B):
    """Reader: prints whatever is cached under "a"."""

    def __init__(self):
        super(B1, self).__init__()

    def p(self):
        print(self._get_from_cache("a"))


class B2(B):
    """Writer: stores 2 under "a" in the shared class-level cache."""

    def __init__(self):
        super(B2, self).__init__()

    def p(self):
        self._put_in_cache("a", 2)
if __name__ == "__main__":
    # B2.p() must run first so the shared cache holds "a" before B1 reads it.
    b1 = B1()
    b2 = B2()
    b2.p()
    b1.p()
out:
2
Notice _get_from_cache and _put_in_cache are methods, but they could be @staticmethods, as they only ever access class variables, and their self isn't "really" ever being used. __cache could theoretically be accessed directly by children (via the name-mangled _B__cache), but _get_from_cache and _put_in_cache make __cache private, and give a protected API to it.
Let's say I have a class as such:
class Test:
    """Plain value object: stores its two constructor arguments."""

    def __init__(self, a, b):
        self.a, self.b = a, b
My goal is to create a decorator that handles populating the init args if they do not exist, i.e.:
class Test:
    # Bug fix: '#autoinit' was a markdown artifact. The decorator class must
    # be instantiated and applied with '@' for the example to work.
    @autoinit()
    def __init__(self, a, b):
        self.a = a
        self.b = b
where #autoinit is defined as such:
class autoinit:
    """Decorator that fills missing __init__ keyword arguments from a dict.

    Keyword arguments supplied by the caller override the configured
    defaults; anything not supplied falls back to *data*.
    """

    _DEFAULT_DATA = {"a": "test_a", "b": "test_b"}

    def __init__(self, data=None):
        # Avoid the mutable-default-argument trap: take a fresh copy.
        self.data = dict(self._DEFAULT_DATA) if data is None else data

    def __call__(self, func):
        decorator = self

        def wrapper(instance, **kwargs):
            # Bug fix: the original called func(self, ...) where self was the
            # *decorator* rather than the instance being initialised, and it
            # discarded any keyword arguments the caller supplied.
            merged = dict(decorator.data)
            merged.update(kwargs)
            func(instance, **merged)

        return wrapper
Thus, it will automatically assign the Test attributes to test_a, test_b respectively.
The ideal usage would be as such:
test = Test(a="test_z", b="test_x")
test.a == "test_z"
test.b == "test_x"
# however,
test = Test()
test.a == "test_a"
test.b == "test_b"
# but also,
test = Test(a="test_z")
test.a == "test_z"
test.b == "test_b"
I will always have matching arguments in the Test class to the keys in the data dictionary.
Is this possible? What is the cleanest implementation?
Update:
The intended use is across many independent classes. For example, say I have a global config as such:
config = {
"resourceA": {"a": "test_a", "b": "test_b"},
"resourceB": {"name": "foo", "value": "bar"}
}
The goal would be for the decorator @autoinit(resource="resourceA") to use **config[resource] to populate all __init__ values for a given class.
Here's how I would write this:
def autoinit(**kwargs):
    """Build a decorator that fills missing __init__ kwargs with defaults."""
    if not kwargs:
        kwargs = {"a": "test_a", "b": "test_b"}  # some default

    def wrapper(f):
        def wrapped(*args, **overrides):
            # Bug fix: the original mutated the closed-over kwargs dict with
            # kwargs.update(overrides), so an override passed to one call
            # leaked into every later construction. Merge into a fresh dict.
            merged = dict(kwargs)
            merged.update(overrides)
            return f(*args, **merged)
        return wrapped
    return wrapper
This allows an implementing class as described in your question:
class Test:
    # Bug fix: '#autoinit()' was a markdown artifact; decorators are applied
    # with '@'.
    @autoinit()
    def __init__(self, a, b):
        self.a = a
        self.b = b
# Bug fix: 'assert t.a = ...' is a SyntaxError; equality needs '=='.
t = Test()
assert t.a == 'test_a'
assert t.b == 'test_b'

t2 = Test(a='test_z')
assert t2.a == 'test_z'
assert t2.b == 'test_b'
With that all being said, consider instead using a mixin that teaches your class how to read from the configuration itself.
from abc import ABC


class ConfigurationDefault(ABC):
    """Mixin providing an alternate constructor that merges config defaults."""

    @classmethod
    def with_config_defaults(cls, config, **kwargs):
        # Caller-supplied kwargs take precedence over config entries.
        merged = dict(config)
        merged.update(kwargs)
        return cls(**merged)
class Test(ConfigurationDefault):
    """Example consumer of the ConfigurationDefault mixin."""

    def __init__(self, a, b):
        self.a, self.b = a, b
config = {'resources': {'a': 'test_a', 'b': 'test_b'}}
# All values come from the config defaults:
t = Test.with_config_defaults(config['resources'])
# An explicit keyword overrides the configured default for 'a':
t2 = Test.with_config_defaults(config['resources'], a='test_z')
Suppose we wish for A to inherit B - can do via class A(B). However - what about below?
class A():
    """Copies every non-magic attribute of a fresh B instance onto self,
    emulating inheritance without actually subclassing B."""

    def __init__(self):
        source = B()
        for name in source.__dir__():
            # "Magic" names contain the double underscore twice (e.g. __init__).
            if name.count('__') != 2:
                setattr(self, name, getattr(source, name))
A now has all of B's attributes and methods, minus magic - effectively, inheritance. ... or is it? Are there problems with above implementation? Application context & minimally-reproducible example below.
Context: conditional inheritance; I seek for DataGenerator to inherit either NumpyGenerator or HDF5Generator, but not both. This can be done via an external conditional upon two classes, each implementing one inheritance - but that's unelegant and duplicative. Thus, a single-class, wrapper-less implementation is desirable.
Also, no class utilizes static methods, class methods, wrappers, or strictly private methods.
class DataGenerator():
    """Conditionally "inherits" from NumpyGenerator or HDF5Generator by
    copying the chosen generator's non-magic attributes onto this instance."""

    def __init__(self, gentype, **gen_kwargs):
        self.a = 1
        self.inherit_datagen(gentype, **gen_kwargs)
        self.print_info()
        print(self.a)

    def inherit_datagen(self, gentype, **gen_kwargs):
        """Instantiate the generator named by *gentype* and absorb it."""
        if gentype == 'numpy':
            datagen = NumpyGenerator(**gen_kwargs)
        elif gentype == 'hdf5':
            datagen = HDF5Generator(**gen_kwargs)
        else:
            # Robustness fix: the original fell through here and raised a
            # confusing NameError on 'datagen' for unknown generator types.
            raise ValueError("unknown gentype: {!r}".format(gentype))
        ismagic = lambda x: x.count('__') == 2
        nonmagic_attrs = [attr for attr in datagen.__dir__() if not ismagic(attr)]
        for attr in nonmagic_attrs:
            setattr(self, attr, getattr(datagen, attr))
class NumpyGenerator():
    """Stand-in numpy-backed generator used by DataGenerator."""

    def __init__(self, a=2):
        self.a = a

    def print_info(self):
        print("Am Numpy")
class HDF5Generator():
    """Stand-in HDF5-backed generator used by DataGenerator."""

    def __init__(self, a=3):
        self.a = a

    def print_info(self):
        print("Am HDF5")
# Each construction prints the generator's identity, then self.a -- which has
# been overwritten by the copied generator attribute (6 and 9, not 1).
dgen_numpy = DataGenerator('numpy', a=6)
dgen_hdf5 = DataGenerator('hdf5', a=9)
>> Am Numpy
>> 6
>> Am HDF5
>> 9
So I have a class with a method, which takes string. Somethinkg like this:
class A():
    # NOTE(review): illustrative stub from the question -- as written the
    # comment-only body below is a SyntaxError; real code needs a statement
    # (e.g. 'pass') after the comment.
    def func(self, name):
        # do some stuff with it
I have finite number of possible values, [val1, val2, val2] for example, All strings. I want to use them like this:
a = A()
a.val1() # actually a.func(val1)
I tried to combine decorators and setattr:
class A():
    def func(self, val):
        # do some stuff with it

    # NOTE(review): despite the question's wording, register() is not a
    # decorator -- it builds a closure and binds it on the instance.
    # setattr(self, val, wrapper) stores a *plain function* as an instance
    # attribute, so it is not bound as a method; calling a.val1() passes no
    # argument for wrapper's 'self' parameter. The fixed version below
    # (see the answer) drops wrapper's parameter and names the attribute.
    def register(self, val):
        def wrapper(self):
            self.func(val)
        setattr(self, val, wrapper)
So I can iterate through all possible values in run-time:
a = A()
for val in vals:
a.register(val)
And it has zero effect. Usually setattr adds new attribute with value None, but in this case nothing happens. Can somebody explain why it is this way and what can I do?
register() isn't a decorator, it's mostly just a "function factory" with side-effects. Also, as I said in a comment, setattr() needs to know what name to assigned to the value.
Here's a way to get your code to work:
class A():
    """Supports dynamically registering named shortcuts that call func()."""

    def func(self, val):
        # do some stuff with it
        print('func({}) called'.format(val))

    def register(self, val, name):
        """Attach a zero-argument callable *name* on this instance that
        forwards to func(val)."""
        def forwarder():
            # 'self' and 'val' are captured from the enclosing call.
            self.func(val)
        forwarder.__name__ = name
        setattr(self, name, forwarder)
vals = 10, 20, 30

a = A()
# Register shortcuts val1, val2, val3 (enumerate starts counting at 1).
for i, val in enumerate(vals, 1):
    a.register(val, 'val'+str(i))  # Creates name argument.

a.val1()  # -> func(10) called
a.val2()  # -> func(20) called
I have the following BaseClass
class A(object):
    """Base class holding two values.

    NOTE: '__metaclass__' is the Python 2 spelling; under Python 3 it is just
    a plain class attribute and does not make the class abstract.
    """

    __metaclass__ = abc.ABCMeta

    def __init__(self, val1, val2):
        self.v1, self.v2 = val1, val2
and then some extended class:
class B(A):
    # NOTE(review): this is the question's broken snippet. Defects, as asked
    # about in the question:
    #   - 'self.__class' / '.__init' should be 'self.__class__' / '__init__',
    #     and '__class' would be name-mangled to '_B__class' anyway;
    #   - the super(...) call is missing its closing parenthesis;
    #   - 'args[len(args)]' is always an IndexError; the last element is
    #     'args[-1]'.
    def __init__(self, *args, **kwargs):
        super(self.__class, self).__init(*args[:len(args)-1], **kwargs
        self.v3 = args[len(args)]
basically i want to call it in a way such that:
x = B(1, 2, 34)
but this seems to require a specific argument order. How do I implement __init__ the right way so that the base class can initialize its v1, v2 variables and the extended class B can initialize the v3 value (in this case with 34)?
You should be explicit about the required arguments for the super class A.__init__. If you provide < 2 args to B then you would get an error.
class B(A):
    """Extends A with a third value taken from the extra positional args."""

    def __init__(self, val1, val2, *args):
        # Bug fix: 'super(self.__class, self)' raises AttributeError because
        # '__class' is name-mangled inside the class body; name the class
        # explicitly (Python-2-compatible form).
        super(B, self).__init__(val1, val2)
        self.v3 = args  # a tuple; use args[-1] if only the last value is wanted