I am trying to extend a class from a given library, but I do not have access to the __init__ of the class, only to a function that generates an instance. Something like:
class A:
    pass

def return_an_A():
    return A()

class B(A):
    # How to instantiate B with only access to return_an_A?
    def extend_A(self):
        pass
How can I define the instantiation of class B?
Thanks for your help.
Update
As rightfully noticed, my example was poorly set up, so here is, I hope, a better explanation of my real issue.
The original code I used was the following.
import gitlab

# Here gl.projects.get(project_name) is an instance of the class
# gitlab.v4.objects.Project
def project(url, private_token, project_name):
    with gitlab.Gitlab(url, private_token) as gl:
        return gl.projects.get(project_name)

# This implementation takes an instance of gitlab.v4.objects.Project
def list_files(project, commit_branch):
    current_files = []
    if not project.empty_repo:
        current_files = [
            f["path"]
            for f in project.repository_tree(ref=commit_branch)
        ]
    return current_files
I wanted to have a structure like
class MyProject:
    # Here is missing the way to instantiate like the project function.
    # I don't want to pass a Project instance as a parameter to make it an
    # attribute; I would like to extend the class Project itself.
    def list_files(self, commit_branch):
        current_files = []
        # Note here that the attributes of the
        # gitlab.v4.objects.Project are directly accessible
        if not self.empty_repo:
            current_files = [
                f["path"]
                for f in self.repository_tree(ref=commit_branch)
            ]
        return current_files
but I can't manage to find the right way to write the __init__.
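One generic trick I came across (a sketch, not specific to python-gitlab) is to let the existing factory build the object and then reassign its __class__; this only works when the subclass adds behaviour and no new state:

class MyProject(gitlab.v4.objects.Project):
    def list_files(self, commit_branch):
        ...

def my_project(url, private_token, project_name):
    proj = project(url, private_token, project_name)
    # Adopt the existing instance into the subclass; safe only because
    # MyProject adds methods, not new attributes.
    proj.__class__ = MyProject
    return proj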
I found a solution to my issue by using a delegation pattern.
class Project:
    def __init__(self, private_token, project_name, url):
        self.private_token = private_token
        self.project_name = project_name
        self.url = url

    # Delegate attribute lookups to the wrapped Project instance when they
    # are not found on this class
    def __getattr__(self, attr):
        return getattr(self._project, attr)

    @property
    def _project(self):
        # Note: each access opens a fresh connection and re-fetches the project
        with gitlab.Gitlab(self.url, self.private_token) as gl:
            return gl.projects.get(self.project_name)

    def list_files(self, branch):
        current_files = []
        if not self._project.empty_repo:
            current_files = [
                f["path"]
                for f in self._project.repository_tree(
                    ref=branch, recursive=True, all=True
                )
                if f["type"] == "blob"
            ]
        return current_files
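For illustration, usage then looks like this (the token, project path, and URL are placeholders):

proj = Project("glpat-XXXX", "group/my-repo", "https://gitlab.example.com")
print(proj.list_files("main"))
print(proj.name)  # not defined on the wrapper, so __getattr__ delegates it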
Related
I have some classes FooA and FooB which are basically a collection of "static" methods. They operate on data - let's say it is a DataItem object:
# Base class with common behavior
class FooBase:
    @classmethod
    def method1(cls, arg, data: DataItem):
        # res = ...
        return res

    @classmethod
    def method2(cls, arg1, arg2, data: DataItem):
        # res = ...  # using method1
        return res

# specialized classes
class FooA(FooBase):
    # define extra methods
    pass

class FooB(FooBase):
    # define extra methods
    pass

# usage 1: as "static methods"
res = FooA.method1(arg, data)
res2 = FooB.method2(args, data)
Now, I'd like to use these classes as attributes of a "managing" class (MyApp) which also has access to a datasource and should implicitly supply DataItems to the static methods of FooA and FooB. Moreover, the datasource supplies a list of DataItem objects.
# usage 2: as part of an "App" class
# here, the "data" argument should be supplied implicitly by MyApp
# also: MyApp contains a list of "data" objects
from typing import List

class MyApp:
    def __init__(self, datasrc):
        self.datasrc = datasrc

    # this could be a generator
    def get_data(self, key) -> List[DataItem]:
        return self.datasrc.get_data(key)

    # FooA, FooB as class / instance level attributes, descriptors, ???

# usage
my_app = MyApp("datasrc")
res_list = my_app.foo_a.method1(arg)  # foo_a is a FooA obj, "data" arg is supplied automatically

# optionally, but not necessarily, call as a static attribute:
res = MyApp.foo_a.method1(arg, data)  # same as FooA.method1(arg, data)
I have tried different things but found no satisfactory solution.
So... I am not sure it can be done in a nice way. I thought about it, and all the approaches have serious drawbacks. One of the problems is that we actually want a method that returns either a list or a single item depending on its input parameters, which is bad.
One way could be to store datasrc in FooBase, but that violates the single-responsibility principle (SRP):
class FooBase:
    def __init__(self, datasrc):
        FooBase.datasrc = datasrc

    @classmethod
    def method1(cls, arg, data=None):
        if data is None:
            return [cls.method1(arg, d) for d in cls.datasrc]
        return data  # placeholder for the real computation
Or use isinstance
@classmethod
def method1(cls, arg, data):
    if isinstance(data, list):
        return [cls.method1(arg, d) for d in data]
    return data  # placeholder for the real computation
But it forces us to adjust every method (which could be done with a decorator or a metaclass).
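For instance, a sketch of such a decorator (assuming data is always passed by keyword and the decorator is applied beneath @classmethod):

from functools import wraps

def broadcast_data(method):
    # Transparently fan a single-item method out over a list of DataItems.
    @wraps(method)
    def wrapper(cls, *args, data, **kwargs):
        if isinstance(data, list):
            return [method(cls, *args, data=d, **kwargs) for d in data]
        return method(cls, *args, data=data, **kwargs)
    return wrapper

class FooBase:
    @classmethod
    @broadcast_data
    def method1(cls, arg, data=None):
        return data  # placeholder for the real computation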
Another way could be to use some intermediate layer:
def decorator(datasrc):
    def wrapper(foo):
        def f(*args, **kwargs):
            # We could catch TypeError here to handle the case when data is passed explicitly
            return [foo(*args, **kwargs, data=data) for data in datasrc]
        return f
    return wrapper

class FooAdapter:
    def __init__(self, datasrc, foo_cls):
        self.datasrc = datasrc
        methods = [
            getattr(foo_cls, m)
            for m in dir(foo_cls)
            if callable(getattr(foo_cls, m)) and not m.startswith("__")
        ]  # all methods of our Foo class
        for method in methods:
            setattr(self, method.__name__, decorator(datasrc)(method))

class MyApp:
    def __init__(self, datasrc):
        self.datasrc = datasrc
        self.foo_a = FooAdapter(datasrc, FooA)
        self.foo_b = FooAdapter(datasrc, FooB)
But the solution with dynamically added functions breaks IDE support.
The cleanest solution, IMO, could be to have an Enum for the Foo methods and an Enum for the Foo classes; then you could write code in MyApp like:
def get_bulk(self, m: MethodEnum, f: FooEnum, *args):
    return [getattr(enum_to_cls_mapping[f], m.value)(*args, data=d) for d in self.datasrc]
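Spelled out, the supporting definitions might look like this (the enum members and the mapping are illustrative, assuming the enum values are the actual method and class names):

from enum import Enum

class FooEnum(Enum):
    A = "FooA"
    B = "FooB"

class MethodEnum(Enum):
    METHOD1 = "method1"
    METHOD2 = "method2"

# Map the enum members to the actual classes.
enum_to_cls_mapping = {FooEnum.A: FooA, FooEnum.B: FooB}

# usage: run FooA.method1 over every DataItem from the datasource
res_list = my_app.get_bulk(MethodEnum.METHOD1, FooEnum.A, arg)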
Say I have 2 different implementations of a class
class ParentA:
    def initialize(self):
        pass

    def some_event(self):
        pass

    def order(self, value):
        # handle order in some way for Parent A
        pass

class ParentB:
    def initialize(self):
        pass

    def some_event(self):
        pass

    def order(self, value):
        # handle order in another way for Parent B
        pass
How can I dynamically let some 3rd class inherit from either ParentA or ParentB based on something like this?
class MyCode:
    def initialize(self):
        self.initial_value = 1

    def some_event(self):
        # handle event
        self.order(self.initial_value)

# let MyCode inherit from ParentA and run
run(my_code, ParentA)
Simply store the class-object in a variable (in the example below, it is named base), and use the variable in the base-class-spec of your class statement.
def get_my_code(base):
    class MyCode(base):
        def initialize(self):
            ...
    return MyCode

my_code = get_my_code(ParentA)
Also, you can use the type builtin. As a callable, it takes the arguments name, bases, dct (in its simplest form).
def initialize(self):
    self.initial_value = 1

def some_event(self):
    # handle event
    self.order(self.initial_value)

subclass_body_dict = {
    "initialize": initialize,
    "some_event": some_event,
}

base_class = ParentA  # or ParentB, as you wish
MyCode = type("MyCode", (base_class,), subclass_body_dict)
This is more explicit than shx2's solution, but still - I like his way better.
PS: of course, you don't have to store base_class or subclass_body_dict; you can build those values in the type() call like:
MyCode = type("MyCode", (ParentA, ), {
"initialize": initialize,
"some_event": some_event
})
Just as a quick copy-and-paste-ready snippet, I've added the comments from shx2's answer to create this (memoized with a created_classes dict attribute, so that successive identical calls with the same classes return identical classes):
class ParentA:
    val = "ParentA"

class ParentB:
    val = "ParentB"

class DynamicClassCreator:
    def __init__(self):
        self.created_classes = {}

    def __call__(self, *bases):
        rep = ",".join([i.__name__ for i in bases])
        if rep in self.created_classes:
            return self.created_classes[rep]

        class MyCode(*bases):
            pass

        self.created_classes[rep] = MyCode
        return MyCode

creator = DynamicClassCreator()
instance1 = creator(ParentA, ParentB)()
print(instance1.val)  # prints "ParentA"
instance2 = creator(ParentB, ParentA)()
print(instance2.val)  # prints "ParentB"
If you wanted to get fancy you could even make DynamicClassCreator a Singleton: https://stackoverflow.com/a/7346105/5122790
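A minimal sketch of that idea (one of several singleton recipes):

class DynamicClassCreator:
    _instance = None

    def __new__(cls):
        # Hand back the same creator every time so the memo dict is shared.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance.created_classes = {}
        return cls._instance

    # __call__ is unchanged from the version above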
As an alternative to Chris's answer implementing the memoisation suggestion for shx2's answer, I'd prefer to use a memoize decorator (the end result is still a class, but it's clearer to me that the function is the interface). I'd also use setdefault to simplify adding to the memo dict, and use the bases tuple itself as the key rather than converting the names to a string, simplifying the code to:
class Memoize:
    def __init__(self, f):
        self.f = f
        self.memo = {}

    def __call__(self, *args):
        return self.memo.setdefault(args, self.f(*args))

class ParentA:
    def initialize(self):
        pass

class ParentB:
    def initialize(self):
        pass

@Memoize
def get_my_code(base):
    class MyCode(base):
        def initialize(self):
            pass
    return MyCode

a1 = get_my_code(ParentA)
a2 = get_my_code(ParentA)
b1 = get_my_code(ParentB)
print(a1 is a2)  # True
print(a1 is b1)  # False
(Not a good example as the code provided doesn't actually do anything other than overwrite the parent class's initialize method...)
I'm trying to create a set of classes where each class has a corresponding "array" version of the class. However, I need both classes to be aware of each other. Here is a working example to demonstrate what I'm trying to do, but it requires duplicating a "to_array" method in each class. In my actual example, there are other, more complicated methods that would need to be duplicated even though the only difference is "BaseArray", "PointArray", or "LineArray". The BaseArray class would similarly have methods that only differ by "BaseObj", "Point", or "Line".
# ------------------
# Base object types
# ------------------
class BaseObj(object):
    def __init__(self, obj):
        self.obj = obj

    def to_array(self):
        return BaseArray([self])

class Point(BaseObj):
    def to_array(self):
        return PointArray([self])

class Line(BaseObj):
    def to_array(self):
        return LineArray([self])

# ------------------
# Array object types
# ------------------
class BaseArray(object):
    def __init__(self, items):
        self.items = [BaseObj(i) for i in items]

class PointArray(BaseArray):
    def __init__(self, items):
        self.items = [Point(i) for i in items]

class LineArray(BaseArray):
    def __init__(self, items):
        self.items = [Line(i) for i in items]

# ------------------
# Testing....
# ------------------
p = Point([1])
print(p)
pa = p.to_array()
print(pa)
print(pa.items)
Here is my attempt, which understandably raises an error. I know why I get a NameError and thus I understand why this doesn't work. I'm showing this to make clear what I'd like to do.
# ------------------
# Base object types
# ------------------
class BaseObj(object):
    ArrayClass = BaseArray

    def __init__(self, obj):
        self.obj = obj

    def to_array(self):
        # By using the "ArrayClass" class attribute here, I can have a single
        # "to_array" function on this base class without needing to
        # re-implement this function on each subclass
        return self.ArrayClass([self])

    # In the actual application, there would be other BaseObj methods that
    # would use self.ArrayClass to avoid code duplication

class Point(BaseObj):
    ArrayClass = PointArray

class Line(BaseObj):
    ArrayClass = LineArray

# ------------------
# Array object types
# ------------------
class BaseArray(object):
    BaseType = BaseObj

    def __init__(self, items):
        self.items = [self.BaseType(i) for i in items]

    # In the actual application, there would be other BaseArray methods that
    # would use self.BaseType to avoid code duplication

class PointArray(BaseArray):
    BaseType = Point

class LineArray(BaseArray):
    BaseType = Line

# ------------------
# Testing....
# ------------------
p = Point([1])
print(p)
pa = p.to_array()
print(pa)
print(pa.items)
One potential solution would be to just define "ArrayClass" as None for all of the classes, and then after the "array" versions are defined you could monkey patch the original classes like this:
BaseObj.ArrayClass = BaseArray
Point.ArrayClass = PointArray
Line.ArrayClass = LineArray
This works, but it feels a bit unnatural and I suspect there is a better way to achieve this. In case it matters, my use case will ultimately be a plugin to a program that (sadly) still uses Python 2.7, so I need a solution that works in Python 2.7. Ideally the same solution can work in 2.7 and 3+ though.
Here is a solution using decorators. I prefer this to the class attribute assignment ("monkey patch" as I called it) since it keeps things a little more self consistent and clear. I'm happy enough with this, but still interested in other ideas...
# ------------------
# Base object types
# ------------------
class BaseObj(object):
    ArrayClass = None

    def __init__(self, obj):
        self.obj = obj

    def to_array(self):
        # By using the "ArrayClass" class attribute here, I can have a single
        # "to_array" function on this base class without needing to
        # re-implement this function on each subclass
        return self.ArrayClass([self])

    # In the actual application, there would be other BaseObj methods that
    # would use self.ArrayClass to avoid code duplication

    @classmethod
    def register_array(cls):
        def decorator(subclass):
            cls.ArrayClass = subclass
            subclass.BaseType = cls
            return subclass
        return decorator

class Point(BaseObj):
    pass

class Line(BaseObj):
    pass

# ------------------
# Array object types
# ------------------
class BaseArray(object):
    BaseType = None

    def __init__(self, items):
        self.items = [self.BaseType(i) for i in items]

    # In the actual application, there would be other BaseArray methods that
    # would use self.BaseType to avoid code duplication

@Point.register_array()
class PointArray(BaseArray):
    pass

@Line.register_array()
class LineArray(BaseArray):
    pass

# ------------------
# Testing....
# ------------------
p = Point([1])
print(p)
pa = p.to_array()
print(pa)
print(pa.items)
I'm trying to inject dependencies into my Django view (controller?). Here's some background.
Normally, the urls.py file is what handles the routing. It is usually something like this:
urlpatterns = [
    path("", views.get_all_posts, name="get_all_posts"),
    path("<int:post_id>", views.get_post, name="get_post"),
    path("create", views.create_post, name="create_post"),
]
The problem with this is that once you get to create_post, for instance, you might have a dependency on a service that creates posts:
# views.py
...
def create_post(request):
    svc = PostCreationService()
    svc.create_post()
This kind of pattern is difficult to test. While I know python testing libraries have tools to mock this sort of thing, I'd rather inject the dependency into the view. Here's what I came up with.
A Controller class that has a static method, export(**deps), which takes in the dependencies and returns a list of url pattern objects:
class ApiController(object):
    @staticmethod
    def export(**deps):
        ctrl = ApiController(**deps)
        return [
            path("", ctrl.get_all_posts, name="get_all_posts"),
            path("<int:post_id>", ctrl.get_post, name="get_post"),
            path("create", ctrl.create_post, name="create_post"),
        ]

    def __init__(self, **deps):
        self.deps = deps

    def get_all_posts(self, request):
        pass

    ...
This looks janky, but I'm not aware of any other way to do what I'm trying to do. The controller needs to return a list of url patterns, and it also needs to take in a list of dependencies. Using the above technique, I can do this in urls.py:
urlpatterns = ApiController.export(foo_service=(lambda x: x))
I am now free to use foo_service in any of the methods of ApiController.
Note:
One alternative would be for the constructor to return the list of urls, but I don't see that as a huge improvement over this. In fact, it strikes me as being more confusing because the class constructor would return a list instead of an instance of the class.
Note 2:
I'm aware that python has mocking tools for mocking class members. Please don't suggest using them. I'd like to use DI as the way to control and manage dependencies.
Any ideas on what the best way to do this is?
Consider injecting using decorators:
from functools import wraps

class ServiceInjector:
    def __init__(self):
        self.deps = {}

    def register(self, name=None):
        def decorator(thing):
            """
            thing here can be a class or function or anything really
            """
            if not name:
                if not hasattr(thing, "__name__"):
                    raise Exception("no name")
                thing_name = thing.__name__
            else:
                thing_name = name
            self.deps[thing_name] = thing
            return thing
        return decorator

    def inject(self, func):
        @wraps(func)
        def decorated(*args, **kwargs):
            new_args = args + (self.deps,)
            return func(*new_args, **kwargs)
        return decorated
# usage:
si = ServiceInjector()

# use func.__name__, registering func
@si.register()
def foo(*args):
    return sum(args)

# we can rename what it's been registered as; here, the class is registered
# with name `UpperCase` instead of the class name `UpperCaseRepresentation`
@si.register(name="UpperCase")
class UpperCaseRepresentation:
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return self.value.upper()

# register a float
si.register(name="PI")(3.141592653)

# inject into functions
@si.inject
def bar(a, b, c, _deps):  # the last positional argument receives the dependencies
    UpperCase, PI, foo = _deps['UpperCase'], _deps['PI'], _deps['foo']
    print(UpperCase('abc'))    # ABC
    print(PI)                  # 3.141592653
    print(foo(a, b, c, 4, 5))  # = 15

bar(1, 2, 3)

# inject into class methods
class Foo:
    @si.inject
    def my_method(self, a, b, _deps, kwarg1=30):
        return _deps['foo'](a, b, kwarg1)

print(Foo().my_method(1, 2, kwarg1=50))  # = 53
You could take a look at https://github.com/ets-labs/python-dependency-injector, but that is a pretty big setup.
You could also create something small, like a service factory:
# services.py
class ServiceFactory:
    def __init__(self):
        self.__services = {}

    def register(self, name, service_class):
        # Maybe add some validation
        self.__services[name] = service_class

    def create(self, name, *args, **kwargs):
        # Maybe add some error handling or fallbacks
        return self.__services[name](*args, **kwargs)

factory = ServiceFactory()

# In your settings.py for example
from services import factory
factory.register('post_creation', PostCreationService)
# Or maybe in apps.ready do an auto_load that loops over all apps and reads the config from services.py

# In your views.py
from services import factory

def create_post(request):
    svc = factory.create('post_creation')
    svc.create_post()

# In your tests.py
from services import factory

def setUp(self):
    factory.register('post_creation', FakePostCreationService)
While reading Dependency Injection Principles, Practices, and Patterns and trying to apply the examples to a django app I came up with the following:
# views.py
class IndexView(View):
    # Must include this to bypass Django's validation
    product_service: IProductService = None

    # Init method not necessary but more explicit
    def __init__(self, product_service: IProductService):
        self.product_service = product_service

    def get(self, request):
        self.product_service.do_stuff()
        ...

# urls.py
# Construct dependencies. I guess this is the closest to the entry point we
# can get with Django.
repo = DjangoProductRepository()
product_service = ProductService(repo)

urlpatterns = [
    path('admin/', admin.site.urls),
    path("",
         IndexView.as_view(product_service=product_service),
         name="index"),
]
This is only an updated version of rabbit.aaron's reply above. My idea is to be able to specify which dependencies to inject instead of getting a dictionary with all registered dependencies.
from functools import wraps

class ServiceInjector:
    deps = {}

    def register(self, name=None):
        def decorator(thing):
            """
            thing here can be a class or function or anything really
            """
            if not name:
                if not hasattr(thing, '__name__'):
                    raise Exception('no name')
                thing_name = thing.__name__
            else:
                thing_name = name
            self.__class__.deps[thing_name] = thing
            return thing
        return decorator

    class inject:
        def __init__(self, *args):
            self.selected_deps = args

        def __call__(self, func):
            @wraps(func)
            def decorated(*args, **kwargs):
                selected_deps = {
                    k: v
                    for k, v in ServiceInjector.deps.items()
                    if k in self.selected_deps
                }
                new_kwargs = {**kwargs, **selected_deps}
                return func(*args, **new_kwargs)
            return decorated
Usage:
si = ServiceInjector()

# use func.__name__, registering func
@si.register()
def foo(*args):
    return sum(args)
Custom naming still works
@si.register(name='uppercase')
class UpperCaseRepresentation:
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return self.value.upper()
Register float
si.register(name="PI")(3.141592653)
Inject into functions
@si.inject('foo', 'PI', 'uppercase')
def bar(a, b, c, uppercase: UpperCaseRepresentation, **kwargs):
    """
    You can accept dependencies as named keyword parameters (with type-hint
    annotations) and collect the rest from **kwargs.
    """
    foo, PI = kwargs['foo'], kwargs['PI']
    print(uppercase('abc'))    # ABC
    print(PI)                  # 3.141592653
    print(foo(a, b, c, 4, 5))  # = 15

bar(1, 2, 3)
Inject into class methods
class Bar:
    @si.inject('foo')
    def my_method(self, a, b, foo, kwarg1=30):
        return foo(a, b, kwarg1)

print(Bar().my_method(1, 2, kwarg1=50))  # = 53
You could go the Flask route and export a class instance with a property that initializes and caches the service on first access, e.g.:
# service.py
def default_factory():
    pass

class ServiceProvider:
    _instance = None

    def __init__(self, create_instance=default_factory):
        self.create_instance = create_instance

    @property
    def service(self):
        # Initialize lazily and cache on first access.
        if self._instance:
            return self._instance
        self._instance = self.create_instance()
        return self._instance

service_provider = ServiceProvider()
# views.py
from .service import service_provider

def view(request):
    service_provider.service.do_stuff()
    # etc.
This has the advantages of being easy to mock and not having any magic.
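For example, a test could pre-seed the provider's cache with a stub before the view runs (FakeService here is illustrative):

# tests.py (sketch)
from .service import service_provider

class FakeService:
    def do_stuff(self):
        return "stubbed"

service_provider._instance = FakeService()  # the property now returns the fake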
The most boring solution I could come up with involves using class variables:
# Module services.post_service
def default_create_post():
    return "foo"

class Provider:
    create_post = default_create_post
Then you could import and use it normally in a view or elsewhere:
from services import post_service

post_service.Provider.create_post()
# Should return "foo"
And when testing, it could be swapped out before being called:
from django.test import TestCase
from services import post_service
from unittest.mock import patch

class MyTestCase(TestCase):
    @patch('services.post_service.default_create_post')
    def test_some_view(self, mock_create_post):
        mock_create_post.return_value = "bar"
        post_service.Provider.create_post = mock_create_post
        # Now calling post_service.Provider.create_post should just return "bar"
I have a model where I want to use a class method to set the default for a property:
class Organisation(db.Model):
    name = db.StringProperty()
    code = db.StringProperty(default=generate_code())

    @classmethod
    def generate_code(cls):
        import random
        codeChars = 'ABCDEF0123456789'
        while True:  # Make sure code is unique
            code = random.choice(codeChars) + random.choice(codeChars) + \
                   random.choice(codeChars) + random.choice(codeChars)
            if not cls.all().filter('code = ', code).get(keys_only=True):
                return code
But I get a NameError:
NameError: name 'generate_code' is not defined
How can I access generate_code()?
As I said in a comment, I would use a classmethod to act as a factory and always create your entity through there. It keeps things simple, with no nasty hooks needed to get the behaviour you want.
Here is a quick example.
class Organisation(db.Model):
    name = db.StringProperty()
    code = db.StringProperty()

    @classmethod
    def generate_code(cls):
        import random
        codeChars = 'ABCDEF0123456789'
        while True:  # Make sure code is unique
            code = random.choice(codeChars) + random.choice(codeChars) + \
                   random.choice(codeChars) + random.choice(codeChars)
            if not cls.all().filter('code = ', code).get(keys_only=True):
                return code

    @classmethod
    def make_organisation(cls, *args, **kwargs):
        new_org = cls(*args, **kwargs)
        new_org.code = cls.generate_code()
        return new_org
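Usage would then look something like this (the name value is just for illustration):

org = Organisation.make_organisation(name='Acme')
org.put()  # save the entity; its unique code was set by the factory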
Another option is to set the attribute in __new__, so it is computed when the instance is created:
import random

class Test(object):
    def __new__(cls):
        cls.my_attr = cls.get_code()
        return super(Test, cls).__new__(cls)

    @classmethod
    def get_code(cls):
        return random.randrange(10)

t = Test()
print t.my_attr
You need to specify the class name: Organisation.generate_code()