I'm using a custom QuerySet and Manager (following the "Django Custom QuerySet and Manager without breaking DRY" pattern), but I can only access the custom functions directly through `objects`. Here's my code:
class CustomQuerySetManager(models.Manager):
    """A re-usable Manager to access a custom QuerySet"""
    def __getattr__(self, attr, *args):
        # Invoked only when normal attribute lookup on the Manager fails.
        print(attr)
        try:
            # NOTE(review): this returns the attribute from the Manager
            # *class* (an unbound function), not a bound method — verify this
            # is intended before relying on it.
            return getattr(self.__class__, attr, *args)
        except AttributeError:
            # don't delegate internal methods to the queryset
            if attr.startswith('__') and attr.endswith('__'):
                raise
            # Fall back to the custom QuerySet for everything else.
            return getattr(self.get_query_set(), attr, *args)
    def get_query_set(self):
        # Builds the model's nested QuerySet class.
        # NOTE(review): Django 1.6+ renamed this hook to get_queryset();
        # on newer Django, Manager.all() calls get_queryset(), so this
        # override is bypassed by all() — presumably why
        # SampleModel.objects.all().test() fails. TODO confirm Django version.
        return self.model.QuerySet(self.model, using=self._db)
class SampleModel(models.Model):
    # Manager that forwards unknown attribute access to the nested QuerySet.
    objects = CustomQuerySetManager()

    class QuerySet(models.QuerySet):
        def test(self):
            # Demo method; reachable as SampleModel.objects.test() through the
            # manager's __getattr__ delegation.
            print("test function was callsed")
With this, the following happens:
SampleModel.objects.test()        # This works (delegated via __getattr__)
SampleModel.objects.all().test()  # This doesn't work...
Why does this happen?
Related
I have "borrowed" the following singleton decorator:
class Singleton:
    """Decorator-style holder that hands out one shared instance of a class.

    The instance is obtained via ``Instance()`` (lazy, no-arg construction)
    or ``Initialize()`` (explicit arguments, one-shot); calling the wrapper
    itself only works after ``Initialize()`` has run.
    """

    def __init__(self, decorated: Callable) -> None:
        # The wrapped class, instantiated lazily.
        self._decorated = decorated
        # Flipped to True only by Initialize() — Instance() leaves it False.
        self.initialized = False

    def Instance(self) -> Callable:
        """Return the shared instance, creating it with no arguments on first use."""
        if not hasattr(self, "_instance"):
            self._instance = self._decorated()
        return self._instance

    def Initialize(self, *args, **kwargs) -> Callable:
        """Create the shared instance with explicit arguments; allowed once."""
        if self.initialized:
            raise Exception("Singleton already initialized")
        self._instance = self._decorated(*args, **kwargs)
        self.initialized = True
        return self._instance

    def __call__(self) -> Callable:
        """Return the instance created by Initialize(); error out otherwise."""
        if not self.initialized:
            raise Exception("Singleton not initialized")
        return self._instance

    def __instancecheck__(self, inst):
        # Make isinstance(x, wrapper) check against the wrapped class.
        return isinstance(inst, self._decorated)
and I have applied it here:
@Singleton  # fixed: the paste had '#Singleton', turning the decorator into a comment
class x_DB(DBHandler):
    """Singleton database handle; ensures the DB file exists before opening it."""

    def __init__(self, db_loc: Path):
        # Create the backing file first so the parent handler can open it.
        self.check_db_exists(db_loc)
        super().__init__(db_loc)
        # Shortcut to the session's query factory.
        self.query = self.session.query
        self.check_tables_exist()
        self.check_for_global()

    def check_db_exists(self, path: Path) -> bool:
        """Create the database file at *path* if it is missing; always returns True."""
        if not path.exists():
            logger.debug(f"DB: {path} does not exist, creating..")
            path.touch()
            logger.success(f"Successfully created {path}")
        return True

    ...  # remaining methods elided in the original post
Now, generally when I am using an instance of the db class, I will get hints like this:
but for some reason, my hints all look like this:
Any tips on fixing this and getting my type hinting back?
I have tried messing around with the typing package, updating Pylance, going back to Jedi, installing Pyright, etc., but nothing seems to work.
Autocomplete depends highly on your IDE's builtin type-checker, and decorators are notorious for confusing the type-checker. One solution is to make your singleton explicitly typed:
from collections.abc import Callable
from pathlib import Path
from typing import TypeVar, Generic, Type
T = TypeVar('T')


class Singleton(Generic[T]):
    """Explicitly-typed singleton wrapper so IDE type-checkers can follow it.

    Parameterizing on T lets Instance()/__call__ advertise the wrapped
    class's type, restoring autocomplete through the decorator.
    """

    def __init__(self, decorated: Type[T]) -> None:
        # Wrapped class; instantiated lazily.
        self._decorated = decorated
        # Set by Initialize() only — Instance() never flips it.
        self.initialized = False

    def Instance(self) -> T:
        """Return the shared instance, creating it with no arguments on first use."""
        if not hasattr(self, "_instance"):
            self._instance = self._decorated()
        return self._instance

    def Initialize(self, *args, **kwargs):
        """Create the shared instance with explicit arguments; allowed once."""
        if self.initialized:
            raise Exception("Singleton already initialized")
        self._instance = self._decorated(*args, **kwargs)
        self.initialized = True
        return self._instance

    def __call__(self) -> T:
        """Return the instance created by Initialize(); raise if not initialized."""
        if not self.initialized:
            raise Exception("Singleton not initialized")
        return self._instance

    def __instancecheck__(self, inst):
        # isinstance(x, wrapper) delegates to the wrapped class.
        return isinstance(inst, self._decorated)
This works for me in PyCharm and VS Code.
I have a complex unpickable object that has properties (defined via getters and setters) that are of complex and unpickable type as well. I want to create a multiprocessing proxy for the object to execute some tasks in parallel.
The problem: While I have succeeded to make the getter methods available for the proxy object, I fail to make the getters return proxies for the unpickable return objects.
My setup resembles the following:
from multiprocessing.managers import BaseManager, NamespaceProxy
class A():
    """Managed class: property `a` returns an unpicklable B, `b` a plain int."""

    @property  # fixed: the paste had '#property', which commented the decorator out
    def a(self):
        # Returns a fresh B — unpicklable, so it cannot cross the proxy boundary.
        return B()

    @property
    def b(self):
        return 2
# Deliberately unpicklable: carries a function-valued attribute.
class B():
    def __init__(self, *args):
        """Accept (and ignore) any positional args; expose f() returning 1."""
        def _one():
            return 1
        self.f = _one
class ProxyBase(NamespaceProxy):
    # Expose the attribute dunders so plain attribute access works via the proxy.
    _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')

class AProxy(ProxyBase): pass
class BProxy(ProxyBase): pass

class MyManager(BaseManager): pass

# Serve A instances through AProxy.
MyManager.register('A', A, AProxy)
if __name__ == '__main__':
    with MyManager() as manager:
        myA = manager.A()
        print(myA.b) # works great
        print(myA.a) # raises error, because the object B is not pickable
I know that I can specify the result type of a method when registering it with the manager. That is, I can do
# Mapping __getattribute__ to typeid 'B' makes EVERY attribute come back as a
# B proxy, not just property a.
MyManager.register('A', A, AProxy, method_to_typeid={'__getattribute__':'B'})
MyManager.register('B', B, BProxy)

if __name__ == '__main__':
    with MyManager() as manager:
        myA = manager.A()
        print(myA.a) # works great!
        print(myA.b) # returns the same as myA.a ?!
It is clear to me why my solution does not work: the `__getattribute__` mapping applies to every attribute access, whereas I only want a proxy for B to be returned when property `a` is accessed. How could I achieve this?
As a side question: if I remove the `*args` argument from the `__init__` method of B, I get an error that it is called with the wrong number of arguments. Why? And how could I resolve this?
I don't think this is possible without some hacks, since the choice to return a value or a proxy is made based on the method name alone, not on the type of the return value (from `Server.serve_client`):
try:
res = function(*args, **kwds)
except Exception as e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
Also keep in mind exposing __getattribute__ in an unpickable class's proxy basically breaks the proxy functionality when calling methods.
But if you're willing to hack it and just need attribute access, here is a working solution (note calling myA.a.f() still won't work, the lambda is an attribute and is not proxied, only methods are, but that's a different problem).
import os
from multiprocessing.managers import BaseManager, NamespaceProxy, Server
class A():
    """Managed class: property `a` returns an unpicklable B, `b` a plain int."""

    @property  # fixed: decorator was mangled to '#property' in the paste
    def a(self):
        return B()

    @property
    def b(self):
        return 2
# Unpicklable on purpose: holds a function attribute plus the creating PID.
class B():
    def __init__(self, *args):
        """Ignore any positional args; record which process built this object."""
        def _const():
            return 1
        self.f = _const
        self.pid = os.getpid()
class HackedObj:
    """Wraps a managed object so the return typeid can be chosen per call.

    The server consults gettypeid AFTER invoking the method, so this wrapper
    can patch the dict once it has seen the actual return value.
    """
    def __init__(self, obj, gettypeid):
        self.obj = obj
        self.gettypeid = gettypeid

    def __getattribute__(self, attr):
        # Use object.__getattribute__ throughout to avoid recursing back into
        # this very method.
        if attr == '__getattribute__':
            return object.__getattribute__(self, attr)
        obj = object.__getattribute__(self, 'obj')
        result = object.__getattribute__(obj, attr)
        if isinstance(result, B):
            gettypeid = object.__getattribute__(self, 'gettypeid')
            # This tells the server that the return value of this method is
            # B, for which we've registered a proxy.
            gettypeid['__getattribute__'] = 'B'
        return result
class HackedDict:
    """Stand-in for Server.id_to_obj that wraps A instances in HackedObj."""
    def __init__(self, data):
        self.data = data

    def __setitem__(self, key, value):
        self.data[key] = value

    def __getitem__(self, key):
        obj, exposed, gettypeid = self.data[key]
        if isinstance(obj, A):
            # Copy so mutations of gettypeid stay local to this lookup.
            gettypeid = gettypeid.copy() if gettypeid else {}
            # Now we need getattr to update gettypeid based on the result
            # luckily BaseManager queries the typeid info after the function
            # has been invoked
            obj = HackedObj(obj, gettypeid)
        return (obj, exposed, gettypeid)
class HackedServer(Server):
    """Server whose id_to_obj mapping injects the per-call typeid hack."""
    def __init__(self, registry, address, authkey, serializer):
        super().__init__(registry, address, authkey, serializer)
        # Wrap the registry dict so lookups of A instances get HackedObj.
        self.id_to_obj = HackedDict(self.id_to_obj)
class MyManager(BaseManager):
    # Swap in the hacked server class used when the manager starts.
    _Server = HackedServer
class ProxyBase(NamespaceProxy):
    # Expose attribute dunders so attribute access round-trips through the proxy.
    _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')

class AProxy(ProxyBase): pass
class BProxy(ProxyBase): pass

MyManager.register('A', callable=A, proxytype=AProxy)
MyManager.register('B', callable=B, proxytype=BProxy)
if __name__ == '__main__':
    print("This process: ", os.getpid())
    with MyManager() as manager:
        myB = manager.B()
        # B created directly in the server process.
        print("Proxy process, using B directly: ", myB.pid)
        myA = manager.A()
        print('myA.b', myA.b)
        # Attribute access through the hacked typeid path.
        print("Proxy process, via A: ", myA.a.pid)
The key to the solution is to replace the _Server in our manager, and then wrap the id_to_obj dict with the one that performs the hack for the specific method we need.
The hack consists of populating the gettypeid dict for the method, but only after the method has been evaluated and we know the return type to be one that needs a proxy. And we are lucky in the order of evaluations: gettypeid is accessed after the method has been called.
Also luckily gettypeid is used as a local in the serve_client method, so we can return a copy of it and modify it and we don't introduce any concurrency issues.
While this was a fun exercise, I have to say I really advise against this solution. If you're dealing with external code that you cannot modify, you should simply create your own wrapper class with explicit methods instead of `@property` accessors, proxy your own class instead, and use method_to_typeid.
Full code example:
def decorator(class_):
    """Wrap *class_* so each Wrapper instance holds the real object in ``self.instance``.

    Note: ``__getattr__`` here is a classmethod on the wrapper (decorator
    restored from the mangled '#classmethod' in the paste). It fires for
    missing *instance* attributes only — lookups on the Wrapper class itself
    are not intercepted, which is the behaviour the question reports.
    """
    class Wrapper:
        def __init__(self, *args, **kwargs):
            self.instance = class_(*args, **kwargs)

        @classmethod
        def __getattr__(cls, attr):
            # Delegate unknown attribute names to the wrapped class.
            return getattr(class_, attr)
    return Wrapper
@decorator  # fixed: '#decorator' in the paste commented the decorator out
class ClassTest:
    """Decorated class carrying a class-level attribute."""
    static_var = "some value"
class TestSomething:
    def test_decorator(self):
        # Fails with AttributeError (see trace below): Wrapper's classmethod
        # __getattr__ does not intercept lookups on the Wrapper class itself —
        # that requires a metaclass.
        print(ClassTest.static_var)
        assert True
When trying to execute test, getting error:
test/test_Framework.py F
test/test_Framework.py:37 (TestSomething.test_decorator)
self = <test_Framework.TestSomething object at 0x10ce3ceb8>
def test_decorator(self):
> print(ClassTest.static_var)
E AttributeError: type object 'Wrapper' has no attribute 'static_var'
Is it possible to access static fields from the decorated class?
While the answer from @martineau probably better addresses the specific issue you are trying to solve, the more general approach is to create a metaclass in order to redefine the instance method __getattr__ on a type instance (and classes are instances of type).
def decorator(class_):
    """Wrap *class_*; a metaclass forwards class-level attribute misses to it."""
    class WrapperMeta(type):
        # Attribute misses on the Wrapper *class* land here, because classes
        # are instances of their metaclass.
        def __getattr__(self, attr):
            return getattr(class_, attr)

    class Wrapper(metaclass=WrapperMeta):
        def __init__(self, *args, **kwargs):
            # Keep the wrapped instance alongside the wrapper.
            self.instance = class_(*args, **kwargs)

    return Wrapper
This allows the attribute look-up on the class itself to be passed through WrapperMeta.__getattr__.
You can get it to work by making the decorator create a class derived from the one being decorated.
Here's what I mean:
def decorator(class_):
    """Wrap *class_* in a subclass so class attributes resolve via normal MRO lookup.

    Because Wrapper inherits from class_, static/class attributes are found
    without any __getattr__ machinery.
    """
    class Wrapper(class_):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Bug fix: __init__ returns None, so the original
            # `self.instance = super().__init__(...)` always stored None.
            # With subclassing, the wrapper *is* the instance.
            self.instance = self
    return Wrapper
@decorator  # fixed: '#decorator' in the paste commented the decorator out
class ClassTest:
    static_var = "some value"

print(ClassTest.static_var) # -> some value
My problem is that when I call isSearchQueryValid(), which in turn calls super(), I get this error:
must be type, not instance
I already found out that it has something to do with old/new-style classes, but when I combine abstract methods, factories and singletons I get a little confused and don't know what is what anymore. Can you please point out the error?
from abc import ABCMeta, abstractmethod, abstractproperty
from Singleton import *
class TorrentSiteFactory():
    """Factory returning singleton site instances by name."""
    @staticmethod  # fixed: decorator was mangled to '#staticmethod' in the paste
    def Create(site):
        # Only one concrete site is wired up; unknown names fall through to None.
        if site == "MySite": return MySite.GetInstance()
class Site(object):
    """Abstract base for torrent sites (Python 2 style ABC declaration)."""
    # NOTE(review): __metaclass__ is the Python 2 mechanism; on Python 3 this
    # assignment has no effect, so abstractness is not actually enforced there.
    __metaclass__ = ABCMeta

    @abstractmethod  # fixed: '#abstractmethod' in the paste was a comment
    def getSearchQueryLink(self): pass

    @abstractmethod
    def isSearchQueryValid(self, searchQuery, categories = None):
        '''Returns True if SearchQuery is ok, False when there are some errors. Additionally returns list of errors and warnings'''
        return True
@Singleton  # fixed: '#Singleton' in the paste commented the decorator out
class MySite(Site):
    """Concrete site. After decoration, the name MySite is bound to a
    Singleton *instance*, not a class — which is what makes the
    super(MySite, self) call below fail with 'must be type, not instance'."""

    def getList(self, searchQuery):
        searchLink = self.getSearchQueryLink(searchQuery)

    def getSearchQueryLink(self, searchQuery):
        searchQueryIsCorrect = self.isSearchQueryValid(searchQuery, self.Categories)
        if searchQueryIsCorrect: return "www.somelink.com"
        return None

    def isSearchQueryValid(self, searchQuery, categories):
        # super(MySite, self): MySite is a Singleton instance here, hence the error.
        return super(MySite, self).isSearchQueryValid(searchQuery, categories)
And here is the Singleton:
class Singleton():
    """Decorator that replaces a class with a lazily-instantiating holder."""

    def __init__(self, decorated):
        # Class to instantiate on the first GetInstance() call.
        self._decorated = decorated

    def GetInstance(self):
        """Return the single shared instance, creating it on first access."""
        if not hasattr(self, '_instance'):
            self._instance = self._decorated()
        return self._instance

    def __call__(self):
        # Direct construction is forbidden; the instance must come from
        # GetInstance().
        raise TypeError('Singletons must be accessed through `Instance()`.')

    def __instancecheck__(self, inst):
        # isinstance(x, wrapper) delegates to the wrapped class.
        return isinstance(inst, self._decorated)
Your Singleton implementation is pretty broken. When you do
#Singleton
class MySite(Site):
...
MySite isn't the class you defined. It's an instance of Singleton. It doesn't have any of the methods of MySite, and super(MySite, self) can't find the MRO to look through. You'll have to change Singleton pretty heavily, if not rewrite it completely.
Also, Singleton and TorrentSiteFactory are classic classes, since you forgot to inherit from object.
In my project I'm using code from http://djangosnippets.org/snippets/562/ and I'm encountering some problems.
When I use my own ChainableQuerySetManager and try to create an object through a related object I get a recursion error.
ChainableQuerySetManager source:
class ChainableQuerySetManager(models.Manager):
    """Manager that delegates unknown attributes to a custom QuerySet class."""
    def __init__(self, qs_class=models.query.QuerySet):
        super(ChainableQuerySetManager,self).__init__()
        # QuerySet class used for delegation (e.g. ExtendedQuerySet).
        self.queryset_class = qs_class

    def get_query_set(self):
        return self.queryset_class(self.model)

    def __getattr__(self, attr, *args):
        # NOTE(review): self.__class__ is the *runtime* class; Django builds
        # related managers by subclassing this manager, which presumably is
        # what triggers the reported recursion — replacing self.__class__ with
        # ChainableQuerySetManager (as the answer suggests) avoids it.
        # TODO confirm against the Django version in use.
        try:
            return getattr(self.__class__, attr, *args)
        except AttributeError:
            return getattr(self.get_query_set(), attr, *args)
Extended query set:
class ExtendedQuerySet(models.query.QuerySet):
    """QuerySet with a convenience lookup that swallows ObjectDoesNotExist."""

    def get_or_None(self, *args, **kwargs):
        """Like get(), but return None instead of raising when nothing matches."""
        try:
            return self.get(*args, **kwargs)
        except ObjectDoesNotExist:
            return None
And test models:
class ParentObject(models.Model):
    # Simple integer payload.
    value = models.IntegerField(default=0)

    def count_someobjects(self):
        # Reverse FK accessor provided by SomeObject.parent (related_name).
        return self.someobjects.count()

    def create_someobject_throw_related(self):
        # Creating through the related manager is what triggers the reported
        # recursion error.
        return self.someobjects.create()

    def create_someobject(self):
        # Direct creation via the model's own manager works.
        return SomeObject.objects.create(parent=self)
class SomeObject(models.Model):
    parent = models.ForeignKey(ParentObject, related_name='someobjects')
    value = models.IntegerField(default=1)
    # Chainable manager wired to the extended queryset.
    objects = ChainableQuerySetManager(ExtendedQuerySet)
Test Case looks like:
class ExtendedQuerySetTests(TestCase):
    def setUp(self):
        # One parent row shared by the tests.
        self.parent = ParentObject.objects.create()

    def test_create_someobject_in_parent(self):
        # Reproduces the recursion error via the related manager.
        someobject = self.parent.create_someobject_throw_related()
I would appreciate your help.
Full source can be found at https://github.com/RANUX/django-simptools
I recently had a similar issue. Try replacing self.__class__ with ChainableQuerySetManager in your query manager. I never quite sorted out exactly why this was the issue, but it did solve things for me.