Python attrs library: define attr.ib(default=) using other attributes - python

I am using the amazing attrs library to define a lot of object attributes in a very elegant way and it has been working like a charm so far.
The only problem that I am currently having is that I sometimes want to define default values by referencing other attr.ib() attributes. Here is some code that would run if the default for name were a static string:
import attr
from attr.validators import instance_of
import datetime
# NOTE(review): "#attr.s" below was the decorator "@attr.s" before the
# site's extraction mangled the leading "@" into "#". All indentation was
# also stripped. The snippet is deliberately broken: inside the class body
# some_date and some_number are attrs _CountingAttr placeholders, so the
# .format(...) call raises -- demonstrating the problem being asked about.
#attr.s
class Something:
some_date = attr.ib(validator=instance_of(datetime.date))
some_number = attr.ib(convert=float)
# The default below tries to read other attr.ib() fields at class-body
# time, which is exactly what does NOT work.
name = attr.ib(validator=instance_of(str),
default="Generic Name {0} - {1}%".format(
some_date.strftime("%d-%b-%Y"),
some_number * 100)
)
something_instance = Something(some_date=datetime.date.today(), some_number=0.375)
The problem is that name doesn't see a float and a date, but a _CountingAttr object, hence I get an AttributeError (and a TypeError for some_number * 100). Since I can't reference self either, how do I do this?

So this seems not possible with the default keyword at the moment. However, to achieve the same effect, it's possible to use the __attrs_post_init__ method, which can be used to execute arbitrary calculations after instance initialization: http://attrs.readthedocs.io/en/stable/examples.html?highlight=attrs_post_init#other-goodies
In my example it would basically come down to adding
def __attrs_post_init__(self):
    """Fill in the computed default for ``name`` after attrs initialisation."""
    # Only synthesise a name when the caller did not pass one explicitly
    # (presumably name = attr.ib(..., default=None) -- the check fires
    # only then; TODO confirm against the full class definition).
    if self.name is not None:
        return
    self.name = "Generic Name {0} - {1}%".format(
        self.some_date.strftime("%d-%b-%Y"),
        self.some_number * 100,
    )
Credit goes to the attrs github issue tracker for pointing me in the right direction.

You can also do it without __attrs_post_init__.
Just use default = attr.Factory(lambda self: ..., takes_self=True)
import attr
from attr.validators import instance_of
import datetime


# NOTE(review): was "@attr.s" in the original post; the decorator "@" is
# mangled to "#" by the site's text extraction.
@attr.s
class Something:
    """attrs class whose ``name`` default is derived from the other fields."""

    some_date = attr.ib(validator=instance_of(datetime.date))
    some_number = attr.ib(convert=float)
    # BUG FIX: takes_self is a parameter of attr.Factory, not of attr.ib.
    # The original snippet closed the Factory(...) call too early and passed
    # takes_self=True to attr.ib, which does not accept it.
    name = attr.ib(
        validator=instance_of(str),
        default=attr.Factory(
            lambda self: "Generic Name {0} - {1}%".format(
                self.some_date.strftime("%d-%b-%Y"),
                self.some_number * 100,
            ),
            takes_self=True,
        ),
    )


something_instance = Something(some_date=datetime.date.today(), some_number=0.375)

Related

How do I use variables with classes for python?

There is probably a stupidly obvious solution to this but I'm new to Python and can't find it. I'm working out a few of the systems for a practice project I'm working on and I can't seem to get this to work:
class Item:
    """A simple game item: display name, description, category and damage range."""

    def __init__(self, name, description, type, mindamage, maxdamage):
        # Bulk-assign the constructor arguments to their instance slots.
        # (``type`` shadows the builtin inside this method; the parameter
        # name is kept to preserve the original signature.)
        self.name, self.desc, self.type = name, description, type
        self.mindmg, self.maxdmg = mindamage, maxdamage
woodsman = Item("'Woodsman'", "An automatic chambered in .22lr", "gun", 4, 10)
# NOTE(review): this is the asker's intentionally broken attempt. input()
# returns a plain str, so ``inspect`` is the text the user typed, not the
# Item object it names -- the prints below raise AttributeError. The
# answers below map the typed string to the object via a dict.
inspect = input("inspect:").lower()
print(inspect.name)
print(inspect.desc)
print(inspect.type)
I can't find a solution to this for some reason.
Use dataclasses and items dict:
from dataclasses import dataclass


# NOTE(review): was "@dataclass" in the original answer; the decorator "@"
# is mangled to "#" by the site's text extraction. Without the decorator
# the class would get no generated __init__ at all.
@dataclass
class Item:
    """A game item described declaratively with dataclasses."""

    name: str
    description: str
    item_type: str  # avoid naming a field "type": it shadows the builtin
    min_damage: int
    max_damage: int
# Construct each item once at module level.
woodsman = Item(name="'Woodsman'",
                description="An automatic chambered in .22lr",
                item_type="gun",
                min_damage=4,
                max_damage=10)
# other items...
# Registry mapping the name a player may type to the matching Item.
items = {
    "woodsman": woodsman,
    # other items...
}
# .get() returns None for unknown names; the prints would then raise
# AttributeError, so real code should handle that case.
chosen = input("inspect:").lower()
inspect = items.get(chosen)
print(inspect.name)
print(inspect.description)
print(inspect.item_type)
This might be closer to what you're trying to do:
# Keyed inventory: map the typed name straight to its Item instance.
inventory = {
    "woodsman": Item("'Woodsman'",
                     "An automatic chambered in .22lr",
                     "gun",
                     4,
                     10)
}
# Direct indexing raises KeyError for names not present in the inventory.
selection = input("inspect:").lower()
inspect = inventory[selection]
print(inspect.name)
print(inspect.desc)
print(inspect.type)
Note that you will probably want to have some kind of error handling in case the user enters an item that doesn't exist in the inventory.
I was fiddling around and found another solution that works for me:
inspect = input("inspect:").lower()
# WARNING(review): building and exec()-ing source from raw user input is a
# code-injection hole -- any Python typed at the prompt runs verbatim.
# Prefer the dict-lookup approach shown in the other answers.
exec("print(" + inspect + ".name)")

How to validate namedtuple values?

I have a namedtuple variable which represents the version of an application (its number and type). But I want to add some restrictions to the values:
# (Requires: from collections import namedtuple)
Version = namedtuple("Version", ["app_type", "number"])
# NOTE(review): the calls below are illustrative; as written each would
# also raise TypeError because the "number" field is never supplied.
version = Version("desktop") # i want only "desktop" and "web" are valid app types
version = Version("deskpop") # i want to protect from such mistakes
My solution for now is primitive class with no methods:
class Version:
    """Application version tagged with its platform type.

    NOTE: ``assert`` is stripped when Python runs with -O, so this guard
    disappears under optimisation; kept as-is to mirror the original post.
    """

    def __init__(self, app_type, number):
        # Reject anything that is not one of the two supported platforms.
        assert app_type in ('desktop', 'web')
        self.app_type, self.number = app_type, number
Is it pythonic? Is it overkill?
You could use enum.Enum, and typing.NamedTuple instead of collections.namedtuple:
Maybe something like this:
from typing import NamedTuple
import enum


class AppType(enum.Enum):
    """Closed set of supported application platforms."""
    desktop = 0
    web = 1


class Version(NamedTuple):
    """Version record whose platform is constrained to AppType members."""
    app: AppType


# Typos such as AppType.deskpop now fail fast with an AttributeError.
v0 = Version(app=AppType.desktop)
v1 = Version(app=AppType.web)
print(v0, v1)
output:
Version(app=<AppType.desktop: 0>) Version(app=<AppType.web: 1>)
An undefined AppType member raises an AttributeError:
v2 = Version(app=AppType.deskpoop)
output:
AttributeError: deskpoop

Method __init__ has too many parameters

I'm super new to Python (I started about 3 weeks ago) and I'm trying to make a script that scrapes web pages for information. After it's retrieved the information it runs through a function to format it and then passes it to a class that takes 17 variables as parameters. The class uses this information to calculate some other variables and currently has a method to construct a dictionary. The code works as intended but a plugin I'm using with Pycharm called SonarLint highlights that 17 variables is too many to use as parameters?
I've had a look for alternate ways to pass the information to the class, such as in a tuple or a list but couldn't find much information that seemed relevant. What's the best practice for passing many variables to a class as parameters? Or shouldn't I be using a class for this kind of thing at all?
I've reduced the amount of variables and code for legibility but here is the class;
# NOTE(review): the original post had "Class GenericEvent:" -- capital C is
# a syntax error -- and all indentation was stripped by extraction; both
# are repaired here without changing the interface.
class GenericEvent:
    """A scraped calendar event plus values derived from its two dates."""

    def __init__(self, type, date_scraped, date_of_event, time, link,
                 blurb):
        # ``type`` shadows the builtin inside __init__; the parameter name
        # is kept so existing keyword callers (type=...) continue to work.
        countdown_delta = date_of_event - date_scraped
        countdown = countdown_delta.days
        # The event has already happened if its date precedes the scrape date.
        has_passed = countdown < 0
        self.type = type
        self.date_scraped = date_scraped
        self.date_of_event = date_of_event
        self.time = time
        self.link = link
        self.countdown = countdown
        self.has_passed = has_passed
        self.blurb = blurb

    def get_dictionary(self):
        """Return the event as a plain dict (keys 'scraped'/'date'/'url'
        deliberately differ from the attribute names)."""
        return {
            'type': self.type,
            'scraped': self.date_scraped,
            'date': self.date_of_event,
            'time': self.time,
            'url': self.link,
            'countdown': self.countdown,
            'blurb': self.blurb,
            'has_passed': self.has_passed,
        }
I've been passing the variables as key:value pairs to the class after I've cleaned up the data the following way:
# NOTE(review): this call is the asker's code and is not valid Python as
# written -- the arguments lack separating commas, and values such as
# 30/01/19 and 12:00 are not quoted (the accepted answer points this out
# and quotes them). Preserved verbatim to match the question.
event_info = GenericEvent(type="Lunar"
date_scraped=30/01/19
date_of_event=28/07/19
time=12:00
link="www.someurl.com"
blurb="Some string.")
and retrieving a dictionary by calling:
event_info.get_dictionary()
I intend to add other methods to the class to be able to perform other operations too (not just to create 1 dictionary) but would like to resolve this before I extend the functionality of the class.
Any help or links would be much appreciated!
One option is a named tuple:
from typing import Any, NamedTuple


class GenericEvent(NamedTuple):
    """Immutable event record; countdown/has_passed are derived on access.

    Fixes to the posted snippet:
    * the "#property" comment lines were "@property" decorators before the
      "@" was mangled by extraction;
    * ``countdown`` was declared BOTH as a tuple field and as a method,
      which typing.NamedTuple rejects -- it is a derived value, so the
      field is dropped and only the property kept;
    * the property body referenced date_of_event/date_scraped without
      ``self``, which would raise NameError.
    """

    type: Any
    date_scraped: Any
    date_of_event: Any
    time: Any
    link: str
    blurb: str

    @property
    def countdown(self):
        """Whole days from the scrape date to the event date."""
        return (self.date_of_event - self.date_scraped).days

    @property
    def has_passed(self):
        """True when the event date is earlier than the scrape date."""
        return self.countdown < 0

    def get_dictionary(self):
        """Return the tuple fields plus the two derived values as one dict."""
        return {
            **self._asdict(),
            'countdown': self.countdown,
            'has_passed': self.has_passed,
        }
(Replace the Anys with the fields’ actual types, e.g. datetime.datetime.)
Or, if you want it to be mutable, a data class.
I don't think there's anything wrong with what you're doing. You could, however, take your parameters in as a single dict object, and then deal with them by iterating over the dict or doing something explicitly with each one. Seems like that would, in your case, make your code messier.
Since all of your parameters to your constructor are named parameters, you could just do this:
def __init__(self, **params):
This would give you a dict named params that you could then process. The keys would be your parameter names, and the values the parameter values.
If you aligned your param names with what you want the keys to be in your get_dictionary method's return value, saving off this parameter as a whole could make that method trivial to write.
Here's an abbreviated version of your code (with a few syntax errors fixed) that illustrates this idea:
from pprint import pprint


class GenericEvent:
    """Minimal demo: capture all keyword arguments in one dict and show them."""

    def __init__(self, **params):
        # Every named argument arrives in ``params``; pretty-print it to
        # show that the mapping mirrors the call site.
        pprint(params)


event_info = GenericEvent(type="Lunar",
                          date_scraped="30/01/19",
                          date_of_event="28/07/19",
                          time="12:00",
                          link="www.someurl.com",
                          blurb="Some string.")
Result:
{'blurb': 'Some string.',
'date_of_event': '28/07/19',
'date_scraped': '30/01/19',
'link': 'www.someurl.com',
'time': '12:00',
'type': 'Lunar'}

field back to zero after save

I have this class in a module1:
# Odoo model "a" from module1 (question code). NOTE(review): "#api.depends"
# below was the decorator "@api.depends" before extraction mangled the "@",
# and all indentation was stripped.
class A(models.Model):
_name="a"
# NOTE(review): "field.Many2one" is presumably a typo for "fields.Many2one"
# -- verify against the real module.
b_id = field.Many2one("b")
tax_old = fields.Float()
# Related field mirroring b_id.tax_value, stored in the database.
tax_value = fields.Float(string="Tax", related = 'b_id.tax_value', store=True)
# NOTE(review): the keyword should be "compute", not "_compute" -- one of
# the answers below points this out; with "_compute" the method is never
# wired up as the field's compute method.
all_taxes = fields.Float(_compute='compute_all')
#api.depends('tax_value')
def compute_all(self):
self.all_taxes = self.tax_value + self.tax_old
self.update()
In module2 I have this class:
# Odoo model "b" from module2: holds the tax value that model "a" relates to.
# (Indentation stripped by extraction.)
class B(models.Model):
_name="b"
# Inverse side of A.b_id.
a_ids = fields.One2many("a","b_id")
tax_value = fields.Float(string="Tax")
Now in A view when I change b_id value, tax_value works fine and compute_all works fine, but when I save this record, all_taxes doesn't take tax_value field, only tax_old. And when I open the record form view again and manually write a value in tax_value, it works totally fine.
It should be enough to use b_id on your compute method, because it's related:
# NOTE(review): "#api.multi" / "#api.depends" were "@api.multi" /
# "@api.depends" decorators before extraction mangled the "@".
#api.multi
#api.depends('b_id')
def compute_all(self):
# Compute methods can receive a multi-record recordset, hence the loop.
for record in self:
record.all_taxes = record.b_id.tax_value + record.tax_old
The compute method can be called with a multi record recordset. So use a for loop inside it. And you don't have to do an update() at the end.
You can try it
# NOTE(review): "#api.one" / "#api.depends" were "@api.one" / "@api.depends"
# decorators before extraction mangled the "@".
#api.one
#api.depends('b_id', 'b_id.tax_value')
def compute_all(self):
self.all_taxes = self.tax_value + self.tax_old
Two things:
It is compute, not _compute, and you don't need to use self.update().
Try this instead:
# You'll need this
# NOTE(review): this answer imports Django's F() expressions, but the
# question is about an Odoo model -- presumably a mix-up; verify before use.
from django.db.models import F
# NOTE(review): "#api.depends" was the decorator "@api.depends" before
# extraction mangled the "@".
#api.depends('tax_value')
def compute_all(self):
self.update(all_taxes=F('tax_value') + F('tax_old'))
You're missing the self. What you've done is defined a local variable called all_taxes, not the instance variable.. which is what you're after

how to make a lot of parameters available to the entire system?

I have objects from various classes that work together to perform a certain task. The task requires a lot of parameters, provided by the user (through a configuration file). The parameters are used deep inside the system.
I have a choice of having the controller object read the configuration file, and then allocate the parameters as appropriate to the next layer of objects, and so on in each layer. But the only objects themselves know which parameters they need, so the controller object would need to learn a lot of detail about every other object.
The other choice is to bundle all the parameters into a collection, and pass the whole collection into every function call (equivalently, create a global object that stores them, and is accessible to everyone). This looks and feels ugly, and would cause a variety of minor technical issues (e.g., I can't allow two objects to use parameters with the same name; etc.)
What to do?
I have used the "global collection" alternative in the past.
If you are concerned with naming: how would you handle this in your config file? The way I see it, your global collection is a datastructure representing the same information you have in your config file, so if you have a way of resolving or avoiding name clashes in your cfg-file, you can do the same in your global collection.
I hope you don't feel like I'm thread-jacking you - what you're asking about is similar to what I was thinking about in terms of property aggregation to avoid the models you want to avoid.
I also nicked a bit of the declarative vibe that Elixir has turned me onto.
I'd be curious what the Python gurus of stack overflow think of it and what better alternatives there might be. I don't like big kwargs and if I can avoid big constructors I prefer to.
#!/usr/bin/python
import inspect
from itertools import chain, ifilter
from pprint import pprint
from abc import ABCMeta
class Property(object):
    """Mutable value holder used to declare class-level "properties"."""

    def __init__(self, value=None):
        self._x = value

    def __repr__(self):
        # Display just the wrapped value.
        return str(self._x)

    def getx(self):
        return self._x

    def setx(self, value):
        self._x = value

    def delx(self):
        del self._x

    # Classic descriptor wiring: read/write/delete go through the accessors.
    value = property(fget=getx, fset=setx, fdel=delx, doc="I'm the property.")
# NOTE(review): this is Python 2 code (xrange, itertools.ifilter, and the
# print statements in the __main__ block below); indentation was stripped
# by extraction.
class BaseClass(object):
unique_baseclass_thing = Property()
# Walk the MRO and, for each class, collect the Property instances that
# class itself declares (names present on it but not on its parent).
def get_prop_tree(self):
mro = self.__class__.__mro__
r = []
for i in xrange( 0, len(mro) - 1 ):
# Names introduced by mro[i] = dir(child) minus dir(parent).
child_prop_names = set(dir(mro[i]))
parent_prop_names = set(dir(mro[i+1]))
l_k = list( chain( child_prop_names - parent_prop_names ) )
# Pair each new name with its value on the class.
l_n = [ (x, getattr(mro[i],x,None)) for x in l_k ]
# Keep only values that are exactly Property instances.
l_p = list( ifilter(lambda y: y[1].__class__ == Property, l_n))
# Accumulate (class, {name: Property}) pairs, most-derived first.
r.append(
(mro[i],
(dict
( l_p )
)
)
)
return r
# Flatten the per-class dicts into one (name, Property) list, base first.
def get_prop_list(self):
return list( chain(* [ x[1].items() for x in reversed( self.get_prop_tree() ) ] ) )
# Subclass declaring one Property of its own.
class SubClass(BaseClass):
unique_subclass_thing = Property(1)
# Grandchild class declaring two more Property instances.
class SubSubClass(SubClass):
unique_subsubclass_thing_one = Property("blah")
unique_subsubclass_thing_two = Property("foo")
if __name__ == '__main__':
a = SubSubClass()
# Walk the MRO-derived tree: print each class and its own Property values.
for b in a.get_prop_tree():
print '---------------'
print b[0].__name__
for prop in b[1].keys():
print "\t", prop, "=", b[1][prop].value
print
# NOTE(review): the body of this final loop was lost in extraction; the
# sample output that follows suggests it printed each (name, Property) pair.
for prop in a.get_prop_list():
When you run it..
SubSubClass
unique_subsubclass_thing_one = blah
unique_subsubclass_thing_two = foo
---------------
SubClass
unique_subclass_thing = 1
---------------
BaseClass
unique_baseclass_thing = None
unique_baseclass_thing None
unique_subclass_thing 1
unique_subsubclass_thing_one blah
unique_subsubclass_thing_two foo

Categories