I am recursively generating a few objects, each of which needs a contiguous, unique id. What is the easiest way to guarantee synchronization in Python 2.7?
iid = 1
def next_id():
    iid += 1
    return iid
def process():
    # .. do something
    id = next_id()
from itertools import count
iid = count()
print next(iid) # 0
print next(iid) # 1
print next(iid) # 2
etc., and
new_iid = count(10)
print next(new_iid) # 10
print next(new_iid) # 11
print next(new_iid) # 12
for starting at values other than 0.
count() is essentially a generator which infinitely yields values.
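If you want to keep the next_id() interface from the question, a minimal sketch (single-threaded, as in the original code; the _counter name is just illustrative) could wrap count():
from itertools import count

_counter = count(1)  # start at 1 to match the original iid

def next_id():
    # each call pulls the next value from the shared counter
    return next(_counter)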
Use a mutex:
import threading

iid = 1
iid_lock = threading.Lock()

def next_id():
    global iid
    with iid_lock:
        result = iid
        iid += 1
    return result
You might like to hide the internals in a class:
class IdGenerator(object):
    def __init__(self):
        self.cur_id = 1
        self.lock = threading.Lock()

    def next_id(self):
        with self.lock:
            result = self.cur_id
            self.cur_id += 1
        return result
EDIT: Based on the comments, it seems like you're not using threads. This means you don't need the locking mechanism at all. What you initially wrote would be sufficient, though you need the global keyword so the function rebinds the module-level variable instead of creating a local one:
iid = 1

def next_id():
    global iid
    res = iid
    iid += 1
    return res
You were thinking of something like this:
class Counter():
    def __init__(self):
        self.theCount = -1

    def __call__(self):
        self.theCount += 1
        return self.theCount


class BorgCounter():
    Borg = {'theCount': -1}

    def __init__(self):
        self.__dict__ = BorgCounter.Borg

    def __call__(self):
        self.theCount += 1
        return self.theCount
myCount = Counter()
mycount2 = Counter()
assert(myCount()==0)
assert(mycount2()==0)
assert(mycount2()==1)
assert(myCount()==1)
assert(myCount()==2)
myCount = BorgCounter()
mycount2 = BorgCounter()
assert(myCount()==0)
assert(mycount2()==1)
assert(mycount2()==2)
assert(myCount()==3)
assert(myCount()==4)
add_digits2(1)(3)(5)(6)(0) should add up all the numbers and stop when it reaches 0.
The output should be 15.
The code below works but uses a global variable:
total = 0
def add_digits2(num):
    global total
    if num == 0:
        print(total)
    else:
        total += num
        return add_digits2
The result is correct, but I need to do the same thing without using the global variable.
One thing you could do is use partial:
from functools import partial

def add_digits2(num, total=0):
    if num == 0:
        print(total)
        return
    else:
        total += num
        return partial(add_digits2, total=total)

add_digits2(2)(4)(0)
You can just pass in *args as a parameter and return the sum:
def add_digits2(*args):
    return sum(args)

add_digits2(1, 3, 5, 6)
You could also use a class, using the __call__ method to obtain this behavior:
class Add_digits:
    def __init__(self):
        self.total = 0

    def __call__(self, val):
        if val != 0:
            self.total += val
            return self
        else:
            print(self.total)
            self.total = 0

add_digits = Add_digits()
add_digits(4)(4)(0)
# 8
add_digits(4)(6)(0)
# 10
though I still don't get why you would want to do this...
It's really hard to say what they are after when asking questions like that, but the total could be stored in a function attribute. Something like this:
>>> def f():
...     f.a = 3
...
>>> f()
>>> f.a
3
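As a rough sketch of how a function attribute could replace the global in the original add_digits2 (the attribute name total is just an illustrative choice):
def add_digits2(num):
    # keep the running total on the function object instead of a global
    total = getattr(add_digits2, 'total', 0)
    if num == 0:
        print(total)
        add_digits2.total = 0  # reset so the next chain starts fresh
        return
    add_digits2.total = total + num
    return add_digits2

add_digits2(1)(3)(5)(6)(0)  # prints 15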
I have an entire Deque Array class that looks like this:
from collections import deque
import ctypes

class dequeArray:
    DEFAULT_CAPACITY = 10  # moderate capacity for all new queues

    def __init__(self):
        self.capacity = 5
        capacity = self.capacity
        self._data = self._make_array(self.capacity)
        self._size = 0
        self._front = 0

    def __len__(self):
        return self._size

    def __getitem__(self, k):  # Return element at index k
        if not 0 <= k < self._size:
            raise IndexError('invalid index')
        return self._data[k]

    def isEmpty(self):
        if self._data == 0:
            return False
        else:
            return True

    def append(self, item):  # add an element to the back of the queue
        if self._size == self.capacity:
            self._data.pop(0)
        else:
            avail = (self._front + self._size) % len(self._data)
            self._data[avail] = item
            self._size += 1

    #def _resize(self, c):
    #    B = self._make_array(c)
    #    for k in range(self._size):
    #        B[k] = self._A[k]
    #    self._data = B
    #    self.capacity = capacity

    def _make_array(self, c):
        capacity = self.capacity
        return (capacity * ctypes.py_object)()

    def removeFirst(self):
        if self._size == self.capacity:
            self._data.pop(0)
        else:
            answer = self._data[self._front]
            self._data[self._front] = None
            self._front = (self._front + 1) % len(self._data)
            self._size -= 1
            print(answer)

    def removeLast(self):
        return self._data.popleft()

    def __str__(self):
        return str(self._data)
and when I try to print the deque in main it prints out something like this,
<bound method dequeArray.__str__ of <__main__.dequeArray object at 0x1053aec88>>
when it should be printing the entire array. I think I need to use the __str__ function, and I tried adding
def __str__(self):
    return str(self._data)
and that failed to give me the output. I also tried just
def __str__(self):
    return str(d)
d being the deque array, but I still am not having any success. How do I get it to print correctly?
You should call str on each element of the array that is not NULL; that can be done with the following __str__ method:
def __str__(self):
    contents = ", ".join(map(str, self._data[:self._size]))
    return "dequeArray[{}]".format(contents)
What I get when I try q = dequeArray(); print(q) is <__main__.py_object_Array_5 object at 0x006188A0>, which makes sense. If you want it list-like, use something like this (print uses the __str__ method implicitly):
def __str__(self):
    values = []
    for i in range(5):
        try:
            values.append(self._data[i])
        except ValueError:  # since accessing a ctypes array by index
                            # prior to assignment to this index raises
                            # the exception
            values.append('NULL (never used)')
    return repr(values)
Also, several things about the code:
from collections import deque
This import is never used and should be removed.
DEFAULT_CAPACITY = 10
is never used. Consider using it in the __init__:
def __init__(self, capacity=None):
    self.capacity = capacity or self.DEFAULT_CAPACITY
This variable inside __init__ is never used and should be removed:
capacity = self.capacity
def _make_array(self, c):
    capacity = self.capacity
    return (capacity * ctypes.py_object)()
Though this is valid code, you're doing it wrong unless you're absolutely required to do it in your assignment. ctypes shouldn't be used like this; Python is a language with automated memory management. Just returning a plain Python list (for example [None] * capacity, so that assignment by index still works) would be fine. And yes, the variable c is never used and should be removed from the signature.
if self._data == 0
This comparison in isEmpty always evaluates to False because you're comparing a ctypes object with zero, and a ctypes array is never equal to zero, so isEmpty always returns True.
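If the intent is for isEmpty to report whether the deque currently holds any elements, a minimal sketch of a fix would test the size the class already tracks rather than the ctypes array itself:
def isEmpty(self):
    # empty exactly when no elements have been stored
    return self._size == 0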
Approach 1 (global var):
id_constant = 1000
id_cnt = 1

def give_id():
    global id_cnt
    id_cnt += 1
    return id_constant * id_cnt

id = give_id()
Approach 2 (function variable instead of global var):
id_cnt = 1

def give_id():
    id_constant = 1000
    global id_cnt
    id_cnt += 1
    return id_constant * id_cnt

id = give_id()
Approach 3 (pass in global vars):
id_cnt = 1
id_constant = 1000

def give_id(constant, cnt):
    return constant * cnt

global id_cnt
id_cnt += 1
id = give_id(id_constant, id_cnt)
I'm not sure if there is any general rule of thumb, but is it widely accepted for a function to access a global variable? Or, if the variable is only used by one function, should it be a local variable of that function instead?
The method often depends a little on the situation.
You seem to need unique ids, why not use a generator:
def create_id_generator():
    """Returns an id generator."""
    i = 0
    while True:
        yield i
        i += 1
Used with the next() function:
>>> ID_GENERATOR = create_id_generator() # Global variable
>>> my_id = next(ID_GENERATOR)
>>> my_id2 = next(ID_GENERATOR)
>>> my_id3 = next(ID_GENERATOR)
>>> print(my_id, my_id2, my_id3, next(ID_GENERATOR))
0 1 2 3
If you want the ids to be multiples of 1000, you can pass the constant to the generator via parameters:
def create_id_generator(multiplier=1000):
    """Returns an id generator."""
    i = 0
    while True:
        yield i * multiplier
        i += 1
You can even add a starting value if you don't want to start from index 0:
def create_id_generator(multiplier=1000, start_index=0):
    """Returns an id generator."""
    while True:
        yield start_index * multiplier
        start_index += 1
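For example (the values follow from the generator above; gen is just an arbitrary name):
gen = create_id_generator(multiplier=1000, start_index=3)
print(next(gen), next(gen), next(gen))  # 3000 4000 5000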
If id_constant is actually constant, I would have done:
ID_CONSTANT = 1000

def give_id(id_count):
    return ID_CONSTANT * id_count

id_count = 1
id = give_id(id_count)
But it looks like you also have some state (id_count) that needs to be kept up-to-date with the issuing of id, suggesting a generator function:
def give_id(id_count):
    while True:
        yield ID_CONSTANT * id_count
        id_count += 1
or even a class:
class IdCreator(object):
    ID_CONSTANT = 1000

    def __init__(self, start_count=1):
        self.id_count = start_count

    def give_id(self):
        new_id = self.ID_CONSTANT * self.id_count
        self.id_count += 1
        return new_id
You could go further and implement iteration for the class.
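For instance, a rough sketch of that iteration support (the __iter__/__next__ methods follow the standard iterator protocol; everything else is the class from above):
class IdCreator(object):
    ID_CONSTANT = 1000

    def __init__(self, start_count=1):
        self.id_count = start_count

    def give_id(self):
        new_id = self.ID_CONSTANT * self.id_count
        self.id_count += 1
        return new_id

    def __iter__(self):
        # the object is its own iterator
        return self

    def __next__(self):
        return self.give_id()

    next = __next__  # Python 2 spelling of the protocol method

ids = IdCreator()
print(next(ids), next(ids))  # 1000 2000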
Global variables are generally something you should avoid.
If you want to have constants for, let's say, configuration purposes, I would take more of a module approach, like:
conf.py:
MYCONST = 1000

app.py:
import conf

print conf.MYCONST
Or take an OO approach such as:
class Test(object):
    def __init__(self):
        self._constant = 1000

    def give_id(self, cnt):
        return self._constant * cnt
From the Zen of Python (i.e. import this)
Namespaces are one honking great idea -- let's do more of those!
In general, if you don't need to put something in the global namespace, it is better to encapsulate it in the local namespace of the function, so I would consider option 2 to be more "pythonic" unless id_constant is going to be used by multiple functions.
You might also try the following using a keyword argument with a default value:
id_cnt = 1

def give_id(id_constant=1000):
    global id_cnt
    id_cnt += 1
    return id_constant * id_cnt

id = give_id()
Then if you ever needed id_constant to be something different, you could call the function as id = give_id(id_constant=500).
A little bit of tricky stuff:
def get_id_func(constant):
    class c(object):
        def __init__(self, constant):
            self.constant = constant
            self.id = 0

        def func(self):
            self.id += 1
            return self.id * self.constant

    o = c(constant)
    return o.func

# create function
f = get_id_func(1000)

# call and test it
assert f() == 1000
assert f() == 2000
assert f() == 3000
Perhaps you need a generator function?
def give_id(id_constant):
    delta = 0
    while True:
        delta += 1
        yield id_constant + delta

id_gen = give_id(1000)
for i in range(100):
    print(next(id_gen))  # prints numbers from 1001 to 1100
I'm trying to figure out how to change some value after each call of the object.
I thought that the __call__() method is executed after each call.
This should be a simple counter class which decreases the value attribute after being called.
class counter():
    def __init__(self, value):
        self.value = value

    def __call__(self):
        self.value -= 1

count = counter(50)
print count.value
print count.value
>> 50
>> 50 <-- this should be 49
What am I doing wrong?
If you're not committed to classes, you could use a function and abuse mutable types as default initializers:
def counter(init=None, container=[0]):
    container[0] -= 1
    if init is not None: container[0] = init
    return container[0]

x = counter(100)
print(x)          # 100
print(counter())  # 99
print(counter())  # 98
print(counter())  # 97
# ...
Call counter with a single argument to set/initialize the counter. Since initialization is actually the first call to the function, it will return that number.
Call counter with no arguments to get the "next value".
(Very similar to what I suggested here)
Alternatively, for a syntax closer to what you had in your question, use properties:
class Counter(object):
    def __init__(self, init):
        self.val = init

    @property
    def value(self):
        val = self.val
        self.val -= 1
        return val

count = Counter(50)
print(count.value)  # 50
print(count.value)  # 49
print(count.value)  # 48
print(count.value)  # 47
# ...
Here, you're creating a Counter object called count; then every time you access count.value it returns the current value and prepares itself for a future call by decrementing its internal val attribute.
Again, the first time you request the value attribute, it returns the number you initialized it with.
If, for some reason, you want to "peek" at what the next call to count.value will be, without decrementing it, you can look at count.val instead.
__call__ is only invoked when you call the object using ()
To invoke this behaviour you'd have to do
class counter():
    def __init__(self, value):
        self.value = value

    def __call__(self):
        print 'called'
        self.value -= 1

count = counter(50)
print count.value
count()
print count.value
This may not be exactly what you wanted to do.
Use the property decorator:
class counter:
    def __init__(self, value):
        self._value = value + 1

    @property
    def value(self):
        self._value -= 1
        return self._value

count = counter(50)
print(count.value)  # 50
print(count.value)  # 49
Alternatively, you could use a closure:
def Counter(n):
    n += 1
    def inner():
        nonlocal n  # needed so inner can rebind the enclosing n (Python 3)
        n -= 1
        return n
    return inner
Though this has to be called every time you want a new value:
count1 = Counter(50)
count2 = Counter(50)
print(count1()) # 50
print(count1()) # 49
print(count2()) # 50
print(count2()) # 49
print(count1()) # 48
Defining a custom __call__() method in the metaclass allows custom behaviour when the class itself is called, e.g. not always creating a new instance. Here, though, no new instance is being created: calling the existing object invokes __call__() rather than __init__(). So do this to get the desired result:
print count.value
count()
print count.value
I have a class defined like so:
class GameState:
    def __init__(self, state=None):
        if state is None:
            self.fps = 60
            self.speed = 1
            self.bounciness = 0.9
            self.current_level = None
            self.next_frame_time = 0
            self.init_time = 0
            self.real_time = 0
            self.game_time = 0
            self.game_events = []
            self.real_events = []
        else:
            # THIS being the key line:
            self.__dict__.update(**state)
Is there an interface I can define, such that this works (i.e. the ** operator works on my class):
>>> a = GameState()
>>> b = GameState(a)
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
TypeError: update() argument after ** must be a mapping, not GameState
Essentially, I want b to take on all of the attributes of a.
I didn't think it would work, but I tried defining __getitem__ without any luck.
EDIT: I want to avoid using b's __dict__, as I want to also be able to pass a dictionary as an argument, and potentially use ** on GameState objects elsewhere.
Let GameState inherit from dict:
class GameState(dict):
and override the __setattr__ method like this:
def __setattr__(self, name, value):
    self.__dict__[name] = value
    self[name] = value
In order for **obj to work, you have to implement (or inherit) the __getitem__() and keys() methods:
def __getitem__(self, item):
    return self.__dict__[item]  # you maybe should return a copy

def keys(self):
    return self.__dict__.keys()  # you could filter those
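Putting those pieces together, a minimal sketch (only a couple of the attributes are shown, purely for brevity) might look like this:
class GameState:
    def __init__(self, state=None):
        if state is None:
            self.fps = 60
            self.speed = 1
        else:
            self.__dict__.update(**state)

    def __getitem__(self, item):
        return self.__dict__[item]

    def keys(self):
        return self.__dict__.keys()

a = GameState()
b = GameState(a)       # ** now treats a as a mapping
print(b.fps, b.speed)  # 60 1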
You could do that by updating b's dict with that of a when creating b. Try this out:
class GameState:
    def __init__(self, state=None):
        if state is None:
            self.fps = 60
            self.speed = 1
            self.bounciness = 0.9
            self.current_level = None
            self.next_frame_time = 0
            self.init_time = 0
            self.real_time = 0
            self.game_time = 0
            self.game_events = []
            self.real_events = []
        else:
            if type(state) is dict:
                self.__dict__.update(**state)
            else:
                self.__dict__.update(**state.__dict__)

a = GameState()
b = GameState(a)
You might want to create a deep copy of the dict because you have list objects among the attributes. This is safer, as there is no sharing of objects between instances.
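A rough sketch of that deep-copy variant (assuming the rest of the class above stays the same; only the copying branch changes):
import copy

class GameState:
    def __init__(self, state=None):
        if state is None:
            self.game_events = []  # ...plus the other defaults shown above
        elif type(state) is dict:
            self.__dict__.update(copy.deepcopy(state))
        else:
            # deep-copy so mutable attributes like game_events are not shared
            self.__dict__.update(copy.deepcopy(state.__dict__))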