I'm in the process of learning how to program in Twisted, and going through Dave Peticolas' tutorial (http://krondo.com/wp-content/uploads/2009/08/twisted-intro.html). I'm trying to solve the suggested exercise at the end of Part 3: running multiple independent countdowns in countdown.py. Here is my code, and the error I'm getting:
#!/usr/bin/python

class countdown(object):

    def __init__(self):
        self.timer = 0

    def count(self, timer):
        if self.timer == 0:
            reactor.stop()
        else:
            print self.timer, '...'
            self.timer -= 1
            reactor.callLater(1, self.count)

from twisted.internet import reactor

obj = countdown()
obj.timer = 10

reactor.callWhenRunning(obj.count(obj.timer))

print 'starting...'
reactor.run()
print 'stopped.'
When executed:
$ ./countdown.py
10 ...
Traceback (most recent call last):
File "./countdown.py", line 21, in <module>
reactor.callWhenRunning(obj.count(obj.timer))
File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 666, in callWhenRunning
_callable, *args, **kw)
File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 645, in addSystemEventTrigger
assert callable(_f), "%s is not callable" % _f
AssertionError: None is not callable
I assume I'm not properly leveraging an object variable, though I'm not sure what I'm doing wrong.
You are calling your callable before passing it in. The returned result of the obj.count() call (None) is not callable.
You need to pass in the method, not the result of calling it:
reactor.callWhenRunning(obj.count, obj.timer)
Any positional arguments for your method (here just obj.timer) are passed after the callable itself; callWhenRunning() will then invoke obj.count(obj.timer) for you once the reactor is running.
On closer inspection, you don't even need to pass in obj.timer as an argument: you can just access it on self, so there is no need to pass it in separately:
class countdown(object):

    def __init__(self):
        self.timer = 0

    def count(self):
        if self.timer == 0:
            reactor.stop()
        else:
            print self.timer, '...'
            self.timer -= 1
            reactor.callLater(1, self.count)
and adjust your callWhenRunning() call accordingly:
reactor.callWhenRunning(obj.count)
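Putting the pieces together for the actual exercise (several independent countdowns), one possible sketch keeps a class-level counter of active countdowns and only stops the reactor when the last one finishes. The start() helper and the running attribute are illustrative additions, not part of the original code:
#!/usr/bin/python
# Sketch: three independent countdowns; reactor.stop() runs only
# after the last one reaches zero.
from twisted.internet import reactor

class countdown(object):
    running = 0  # countdowns still active, shared across instances

    def start(self, timer):
        self.timer = timer
        countdown.running += 1
        reactor.callWhenRunning(self.count)

    def count(self):
        if self.timer == 0:
            countdown.running -= 1
            if countdown.running == 0:
                reactor.stop()
        else:
            print self.timer, '...'
            self.timer -= 1
            reactor.callLater(1, self.count)

for n in (10, 5, 3):
    countdown().start(n)

print 'starting...'
reactor.run()
print 'stopped.'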
I am trying to call different functions based on the value of rb_selection: func1 if rb_selection is 0, and func2 if it is 1. The two functions take different sets of arguments.
I do not need the folder argument (a func2 value) when I call func1, and likewise I do not need the batch and term arguments (func1 values) when I call func2.
It throws the error below when I try to call the second function, since no values for batch and term are passed.
Exception in Tkinter callback
Traceback (most recent call last):
File "C:\Users\Himajak\Anaconda3\lib\tkinter\__init__.py", line 1705, in __call__
return self.func(*args)
File "<ipython-input-13-02b5f954b815>", line 122, in tb_click
ThreadedTask(self.queue,self.batch_name,self.term_name,self.course,self.rb_selection,self.folder).start()
AttributeError: 'GUI' object has no attribute 'batch_name'
Code looks similar to this:
class class1():
    def def1(self):
        self.queue = queue.Queue()
        ThreadedTask(self.queue, self.rb_selection, self.batch_name, self.folder).start()
        #self.master.after(10, self.process_queue)

class class2():
    def __init__(self, queue, rb_selection, batch_name, term_name, folder):
        threading.Thread.__init__(self)
        self.queue = queue
        self.rb_selection = rb_selection
        self.batch = batch_name
        self.term = term_name
        self.folder = folder

    def func1(self, batch, term):
        time.sleep(5)
        print("Function 1 reached")
        print(self.batch, self.term)

    def func2(self, folder):
        time.sleep(5)
        print("Function 2 reached")
        print(self.folder)

    def run(self):
        time.sleep(0)  # Simulate long running process
        if self.rb_selection == '0':
            self.func1(self.batch, self.term)
        elif self.rb_selection == '1':
            self.func2(self.folder)
        self.queue.put("Task finished")
Please suggest how to resolve this issue; thanks in advance!
Python has no separate concept of optional arguments, but you can give a parameter a default value when defining the function, like
def __init__(self, queue, rb_selection, term_name, folder, batch_name="default batch name"):
so that you need not pass batch_name when creating the instance.
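Taking that a step further, giving every source-specific parameter a default lets each call site pass only what it has. A minimal sketch, assuming the trimmed-down ThreadedTask below (it folds func1/func2 into run() for brevity, and actually subclasses threading.Thread, which the original snippet called but did not inherit from):
import queue
import threading

class ThreadedTask(threading.Thread):
    # batch_name, term_name and folder all default to None, so each
    # caller passes only the arguments relevant to its rb_selection
    def __init__(self, queue, rb_selection, batch_name=None,
                 term_name=None, folder=None):
        threading.Thread.__init__(self)
        self.queue = queue
        self.rb_selection = rb_selection
        self.batch = batch_name
        self.term = term_name
        self.folder = folder

    def run(self):
        if self.rb_selection == '0':
            print(self.batch, self.term)     # func1 path
        elif self.rb_selection == '1':
            print(self.folder)               # func2 path
        self.queue.put("Task finished")

q = queue.Queue()
ThreadedTask(q, '0', batch_name='b1', term_name='t1').start()  # no folder
ThreadedTask(q, '1', folder='C:/data').start()                 # no batch/term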
I am trying to run a simple threading function within my simple class.
I am trying to call the Thread constructor within a method of a class, with its target pointing to another method of the same class. I tested it from the Python REPL. Here is my class in increment_thread.py:
from threading import Thread
import time

class Increment:
    def __init__(self):
        self.count = 0

    def add_one(self):
        while True:
            self.count = self.count + 1
            time.sleep(5)

    def start(self):
        background_thread = Thread(target=add_one)
        background_thread.start()
        print("Started counting up")
        return

    def get_count(self):
        return print(self.count)
To test this, I run python in my terminal, which opens the Python REPL. Then I run the following lines:
from increment_thread import Increment
inc = Increment()
inc.get_count() # Yields 0
inc.start()
I expect the thread to start and indicate "Started counting up", but instead I get the following error:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "~/python-sandbox/increment_thread.py", line 14, in start
background_thread = Thread(target=add_one)
NameError: name 'add_one' is not defined
Is what I am trying to do possible?
In the Thread constructor, it should be target=self.add_one rather than target=add_one.
To pass parameters:
from threading import Thread
import time

class Increment:
    def __init__(self):
        self.count = 0

    def add_one(self, start_at=0):
        self.count = start_at
        while True:
            self.count = self.count + 1
            time.sleep(5)

    def start_inc(self, start_at=0):
        # Pass the args parameter as a tuple
        background_thread = Thread(target=self.add_one, args=(start_at,))
        background_thread.start()
        print("Started counting up")
        return

    def get_count(self):
        print(self.count)

if __name__ == "__main__":
    inc = Increment()
    inc.get_count()  # Yields 0
    inc.start_inc(start_at=5)
    while True:
        inc.get_count()
        time.sleep(2)
Just like class fields, class methods need to be referred to using self.method syntax. So
def start(self):
    background_thread = Thread(target=self.add_one)
    background_thread.start()
    print("Started counting up")
    return
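With that one-line change the REPL session above works as intended; a quick script-form check (timings approximate, and note the background thread loops forever, so stop it with Ctrl-C):
from increment_thread import Increment
import time

inc = Increment()
inc.get_count()   # 0
inc.start()       # prints "Started counting up"
time.sleep(6)
inc.get_count()   # roughly 2 after two increments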
I have a pretty simple worker; I might be making some silly mistake.
import gearman

class BingWorker(object):
    def __init__(self):
        self.gm_worker = gearman.GearmanWorker(['localhost:4730'])
        completed_job_request = self.gm_worker.register_task('bingmedia', callBing)

    def callBing(self, gearman_worker, gearman_job):
        print "In worker ", gearman_job.data
        return "Kools"

    def run(self):
        self.gm_worker.work()

if __name__ == '__main__':
    BingWorker().run()
gives
Traceback (most recent call last):
File "worker.py", line 16, in <module>
BingWorker().run()
File "worker.py", line 6, in __init__
completed_job_request = self.gm_worker.register_task('bingmedia', callBing)
NameError: global name 'callBing' is not defined
Any hints? The example is very similar to the Python example at http://gearman.org/examples/reverse/, just put into a class structure.
Improved code:
import gearman

class BingWorker(object):
    def __init__(self):
        self.gm_worker = gearman.GearmanWorker(['localhost:4730'])

    def callBing(self, gearman_worker, gearman_job):
        print "In worker ", gearman_job.data
        return "Kools"

    def run(self):
        completed_job_request = self.gm_worker.register_task('bingmedia', self.callBing)
        self.gm_worker.work()

if __name__ == '__main__':
    BingWorker().run()
You need to change callBing to self.callBing, and it might be better to move the registration to the first line of the run() method rather than doing it in __init__().
The first is because callBing is a reference to a missing global, while self.callBing is a reference to the class's method. The second is because you could potentially get a call to self.callBing before __init__() has finished, which would be bad news.
I'm using a thread pool while using Tornado to do some work. This is the code:
common/thread_pool.py
import logging
import threading
import Queue

import tornado.ioloop

class Worker(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self._queue = queue

    def run(self):
        logging.info('Worker start')
        while True:
            content = self._queue.get()
            if isinstance(content, str) and content == 'quit':
                break
            # content: (func, args, on_complete)
            func = content[0]
            args = content[1]
            on_complete = content[2]
            resp = func(args)
            tornado.ioloop.IOLoop.instance().add_callback(lambda: on_complete(resp))
            # not sure whether it's correct to call this:
            # self._queue.task_done()
        logging.info('Worker stop')

class WorkerPool(object):
    _workers = []

    def __init__(self, num):
        self._queue = Queue.Queue()
        self._size = num

    def start(self):
        logging.info('WorkerPool start %d' % self._size)
        for _ in range(self._size):
            worker = Worker(self._queue)
            worker.start()
            self._workers.append(worker)

    def stop(self):
        for worker in self._workers:
            self._queue.put('quit')
        for worker in self._workers:
            worker.join()
        logging.info('WorkerPool stopped')

    def append(self, content):
        self._queue.put(content)
gateway.py
import sys
import time
import logging

import tornado.ioloop
import tornado.web

from common import thread_pool

workers = None

class MainServerHandler(tornado.web.RequestHandler):
    @tornado.web.asynchronous
    def get(self):
        start_time = time.time()
        method = 'get'
        content = (self.handle, (method, self.request, start_time), self.on_complete)
        workers.append(content)

    @tornado.web.asynchronous
    def post(self):
        start_time = time.time()
        method = 'post'
        content = (self.handle, (method, self.request, start_time), self.on_complete)
        workers.append(content)

    def handle(self, args):
        method, request, start_time = args
        # for test, just return
        return 'test test'

    def on_complete(self, res):
        logging.debug('on_complete')
        self.write(res)
        self.finish()
        return

def main(argv):
    global workers
    # conf_mgr comes from elsewhere in the project (not shown)
    workers = thread_pool.WorkerPool(conf_mgr.thread_num)
    workers.start()
    application = tornado.web.Application([(r"/", MainServerHandler)])
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()

if __name__ == "__main__":
    main(sys.argv[1:])
When I make many concurrent requests, I get this error:
ERROR: 2014-09-15 18:04:03: ioloop.py:435 * 140500107065056 Exception in callback <tornado.stack_context._StackContextWrapper object at 0x7fc8b4d6b9f0>
Traceback (most recent call last):
File "/home/work/nlp_arch/project/ps/se/nlp-arch/gateway/gateway/../third-party/tornado-2.4.1/tornado/ioloop.py", line 421, in _run_callback
callback()
File "/home/work/nlp_arch/project/ps/se/nlp-arch/gateway/gateway/../common/thread_pool.py", line 39, in <lambda>
tornado.ioloop.IOLoop.instance().add_callback(lambda: on_complete(resp))
File "/home/work/nlp_arch/project/ps/se/nlp-arch/gateway/gateway/gateway.py", line 92, in on_complete
self.write(res)
File "/home/work/nlp_arch/project/ps/se/nlp-arch/gateway/gateway/../third-party/tornado-2.4.1/tornado/web.py", line 489, in write
raise RuntimeError("Cannot write() after finish(). May be caused "
RuntimeError: Cannot write() after finish(). May be caused by using async operations without the @asynchronous decorator.
But I didn't call write() after finish(). I'm also using the @asynchronous decorator. At the same time, the logs show that write() and finish() are called by the same thread.
The issue is with the way you're adding the callback to the I/O loop. Add it like this:
tornado.ioloop.IOLoop.instance().add_callback(on_complete, resp)
And the errors will go away.
You're seeing this strange behavior because when you use a lambda, you create a closure over the local scope of the function, and the variables used in that closure are looked up when the lambda is executed, not when it's created. Consider this example:
funcs = []

def func(a):
    print a

for i in range(5):
    funcs.append(lambda: func(i))

for f in funcs:
    f()
Output:
4
4
4
4
4
Because your worker method runs in a while loop, on_complete ends up getting rebound several times, which also changes the value of on_complete inside the lambda. So if one worker thread sets on_complete for handler A, but then picks up another task and sets on_complete for handler B before the callback queued for handler A has run, both callbacks end up running handler B's on_complete.
If you really wanted to use a lambda, you could also avoid this by binding on_complete in the local scope of the lambda:
tornado.ioloop.IOLoop.instance().add_callback(lambda on_complete=on_complete: on_complete(resp))
But just adding the function and its argument directly is much nicer.
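If your Tornado version's add_callback() doesn't forward extra arguments (older releases accepted only the callback itself), functools.partial from the standard library gives the same early binding. A sketch of the call site inside Worker.run(), with on_complete and resp as in the code above:
import functools
import tornado.ioloop

# partial captures the current on_complete and resp at creation time,
# so a later task handled by this worker cannot rebind them
tornado.ioloop.IOLoop.instance().add_callback(
    functools.partial(on_complete, resp))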
I have little experience with decorators in Python, but I'd like to write a function decorator that runs the function, catches a specific exception, and, if the exception is caught, retries the function a certain number of times. That is, I'd like to do this:
@retry_if_exception(BadStatusLine, max_retries=2)
def thing_that_sometimes_fails(self, foo):
    foo.do_something_that_sometimes_raises_BadStatusLine()
I assume this kind of thing is easy with decorators, but I'm not clear about how exactly to go about it.
from functools import wraps

def retry_if_exception(ex, max_retries):
    def outer(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            assert max_retries > 0
            x = max_retries
            while x:
                try:
                    return func(*args, **kwargs)
                except ex:
                    x -= 1
                    if not x:
                        raise  # retries exhausted: re-raise the last exception
        return wrapper
    return outer
See why you'd better use @wraps.
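A quick usage sketch for the decorator above; flaky() and its module-level calls list are made-up illustrations of a function that fails once and then succeeds:
from httplib import BadStatusLine  # Python 3: from http.client import BadStatusLine

calls = []

@retry_if_exception(BadStatusLine, max_retries=2)
def flaky():
    calls.append(1)
    if len(calls) < 2:
        raise BadStatusLine('bad line')  # first attempt fails
    return 'ok'

print flaky()  # one failed attempt, then 'ok' on the retry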
I think you're basically wanting something like this:
def retry_if_exception(exception_type=Exception, max_retries=1):
    def decorator(fn):
        def wrapper(*args, **kwargs):
            for i in range(max_retries + 1):
                print('Try #', i + 1)
                try:
                    return fn(*args, **kwargs)
                except exception_type as e:
                    print('wrapper exception:', i + 1, e)
        return wrapper
    return decorator
@retry_if_exception()
def foo1():
    raise Exception('foo1')

@retry_if_exception(ArithmeticError)
def foo2():
    x = 1/0

@retry_if_exception(Exception, 2)
def foo3():
    raise Exception('foo3')
The following seems to do what you've described:
from functools import wraps

def retry_if_exception(exception, max_retries=2):
    def _retry_if_exception(method_fn):
        # method_fn is the function that gives rise
        # to the method that you've decorated,
        # with signature (slf, foo)
        def method_deco(slf, foo):
            tries = 0
            while True:
                try:
                    return method_fn(slf, foo)
                except exception:
                    tries += 1
                    if tries > max_retries:
                        raise
        return wraps(method_fn)(method_deco)
    return _retry_if_exception
Here's an example of it in use:
d = {}

class Foo():
    def usually_raise_KeyError(self):
        print("d[17] = %s" % d[17])

foo1 = Foo()

class A():
    @retry_if_exception(KeyError, max_retries=2)
    def something_that_sometimes_fails(self, foo):
        print("About to call foo.usually_raise_KeyError()")
        foo.usually_raise_KeyError()

a = A()
a.something_that_sometimes_fails(foo1)
This gives:
About to call foo.usually_raise_KeyError()
About to call foo.usually_raise_KeyError()
About to call foo.usually_raise_KeyError()
Traceback (most recent call last):
File " ......... TrapRetryDeco.py", line 39, in <module>
a.something_that_sometimes_fails( foo1)
File " ......... TrapRetryDeco.py", line 15, in method_deco
return method_fn( slf, foo)
File " ......... TrapRetryDeco.py", line 36, in something_that_sometimes_fails
foo.usually_raise_KeyError()
File " ......... TrapRetryDeco.py", line 28, in usually_raise_KeyError
print("d[17] = %s" % d[17])
KeyError: 17
I assume that by "2 retries" you mean the operation will be attempted 3 times in all. Your example has a couple of complications which may obscure the basic setup:
It seems you want a method decorator, as your function/method's first parameter is self; however, that method immediately delegates to some bad method of its foo parameter. I preserved these complications :)
As an outline, you would do something along these lines:
import random

def shaky():
    1 / random.randint(0, 1)

def retry_if_exception(f):
    def inner(retries=2):
        for retry in range(retries):
            try:
                return f()
            except ZeroDivisionError:
                print 'try {}'.format(retry)
        raise  # all retries failed: re-raise the last exception
    return inner

@retry_if_exception
def thing_that_may_fail():
    shaky()

thing_that_may_fail()
As written, each attempt fails about half the time, so with two tries the whole call fails about a quarter of the time. When it does fail, it prints:
try 0
try 1
Traceback (most recent call last):
File "Untitled 2.py", line 23, in <module>
thing_that_may_fail()
File "Untitled 2.py", line 10, in inner
return f()
File "Untitled 2.py", line 21, in thing_that_may_fail
shaky()
File "Untitled 2.py", line 4, in shaky
1/random.randint(0,1)
ZeroDivisionError: integer division or modulo by zero
You could adapt this structure to many different types of errors.
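For instance, an except clause accepts a tuple of exception classes, so covering several error types is a one-line change; retry_if_exceptions and the ValueError here are illustrative, not from the original answer:
def retry_if_exceptions(f):
    # same shape as inner() above, but the tuple in the except
    # clause catches any of the listed error types
    def inner(retries=2):
        for retry in range(retries):
            try:
                return f()
            except (ZeroDivisionError, ValueError):
                print 'try {}'.format(retry)
        raise
    return inner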