Signal handler inside a class - python

I am trying to write a class to handle signals using the signal python module. The reason for having a class is to avoid the use of globals. This is the code I came up with, but unfortunately it is not working:
import signal
import constants

class SignalHandler(object):
    def __init__(self):
        self.counter = 0
        self.stop = False
        self.vmeHandlerInstalled = False

    def setVmeHandler(self):
        self.vmeBufferFile = open('/dev/vme_shared_memory0', 'rb')
        self.vmeHandlerInstalled = True
        signal.signal(signal.SIGUSR1, self.traceHandler)
        signal.siginterrupt(signal.SIGUSR1, False)
        #...some other stuff...

    def setBreakHandler(self):
        signal.signal(signal.SIGINT, self.newBreakHandler)
        signal.siginterrupt(signal.SIGINT, False)

    def newBreakHandler(self, signum, frame):
        self.removeVMEHandler()
        self.stop = True

    def traceHandler(self, signum, frame):
        self.counter += constants.Count

    def removeVMEHandler(self):
        if not self.vmeHandlerInstalled: return
        if self.vmeBufferFile is None: return
        signal.signal(signal.SIGUSR1, signal.SIG_DFL)
        self.vmeHandlerInstalled = False
In the main program I use this class as follows:
def run():
    sigHandler = SignalHandler()
    sigHandler.setBreakHandler()
    sigHandler.setVmeHandler()
    while not sigHandler.stop:
        #....do some stuff
        if sigHandler.counter >= constants.Count:
            #...do some stuff
This solution does not work: the handler for signal.SIGUSR1 installed in the setVmeHandler method never seems to get called.
So my question is: is it possible to handle signals inside a class, or do I have to use globals?

To answer your question, I created the following simple code:
import signal
import time

class ABC(object):
    def setup(self):
        signal.signal(signal.SIGUSR1, self.catch)
        signal.siginterrupt(signal.SIGUSR1, False)

    def catch(self, signum, frame):
        print("xxxx", self, signum, frame)

abc = ABC()
abc.setup()
time.sleep(20)
If I run it:
python ./test.py
Then in another window, send a USR1 signal to the script's PID:
kill -USR1 4357
The process prints the expected message:
('xxxx', <__main__.ABC object at 0x7fada09c6190>, 10, <frame object at 0x7fada0aaf050>)
So I think the answer is yes, it is possible to handle signals inside a class.
As for why your code doesn't work, sorry, I have no idea.

I ran into a similar problem as toti08, referring to setVmeHandler(self), and found out that the handler must have matching parameters, i.e. (self, signum, frame).
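To illustrate that point, here is a minimal sketch (the class and method names are made up): when the handler is a bound method, Python passes self implicitly, so the method still needs the signum and frame parameters on top of it:

import os
import signal
import time

class Handler(object):
    # Correct: signal delivery calls handler(signum, frame); since this is a
    # bound method, self is added implicitly, giving (self, signum, frame).
    def on_usr1(self, signum, frame):
        print("caught signal", signum)

    # A handler defined as "def on_usr1(self):" would instead raise
    # "TypeError: on_usr1() takes 1 positional argument but 3 were given"
    # as soon as the signal is delivered.

h = Handler()
signal.signal(signal.SIGUSR1, h.on_usr1)
os.kill(os.getpid(), signal.SIGUSR1)  # send ourselves SIGUSR1
time.sleep(0.1)                       # give the handler a moment to run; it prints the message above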

Related

Python3 weakref WeakMethod and thread safety

I am trying to create a simple callback that can be registered to an object from another thread. The initial object that calls the callback is running on its own thread in this case.
This is best illustrated through the following example:
from pprint import pprint
import sys
import weakref
import threading
import time

class DummyController(object):
    def __init__(self):
        self.name = "fortytwo"

    def callback(self):
        print("I am number : " + self.name)

class SomeThread(threading.Thread):
    def __init__(self, listener):
        threading.Thread.__init__(self)
        self.listener = listener

    def run(self):
        time.sleep(1)
        dummy = DummyController()
        self.listener.register_callback(dummy.callback)
        time.sleep(5)
        del dummy

class Listener(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.runner = weakref.WeakMethod(self.default_callback)
        self.counter = 20

    def default_callback(self):
        print("Not implemented")

    def register_callback(self, function):
        self.runner = weakref.WeakMethod(function)

    def run(self):
        while self.counter:
            try:
                self.runner()()
            except Exception as e:
                pprint(e)
            self.counter -= 1
            time.sleep(1)

listen = Listener()
some = SomeThread(listen)
listen.start()
some.start()
Now the above code works just fine, but I am concerned about thread safety here. Reading through the weakref docs, it isn't very clear whether weakref is really thread safe or not, except for this line:
Changed in version 3.2: Added support for thread.lock, threading.Lock, and code objects.
I might be simply not reading that right. Do I need to add locking, or is everything actually fine and pretty thread safe?
Many thanks
OK, I see. This is not a thread-safety problem, but just a weak-reference problem.
Here is an executable example:
from pprint import pprint
import sys
import weakref
import threading
import time
import gc

class SomeThread(threading.Thread):
    def __init__(self, listener):
        threading.Thread.__init__(self)
        self.listener = listener

    def run(self):
        class test:  # simplify this example.
            def callback(self, count):
                print(count)
        time.sleep(1)
        dummy = test()
        self.listener.register_callback(dummy.callback)
        time.sleep(5)
        del dummy
        gc.collect()  # add this line to do garbage collecting.

class Listener(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.runner = weakref.WeakMethod(self.default_callback)
        self.counter = 20

    def default_callback(self):
        print("Not implemented")

    def register_callback(self, function):
        self.runner = weakref.WeakMethod(function)

    def run(self):
        while self.counter:
            try:
                self.runner()(self.counter)
            except Exception as e:
                pprint(e)
            self.counter -= 1
            time.sleep(1)

listen = Listener()
some = SomeThread(listen)
listen.start()
some.start()
output:
TypeError('default_callback() takes 1 positional argument but 2 were given',)
TypeError('default_callback() takes 1 positional argument but 2 were given',)
18
17
16
15
TypeError("'NoneType' object is not callable",)
TypeError("'NoneType' object is not callable",)
TypeError("'NoneType' object is not callable",)
If you explicitly call gc.collect(), the callback loses its last strong reference and the weak reference then resolves to None. Since you can never know exactly when the garbage collector will run, there is a potential issue here.
This happens whether you use threads or not; it is just the normal behaviour of a weak reference.
By the way, be careful: returning from SomeThread.run will also implicitly drop dummy. You can verify this by removing del dummy and moving gc.collect() into the try block.
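If you want to keep using WeakMethod, one defensive pattern (a sketch, not from the original posts; names are made up) is to dereference it exactly once per call, check for None, and fall back to a default when the target has been collected:

import weakref

class SafeRunner(object):
    # Wraps a WeakMethod and falls back to a default bound method
    # when the registered target has been garbage collected.
    def __init__(self, default_bound_method):
        self._default = default_bound_method
        self._ref = weakref.WeakMethod(default_bound_method)

    def register(self, bound_method):
        self._ref = weakref.WeakMethod(bound_method)

    def __call__(self, *args, **kwargs):
        target = self._ref()        # dereference once into a local variable
        if target is None:          # target object is gone
            target = self._default
        return target(*args, **kwargs)

Dereferencing into a local variable also avoids the small window in self.runner()(), where the target could in principle be collected between the first and second call.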

Python - "can't pickle thread.lock" error when creating a thread under a multiprocess in Windows

I'm getting stuck on what I think is a basic multiprocessing and threading issue. I've got a multiprocessing setup with a thread inside it. However, when I set up the thread class within the __init__ function, I get the following error:
"TypeError: can't pickle thread.lock objects".
However, this does not happen if the thread is set up outside of the init function. Does anyone know why this is happening? Note I'm using Windows.
Some code is below to illustrate the issue. As typed below, it runs fine. However, if print_hello() is called from within DoStuff's __init__, the error occurs; if it's called within the process's run(), it's fine.
Can anyone point me in the right direction so it runs fine when called from __init__? Thanks!
import multiprocessing
import threading
import time

class MyProcess(multiprocessing.Process):
    def __init__(self, **kwargs):
        super(MyProcess, self).__init__(**kwargs)
        self.dostuff = DoStuff()

    def run(self):
        print("starting DoStuff")
        # This works fine if the line below is uncommented and __init__ self.print_hello() is commented...
        self.dostuff.print_hello()

class DoStuff(object):
    def __init__(self, **kwargs):
        super(DoStuff, self).__init__(**kwargs)
        # If the following is uncommented, the error occurs...
        # Note it also occurs if the lines in start_thread are pasted here...
        # self.print_hello()

    def print_hello(self):
        print("hello")
        self.start_thread()

    def start_thread(self):
        self.my_thread_instance = MyThread()
        self.my_thread_instance.start()
        time.sleep(0.1)

class MyThread(threading.Thread):
    def __init__(self):
        super(MyThread, self).__init__()

    def run(self):
        print("Starting MyThread")

if __name__ == '__main__':
    mp_target = MyProcess()  # Also pass the pipe to transfer data
    # mp_target.daemon = True
    mp_target.start()
    time.sleep(0.1)
It looks like there is no simple answer, and it appears to be a restriction of Windows (Win 7, python 3.6 in my case); on Windows it looks like you need to start the process before you can start the worker thread inside the owned object.
There appears to be no such restriction on Unix (CentOS 7, python 2.7.5).
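The likely underlying reason (my reading, not stated above) is that on Windows multiprocessing has to spawn the child process, so the Process object and everything it references is pickled and sent to the child; a Thread object that has already been created holds a thread.lock internally, and locks cannot be pickled. A tiny sketch that reproduces the same error directly:

import pickle
import threading

try:
    pickle.dumps(threading.Lock())   # locks are not picklable
except TypeError as e:
    print(e)                         # e.g. "can't pickle thread.lock objects"

On Unix the child is forked instead, so nothing needs to be pickled and the order does not matter.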
As an experiment I modified your code as follows; this version checks the OS and starts either the process first, or the thread first:
import multiprocessing
import threading
import time
import os

class MyProcess(multiprocessing.Process):
    def __init__(self, **kwargs):
        super(MyProcess, self).__init__(**kwargs)
        self.dostuff = DoStuff(self)

    def run(self):
        print("MyProcess.run()")
        print("MyProcess.ident = " + repr(self.ident))
        if os.name == 'nt':
            self.dostuff.start_thread()

class DoStuff(object):
    def __init__(self, owner, **kwargs):
        super(DoStuff, self).__init__(**kwargs)
        self.owner = owner
        if os.name != 'nt':
            self.start_thread()

    def start_thread(self):
        print("DoStuff.start_thread()")
        self.my_thread_instance = MyThread(self)
        self.my_thread_instance.start()
        time.sleep(0.1)

class MyThread(threading.Thread):
    def __init__(self, owner):
        super(MyThread, self).__init__()
        self.owner = owner

    def run(self):
        print("MyThread.run()")
        print("MyThread.ident = " + repr(self.ident))
        print("MyThread.owner.owner.ident = " + repr(self.owner.owner.ident))

if __name__ == '__main__':
    mp_target = MyProcess()  # Also pass the pipe to transfer data
    mp_target.daemon = True
    mp_target.start()
    time.sleep(0.1)
... and got the following on Windows, where the process starts first:
MyProcess.run()
MyProcess.ident = 14700
DoStuff.start_thread()
MyThread.run()
MyThread.ident = 14220
MyThread.owner.owner.ident = 14700
... and the following on Linux, where the thread is started first:
DoStuff.start_thread()
MyThread.run()
MyThread.ident = 140316342347520
MyThread.owner.owner.ident = None
MyProcess.run()
MyProcess.ident = 4358
If it were my code I'd be tempted to always start the process first, then create the thread within that process; the following version works fine for me across both platforms:
import multiprocessing
import threading
import time

class MyProcess(multiprocessing.Process):
    def __init__(self, **kwargs):
        super(MyProcess, self).__init__(**kwargs)
        self.dostuff = DoStuff()

    def run(self):
        print("MyProcess.run()")
        self.dostuff.start_thread()

class DoStuff(object):
    def __init__(self, **kwargs):
        super(DoStuff, self).__init__(**kwargs)

    def start_thread(self):
        self.my_thread_instance = MyThread()
        self.my_thread_instance.start()
        time.sleep(0.1)

class MyThread(threading.Thread):
    def __init__(self):
        super(MyThread, self).__init__()

    def run(self):
        print("MyThread.run()")

if __name__ == '__main__':
    mp_target = MyProcess()  # Also pass the pipe to transfer data
    mp_target.daemon = True
    mp_target.start()
    time.sleep(0.1)

Unable to stop running Python thread

I have an application listening on a specific TCP port to handle received requests (listen.py). After that, I have another one (trigger.py) that, depending on the request parameters, triggers the respective operation.
Now, let's say operation A was triggered (opA.py). Operation A starts a worker thread (worker.py). When the user asks listen.py to stop operation A, the started thread is supposed to stop.
UPDATED:
The problem is that the thread is never stopped, and the cause lies in trigger.py. The OperationA instance is lost once the code exits, so I can never call stopOperation; instead I get AttributeError: 'NoneType' object has no attribute 'stopOperation'.
Any ideas how to solve this?
listen.py
from trigger import Trigger
'''
code to handle requests here:
1st: param -> 'start'
2nd: param -> 'stop'
'''
t = Trigger()
t.execute(param)
trigger.py
from opA import OperationA

class Trigger():
    def execute(param):
        opA = OperationA()
        if param == 'start':
            opA.startOperation()
        elif param == 'stop':
            opA.stopOperation()
opA.py
from worker import ThreadParam

class OperationThread(ThreadParam):
    def run(self):
        while (self.running == False):
            '''
            do something here
            '''

class OperationA():
    def _init__(self):
        listenThread = OperationThread(self)

    def startOperation(self):
        self.listenThread.start()

    def stopOperation(self):
        if self.listenThread.isAlive() == True:
            print 'Thread is alive'
            self.listenThread.killSignal()
        else:
            print 'Thread is dead'
worker.py
from threading import Thread

class ThreadParam(Thread):
    def __init__(self, _parent):
        Thread.__init__(self)
        self.parent = _parent
        self.running = False

    def killSignal(self):
        self.running = True
A minimal useful Trigger might look like this:
class Trigger(object):
    def __init__(self):
        self.operation = None

    def execute(self, command):
        if command == 'start':
            assert self.operation is None
            self.operation = OperationA()
            self.operation.start_operation()
        elif command == 'stop':
            self.operation.stop_operation()
            self.operation = None
        else:
            print 'Unknown command', repr(command)
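The key point is that listen.py has to create one Trigger and keep it (and therefore the running OperationA) alive across requests, instead of building a new instance per request. A sketch of that usage, assuming a hypothetical per-request handler function:

from trigger import Trigger

t = Trigger()                  # created once, lives for the whole server

def handle_request(param):     # called for every incoming request
    # 'start' stores the OperationA instance on t, so a later
    # 'stop' request reaches the same instance and the same thread
    t.execute(param)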

Python send variables to thread

I'm trying to create my own threading class in Python 2.7. I want to be able to stop that thread with one of my own class methods. Currently I have something like this:
import threading
import time

class loop(threading.Thread):
    def __init__(self, myvar):
        super(loop, self).__init__()
        self.terminate = False
        self.myvar = myvar

    def run(self):
        while not self.terminate:
            do.smthng.useful(self.myvar)

    def change(self, newvar):
        self.myvar = newvar  # Doesn't work: run() still uses my old var

    def stoploop(self):
        self.terminate = True  # Also not working

l = loop(1)
l.start()
time.sleep(1)
l.change(2)  # thread still using "1"
time.sleep(1)
l.stoploop()  # doesn't stop
I've read some posts here about this, but they weren't what I needed.
Any help would be appreciated.
Thank you.
EDIT:
As some of the commenters already stated, this part of the code really does work! The problem is in another part of my project. I've found it, but can't solve it; maybe some of you could help.
My project uses the Apache Thrift library and the server is in Python.
Server.py:
loo = loop(0)
handler = ServHandler(loo)
processor = serv.Processor(handler)
transport = TSocket.TServerSocket('0.0.0.0', port=9090)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TProcessPoolServer.TProcessPoolServer(processor, transport, tfactory, pfactory)
print 'Starting the server...'
server.serve()
ServHandler.py:
class ServHandler:
    def __init__(self, loo):
        self.loo = loo

    def terminate(self):  # Function that can be called remotely
        self.loo.stoploop()  # Doesn't work
In the above case the thread isn't terminated and I don't know why. There's no error and the object exists, but it seems to set the self.terminate value somewhere else. The object id and memory address appear to be the same, yet it behaves as if the object were different, although the loop's __init__ is called only once...
Below is an example where the loop is terminated successfully.
ServHandler.py:
class ServHandler:
    def __init__(self, loo):
        self.loo = None

    def terminate(self):  # Function that can be called remotely
        self.loo.stoploop()  # Does work!!!!!!

    def create(self):
        self.loo = loop(0)  # Function that can be called remotely
When I create the loop object remotely, I can also terminate it remotely, but that doesn't suit me. There should be a thread created before the Thrift server starts serving, and multiple users have to be able to change its variables, terminate it, etc. How can I achieve this?
Thank you!
Not an answer per se, but some useful debug code for the OP:
from time import sleep
from threading import Thread

class loop(Thread):
    def __init__(self, myvar):
        Thread.__init__(self)
        self.terminate = False
        self.myvar = myvar

    def run(self):
        while self.terminate is False:
            print('Run says myvar is:', self.myvar)
            sleep(0.5)

    def change(self, newvar):
        self.myvar = newvar

    def stoploop(self):
        self.terminate = True

l = loop(1)
l.start()
sleep(1)
l.change(2)
sleep(1)
l.stoploop()
print('Final product:', l.myvar)
sleep(2)
print('Is the thread alive:', l.isAlive())
I tried your code with some debugging prints, and it's working. The code above produced:
[torxed#archie ~]$ python test.py
Run says myvar is: 1
Run says myvar is: 1
Run says myvar is: 2 <-- Proves that change() does change `myvar`
Run says myvar is: 2
Final product: 2 <-- Also the global scope knows about the change
Is the thread alive: False <-- And the thread got terminated as intended
However, these are not bulletproof ideas when fetching data or dealing with thread return values, for a number of reasons (even though I use this method myself from time to time). You should consider using Thread.join, in combination with l.stoploop(), like so:
l = loop(1)
l.start()
l.change(2)
l.stoploop()
ret = l.join()
Also, when updating data you should acquire locks on your data so collisions don't occur; have a look at lock and semaphore objects.
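A minimal sketch of what that locking could look like (my addition, not from the original answer): a threading.Lock guards every read and write of the shared attributes:

import threading
import time

class loop(threading.Thread):
    def __init__(self, myvar):
        threading.Thread.__init__(self)
        self._lock = threading.Lock()    # guards myvar and terminate
        self.terminate = False
        self.myvar = myvar

    def run(self):
        while True:
            with self._lock:             # take the lock for each read
                if self.terminate:
                    break
                current = self.myvar
            print('Run says myvar is:', current)
            time.sleep(0.5)

    def change(self, newvar):
        with self._lock:                 # and for each write
            self.myvar = newvar

    def stoploop(self):
        with self._lock:
            self.terminate = True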
Is this what you need?
import threading
import time

class Worker(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.state = threading.Condition()
        self.variable = 10
        self.paused = False

    def run(self):
        while True:
            with self.state:
                if self.paused:
                    self.state.wait()
            self.do_stuff()

    def do_stuff(self):
        time.sleep(.1)
        print self.variable

    def resume(self):
        with self.state:
            self.paused = False
            self.state.notify()

    def pause(self):
        with self.state:
            self.paused = True

loop = Worker()
loop.start()
time.sleep(1)
loop.pause()
loop.variable = 11
print 'CHANGED!'
loop.resume()
time.sleep(1)

Using variables in signal handler - require global?

I have a signal handler to handle ctrl-c interrupt. If in the signal handler I want to read a variable set in my main script, is there an alternative to using a "global" statement when setting the variable?
I don't mind doing this, but read this post (Do you use the "global" statement in Python?) in which someone commented that there should be no reason to ever use global.
What is the alternative in this case?
My code looks like this:
import signal
import time

def signal_handler(signal, frame):
    print "in sig handler - g_var=%s" % g_var

def main():
    global g_var
    g_var = "test"
    time.sleep(120)

if __name__ == '__main__':
    signal.signal(signal.SIGINT, signal_handler)
    main()
You can use a closure as the signal handler that acquires its state from the main script:
import signal
import sys
import time

def main_function():
    data_for_signal_handler = 10

    def signal_handler(*args):
        print data_for_signal_handler
        sys.exit()

    signal.signal(signal.SIGINT, signal_handler)  # Or whatever signal

    while True:
        data_for_signal_handler += 1
        time.sleep(0.5)

if __name__ == '__main__':
    main_function()
You can use functools.partial to create a "closure":
import signal
import time
from functools import partial

def signal_handler(g_var, signal, frame):
    print "in sig handler - g_var=%s" % g_var

def main():
    g_var = "test"
    signal.signal(signal.SIGINT, partial(signal_handler, g_var))
    time.sleep(120)

if __name__ == '__main__':
    main()
Within the object-oriented paradigm (OOP) it's quite convenient to use lambdas for that purpose. Using lambdas you could pass some additional context (like a self reference) and/or get rid of the unused arguments (signal, frame).
import time
import signal

class Application:
    def __init__(self):
        signal.signal(signal.SIGINT, lambda signal, frame: self._signal_handler())
        self.terminated = False

    def _signal_handler(self):
        self.terminated = True

    def MainLoop(self):
        while not self.terminated:
            print("I'm just doing my job like everyone else")
            time.sleep(3)

app = Application()
app.MainLoop()
print("The app is terminated, exiting ...")
If you're just reading the variable, there should be no need to make the variable "global"
def foo():
    print a

a = 3
foo()  # 3
global is necessary to allow you to change the variable and have that change propagate into the module namespace.
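For example, a small sketch of that distinction (hypothetical variable names): assigning to the name inside a function without global only creates a local binding, while global makes the assignment hit the module-level variable:

a = 3

def set_local():
    a = 5          # rebinds a local 'a'; the module-level 'a' is untouched

def set_global():
    global a
    a = 5          # rebinds the module-level 'a'

set_local()
print(a)           # 3
set_global()
print(a)           # 5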
If you want to pass some state to your callback without using global, the typical way to do this is to use an instance method as the callback:
class foo(object):
    def __init__(self, arg):
        self.arg = arg

    def callback_print_arg(self):
        print self.arg

def call_callback(callback):
    callback()

a = foo(42)
call_callback(a.callback_print_arg)  # 42
You can access outer-scope variables from within an inline-defined function, like so:
import signal

my_values = {'foo': 'bar'}

def handler(signum, frame):
    for key, val in my_values.items():
        print key, val

my_values['bat'] = 'baz'
# remember to use mutable types, like dicts or lists
signal.signal(signal.SIGINT, handler)
