Handling kill events for Python multiprocessing processes

For a program that should run on both Linux and Windows (Python 2.7), I'm trying to update the values of a given object using multiprocessing.Process (while the main program is running, I call the update class from a separate process).
Sometimes the update takes too long before my object is updated, so I want to be able to kill the update process and continue with the main program. "Too long" is not strictly defined here; it is the user's subjective perception.
With a single queue (as in the MyFancyClass example at http://pymotw.com/2/multiprocessing/communication.html) I can kill the update process and the main program continues as I want.
However, when I add a second queue to retrieve the updated object, ending the update process no longer lets the main program continue.
What I have so far is:
import multiprocessing
import time, os

class NewParallelProcess(multiprocessing.Process):
    def __init__(self, taskQueue, resultQueue, processName):
        multiprocessing.Process.__init__(self)
        self.taskQueue = taskQueue
        self.resultQueue = resultQueue
        self.processName = processName

    def run(self):
        print "pid %s of process that could be killed" % os.getpid()
        while True:
            next_task = self.taskQueue.get()
            if next_task is None:
                # poison pill for terminate
                print "%s: exiting" % self.processName
                self.taskQueue.task_done()
                break
            print "%s: %s" % (self.processName, next_task)
            answer = next_task()
            self.taskQueue.task_done()
            self.resultQueue.put(answer)
        return

class OldObject(object):
    def __init__(self):
        self.accurate = "OldValue"
        self.otherValue = "SomeOtherValue"

class UpdateObject(dict):
    def __init__(self, objectToUpdate):
        self.objectToUpdate = objectToUpdate

    def __call__(self):
        returnDict = {}
        returnDict["update"] = self.updateValue("NewValue")
        return returnDict

    def __str__(self):
        return "update starting"

    def updateValue(self, updatedValue):
        for i in range(5):
            time.sleep(1)  # updating my object - time consuming with possible pid kill
            print "working... (pid=%s)" % os.getpid()
        self.objectToUpdate.accurate = updatedValue
        return self.objectToUpdate

if __name__ == '__main__':
    taskQueue = multiprocessing.JoinableQueue()
    resultQueue = multiprocessing.Queue()
    newProcess = NewParallelProcess(taskQueue, resultQueue, processName="updateMyObject")
    newProcess.start()
    myObject = OldObject()
    taskQueue.put(UpdateObject(myObject))
    # poison pill for NewParallelProcess loop and wait to finish
    taskQueue.put(None)
    taskQueue.join()
    # get back results
    results = resultQueue.get()
    print "Values have been updated"
    print "---> %s became %s" % (myObject.accurate, results["update"].accurate)
Any suggestions on how to kill newProcess and continue in the main program?
Well, I made some modifications, and this does what I want. Not sure whether it is the most efficient approach, so any improvements are always welcome :)
import multiprocessing
import time, os

class NewParallelProcess(multiprocessing.Process):
    def __init__(self, taskQueue, resultQueue, processName):
        multiprocessing.Process.__init__(self)
        self.taskQueue = taskQueue
        self.resultQueue = resultQueue
        self.name = processName

    def run(self):
        print "Process %s (pid = %s) added to the list of running processes" % (self.name, self.pid)
        next_task = self.taskQueue.get()
        self.taskQueue.task_done()
        self.resultQueue.put(next_task())
        return

class OldObject(object):
    def __init__(self):
        self.accurate = "OldValue"
        self.otherValue = "SomeOtherValue"

class UpdateObject(dict):
    def __init__(self, objectToUpdate, valueToUpdate):
        self.objectToUpdate = objectToUpdate
        self.valueToUpdate = valueToUpdate

    def __call__(self):
        returnDict = {}
        returnDict["update"] = self.updateValue(self.valueToUpdate)
        return returnDict

    def updateValue(self, updatedValue):
        for i in range(5):
            time.sleep(1)  # updating my object - time consuming with possible pid kill
            print "working... (pid=%s)" % os.getpid()
        self.objectToUpdate.accurate = updatedValue
        return self.objectToUpdate

if __name__ == '__main__':
    # queue for single process
    taskQueue = multiprocessing.JoinableQueue()
    resultQueue = multiprocessing.Queue()
    newProcess = NewParallelProcess(taskQueue, resultQueue, processName="updateMyObject")
    newProcess.start()
    myObject = OldObject()
    taskQueue.put(UpdateObject(myObject, "NewValue"))

    while True:
        # check if newProcess is still alive
        time.sleep(5)
        if not newProcess.is_alive():
            print "Process %s (pid = %s) is not running any more (exit code = %s)" % (newProcess.name, newProcess.pid, newProcess.exitcode)
            break

    if newProcess.exitcode == 0:
        print "ALL OK"
        taskQueue.join()
        # get back results
        print "NOT KILLED"
        results = resultQueue.get()
        print "Values have been updated"
        print "---> %s became %s" % (myObject.accurate, results["update"].accurate)
    elif newProcess.exitcode == 1:
        print "ended with error in function"
        print "KILLED"
        for i in range(5):
            time.sleep(1)
            print "i continue"
    elif newProcess.exitcode == -15 or newProcess.exitcode == -9:
        print "ended with kill signal %s" % newProcess.exitcode
        print "KILLED"
        for i in range(5):
            time.sleep(1)
            print "i continue"
    else:
        print "no idea what happened"
        print "KILLED"
        for i in range(5):
            time.sleep(1)
            print "i continue"

Related

How to get the status of a spawned process in twisted python?

I want to trigger many long-running processes continuously, and, based on the status returned by each executed process, I need to perform other tasks. In the example below, I'm able to spawn processes, but I'm not able to capture/get the execution status of the spawned processes back in the main loop (i.e. in the CmdProtocol class).
I'm new to these twisted python concepts - can someone help me here?
import sys
from twisted.internet.protocol import ServerFactory, ProcessProtocol
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
from twisted.internet import protocol
import os
import signal

class MyPP(protocol.ProcessProtocol):
    def __init__(self):
        self.parent_id = os.getpid()

    def connectionMade(self):
        print "connectionMade!"
        print "Parent id = %s" % self.parent_id
        print "Child process id = %s" % self.transport.pid

    def outReceived(self, data):
        print "out", data

    def errReceived(self, data):
        print "error", data

    def inConnectionLost(self):
        print "inConnectionLost! stdin is closed! (we probably did it)"
        print "Parent id = %s" % self.parent_id
        print "Child process id closes STDIN = %s" % self.transport.pid

    def outConnectionLost(self):
        print "outConnectionLost! The child closed their stdout!"
        print "Parent id = %s" % self.parent_id
        print "Child process id closes STDOUT = %s" % self.transport.pid

    def errConnectionLost(self):
        print "errConnectionLost! The child closed their stderr."
        print "Parent id = %s" % self.parent_id
        print "Child process id closes ERRCONN = %s" % self.transport.pid

    def processExited(self, reason):
        print "processExited %s, status %d" % (self.transport.pid, reason.value.exitCode)

    def processEnded(self, reason):
        print "%s processEnded, status %d" % (self.transport.pid, reason.value.exitCode)
        print "quitting"

class CmdProtocol(LineReceiver):
    delimiter = '\n'

    def connectionMade(self):
        self.client_ip = self.transport.getPeer()
        print "Client connection from %s" % self.client_ip

    def processcmd(self):
        pp = MyPP()
        cmd = ['c:\Python27\python.exe', '-u', 'print_hi.py']
        print "Calling processcmd - <%s>" % cmd
        reactor.spawnProcess(pp, cmd[0], cmd[1:])

    def connectionLost(self, reason):
        print "Lost client connection. Reason: %s" % reason

    def lineReceived(self, line):
        if not line:
            return
        # Parse the command
        print 'Cmd received from %s : %s' % (self.client_ip, line)
        commandParts = line.split()
        if len(commandParts) > 0:
            command = commandParts[0].lower()
            args = commandParts[1:]
            try:
                print "Command received : <%s>" % command
                method = getattr(self, command)
            except AttributeError, e:
                self.sendLine('Error: no such command.')
            else:
                try:
                    res = method()
                    print "Returned status:%s" % res
                    self.sendLine('Command executed successfully.')
                except Exception, e:
                    self.sendLine('Error: ' + str(e))

    def do_kill(self, pid):
        """kill: Kill a process (PID)"""
        print 'Killing pid:%s' % pid
        res = os.kill(int(pid), signal.SIGTERM)
        print "Kill Status %s" % res

class MyFactory(ServerFactory):
    protocol = CmdProtocol

    def __init__(self):
        print "Factory called"

reactor.listenTCP(8000, MyFactory())
reactor.run()
This is actually a very basic Python data structures question. You just need to refer to an instance of CmdProtocol from an instance of MyPP. Since CmdProtocol is what constructs MyPP in the first place, this is easy. Just change the construction of MyPP to look like this:
def processcmd(self):
    pp = MyPP(self)
and then MyPP.__init__ to look like this:
def __init__(self, cmd_protocol):
    self.parent_id = os.getpid()
    self.cmd_protocol = cmd_protocol
Then, in any method on MyPP, you can access the relevant CmdProtocol instance with self.cmd_protocol.
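For example, to report the child's exit status back over the TCP connection (a sketch; the message text is made up, but LineReceiver.sendLine is the standard way to write a line to the client):

def processEnded(self, reason):
    status = reason.value.exitCode
    print "processEnded, status %s" % status
    # tell the client that issued the command how the child exited
    self.cmd_protocol.sendLine("Process ended with status %s" % status)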

Python threading: Queue workers hang the program

I'm trying to implement a simple Queue with workers that do something.
The program should wait until the workers have emptied the queue, then continue execution.
I took the documentation example and tried to implement it in a class, since that is how it will be implemented in my project.
Like this:
class Test:
    def __init__(self, n, q):
        self.q = Queue()
        print "Starting workers..."
        for i in range(n):
            t = threading.Thread(target=self.worker)
            t.daemon = True
            t.start()
        print "Workers started"
        for i in range(q):
            self.q.put(i)
        self.q.join()
        print "Exiting"

    def worker(self):
        name = threading.currentThread().getName()
        print "Thread %s started" % name
        while True:
            item = self.q.get()
            print "Processing item %d" % item
            sleep(1)
            self.q.task_done()
When I instantiate the class with t = Test(2, 100), all I see are the "Thread ... started" messages, and the program hangs.
What is wrong with the code?
EDIT:
I just noticed that while this code hangs in IDLE (where I tested it), it performs flawlessly on the command line.
Looks like an environmental problem.
Yes, this has to be an environmental problem. I even tested it in a few different editors and on a few different PCs.
Output
Starting workers...
Thread Thread-1 started
Thread Thread-2 started
Workers started
Processing item 0
Processing item 1
Processing item 2
Processing item 3
Processing item 4
Processing item 5
Processing item 6
Processing item 7
Processing item 8
Processing item 9
Exiting
Code:
from Queue import Queue
import threading
from time import sleep

class Test:
    def __init__(self, n, q):
        self.q = Queue()
        print "Starting workers..."
        for i in range(n):
            t = threading.Thread(target=self.worker)
            t.daemon = True
            t.start()
        print "Workers started"
        for i in range(q):
            self.q.put(i)
        self.q.join()
        print "Exiting"

    def worker(self):
        name = threading.currentThread().getName()
        print "Thread %s started" % name
        while True:
            item = self.q.get()
            print "Processing item %d" % item
            sleep(1)
            self.q.task_done()

t = Test(2, 10)

Need help with python multiprocessing.Lock()

I'm using a Python program to compute the average value of a list of floats.
The program logic is as follows:
1. The program is started with some arguments.
2. A list "hostgroups" is created.
3. A for-in loop over the list "hostgroups" starts a function worker(hostgroup, var1, var2, var3, ...).
4. Inside the worker function, two variables are built from some of the worker's input variables.
4a. Inside the worker, a subworker function is called with some of the worker's input variables.
4b. The subworker returns some new variables.
4c. Back in the worker...
4d. ...some things are done.
4e. Finally, the worker calls a final function with some variables.
So far, so good!
My next step is to set up multiprocessing... who can help?
UPDATE:
Here is my actual approach:
class Consumer(multiprocessing.Process):
    def __init__(self, task_queue, result_queue):
        multiprocessing.Process.__init__(self)
        self.task_queue = task_queue
        self.result_queue = result_queue

    def run(self):
        proc_name = self.name
        while True:
            next_task = self.task_queue.get()
            if next_task is None:
                # Poison pill means shutdown
                print '%s: Exiting' % proc_name
                self.task_queue.task_done()
                break
            print '%s: %s' % (proc_name, next_task)
            answer = next_task()
            self.task_queue.task_done()
            self.result_queue.put(answer)
        return

class Task(object):
    def __init__(self, hostgroup, lock):
        self.hostgroup = hostgroup
        self.lock = lock

    def __call__(self):
        print 'Doing something fancy for %s!' % self.hostgroup
        try:
            lock.acquire()
            worker(self.hostgroup, hostsfile, mod_inputfile, outputdir, testmode,
                   backup_dir, start_time, end_time, rrdname, unit, yesterday,
                   now_epoch, rrd_interval, rrd_heartbeat, name)
        finally:
            lock.release()

    def __str__(self):
        return 'str %s' % self.hostgroup

if __name__ == '__main__':
    lock = multiprocessing.Lock()
    # Establish communication queues
    tasks = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    # Start consumers
    num_consumers = multiprocessing.cpu_count() * 2
    print 'Creating %d consumers' % num_consumers
    consumers = [ Consumer(tasks, results)
                  for i in xrange(num_consumers) ]
    for w in consumers:
        w.start()
    # Enqueue jobs
    for hostgroup in hostgroups:
        tasks.put(Task(hostgroup, lock))
    # Add a poison pill for each consumer
    for i in xrange(num_consumers):
        tasks.put(None)
    # Wait for all of the tasks to finish
    tasks.join()
Fine, so far! But no locking seems to take effect - all results are the same...
Why is lock.acquire() not working?
I find multiprocessing.Pool to be much easier to use than the Queue class. The basic setup is
from multiprocessing import Pool
p = Pool(processes=<number of processes>)
p.map(function, [a, b, c])
This will call function(a), function(b), and function(c) in independent processes.
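Applied to the hostgroup code above, a minimal sketch (it assumes worker and its extra arguments are module-level names in the child, as they appear to be in the question; on Windows the mapped function must itself be defined at module level so it can be pickled):

from multiprocessing import Pool

def process_hostgroup(hostgroup):
    # each call runs in its own process, so no shared state and no lock is needed
    return worker(hostgroup, hostsfile, mod_inputfile, outputdir, testmode,
                  backup_dir, start_time, end_time, rrdname, unit, yesterday,
                  now_epoch, rrd_interval, rrd_heartbeat, name)

if __name__ == '__main__':
    p = Pool()  # defaults to cpu_count() worker processes
    results = p.map(process_hostgroup, hostgroups)  # results come back in input order
    p.close()
    p.join()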

threading.Event wait function not signaled when subclassing Process class

The following code never gets past the wait call in run(). I'm certain I'm doing something ridiculously stupid, but since I'm not smart enough to figure out what, I'm asking. Any help is appreciated. Here is the code:
import threading
import multiprocessing
from multiprocessing import Process

class SomeClass(Process):
    def __init__(self):
        Process.__init__(self)
        self.event = threading.Event()
        self.event.clear()

    def continueExec(self):
        print multiprocessing.current_process().name
        print self
        print "Set:" + str(self.event.is_set())
        self.event.set()
        print "Set:" + str(self.event.is_set())

    def run(self):
        print "I'm running with it"
        print multiprocessing.current_process().name
        self.event.wait()
        print "I'm further than I was"
        print multiprocessing.current_process().name
        self.event.clear()

def main():
    s_list = []
    for t in range(3):
        s = SomeClass()
        print "s:" + str(s)
        s_list.append(s)
        s.start()
    raw_input("Press enter to send signal")
    for t in range(3):
        print "s_list[" + str(t) + "]:" + str(s_list[t])
        s_list[t].continueExec()
    raw_input("Press enter to send signal")
    for t in range(3):
        s_list[t].join()
    print "All Done"

if __name__ == "__main__":
    main()
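One likely cause, offered as an assumption rather than a confirmed diagnosis: threading.Event lives in a single process's memory, so after start() the child waits on its own copy of the event while the parent sets a different one. A minimal sketch of the cross-process variant uses multiprocessing.Event instead:

import multiprocessing
from multiprocessing import Process

class SomeClass(Process):
    def __init__(self):
        Process.__init__(self)
        self.event = multiprocessing.Event()  # shared between parent and child

    def continueExec(self):
        self.event.set()  # now visible inside run() in the child process

    def run(self):
        print "I'm running with it"
        self.event.wait()  # returns once the parent calls set()
        print "I'm further than I was"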

Max threads in Python, and how to kill a thread?

I'm making a daemon that creates N threads, where N is between 1 and 500.
When a thread finishes handling its data, it must die. When a thread starts, it creates a log file and writes to it; when it dies, the file should be removed. While running, each thread checks a LOCK file; if that file has been removed, the thread should die. But sometimes it doesn't. Why doesn't the thread die immediately?
class HandlerWorker(threading.Thread):
    q_reader = None
    q_writer = None
    q_cmd = None
    id = ''
    task_id = 0

    def __init__(self, id, task_id, q_reader, q_writer, q_cmd):
        '''
        Constructor
        '''
        #print "Worker %d started" % int(id)
        self.id = id
        self.task_id = task_id
        self.q_cmd = q_cmd
        self.q_reader = q_reader
        self.q_writer = q_writer
        threading.Thread.__init__(self)

    def __del__(self):
        print "Destroy worker %d %d" % (int(self.task_id), int(self.id))

    def isPid(self):
        is_pid = True
        try:
            cfg = ConfigReader('config.json')
            general = cfg.getGeneral()
            pidfile = "%s%d" % (str(general['pids']), int(self.task_id))
            f = open(pidfile, 'r')
            f.close()
        except:
            is_pid = False
        return is_pid

    def run(self):
        '''
        Handle email
        '''
        is_run = True
        cfg = ConfigReader('config.json')
        general = cfg.getGeneral()
        logpath = str(general['pids']) + "../logs/%d_%d" % (int(self.task_id), int(self.id))
        f = open(logpath, "w+")
        while is_run:
            if not self.q_reader.empty():
                msg = self.q_reader.get()
                # convert message
                self.q_writer.put(msg)
                log_str = "Date: %s Email:%s Status:%d\n" % (str(time.asctime()), str(msg), int(status))
                f.write(log_str)
                f.flush()
            is_run = self.isPid()
            time.sleep(1)
        f.close()
        try:
            os.remove(logpath)
        except:
            print "Can't remove LOG file: %s" % logpath
        print "Stop thread %d %d" % (int(self.task_id), int(self.id))
