Twisted spawnProcess, send output of one process to input of another - python

I am trying to use twisted spawnProcess to replicate the behavior of something like this:
cat <input.txt | wc -w
This is just an example of two commands, in reality I have my own processes (say python or bash scripts or external programs) where each process reads from stdin and writes to stdout. Just like the above example, I want to pipe stdout from one process to stdin of another, and I want to do this using spawnProcess. I used some hints here:
Twisted pipe two processes with spawnProcess
but I can't get it to work. It just hangs when reading from stdin in the second spawnProcess protocol. My code is below. What am I doing wrong? How exactly can I achieve this objective? Is it better to call the second spawnProcess from within the first?
#!/usr/bin/env python
from twisted.internet import protocol
from twisted.internet import reactor
import re
import os
import sys

class CatPP(protocol.ProcessProtocol):
    def __init__(self, input_data):
        self.input_data = input_data
        self.data = ""
    def connectionMade(self):
        print "connectionMade in CatPP! Now writing to stdin of cat"
        print "  writing this data: %s" % self.input_data
        self.transport.write(self.input_data + '\n')
        print "  closing stdin"
        self.transport.closeStdin()  # tell them we're done
        print "  stdin closed"
    def outReceived(self, data):
        print "outReceived from cat! with %d bytes!" % len(data)
        self.data = self.data + data
        print "  received this: %s" % self.data
    def errReceived(self, data):
        print "errReceived from cat! with %d bytes!" % len(data)
    def inConnectionLost(self):
        print "inConnectionLost for cat! stdin is closed! (we probably did it)"
    def outConnectionLost(self):
        print "outConnectionLost for cat! The child closed their stdout!"
        # now is the time to examine what they wrote
        print "I saw cat write this:", self.data
    def errConnectionLost(self):
        print "errConnectionLost for cat! The child closed their stderr."
    def processExited(self, reason):
        print "processExited for cat, status %d" % (reason.value.exitCode,)
    def processEnded(self, reason):
        print "processEnded for cat, status %d" % (reason.value.exitCode,)

class WcPP(protocol.ProcessProtocol):
    def __init__(self):
        self.data = ""
    def connectionMade(self):
        print "connectionMade! Now reading from pipe to get stdin for wc"
        print "  reading from stdin"
        txt = sys.stdin.read()
        print "  Read this from stdin: %s" % (txt,)
        self.transport.write(txt)
        self.transport.closeStdin()  # tell them we're done
    def outReceived(self, data):
        print "outReceived from wc! with %d bytes!" % len(data)
        self.data = self.data + data
    def errReceived(self, data):
        print "errReceived from wc! with %d bytes!" % len(data)
    def inConnectionLost(self):
        print "inConnectionLost for wc! stdin is closed! (we probably did it)"
    def outConnectionLost(self):
        print "outConnectionLost for wc! The child closed their stdout!"
        # now is the time to examine what they wrote
        print "Final output:", self.data
        #(dummy, lines, words, chars, file) = re.split(r'\s+', self.data)
        #print "I saw %s lines" % lines
    def errConnectionLost(self):
        print "errConnectionLost for wc! The child closed their stderr."
    def processExited(self, reason):
        print "processExited for wc, status %d" % (reason.value.exitCode,)
    def processEnded(self, reason):
        print "processEnded for wc, status %d" % (reason.value.exitCode,)
        reactor.stop()

readPipe, writePipe = os.pipe()
handle = open('junkin.txt', 'r')
cat_txt = handle.read()
handle.close()
pp1 = CatPP(cat_txt)
pp2 = WcPP()
reactor.spawnProcess(pp1, "cat", ["cat"], {}, childFDs={1: writePipe})
reactor.spawnProcess(pp2, "wc", ["wc", "-w"], {}, childFDs={0: readPipe})
reactor.run()
try:
    os.close(readPipe)
except:
    print "Exception closing readPipe"
try:
    os.close(writePipe)
except:
    print "Exception closing writePipe"

Here is a working example.
In the case of piping through cat | wc, spawnProcess duplicates the pipe file descriptors, so you need to close your copies in the parent after spawning the children.
from twisted.internet import protocol
from twisted.internet import reactor
import os

class Writer(protocol.ProcessProtocol):
    def __init__(self, data):
        self.data = data
    def connectionMade(self):
        print "Writer -- connection made"
        self.transport.writeToChild(0, self.data)
        self.transport.closeChildFD(0)
    def childDataReceived(self, fd, data):
        pass
    def processEnded(self, status):
        pass

class Reader(protocol.ProcessProtocol):
    def __init__(self):
        pass
    def connectionMade(self):
        print "Reader -- connection made"
    def childDataReceived(self, fd, data):
        print "Reader -- childDataReceived"
        self.received = data
    def processEnded(self, status):
        print "process ended, got:", self.received

class WriteRead(protocol.ProcessProtocol):
    def __init__(self, data):
        self.data = data
    def connectionMade(self):
        self.transport.writeToChild(0, self.data)
        self.transport.closeChildFD(0)
    def childDataReceived(self, fd, data):
        self.received = data
        print "got data:", data
    def processEnded(self, status):
        print "process ended - now what?"

def test1(data):
    # just call wc
    p2 = reactor.spawnProcess(WriteRead(data), "wc", ["wc"], env=None, childFDs={0: "w", 1: "r"})
    reactor.run()

def test2(data):
    rfd, wfd = os.pipe()
    p1 = reactor.spawnProcess(Writer(data), "cat", ["cat"], env=None, childFDs={0: "w", 1: wfd})
    p2 = reactor.spawnProcess(Reader(), "wc", ["wc", "-w"], env=None, childFDs={0: rfd, 1: "r"})
    os.close(rfd)
    os.close(wfd)
    reactor.run()

test2("this is a test")
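A note on the childFDs values in this example: the string "w" asks Twisted to create a pipe the parent writes to with writeToChild(0, ...), and "r" a pipe the parent reads via childDataReceived, while an integer hands the child a file descriptor you created yourself. That is why test2 builds its own os.pipe() for the middle of the chain and then closes both ends in the parent, as noted above.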

OK, I figured out how to do it, but without using the pipes approach from Twisted pipe two processes with spawnProcess, which I never got to work. Instead I used a class ChainableProcessProtocol that takes another ChainableProcessProtocol as an argument. This way you can chain these together, with the output of a prior protocol written to the stdin of the next protocol. The chaining stops when next_protocol is None, and the final output is the data collected by the final protocol. Here is my example:
#!/usr/bin/env python
from twisted.internet import protocol
from twisted.internet import reactor, defer
import re
import os
import sys
import json

def shutdown(x):
    print "Shutdown called"
    reactor.stop()

class ChainableProcessProtocol(protocol.ProcessProtocol):
    def __init__(self, cmd, cmdargs, input_data, next_protocol):
        self.cmd = cmd
        self.cmdargs = cmdargs
        self.input_data = input_data
        self.next_protocol = next_protocol
        self.data = ""
    def set_input_data(self, new_input_data):
        self.input_data = new_input_data
    def connectionMade(self):
        print "connectionMade in %s! Now writing to its stdin" % self.cmd
        print "  writing this data: %s" % self.input_data
        self.transport.write(self.input_data + '\n')
        print "  closing stdin"
        self.transport.closeStdin()  # tell them we're done
        print "  stdin closed"
    def outReceived(self, data):
        print "outReceived from %s! with %d bytes!" % (self.cmd, len(data))
        self.data = self.data + data
        print "  received this: %s" % self.data
    def errReceived(self, data):
        print "errReceived from %s! with %d bytes!" % (self.cmd, len(data))
    def inConnectionLost(self):
        print "inConnectionLost for %s! stdin is closed! (we probably did it)" % (self.cmd,)
    def outConnectionLost(self):
        print "outConnectionLost for %s! The child closed their stdout!" % (self.cmd,)
        # now is the time to examine what they wrote
        print "I saw %s write this: %s" % (self.cmd, self.data)
        #
        # cmd is done, now write to next_protocol if set
        #
        if self.next_protocol:
            print "Calling chained protocol"
            self.next_protocol.set_input_data(self.data)
            npcmd = self.next_protocol.cmd
            npcmdargs = self.next_protocol.cmdargs
            print "npcmd is %s" % (npcmd,)
            print "npcmdargs is %s" % (json.dumps(npcmdargs),)
            reactor.spawnProcess(self.next_protocol, npcmd, npcmdargs, {})
        else:
            print "No chained protocol"
    def errConnectionLost(self):
        print "errConnectionLost for %s! The child closed their stderr." % (self.cmd,)
    def processExited(self, reason):
        print "processExited for %s, status %d" % (self.cmd, reason.value.exitCode)
    def processEnded(self, reason):
        print "processEnded for %s, status %d" % (self.cmd, reason.value.exitCode)

handle = open('junkin.txt', 'r')
in_txt = handle.read()
handle.close()
#
# Create the last protocol first because earlier protocol(s) need it
#
pp2 = ChainableProcessProtocol("wc", ["wc", "-w"], '', None)
#
# The first process takes an instance of the second process as an argument
#
pp1 = ChainableProcessProtocol("cat", ["cat"], in_txt, pp2)
#
# Before using spawnProcess, create a deferred to shut down the reactor in 2 seconds.
# That should give us enough time; it is only so our test script does not run forever.
#
d = defer.Deferred()
d.addCallback(shutdown)
reactor.callLater(2, d.callback, '')
#
# Now spawn the first process
#
reactor.spawnProcess(pp1, pp1.cmd, pp1.cmdargs, {})
reactor.run()

print "Final output is data in pp2: %s" % (pp2.data.strip(),)
print "Done!"

Related

Python 3.6 Object/Class Threading

I'm looking to create a "self contained threaded class" using Python 3.
At a high level, what I would like to do is spawn 50 asynchronous device objects from my "main" class and then just use their methods as needed. This is not difficult when dealing with objects synchronously, but it gets cloudy quite quickly when we move to asynchronous processing. The primary idea is to keep the threading self-contained in the device class so my base (main.py) code stays streamlined/clean and free of any thread management.
I don't plan on any resource sharing in this case, so I think I'm clear of any thread lock issues.
Here is some sample code that I hope someone can provide some hints or samples on making it a self-threaded class (meaning I don't want to manage threads at the main.py level):
Sample main.py
from deviceworker import Device

availableworkers = {'USA': 'services.groupkt.com', 'IND': 'services.groupkt.com'}
Activeworkers = []

for name, ip in availableworkers.items():
    Activeworkers.append(Device(name, ip))

for worker in Activeworkers:
    worker.checkcountry()  # asynchronous call - (we don't want to wait for a response)

# The idea is to keep this code as clean as possible.
Sample Object: deviceworker.py
import urllib.request
import urllib.parse
import json

class Device:
    def __init__(self, name, endpoint, preamble='state', port=80):
        self.name = name
        self.connected = False
        self.connection = HTTPConnection(endpoint, preamble, port)
        self.getStatus()
    def getStatus(self, check_for=None):
        self.urlresponse = json.loads(self.connection.GET('get/USA/all'))  # Use USA just to verify connection
        if check_for:
            pass
        self.connected = True
    def checkcountry(self):
        print(self.connection.GET('get/%s/all' % self.name))

class HTTPConnection:
    def __init__(self, endpoint, preamble=None, port=80):
        if preamble:  # specifying a version after the port and before method
            self.url = 'http://%s:%s/%s/' % (endpoint, port, preamble)
        else:
            self.url = 'http://%s:%s/' % (endpoint, port)
        print('_init_ url=%s' % self.url)
    def GET(self, operation):
        #try:
        #print('%s%s' % (self.url, operation))
        with urllib.request.urlopen('%s%s' % (self.url, operation)) as f:
            return f.read().decode('utf-8')
        #except Exception as e:
        #raise Exception("GET Request Failed")
I've stripped most of the exception handling for simplicity. The sample above should work.
--- UPDATE ---
So I think I've sort of figured it out, but I'm still not getting the parallelism I would expect from the documentation.
import threading
import urllib.request
import urllib.parse
import json
import time

class Device(threading.Thread):
    def __init__(self, name, endpoint, preamble='state', port=80):
        threading.Thread.__init__(self)
        self.name = name
        self.connected = False
        self.connection = HTTPConnection(endpoint, preamble, port)
        print('%s: __init__' % self.name)
    def run(self):
        self.getStatus()
        print('%s: hit run()' % self.name)
    def getStatus(self):
        self.urlresponse = json.loads(self.connection.GET('get/USA/all'))  # Use USA just to verify connection
        self.connected = True
    def checkcountry(self):
        if self.name == 'USA':
            self.waittime = 10
        else:
            self.waittime = 0
        print('%s: Getting Codes - wait time: %s' % (self.name, self.waittime))
        start_time = time.time()
        time.sleep(self.waittime)
        result = self.connection.GET('get/%s/all' % self.name)
        elapsed_time = time.time() - start_time
        print('%s: Got Codes - second: %s' % (self.name, elapsed_time))

class HTTPConnection:
    def __init__(self, endpoint, preamble=None, port=80):
        if preamble:  # specifying a version after the port and before method
            self.url = 'http://%s:%s/%s/' % (endpoint, port, preamble)
        else:
            self.url = 'http://%s:%s/' % (endpoint, port)
    def GET(self, operation):
        with urllib.request.urlopen('%s%s' % (self.url, operation)) as f:
            return f.read().decode('utf-8')

DeviceList = {'USA': 'services.groupkt.com', 'IND': 'services.groupkt.com'}
ActiveDevices = []

for name, ip in DeviceList.items():
    print('main: creating object for: %s' % name)
    newDevice = Device(name, ip)
    ActiveDevices.append(newDevice)
    newDevice.start()

for device in ActiveDevices:
    print('main: calling checkcountry() for: %s' % device.name)
    device.checkcountry()
Here are the results:
main: creating object for: USA
USA: __init__
main: creating object for: IND
IND: __init__
main: calling checkcountry() for: USA
USA: Getting Codes - wait time: 10
USA: Got Codes - second: 10.167016744613647
main: calling checkcountry() for: IND
IND: Getting Codes - wait time: 0
IND: Got Codes - second: 0.11001110076904297
By adding the delay to the USA search I would have expected IND to finish first, but it appears the calls serialized.
I'm running this on:
Python 3.6.0 (v3.6.0:41df79263a11, Dec 23 2016, 07:18:10) [MSC v.1900 32 bit (Intel)] on win32
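For what it's worth, checkcountry() in this update is still invoked directly from the main thread, so the two calls run one after the other; only whatever is inside run() executes on the worker thread, which would explain the serialized timings. A minimal sketch of one way to get the overlap (the call_async helper is hypothetical, not from the original code):

import threading

def call_async(fn, *args):
    # Hypothetical helper: run any bound method on its own thread.
    t = threading.Thread(target=fn, args=args)
    t.start()
    return t

# Each checkcountry() now runs concurrently instead of serially.
threads = [call_async(device.checkcountry) for device in ActiveDevices]
for t in threads:
    t.join()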
Here is a custom thread sample with locking that worked great for me, better than using the event.
Try it in Colab.
import threading, time
import inspect  # needed by center() below

i = 0
luk = threading.Lock()

class special_thread(threading.Thread):
    """This function starts a Thread class"""
    def __init__(self, execute, threadID, name, daemon, args=(), repetitive=False, kwargs=None, interval_sec=60):
        threading.Thread.__init__(self)
        self.daemon = daemon
        self.stopped = threading.Event()
        self.interval_sec = interval_sec
        self.execute = execute
        self.name = name
        if kwargs is None:
            kwargs = {}
        self.args = args
        self.kwargs = kwargs
        self.repetitive = repetitive
        self.threadID = threadID
        print(args)
    def stop(self):
        self.stopped.set()
        self.join()
    def run(self):
        if self.repetitive:
            while not self.stopped.wait(self.interval_sec):
                self.execute(*self.args, **self.kwargs)
        else:
            self.execute(*self.args, **self.kwargs)

def center(t_num):
    y = 0
    luk.acquire()
    caller = inspect.getouterframes(inspect.currentframe())[1][3]
    print(' {} is acquiring by {} '.format(caller, str(time.ctime())))
    y += t_num
    print("Inside %s()" % caller)
    print('thread number is ', t_num, y)
    time.sleep(2 * t_num)
    luk.release()
    print(' {} is releasing by {} '.format(caller, str(time.ctime())))

def target_uno():
    t_num = 1
    center(t_num)

def target_dos():
    t_num = 2
    center(t_num)

target_uno = special_thread(execute=target_uno, args=(), repetitive=True, interval_sec=1,
                            threadID=10004, name='target_uno', daemon=False)
target_dos = special_thread(execute=target_dos, args=(), repetitive=True, interval_sec=1,
                            threadID=10004, name='target_dos', daemon=False)

if __name__ == "__main__":
    target_uno.start()
    target_dos.start()
    time.sleep(20)
    target_uno.stop()
    target_dos.stop()

Handling kill events for python multiprocessing processes

For a program that should run both on Linux and Windows (Python 2.7), I'm trying to update values of a given object using multiprocessing.Process (while the main program is running, I call the update class in a separate process).
Sometimes it takes too long before my object is updated, so I want to be able to kill my update process and continue with the main program. "Too long" is not strictly defined here; it is rather a subjective perception of the user.
For a single queue (as in the MyFancyClass example in http://pymotw.com/2/multiprocessing/communication.html) I can kill the update process and the main program continues as I want.
However, when I make a second queue to retrieve the updated object, ending the update process does not allow me to continue in the main program.
What I have so far is:
import multiprocessing
import time, os

class NewParallelProcess(multiprocessing.Process):
    def __init__(self, taskQueue, resultQueue, processName):
        multiprocessing.Process.__init__(self)
        self.taskQueue = taskQueue
        self.resultQueue = resultQueue
        self.processName = processName
    def run(self):
        print "pid %s of process that could be killed" % os.getpid()
        while True:
            next_task = self.taskQueue.get()
            if next_task is None:
                # poison pill for terminate
                print "%s: exiting" % self.processName
                self.taskQueue.task_done()
                break
            print "%s: %s" % (self.processName, next_task)
            answer = next_task()
            self.taskQueue.task_done()
            self.resultQueue.put(answer)
        return

class OldObject(object):
    def __init__(self):
        self.accurate = "OldValue"
        self.otherValue = "SomeOtherValue"

class UpdateObject(dict):
    def __init__(self, objectToUpdate):
        self.objectToUpdate = objectToUpdate
    def __call__(self):
        returnDict = {}
        returnDict["update"] = self.updateValue("NewValue")
        return returnDict
    def __str__(self):
        return "update starting"
    def updateValue(self, updatedValue):
        for i in range(5):
            time.sleep(1)  # updating my object - time consuming with possible pid kill
            print "working... (pid=%s)" % os.getpid()
        self.objectToUpdate.accurate = updatedValue
        return self.objectToUpdate

if __name__ == '__main__':
    taskQueue = multiprocessing.JoinableQueue()
    resultQueue = multiprocessing.Queue()
    newProcess = NewParallelProcess(taskQueue, resultQueue, processName="updateMyObject")
    newProcess.start()
    myObject = OldObject()
    taskQueue.put(UpdateObject(myObject))
    # poison pill for NewParallelProcess loop and wait to finish
    taskQueue.put(None)
    taskQueue.join()
    # get back results
    results = resultQueue.get()
    print "Values have been updated"
    print "---> %s became %s" % (myObject.accurate, results["update"].accurate)
Any suggestions on how to kill the newProcess and to continue in the main program?
Well, made some modifications, and this does what I want. Not sure whether it is the most efficient, so any improvements are always welcome :)
import multiprocessing
import time, os

class NewParallelProcess(multiprocessing.Process):
    def __init__(self, taskQueue, resultQueue, processName):
        multiprocessing.Process.__init__(self)
        self.taskQueue = taskQueue
        self.resultQueue = resultQueue
        self.name = processName
    def run(self):
        print "Process %s (pid = %s) added to the list of running processes" % (self.name, self.pid)
        next_task = self.taskQueue.get()
        self.taskQueue.task_done()
        self.resultQueue.put(next_task())
        return

class OldObject(object):
    def __init__(self):
        self.accurate = "OldValue"
        self.otherValue = "SomeOtherValue"

class UpdateObject(dict):
    def __init__(self, objectToUpdate, valueToUpdate):
        self.objectToUpdate = objectToUpdate
        self.valueToUpdate = valueToUpdate
    def __call__(self):
        returnDict = {}
        returnDict["update"] = self.updateValue(self.valueToUpdate)
        return returnDict
    def updateValue(self, updatedValue):
        for i in range(5):
            time.sleep(1)  # updating my object - time consuming with possible pid kill
            print "working... (pid=%s)" % os.getpid()
        self.objectToUpdate.accurate = updatedValue
        return self.objectToUpdate

if __name__ == '__main__':
    # queue for single process
    taskQueue = multiprocessing.JoinableQueue()
    resultQueue = multiprocessing.Queue()
    newProcess = NewParallelProcess(taskQueue, resultQueue, processName="updateMyObject")
    newProcess.start()
    myObject = OldObject()
    taskQueue.put(UpdateObject(myObject, "NewValue"))
    while True:
        # check if newProcess is still alive
        time.sleep(5)
        if newProcess.is_alive() is False:
            print "Process %s (pid = %s) is not running any more (exit code = %s)" % (newProcess.name, newProcess.pid, newProcess.exitcode)
            break
    if newProcess.exitcode == 0:
        print "ALL OK"
        taskQueue.join()
        # get back results
        print "NOT KILLED"
        results = resultQueue.get()
        print "Values have been updated"
        print "---> %s became %s" % (myObject.accurate, results["update"].accurate)
    elif newProcess.exitcode == 1:
        print "ended with error in function"
        print "KILLED"
        for i in range(5):
            time.sleep(1)
            print "i continue"
    elif newProcess.exitcode == -15 or newProcess.exitcode == -9:
        print "ended with kill signal %s" % newProcess.exitcode
        print "KILLED"
        for i in range(5):
            time.sleep(1)
            print "i continue"
    else:
        print "no idea what happened"
        print "KILLED"
        for i in range(5):
            time.sleep(1)
            print "i continue"

Python: Threading does not let the rest of the program run

In my program, when a thread starts, only the thread runs while the rest of the program waits for it. Here is the program:
import socket
from time import sleep
import threading
import string

class IRC:
    "IRC Module for python"
    def __init__(self, HOST, PORT, NICK, REALNAME, IDENT):
        print "New Connection"
        self.s = socket.socket()
        self.s.connect((HOST, PORT))
        self.s.send("USER %s %s bla :%s\r\n" % (IDENT, HOST, REALNAME))
        self.s.send("NICK %s\r\n" % NICK)
        self.rb = "ReadBuffer"
        t = threading.Thread(target=self.ping())
        t.start()
        print "started thread"
    def read(self):
        readbuffer = readbuffer + self.s.recv(1024)
        temp = string.split(readbuffer, "\n")
        readbuffer = temp.pop()
        return temp
    def ping(self):
        "Handles Pinging"
        readbuffer = ""
        print "Started pinging"
        while True:
            readbuffer = readbuffer + self.s.recv(1024)
            temp = string.split(self.rb, "\n")
            readbuffer = temp.pop()
            for line in temp:
                line = string.rstrip(line)
                line = string.split(line)
                if line[0] == 'PING':
                    self.s.send("PONG %s\r\n" % line[1])
                    print("Ponged")
                    self.pinged = True
            print "ran"
    def send(self, message, channel):
        self.s.send("PRIVMSG %s :%s\r\n" % (channel, message))
Since the comment already solved your problem, there's not much more to say. Here's what the doc says:
class threading.Thread(group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None)

This constructor should always be called with keyword arguments. Arguments are:

group should be None; reserved for future extension when a ThreadGroup class is implemented.

target is the callable object to be invoked by the run() method. Defaults to None, meaning nothing is called.
So you should set target to a callable, which is the self.ping method, rather than calling it.
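In other words (a minimal sketch of the difference):

# Wrong: self.ping() runs right here in __init__ (blocking forever),
# and Thread receives its return value (None) as the target.
t = threading.Thread(target=self.ping())

# Right: pass the bound method itself; the new thread will call it.
t = threading.Thread(target=self.ping)
t.start()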

How to get the status of spawn process in twisted python?

I want to trigger many long-running processes continuously, and, based on the exit status of each process, I need to perform other tasks. In the example below I'm able to spawn processes, but I'm not able to capture/get the execution status returned by each spawned process back in the main loop (i.e. in the CmdProtocol class).
I'm new to these twisted python concepts - can someone help me here?
import sys
from twisted.internet.protocol import ServerFactory, ProcessProtocol
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
from twisted.internet import protocol
import os
import signal

class MyPP(protocol.ProcessProtocol):
    def __init__(self):
        self.parent_id = os.getpid()
    def connectionMade(self):
        print "connectionMade!"
        print "Parent id = %s" % self.parent_id
        print "Child process id = %s" % self.transport.pid
    def outReceived(self, data):
        print "out", data
    def errReceived(self, data):
        print "error", data
    def inConnectionLost(self):
        print "inConnectionLost! stdin is closed! (we probably did it)"
        print "Parent id = %s" % self.parent_id
        print "Child process id closes STDIN= %s" % self.transport.pid
    def outConnectionLost(self):
        print "outConnectionLost! The child closed their stdout!"
        print "Parent id = %s" % self.parent_id
        print "Child process id closes STDOUT = %s" % self.transport.pid
    def errConnectionLost(self):
        print "errConnectionLost! The child closed their stderr."
        print "Parent id = %s" % self.parent_id
        print "Child process id closes ERRCONN = %s" % self.transport.pid
    def processExited(self, reason):
        print "processExited %s, status %d" % (self.transport.pid, reason.value.exitCode,)
    def processEnded(self, reason):
        print "%s processEnded, status %d" % (self.transport.pid, reason.value.exitCode,)
        print "quitting"

class CmdProtocol(LineReceiver):
    delimiter = '\n'
    def connectionMade(self):
        self.client_ip = self.transport.getPeer()
        print "Client connection from %s" % self.client_ip
    def processcmd(self):
        pp = MyPP()
        cmd = ['c:\Python27\python.exe', '-u', 'print_hi.py']
        print "Calling processcmd - <%s>" % cmd
        reactor.spawnProcess(pp, cmd[0], cmd[1:])
    def connectionLost(self, reason):
        print "Lost client connection. Reason: %s" % reason
    def lineReceived(self, line):
        if not line:
            return
        # Parse the command
        print 'Cmd received from %s : %s' % (self.client_ip, line)
        commandParts = line.split()
        if len(commandParts) > 0:
            command = commandParts[0].lower()
            args = commandParts[1:]
            try:
                print "Command received : <%s>" % command
                method = getattr(self, command)
            except AttributeError, e:
                self.sendLine('Error: no such command.')
            else:
                try:
                    res = method()
                    print "Returned status:%s" % res
                    self.sendLine('Command executed successfully.')
                except Exception, e:
                    self.sendLine('Error: ' + str(e))
    def do_kill(self, pid):
        """kill: Kill a process (PID)"""
        print 'Killing pid:%s' % pid
        res = os.kill(int(pid), signal.SIGTERM)
        print "Kill Status %s" % res

class MyFactory(ServerFactory):
    protocol = CmdProtocol
    def __init__(self):
        print "Factory called"

reactor.listenTCP(8000, MyFactory())
reactor.run()
This is actually a very basic Python data structures question. You just need to refer to an instance of CmdProtocol from an instance of MyPP. Since CmdProtocol is what constructs MyPP in the first place, this is easy. Just change the construction of MyPP to look like this:
def processcmd(self):
    pp = MyPP(self)
and then MyPP.__init__ to look like this:
def __init__(self, cmd_protocol):
    self.parent_id = os.getpid()
    self.cmd_protocol = cmd_protocol
Then, in any method on MyPP, you can access the relevant CmdProtocol instance with self.cmd_protocol.
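For example, MyPP could report the child's exit status back over the TCP connection (a sketch; sendLine comes from the LineReceiver base class that CmdProtocol already uses):

def processEnded(self, reason):
    # Report how the child finished to the client that requested it.
    self.cmd_protocol.sendLine('process ended with status %s'
                               % (reason.value.exitCode,))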

Max threads in Python, and how to kill a Thread?

I have a daemon which creates N threads, where N is between 1 and 500.
When a thread finishes handling its data it must die. When a thread starts, it creates a log file and writes to it; when it dies, the file should be removed. While running, each thread checks a LOCK file, and if that file has been removed the thread should die. But sometimes it does not. Why does the thread not die immediately?
class HandlerWorker(threading.Thread):
    q_reader = None
    q_writer = None
    q_cmd = None
    id = ''
    task_id = 0
    def __init__(self, id, task_id, q_reader, q_writer, q_cmd):
        '''
        Constructor
        '''
        #print "Worker %d started" % int(id)
        self.id = id
        self.task_id = task_id
        self.q_cmd = q_cmd
        self.q_reader = q_reader
        self.q_writer = q_writer
        threading.Thread.__init__(self)
    def __del__(self):
        print "Destroy worker %d %d" % (int(self.task_id), int(self.id))
    def isPid(self):
        is_pid = True
        try:
            cfg = ConfigReader('config.json')
            general = cfg.getGeneral()
            pidfile = "%s%d" % (str(general['pids']), int(self.task_id))
            f = open(pidfile, 'r')
            f.close()
        except:
            is_pid = False
        return is_pid
    def run(self):
        '''
        Handle email
        '''
        is_run = True
        cfg = ConfigReader('config.json')
        general = cfg.getGeneral()
        logpath = str(general['pids']) + "../logs/%d_%d" % (int(self.task_id), int(self.id))
        f = open(logpath, "w+")
        while is_run:
            if not self.q_reader.empty():
                msg = self.q_reader.get()
                # convert message
                self.q_writer.put(msg)
                log_str = "Date: %s Email:%s Status:%d\n" % (str(time.asctime()), str(msg), int(status))
                f.write(log_str)
                f.flush()
            is_run = self.isPid()
            time.sleep(1)
        f.close()
        try:
            os.remove(logpath)
        except:
            print "Can't remove LOG file: %s" % logpath
        print "Stop thread %d %d" % (int(self.task_id), int(self.id))
