I'm working on a server written in Python. When a client sends a command, the server calls a function whose running time is unknown, so to avoid blocking I used threading. But when looking at the child threads it seems they are not terminating, causing a lot of memory usage.
EDIT: Here is the directory tree: http://pastebin.com/WZDxLquC
Following answers I found on Stack Overflow, I implemented a custom Thread class:
sThreads.py:
import threading

class Thread(threading.Thread):
    def __init__(self, aFun, args=()):
        super(Thread, self).__init__(None, aFun, None, args)
        self.stopped = threading.Event()

    def stop(self):
        self.stopped.set()

    def isStopped(self):
        return self.stopped.isSet()
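Note that setting an Event does not terminate a thread by itself: the function running inside the thread has to poll the event and return on its own. A minimal sketch of that cooperative pattern (the worker function and its loop body are illustrative, not the original handle() code):

import threading
import time

def worker(stop_event):
    # stop() only sets a flag; the target must check it and return
    while not stop_event.is_set():
        time.sleep(0.1)  # placeholder for one unit of real work

stop_event = threading.Event()
t = threading.Thread(target=worker, args=(stop_event,))
t.start()
stop_event.set()  # ask the worker to finish
t.join()          # the thread exits once the loop observes the event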
Then here is the server's loop, somewhere in mainServer.py:
def serve_forever(self, aCustomClass, aSize = 1024):
    while True:
        self.conn, self.addr = self.sock.accept()
        msg = self.recvMSG(4096)
        if(msg):
            self.handShake(msg)
            print 'Accepted !'
        while True:
            msg = self.recvMSG(aSize)
            if(msg):
                t = sThreads.Thread(self.handle, (aCustomClass,))
                t.start()
                self.currentThreads.append(t)
                if(self.workers > 0):
                    tt = sThreads.Thread(self.respond)
                    tt.start()
            if(self.workers == 0 and len(self.currentThreads) > 0):
                for th in self.currentThreads:
                    th.stop()
Using a custom Thread class did not solve the issue: the finished threads are still not stopped!
EDIT: added the handle() and respond() methods:
def handle(self, aClass):
    self.workers += 1
    self.queue.put(aClass._onRecieve(self.decodeStream()))

def respond(self):
    while self.workers > 0:
        msgToSend, wantToSend = self.queue.get()
        self.workers -= 1
        if(wantToSend):
            print 'I want to send :', msgToSend
            continue # Send is not yet implemented!
It turned out that self.queue.get() was causing the issue: by default it blocks forever when the queue is empty, so the respond threads never return.
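One way to avoid that hang is to poll the queue with a timeout and re-check the loop condition when nothing arrives. A sketch of respond() rewritten that way (assuming the same self.queue and self.workers attributes as above):

import Queue  # the module is named queue in Python 3

def respond(self):
    while self.workers > 0:
        try:
            # wait at most one second instead of blocking forever
            msgToSend, wantToSend = self.queue.get(timeout=1.0)
        except Queue.Empty:
            continue  # nothing arrived yet; re-check the loop condition
        self.workers -= 1
        if wantToSend:
            print 'I want to send :', msgToSend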
I'm writing a Python module to read jstest output and make an Xbox gamepad work in Python on Linux. I need to start an infinite while loop in the background on another thread from __init__; it looks like this:
import os
from threading import Thread
import time
import select
import subprocess

class Joystick:
    """Initializes base class and launches jstest and xboxdrv"""
    def __init__(self, refreshRate=2000, deadzone=4000):
        self.proc = subprocess.Popen(['xboxdrv', '-D', '-v', '--detach-kernel-driver', '--dpad-as-button'], stdout=subprocess.PIPE, bufsize=0)
        self.pipe = self.proc.stdout
        self.refresh = refreshRate
        self.refreshDelay = 1.0 / refreshRate
        self.refreshTime = 0  # indicates the next refresh
        self.deadzone = deadzone
        self.start()
        self.xbox = subprocess.Popen(['jstest', '--normal', '/dev/input/js0'], stdout=subprocess.PIPE, bufsize=-1, universal_newlines=True)
        self.response = self.xbox.stdout.readline()
        a = Thread(target=self.reload2())
        a.start()
        print("working")

    def reload2(self):
        while True:
            self.response = self.xbox.stdout.readline()
            print("read")
            time.sleep(0.5)

    def start(self):
        global leftVibrateAmount, rightVibrateAmount
        leftVibrateAmount = 0
        rightVibrateAmount = 0
        readTime = time.time() + 1  # here we wait a while
        found = False
        while readTime > time.time() and not found:
            readable, writeable, exception = select.select([self.pipe], [], [], 0)
            if readable:
                response = self.pipe.readline()
                # tries to detect if controller is connected
                if response == b'[ERROR] XboxdrvDaemon::run(): fatal exception: DBusSubsystem::request_name(): failed to become primary owner of dbus name\n':
                    raise IOError("Another instance of xboxdrv is running.")
                elif response == b'[INFO] XboxdrvDaemon::connect(): connecting slot to thread\n':
                    found = True
                    self.reading = response
                elif response == b'':
                    raise IOError('Are you running as sudo?')
        if not found:
            self.pipe.close()
            # halt if controller not found
            raise IOError("Xbox controller/receiver isn't connected")
The loop is started in the __init__ function like so:
a = threading.Thread(target=self.reload2())  # code hangs here
a.start()
But each time I create the variable a, the whole program hangs in the while loop, which should be running on another thread.
Thanks for help.
You may be having issues with your __init__. I put it in a simple class as an example, and it runs as expected.
import time
from threading import Thread

class InfiniteLooper():
    def __init__(self):
        a = Thread(target=self.reload2)  # reload2, not reload2(), otherwise you're executing reload2 and assigning the result to target, but it's an infinite loop, so...
        print('Added thread')
        a.start()
        print('Thread started')

    def reload2(self):
        while True:
            self.response = input('Enter something')
            print('read')
            time.sleep(0.5)

loop = InfiniteLooper()
Output:
Added thread
Thread started
Enter something
1
read
Enter something
1
read
As you can see, "Enter something" appears after I've added the thread and started it. It also loops fine.
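One extra detail worth noting: since the loop never returns, the program can only exit while the loop is running if the thread is marked as a daemon. A small standalone sketch of that variant (the loop body here is illustrative):

import time
from threading import Thread

def reload2():
    while True:
        print('read')
        time.sleep(0.5)

a = Thread(target=reload2)
a.daemon = True  # let the interpreter exit even though the loop never returns
a.start()
time.sleep(2)    # the main program does its work, then exits; the daemon dies with it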
I have an application listening on a specific TCP port to handle received requests (listen.py). Besides that, I have another one (trigger.py) that, depending on the request parameters, triggers the respective operation.
Now, let's say operation A was triggered (opA.py). Operation A uses a worker thread to start (worker.py). When the user asks listen.py to stop operation A, the started thread is supposed to stop.
UPDATED:
The problem is that the thread is never stopped, because the problem lies in trigger.py: the OperationA instance is lost once execute() exits, so I can never call stopOperation. It shows me AttributeError: 'NoneType' object has no attribute 'stopOperation'.
Any ideas how to solve this?
listen.py
from trigger import Trigger
'''
code to handle requests here:
1st: param -> 'start'
2nd: param -> 'stop'
'''
t = Trigger()
t.execute(param)
trigger.py
from opA import OperationA

class Trigger():
    def execute(param):
        opA = OperationA()
        if param == 'start':
            opA.startOperation()
        elif param == 'stop':
            opA.stopOperation()
opA.py
from worker import ThreadParam

class OperationThread(ThreadParam):
    def run(self):
        while (self.running == False):
            '''
            do something here
            '''

class OperationA():
    def __init__(self):
        self.listenThread = OperationThread(self)

    def startOperation(self):
        self.listenThread.start()

    def stopOperation(self):
        if self.listenThread.isAlive() == True:
            print 'Thread is alive'
            self.listenThread.killSignal()
        else:
            print 'Thread is dead'
worker.py
from threading import Thread

class ThreadParam(Thread):
    def __init__(self, _parent):
        Thread.__init__(self)
        self.parent = _parent
        self.running = False

    def killSignal(self):
        self.running = True
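As an aside, the inverted polarity of the running flag (False while the thread runs, True after killSignal) is easy to misread. A sketch of the same worker expressed with a threading.Event instead (stop_event and the run loop here are illustrative renames, not the original code):

from threading import Thread, Event

class ThreadParam(Thread):
    def __init__(self, _parent):
        Thread.__init__(self)
        self.parent = _parent
        self.stop_event = Event()

    def run(self):
        # do one unit of work per iteration, then re-check the flag
        while not self.stop_event.is_set():
            self.stop_event.wait(0.1)  # placeholder for real work

    def killSignal(self):
        self.stop_event.set()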
A minimal useful Trigger might look like this:
class Trigger(object):
    def __init__(self):
        self.operation = None

    def execute(self, command):
        if command == 'start':
            assert self.operation is None
            self.operation = OperationA()
            self.operation.start_operation()
        elif command == 'stop':
            self.operation.stop_operation()
            self.operation = None
        else:
            print 'Unknown command', repr(command)
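For this to work, the same Trigger instance also has to survive between requests in listen.py; building a fresh Trigger per request would lose the operation again. A sketch of that wiring (handle_request is illustrative, not the original listen.py):

from trigger import Trigger

t = Trigger()  # one long-lived instance, created once at startup

def handle_request(param):
    # every request goes through the same Trigger, so the OperationA
    # created by 'start' is still there when 'stop' arrives
    t.execute(param)

handle_request('start')
handle_request('stop')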
I'm trying to start a data queue server under a managing process (so that it can later be turned into a service). While the data queue server function works fine in the main process, it does not work in a process created using multiprocessing.Process.
The dataQueueServer and dataQueueClient code is based on the code from the multiprocessing module documentation here.
When run on its own, dataQueueServer works well. However, when started via a multiprocessing.Process's start() in mpqueue, it doesn't work (when tested with the client). I am using dataQueueClient without changes to test both cases.
The code does reach the serve_forever in both cases, so I think the server is working, but something is blocking it from communicating back to the client in the mpqueue case.
I have placed the loop that runs the serve_forever() part in a thread, so that it can be stopped.
Here is the code:
mpqueue # this is the "manager" process trying to spawn the server in a child process
import time
import multiprocessing
import threading
import dataQueueServer

class Printer():
    def __init__(self):
        self.lock = threading.Lock()

    def tsprint(self, text):
        with self.lock:
            print text

class QueueServer(multiprocessing.Process):
    def __init__(self, name = '', printer = None):
        multiprocessing.Process.__init__(self)
        self.name = name
        self.printer = printer
        self.ml = dataQueueServer.MainLoop(name = 'ml', printer = self.printer)

    def run(self):
        self.printer.tsprint(self.ml)
        self.ml.start()

    def stop(self):
        self.ml.stop()

if __name__ == '__main__':
    printer = Printer()
    qs = QueueServer(name = 'QueueServer', printer = printer)
    printer.tsprint(qs)
    printer.tsprint('starting')
    qs.start()
    printer.tsprint('started.')
    printer.tsprint('Press Ctrl-C to quit')
    try:
        while True:
            time.sleep(60)
    except KeyboardInterrupt:
        printer.tsprint('\nTrying to exit cleanly...')
        qs.stop()
        printer.tsprint('stopped')
dataQueueServer
import time
import threading
from multiprocessing.managers import BaseManager
from multiprocessing import Queue

HOST = ''
PORT = 50010
AUTHKEY = 'authkey'

## Define some helper functions for use by the main process loop

class Printer():
    def __init__(self):
        self.lock = threading.Lock()

    def tsprint(self, text):
        with self.lock:
            print text

class QueueManager(BaseManager):
    pass

class MainLoop(threading.Thread):
    """A thread based loop manager, allowing termination signals to be sent
    to the thread"""

    def __init__(self, name = '', printer = None):
        threading.Thread.__init__(self)
        self._stopEvent = threading.Event()
        self.daemon = True
        self.name = name
        if printer is None:
            self.printer = Printer()
        else:
            self.printer = printer
        ## create the queue
        self.queue = Queue()
        ## Add a function to the handler to return the queue to clients
        self.QM = QueueManager
        self.QM.register('get_queue', callable=lambda:self.queue)
        self.queue_manager = self.QM(address=(HOST, PORT), authkey=AUTHKEY)
        self.queue_server = self.queue_manager.get_server()

    def __del__(self):
        self.printer.tsprint('closing...')

    def run(self):
        self.printer.tsprint('{}: started serving'.format(self.name))
        self.queue_server.serve_forever()

    def stop(self):
        self.printer.tsprint('{}: stopping'.format(self.name))
        self._stopEvent.set()

    def stopped(self):
        return self._stopEvent.isSet()

def start():
    printer = Printer()
    ml = MainLoop(name = 'ml', printer = printer)
    ml.start()
    return ml

def stop(ml):
    ml.stop()

if __name__ == '__main__':
    ml = start()
    raw_input("\nhit return to stop")
    stop(ml)
And a client:
dataQueueClient
import datetime
from multiprocessing.managers import BaseManager

n = 0
N = 10**n

HOST = ''
PORT = 50010
AUTHKEY = 'authkey'

def now():
    return datetime.datetime.now()

def gen(n, func, *args, **kwargs):
    k = 0
    while k < n:
        yield func(*args, **kwargs)
        k += 1

class QueueManager(BaseManager):
    pass

QueueManager.register('get_queue')
m = QueueManager(address=(HOST, PORT), authkey=AUTHKEY)
m.connect()
queue = m.get_queue()

def load(msg, q):
    return q.put(msg)

def get(q):
    return q.get()

lgen = gen(N, load, msg = 'hello', q = queue)

t0 = now()
while True:
    try:
        lgen.next()
    except StopIteration:
        break
t1 = now()
print 'loaded %d items in ' % N, t1-t0

t0 = now()
while queue.qsize() > 0:
    queue.get()
t1 = now()
print 'got %d items in ' % N, t1-t0
So it seems the solution is simple enough: don't use serve_forever(), and use manager.start() instead.
According to Eli Bendersky, the BaseManager (and its extended version SyncManager) already spawns the server in a new process (and looking at the multiprocessing.managers code confirms this). The problem I was experiencing stems from the form used in the example, in which the server is started under the main process.
I still don't understand why the original example doesn't work when run under a child process, but that's no longer an issue.
Here's the working (and much simplified from OP) code to manage multiple queue servers:
Server:
from multiprocessing import Queue
from multiprocessing.managers import SyncManager

HOST = ''
PORT0 = 5011
PORT1 = 5012
PORT2 = 5013
AUTHKEY = 'authkey'

name0 = 'qm0'
name1 = 'qm1'
name2 = 'qm2'
description = 'Queue Server'

def CreateQueueServer(HOST, PORT, AUTHKEY, name = None, description = None):
    q = Queue()

    class QueueManager(SyncManager):
        pass

    QueueManager.register('get_queue', callable = lambda: q)
    QueueManager.register('get_name', callable = lambda: name)                # wrapped in a lambda so the registered object is callable
    QueueManager.register('get_description', callable = lambda: description) # same here
    manager = QueueManager(address = (HOST, PORT), authkey = AUTHKEY)
    manager.start() # This actually starts the server
    return manager

# Start three queue servers
qm0 = CreateQueueServer(HOST, PORT0, AUTHKEY, name0, description)
qm1 = CreateQueueServer(HOST, PORT1, AUTHKEY, name1, description)
qm2 = CreateQueueServer(HOST, PORT2, AUTHKEY, name2, description)

raw_input("return to end")
Client:
from multiprocessing.managers import SyncManager

HOST = ''
PORT0 = 5011
PORT1 = 5012
PORT2 = 5013
AUTHKEY = 'authkey'

def QueueServerClient(HOST, PORT, AUTHKEY):
    class QueueManager(SyncManager):
        pass

    QueueManager.register('get_queue')
    QueueManager.register('get_name')
    QueueManager.register('get_description')
    manager = QueueManager(address = (HOST, PORT), authkey = AUTHKEY)
    manager.connect() # This starts the connected client
    return manager

# create three connected managers
qc0 = QueueServerClient(HOST, PORT0, AUTHKEY)
qc1 = QueueServerClient(HOST, PORT1, AUTHKEY)
qc2 = QueueServerClient(HOST, PORT2, AUTHKEY)

# Get the queue objects from the clients
q0 = qc0.get_queue()
q1 = qc1.get_queue()
q2 = qc2.get_queue()

# put stuff in the queues
q0.put('some stuff')
q1.put('other stuff')
q2.put({1:123, 2:'abc'})

# check their sizes
print 'q0 size', q0.qsize()
print 'q1 size', q1.qsize()
print 'q2 size', q2.qsize()

# pull some stuff and print it
print q0.get()
print q1.get()
print q2.get()
Adding an additional server to share a dictionary with the information of the running queue servers, so that consumers can easily tell what's available where, is easy enough with that model. One thing to note, though, is that the shared dictionary requires slightly different syntax than a normal dictionary: dictionary[0] = something will not work. You need to use dictionary.update([(key, value), (otherkey, othervalue)]) and dictionary.get(key) syntax, which propagates to all other clients connected to the dictionary.
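A sketch of such a directory server following the same pattern, reusing the HOST/AUTHKEY constants from the server above (the registry contents, port 5010, and the name get_registry are made up for illustration):

from multiprocessing.managers import SyncManager

registry = {}  # hypothetical mapping of queue server names to their ports

class DirectoryManager(SyncManager):
    pass

DirectoryManager.register('get_registry', callable = lambda: registry)

directory = DirectoryManager(address = (HOST, 5010), authkey = AUTHKEY)
directory.start()

# a connected client would then do, e.g.:
#   reg = client_manager.get_registry()
#   reg.update([('qm0', PORT0), ('qm1', PORT1)])  # not reg['qm0'] = PORT0
#   print reg.get('qm0')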
I am trying to write a method that counts down to a given time and, unless a restart command is given, executes the task. But I don't think the Python threading.Timer class allows the timer to be cancelable.
import threading

def countdown(action):
    def printText():
        print 'hello!'

    t = threading.Timer(5.0, printText)
    if (action == 'reset'):
        t.cancel()
    t.start()
I know the above code is wrong somehow; I would appreciate some kind guidance here.
You would call the cancel method after you start the timer:
import time
import threading

def hello():
    print "hello, world"
    time.sleep(2)

t = threading.Timer(3.0, hello)
t.start()
var = 'something'
if var == 'something':
    t.cancel()
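One detail worth knowing: cancel() only has an effect while the timer is still in its waiting stage; once the callback has started running, it cannot be interrupted. A quick sketch of the timing:

import threading
import time

t = threading.Timer(3.0, lambda: None)
t.start()
time.sleep(1)
t.cancel()  # still inside the 3 second wait, so the callback never runs
t.join()    # returns promptly; cancel() wakes the timer thread up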
You might consider using a while-loop on a Thread, instead of using a Timer.
Here is an example appropriated from Nikolaus Gradwohl's answer to another question:
import threading
import time

class TimerClass(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.event = threading.Event()
        self.count = 10

    def run(self):
        while self.count > 0 and not self.event.is_set():
            print self.count
            self.count -= 1
            self.event.wait(1)

    def stop(self):
        self.event.set()

tmr = TimerClass()
tmr.start()
time.sleep(3)
tmr.stop()
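The trick in this pattern is that self.event.wait(1) doubles as the one-second tick and the cancellation check: stop() takes effect within at most a second, instead of only after the whole countdown has elapsed.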
I'm not sure if I understand correctly. Do you want to write something like in this example?
>>> import threading
>>> t = None
>>>
>>> def sayHello():
...     global t
...     print "Hello!"
...     t = threading.Timer(0.5, sayHello)
...     t.start()
...
>>> sayHello()
Hello!
Hello!
Hello!
Hello!
Hello!
>>> t.cancel()
>>>
The threading.Timer class does have a cancel method, and although it won't kill the thread, it will stop the timer from actually firing. What actually happens is that the cancel method sets a threading.Event, and the thread executing the threading.Timer checks that event after it's done waiting, before it actually executes the callback.
That said, timers are usually implemented without using a separate thread for each one. The best way to do it depends on what your program is actually doing (while waiting for this timer), but anything with an event loop, like GUI and network frameworks, has a way to request a timer that is hooked into the event loop.
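To get the "restart resets the countdown" behaviour from the question with plain threading.Timer, a common pattern is to cancel the pending timer and replace it with a fresh one, since Timer objects cannot be restarted. A sketch (the class name and the 5 second countdown are illustrative):

import threading

class ResettableCountdown(object):
    def __init__(self, seconds, task):
        self.seconds = seconds
        self.task = task
        self.timer = None

    def start(self):
        self.timer = threading.Timer(self.seconds, self.task)
        self.timer.start()

    def restart(self):
        # Timer objects cannot be restarted, so cancel and replace
        if self.timer is not None:
            self.timer.cancel()
        self.start()

def task():
    print 'executing task'

c = ResettableCountdown(5.0, task)
c.start()
c.restart()  # the 5 second countdown starts over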
I'm not sure it's the best option, but for me it works like this: create each timer with t = timer_mgr(.....), append it to a list with timers.append(t), and then after all are created you can call:

for tm in timers: # threading.enumerate():
    print "********", tm.cancel()

My timer_mgr class is this:
class timer_mgr():
    def __init__(self, st, t, hFunction, id, name):
        self.is_list = (type(st) is list)
        self.st = st
        self.t = t
        self.id = id
        self.hFunction = hFunction
        self.thread = threading.Timer(t, self.handle_function, [id])
        self.thread.name = name

    def handle_function(self, id):
        if self.is_list:
            print "run_at_time:", datetime.now()
            self.hFunction(id)
            dt = schedule_fixed_times(datetime.now(), self.st)
            print "next:", dt
            self.t = (dt-datetime.now()).total_seconds()
        else:
            self.t = self.st
            print "run_every", self.t, datetime.now()
            self.hFunction(id)
        self.thread = threading.Timer(self.t, self.handle_function, [id])
        self.thread.start()

    def start(self):
        self.thread.start()

    def cancel(self):
        self.thread.cancel()
Inspired by the post above: a cancelable and resettable timer in Python, using a thread.
Features: start, stop, restart, and a callback function.
Input: timeout, sleep_chunk values, and a callback_function.
You can use or inherit this class in any other program, and you can also pass arguments to the callback function.
The timer should also respond in the middle, not just after the full sleep time has elapsed. So instead of one full sleep, it uses small chunks of sleep and keeps checking the event object in a loop.
import threading
import time

class TimerThread(threading.Thread):
    def __init__(self, timeout=3, sleep_chunk=0.25, callback=None, *args):
        threading.Thread.__init__(self)
        self.timeout = timeout
        self.sleep_chunk = sleep_chunk
        if callback == None:
            self.callback = None
        else:
            self.callback = callback
        self.callback_args = args
        self.terminate_event = threading.Event()
        self.start_event = threading.Event()
        self.reset_event = threading.Event()
        self.count = self.timeout/self.sleep_chunk

    def run(self):
        while not self.terminate_event.is_set():
            while self.count > 0 and self.start_event.is_set():
                # print self.count
                # time.sleep(self.sleep_chunk)
                # if self.reset_event.is_set():
                if self.reset_event.wait(self.sleep_chunk):  # wait for a small chunk of timeout
                    self.reset_event.clear()
                    self.count = self.timeout/self.sleep_chunk  # reset
                self.count -= 1
            if self.count <= 0:
                self.start_event.clear()
                # print 'timeout. calling function...'
                self.callback(*self.callback_args)
                self.count = self.timeout/self.sleep_chunk  # reset

    def start_timer(self):
        self.start_event.set()

    def stop_timer(self):
        self.start_event.clear()
        self.count = self.timeout / self.sleep_chunk  # reset

    def restart_timer(self):
        # reset only if timer is running, otherwise start timer afresh
        if self.start_event.is_set():
            self.reset_event.set()
        else:
            self.start_event.set()

    def terminate(self):
        self.terminate_event.set()
#=================================================================

def my_callback_function():
    print 'timeout, do this...'

timeout = 6  # sec
sleep_chunk = .25  # sec

tmr = TimerThread(timeout, sleep_chunk, my_callback_function)
tmr.start()

quit = '0'
while True:
    quit = raw_input("Proceed or quit: ")
    if quit == 'q':
        tmr.terminate()
        tmr.join()
        break
    tmr.start_timer()
    if raw_input("Stop ? : ") == 's':
        tmr.stop_timer()
    if raw_input("Restart ? : ") == 'r':
        tmr.restart_timer()
This code is the server part of my proxy program; its function is to create the socket and fork four processes that accept connections on it one by one.
In my program I use the gevent model to dispatch all my functions, and before I changed it to multiple processes my program worked fine. But now, when I use the second process, the first one stops running. I can't find what is wrong; maybe it's the accept call, or my events stop being dispatched.
It has already bothered me for two days, and I hope someone can help me.
BTW, my English is poor; I've tried my best to explain it, and I hope you can understand.
class Client(object):
    def __init__(self, ent, ev):
        ...

    def receive( self ):
        ...
        if "Content-Length" in dic:
            self.ent_s_send = core.event(core.EV_WRITE,
                                         self.conn.fileno(),
                                         self.ser_send,
                                         [self.conn, self.body]
                                         )
            self.recv_ent = core.event(core.EV_READ,
                                       self.sock.fileno(),
                                       self.recv_content
                                       )
            self.recv_ent.add()
        ...

    def recv_content(self, ent, ev):
        ...
        self.n = self.sock.recv_into(self.msg,
                                     min(self.total-self.num, 20000),
                                     socket.MSG_DONTWAIT)
        time.sleep(0.1)
        # if I add this sleep here to slow the events down, the problem is solved; how can that be?
        self.num += self.n
        self.msg_buffer.fromstring(self.msg.tostring()[:self.n])
        ...
        if self.total > self.num:  # if not the last msg, continue recving and sending...
            self.ent_s_send.add()
            self.recv_ent.add()
        ...

    def ser_send(self, ent, ev):
        ...
        num = self.conn.send(self.msg_buffer, socket.MSG_DONTWAIT)
        ...
        self.msg_buffer = self.msg_buffer[num:]
        ...

    ...

class Server(object):
    def __init__( self ):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.bind(('localhost', 18001))
        self.sock.listen(50)
        self.mutex = multiprocessing.Lock()

    def loop( self ):
        for i in range(0, 4):
            pid = os.fork()
            if pid == 0 or pid == -1:
                break
        if pid == -1:
            print "Fork failed!!!"
            sys.exit()
        elif pid == 0:  # create four child processes to accept on the socket
            print "Child PID = %d" % os.getpid()
            core.init()
            self.event = core.event(core.EV_READ,
                                    self.sock.fileno(),
                                    self.onlink)
            self.event.add()
            core.dispatch()
        else:
            os.wait()

    def onlink( self, ent, ev):
        self.mutex.acquire()
        print 'Accept PID = %s' % os.getpid()
        try:
            self.conn, self.addr = self.sock.accept()
            # I think 'accept' is the problem, but I cannot see how.
        except socket.error, why:
            if why.args[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED]:
                return
            else:
                raise
        print self.sock, self.conn, self.addr
        self.mutex.release()
        print 'Release PID = %s' % os.getpid()
        cc = Chat( self.conn, self.sock )
        self.event.add()

if __name__ == '__main__':
    s1 = Server()
    s1.loop()
accept() is a blocking call: it will wait indefinitely for a client to connect. Holding a mutex over a blocking operation like that is a Bad Idea™, since you totally lock all other concurrent processes out.
Also, as @Maxim noted in the comments, you don't really need to lock around accept(). Just let the OS arbitrate dequeuing of incoming connections and dispatch them to your processes.
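A minimal sketch of that pattern, with each forked child blocking on accept() directly and no lock anywhere (plain blocking sockets here rather than the asker's event library):

import os
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 18001))
sock.listen(50)

for i in range(4):
    if os.fork() == 0:
        # child: all four children block on the same listening socket;
        # the kernel hands each incoming connection to exactly one of them
        while True:
            conn, addr = sock.accept()
            print 'PID %d accepted connection from %s' % (os.getpid(), addr)
            conn.close()

os.wait()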