Multiple daemons in one Python program don't work

I am trying to build a Python program that runs two or more daemons at the same time; each daemon listens on a different port and does its own work. But when I finished and ran it, I found that only one daemon works and the second never starts. How can I make this work? Here is my test code:
#!/usr/bin/python
# -*- coding:utf-8 -*-
import sys
import time
import socket
import logging
import atexit
import os
from signal import SIGTERM

class Daemon:
    """
    A generic daemon class.
    Usage: subclass the Daemon class and override the run() method
    """
    def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.pidfile = pidfile

    def daemonize(self):
        """
        do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)

        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)

        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)

        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()
        si = file(self.stdin, 'r')
        so = file(self.stdout, 'a+')
        se = file(self.stderr, 'a+', 0)
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())

        # write pidfile
        atexit.register(self.delpid)
        pid = str(os.getpid())
        file(self.pidfile, 'w+').write("%s\n" % pid)

    def delpid(self):
        os.remove(self.pidfile)

    def start(self):
        """
        Start the daemon
        """
        # Check for a pidfile to see if the daemon already runs
        try:
            pf = file(self.pidfile, 'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        if pid:
            message = "pidfile %s already exist. Daemon already running?\n"
            sys.stderr.write(message % self.pidfile)
            sys.exit(1)

        # Start the daemon
        self.daemonize()
        self.run()

    def stop(self):
        """
        Stop the daemon
        """
        # Get the pid from the pidfile
        try:
            pf = file(self.pidfile, 'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        if not pid:
            message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self.pidfile)
            return  # not an error in a restart

        # Try killing the daemon process
        try:
            while 1:
                os.kill(pid, SIGTERM)
                time.sleep(0.1)
        except OSError, err:
            err = str(err)
            if err.find("No such process") > 0:
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print str(err)
                sys.exit(1)

    def restart(self):
        """
        Restart the daemon
        """
        self.stop()
        self.start()

    def run(self):
        """
        You should override this method when you subclass Daemon. It will be called after the process has been
        daemonized by start() or restart().
        """

class controlDaemon(Daemon):
    global tcpListenPort
    global bufferSize

    def run(self):
        logger.debug("start listen:" + str(tcpListenPort))
        address = ('127.0.0.1', tcpListenPort)
        udpListenSocket2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        udpListenSocket2.bind(address)
        while True:
            data = udpListenSocket2.recv(bufferSize)
            logger.debug("received:{0}".format(data))
        udpListenSocket2.close()

class notificationPushDaemon(Daemon):
    global udpListenPort
    global bufferSize

    def run(self):
        logger.debug("start listen:" + str(udpListenPort))
        address = ('127.0.0.1', udpListenPort)
        udpListenSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        udpListenSocket.bind(address)
        while True:
            data = udpListenSocket.recv(bufferSize)
            logger.debug("received:{0}".format(data))
        udpListenSocket.close()

def InitLog():
    logger.setLevel(logging.DEBUG)
    fh = logging.FileHandler("tt.log")
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.ERROR)
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    ch.setFormatter(formatter)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)

if __name__ == "__main__":
    logger = logging.getLogger("logtest")
    InitLog()
    tcpListenPort = 19999
    udpListenPort = 19966
    tcpPidFile = '/tmp/test1.pid'
    udpPidFile = '/tmp/test2.pid'
    cDaemon = controlDaemon(tcpPidFile)
    npDaemon = notificationPushDaemon(udpPidFile)
    bufferSize = 65535
    if len(sys.argv) == 2:
        if 'start' == sys.argv[1]:
            cDaemon.start()
            npDaemon.start()
        elif 'stop' == sys.argv[1]:
            cDaemon.stop()
            npDaemon.stop()
        elif 'restart' == sys.argv[1]:
            cDaemon.restart()
            npDaemon.restart()
        else:
            print "Unknown command"
            sys.exit(2)
        sys.exit(0)
    else:
        print "usage: %s start|stop|restart" % sys.argv[0]
        sys.exit(2)

A Unix daemon is a process, so trying to make one process act as two daemons doesn't work: the first start() call daemonizes and then blocks in run() forever, so the second daemon is never started. Either write two separate daemons plus another program or script to start/stop/restart both, or write just one daemon and let it do the two jobs in parallel with threading or multiprocessing. Here is your example rewritten to use threads:
#!/usr/bin/env python
# coding: utf8
import atexit
import logging
import os
import socket
import sys
import time
from contextlib import closing
from signal import SIGTERM
from threading import Thread

LOGGER = logging.getLogger('logtest')
PID_FILENAME = '/tmp/test.pid'
TCP_LISTEN_PORT = 19999
UDP_LISTEN_PORT = 19966
BUFFER_SIZE = 65535

class Daemon(object):
    # ... Class definition from question goes here ...
    pass

def control():
    LOGGER.debug('start listen: %s', TCP_LISTEN_PORT)
    address = ('127.0.0.1', TCP_LISTEN_PORT)
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.bind(address)
    server_socket.listen(5)
    while True:
        sock, address = server_socket.accept()
        with closing(sock):
            while True:
                data = sock.recv(BUFFER_SIZE)
                if not data:
                    break
                LOGGER.debug('received from %s: %s', address, data)

def push_notifications():
    LOGGER.debug('start listen: %s', UDP_LISTEN_PORT)
    address = ('127.0.0.1', UDP_LISTEN_PORT)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(address)
    while True:
        data, address = sock.recvfrom(BUFFER_SIZE)
        LOGGER.debug('received from %s: %s', address, data)
    sock.close()

class TheDaemon(Daemon):
    @staticmethod
    def run():
        thread = Thread(target=control)
        thread.start()
        push_notifications()

def init_logging():
    LOGGER.setLevel(logging.DEBUG)
    file_handler = logging.FileHandler('tt.log')
    file_handler.setLevel(logging.DEBUG)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.ERROR)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
    stream_handler.setFormatter(formatter)
    file_handler.setFormatter(formatter)
    LOGGER.addHandler(file_handler)
    LOGGER.addHandler(stream_handler)

def main():
    init_logging()
    commands = ['start', 'stop', 'restart']
    daemon = TheDaemon(PID_FILENAME)
    if len(sys.argv) == 2:
        command = sys.argv[1]
        if command in commands:
            getattr(daemon, command)()
        else:
            print 'Unknown command'
            sys.exit(2)
    else:
        print 'usage: {0} {1}'.format(sys.argv[0], '|'.join(commands))
        sys.exit(2)

if __name__ == '__main__':
    main()
The TCP and UDP handling code had some errors; the TCP side needs a stream socket with listen() before accept() can work. If you expect many simultaneous TCP connections, you might also want to handle each connection in its own thread.
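For example, a minimal sketch of that threaded variant, reusing the names from the answer code above:

def handle_connection(sock, address):
    # serve one client until it disconnects
    with closing(sock):
        while True:
            data = sock.recv(BUFFER_SIZE)
            if not data:
                break
            LOGGER.debug('received from %s: %s', address, data)

def control():
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.bind(('127.0.0.1', TCP_LISTEN_PORT))
    server_socket.listen(5)
    while True:
        sock, address = server_socket.accept()
        # one worker thread per client, so a slow client doesn't block accept()
        Thread(target=handle_connection, args=(sock, address)).start()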
The global statements in the class bodies had no effect: global only matters where a name is assigned, and nothing in those class bodies assigned to the names. Module-level names are readable from methods without any declaration.
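A quick illustration of the rule:

bufferSize = 65535

class Reader(object):
    def run(self):
        print bufferSize    # reading a module-level name needs no `global`

def grow():
    global bufferSize       # needed only because we rebind the module-level name
    bufferSize *= 2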

You can use the multiprocessing module from Python's standard library.
import multiprocessing as mp

class Daemon(mp.Process):
    def __init__(self):
        mp.Process.__init__(self)

    def run(self):
        # everything that should run as a daemon goes here
        pass

def main():
    daemon = Daemon()
    daemon.daemon = True  # daemonic children are terminated automatically when the main program exits
    daemon.start()
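Since daemonic children are killed when the main process exits, the parent must stay alive (or join) for as long as the background work should run. A minimal sketch demonstrating this:

import multiprocessing as mp
import time

def heartbeat():
    while True:
        print 'beat'
        time.sleep(1)

if __name__ == '__main__':
    p = mp.Process(target=heartbeat)
    p.daemon = True   # p is terminated as soon as the main process exits
    p.start()
    time.sleep(5)     # heartbeat only runs while the parent is alive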

Related

Logging to a single file from multiple processes in multiple modules

In the Logging Cookbook I found "Logging to a single file from multiple processes", and I would like to use it with multiple modules, where each module runs as a process. Do you have any idea how?
I used the first code example from "Logging to a single file from multiple processes".
Let's say main is in one module and worker_process in another; how do I do that?
main.py:
import logging
import logging.handlers
import multiprocessing

import worker_process

def listener_configurer():
    root = logging.getLogger()
    h = logging.handlers.RotatingFileHandler('mptest.log', 'a', 300, 10)
    f = logging.Formatter('%(asctime)s %(processName)-10s %(name)s %(levelname)-8s %(message)s')
    h.setFormatter(f)
    root.addHandler(h)

def listener_process(queue, configurer):
    configurer()
    while True:
        try:
            record = queue.get()
            if record is None:  # We send this as a sentinel to tell the listener to quit.
                break
            logger = logging.getLogger(record.name)
            logger.handle(record)  # No level or filter logic applied - just do it!
        except Exception:
            import sys, traceback
            print('Whoops! Problem:', file=sys.stderr)
            traceback.print_exc(file=sys.stderr)

def main():
    queue = multiprocessing.Queue(-1)
    listener = multiprocessing.Process(target=listener_process,
                                       args=(queue, listener_configurer))
    listener.start()
    workers = []
    for i in range(10):
        worker = worker_process.TEST(queue)
        workers.append(worker)
        worker.start()
    for w in workers:
        w.join()
    queue.put_nowait(None)
    listener.join()

if __name__ == '__main__':
    main()
worker_process.py
import logging
import logging.handlers
import multiprocessing
import time
from random import choice, random

def worker_configurer(queue):
    h = logging.handlers.QueueHandler(queue)
    root = logging.getLogger()
    root.addHandler(h)
    root.setLevel(logging.DEBUG)

class TEST(multiprocessing.Process):
    def __init__(self, queue, func=worker_configurer):
        super(TEST, self).__init__()
        self.queue = queue
        self.func = func
        self.LEVELS = [logging.DEBUG, logging.INFO, logging.WARNING,
                       logging.ERROR, logging.CRITICAL]
        self.LOGGERS = ['a.b.c', 'd.e.f']
        self.MESSAGES = [
            'Random message #1',
            'Random message #2',
            'Random message #3',
        ]

    def run(self):
        self.func(self.queue)
        name = multiprocessing.current_process().name
        print('Worker started: %s' % name)
        for i in range(10):
            time.sleep(random())
            logger = logging.getLogger(choice(self.LOGGERS))
            level = choice(self.LEVELS)
            message = choice(self.MESSAGES)
            logger.log(level, message)
        print('Worker finished: %s' % name)
This does not work correctly. I would like to use the first code example from the docs with multiple modules, as mentioned above.

How to implement a daemon process in Python?

I am writing a small program which has a heartbeat process and an echo process. I implemented this with the multiprocessing library, but it doesn't seem to work.
from multiprocessing import Process
import os
import time

def ticking():
    while True:
        time.sleep(1)
        print 'ticking'

def echo():
    while True:
        a = raw_input('please type something')
        print 'echo: ' + a

if __name__ == '__main__':
    p = Process(target=ticking, args=())
    p.start()
    p.join()
    p = Process(target=echo, args=())
    p.start()
    p.join()
You create a process that runs forever and then join() it, so the second process is never created: join() stalls your main process forever.
If this is how you want to proceed, first create both processes and then join them:
if __name__ == '__main__':
    p1 = Process(target=ticking, args=())
    p1.start()
    p2 = Process(target=echo, args=())
    p2.start()
    p1.join()
    p2.join()
To create a daemon you can use this function:
import os
import sys

def daemonize():
    """UNIX double fork mechanism."""
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError as err:
        sys.stderr.write('_Fork #1 failed: {0}\n'.format(err))
        sys.exit(1)

    # decouple from parent environment
    os.chdir('/')
    os.setsid()
    os.umask(0)

    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # exit from second parent
            sys.exit(0)
    except OSError as err:
        sys.stderr.write('_Fork #2 failed: {0}\n'.format(err))
        sys.exit(1)

    # redirect standard file descriptors
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(os.devnull, 'r')
    so = open(os.devnull, 'w')
    se = open(os.devnull, 'w')
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
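A hedged sketch of how the pieces could be combined. Note that echo() would no longer work after daemonizing, because stdin has been redirected to /dev/null:

from multiprocessing import Process

if __name__ == '__main__':
    daemonize()                  # detach from the terminal first (function above)
    p = Process(target=ticking)  # ticking() as defined in the question
    p.start()
    p.join()                     # keep the daemonized parent alive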

zmq PUBHandler could not be used in __init__() method

I have a class MyLogger for sending messages to a log server using PUBHandler.
An exception gets raised when MyLogger is instantiated in LogWorker.__init__() (version 1); however, it works if MyLogger is instantiated in LogWorker.log_worker() (version 2).
Any suggestions would be appreciated.
import logging
from multiprocessing import Process
import os
import random
import sys
import time

import zmq
from zmq.log.handlers import PUBHandler

class MyLogger(object):
    ''''''
    def __init__(self, port, handler=None):
        self.port = port
        self.handler = handler or self._construct_sock_handler()
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)
        if not self.logger.handlers:
            self.logger.addHandler(self.handler)

    def _construct_sock_handler(self):
        context = zmq.Context()
        log_sock = context.socket(zmq.PUB)
        log_sock.connect("tcp://127.0.0.1:%i" % self.port)
        time.sleep(0.1)
        handler = PUBHandler(log_sock)
        return handler

    def get_logger(self):
        return self.logger

def sub_logger(port, level=logging.DEBUG):
    ctx = zmq.Context()
    sub = ctx.socket(zmq.SUB)
    sub.bind('tcp://127.0.0.1:%i' % port)
    sub.setsockopt(zmq.SUBSCRIBE, "")
    logging.basicConfig(level=level)
    while True:
        level, message = sub.recv_multipart()
        if message.endswith('\n'):
            # trim trailing newline, which will get appended again
            message = message[:-1]
        log = getattr(logging, level.lower())
        log(message)

class LogWorker(object):
    def __init__(self):
        # version 1 (raises an exception when the workers start):
        # self.logger = MyLogger(port).get_logger()
        pass

    def log_worker(self, port):
        # version 2 (works):
        self.logger = MyLogger(port).get_logger()
        print "starting logger at %i with level=%s" % (os.getpid(), logging.DEBUG)
        while True:
            level = logging.INFO
            self.logger.log(level, "Hello from %i!" % os.getpid())
            time.sleep(1)

if __name__ == '__main__':
    if len(sys.argv) > 1:
        n = int(sys.argv[1])
    else:
        n = 2
    port = 5555
    workers = [Process(target=LogWorker().log_worker, args=(port,))
               for _ in range(n)]
    [w.start() for w in workers]
    try:
        sub_logger(port)
    except KeyboardInterrupt:
        pass
    finally:
        [w.terminate() for w in workers]
answer from pyzmq owner minrk:
You cannot pass zmq contexts or sockets across the fork boundary that happens when you instantiate a subprocess with multiprocessing. You have to make sure that you create your Context after you are in the subprocess.
solution:
def work():
    worker = LogWorker(port)
    worker.log_worker()

workers = [Process(target=work) for _ in range(n)]
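For this sketch to run, LogWorker would also have to be adapted so that it only stores plain data in __init__ and creates the zmq machinery inside the subprocess; a hedged version:

class LogWorker(object):
    def __init__(self, port):
        self.port = port  # plain data crosses the fork boundary safely

    def log_worker(self):
        # the zmq Context/socket is created here, inside the subprocess
        self.logger = MyLogger(self.port).get_logger()
        while True:
            self.logger.log(logging.INFO, "Hello from %i!" % os.getpid())
            time.sleep(1)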

Logging with Multiprocessing in python

I am currently struggling to get simple multiprocessing logging working.
I am using the MultiProcessingLog class from this answer: https://stackoverflow.com/a/894284/293195
I have a simple ConvertImage class which is able to write to the log, and that works, but the exceptions and tracebacks never appear in the log.
Does somebody know what the problem is here?
import io
import os, traceback, logging
import multiprocessing, threading, sys
from multiprocessing import Pool, Manager
from logging import FileHandler

class MultiProcessingLog(logging.Handler):
    def __init__(self, name, mode):
        logging.Handler.__init__(self)
        self._handler = FileHandler(name, mode)
        self.queue = multiprocessing.Queue(-1)
        t = threading.Thread(target=self.receive)
        t.daemon = True
        t.start()

    def setFormatter(self, fmt):
        logging.Handler.setFormatter(self, fmt)
        self._handler.setFormatter(fmt)

    def receive(self):
        while True:
            try:
                record = self.queue.get()
                self._handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                break
            except:
                traceback.print_exc(file=sys.stderr)

    def send(self, s):
        self.queue.put_nowait(s)

    def _format_record(self, record):
        # ensure that exc_info and args
        # have been stringified. Removes any chance of
        # unpickleable things inside and possibly reduces
        # message size sent over the pipe
        if record.args:
            record.msg = record.msg % record.args
            record.args = None
        if record.exc_info:
            dummy = self.format(record)
            record.exc_info = None
        return record

    def emit(self, record):
        try:
            s = self._format_record(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        self._handler.close()
        logging.Handler.close(self)

class ConvertImage:
    def __init__(self, logger=None):
        self.logger = logger

    def __call__(self, f):
        self.process(f)

    def process(self, f):
        try:
            logging.info("Process %i" % os.getpid())
            raise NameError("Stupid error")
        except Exception as e:
            logging.info("Exception: " + e.message)
            exc_buffer = io.StringIO()
            traceback.print_exc(file=exc_buffer)
            logging.info(exc_buffer.getvalue())
            raise e
        except:
            logging.info("Exception!")
            exc_buffer = io.StringIO()
            traceback.print_exc(file=exc_buffer)
            logging.info(exc_buffer.getvalue())
            raise

mpl = MultiProcessingLog("ImageProcessing.log", mode='w+')
mpl.setFormatter(logging.Formatter('%(asctime)s - %(lineno)d - %(levelname)-8s - %(message)s'))
logger = logging.getLogger()
logger.addHandler(mpl)
logger.setLevel(logging.DEBUG)

pool = Pool()
converter = ConvertImage()
# map converter.process function over all files
result = pool.map_async(converter, ["A", "B", "C"])
pool.close()
pool.join()
logging.shutdown()
In ConvertImage, you use logging module functions instead of self.logger methods. Can you try the code below?
def process(self, f):
    try:
        self.logger.info("Process %i" % os.getpid())
        raise NameError("Stupid error")
    except Exception as e:
        self.logger.info("Exception: " + e.message)
        exc_buffer = io.StringIO()
        traceback.print_exc(file=exc_buffer)
        self.logger.info(exc_buffer.getvalue())
        raise e
    except:
        self.logger.info("Exception!")
        exc_buffer = io.StringIO()
        traceback.print_exc(file=exc_buffer)
        self.logger.info(exc_buffer.getvalue())
        raise
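For that to work, self.logger must actually be set when the pool invokes the converter; in the question's main block, ConvertImage() is created without a logger, so self.logger is None. One option, sketched here since Logger objects don't reliably pickle across processes, is to store only the logger's name (a hypothetical logger_name parameter) and look it up lazily:

class ConvertImage:
    def __init__(self, logger_name=''):
        self.logger_name = logger_name  # a plain string pickles fine

    @property
    def logger(self):
        # resolved inside whichever process is running; '' means the root logger
        return logging.getLogger(self.logger_name)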

How to create connection timeout with python SocketServer

Good day! I have written a simple server:
class SingleTCPHandler(SocketServer.BaseRequestHandler):
    def handle(self):
        data = self.request.recv(1024)
        self.request.close()

class SimpleServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    daemon_threads = True
    allow_reuse_address = True

    def __init__(self, server_address, RequestHandlerClass):
        SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)

def running():
    server = SimpleServer((settings.host, settings.port), SingleTCPHandler)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        sys.exit(0)
How do I set a connection timeout? When a client sends no data and is inactive for 30 seconds, I want the server to close the connection.
UPDATE
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
import time
import SocketServer
import datetime
import settings
import os
from signal import SIGTERM, SIGCHLD, signal, alarm
import socket
import subprocess
from threading import Thread
import MySQLdb
import re

class SingleTCPHandler(SocketServer.BaseRequestHandler):
    "One instance per connection. Override handle(self) to customize action."
    def handle(self):
        alarm(30)
        data = self.request.recv(1024)
        # Some code
        self.request.close()

class SimpleServer(SocketServer.ForkingMixIn, SocketServer.TCPServer):
    daemon_threads = True
    allow_reuse_address = True

    def __init__(self, server_address, RequestHandlerClass):
        SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)

def running():
    server = SimpleServer((settings.host, settings.port), SingleTCPHandler)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        sys.exit(0)

def deamonize(stdout='/dev/null', stderr=None, stdin='/dev/null',
              pidfile=None, startmsg='started with pid %s'):
    try:
        pid = os.fork()
        if (pid > 0):
            sys.exit(0)
    except OSError, e:
        sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)
    os.chdir(settings.place)
    os.umask(0)
    os.setsid()
    try:
        pid = os.fork()
        if (pid > 0):
            sys.exit(0)
    except OSError, e:
        sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)
    if (not stderr):
        stderr = stdout
    print stdin, stdout, stderr
    si = file(stdin, 'r')
    so = file(stdout, 'a+')
    se = file(stderr, 'a+', 0)
    pid = str(os.getpid())
    sys.stderr.write("\n%s\n" % startmsg % pid)
    sys.stderr.flush()
    if pidfile:
        file(pidfile, 'w+').write("%s\n" % pid)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

def startstop(stdout='/dev/null', stderr=None, stdin='/dev/null',
              pidfile='pid.txt', startmsg='started with pid %s'):
    if len(sys.argv) > 1:
        action = sys.argv[1]
        try:
            pf = open(pidfile)
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        if ((action == 'stop') or (action == 'restart')):
            if (not pid):
                mess = "Cannot stop: pid file '%s' is missing.\n"
                sys.stderr.write(mess % pidfile)
                sys.exit(1)
            try:
                while 1:
                    os.kill(pid, SIGTERM)
                    time.sleep(1)
            except OSError, err:
                err = str(err)
                if err.find("No such process") > 0:
                    os.remove(pidfile)
                    if 'stop' == action:
                        sys.exit(0)
                    action = 'start'
                    pid = None
                else:
                    print str(err)
                    sys.exit(1)
        if ('start' == action):
            if (pid):
                mess = "Start aborted: pid file '%s' already exists.\n"
                sys.stderr.write(mess % pidfile)
                sys.exit(1)
            deamonize(stdout, stderr, stdin, pidfile, startmsg)
            return
    print "Usage: %s start|stop|restart" % sys.argv[0]
    sys.exit(2)

if (__name__ == "__main__"):
    startstop(stdout=settings.log, pidfile=settings.pid)
    running()
If you use StreamRequestHandler instead of BaseRequestHandler, you just need to override the timeout class attribute and it will be applied. If you want to learn how to do it yourself, look at SocketServer.py.
Here's an example; it will kill any connection that isn't done within 5 seconds:
#!/usr/bin/env python
import SocketServer

class myHandler(SocketServer.StreamRequestHandler):
    timeout = 5

    def handle(self):
        recvdata = ""
        while True:
            tmp = self.request.recv(16384)
            recvdata = recvdata + tmp.strip()
            if len(tmp) < 16384:
                break
        self.request.send("Received: {0}".format(recvdata))

class myApp(SocketServer.TCPServer):
    def __init__(self):
        SocketServer.TCPServer.__init__(self, ("localhost", 5555), myHandler)
        print self.server_address
        try:
            self.serve_forever()
        except KeyboardInterrupt:
            print "Got keyboard interrupt, shutting down"
            self.shutdown()

if __name__ == "__main__":
    app = myApp()
This uses Python's socket settimeout() call. I don't think your alarm() solution will work with threading or forking.
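For reference, settimeout() on a plain socket works like this (a minimal sketch):

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5.0)   # blocking calls now raise socket.timeout after 5 seconds
sock.connect(('localhost', 5555))
try:
    data = sock.recv(1024)
except socket.timeout:
    sock.close()       # no data arrived in time: drop the connection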
Please look at it:
import sys
import SocketServer

class SingleTCPHandler(SocketServer.BaseRequestHandler):
    def handle(self):
        data = self.request.recv(1024)
        self.request.close()

class SimpleServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    timeout = 30
    daemon_threads = True
    allow_reuse_address = True

    def __init__(self, server_address, RequestHandlerClass):
        SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)

    def handle_timeout(self):
        print 'Timeout!'

def running():
    server = SimpleServer(('localhost', 6666), SingleTCPHandler)
    try:
        #server.serve_forever()
        server.handle_request()
    except KeyboardInterrupt:
        sys.exit(0)

if __name__ == '__main__':
    running()

# vim: filetype=python syntax=python expandtab shiftwidth=4 softtabstop=4 encoding=utf8
If you want to handle more than one request you need to execute server.handle_request() again.
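For example:

while True:
    server.handle_request()  # each call waits up to `timeout` seconds for a request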
