How to create a connection timeout with Python SocketServer - python

Good day! I have written a simple server:
class SingleTCPHandler(SocketServer.BaseRequestHandler):
    def handle(self):
        data = self.request.recv(1024)
        self.request.close()

class SimpleServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    daemon_threads = True
    allow_reuse_address = True

    def __init__(self, server_address, RequestHandlerClass):
        SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)

def running():
    server = SimpleServer((settings.host, settings.port), SingleTCPHandler)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        sys.exit(0)
How do I set a connection timeout? When a client does not send any data and is inactive for 30 seconds, I want the server to close the connection.
P.S. sorry for my english.
UPDATE
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
import time
import SocketServer
import datetime
import settings
import os
from signal import SIGTERM, SIGCHLD, signal, alarm
import socket
import subprocess
from threading import Thread
import MySQLdb
import re
class SingleTCPHandler(SocketServer.BaseRequestHandler):
"One instance per connection. Override handle(self) to customize action."
def handle(self):
alarm(30)
data = self.request.recv(1024)
# Some code
self.request.close()
class SimpleServer(SocketServer.ForkingMixIn, SocketServer.TCPServer):
daemon_threads = True
allow_reuse_address = True
def __init__(self, server_address, RequestHandlerClass):
SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)
def running():
server = SimpleServer((settings.host, settings.port), SingleTCPHandler)
try:
server.serve_forever()
except KeyboardInterrupt:
sys.exit(0)
def deamonize(stdout='/dev/null', stderr=None, stdin='/dev/null', pidfile=None, startmsg='started with pid %s'):
try:
pid = os.fork()
if (pid > 0):
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
os.chdir(settings.place)
os.umask(0)
os.setsid()
try:
pid = os.fork()
if (pid > 0):
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
if (not stderr):
stderr = stdout
print stdin, stdout, stderr
si = file(stdin, 'r')
so = file(stdout, 'a+')
se = file(stderr, 'a+', 0)
pid = str(os.getpid())
sys.stderr.write("\n%s\n" % startmsg % pid)
sys.stderr.flush()
if pidfile: file(pidfile, 'w+').write("%s\n" % pid)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
def startstop(stdout='/dev/null', stderr=None, stdin='/dev/null', pidfile='pid.txt', startmsg='started with pid %s'):
if len(sys.argv) > 1:
action = sys.argv[1]
try:
pf = open(pidfile)
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if ((action == 'stop') or (action == 'restart')):
if (not pid):
mess = "Не могу остановить, pid файл '%s' отсутствует.\n"
sys.stderr.write(mess % pidfile)
sys.exit(1)
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
os.remove(pidfile)
if 'stop' == action:
sys.exit(0)
action = 'start'
pid = None
else:
print str(err)
sys.exit(1)
if ('start' == action):
if (pid):
mess = "Старт отменен — pid файл '%s' существует.\n"
sys.stderr.write(mess % pidfile)
sys.exit(1)
deamonize(stdout, stderr, stdin, pidfile, startmsg)
return
print "Синтакс запуска: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
if (__name__ == "__main__"):
startstop(stdout=settings.log, pidfile=settings.pid)
running()

If you use StreamRequestHandler instead of BaseRequestHandler, you just need to override the timeout class attribute and it will be applied for you. If you want to see how it is done, just look at SocketServer.py.
Here's an example; it will kill any connection that isn't finished within 5 seconds:
#!/usr/bin/env python

import SocketServer

class myHandler(SocketServer.StreamRequestHandler):
    timeout = 5

    def handle(self):
        recvdata = ""
        while True:
            tmp = self.request.recv(16384)
            recvdata = recvdata + tmp.strip()
            if (len(tmp) < 16384):
                break
        self.request.send("Received: {0}".format(recvdata))

class myApp(SocketServer.TCPServer):
    def __init__(self):
        SocketServer.TCPServer.__init__(self, ("localhost", 5555), myHandler)
        print self.server_address
        try:
            self.serve_forever()
        except KeyboardInterrupt:
            print "Got keyboard interrupt, shutting down"
            self.shutdown()

if __name__ == "__main__":
    app = myApp()
Under the hood this uses Python's socket settimeout() call.
I don't think your alarm() solution will work with threading or forking.
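If you prefer to stay with BaseRequestHandler, a rough sketch of the same idea (my own illustration, not from the answer above) is to call settimeout() on the connection yourself and treat a timeout as an idle client:

import socket
import SocketServer

class SingleTCPHandler(SocketServer.BaseRequestHandler):
    def handle(self):
        # Drop the client if it sends nothing for 30 seconds.
        self.request.settimeout(30)
        try:
            data = self.request.recv(1024)
        except socket.timeout:
            return  # idle client, just close the connection
        # ... process data here ...
        self.request.close()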

Please take a look at this:
import sys
import SocketServer

class SingleTCPHandler(SocketServer.BaseRequestHandler):
    def handle(self):
        data = self.request.recv(1024)
        self.request.close()

class SimpleServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    timeout = 30
    daemon_threads = True
    allow_reuse_address = True

    def __init__(self, server_address, RequestHandlerClass):
        SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)

    def handle_timeout(self):
        print 'Timeout!'

def running():
    server = SimpleServer(('localhost', 6666), SingleTCPHandler)
    try:
        #server.serve_forever()
        server.handle_request()
    except KeyboardInterrupt:
        sys.exit(0)

if __name__ == '__main__':
    running()

# vim: filetype=python syntax=python expandtab shiftwidth=4 softtabstop=4 encoding=utf8
If you want to handle more than one request, you need to call server.handle_request() again.
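For example (my own sketch, reusing the classes above), running() could simply loop:

def running():
    server = SimpleServer(('localhost', 6666), SingleTCPHandler)
    try:
        while True:
            # Serves one request, or returns after `timeout` seconds
            # with no incoming connection (handle_timeout is called).
            server.handle_request()
    except KeyboardInterrupt:
        sys.exit(0)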

Related

python multi daemon in one program doesn't work

I am trying to build a Python program that runs two or more daemons at the same time; each daemon listens on a different port and does its own work. But when I finished and ran it, I found that only one daemon works and the second does not. How can I make this work? Below is my test code:
#!/usr/bin/python
# -*- coding:utf-8 -*-
import sys
import time
import socket
import logging
import atexit
import os
from signal import SIGTERM
class Daemon:
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
file(self.pidfile, 'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
class controlDaemon(Daemon):
global tcpListenPort
global bufferSize
def run(self):
logger.debug("start listen:"+str(tcpListenPort))
address = ('127.0.0.1', tcpListenPort)
udpListenSocket2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udpListenSocket2.bind(address)
while True:
data = udpListenSocket2.recv(bufferSize)
logger.debug("received:{0}".format(data))
udpListenSocket2.close()
class notificationPushDaemon(Daemon):
global udpListenPort
global bufferSize
def run(self):
logger.debug("start listen:"+str(udpListenPort))
address = ('127.0.0.1', udpListenPort)
udpListenSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udpListenSocket.bind(address)
while True:
data = udpListenSocket.recv(bufferSize)
logger.debug("received:{0}".format(data))
udpListenSocket.close()
def InitLog():
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler("tt.log")
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
if __name__ == "__main__":
logger = logging.getLogger("logtest")
InitLog()
tcpListenPort = 19999
udpListenPort = 19966
tcpPidFile = '/tmp/test1.pid'
udpPidFile = '/tmp/test2.pid'
cDaemon = controlDaemon(tcpPidFile)
npDaemon = notificationPushDaemon(udpPidFile)
bufferSize = 65535
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
cDaemon.start()
npDaemon.start()
elif 'stop' == sys.argv[1]:
cDaemon.stop()
npDaemon.stop()
elif 'restart' == sys.argv[1]:
cDaemon.restart()
npDaemon.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
A Unix daemon is a process, so trying to have one process be two daemons doesn't work. Either write two separate daemons plus another program or script to start/stop/restart both, or write just one daemon and let it do the two jobs in parallel with threading or multiprocessing. Here is your example rewritten to use threads:
#!/usr/bin/env python
# coding: utf8
import atexit
import logging
import os
import socket
import sys
import time
from contextlib import closing
from signal import SIGTERM
from threading import Thread

LOGGER = logging.getLogger('logtest')

PID_FILENAME = '/tmp/test.pid'
TCP_LISTEN_PORT = 19999
UDP_LISTEN_PORT = 19966
BUFFER_SIZE = 65535

class Daemon(object):
    pass
    # ... Class definition from question goes here ...

def control():
    LOGGER.debug('start listen: %s', TCP_LISTEN_PORT)
    address = ('127.0.0.1', TCP_LISTEN_PORT)
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # stream socket, so accept() below works
    server_socket.bind(address)
    server_socket.listen(5)
    while True:
        sock, address = server_socket.accept()
        with closing(sock):
            while True:
                data = sock.recv(BUFFER_SIZE)
                if not data:
                    break
                LOGGER.debug('received from %s: %s', address, data)

def push_notifications():
    LOGGER.debug('start listen: %s', UDP_LISTEN_PORT)
    address = ('127.0.0.1', UDP_LISTEN_PORT)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(address)
    while True:
        data, address = sock.recvfrom(BUFFER_SIZE)
        LOGGER.debug('received from %s: %s', address, data)
    sock.close()

class TheDaemon(Daemon):
    @staticmethod
    def run():
        thread = Thread(target=control)
        thread.start()
        push_notifications()

def init_logging():
    LOGGER.setLevel(logging.DEBUG)
    file_handler = logging.FileHandler('tt.log')
    file_handler.setLevel(logging.DEBUG)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.ERROR)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
    stream_handler.setFormatter(formatter)
    file_handler.setFormatter(formatter)
    LOGGER.addHandler(file_handler)
    LOGGER.addHandler(stream_handler)

def main():
    init_logging()
    commands = ['start', 'stop', 'restart']
    daemon = TheDaemon(PID_FILENAME)
    if len(sys.argv) == 2:
        command = sys.argv[1]
        if command in commands:
            getattr(daemon, command)()
        else:
            print 'Unknown command'
            sys.exit(2)
    else:
        print 'usage: {0} {1}'.format(sys.argv[0], '|'.join(commands))
        sys.exit(2)

if __name__ == '__main__':
    main()
The TCP and UDP handling code in the question had some errors. If you expect many TCP connections at the same time, you might want to handle each connection in its own thread (see the sketch just below).
The global statements did not have any effect.
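A rough illustration of that thread-per-connection idea (my own sketch, reusing the names from the rewrite above; handle_client is a helper I'm introducing):

def control():
    LOGGER.debug('start listen: %s', TCP_LISTEN_PORT)
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.bind(('127.0.0.1', TCP_LISTEN_PORT))
    server_socket.listen(5)
    while True:
        sock, address = server_socket.accept()
        # One worker thread per client, so a slow client cannot block the rest.
        Thread(target=handle_client, args=(sock, address)).start()

def handle_client(sock, address):
    with closing(sock):
        while True:
            data = sock.recv(BUFFER_SIZE)
            if not data:
                break
            LOGGER.debug('received from %s: %s', address, data)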
You can also use the multiprocessing module from the Python standard library:
import multiprocessing as mp

class Daemon(mp.Process):
    def __init__(self):
        mp.Process.__init__(self)

    def run(self):
        # everything the daemon should do goes here
        pass

def main():
    daemon = Daemon()
    daemon.daemon = True  # the process is terminated automatically when the main program exits
    daemon.start()
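Applied to the question, a minimal runnable sketch of that approach (my own; it reuses the two port numbers from the question and, for brevity, treats both listeners as UDP):

import multiprocessing as mp
import socket
import time

def udp_listener(port):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('127.0.0.1', port))
    while True:
        data = sock.recv(65535)
        print 'port %d received: %r' % (port, data)

def main():
    workers = [mp.Process(target=udp_listener, args=(port,))
               for port in (19999, 19966)]
    for worker in workers:
        worker.daemon = True  # daemon processes are terminated when main() exits
        worker.start()
    try:
        while True:  # keep the parent alive so the listeners keep running
            time.sleep(1)
    except KeyboardInterrupt:
        pass

if __name__ == '__main__':
    main()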

Logging with multiprocessing in Python

I am currently struggling to get a simple multiprocessing log working.
I am using the MultiProcessingLog handler from this answer: https://stackoverflow.com/a/894284/293195
I have a simple ConvertImage class which can write to the log, and that part works, but exceptions and tracebacks never appear in the log.
Does somebody know what the problem is here?
import os, traceback,logging
import multiprocessing, threading, logging, sys, traceback
from multiprocessing import Pool, Manager
from logging import FileHandler
class MultiProcessingLog(logging.Handler):
def __init__(self, name, mode):
logging.Handler.__init__(self)
self._handler = FileHandler(name, mode)
self.queue = multiprocessing.Queue(-1)
t = threading.Thread(target=self.receive)
t.daemon = True
t.start()
def setFormatter(self, fmt):
logging.Handler.setFormatter(self, fmt)
self._handler.setFormatter(fmt)
def receive(self):
while True:
try:
record = self.queue.get()
self._handler.emit(record)
except (KeyboardInterrupt, SystemExit):
raise
except EOFError:
break
except:
traceback.print_exc(file=sys.stderr)
def send(self, s):
self.queue.put_nowait(s)
def _format_record(self, record):
# ensure that exc_info and args
# have been stringified. Removes any chance of
# unpickleable things inside and possibly reduces
# message size sent over the pipe
if record.args:
record.msg = record.msg % record.args
record.args = None
if record.exc_info:
dummy = self.format(record)
record.exc_info = None
return record
def emit(self, record):
try:
s = self._format_record(record)
self.send(s)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def close(self):
self._handler.close()
logging.Handler.close(self)
class ConvertImage:
def __init__(self, logger=None):
self.logger = logger
def __call__(self,f):
self.process(f)
def process(self,f):
try:
logging.info("Process %i" % os.getpid() )
raise NameError("Stupid error")
except Exception as e:
logging.info("Exception: " + e.message)
exc_buffer = io.StringIO()
traceback.print_exc(file=exc_buffer)
logging.info(exc_buffer.getvalue())
raise e
except:
logging.info("Exception!")
exc_buffer = io.StringIO()
traceback.print_exc(file=exc_buffer)
logging.info(exc_buffer.getvalue())
raise
mpl = MultiProcessingLog("ImageProcessing.log", mode='w+')
mpl.setFormatter( logging.Formatter('%(asctime)s - %(lineno)d - %(levelname)-8s - %(message)s') )
logger = logging.getLogger()
logger.addHandler(mpl)
logger.setLevel(logging.DEBUG)
pool = Pool();
converter = ConvertImage()
# map converter.process function over all files
result = pool.map_async(converter, ["A","B","C"]);
pool.close();
pool.join()
logging.shutdown()
In ConvertImage you use the logging module's functions instead of the self.logger methods. Can you try the code below?
def process(self, f):
    try:
        self.logger.info("Process %i" % os.getpid())
        raise NameError("Stupid error")
    except Exception as e:
        self.logger.info("Exception: " + e.message)
        exc_buffer = io.StringIO()
        traceback.print_exc(file=exc_buffer)
        self.logger.info(exc_buffer.getvalue())
        raise e
    except:
        self.logger.info("Exception!")
        exc_buffer = io.StringIO()
        traceback.print_exc(file=exc_buffer)
        self.logger.info(exc_buffer.getvalue())
        raise
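One extra detail this assumes (my note, not part of the answer): the instance has to be created with the logger, e.g. converter = ConvertImage(logger), otherwise self.logger is None. Since the configured root logger here drags along handlers, locks and a queue that may not pickle cleanly when Pool ships the callable to the worker processes, an alternative sketch is to look the logger up inside the worker instead of storing it on the instance:

class ConvertImage:
    def __call__(self, f):
        self.process(f)

    def process(self, f):
        logger = logging.getLogger()  # root logger, already wired to MultiProcessingLog
        try:
            logger.info("Process %i" % os.getpid())
            raise NameError("Stupid error")
        except Exception:
            # logger.exception() records the message plus the full traceback.
            logger.exception("Conversion failed")
            raise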

Interact with executables over server in Python?

I want to run an executable on a TCP server, feed it input from socket connections interactively, and send the output back to the client until the executable terminates. I tried piping through the Popen class of subprocess, but it doesn't let me interact with the executable (it takes input only once, whereas I want input to be accepted continuously until the program exits).
Suppose I send the input "1" to the server; the server must send the stdout corresponding to input "1" back to the client, then ask for the next input, and keep doing this until the executable exits.
Just provide the socket as the standard input, output, and error of the subprocess. E.g.:
import socket
import subprocess

listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
listener.bind(('0.0.0.0', 0))
listener.listen(5)
print(listener.getsockname())
try:
    while True:
        client, addr = listener.accept()
        subprocess.Popen(['cat'], stdin=client, stdout=client, stderr=client)
        client.close()
except KeyboardInterrupt:
    pass
finally:
    listener.close()
This probably requires a POSIX-compliant operating system.
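To try it out, a throwaway client sketch (mine, not from the answer; PORT is a placeholder for whatever port the server prints on startup):

import socket

PORT = 12345  # placeholder: use the port shown by listener.getsockname() above
conn = socket.create_connection(('127.0.0.1', PORT))
conn.sendall('hello\n')  # forwarded to the subprocess's stdin
print conn.recv(4096)    # whatever the subprocess wrote back
conn.close()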
Here is an implementation using circuits:
server.py:
#!/usr/bin/env python
from uuid import uuid4 as uuid
from subprocess import Popen, PIPE
from circuits import handler, Component, Debugger, Event
from circuits.io import File
from circuits.net.sockets import TCPServer
from circuits.net.events import close, write
class kill(Event):
"""kill Event"""
class Command(Component):
channel = "cmd"
def __init__(self, sock, command, channel=channel):
super(Command, self).__init__(channel=channel)
self._sock = sock
self._command = command
self._buffer = None
self._p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE)
self._stdin = File(
self._p.stdin, channel="{0:s}.stdin".format(self.channel)
).register(self)
self._stdout = File(
self._p.stdout, channel="{0:s}.stdout".format(self.channel)
).register(self)
self.addHandler(
handler("eof", channel=self._stdout.channel)(self._on_stdout_eof)
)
self.addHandler(
handler("read", channel=self._stdout.channel)(self._on_stdout_read)
)
def write(self, data):
self.fire(write(data), self._stdin.channel)
def kill(self):
self._p.terminate()
self.unregister()
@staticmethod
def _on_stdout_eof(self):
self.fire(kill(), self.channel)
self.fire(close(self._sock), self.parent.channel)
@staticmethod
def _on_stdout_read(self, data):
self.fire(write(self._sock, data), "server")
class Server(Component):
channel = "server"
def init(self, bind, cmd):
self.cmd = cmd
self.clients = {}
TCPServer(bind).register(self)
def connect(self, sock, host, port):
command = Command(sock, self.cmd, channel=uuid()).register(self)
self.clients[sock] = command
def disconnect(self, sock):
command = self.clients[sock]
self.fire(kill(), command.channel)
del self.clients[sock]
def read(self, sock, data):
command = self.clients[sock]
self.fire(write(data), command.channel)
server = Server(("0.0.0.0", 8000), "python app.py")
Debugger().register(server)
server.run()
app.py:
#!/usr/bin/env python
from __future__ import print_function
import sys
def function1():
print("I am function 1!")
def function2():
print("I am function 2!")
def function3():
raise SystemExit(0)
MENU_OPTIONS = (
(1, "Function 1"),
(2, "Function 2"),
(3, "Function 3")
)
FUNCTIONS = {
1: function1,
2: function2,
3: function3
}
def main():
while True:
try:
print("Menu:")
for option, description in MENU_OPTIONS:
print("{0:d}) {1:s}".format(option, description))
print()
sys.stdout.flush()
choice = raw_input("> ")
try:
FUNCTIONS[int(choice)]()
except ValueError:
print("Invalid Input")
except (KeyboardInterrupt, EOFError):
raise SystemExit(0)
if __name__ == "__main__":
main()
For an example session (this example has been thoroughly tested):
Server Session: http://codepad.org/F7qZKdKa
Client Session: http://codepad.org/ss33wgAI
Have fun! :)
Note: I'm actually the developer/author of circuits. I thought this would be a nice example to write up.
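As a rough illustration of the client side (my own sketch, not part of the original answer), a plain socket client can drive the menu, assuming the server above is listening on port 8000:

import socket

conn = socket.create_connection(('127.0.0.1', 8000))
print conn.recv(4096)  # the menu printed by app.py
conn.sendall('1\n')    # choose "Function 1"
print conn.recv(4096)  # "I am function 1!" followed by the menu again
conn.close()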

How to get the status of a spawned process in Twisted Python?

I want to trigger many long-running processes continuously, and based on the status returned by each executed process I need to perform other tasks. In the example below I'm able to spawn processes, but I'm not able to capture the execution status of the spawned processes back in the main loop (i.e. in the CmdProtocol class).
I'm new to these Twisted Python concepts - can someone help me here?
import sys
from twisted.internet.protocol import ServerFactory, ProcessProtocol
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
from twisted.internet import protocol
import os
import signal
class MyPP(protocol.ProcessProtocol):
def __init__(self):
self.parent_id = os.getpid()
def connectionMade(self):
print "connectionMade!"
print "Parent id = %s" % self.parent_id
print "Child process id = %s" % self.transport.pid
def outReceived(self, data):
print "out", data
def errReceived(self, data):
print "error", data
def inConnectionLost(self):
print "inConnectionLost! stdin is closed! (we probably did it)"
print "Parent id = %s" % self.parent_id
print "Child process id closes STDIN= %s" % self.transport.pid
def outConnectionLost(self):
print "outConnectionLost! The child closed their stdout!"
print "Parent id = %s" % self.parent_id
print "Child process id closes STDOUT = %s" % self.transport.pid
def errConnectionLost(self):
print "errConnectionLost! The child closed their stderr."
print "Parent id = %s" % self.parent_id
print "Child process id closes ERRCONN = %s" % self.transport.pid
def processExited(self, reason):
print "processExited %s, status %d" % (self.transport.pid, reason.value.exitCode,)
def processEnded(self, reason):
print "%s processEnded, status %d" % (self.transport.pid, reason.value.exitCode,)
print "quitting"
class CmdProtocol(LineReceiver):
delimiter = '\n'
def connectionMade(self):
self.client_ip = self.transport.getPeer()
print "Client connection from %s" % self.client_ip
def processcmd(self):
pp = MyPP()
cmd = ['c:\Python27\python.exe', '-u', 'print_hi.py']
print "Calling processcmd - <%s>" % cmd
reactor.spawnProcess(pp, cmd[0], cmd[1:])
def connectionLost(self, reason):
print "Lost client connection. Reason: %s" % reason
def lineReceived(self, line):
if not line: return
# Parse the command
print 'Cmd received from %s : %s' % (self.client_ip, line)
commandParts = line.split()
if len(commandParts) > 0:
command = commandParts[0].lower()
args = commandParts[1:]
try:
print "Command received : <%s>" % command
method = getattr(self, command)
except AttributeError, e:
self.sendLine('Error: no such command.')
else:
try:
res = method()
print "Returned status:%s" % res
self.sendLine('Command executed successfully.')
except Exception, e:
self.sendLine('Error: ' + str(e))
def do_kill(self, pid):
"""kill: Kill a process (PID)"""
print 'Killing pid:%s' % pid
res = os.kill(int(pid), signal.SIGTERM)
print "Kill Status %s" % res
class MyFactory(ServerFactory):
protocol = CmdProtocol
def __init__(self):
print "Factory called"
reactor.listenTCP(8000, MyFactory())
reactor.run()
This is actually a very basic Python data structures question. You just need to refer to an instance of CmdProtocol from an instance of MyPP. Since CmdProtocol is what constructs MyPP in the first place, this is easy. Just change the construction of MyPP to look like this:
def processcmd(self):
    pp = MyPP(self)
and then MyPP.__init__ to look like this:
def __init__(self, cmd_protocol):
    self.parent_id = os.getpid()
    self.cmd_protocol = cmd_protocol
Then, in any method on MyPP, you can access the relevant CmdProtocol instance with self.cmd_protocol.
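For example (a sketch of mine, not from the answer), the process-ended callback can push the exit status straight back to the connected client:

class MyPP(protocol.ProcessProtocol):
    def __init__(self, cmd_protocol):
        self.parent_id = os.getpid()
        self.cmd_protocol = cmd_protocol

    def processEnded(self, reason):
        status = reason.value.exitCode
        print "%s processEnded, status %s" % (self.transport.pid, status)
        # Report back over the TCP connection that triggered the command.
        self.cmd_protocol.sendLine('process %s finished with status %s' % (self.transport.pid, status))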

Still can't stop the Thread

I want to stop the thread with a simple Ctrl+C, but data keeps coming and coming, so I have to close the window and start it again. Please help me fix this. To see what I'm talking about, just put your Twitter username and password in user and pswd.
import threading
import random
import time
import Queue
import urllib2
import sys
import simplejson, pycurl
import sys, signal
queue = Queue.Queue()
keep_running = True
user = "" # username
pswd = "" # password
headers = [ "Content-Type: text/xml; charset: UTF-8; "]
class MyThread(threading.Thread):
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
self.buffer = ""
self.streaming_url = "https://stream.twitter.com/1/statuses/filter.json?track=justinbieber"
def start_crawl(self, user, pswd):
self.conn = pycurl.Curl()
self.conn.setopt(pycurl.USERPWD, "%s:%s" % (user, pswd))
self.conn.setopt(pycurl.URL, self.streaming_url)
#self.conn.setopt(pycurl.POST, 1)
self.conn.setopt(pycurl.HTTPHEADER, headers)
self.conn.setopt(pycurl.READFUNCTION, self.storage)
self.conn.setopt(pycurl.VERBOSE, 1)
self.conn.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
self.conn.perform()
def storage(self, data):
self.buffer += str(data)
if isinstance(self.buffer, str):
if self.buffer.endswith("\r\n") and self.buffer.strip():
content = json.loads(self.buffer)
self.buffer = ""
self.queue.put(content)
self.queue.task_done()
#if "text" in content:
#print u"{0[user][name]}: {0[text]}".format(content)
else:
return data
def run(self):
while keep_running:
self.start_crawl(user,pswd)
if signal.signal(signal.SIGINT, signal.SIG_DFL):
sys.exit()
#line = self.names[random.randint(0,len(self.names)-1)]
#queue.put(line)
class Starter():
def __init__(self):
self.queue = queue
t = MyThread(self.queue)
t.start()
self.next()
def next(self):
while True:
time.sleep(0.1)
if not self.queue.empty():
line = self.queue.get(timeout=0.2)
print line, self.queue.qsize()
else:
print 'waiting for queue'
def main():
try:
Starter()
queue.join()
except KeyboardInterrupt, e:
print 'Stopping'
global keep_running
keep_running = False
#Join all existing threads to main thread.
for thread in threading.enumerate():
if thread is not threading.currentThread():
thread.join()
sys.exit(1)
main()
Set the thread as a daemon and it will be killed together with your program:
import threading
import random
import time
import Queue
import urllib2
import sys
import simplejson, pycurl
import sys, signal
queue = Queue.Queue()
keep_running = True
user = "" # username
pswd = "" # password
headers = [ "Content-Type: text/xml; charset: UTF-8; "]
class MyThread(threading.Thread):
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
self.buffer = ""
self.streaming_url = "https://stream.twitter.com/1/statuses/filter.json?track=justinbieber"
def start_crawl(self, user, pswd):
self.conn = pycurl.Curl()
self.conn.setopt(pycurl.USERPWD, "%s:%s" % (user, pswd))
self.conn.setopt(pycurl.URL, self.streaming_url)
#self.conn.setopt(pycurl.POST, 1)
self.conn.setopt(pycurl.HTTPHEADER, headers)
self.conn.setopt(pycurl.READFUNCTION, self.storage)
self.conn.setopt(pycurl.VERBOSE, 1)
self.conn.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
self.conn.perform()
def storage(self, data):
self.buffer += str(data)
if isinstance(self.buffer, str):
if self.buffer.endswith("\r\n") and self.buffer.strip():
content = json.loads(self.buffer)
self.buffer = ""
self.queue.put(content)
self.queue.task_done()
#if "text" in content:
#print u"{0[user][name]}: {0[text]}".format(content)
else:
return data
def run(self):
while keep_running:
self.start_crawl(user,pswd)
if signal.signal(signal.SIGINT, signal.SIG_DFL):
sys.exit()
#line = self.names[random.randint(0,len(self.names)-1)]
#queue.put(line)
class Starter():
def __init__(self):
self.queue = queue
t = MyThread(self.queue)
t.daemon = True
t.start()
self.next()
def next(self):
while True:
time.sleep(0.1)
if not self.queue.empty():
line = self.queue.get(timeout=0.2)
print line, self.queue.qsize()
else:
print 'waiting for queue'
def main():
try:
Starter()
queue.join()
except KeyboardInterrupt, e:
print 'Stopping'
raise
main()
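For clarity, the only substantive change from the code in the question is in Starter.__init__, where the thread is marked as a daemon before it is started (main() also simply re-raises the KeyboardInterrupt instead of joining the threads):

t = MyThread(self.queue)
t.daemon = True  # daemon threads are killed as soon as the main thread exits
t.start()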
