I'm fairly new to both Python and Twisted, so I may just not be understanding things properly, but I seem to be stuck at a point where I need help.
What I want to do is use ReconnectingClientFactory on an SSL connection. I have it all running, but if the connection is dropped all data sent to the transport's write() method is simply dropped without any error. The actual method called is twisted.protocols.tls.TLSMemoryBIOProtocol.write().
Here's what I think is happening, starting from a working connection (sketched in code below the list):
connection is lost
call to write() method (source code here) with some data
self.disconnecting is False so data passes to _write() method
_write() reaches the _lostTLSConnection check, which is True, so it simply returns
connection is regained, but no data is sent because it was never buffered anywhere
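Roughly, the path I'm describing behaves like this (a paraphrase of the behaviour above, not the actual TLSMemoryBIOProtocol source):
class TLSWriteSketch(object):
    # Paraphrased sketch only: names mirror the attributes mentioned above.
    disconnecting = False
    _lostTLSConnection = True   # set once the connection goes away

    def write(self, data):
        if self.disconnecting:          # still False here
            return
        self._write(data)

    def _write(self, data):
        if self._lostTLSConnection:     # True after the drop...
            return                      # ...so the data is silently discarded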
Here's a reduced version of the client:
from OpenSSL import SSL
from twisted.internet.protocol import (Protocol, ReconnectingClientFactory)
from twisted.internet import (reactor, ssl)
import struct
class MetricsServer(Protocol):
streambuffer = bytearray()
def connectionMade(self):
self.transport.setTcpKeepAlive(True) # maintain the TCP connection
self.transport.setTcpNoDelay(False) # allow Nagle algorithm
print("connected to server")
def dataReceived(self, data):
print("from server:", data)
def connectionLost(self, reason):
self.connected = 0
print("server connection lost:", reason)
class MetricsServerFactory(ReconnectingClientFactory):
protocol = MetricsServer
maxDelay = 300 # maximum seconds between retries
factor = 1.6180339887498948
packet_sequence_number = 0
active_connection = None
def buildProtocol(self, addr):
self.resetDelay()
if self.active_connection == None:
self.active_connection = self.protocol()
return self.active_connection
def get_packet_sequence_number(self):
self.packet_sequence_number += 1
return self.packet_sequence_number
def send_data(self):
print ("sending ssl packet")
packet = struct.pack("!I", self.get_packet_sequence_number())
self.active_connection.transport.write(packet)
reactor.callLater(1.0, metrics_server.send_data)
class CtxFactory(ssl.ClientContextFactory):
def getContext(self):
self.method = SSL.TLSv1_METHOD
ctx = ssl.ClientContextFactory.getContext(self)
ctx.use_certificate_file('keys/client.crt')
ctx.use_privatekey_file('keys/client.key')
def verifyCallback(connection, x509, errnum, errdepth, ok):
return bool(ok)
ctx.set_verify(SSL.VERIFY_PEER, verifyCallback)
ctx.load_verify_locations("keys/ca.pem")
return ctx
if __name__ == "__main__":
metrics_server = MetricsServerFactory()
reactor.connectSSL('localhost', 8000, metrics_server, CtxFactory())
reactor.callLater(3.0, metrics_server.send_data)
reactor.run()
And here's a simple server that outputs the data it receives:
from OpenSSL import SSL
from twisted.internet import ssl, reactor
from twisted.internet.protocol import Factory, Protocol
class Echo(Protocol):
sent_back_data = False
def dataReceived(self, data):
print(' '.join("{0:02x}".format(x) for x in data))
def verifyCallback(connection, x509, errnum, errdepth, ok):
return bool(ok)
if __name__ == '__main__':
factory = Factory()
factory.protocol = Echo
myContextFactory = ssl.DefaultOpenSSLContextFactory(
'keys/server.key', 'keys/server.crt'
)
ctx = myContextFactory.getContext()
ctx.set_verify(
SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
verifyCallback
)
ctx.load_verify_locations("keys/ca.pem")
reactor.listenSSL(8000, factory, myContextFactory)
reactor.run()
Process to recreate issue:
first you need to generate your own certs and CA for this to work
run the server first
run the client code
wait for some output on the server end and then end the program
notice the client continues to try and send data
restart the server end
notice the server end will continue receiving packets, but packets sent when the connection was lost are simply dropped
As a workaround, I tried implementing my own buffer to send the data on reconnection, but ran into another issue. I want to send the data when the connection is re-established, and the only hook I can see is Protocol.connectionMade(). However, that method is called before the TLS handshake is actually finished, so the write ends up being caught by an exception handler in _write() and placed into another buffer to be sent later. But that buffer only seems to be flushed if data is received from the other end (which doesn't happen very often in my case, and would also mean the data could arrive at the other end in the wrong order, because a write() may be called before any data is received). I also think another disconnection before data is received would cause that buffer to just be wiped.
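One possible hook, on Twisted versions new enough to ship it (16.4+), is twisted.internet.interfaces.IHandshakeListener: its handshakeCompleted() fires only after the TLS handshake finishes. A rough sketch, reusing the factory-level send_buffer and the same factory reach-through as the workaround code below:
from zope.interface import implementer
from twisted.internet.interfaces import IHandshakeListener
from twisted.internet.protocol import Protocol

@implementer(IHandshakeListener)
class MetricsServer(Protocol):
    def handshakeCompleted(self):
        # Called by the TLS transport once the handshake is done, so a
        # write issued here is not queued behind the handshake.
        # (Same factory reach-through as in connectionMade() below.)
        buf = self.transport.factory.wrappedFactory.send_buffer
        if len(buf) > 0:
            self.transport.write(bytes(buf))
            buf.clear()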
EDIT: added sample code for the first issue. It's probably odd that I have that active_connection in the Factory, but I am trying to make it work as a singleton.
Okay, I figured out the problem with my workaround... I was passing a bytearray to be written to the transport and then immediately clearing it, not realizing that the write was deferred until after I had cleared the buffer. So now I pass a copy of the bytearray, and it seems to be working.
It still doesn't seem quite right that every call to write() has to be preceded by a check to see whether it's connected, as the whole idea of ReconnectingClientFactory is that it handles maintaining the connection for you. Also, I think it's possible to lose the connection between that if statement and when the write() actually runs, so it's still possible to lose data.
from OpenSSL import SSL
from twisted.internet.protocol import (Protocol, ReconnectingClientFactory)
from twisted.internet import (reactor, ssl)
import struct
class MetricsServer(Protocol):
streambuffer = bytearray()
def connectionMade(self):
self.transport.setTcpKeepAlive(True) # maintain the TCP connection
self.transport.setTcpNoDelay(False) # allow Nagle algorithm
print("connected to server")
if len(self.transport.factory.wrappedFactory.send_buffer) > 0:
self.transport.write(bytes(self.transport.factory.wrappedFactory.send_buffer))
self.transport.factory.wrappedFactory.send_buffer.clear()
def dataReceived(self, data):
print("from server:", data)
def connectionLost(self, reason):
self.connected = 0
print("server connection lost:", reason)
class MetricsServerFactory(ReconnectingClientFactory):
protocol = MetricsServer
maxDelay = 300 # maximum seconds between retries
factor = 1.6180339887498948
packet_sequence_number = 0
active_connection = None
send_buffer = bytearray()
def buildProtocol(self, addr):
self.resetDelay()
if self.active_connection == None:
self.active_connection = self.protocol()
return self.active_connection
def get_packet_sequence_number(self):
self.packet_sequence_number += 1
return self.packet_sequence_number
def send_data(self):
print ("sending ssl packet")
packet = struct.pack("!I", self.get_packet_sequence_number())
if self.active_connection and self.active_connection.connected:
self.active_connection.transport.write(packet)
else:
self.send_buffer.extend(packet)
reactor.callLater(1.0, metrics_server.send_data)
class CtxFactory(ssl.ClientContextFactory):
def getContext(self):
self.method = SSL.TLSv1_METHOD
ctx = ssl.ClientContextFactory.getContext(self)
ctx.use_certificate_file('keys/client.crt')
ctx.use_privatekey_file('keys/client.key')
def verifyCallback(connection, x509, errnum, errdepth, ok):
return bool(ok)
ctx.set_verify(SSL.VERIFY_PEER, verifyCallback)
ctx.load_verify_locations("keys/ca.pem")
return ctx
if __name__ == "__main__":
metrics_server = MetricsServerFactory()
reactor.connectSSL('localhost', 8000, metrics_server, CtxFactory())
reactor.callLater(3.0, metrics_server.send_data)
reactor.run()
Related
I'm trying to simulate a situation where data is received by a server periodically. In my setup, I run one process that sets up the server and another process that sets up a bunch of clients (it suffices to think of a single client). I have put some of the code together from bits and pieces, mostly from here. The server and clients communicate by sending messages using transport.write. First, the server tells the clients to start (this works fine, AFAIK). The clients then report back to the server as they make progress. What has me confused is that I only get these intermittent progress messages at the very end, when the client is done. This could be a problem with buffer flushing, and I tried (unsuccessfully) things like this. Also, each message is pretty large, and I tried sending the same message multiple times so the buffer would get flushed.
I suspect what I am seeing is a problem with returning control to the transport, but I can't figure out how to do away with it.
Any help with this or any other issues that jump out at you is much appreciated.
Server:
from twisted.internet import reactor, protocol
import time
import serverSideAnalysis
import pdb
#import bson, json, msgpack
import _pickle as pickle # I expect the users to authenticate and not
# do anything malicious.
PORT = 9000
NUM = 1
local_scratch="/local/scratch"
class Hub(protocol.Protocol):
def __init__(self,factory, clients, nclients):
self.clients = clients
self.nclients = nclients
self.factory = factory
self.dispatcher = serverSideAnalysis.ServerTalker(NUM, self,
local_scratch)
def connectionMade(self):
print("connected to user" , (self))
if len(self.clients) < self.nclients:
self.factory.clients.append(self)
else:
self.factory.clients[self.nclients] = self
if len(self.clients) == NUM:
val = input("Looks like everyone is here, shall we start? (Y/N)")
while (val.upper() != "Y"):
time.sleep(20)
val = input("Looks like everyone is here, shall we start??? (Y/N)")
message = pickle.dumps({"TASK": "INIT", "SUBTASK":"STORE"})
self.message(message) # This reaches the client as I had expected
def message(self, command):
for c in self.factory.clients:
c.transport.write(command)
def connectionLost(self, reason):
self.factory.clients.remove(self)
self.nclients -= 1
def dataReceived(self, data):
if len(self.clients) == NUM:
self.dispatcher.dispatch(data)
class PauseTransport(protocol.Protocol):
def makeConnection(self, transport):
transport.pauseProducing()
class HubFactory(protocol.Factory):
def __init__(self, num):
self.clients = set([])
self.nclients = 0
self.totConnections = num
def buildProtocol(self, addr):
print(self.nclients)
if self.nclients < self.totConnections:
self.nclients += 1
return Hub(self, self.clients, self.nclients)
protocol = PauseTransport()
protocol.factory = self
return protocol
factory = HubFactory(NUM)
reactor.listenTCP(PORT, factory)
factory.clients = []
reactor.run()
Client:
from twisted.internet import reactor, protocol
import time
import clientSideAnalysis
import sys
HOST = 'localhost'
PORT = 9000
local_scratch="/local/scratch"
class MyClient(protocol.Protocol):
def connectionMade(self):
print("connected!")
self.factory.clients.append(self)
print ("clients are ", self.factory.clients)
self.cdispatcher = clientSideAnalysis.ServerTalker(analysis_file_name, local_scratch, self)
def clientConnectionLost(self, reason):
#TODO send warning
self.factory.clients.remove(self)
def dataReceived(self, data): #This is the problematic part I think
self.cdispatcher.dispatch(data)
print("1 sent")
time.sleep(10)
self.cdispatcher.dispatch(data)
print("2 sent")
time.sleep(10)
self.cdispatcher.dispatch(data)
time.sleep(10)
def message(self, data):
self.transport.write(data)
class MyClientFactory(protocol.ClientFactory):
protocol = MyClient
if __name__=="__main__":
analysis_file_name = sys.argv[1]
factory = MyClientFactory()
reactor.connectTCP(HOST, PORT, factory)
factory.clients = []
reactor.run()
The last bit of relevant information about what the dispatchers do.
In both cases, they load the message that has arrived (a dictionary) and do a few computations based on its content. Every once in a while, they use the message method to communicate their current values.
Finally, I'm using Python 3.6 and Twisted 18.9.0.
The way you return control to the reactor from a Protocol.dataReceived method is to return from that method. For example:
def dataReceived(self, data):
self.cdispatcher.dispatch(data)
print("1 sent")
If you want more work to happen after this, you have some options. If you want the work to happen after some amount of time has passed, use reactor.callLater. If you want the work to happen after it is dispatched to another thread, use twisted.internet.threads.deferToThread. If you want the work to happen in response to some other event (for example, data being received), put it in the callback that handles that event (for example, dataReceived).
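For instance, a rough sketch of the first two options (some_blocking_work and handle_result are placeholders, not names from your code):
from twisted.internet import reactor
from twisted.internet.threads import deferToThread

def dataReceived(self, data):
    self.cdispatcher.dispatch(data)
    # Option 1: run more work after a delay, without blocking the reactor.
    reactor.callLater(10, self.cdispatcher.dispatch, data)
    # Option 2: push blocking work to a thread pool and handle the result
    # back in the reactor thread (some_blocking_work and handle_result are
    # placeholders for whatever processing you need).
    d = deferToThread(some_blocking_work, data)
    d.addCallback(self.handle_result)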
I'm using Autobahn to connect to a websocket like this.
class MyComponent(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
print("session ready")
def oncounter(*args, **args2):
print("event received: args: {} args2: {}".format(args, args2))
try:
yield self.subscribe(oncounter, u'topic')
print("subscribed to topic")
except Exception as e:
print("could not subscribe to topic: {0}".format(e))
if __name__ == '__main__':
addr = u"wss://mywebsocketaddress.com"
runner = ApplicationRunner(url=addr, realm=u"realm1", debug=False, debug_app=False)
runner.run(MyComponent)
This works great, and I am able to receive messages. However, after around 3-4 hours, sometimes much sooner, the messages abruptly stop coming. It appears that the websocket times out (does that happen?), possibly due to connection issues.
How can I auto-reconnect with autobahn when that happens?
Here's my attempt, but the reconnecting code is never called.
class MyClientFactory(ReconnectingClientFactory, WampWebSocketClientFactory):
maxDelay = 10
maxRetries = 5
def startedConnecting(self, connector):
print('Started to connect.')
def clientConnectionLost(self, connector, reason):
print('Lost connection. Reason: {}'.format(reason))
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionFailed(self, connector, reason):
print('Connection failed. Reason: {}'.format(reason))
ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
class MyApplicationRunner(object):
log = txaio.make_logger()
def __init__(self, url, realm, extra=None, serializers=None,
debug=False, debug_app=False,
ssl=None, proxy=None):
assert(type(url) == six.text_type)
assert(realm is None or type(realm) == six.text_type)
assert(extra is None or type(extra) == dict)
assert(proxy is None or type(proxy) == dict)
self.url = url
self.realm = realm
self.extra = extra or dict()
self.serializers = serializers
self.debug = debug
self.debug_app = debug_app
self.ssl = ssl
self.proxy = proxy
def run(self, make, start_reactor=True):
if start_reactor:
# only select framework, set loop and start logging when we are asked
# start the reactor - otherwise we are running in a program that likely
# already took care of all this.
from twisted.internet import reactor
txaio.use_twisted()
txaio.config.loop = reactor
if self.debug or self.debug_app:
txaio.start_logging(level='debug')
else:
txaio.start_logging(level='info')
isSecure, host, port, resource, path, params = parseWsUrl(self.url)
# factory for use ApplicationSession
def create():
cfg = ComponentConfig(self.realm, self.extra)
try:
session = make(cfg)
except Exception as e:
if start_reactor:
# the app component could not be created .. fatal
self.log.error(str(e))
reactor.stop()
else:
# if we didn't start the reactor, it's up to the
# caller to deal with errors
raise
else:
session.debug_app = self.debug_app
return session
# create a WAMP-over-WebSocket transport client factory
transport_factory = MyClientFactory(create, url=self.url, serializers=self.serializers,
proxy=self.proxy, debug=self.debug)
# supress pointless log noise like
# "Starting factory <autobahn.twisted.websocket.WampWebSocketClientFactory object at 0x2b737b480e10>""
transport_factory.noisy = False
# if user passed ssl= but isn't using isSecure, we'll never
# use the ssl argument which makes no sense.
context_factory = None
if self.ssl is not None:
if not isSecure:
raise RuntimeError(
'ssl= argument value passed to %s conflicts with the "ws:" '
'prefix of the url argument. Did you mean to use "wss:"?' %
self.__class__.__name__)
context_factory = self.ssl
elif isSecure:
from twisted.internet.ssl import optionsForClientTLS
context_factory = optionsForClientTLS(host)
from twisted.internet import reactor
if self.proxy is not None:
from twisted.internet.endpoints import TCP4ClientEndpoint
client = TCP4ClientEndpoint(reactor, self.proxy['host'], self.proxy['port'])
transport_factory.contextFactory = context_factory
elif isSecure:
from twisted.internet.endpoints import SSL4ClientEndpoint
assert context_factory is not None
client = SSL4ClientEndpoint(reactor, host, port, context_factory)
else:
from twisted.internet.endpoints import TCP4ClientEndpoint
client = TCP4ClientEndpoint(reactor, host, port)
d = client.connect(transport_factory)
# as the reactor shuts down, we wish to wait until we've sent
# out our "Goodbye" message; leave() returns a Deferred that
# fires when the transport gets to STATE_CLOSED
def cleanup(proto):
if hasattr(proto, '_session') and proto._session is not None:
if proto._session.is_attached():
return proto._session.leave()
elif proto._session.is_connected():
return proto._session.disconnect()
# when our proto was created and connected, make sure it's cleaned
# up properly later on when the reactor shuts down for whatever reason
def init_proto(proto):
reactor.addSystemEventTrigger('before', 'shutdown', cleanup, proto)
return proto
# if we connect successfully, the arg is a WampWebSocketClientProtocol
d.addCallback(init_proto)
# if the user didn't ask us to start the reactor, then they
# get to deal with any connect errors themselves.
if start_reactor:
# if an error happens in the connect(), we save the underlying
# exception so that after the event-loop exits we can re-raise
# it to the caller.
class ErrorCollector(object):
exception = None
def __call__(self, failure):
self.exception = failure.value
reactor.stop()
connect_error = ErrorCollector()
d.addErrback(connect_error)
# now enter the Twisted reactor loop
reactor.run()
# if we exited due to a connection error, raise that to the
# caller
if connect_error.exception:
raise connect_error.exception
else:
# let the caller handle any errors
return d
The error I'm getting:
2016-10-09T21:00:40+0100 Connection to/from tcp4:xxx.xx.xx.xx:xxx was lost in a non-clean fashion: Connection lost
2016-10-09T21:00:40+0100 _connectionLost: [Failure instance: Traceback (failure with no frames): : Connection to the other side was lost in a non-clean fashion: Connection lost.
]
2016-10-09T21:00:40+0100 WAMP-over-WebSocket transport lost: wasClean=False, code=1006, reason="connection was closed uncleanly (peer dropped the TCP connection without previous WebSocket closing handshake)"
2016-10-09T21:10:39+0100 EXCEPTION: no messages received
2016-10-09T21:10:39+0100 Traceback (most recent call last):
You can use a ReconnectingClientFactory if you're using Twisted. There's a simple example by the autobahn developer on GitHub. Unfortunately, there doesn't seem to be an implementation of an ApplicationRunner that comes pre-built with this functionality, but it doesn't look too difficult to implement on your own. Here's an asyncio variant which should be straightforward to port to Twisted. You could also follow this issue, as it seems there is a desire by the dev team to incorporate a reconnecting client.
Have you tried pip3 install autobahn-autoreconnect?
# from autobahn.asyncio.wamp import ApplicationRunner
from autobahn_autoreconnect import ApplicationRunner
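A minimal sketch of how it is meant to be used (assuming the package's ApplicationRunner is a drop-in replacement for the stock asyncio one, as the import swap above suggests):
from autobahn_autoreconnect import ApplicationRunner

if __name__ == '__main__':
    runner = ApplicationRunner(url=u"wss://mywebsocketaddress.com", realm=u"realm1")
    runner.run(MyComponent)  # reconnects automatically when the transport drops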
Here is an example of an automatically reconnecting ApplicationRunner. The important line to enable auto-reconnect is:
runner.run(session, auto_reconnect=True)
You will also want to activate automatic WebSocket ping/pong (e.g. in Crossbar.io) to (a) minimize connection drops because of timeouts and (b) allow fast detection of lost connections.
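For example, with a reasonably recent Autobahn under Twisted (the auto_reconnect keyword is not available in very old releases), something along these lines:
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner

class MyComponent(ApplicationSession):
    def onJoin(self, details):
        print("session ready")

if __name__ == '__main__':
    runner = ApplicationRunner(u"wss://mywebsocketaddress.com", u"realm1")
    # the important part: let the runner rebuild the transport on loss
    runner.run(MyComponent, auto_reconnect=True)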
I'm trying to create a small program that will log information output from a device via TCP.
Basically, the device just streams data out that I want to capture and dump into a database for dealing with later.
But the device reboots, so I need to be able to reconnect when the socket closes, without any human interference.
So this is what I have so far:
import socket, time, logging, sys, smtplib # Import socket module
logging.basicConfig(filename='Tcplogger.log',level=logging.DEBUG,format='%(asctime)s : %(levelname)s : %(message)s')
logging.info('|--------------------------------------|')
logging.info('|--------------- TCP Logger Starting---|')
logging.info('|--------------------------------------|')
host = '127.0.0.1' # host or IP address
port = 12345 # output port
retrytime = 1 # reconnect time
reconnectattemps = 10 # Number of time to try and reconnect
class TCPLogger:
def __init__(self):
logging.debug('****Trying connection****')
print('****Trying connection****')
self.initConnection()
def initConnection(self):
s = socket.socket()
try:
s.connect((host, port))
logging.debug('****Connected****')
except IOError as e:
while 1:
reconnectcount = 0;
logging.error(format(e.errno)+' : '+format(e.strerror))
while 1:
reconnectcount = reconnectcount + 1
logging.error('Retrying connection to Mitel attempt : '+str(reconnectcount))
try:
s.connect((host, port))
connected = True
logging.debug('****Connected****')
except IOError as e:
connected = False
logging.error(format(e.errno)+' : '+format(e.strerror))
if reconnectcount == reconnectattemps:
logging.error('******####### Max Reconnect attempts reached logger will Terminate ######******')
sys.exit("could Not connect")
time.sleep(retrytime)
if connected == True:
break
break
while 1:
s.recv(1034)
LOGGER= TCPLogger()
This all works fine on startup: if I try to connect and the server isn't there, it will retry the number of times set by reconnectattemps.
But here is my issue:
while 1:
s.recv(1034)
When this fails, I want to try to reconnect. I could of course type out or just copy my connection code again, but what I want to be able to do is call a function that handles the connection and retrying, and hands me back the connection object.
For example, something like this:
class tcpclient
#set some var
host, port etc....
def initconnection:
connect to socket and retry if needed
RETURN SOCKET
def dealwithdata:
initconnection()
while 1:
try:
s.recv
do stuff here copy to db
except:
log error
initconnection()
I think this is possible, but I'm really not getting how the class/method system works in Python, so I think I'm missing something here.
FYI, just in case you didn't notice, I'm very new to Python. Any other comments on what I already have are welcome too :)
Thanks
Aj
Recommendation
For this use case I would recommend something higher-level than sockets. Why? Handling all these exceptions and errors yourself can be irritating when you just want to retrieve or send data and maintain the connection.
Of course you can achieve what you want with your plain-socket solution, but you'll mess with the code a bit more, I think. Either way, it will look similar to the class amustafa wrote, with socket errors handled by a close/reconnect method, etc.
Example
I made an example of an easier solution using the asyncore module:
import asyncore
import socket
from time import sleep
class Client(asyncore.dispatcher_with_send):
def __init__(self, host, port, tries_max=5, tries_delay=2):
asyncore.dispatcher.__init__(self)
self.host, self.port = host, port
self.tries_max = tries_max
self.tries_done = 0
self.tries_delay = tries_delay
self.end = False # Flag that indicates whether socket should reconnect or quit.
self.out_buffer = '' # Buffer for sending.
self.reconnect() # Initial connection.
def reconnect(self):
if self.tries_done == self.tries_max:
self.end = True
return
print 'Trying connecting in {} sec...'.format(self.tries_delay)
sleep(self.tries_delay)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.connect((self.host, self.port))
except socket.error:
pass
if not self.connected:
self.tries_done += 1
print 'Could not connect for {} time(s).'.format(self.tries_done)
def handle_connect(self):
self.tries_done = 0
print 'We connected and can get the stuff done!'
def handle_read(self):
data = self.recv(1024)
if not data:
return
# Check for terminator. Can be any action instead of this clause.
if 'END' in data:
self.end = True # Everything went good. Shutdown.
else:
print data # Store to DB or other thing.
def handle_close(self):
print 'Connection closed.'
self.close()
if not self.end:
self.reconnect()
Client('localhost', 6666)
asyncore.loop(timeout=1)
The reconnect() method is in some ways the core of this case - it's called whenever a connection needs to be made: when the class initializes or when the connection breaks.
handle_read() processes any received data; here you log it or do whatever else you need.
You can even send data using the buffer (self.out_buffer += 'message'), which survives a reconnection untouched, so the class will resume sending once it is connected again.
Setting self.end to True tells the class to quit when possible.
asyncore takes care of exceptions and calls handle_close() when such events occur, which is a convenient way of dealing with connection failures.
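If you want to try the class above without the real device, a throwaway feed like this is enough (the port and payload are arbitrary, just matching the Client('localhost', 6666) call):
import socket
import time

# Throwaway test feed: accepts one client and streams a few lines,
# ending with the 'END' terminator the Client above looks for.
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('localhost', 6666))
server.listen(1)
conn, addr = server.accept()
try:
    for i in range(5):
        conn.sendall(b'reading %d\n' % i)
        time.sleep(1)
    conn.sendall(b'END')
finally:
    conn.close()
    server.close()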
You should look at the Python documentation to understand how classes and methods work. The biggest difference between Python methods and methods in most other languages is the addition of the "self" argument: self represents the instance a method is called on and is passed in automatically by Python. So:
class TCPClient():
def __init__(self, host, port, retryAttempts=10 ):
#this is the constructor that takes in host and port. retryAttempts is given
# a default value but can also be fed in.
self.host = host
self.port = port
self.retryAttempts = retryAttempts
self.socket = None
def connect(self, attempt=0):
if attempt < self.retryAttempts:
#put connecting code here
if connectionFailed:
self.connect(attempt+1)
def disconnectSocket(self):
#perform all breakdown operations
...
self.socket = None
def sendDataToDB(self, data):
#send data to db
def readData(self):
#read data here
while True:
if self.socket is None:
self.connect()
...
Just make sure you properly disconnect the socket and set it to None.
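As a rough, untested sketch of how that skeleton could be filled in with plain sockets (the host/port values and retry delay are placeholders):
import socket
import time

class TCPClient(object):
    def __init__(self, host, port, retryAttempts=10, retryDelay=1):
        self.host = host
        self.port = port
        self.retryAttempts = retryAttempts
        self.retryDelay = retryDelay
        self.socket = None

    def connect(self):
        # Try to (re)connect, sleeping between attempts.
        for attempt in range(1, self.retryAttempts + 1):
            try:
                self.socket = socket.create_connection((self.host, self.port))
                return self.socket
            except IOError as e:
                print('attempt %d failed: %s' % (attempt, e))
                time.sleep(self.retryDelay)
        raise IOError('could not connect after %d attempts' % self.retryAttempts)

    def readData(self):
        if self.socket is None:
            self.connect()
        while True:
            try:
                data = self.socket.recv(1024)
                if not data:            # peer closed the connection
                    raise IOError('connection closed')
                # ... store data in the database here ...
            except IOError:
                self.socket = None
                self.connect()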
I have the following situation:
SomeServer(S) <-> (C)MyApp(S) <-> (C)User
(S) represents a server socket
(C) represents a client socket
Essentially, MyApp initiates communication with SomeServer (SomeServer(S) <-> (C)MyApp) and once some authentication routines are successful MyApp(S) starts waiting for (C)User to connect. As soon as User connects, MyApp relays data from SomeServer to User. This happens in both directions.
I have SomeServer(S) <-> (C)MyApp working perfectly, but I'm not able to get MyApp(S) <-> (C)User working. I get as far as User connecting to MyApp(S), but can't get data relayed!
Ok, I hope that's somewhat clear ;) Now let me show my code for MyApp. Btw, the implementations of SomeServer and User are not relevant to my question, as neither can be modified.
I have commented my code to indicate where I'm experiencing issues. Oh, I should also mention that I have no problem scrapping the whole "Server Section" for some other code if necessary. This is a POC, so my main focus is getting the functionality working rather than writing efficient code. Thanks for your time.
''' MyApp.py module '''
import asyncore, socket
import SSL
# Client Section
# Connects to SomeServer
class MyAppClient(asyncore.dispatcher):
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((host, port))
connectionPhase = 1
def handle_read(self):
print "connectionPhase =", self.connectionPhase
# The following IF statements may not make sense
# as I have removed code irrelevant to this question
if self.connectionPhase < 3: # authentication phase
data = self.recv(1024)
print 'Received:', data
# Client/Server authentication is handled here
# Everything from this point on happens over
# an encrypted socket using SSL
# Start the RelayServer listening on localhost 8080
# self.socket is encrypted and is the socket communicating
# with SomeServer
rs = RelayServer(('localhost', 8080), self.socket)
print 'RelayServer started'
# connectionPhase = 3 when this IF loop is done
elif self.connectionPhase == 3: # receiving data for User
data = self.recv(1024)
print 'Received data - forward to User:', data
# Forward this data to User
# Don't understand why data is being read here
# when the RelayServer was instantiated above
# Server Section
# Connects to User
class RelayConnection(asyncore.dispatcher):
def __init__(self, client, sock):
asyncore.dispatcher.__init__(self)
self.client = client
print "connecting to %s..." % str(sock)
def handle_connect(self):
print "connected."
# Allow reading once the connection
# on the other side is open.
self.client.is_readable = True
# For some reason this never runs, i.e. data from SomeServer
# isn't read here, but instead in MyAppClient.handle_read()
# don't know how to make it arrive here instead as it should
# be relayed to User
def handle_read(self):
self.client.send(self.recv(1024))
class RelayClient(asyncore.dispatcher):
def __init__(self, server, client, sock):
asyncore.dispatcher.__init__(self, client)
self.is_readable = False
self.server = server
self.relay = RelayConnection(self, sock)
def handle_read(self):
self.relay.send(self.recv(1024))
def handle_close(self):
print "Closing relay..."
# If the client disconnects, close the
# relay connection as well.
self.relay.close()
self.close()
def readable(self):
return self.is_readable
class RelayServer(asyncore.dispatcher):
def __init__(self, bind_address, MyAppClient_sock):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind(bind_address)
self.MyAppClient_sock = MyAppClient_sock
print self.MyAppClient_sock
self.listen(1)
def handle_accept(self):
conn, addr = self.accept()
RelayClient(self, conn, self.MyAppClient_sock)
if __name__ == "__main__":
# Connect to host
# First connection stage
connectionPhase = 1
c = MyAppClient('host', port) # SomeServer's host and port
asyncore.loop()
EDIT:
@samplebias I replaced my complete module with your code (not shown) and re-added all the bits and pieces I need for authentication etc.
At this point I'm getting the same result as with my own code above. What I mean is that MyApp (or Server in your code) is connected to SomeServer and passing data back and forth. Everything is fine up to that point. When User (or the client application) connects to localhost 8080, this code is run:
if not self.listener:
self.listener = Listener(self.listener_addr, self)
BUT, this is not run
# if user is attached, send data
elif self.user:
print 'self.user'
self.user.send(data)
So, Server is not relaying data to User. I added print statements throughout the User class to see what runs, and __init__ is the only thing. handle_read() never runs.
Why is this?
The code is a bit hard to follow, and I'm sure there are a few bugs. For
example in handle_read() you're passing MyAppClient's raw socket self.socket to
RelayServer. You end up with both MyAppClient and RelayConnection working on the same socket.
Rather than attempt to suggest bug fixes to the original code I put together
an example which does what your code intends and is cleaner and easier to follow.
I've tested it talking to an IMAP server and it works, but omits some
things for brevity (error handling, proper close() handling in all cases, etc).
Server initiates the connection to "someserver". Once it connects
it starts the Listener.
Listener listens on port 8080 and accepts only 1 connection, creates a User,
and passes it a reference to Server. Listener rejects all other
client connections while User is active.
User forwards all data to Server, and vice versa. The comments
indicate where the authentication should be plugged in.
Source:
import asyncore
import socket
class User(asyncore.dispatcher_with_send):
def __init__(self, sock, server):
asyncore.dispatcher_with_send.__init__(self, sock)
self.server = server
def handle_read(self):
data = self.recv(4096)
# parse User auth protocol here, authenticate, set phase flag, etc.
# if authenticated, send data to server
if self.server:
self.server.send(data)
def handle_close(self):
if self.server:
self.server.close()
self.close()
class Listener(asyncore.dispatcher_with_send):
def __init__(self, listener_addr, server):
asyncore.dispatcher_with_send.__init__(self)
self.server = server
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind(listener_addr)
self.listen(1)
def handle_accept(self):
conn, addr = self.accept()
# this listener only accepts 1 client. while it is serving 1 client
# it will reject all other clients.
if not self.server.user:
self.server.user = User(conn, self.server)
else:
conn.close()
class Server(asyncore.dispatcher_with_send):
def __init__(self, server_addr, listener_addr):
asyncore.dispatcher_with_send.__init__(self)
self.server_addr = server_addr
self.listener_addr = listener_addr
self.listener = None
self.user = None
def start(self):
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect(self.server_addr)
def handle_error(self, *n):
self.close()
def handle_read(self):
data = self.recv(4096)
# parse SomeServer auth protocol here, set phase flag, etc.
if not self.listener:
self.listener = Listener(self.listener_addr, self)
# if user is attached, send data
elif self.user:
self.user.send(data)
def handle_close(self):
if self.user:
self.user.server = None
self.user.close()
self.user = None
if self.listener:
self.listener.close()
self.listener = None
self.close()
self.start()
if __name__ == '__main__':
app = Server(('someserver', 143), ('localhost', 8080))
app.start()
asyncore.loop()
I'm trying to program a TCPServer with threads (ThreadingMixIn) in Python. The problem is that I can't shut it down properly: I get socket.error: [Errno 48] Address already in use when I try to run it again. This is a minimal example of the Python code that triggers the problem:
import socket
import threading
import SocketServer
class FakeNetio230aHandler(SocketServer.BaseRequestHandler):
def send(self,message):
self.request.send(message+N_LINE_ENDING)
def handle(self):
self.request.send("Hello\n")
class FakeNetio230a(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
def __init__(self, server_address, RequestHandlerClass):
self.allow_reuse_address = True
SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)
if __name__ == '__main__':
for i in range(2):
fake_server = FakeNetio230a(("", 1234), FakeNetio230aHandler)
server_thread = threading.Thread(target=fake_server.serve_forever)
server_thread.setDaemon(True)
server_thread.start()
# might add some client connection here
fake_server.shutdown()
All the main code should do is start the server, shut it down, and run it again. But it triggers the error stated above because the socket has not been released after the first shutdown.
I thought that setting self.allow_reuse_address = True would solve the problem, but that did not work. When the Python program finishes, I can run it again straight away and it can start the server once (but, again, not twice).
However, the problem goes away when I randomize the port (replace 1234 by 1234+i, for example), since no other server is then listening on that address.
There is a similar SO question, Shutting down gracefully from ThreadingTCPServer, but the solution there (setting allow_reuse_address to True) does not work for my code, and I don't use ThreadingTCPServer.
How do I have to modify my code in order to be able to start the server twice?
Some more information: the reason I'm doing this is that I want to run some unit tests for my Python project. This requires providing a (fake) server that my software should connect to.
edit:
I just found the most correct answer to my problem: I have to add fake_server.server_close() at the end of my main code (right after fake_server.shutdown()). I found it in the source of the TCPServer implementation. All it does is call self.socket.close().
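So the main block from the minimal example above becomes, roughly:
if __name__ == '__main__':
    for i in range(2):
        fake_server = FakeNetio230a(("", 1234), FakeNetio230aHandler)
        server_thread = threading.Thread(target=fake_server.serve_forever)
        server_thread.setDaemon(True)
        server_thread.start()
        # might add some client connection here
        fake_server.shutdown()
        fake_server.server_close()  # release the listening socket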
Somehow, fake_server doesn't unbind when you assign to it again (in the first line of the for statement).
To fix that, just remove fake_server at the end of the loop:
del fake_server # force server to unbind
This post helped me get over the un-closed socket problem.
I had the same problem and wanted to post here my simple implementation of a TCP server class (and client method).
I made a TCPThreadedServer class. To use it, you inherit from it and override the process(msg) method. The overridden method is invoked every time the server receives a message msg, and if it returns a not-None object, that object is sent back as a string to the connected client.
from SocketServer import TCPServer, StreamRequestHandler, ThreadingMixIn
import threading
class TCPThreadedServer(ThreadingMixIn, TCPServer):
class RequstHandler(StreamRequestHandler):
def handle(self):
msg = self.rfile.readline().strip()
reply = self.server.process(msg)
if reply is not None:
self.wfile.write(str(reply) + '\n')
def __init__(self, host, port, name=None):
self.allow_reuse_address = True
TCPServer.__init__(self, (host, port), self.RequstHandler)
if name is None: name = "%s:%s" % (host, port)
self.name = name
self.poll_interval = 0.5
def process(self, msg):
"""
should be overridden
process a message
msg - string containing a received message
return - if returns a not None object, it will be sent back
to the client.
"""
raise NotImplementedError
def serve_forever(self, poll_interval=0.5):
self.poll_interval = poll_interval
self.trd = threading.Thread(target=TCPServer.serve_forever,
args = [self, self.poll_interval],
name = "PyServer-" + self.name)
self.trd.start()
def shutdown(self):
TCPServer.shutdown(self)
TCPServer.server_close(self)
self.trd.join()
del self.trd
I found it quite easy to use:
class EchoServerExample(TCPThreadedServer):
def __init__(self):
TCPThreadedServer.__init__(self, "localhost", 1234, "Server")
def process(self, data):
print "EchoServer Got: " + data
return str.upper(data)
for i in range(10):
echo = EchoServerExample()
echo.serve_forever()
response = client("localhost", 1234, "hi-%i" % i)
print "Client received: " + response
echo.shutdown()
For the client, I used this method:
import socket
def client(ip, port, msg, recv_len=4096,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
msg = str(msg)
response = None
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((ip, port))
if timeout != socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
sock.send(msg + "\n")
if recv_len > 0:
response = sock.recv(recv_len)
finally:
sock.close()
return response
Enjoy it!
Change your FakeNetio230a definition to this:
class FakeNetio230a(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
def __init__(self, server_address, RequestHandlerClass):
self.allow_reuse_address = True
SocketServer.TCPServer.__init__(self,
server_address,
RequestHandlerClass,
False) # do not implicitly bind
Then, add these two lines in your entry point below your FakeNetio230a instantiation:
fake_server.server_bind() # explicitly bind
fake_server.server_activate() # activate the server
Here's an example:
if __name__ == '__main__':
for i in range(2):
fake_server = FakeNetio230a(("", 1234), FakeNetio230aHandler)
fake_server.server_bind() # explicitly bind
fake_server.server_activate() # activate the server
server_thread = threading.Thread(target=fake_server.serve_forever)
server_thread.setDaemon(True)
server_thread.start()
# might add some client connection here
fake_server.shutdown()