I just started using Twisted. I want to connect to an FTP server and perform some basic operations (using threading if possible), and I am working from this example, which does the job quite well. The question is: how do I add SOCKS4/5 proxy support to the code? Can somebody please provide a working example? I have tried this link too, but so far without success. Here is my attempt:
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An example of using the FTP client
"""
# Twisted imports
from twisted.protocols.ftp import FTPClient, FTPFileListProtocol
from twisted.internet.protocol import Protocol, ClientCreator
from twisted.python import usage
from twisted.internet import reactor, endpoints
# Socks support test
from socksclient import SOCKSv4ClientProtocol, SOCKSWrapper
from twisted.web import client
# Standard library imports
import string
import sys
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class BufferingProtocol(Protocol):
"""Simple utility class that holds all data written to it in a buffer."""
def __init__(self):
self.buffer = StringIO()
def dataReceived(self, data):
self.buffer.write(data)
# Define some callbacks
def success(response):
print 'Success! Got response:'
print '---'
if response is None:
print None
else:
print string.join(response, '\n')
print '---'
def fail(error):
print 'Failed. Error was:'
print error
def showFiles(result, fileListProtocol):
print 'Processed file listing:'
for file in fileListProtocol.files:
print ' %s: %d bytes, %s' \
% (file['filename'], file['size'], file['date'])
print 'Total: %d files' % (len(fileListProtocol.files))
def showBuffer(result, bufferProtocol):
print 'Got data:'
print bufferProtocol.buffer.getvalue()
class Options(usage.Options):
optParameters = [['host', 'h', 'example.com'],
['port', 'p', 21],
['username', 'u', 'webmaster'],
['password', None, 'justapass'],
['passive', None, 0],
['debug', 'd', 1],
]
# Socks support
def wrappercb(proxy):
print "connected to proxy", proxy
pass
def run():
def sockswrapper(proxy, url):
dest = client._parse(url) # scheme, host, port, path
endpoint = endpoints.TCP4ClientEndpoint(reactor, dest[1], dest[2])
return SOCKSWrapper(reactor, proxy[1], proxy[2], endpoint)
# Get config
config = Options()
config.parseOptions()
config.opts['port'] = int(config.opts['port'])
config.opts['passive'] = int(config.opts['passive'])
config.opts['debug'] = int(config.opts['debug'])
# Create the client
FTPClient.debug = config.opts['debug']
creator = ClientCreator(reactor, FTPClient, config.opts['username'],
config.opts['password'], passive=config.opts['passive'])
#creator.connectTCP(config.opts['host'], config.opts['port']).addCallback(connectionMade).addErrback(connectionFailed)
# Socks support
proxy = (None, '1.1.1.1', 1111, True, None, None)
sw = sockswrapper(proxy, "ftp://example.com")
d = sw.connect(creator)
d.addCallback(wrappercb)
reactor.run()
def connectionFailed(f):
print "Connection Failed:", f
reactor.stop()
def connectionMade(ftpClient):
# Get the current working directory
ftpClient.pwd().addCallbacks(success, fail)
# Get a detailed listing of the current directory
fileList = FTPFileListProtocol()
d = ftpClient.list('.', fileList)
d.addCallbacks(showFiles, fail, callbackArgs=(fileList,))
# Change to the parent directory
ftpClient.cdup().addCallbacks(success, fail)
# Create a buffer
proto = BufferingProtocol()
# Get short listing of current directory, and quit when done
d = ftpClient.nlst('.', proto)
d.addCallbacks(showBuffer, fail, callbackArgs=(proto,))
d.addCallback(lambda result: reactor.stop())
# this only runs if the module was *not* imported
if __name__ == '__main__':
run()
I know the code is wrong; I need a working solution.
Okay, so here's a solution (gist) that uses Python's built-in ftplib, as well as the open source SocksiPy module.
It doesn't use Twisted, and it doesn't explicitly use threads, but using and communicating between threads is pretty easy with threading.Thread and threading.Queue from Python's standard threading module.
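For instance, a minimal (untested) sketch of that Thread/Queue pattern might look like the following; it assumes the proxy-aware FTP class defined below, and the host and paths are made-up placeholders:

import threading
try:
    import Queue as queue   # Python 2
except ImportError:
    import queue             # Python 3

def download_worker(jobs, proxyconfig):
    # Pull (remote_path, local_path) jobs from the queue; each job gets its
    # own FTP connection (FTP is the proxy-aware subclass shown below).
    while True:
        job = jobs.get()
        if job is None:            # sentinel: no more work for this worker
            jobs.task_done()
            break
        remote_path, local_path = job
        ftp = FTP(host='ftp.example.com', user='anonymous', passwd='guest',
                  proxyconfig=proxyconfig)
        with open(local_path, 'wb') as f:
            ftp.retrbinary("RETR " + remote_path, f.write)
        ftp.quit()
        jobs.task_done()

# jobs = queue.Queue()
# for _ in range(3):
#     threading.Thread(target=download_worker, args=(jobs, None)).start()
# jobs.put(("/pub/some/file.tar.gz", "file.tar.gz"))
# for _ in range(3):
#     jobs.put(None)   # one sentinel per worker
# jobs.join()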
Basically, we need to subclass ftplib.FTP so that we can substitute our own create_connection method and add proxy configuration semantics.
The "main" logic just configures an FTP client that connects via a localhost SOCKS proxy, such as one created by ssh -D localhost:1080 socksproxy.example.com, and downloads a source snapshot of GNU autoconf to the local disk.
import ftplib
import socket
import socks # socksipy (https://github.com/mikedougherty/SocksiPy)
class FTP(ftplib.FTP):
def __init__(self, host='', user='', passwd='', acct='',
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
proxyconfig=None):
"""Like ftplib.FTP constructor, but with an added `proxyconfig` kwarg
`proxyconfig` should be a dictionary that may contain the following
keys:
proxytype - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.proxyconfig = proxyconfig or {}
ftplib.FTP.__init__(self, host, user, passwd, acct, timeout)
def connect(self, host='', port=0, timeout=-999):
'''Connect to host. Arguments are:
- host: hostname to connect to (string, default previous host)
- port: port to connect to (integer, default previous port)
'''
if host != '':
self.host = host
if port > 0:
self.port = port
if timeout != -999:
self.timeout = timeout
self.sock = self.create_connection(self.host, self.port)
self.af = self.sock.family
self.file = self.sock.makefile('rb')
self.welcome = self.getresp()
return self.welcome
def create_connection(self, host=None, port=None):
host, port = host or self.host, port or self.port
if self.proxyconfig:
phost, pport = self.proxyconfig['addr'], self.proxyconfig['port']
err = None
for res in socket.getaddrinfo(phost, pport, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socks.socksocket(af, socktype, proto)
sock.setproxy(**self.proxyconfig)
if self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(self.timeout)
sock.connect((host, port))
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error("getaddrinfo returns an empty list")
else:
sock = socket.create_connection((host, port), self.timeout)
return sock
def ntransfercmd(self, cmd, rest=None):
size = None
if self.passiveserver:
host, port = self.makepasv()
conn = self.create_connection(host, port)
try:
if rest is not None:
self.sendcmd("REST %s" % rest)
resp = self.sendcmd(cmd)
# Some servers apparently send a 200 reply to
# a LIST or STOR command, before the 150 reply
# (and way before the 226 reply). This seems to
# be in violation of the protocol (which only allows
# 1xx or error messages for LIST), so we just discard
# this response.
if resp[0] == '2':
resp = self.getresp()
if resp[0] != '1':
raise ftplib.error_reply, resp
except:
conn.close()
raise
else:
raise Exception("Active transfers not supported")
if resp[:3] == '150':
# this is conditional in case we received a 125
size = ftplib.parse150(resp)
return conn, size
if __name__ == '__main__':
ftp = FTP(host='ftp.gnu.org', user='anonymous', passwd='guest',
proxyconfig=dict(proxytype=socks.PROXY_TYPE_SOCKS5, rdns=False,
addr='localhost', port=1080))
with open('autoconf-2.69.tar.xz', mode='wb') as f:
ftp.retrbinary("RETR /gnu/autoconf/autoconf-2.69.tar.xz", f.write)
To elaborate on why I asked some of my original questions:
1) Do you need to support active transfers or will PASV transfers be sufficient?
Active transfers are much harder to do via a SOCKS proxy because they require the use of the PORT command. With the PORT command, your FTP client tells the FTP server to connect to you on a specific port (e.g., on your PC) in order to send the data. This is unlikely to work for users behind a firewall or NAT/router. If your SOCKS proxy server is not behind a firewall, or has a public IP, it is possible to support active transfers, but it is complicated: it requires your SOCKS server (ssh -D does support this) and client library (SocksiPy does not) to support remote port binding. It also requires the appropriate hooks in the application (my example throws an exception if passiveserver = False) to do a remote BIND instead of a local one.
2) Does it have to use Twisted?
Twisted is great, but I'm not the best at it, and I haven't found a really great SOCKS client implementation for it. Ideally there would be a library that lets you define and/or chain proxies together, returning an object that implements the IReactorTCP interface, but I have not found anything like that yet.
3) Is your socks proxy behind a VIP or just a single host directly connected to the Internet?
This matters because of the way PASV transfer security works. In a PASV transfer, the client asks the server for a port to connect to in order to start a data transfer. When the server accepts a connection on that port, it SHOULD verify that the client is connecting from the same source IP as the connection that requested the transfer. If your SOCKS server is behind a VIP, it is less likely that the outbound IP of the connection made for the PASV transfer will match the outbound IP of the primary control connection.
Related
I'm trying to make a Python server where multiple clients can connect, but I've run into a problem. I've tried everything that I found on the internet.
I'm running a laptop with Windows 7 and an i3 processor.
This is the file called tcp:
import socket
def make_server (ip,port):
try:
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((ip, port))
server.listen(1)
return server
except Exception as ex:
print(ex)
return None
def accept(server):
conn, addr = server.accept()
return conn
def make_client():
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
return client
def client_connect(client,ip,port):
client.connect((ip,port))
def sendall(conn,mess):
conn.send(str(mess).encode("utf-8"))
def rec(conn,rate):
mess = conn.recv(rate).decode("utf-8")
return mess
def close(client):
client.close()
This is the server:
from multiprocessing import Process
from random import randint
import tcp
import sys
def start(sip, sport):
print("Making sob server...")
print("id= {}".format(sport))
sserver = tcp.make_server(sip, sport)
print("Sub Server Started!")
sconn = tcp.accept(sserver)
tcp.sendall(sconn, "connected!!")
while True:
try:
tcp.sendall(sconn, randint(0, 100))
except Exception as ex:
print("")
print("From server {} error:".format(port))
print(ex)
print("")
break
ip = "192.168.0.102"
port = 8000
subport = 9000
server = tcp.make_server(ip, port)
if server is None:
sys.exit(0)
print("Started!")
while True:
print("Wating for new connection!")
con = tcp.accept(server)
print("Connected!")
subport = subport + 1
tcp.sendall(con, subport)
print("New Port Sent!")
print("New Port = {}".format(subport))
subs = Process(target=start, args=(ip, subport))
subs.start()
subs.join()
This is the client:
import tcp
import time
nport = 0
ip = "192.168.0.102"
port = 8000
client = tcp.make_client()
tcp.client_connect(client,ip,port)
nport = tcp.rec(client,1024)
print(nport)
tcp.close(client)
nport = int(nport)
time.sleep(1)
print(nport)
client = tcp.make_client()
tcp.client_connect(client,ip,nport)
while True:
mess = tcp.rec(client, 1024)
if(mess):
print(mess)
The error is:
[WinError 10048] Only one usage of each socket address (protocol/network address/port) is normally permitted
Feel free to change anything you want.
If you need any more info, just ask.
You are creating a socket in the client with tcp.make_client. You are then using that socket to connect to the server via tcp.client_connect. Presumably you successfully receive the new port number back from the server. But then you are trying to re-use the same socket to connect to those ports.
This is the proximate cause of your error: A socket can only be used for a single TCP connection. If you want to create a new connection, you must first create a new socket.
That being said, if you are simply trying to create a server that will accept multiple connections, you're making it way too complicated. The server can receive any number of connections on its single listening port, as long as a different address/port combination is used by each client.
One way to structure this in a server is something like this:
# Create and bind listening socket
lsock = socket.socket()
lsock.bind(('', port))
lsock.listen(1)
while True:
csock, addr = lsock.accept()
print("Got connection from {}".format(addr))
# Start sub-process passing it the newly accepted socket as argument
subs = Process(target=start, args=(csock, ))
subs.start()
# Close our handle to the new socket (it will remain open in the
# sub-process which will use it to talk to the client)
csock.close()
# NOTE: do not call subs.join here unless you want the parent to *block*
# waiting for the sub-process to finish (and if so, what is the point in
# creating a sub-process?)
There are several other ways to do it as well: you can create multiple threads to handle multiple connections, or you can handle all connections in a single thread by using select or asynchronous I/O; a minimal select sketch follows below.
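A rough sketch of the single-threaded select variant (same port as in your question; the echo is just a placeholder for real handling):

import select
import socket

lsock = socket.socket()
lsock.bind(('', 8000))
lsock.listen(5)

sockets = [lsock]          # the listening socket plus all accepted client sockets
while True:
    readable, _, _ = select.select(sockets, [], [])
    for s in readable:
        if s is lsock:
            # New incoming connection: accept it and start watching it
            csock, addr = lsock.accept()
            print("Got connection from {}".format(addr))
            sockets.append(csock)
        else:
            data = s.recv(1024)
            if not data:
                # Client closed its end: stop watching this socket
                sockets.remove(s)
                s.close()
            else:
                s.send(data)   # echo back (replace with real handling)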
The client is typically much simpler -- it usually only cares about its own single connection -- and doesn't care how the server is implemented:
sock = socket.socket()
sock.connect((ip, port))
while True:
sock.send(...)
sock.recv(...)
If the client does wish to connect to the same server again, it simply creates a second socket and calls its connect method with the same server IP and port; see the sketch below.
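Applied to your setup, that means something like this sketch (same IP and listening port as in your question; the payloads are made up):

import socket

ip, port = "192.168.0.102", 8000   # values from the question

# First connection
sock = socket.socket()
sock.connect((ip, port))
sock.send(b"hello")
print(sock.recv(1024))
sock.close()

# To talk to the server again, make a brand-new socket object;
# the old one cannot be reused after close().
sock = socket.socket()
sock.connect((ip, port))
print(sock.recv(1024))
sock.close()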
Usually, the client never needs to specify its own port, only the server's port. It simply calls connect and the client-side operating system chooses an unused port for it. So the first time, the client creates a socket and connects it (to the server's listening port), the client-side OS may choose port 50001. The next time it creates and connects a socket, it may get 50002 and so on. (The exact port numbers chosen depend on the operating system implementation and other factors, such as what other programs are running and creating connections.)
So, given client IP 192.168.0.101 and server IP 192.168.0.102, and assuming the server is listening on port 8000, this would result in these two connections:
(192.168.0.101/50001) ====> (192.168.0.102/8000)
(192.168.0.101/50002) ====> (192.168.0.102/8000)
I'm trying to create a small program that will log information output from a device via TCP.
Basically the device just streams data out, which I want to capture and dump into a database for dealing with later.
But the device reboots, so I need to be able to reconnect when the socket closes, without any human interference.
So this is what I have so far:
import socket, time, logging, sys, smtplib # Import socket module
logging.basicConfig(filename='Tcplogger.log',level=logging.DEBUG,format='%(asctime)s : %(levelname)s : %(message)s')
logging.info('|--------------------------------------|')
logging.info('|--------------- TCP Logger Starting---|')
logging.info('|--------------------------------------|')
host = '127.0.0.01' # host or Ip address
port = 12345 # output port
retrytime = 1 # reconnect time
reconnectattemps = 10 # Number of time to try and reconnect
class TCPLogger:
def __init__(self):
logging.debug('****Trying connection****')
print('****Trying connection****')
self.initConnection()
def initConnection(self):
s = socket.socket()
try:
s.connect((host, port))
logging.debug('****Connected****')
except IOError as e:
while 1:
reconnectcount = 0;
logging.error(format(e.errno)+' : '+format(e.strerror))
while 1:
reconnectcount = reconnectcount + 1
logging.error('Retrying connection to Mitel attempt : '+str(reconnectcount))
try:
s.connect((host, port))
connected = True
logging.debug('****Connected****')
except IOError as e:
connected = False
logging.error(format(e.errno)+' : '+format(e.strerror))
if reconnectcount == reconnectattemps:
logging.error('******####### Max Reconnect attempts reached logger will Terminate ######******')
sys.exit("could Not connect")
time.sleep(retrytime)
if connected == True:
break
break
while 1:
s.recv(1034)
LOGGER= TCPLogger()
This all works fine on startup: if I try to connect and the server is not there, it will retry the number of times set by reconnectattemps.
But here is my issue:
while 1:
s.recv(1034)
When this fails I want to try to reconnect.
I could of course type out, or just copy, my connection code again, but what I want is to be able to call a function that handles the connection (and retries) and hands me back the connection object.
For example, like this:
class tcpclient
#set some var
host, port etc....
def initconnection:
connect to socket and retry if needed
RETURN SOCKET
def dealwithdata:
initconnection()
while 1:
try:
s.recv
do stuff here copy to db
except:
log error
initconnection()
I think this is possible, but I'm really not getting how the class/method system works in Python, so I think I'm missing something here.
FYI, just in case you didn't notice, I'm very new to Python. Any other comments on what I already have are welcome too :)
Thanks
Aj
Recommendation
For this use case I would recommend something higher-level than plain sockets. Why? Handling all these exceptions and errors yourself can be irritating when you just want to retrieve or send data and keep the connection alive.
Of course you can achieve what you want with your plain-socket solution, but you end up fiddling with the code a bit more, I think. Either way it would look similar to the class amustafa wrote, with socket errors handled by a close/reconnect method, etc.
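If you do want to stay with plain sockets, a rough sketch of that close/reconnect idea could look like this (function and variable names are made up, not a finished implementation):

import socket
import time

def connect_with_retry(host, port, attempts=10, delay=1):
    """Keep trying to connect; return a connected socket or raise the last error."""
    last_err = None
    for attempt in range(1, attempts + 1):
        try:
            return socket.create_connection((host, port))
        except IOError as e:
            last_err = e
            print('Attempt {} failed, retrying in {}s...'.format(attempt, delay))
            time.sleep(delay)
    raise last_err

def read_forever(host, port):
    sock = connect_with_retry(host, port)
    while True:
        try:
            data = sock.recv(1024)
            if not data:
                raise IOError("connection closed by peer")
            # ... store data in the database here ...
        except IOError:
            sock.close()
            sock = connect_with_retry(host, port)   # reconnect and carry on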
Example
Here's an example of an easier solution using the asyncore module:
import asyncore
import socket
from time import sleep
class Client(asyncore.dispatcher_with_send):
def __init__(self, host, port, tries_max=5, tries_delay=2):
asyncore.dispatcher.__init__(self)
self.host, self.port = host, port
self.tries_max = tries_max
self.tries_done = 0
self.tries_delay = tries_delay
self.end = False # Flag that indicates whether socket should reconnect or quit.
self.out_buffer = '' # Buffer for sending.
self.reconnect() # Initial connection.
def reconnect(self):
if self.tries_done == self.tries_max:
self.end = True
return
print 'Trying connecting in {} sec...'.format(self.tries_delay)
sleep(self.tries_delay)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.connect((self.host, self.port))
except socket.error:
pass
if not self.connected:
self.tries_done += 1
print 'Could not connect for {} time(s).'.format(self.tries_done)
def handle_connect(self):
self.tries_done = 0
print 'We connected and can get the stuff done!'
def handle_read(self):
data = self.recv(1024)
if not data:
return
# Check for terminator. Can be any action instead of this clause.
if 'END' in data:
self.end = True # Everything went good. Shutdown.
else:
print data # Store to DB or other thing.
def handle_close(self):
print 'Connection closed.'
self.close()
if not self.end:
self.reconnect()
Client('localhost', 6666)
asyncore.loop(timeout=1)
The reconnect() method is, in a way, the core of your case: it's called whenever a connection needs to be made, i.e. when the class initializes or the connection breaks.
handle_read() processes any received data; this is where you would log or store it.
You can even send data using the buffer (self.out_buffer += 'message'), which is left untouched across a reconnection, so the class will resume sending once connected again.
Setting self.end to True tells the class to quit when possible.
asyncore takes care of exceptions and calls handle_close() when such events occur, which is a convenient way of dealing with connection failures.
You should look at the Python documentation to understand how classes and methods work. The biggest difference between Python methods and methods in most other languages is the addition of the "self" parameter: self represents the instance a method is called on and is passed in automatically by Python. So:
class TCPClient():
def __init__(self, host, port, retryAttempts=10 ):
#this is the constructor that takes in host and port. retryAttempts is given
# a default value but can also be fed in.
self.host = host
self.port = port
self.retryAttempts = retryAttempts
self.socket = None
def connect(self, attempt=0):
if attempt < self.retryAttempts:
#put connecting code here
if connectionFailed:
self.connect(attempt+1)
def disconnectSocket(self):
#perform all breakdown operations
...
self.socket = None
def sendDataToDB(self, data):
#send data to db
def readData(self):
#read data here
while True:
if self.socket is None:
self.connect()
...
Just make sure you properly disconnect the socket and set it to None.
Ok, so I did the following, figuring it would raise an exception if it could not connect:
>>> s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
>>> s.settimeout(0.2)
>>> s.connect(("thisdomaiyndontexistduhh.com", 80))
But no exception was raised. How do I test if there is a server open on a port with Python's socket module? Thanks!
Here's why the code above never fails.
My ISP (Frontier) configures DNS such that for any domain that does not exist, it will return "198.105.251.114". As such, they actually have a web server listening on port 80 at that address to display some garbage/spam search results. Change your host to use 8.8.8.8 (Google server) for DNS and your code above will likely work.
Given that these sorts of "captive networks" are common, the first thing your code should do is determine whether it is on such a network. So the right thing to do is call socket.gethostbyname("thisdomaiyndontexistduhh.com"). If it actually returns a value, you know you are behind such a DNS server. In that case, do a gethostbyname call on the server you want to probe; if it returns the same address, you know the server doesn't exist. Otherwise, proceed with the connect call to finish the test.
Update: I've been learning Python over the holidays, so I used this problem as an excuse to practice. Here's my code:
import socket
def DoesServiceExist(host, port):
captive_dns_addr = ""
host_addr = ""
try:
captive_dns_addr = socket.gethostbyname("BlahThisDomaynDontExist22.com")
except:
pass
try:
host_addr = socket.gethostbyname(host)
if (captive_dns_addr == host_addr):
return False
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
s.connect((host, port))
s.close()
except:
return False
return True
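Example usage (the host and ports are just placeholders):

print(DoesServiceExist("example.com", 80))   # True if something accepts the connection
print(DoesServiceExist("example.com", 81))   # False if nothing is listening there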
I found this solution using telnetlib and urllib to test the connection for servers in URLs like "https://example.com:6789/test". In this particular case, I look for the port first, then the scheme. I'm only considering "http" and "https", but it's easy to add or change the scheme (sftp, ssh, etc.).
import telnetlib
import time
from urllib.parse import urlsplit
def verify_service(endpoint):
netloc = "{0.netloc}".format(urlsplit(endpoint))
scheme = "{0.scheme}".format(urlsplit(endpoint))
if netloc == '':
return False
p = netloc.split(":")
if len(p) == 2:
host = p[0]
port = p[1]
else:
host = p[0]
port = 80 if scheme == "http" else "443"
try:
tn = telnetlib.Telnet(host,port)
time.sleep(1)
except:
return False
return True
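Example usage (the URLs are just placeholders):

print(verify_service("https://example.com:6789/test"))  # checks example.com:6789
print(verify_service("http://example.com"))             # no port given, defaults to 80
print(verify_service("not-a-url"))                       # False: no netloc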
Maybe someone here will have a response for this thing which is just driving me insane.
To make it simple, I'm making a kind of proxy. Whenever it receives something, it forwards everything to a server, and sends back the response. So there is one socket always listening on port 4557 for clients, and for each incoming connection, there is a new socket created on a random port to connect to the server port 4556.
Clients <==> Proxy <==> Server
Also, there is another socket which is instantiated and listens for requests coming from the server, to be forwarded to the corresponding client.
Here is an example:
Client A connects to proxy on port 4557
Proxy creates a socket to Server on port 4556
Along with that, it creates a socket listening on port 40100
Client sends stuff, forwarded to Server
Client disconnects. Close client connection and socket to server
Some time later, Server sends stuff to proxy on port 40100
Everything's forwarded to Client A (port 40100 corresponding to Client A)
And so on..
So far in my tests, I use a simple Python script that sends a single TCP packet to the proxy, along with a dump server that shows the received data and echoes it back.
So the issue is that when a connection to the proxy is closed, the connection to the Server should also be closed with "sock.close()". However, the close seems to be completely ignored; the socket remains ESTABLISHED.
About the code now.
A few notes.
DTN and Node are respectively Server and Clients.
runCallback is called in a loop until thread dies.
finalCallback is called when the thread is dying.
Associations between remote hosts (Client), proxy ports (to Server) and proxies are kept in the dictionaries: TCPProxyHostRegister (RemoteHost => Proxy), TCPProxyPortRegister (Port => Proxy), TCPPortToHost (Port => RemoteHost).
The first class is TCPListenerThread.
It just listens on a specific port, instantiates proxies (one for each Client=>Server couple and Server=>Client couple), and forwards connections to them.
class TCPListenerThread(StoppableThread):
def __init__(self, tcp_port):
StoppableThread.__init__(self)
self.tcp_port = tcp_port
self.sock = socket.socket( socket.AF_INET, # Internet
socket.SOCK_STREAM ) # tcp
self.sock.bind( (LOCAL_ADDRESS, self.tcp_port) )
self.sock.listen(1)
def runCallback(self):
print "Listen on "+str(self.tcp_port)+".."
conn, addr = self.sock.accept()
if isFromDTN(addr):
tcpProxy = getProxyFromPort(tcp_port)
if not tcpProxy:
tcpProxy = TCPProxy(host, True)
else:
host = addr[0]
tcpProxy = getProxyFromHost(host)
if not tcpProxy:
tcpProxy = TCPProxy(host, False)
tcpProxy.handle(conn)
def finalCallback(self):
self.sock.close()
Now comes the TCP Proxy:
It associates a remote host (Client) with a port connecting to Server.
If it's a connection coming from a new Client, it will create a new listener (see above) for the Server and create a socket ready to forward everything to Server.
class TCPProxy():
def __init__(self, remote, isFromDTN):
#remote = port for Server or Remote host for Client
self.isFromDTN = isFromDTN
self.conn = None
#add itself to proxy registries
#If listening from a node
if not isFromDTN:
#Set node remote host
self.remoteHost = remote
TCPProxyHostRegister[self.remoteHost] = self
#Set port to DTN interface + listener
self.portToDTN = getNewTCPPort()
TCPPortToHost[self.portToDTN] = self.remoteHost
newTCPListenerThread(self.portToDTN)
#Or from DTN
else:
self.portToDTN = remote
TCPProxyPortRegister[self.portToDTN] = self
self.remoteHost = getRemoteHostFromPortTCP(self.portToDTN)
def handle(self, conn):
print "New connection!"
#shouldn't happen, but eh
if self.conn != None:
self.closeConnections()
self.conn = conn
#init socket with remote
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if self.isFromDTN:
self.sock.connect((self.remoteHost, 4556)) #TODO: handle dynamic port..
else:
self.sock.connect((DTN_Address, DTN_TCPPort))
#handle connection in a thread
self.handlerThread = newTCPHandlerThread(self)
#handle reply in a therad
self.replyThread = newTCPReplyThread(self)
def closeConnections(self):
try:
if self.conn != None:
print "Close connections!"
self.sock.close()
self.conn.close()
self.conn = None
self.handlerThread.kill()
self.replyThread.kill()
except Exception, err:
print str(err)
#pass
def forward(self, data):
print "TCP forwarding data: "+data
self.sock.send(data)
def forwardBack(self, data):
print "TCP forwarding data back: "+data
self.conn.send(data)
In this proxy class, I instantiate two classes, TCPHandlerThread and TCPReplyThread. They are responsible for forwarding to Server, and forwarding back to Client, respectively.
class TCPHandlerThread(StoppableThread):
def __init__(self, proxy):
StoppableThread.__init__(self)
self.proxy = proxy
def runCallback(self):
test = False
while 1:
data = self.proxy.conn.recv(BUFFER_SIZE)
if test:
self.proxy.sock.close()
test = True
if not data:
break
print "TCP received data:", data
self.proxy.forward(data)
self.kill()
def finalCallback(self):
self.proxy.closeConnections()
class TCPReplyThread(StoppableThread):
def __init__(self, proxy):
StoppableThread.__init__(self)
self.proxy = proxy
def runCallback(self):
while 1:
data = self.proxy.sock.recv(BUFFER_SIZE)
if not data:
break
print "TCP received back data: "+data
self.proxy.forwardBack(data)
self.kill()
def finalCallback(self):
self.proxy.closeConnections()
You see that whenever a connection is closed, the thread dies and the other connection (Client/Server to proxy or Proxy to Server/Client) should be closed in Proxy.closeConnections()
I noticed that when closeConnections() is called while the thread sits at "data = self.proxy.conn.recv(BUFFER_SIZE)", it goes well, but when it's called even right after that statement, it goes wrong.
I watched the TCP traffic in Wireshark, and the proxy doesn't send any "bye" signal. The socket state doesn't go to TIME_WAIT or anything else; it just remains ESTABLISHED.
Also, I tested it on Windows and Ubuntu.
On Windows it goes exactly as I explained.
On Ubuntu, it usually (but not always) works for two connections, and the third time I connect with the same client in exactly the same way, it goes wrong again, exactly as explained.
Here are the three files I'm using, so that you can have a look at the whole code. I'm sorry the proxy file might not be very easy to read; it was SUPPOSED to be a quick bit of development.
http://hognerud.net/stackoverflow/
Thanks in advance..
It's surely something stupid. Please don't hit me too hard when you see it :(
First, I'm sorry that I currently don't have the time to actually run and test your code.
But the idea came to my mind that your problem might actually have something to do with using blocking mode vs. non-blocking mode on the socket. In that case you should check out the "socket" module help in the Python documentation, especially socket.setblocking().
My guess is that the proxy.conn.recv() call only returns when BUFFER_SIZE bytes were actually received by the socket. Because of that, the thread stays blocked until enough data has been received, and therefore the socket doesn't get closed.
As I said at the start, this is currently just a guess, so please don't vote me down if it doesn't solve the problem...
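For what it's worth, here is a tiny sketch of the timeout/non-blocking idea using settimeout(), a close relative of setblocking() (the address and port are placeholders; adapt them to where the proxy actually connects):

import socket

conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect(('127.0.0.1', 4556))     # placeholder address/port

# With a timeout, recv() can't block the handler thread forever: it either
# returns whatever data is available or raises socket.timeout, giving the
# thread a chance to notice that closeConnections() was requested.
conn.settimeout(2.0)
try:
    data = conn.recv(4096)
except socket.timeout:
    data = None                        # nothing arrived within 2 seconds

# conn.setblocking(0) goes further: recv() raises a socket error immediately
# when no data is available at all.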
So I have been having a hard time sending email from my school's email address. It uses SSL, and the only code I could find online that works with SSL is this, by Matt Butcher:
import smtplib, socket
__version__ = "1.00"
__all__ = ['SMTPSSLException', 'SMTP_SSL']
SSMTP_PORT = 465
class SMTPSSLException(smtplib.SMTPException):
"""Base class for exceptions resulting from SSL negotiation."""
class SMTP_SSL (smtplib.SMTP):
"""This class provides SSL access to an SMTP server.
SMTP over SSL typical listens on port 465. Unlike StartTLS, SMTP over SSL
makes an SSL connection before doing a helo/ehlo. All transactions, then,
are done over an encrypted channel.
This class is a simple subclass of the smtplib.SMTP class that comes with
Python. It overrides the connect() method to use an SSL socket, and it
overrides the starttls() function to throw an error (you can't do
starttls within an SSL session).
"""
certfile = None
keyfile = None
def __init__(self, host='', port=0, local_hostname=None, keyfile=None, certfile=None):
"""Initialize a new SSL SMTP object.
If specified, `host' is the name of the remote host to which this object
will connect. If specified, `port' specifies the port (on `host') to
which this object will connect. `local_hostname' is the name of the
localhost. By default, the value of socket.getfqdn() is used.
An SMTPConnectError is raised if the SMTP host does not respond
correctly.
An SMTPSSLError is raised if SSL negotiation fails.
Warning: This object uses socket.ssl(), which does not do client-side
verification of the server's cert.
"""
self.certfile = certfile
self.keyfile = keyfile
smtplib.SMTP.__init__(self, host, port, local_hostname)
def connect(self, host='localhost', port=0):
"""Connect to an SMTP server using SSL.
`host' is localhost by default. Port will be set to 465 (the default
SSL SMTP port) if no port is specified.
If the host name ends with a colon (`:') followed by a number,
that suffix will be stripped off and the
number interpreted as the port number to use. This will override the
`port' parameter.
Note: This method is automatically invoked by __init__, if a host is
specified during instantiation.
"""
# MB: Most of this (Except for the socket connection code) is from
# the SMTP.connect() method. I changed only the bare minimum for the
# sake of compatibility.
if not port and (host.find(':') == host.rfind(':')):
i = host.rfind(':')
if i >= 0:
host, port = host[:i], host[i+1:]
try: port = int(port)
except ValueError:
raise socket.error, "nonnumeric port"
if not port: port = SSMTP_PORT
if self.debuglevel > 0: print>>stderr, 'connect:', (host, port)
msg = "getaddrinfo returns an empty list"
self.sock = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
if self.debuglevel > 0: print>>stderr, 'connect:', (host, port)
self.sock.connect(sa)
# MB: Make the SSL connection.
sslobj = socket.ssl(self.sock, self.keyfile, self.certfile)
except socket.error, msg:
if self.debuglevel > 0:
print>>stderr, 'connect fail:', (host, port)
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
# MB: Now set up fake socket and fake file classes.
# Thanks to the design of smtplib, this is all we need to do
# to get SSL working with all other methods.
self.sock = smtplib.SSLFakeSocket(self.sock, sslobj)
self.file = smtplib.SSLFakeFile(sslobj);
(code, msg) = self.getreply()
if self.debuglevel > 0: print>>stderr, "connect:", msg
return (code, msg)
def setkeyfile(self, keyfile):
"""Set the absolute path to a file containing a private key.
This method will only be effective if it is called before connect().
This key will be used to make the SSL connection."""
self.keyfile = keyfile
def setcertfile(self, certfile):
"""Set the absolute path to a file containing a x.509 certificate.
This method will only be effective if it is called before connect().
This certificate will be used to make the SSL connection."""
self.certfile = certfile
def starttls(self):
"""Raises an exception.
You cannot do StartTLS inside of an ssl session. Calling starttls() will
return an SMTPSSLException"""
raise SMTPSSLException, "Cannot perform StartTLS within SSL session."
And then my code:
import ssmtplib
conn = ssmtplib.SMTP_SSL('HOST')
conn.login('USERNAME','PW')
conn.ehlo()
conn.sendmail('FROM_EMAIL', 'TO_EMAIL', "MESSAGE")
conn.close()
And got this error:
/Users/Jake/Desktop/Beth's Program/ssmtplib.py:116: DeprecationWarning: socket.ssl() is deprecated. Use ssl.wrap_socket() instead.
sslobj = socket.ssl(self.sock, self.keyfile, self.certfile)
Traceback (most recent call last):
File "emailer.py", line 5, in
conn = ssmtplib.SMTP_SSL('HOST')
File "/Users/Jake/Desktop/Beth's Program/ssmtplib.py", line 79, in init
smtplib.SMTP.init(self, host, port, local_hostname)
File "/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/smtplib.py", line 239, in init
(code, msg) = self.connect(host, port)
File "/Users/Jake/Desktop/Beth's Program/ssmtplib.py", line 131, in connect
self.sock = smtplib.SSLFakeSocket(self.sock, sslobj)
AttributeError: 'module' object has no attribute 'SSLFakeSocket'
Thank you!
That code you've found seems to be for an older Python version, considering the deprecation warning. Maybe you can get by with the standard library: there is an SMTP_SSL class in smtplib as of Python 2.6, and since at least Python 2.4 the plaintext SMTP class has a starttls method.
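For example, a minimal sketch using the standard library's SMTP_SSL (the host, credentials, and addresses are the placeholders from your snippet):

import smtplib

conn = smtplib.SMTP_SSL('HOST')            # connects on port 465 by default
conn.login('USERNAME', 'PW')               # login() performs the EHLO for you
conn.sendmail('FROM_EMAIL', 'TO_EMAIL', 'MESSAGE')
conn.quit()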