How can I get a socket's .recv() not to block? - python

I'm trying to write a simple daemon that listens for orders on a Unix socket. The following works, but the connection.recv(1024) line blocks, meaning I can't kill the server gracefully:
import socket, os

with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as server:
    server.bind("/tmp/sock")
    server.listen()
    connection, __ = server.accept()
    with connection:
        while True:
            data = connection.recv(1024)
            print("Hi!")  # This line isn't executed 'til data is sent
            if data:
                print(data.decode())
Ideally, I'd like to place all of this inside a Thread that checks a self.should_stop property every self.LOOP_TIME seconds, and if that value is set to True, then exit. However, as that .recv() line blocks, there's no way for my program to be doing anything other than waiting at any given time.
Surely there's a proper way to do this, but as I'm new to sockets, I have no idea what that is.
Edit
Jeremy Friesner's answer put me on the right track. I realised that I could allow the thread to block, simply set .should_stop, and then send an empty b"" to the socket so that it'd unblock, see that it should stop, and exit cleanly. Here's the end result:
import os
import socket
from pathlib import Path
from shutil import rmtree
from threading import Thread


class MyThreadThing(Thread):

    RUNTIME_DIR = Path(os.getenv("XDG_RUNTIME_DIR", "/tmp")) / "my-project-name"

    def __init__(self):
        super().__init__(daemon=True)
        self.should_stop = False
        if self.RUNTIME_DIR.exists():
            rmtree(self.RUNTIME_DIR)
        self.RUNTIME_DIR.mkdir(0o700)
        self.socket_path = self.RUNTIME_DIR / "my-project.sock"

    def run(self) -> None:
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.bind(self.socket_path.as_posix())
            s.listen()
            while True:
                connection, __ = s.accept()
                action = ""
                with connection:
                    while True:
                        received = connection.recv(1024).decode()
                        action += received
                        if not received:
                            break
                # Handle whatever is in `action`
                if self.should_stop:
                    break
        self.socket_path.unlink()

    def stop(self):
        """
        Trigger this when you want to stop the listener.
        """
        self.should_stop = True
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.connect(self.socket_path.as_posix())
            s.send(b"")

Using arbitrary-length timeouts is always a bit unsatisfactory -- either you set the timeout to a relatively long time, in which case your program becomes slow to react to the quit-request because it is pointlessly waiting for the timeout period to expire... or you set the timeout to a relatively short time, in which case your program is constantly waking up to see if it should quit, wasting CPU power 24/7 to check for an event which might never arrive.
A more elegant way to deal with the problem is to create a pipe, and send a byte on the pipe when you want your event-loop to exit. Your event loop can simultaneously "watch" both the pipe's reading-end file-descriptor and your networking-socket(s) via select(), and when that file-descriptor indicates it is ready-for-read, your event loop can respond by exiting. This approach is entirely event-driven, so it requires no CPU wakeups except when there is actually something to do.
Below is an example version of your program that implements a signal-handler for SIGINT (aka pressing Control-C) that sends the please-quit-now byte on the pipe:
import socket, os
import select
import signal, sys

# Any bytes written to (writePipeFD) will become available for reading on (readPipeFD)
readPipeFD, writePipeFD = os.pipe()

# Set up a signal-handler to handle SIGINT (aka Ctrl+C) events by writing a byte to the pipe
def signal_handler(sig, frame):
    print("signal_handler() is executing -- SIGINT detected!")
    os.write(writePipeFD, b"\0")  # doesn't matter what we write; a single 0-byte will do

signal.signal(signal.SIGINT, signal_handler)

with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as serverSock:
    serverSock.bind("/tmp/sock")
    serverSock.listen()

    # Wait for incoming connection (or the please-quit signal, whichever comes first)
    connection = None
    while True:
        readReady, writeReady, exceptReady = select.select([readPipeFD, serverSock], [], [])
        if readPipeFD in readReady:
            print("accept-loop: Someone wrote a byte to the pipe; time to go away!")
            break
        if serverSock in readReady:
            connection, __ = serverSock.accept()
            break

    # Read data from incoming connection (or the please-quit signal, whichever comes first)
    if connection:
        with connection:
            while True:
                readReady, writeReady, exceptReady = select.select([readPipeFD, connection], [], [])
                if readPipeFD in readReady:
                    print("Connection-loop: Someone wrote a byte to the pipe; time to go away!")
                    break
                if connection in readReady:
                    data = connection.recv(1024)
                    print("Hi!")  # This line isn't executed 'til data is sent
                    if data:
                        print(data.decode())

print("Bye!")

Use a timeout identical to your LOOP_TIME like so:
import socket, os

LOOP_TIME = 10
should_stop = False

with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as server:
    server.bind("/tmp/sock")
    server.listen()
    connection, __ = server.accept()
    connection.settimeout(LOOP_TIME)
    with connection:
        while not should_stop:
            try:
                data = connection.recv(1024)
            except socket.timeout:
                continue
            print("Hi!")  # This line isn't executed 'til data is sent
            if data:
                print(data.decode())
You may use select, but if it's only a single simple socket, this way is a bit less complicated.
You can place it in a different thread that checks a self.should_stop flag, or just run it in the main thread - it will now respond to KeyboardInterrupt.

Related

non-blocking i/o using select

I have a sample client-server program that does non-blocking I/O for several sockets not using processes or threads. It uses select. Unfortunately, the server just shows lots of blank lines and that's all. Where is the mistake?
Running on macOS.
Thanks in advance.
Server:
import socket
import select

sock = socket.socket()
sock.bind(('', 10001))
sock.listen()
conn1, _ = sock.accept()
conn2, _ = sock.accept()
conn1.setblocking(0)
conn2.setblocking(0)

epoll = select.poll()
epoll.register(conn1.fileno(), select.POLLIN | select.POLLOUT)
epoll.register(conn2.fileno(), select.POLLIN | select.POLLOUT)

conn_map = {
    conn1.fileno(): conn1,
    conn2.fileno(): conn2,
}

while True:
    events = epoll.poll(1)
    for fileno, event in events:
        if event & select.POLLIN:
            data = conn_map[fileno].recv(1024)
            print(data.decode('utf8'))
        elif event & select.POLLOUT:
            conn_map[fileno].send('ping'.encode('utf8'))
Client:
import socket
from multiprocessing import Pool

def create_socket_and_send_data(number):
    with socket.create_connection(('127.0.0.1', 10001)) as sock:
        try:
            sock.sendall(f'client {number}\n'.encode('utf8'))
        except socket.error as ex:
            print('data sending error', ex)
        print(f'data for {number} has been sent')

if __name__ == '__main__':
    with Pool(processes=2) as pool:
        pool.map(create_socket_and_send_data, range(2))
Unfortunately, the server just shows lots of blank lines and that's all.
Actually this is not true.
At the beginning, the server prints the lines it got from the clients. After they've sent these lines, the clients close the connection, which means that select.POLLIN gets triggered again on the socket and recv returns empty data.
This empty data is the sign that the peer has closed the connection. Once it gets this sign, the server should close the connection to the client and remove the fileno from the poll object. Instead your server prints the empty string with a newline and continues to expect new POLLIN events. These will come again and again and will always return an empty buffer, thus leading to all the empty lines you see.
select is paradoxically easier to use for input than for output. For input, you receive an event each time new data arrives on a socket, so you always ask for all the sockets and have something to process for every new event.
For output, select will just say that a socket is ready to accept new data, which is almost always true except if you have just filled its buffer. So you should only poll a socket for output when you have something to write to it.
So you should register your sockets with select.POLLIN only. For the write part, either write to a socket directly without polling, if you can assume the peer will always be able to receive, or set up a queue of pending output per socket, change the polling state of a socket to select.POLLIN | select.POLLOUT when there is something in its queue, and change it back to select.POLLIN when the queue is empty again. A sketch of that queue-and-toggle pattern follows below.
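To make the last paragraph concrete, here is a rough sketch of that queue-and-toggle pattern. The poller, the per-socket queues and the helper names are made up for illustration; they are not part of the code above.
import select
from collections import deque

poller = select.poll()
out_queues = {}  # fileno -> deque of pending bytes, one queue per socket

def register(conn):
    # start out interested in reads only
    out_queues[conn.fileno()] = deque()
    poller.register(conn.fileno(), select.POLLIN)

def queue_send(conn, data):
    # something to write: start watching POLLOUT as well
    out_queues[conn.fileno()].append(data)
    poller.modify(conn.fileno(), select.POLLIN | select.POLLOUT)

def on_writable(conn):
    # called when poll() reports POLLOUT for this socket
    q = out_queues[conn.fileno()]
    if q:
        sent = conn.send(q[0])
        if sent == len(q[0]):
            q.popleft()
        else:
            q[0] = q[0][sent:]  # keep the unsent tail
    if not q:
        # queue drained: stop asking for POLLOUT events
        poller.modify(conn.fileno(), select.POLLIN)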

Python: How to interrupt raw_input() in other thread

I am writing a simple client-server program in python. In the client program, I am creating two threads (using Python's threading module), one for receiving, one for sending. The receiving thread continuously receives strings from the server side; while the sending thread continuously listens to the user input (using raw_input()) and send it to the server side. The two threads communicate using a Queue (which is natively synchronized, LIKE!).
The basic logic is like following:
Receiving thread:
global queue = Queue.Queue(0)

def run(self):
    while 1:
        receive a string from the server side
        if the string is QUIT signal:
            sys.exit()
        else:
            put it into the global queue
Sending thread:
def run(self):
    while 1:
        str = raw_input()
        send str to the server side
        fetch an element from the global queue
        deal with the element
As you can see, in the receiving thread, I have a if condition to test whether the server has sent a "QUIT signal" to the client. If it has, then I want the whole program to stop.
The problem here is that for most of its time, the sending thread is blocked by "raw_input()" and waiting for the user input. When it is blocked, calling "sys.exit()" from the other thread (receiving thread) will not terminate the sending thread immediately. The sending thread has to wait for the user to type something and hit the enter button.
Could anybody inspire me how to get around with this? I do not mind using alternatives of "raw_input()". Actually I do not even mind changing the whole structure.
-------------EDIT-------------
I am running this on a linux machine, and my Python version is 2.7.5
You could just make the sending thread daemonic:
send_thread = SendThread() # Assuming this inherits from threading.Thread
send_thread.daemon = True # This must be called before you call start()
The Python interpreter won't be blocked from exiting if the only threads left running are daemons. So, if the only thread left is send_thread, your program will exit, even if you're blocked on raw_input.
Note that this will terminate the sending thread abruptly, no matter what its doing. This could be dangerous if it accesses external resources that need to be cleaned up properly or shouldn't be interrupted (like writing to a file, for example). If you're doing anything like that, protect it with a threading.Lock, and only call sys.exit() from the receiving thread if you can acquire that same Lock.
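As a rough illustration of that Lock idea (the names here, including send_to_server, are made up for the example, and the syntax matches the question's Python 2): the sending thread holds the lock while it does anything that must not be interrupted, and the receiving thread only exits once it can acquire the same lock.
import sys
import threading

io_lock = threading.Lock()  # guards work that must not be interrupted

class SendThread(threading.Thread):
    def run(self):
        while True:
            text = raw_input()          # blocks, but the thread is a daemon
            with io_lock:
                # critical section: e.g. writing to a file or a socket
                send_to_server(text)    # hypothetical helper

def quit_from_receiver():
    # called by the receiving thread when it sees the QUIT signal
    with io_lock:                       # wait until the sender is not mid-write
        sys.exit()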
The short answer is you can't. input(), like a lot of such input commands, is blocking, and it stays blocked no matter what happens to the rest of the thread. You can sometimes call sys.exit() and get it to work depending on the OS, but it's not going to be consistent. Sometimes you can kill the program by deferring out to the local OS, but then you're not going to be widely cross-platform.
What you might want to consider in this situation is to funnel the functionality through sockets, because unlike input() we can do timeouts and threads and kill things rather easily. It also gives you the ability to handle multiple connections and maybe accept connections more broadly.
import socket
import time
from threading import Thread


def process(command, connection):
    print("Command Entered: %s" % command)
    # Any responses are written to connection.
    connection.send(bytes('>', 'utf-8'))


class ConsoleSocket:
    def __init__(self):
        self.keep_running_the_listening_thread = True
        self.data_buffer = ''
        Thread(target=self.tcp_listen_handle).start()

    def stop(self):
        self.keep_running_the_listening_thread = False

    def handle_tcp_connection_in_another_thread(self, connection, addr):
        def handle():
            while self.keep_running_the_listening_thread:
                try:
                    data_from_socket = connection.recv(1024)
                    if len(data_from_socket) != 0:
                        self.data_buffer += data_from_socket.decode('utf-8')
                    else:
                        break
                    while '\n' in self.data_buffer:
                        pos = self.data_buffer.find('\n')
                        command = self.data_buffer[0:pos].strip('\r')
                        self.data_buffer = self.data_buffer[pos + 1:]
                        process(command, connection)
                except socket.timeout:
                    continue
                except socket.error:
                    if connection is not None:
                        connection.close()
                    break
        Thread(target=handle).start()
        connection.send(bytes('>', 'utf-8'))

    def tcp_listen_handle(self, port=23, connects=5, timeout=2):
        """This is running in its own thread."""
        sock = socket.socket()
        sock.settimeout(timeout)
        sock.bind(('', port))
        sock.listen(connects)  # We accept more than one connection.
        while self.keep_running_the_listening_thread:
            connection = None
            try:
                connection, addr = sock.accept()
                address, port = addr
                if address != '127.0.0.1':  # Only permit localhost.
                    connection.close()
                    continue
                # makes a thread deals with that stuff. We only do listening.
                connection.settimeout(timeout)
                self.handle_tcp_connection_in_another_thread(connection, addr)
            except socket.timeout:
                pass
            except OSError:
                # Some other error.
                if connection is not None:
                    connection.close()
        sock.close()


c = ConsoleSocket()


def killsocket():
    time.sleep(20)
    c.stop()


Thread(target=killsocket).start()
This launches a listener thread for connections on port 23 (telnet); when you connect, it passes that connection off to another thread. It also starts a killsocket thread that shuts the various threads down and lets them die peacefully (for demonstration purposes). You can't, however, connect to localhost from within this same code, because you'd need input() to know what to send to the server, which recreates the problem.
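To drive the demo without recreating the input() problem, you can poke it from a separate throwaway client (or plain telnet 127.0.0.1 23). A minimal sketch, assuming the ConsoleSocket above is already running (binding port 23 may need elevated privileges):
import socket

with socket.create_connection(('127.0.0.1', 23)) as conn:
    conn.settimeout(2)
    print(conn.recv(1024).decode('utf-8'))  # the '>' prompt sent on connect
    conn.sendall(b'hello world\n')          # server prints "Command Entered: hello world"
    print(conn.recv(1024).decode('utf-8'))  # the '>' prompt sent by process()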

issues with socket programming - python

I am doing a client-server project for my college project; we have to allocate the login to the client.
The client system requests its status every 2 seconds (to check whether the client is locked or unlocked), and the server accepts the client's request and replies with the client's status.
But the problem is that the server thread is not responding to the client's requests.
CLIENT THREAD:
def checkPort():
    while True:
        try:
            s = socket.socket()
            s.connect((host, port))
            s.send('pc1')  # send PC name to the server
            status = s.recv(1024)  # receive the status from the server
            if status == "unlock":
                disableIntrrupts()  # enable all the functions of system
            else:
                enableInterrupts()  # enable all the functions of system
            time.sleep(5)
            s.close()
        except Exception:
            pass
SERVER THREAD:
def check_port():
    while True:
        try:
            print "hello loop is repeating"
            conn, addr = s.accept()
            data = conn.recv(1024)
            if exit_on_click == 1:
                break
            if (any(sublist[0] == data for sublist in available_sys)):
                print "locked"
                conn.send("lock")
            elif (any(sublist[0] == data for sublist in occupied_sys)):
                conn.send("unlock")
                print "unlocked"
            else:
                print "added to gui for first time"
                available_sys.append([data, addr[0], nameText, usnText, branchText])
                availSysList.insert('end', data)
        except Exception:
            pass
But my problem is that the server thread is not executing more than twice, so it's unable to accept a client request more than once.
Can't we handle multiple client sockets using a single server socket?
How do I handle multiple client requests on the server?
Thanks for any help!
It's because your server will block waiting for a new connection on this line
conn, addr = s.accept()
This is because calls like .accept and .recv are blocking calls that hold up the process.
You need to consider an alternative design, wherein you either:
Have one process per connection (this idea is stupid)
Have one thread per connection (this idea is less stupid than the first but still mostly foolish)
Have a non-blocking design that allows multiple clients and read/write without blocking execution (a minimal sketch of this follows the list below).
To achieve the first, look at multiprocessing; the second is threading; the third is slightly more complicated to get your head around but will yield the best results. The go-to library for event-driven code in Python is twisted, but there are others like
gevent
tulip
tornado
And so so many more that I haven't listed here.
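For a feel of the third option without pulling in a framework, here is a minimal select()-based sketch (illustrative only, not the poster's code) that serves several clients from a single thread:
import select
import socket

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('', 9999))  # illustrative port
server.listen(5)
server.setblocking(False)

sockets = [server]
while True:
    readable, _, _ = select.select(sockets, [], [])
    for sock in readable:
        if sock is server:
            conn, addr = server.accept()  # a new client is knocking
            conn.setblocking(False)
            sockets.append(conn)
        else:
            data = sock.recv(1024)
            if data:
                sock.sendall(b'unlock')   # reply without blocking the other clients
            else:
                sockets.remove(sock)      # empty read: the client went away
                sock.close()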
here's a full example of implementing a threaded server. it's fully functional and comes with the benefit of using SSL as well. further, i use threaded event objects to signal another class object after storing my received data in a database.
please note, _sni and _cams_db are additional modules purely of my own. if you want to see the _sni module (provides SNI support for pyOpenSSL), let me know.
what follows this, is a snippet from camsbot.py, there's a whole lot more that far exceeds the scope of this question. what i've built is a centralized message relay system. it listens to tcp/2345 and accepts SSL connections. each connection passes messages into the system. short lived connections will connect, pass message, and disconnect. long lived connections will pass numerous messages after connecting. messages are stored in a database and a threading.Event() object (attached to the DB class) is set to tell the bot to poll the database for new messages and relay them.
the below example shows
how to set up a threaded tcp server
how to pass information from the listener to the accept handler such as config data and etc
in addition, this example also shows
how to employ an SSL socket
how to do some basic certificate validations
how to cleanly wrap and unwrap SSL from a tcp socket
how to use poll() on the socket instead of select()
db.pending is a threading.Event() object in _cams_db.py
in the main process we start another thread that waits on the pending object with db.pending.wait(). this makes that thread wait until another thread does db.pending.set(). once it is set, our waiting thread immediately wakes up and continues to work. when our waiting thread is done, it calls db.pending.clear() and goes back to the beginning of the loop and starts waiting again with db.pending.wait()
while True:
    db.pending.wait()
    # after waking up, do code. for example, we wait for incoming messages to
    # be stored in the database. the threaded server will call db.pending.set()
    # which will wake us up. we'll poll the DB for new messages, relay them, clear
    # our event flag and go back to waiting.
    # ...
    db.pending.clear()
snippet from camsbot.py:
import sys, os, sys, time, datetime, threading, select, logging, logging.handlers
import configparser, traceback, re, socket, hashlib

# local .py
sys.path.append('/var/vse/python')
import _util, _webby, _sni, _cams_db, _cams_threaded_server, _cams_bot

# ...

def start_courier(config):
    # default values
    host = '::'
    port = 2345
    configp = config['configp']
    host = configp.get('main', 'relay msp hostport')
    # require ipv6 addresses be specified in [xx:xx:xx] notation, therefore
    # it is safe to look for :nnnn at the end
    if ':' in host and not host.endswith(']'):
        port = host.split(':')[-1]
        try:
            port = int(port, 10)
        except:
            port = 2345
        host = host.split(':')[:-1][0]
    server = _cams_threaded_server.ThreadedTCPServer((host, port), _cams_threaded_server.ThreadedTCPRequestHandler, config)
    t = threading.Thread(target=server.serve_forever, name='courier')
    t.start()
_cams_threaded_server.py:
import socket, socketserver, select, datetime, time, threading
import sys, struct
from OpenSSL.SSL import SSLv23_METHOD, SSLv3_METHOD, TLSv1_METHOD, OP_NO_SSLv2
from OpenSSL.SSL import VERIFY_NONE, VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, Context, Connection
from OpenSSL.SSL import FILETYPE_PEM
from OpenSSL.SSL import WantWriteError, WantReadError, WantX509LookupError, ZeroReturnError, SysCallError
from OpenSSL.crypto import load_certificate
from OpenSSL import SSL
# see note at beginning of answer
import _sni, _cams_db
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, server_address, HandlerClass, config):
socketserver.BaseServer.__init__(self, server_address, HandlerClass)
self.address_family = socket.AF_INET6
self.connected = []
self.logger = config['logger']
self.config = config
self.socket = socket.socket(self.address_family, self.socket_type)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sc = Context(TLSv1_METHOD)
sc.set_verify(VERIFY_PEER|VERIFY_FAIL_IF_NO_PEER_CERT, _sni.verify_cb)
sc.set_tlsext_servername_callback(_sni.pick_certificate)
self.sc = sc
self.server_bind()
self.server_activate()
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
config = self.server.config
logger = self.server.logger
connected = self.server.connected
sc = self.server.sc
try:
self.peer_hostname = socket.gethostbyaddr(socket.gethostbyname(self.request.getpeername()[0]))[0]
except:
self.peer_hostname = '!'+self.request.getpeername()[0]
logger.info('peer: {}'.format(self.peer_hostname))
ssl_s = Connection(sc, self.request)
ssl_s.set_accept_state()
try:
ssl_s.do_handshake()
except:
t,v,tb = sys.exc_info()
logger.warn('handshake failed {}'.format(v))
ssl_s.setblocking(True)
self.ssl_s = ssl_s
try:
peercert = ssl_s.get_peer_certificate()
except:
peercert = False
t,v,tb = sys.exc_info()
logger.warn('SSL get peer cert failed: {}'.format(v))
if not peercert:
logger.warn('No peer certificate')
else:
acl = config['configp']['main'].get('client cn acl', '').split(' ')
cert_subject = peercert.get_subject().CN
logger.info('Looking for {} in acl: {}'.format(cert_subject,acl))
if cert_subject in acl:
logger.info('{} is permitted'.format(cert_subject))
else:
logger.warn('''client CN not approved''')
# it's ok to block here, every socket has its own thread
ssl_s.setblocking(True)
self.db = config['db']
msgcount = 0
p = select.poll()
# don't want writable, just readable
p.register(self.request, select.POLLIN|select.POLLPRI|select.POLLERR|select.POLLHUP|select.POLLNVAL)
peername = ssl_s.getpeername()
x = peername[0]
if x.startswith('::ffff:'):
x = x[7:]
peer_ip = x
try:
host = socket.gethostbyaddr(x)[0]
except:
host = peer_ip
logger.info('{}/{}:{} connected'.format(host, peer_ip, peername[1]))
connected.append( [host, peername[1]] )
if peercert:
threading.current_thread().setName('{}/port={}/CN={}'.format(host, peername[1], peercert.get_subject().CN))
else:
threading.current_thread().setName('{}/port={}'.format(host, peername[1]))
sockclosed = False
while not sockclosed:
keepreading = True
#logger.debug('starting 30 second timeout for poll')
pe = p.poll(30.0)
if not pe:
# empty list means poll timeout
# for SSL sockets it means WTF. we get an EAGAIN like return even if the socket is blocking
continue
logger.debug('poll indicates: {}'.format(pe))
#define SSL_NOTHING 1
#define SSL_WRITING 2
#define SSL_READING 3
#define SSL_X509_LOOKUP 4
while keepreading and not sockclosed:
data,sockclosed,keepreading = self._read_ssl_data(2, head=True)
if sockclosed or not keepreading:
time.sleep(5)
continue
plen = struct.unpack('H', data)[0]
data,sockclosed,keepreading = self._read_ssl_data(plen)
if sockclosed or not keepreading:
time.sleep(5)
continue
# send thank you, ignore any errors since we appear to have gotten
# the message
try:
self.ssl_s.sendall(b'ty')
except:
pass
# extract the timestamp
message_ts = data[0:8]
msgtype = chr(data[8])
message = data[9:].decode()
message_ts = struct.unpack('d', message_ts)[0]
message_ts = datetime.datetime.utcfromtimestamp(message_ts).replace(tzinfo=datetime.timezone.utc)
self.db.enqueue(config['group'], peer_ip, msgtype, message, message_ts)
self.db.pending.set()
# we're recommended to use the return socket object for any future operations rather than the original
try:
s = ssl_s.unwrap()
s.close()
except:
pass
connected.remove( [host, peername[1]] )
t_name = threading.current_thread().getName()
logger.debug('disconnect: {}'.format(t_name))
def _read_ssl_data(self, wantsize=16384, head=False):
_w = ['WANT_NOTHING','WANT_READ','WANT_WRITE','WANT_X509_LOOKUP']
logger = self.server.logger
data = b''
sockclosed = False
keepreading = True
while len(data) < wantsize and keepreading and not sockclosed:
rlen = wantsize - len(data)
try:
w,wr = self.ssl_s.want(),self.ssl_s.want_read()
#logger.debug(' want({}) want_read({})'.format(_w[w],wr))
x = self.ssl_s.recv(rlen)
#logger.debug(' recv(): {}'.format(x))
if not ( x or len(x) ):
raise ZeroReturnError
data += x
if not (len(x) == len(data) == wantsize):
logger.info(' read={}, len(data)={}, plen={}'.format(len(x),len(data),wantsize))
except WantReadError:
# poll(), when ready, read more
keepreading = False
logger.info(' got WantReadError')
continue
except WantWriteError:
# poll(), when ready, write more
keepreading = False
logger.info(' got WantWriteError')
continue
except ZeroReturnError:
# socket got closed, a '0' bytes read also means the same thing
keepreading = False
sockclosed = True
logger.info(' ZRE, socket closed normally')
continue
except SysCallError:
keepreading = False
sockclosed = True
t,v,tb = sys.exc_info()
if v.args[0] == -1: # normal EOF
logger.info(' EOF found, keepreading=False')
else:
logger.info('{} terminated session abruptly while reading plen'.format(self.peer_hostname))
logger.info('t: {}'.format(t))
logger.info('v: {}'.format(v))
continue
except:
t,v,tb = sys.exc_info()
logger.warning(' fucked? {}'.format(v))
raise
if not head and not len(data) == wantsize:
logger.warn(' short read {} of {}'.format(len(data), wantsize))
return data,sockclosed,keepreading
let's start with a bare bones threaded tcp server.
import socket
import socketserver
import threading


class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    def __init__(self, server_address, HandlerClass):
        socketserver.BaseServer.__init__(self, server_address, HandlerClass)
        self.address_family = socket.AF_INET
        self.socket = socket.socket(self.address_family, self.socket_type)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server_bind()
        self.server_activate()


class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
    def handle(self):
        # self.request is your accepted socket, do all your .recv() and .sendall() on it
        s = self.request
        request = s.recv(1024)
        # decide locked or unlocked. this example arbitrarily writes back 'locked'
        s.sendall(b'locked')
        # we're done, close the socket and exit with a default return of None
        s.close()
ok, start your threaded server with this in your main() function:
server = ThreadedTCPServer(('127.0.0.1', 1234), ThreadedTCPRequestHandler)
t = threading.Thread(target=server.serve_forever, name='optional_name')
t.start()
now you can let the threading module handle the semantics of concurrency and not worry about it.
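to smoke-test it, a throwaway client along the lines of the question's checkPort() (host and port are the ones used above) is enough:
import socket

with socket.create_connection(('127.0.0.1', 1234)) as s:
    s.sendall(b'pc1')    # send the PC name, as the question's client does
    print(s.recv(1024))  # e.g. b'locked'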
You might want to take a look at 0MQ and concurrent.futures. 0MQ has a Tornado event loop in the library and it reduces the complexity of socket programming. concurrent.futures is a high level interface over threading or multiprocessing.
You can see different concurrent server approaches at
https://bitbucket.org/arco_group/upper/src
These will help you choose the approach that fits your case best.
Cheers

Thread synchronization in Python

I am currently working on a school project where the assignment, among other things, is to set up a threaded server/client system. Each client in the system is supposed to be assigned its own thread on the server when connecting to it. In addition, I would like the server to run other threads, one concerning input from the command line and another concerning broadcasting messages to all clients. However, I can't get this to run as I want to. It seems like the threads are blocking each other. I would like my program to take input from the command line at the "same time" as the server listens to connected clients, and so on.
I am new to Python programming and multithreading, and although I think my idea is good, I'm not surprised my code doesn't work. The thing is, I'm not exactly sure how I'm going to implement the message passing between the different threads, nor am I sure exactly how to implement the resource locking properly. I'm going to post the code for my server file and my client file here, and I hope someone can help me with this. I think these should actually be two relatively simple scripts. I have tried to comment my code as well as possible.
import select
import socket
import sys
import threading
import client
class Server:
#initializing server socket
def __init__(self, event):
self.host = 'localhost'
self.port = 50000
self.backlog = 5
self.size = 1024
self.server = None
self.server_running = False
self.listen_threads = []
self.local_threads = []
self.clients = []
self.serverSocketLock = None
self.cmdLock = None
#here i have also declared some events for the command line input
#and the receive function respectively, not sure if correct
self.cmd_event = event
self.socket_event = event
def openSocket(self):
#binding server to port
try:
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.bind((self.host, self.port))
self.server.listen(5)
print "Listening to port " + str(self.port) + "..."
except socket.error, (value,message):
if self.server:
self.server.close()
print "Could not open socket: " + message
sys.exit(1)
def run(self):
self.openSocket()
#making Rlocks for the socket and for the command line input
self.serverSocketLock = threading.RLock()
self.cmdLock = threading.RLock()
#set blocking to non-blocking
self.server.setblocking(0)
#making two threads always running on the server,
#one for the command line input, and one for broadcasting (sending)
cmd_thread = threading.Thread(target=self.server_cmd)
broadcast_thread = threading.Thread(target=self.broadcast,args=[self.clients])
cmd_thread.daemon = True
broadcast_thread.daemon = True
#append the threads to thread list
self.local_threads.append(cmd_thread)
self.local_threads.append(broadcast_thread)
cmd_thread.start()
broadcast_thread.start()
self.server_running = True
while self.server_running:
#connecting to "knocking" clients
try:
c = client.Client(self.server.accept())
self.clients.append(c)
print "Client " + str(c.address) + " connected"
#making a thread for each clientn and appending it to client list
listen_thread = threading.Thread(target=self.listenToClient,args=[c])
self.listen_threads.append(listen_thread)
listen_thread.daemon = True
listen_thread.start()
#setting event "client has connected"
self.socket_event.set()
except socket.error, (value, message):
continue
#close threads
self.server.close()
print "Closing client threads"
for c in self.listen_threads:
c.join()
def listenToClient(self, c):
while self.server_running:
#the idea here is to wait until the thread gets the message "client
#has connected"
self.socket_event.wait()
#then clear the event immidiately...
self.socket_event.clear()
#and aquire the socket resource
self.serverSocketLock.acquire()
#the below is the receive thingy
try:
recvd_data = c.client.recv(self.size)
if recvd_data == "" or recvd_data == "close\n":
print "Client " + str(c.address) + (" disconnected...")
self.socket_event.clear()
self.serverSocketLock.release()
return
print recvd_data
#I put these here to avoid locking the resource if no message
#has been received
self.socket_event.clear()
self.serverSocketLock.release()
except socket.error, (value, message):
continue
def server_cmd(self):
#this is a simple command line utility
while self.server_running:
#got to have a smart way to make this work
self.cmd_event.wait()
self.cmd_event.clear()
self.cmdLock.acquire()
cmd = sys.stdin.readline()
if cmd == "":
continue
if cmd == "close\n":
print "Server shutting down..."
self.server_running = False
self.cmdLock.release()
def broadcast(self, clients):
while self.server_running:
#this function will broadcast a message received from one
#client, to all other clients, but i guess any thread
#aspects applied to the above, will work here also
try:
send_data = sys.stdin.readline()
if send_data == "":
continue
else:
for c in clients:
c.client.send(send_data)
self.serverSocketLock.release()
self.cmdLock.release()
except socket.error, (value, message):
continue
if __name__ == "__main__":
e = threading.Event()
s = Server(e)
s.run()
And then the client file
import select
import socket
import sys
import server
import threading
class Client(threading.Thread):
#initializing client socket
def __init__(self,(client,address)):
threading.Thread.__init__(self)
self.client = client
self.address = address
self.size = 1024
self.client_running = False
self.running_threads = []
self.ClientSocketLock = None
def run(self):
#connect to server
self.client.connect(('localhost',50000))
#making a lock for the socket resource
self.clientSocketLock = threading.Lock()
self.client.setblocking(0)
self.client_running = True
#making two threads, one for receiving messages from server...
listen = threading.Thread(target=self.listenToServer)
#...and one for sending messages to server
speak = threading.Thread(target=self.speakToServer)
#not actually sure wat daemon means
listen.daemon = True
speak.daemon = True
#appending the threads to the thread-list
self.running_threads.append(listen)
self.running_threads.append(speak)
listen.start()
speak.start()
#this while-loop is just for avoiding the script terminating
while self.client_running:
dummy = 1
#closing the threads if the client goes down
print "Client operating on its own"
self.client.close()
#close threads
for t in self.running_threads:
t.join()
return
#defining "listen"-function
def listenToServer(self):
while self.client_running:
#here i acquire the socket to this function, but i realize I also
#should have a message passing wait()-function or something
#somewhere
self.clientSocketLock.acquire()
try:
data_recvd = self.client.recv(self.size)
print data_recvd
except socket.error, (value,message):
continue
#releasing the socket resource
self.clientSocketLock.release()
#defining "speak"-function, doing much the same as for the above function
def speakToServer(self):
while self.client_running:
self.clientSocketLock.acquire()
try:
send_data = sys.stdin.readline()
if send_data == "close\n":
print "Disconnecting..."
self.client_running = False
else:
self.client.send(send_data)
except socket.error, (value,message):
continue
self.clientSocketLock.release()
if __name__ == "__main__":
c = Client((socket.socket(socket.AF_INET, socket.SOCK_STREAM),'localhost'))
c.run()
I realize this is quite a few code lines for you to read through, but as I said, I think the concept and the script in it self should be quite simple to understand. It would be very much appriciated if someone could help me synchronize my threads in a proper way =)
Thanks in advance
---Edit---
OK. So I have now simplified my code to contain just send and receive functions in both the server and the client modules. The clients connecting to the server get their own threads, and the send and receive functions in both modules operate in their own separate threads. This works like a charm, with the broadcast function in the server module echoing strings it gets from one client to all clients. So far so good!
The next thing I want my script to do is take specific commands, i.e. "close", in the client module to shut down the client and join all running threads in the thread list. I'm using an event flag to notify the listenToServer and the main thread that the speakToServer thread has read the input "close". It seems like the main thread jumps out of its while loop and starts the for loop that is supposed to join the other threads. But here it hangs. It seems like the while loop in the listenToServer thread never stops even though client_running should be set to False when the event flag is set.
I'm posting only the client module here, because I guess an answer that gets these two threads to synchronize will also relate to synchronizing more threads in both the client and the server modules.
import select
import socket
import sys
import server_bygg0203
import threading
from time import sleep
class Client(threading.Thread):
#initializing client socket
def __init__(self,(client,address)):
threading.Thread.__init__(self)
self.client = client
self.address = address
self.size = 1024
self.client_running = False
self.running_threads = []
self.ClientSocketLock = None
self.disconnected = threading.Event()
def run(self):
#connect to server
self.client.connect(('localhost',50000))
#self.client.setblocking(0)
self.client_running = True
#making two threads, one for receiving messages from server...
listen = threading.Thread(target=self.listenToServer)
#...and one for sending messages to server
speak = threading.Thread(target=self.speakToServer)
#not actually sure what daemon means
listen.daemon = True
speak.daemon = True
#appending the threads to the thread-list
self.running_threads.append((listen,"listen"))
self.running_threads.append((speak, "speak"))
listen.start()
speak.start()
while self.client_running:
#check if event is set, and if it is
#set while statement to false
if self.disconnected.isSet():
self.client_running = False
#closing the threads if the client goes down
print "Client operating on its own"
self.client.shutdown(1)
self.client.close()
#close threads
#the script hangs at the for-loop below, and
#refuses to close the listen-thread (and possibly
#also the speak thread, but it never gets that far)
for t in self.running_threads:
print "Waiting for " + t[1] + " to close..."
t[0].join()
self.disconnected.clear()
return
#defining "speak"-function
def speakToServer(self):
#sends strings to server
while self.client_running:
try:
send_data = sys.stdin.readline()
self.client.send(send_data)
#I want the "close" command
#to set an event flag, which is being read by all other threads,
#and, at the same time set the while statement to false
if send_data == "close\n":
print "Disconnecting..."
self.disconnected.set()
self.client_running = False
except socket.error, (value,message):
continue
return
#defining "listen"-function
def listenToServer(self):
#receives strings from server
while self.client_running:
#check if event is set, and if it is
#set while statement to false
if self.disconnected.isSet():
self.client_running = False
try:
data_recvd = self.client.recv(self.size)
print data_recvd
except socket.error, (value,message):
continue
return
if __name__ == "__main__":
c = Client((socket.socket(socket.AF_INET, socket.SOCK_STREAM),'localhost'))
c.run()
Later on, when I get this server/client system up and running, I will use this system on some elevator models we have here on the lab, with each client receiving floor orders or "up" and "down" calls. The server will be running an distribution algorithm and updating the elevator queues on the clients that are most appropriate for the requested order. I realize it's a long way to go, but I guess one should just take one step at the time =)
Hope someone has the time to look into this. Thanks in advance.
The biggest problem I see with this code is that you have far too much going on right away to easily debug your problem. Threading can get extremely complicated because of how non-linear the logic becomes. Especially when you have to worry about synchronizing with locks.
The reason you are seeing clients blocking on each other is because of the way you are using your serverSocketLock in your listenToClient() loop in the server. To be honest this isn't exactly your problem right now with your code, but it became the problem when I started to debug it and turned the sockets into blocking sockets. If you are putting each connection into its own thread and reading from them, then there is no reason to use a global server lock here. They can all read from their own sockets at the same time, which is the purpose of the thread.
Here is my recommendation to you:
Get rid of all the locks and extra threads that you don't need, and start from the beginning
Have the clients connect as you do, and put them in their thread as you do. And simply have them send data every second. Verify that you can get more than one client connecting and sending, and that your server is looping and receiving. Once you have this part working, you can move on to the next part.
Right now you have your sockets set to non-blocking. This is causing them all to spin really fast over their loops when data is not ready. Since you are threading, you should set them to block. Then the reader threads will simply sit and wait for data and respond immediately.
Locks are used when threads will be accessing shared resources. You obviously need one any time a thread will try to modify a server attribute like a list or a value, but not when threads are working on their own private sockets.
The event you are using to trigger your readers doesn't seem necessary here. You have received the client, and you start the thread afterwards. So it is ready to go.
In a nutshell... simplify and test one bit at a time. When it's working, add more. There are too many threads and locks right now.
Here is a simplified example of your listenToClient method:
def listenToClient(self, c):
    while self.server_running:
        try:
            recvd_data = c.client.recv(self.size)
            print "received:", c, recvd_data
            if recvd_data == "" or recvd_data == "close\n":
                print "Client " + str(c.address) + (" disconnected...")
                return
            print recvd_data
        except socket.error, (value, message):
            if value == 35:
                continue
            else:
                print "Error:", value, message
Back up your work, then toss it - partially.
You need to implement your program in pieces, and test each piece as you go. First, tackle the input part of your program. Don't worry about how to broadcast the input you received. Instead worry that you are able to successfully and repeatedly receive input over your socket. So far - so good.
Now, I assume you would like to react to this input by broadcasting to the other attached clients. Well too bad, you can't do that yet! Because I left one minor detail out of the paragraph above: you have to design a PROTOCOL.
What is a protocol? It's a set of rules for communication. How does your server know when the client has finished sending its data? Is it terminated by some special character? Or perhaps you encode the size of the message to be sent as the first byte or two of the message.
This is turning out to be a lot of work, isn't it? :-)
What's a simple protocol? A line-oriented protocol is simple: read 1 character at a time until you get to the end-of-record terminator - '\n'. So, clients would send records like this to your server --
HELO\n
MSG DAVE Where Are Your Kids?\n
So, assuming you have this simple protocol designed, implement it. For now, DON'T WORRY ABOUT THE MULTITHREADING STUFF! Just worry about making it work.
Your current protocol is to read 1024 bytes, which may not be bad; just make sure you send 1024-byte messages from the client.
Once you have the protocol stuff set up, move on to reacting to the input. But for now you need something that will read input. Once that is done, we can worry about doing something with it.
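For illustration only (not your code), a line-oriented reader boils down to buffering whatever recv() hands you and peeling off complete records at each '\n', since recv() may return partial lines or several lines at once:
def read_lines(conn):
    """Yield complete newline-terminated records from a connected socket."""
    buf = b''
    while True:
        chunk = conn.recv(1024)
        if not chunk:
            break                       # peer closed the connection
        buf += chunk
        while b'\n' in buf:
            line, buf = buf.split(b'\n', 1)
            yield line                  # one complete record, minus the '\n'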
jdi is right, you have too much program to work with. Pieces are easier to fix.

How to tell if a connection is dead in python

I want my python application to be able to tell when the socket on the other side has been dropped. Is there a method for this?
Short answer:
use a non-blocking recv(), or a blocking recv() / select() with a very
short timeout.
Long answer:
The way to handle socket connections is to read or write as you need to, and be prepared to handle connection errors.
TCP distinguishes between 3 forms of "dropping" a connection: timeout, reset, close.
Of these, the timeout can not really be detected; TCP might only tell you the time has not expired yet. But even if it told you that, the time might still expire right after.
Also remember that using shutdown() either you or your peer (the other end of the connection) may close only the incoming byte stream, and keep the outgoing byte stream running, or close the outgoing stream and keep the incoming one running.
So strictly speaking, you want to check if the read stream is closed, or if the write stream is closed, or if both are closed.
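As an aside, a half-close looks like this in Python (a generic sketch, unrelated to the rest of this answer): the client finishes sending, shuts down only its outgoing stream, and keeps reading the reply.
import socket

with socket.create_connection(('example.com', 80)) as s:
    s.sendall(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
    s.shutdown(socket.SHUT_WR)     # close our outgoing byte stream only
    reply = b''
    while True:
        chunk = s.recv(4096)       # the incoming byte stream still works
        if not chunk:              # peer finished and closed its side
            break
        reply += chunk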
Even if the connection was "dropped", you should still be able to read any data that is still in the network buffer. Only after the buffer is empty will you receive a disconnect from recv().
Checking if the connection was dropped is like asking "what will I receive after reading all data that is currently buffered ?" To find that out, you just have to read all data that is currently bufferred.
I can see how "reading all buffered data", to get to the end of it, might be a problem for some people, that still think of recv() as a blocking function. With a blocking recv(), "checking" for a read when the buffer is already empty will block, which defeats the purpose of "checking".
In my opinion any function that is documented to potentially block the entire process indefinitely is a design flaw, but I guess it is still there for historical reasons, from when using a socket just like a regular file descriptor was a cool idea.
What you can do is:
set the socket to non-blocking mode, but then you get a system-dependent error to indicate the receive buffer is empty, or the send buffer is full
stick to blocking mode but set a very short socket timeout. This will allow you to "ping" or "check" the socket with recv(), pretty much what you want to do
use the select() call or the asyncore module with a very short timeout. Error reporting is still system-specific (a short sketch of this follows below).
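A short sketch of the second/third option, "pinging" a socket you believe to be idle (this is my own illustration; the MSG_PEEK flag keeps the check from consuming real data):
import select
import socket

def peer_seems_gone(sock, timeout=0.05):
    # best-effort check: True only if a read is ready and yields b''
    ready, _, _ = select.select([sock], [], [], timeout)
    if not ready:
        return False                          # nothing to read; can't tell, assume alive
    try:
        data = sock.recv(1, socket.MSG_PEEK)  # peek, don't consume
    except socket.error:
        return True                           # reset or similar error: treat as dropped
    return data == b''                        # b'' after readiness means orderly close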
For the write part of the problem, keeping the read buffers empty pretty much covers it. You will discover a connection "dropped" after a non-blocking read attempt, and you may choose to stop sending anything after a read returns a closed channel.
I guess the only way to be sure your sent data has reached the other end (and is not still in the send buffer) is either:
receive a proper response on the same socket for the exact message that you sent. Basically you are using the higher level protocol to provide confirmation.
perform a successful shutdown() and close() on the socket
The Python socket HOWTO says send() will return 0 bytes written if the channel is closed. You may use a non-blocking or a timeout socket.send(), and if it returns 0 you can no longer send data on that socket. But if it returns non-zero, you have already sent something - good luck with that :)
Also here I have not considered OOB (out-of-band) socket data here as a means to approach your problem, but I think OOB was not what you meant.
It depends on what you mean by "dropped". For TCP sockets, if the other end closes the connection either through
close() or the process terminating, you'll find out by reading an end of file, or getting a read error, usually the errno being set to whatever 'connection reset by peer' is by your operating system. For python, you'll read a zero length string, or a socket.error will be thrown when you try to read or write from the socket.
From the link Jweede posted:
exception socket.timeout:
This exception is raised when a timeout occurs on a socket
which has had timeouts enabled via a prior call to settimeout().
The accompanying value is a string whose value is currently
always “timed out”.
Here are the demo server and client programs for the socket module from the python docs
# Echo server program
import socket

HOST = ''      # Symbolic name meaning all available interfaces
PORT = 50007   # Arbitrary non-privileged port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
conn, addr = s.accept()
print 'Connected by', addr
while 1:
    data = conn.recv(1024)
    if not data: break
    conn.send(data)
conn.close()
And the client:
# Echo client program
import socket

HOST = 'daring.cwi.nl'   # The remote host
PORT = 50007             # The same port as used by the server
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
s.send('Hello, world')
data = s.recv(1024)
s.close()
print 'Received', repr(data)
On the docs example page I pulled these from, there are more complex examples that employ this idea, but here is the simple answer:
Assuming you're writing the client program, just put all the code that uses the socket while it is at risk of being dropped inside a try block...
try:
    s.connect((HOST, PORT))
    s.send("Hello, World!")
    ...
except socket.timeout:
    # whatever you need to do when the connection is dropped
If I'm not mistaken this is usually handled via a timeout.
I translated the code sample in this blog post into Python: How to detect when the client closes the connection?, and it works well for me:
from ctypes import (
CDLL, c_int, POINTER, Structure, c_void_p, c_size_t,
c_short, c_ssize_t, c_char, ARRAY
)
__all__ = 'is_remote_alive',
class pollfd(Structure):
_fields_ = (
('fd', c_int),
('events', c_short),
('revents', c_short),
)
MSG_DONTWAIT = 0x40
MSG_PEEK = 0x02
EPOLLIN = 0x001
EPOLLPRI = 0x002
EPOLLRDNORM = 0x040
libc = CDLL('libc.so.6')
recv = libc.recv
recv.restype = c_ssize_t
recv.argtypes = c_int, c_void_p, c_size_t, c_int
poll = libc.poll
poll.restype = c_int
poll.argtypes = POINTER(pollfd), c_int, c_int
class IsRemoteAlive: # not needed, only for debugging
def __init__(self, alive, msg):
self.alive = alive
self.msg = msg
def __str__(self):
return self.msg
def __repr__(self):
return 'IsRemoteAlive(%r,%r)' % (self.alive, self.msg)
def __bool__(self):
return self.alive
def is_remote_alive(fd):
fileno = getattr(fd, 'fileno', None)
if fileno is not None:
if hasattr(fileno, '__call__'):
fd = fileno()
else:
fd = fileno
p = pollfd(fd=fd, events=EPOLLIN|EPOLLPRI|EPOLLRDNORM, revents=0)
result = poll(p, 1, 0)
if not result:
return IsRemoteAlive(True, 'empty')
buf = ARRAY(c_char, 1)()
result = recv(fd, buf, len(buf), MSG_DONTWAIT|MSG_PEEK)
if result > 0:
return IsRemoteAlive(True, 'readable')
elif result == 0:
return IsRemoteAlive(False, 'closed')
else:
return IsRemoteAlive(False, 'errored')
Trying to improve on kay's response, I made a more Pythonic version.
(Note that it has not yet been tested in a "real-life" environment, and only on Linux.)
This detects if the remote side closed the connection, without actually consuming the data:
import socket
import errno
def remote_connection_closed(sock: socket.socket) -> bool:
    """
    Returns True if the remote side did close the connection
    """
    try:
        buf = sock.recv(1, socket.MSG_PEEK | socket.MSG_DONTWAIT)
        if buf == b'':
            return True
    except BlockingIOError as exc:
        if exc.errno != errno.EAGAIN:
            # Raise on unknown exception
            raise
    return False
Here is a simple example from an asyncio echo server:
import asyncio


async def handle_echo(reader, writer):
    addr = writer.get_extra_info('peername')
    sock = writer.get_extra_info('socket')
    print(f'New client: {addr!r}')

    # Initial of client command
    data = await reader.read(100)
    message = data.decode()
    print(f"Received {message!r} from {addr!r}")

    # Simulate a long async process
    for _ in range(10):
        if remote_connection_closed(sock):
            print('Remote side closed early')
            return
        await asyncio.sleep(1)

    # Write the initial message back
    print(f"Send: {message!r}")
    writer.write(data)
    await writer.drain()
    writer.close()


async def main():
    server = await asyncio.start_server(
        handle_echo, '127.0.0.1', 8888)
    addrs = ', '.join(str(sock.getsockname()) for sock in server.sockets)
    print(f'Serving on {addrs}')
    async with server:
        await server.serve_forever()


if __name__ == '__main__':
    asyncio.run(main())
