I'd like to be able to respond to a GET by immediately returning a message and then starting a background process to finish a longer task. This works fine with BaseHTTPServer.HTTPServer and extending BaseHTTPRequestHandler. However, when I follow the instructions here to add SSL to the server, the response isn't sent until the subprocess finishes. Assuming that Firefox displays what it receives as it receives it, and double-checking with Firebug, it looks like nothing at all is sent until the subprocess completes. I tested in Chrome and saw the same result. Code below; I'm at a loss for how to continue debugging.
class SecureHTTPServer(HTTPServer):
    def __init__(self, server_address, HandlerClass):
        BaseServer.__init__(self, server_address, HandlerClass)
        ctx = SSL.Context(SSL.SSLv3_METHOD)
        ctx.use_privatekey_file(SSL_CERT)
        ctx.use_certificate_file(SSL_CERT)
        self.socket = SSL.Connection(ctx, socket.socket(self.address_family, self.socket_type))
        self.server_bind()
        self.server_activate()

    def shutdown_request(self, request):
        request.shutdown()
class RecordingHandler(BaseHTTPRequestHandler):
    def setup(self):
        self.connection = self.request
        self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
        self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)

    def do_GET(self):
        key = 'abc'
        p = Process(target=some_func, args=(key,))
        # some_func takes ~15 seconds to complete, and it logs its progress,
        # so it's clear that the response is only sent after some_func finishes.
        p.daemon = True
        p.start()
        self.send_response(200)
        self.end_headers()
        self.wfile.write('recording key ' + key)
def run():
    try:
        server = SecureHTTPServer(('', 8080), RecordingHandler)
        print 'Waiting for input on port 8080.'
        server.serve_forever()
    except KeyboardInterrupt:
        print 'Received keyboard interrupt, exiting.'
        server.shutdown()
Apparently I just needed to give up, restart, and try googling things again. I found this solution:
import BaseHTTPServer, SimpleHTTPServer
import ssl
httpd = BaseHTTPServer.HTTPServer(('localhost', 4443), SimpleHTTPServer.SimpleHTTPRequestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket, certfile='path/to/localhost.pem', server_side=True)
httpd.serve_forever()
I still have no idea what was wrong with the previous setup, but this works just fine.
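For reference, ssl.wrap_socket was deprecated in Python 3.7 and later removed, so on current Python 3 the equivalent setup goes through ssl.SSLContext. A rough sketch (http.server replaces the old BaseHTTPServer/SimpleHTTPServer modules; 'path/to/localhost.pem' is assumed to hold both the key and the certificate):
from http.server import HTTPServer, SimpleHTTPRequestHandler
import ssl

httpd = HTTPServer(('localhost', 4443), SimpleHTTPRequestHandler)
# Build a server-side TLS context and load the combined key/certificate file.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(certfile='path/to/localhost.pem')
httpd.socket = ctx.wrap_socket(httpd.socket, server_side=True)
httpd.serve_forever()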
I am trying to build a simple server-client model to do a file transfer task. My server.py and client.py look like this:
<Server.py>
import socket
s = socket.socket()
host = socket.gethostname()
port = 1717
s.bind((host, port))
s.listen(1)
print(host)
print("Waiting for the client ...")
conn, addr = s.accept()
print(addr, "Connected!")
filename = "My file name"
file = open(filename, 'rb')
file_data = file.read(2048)
conn.send(file_data)
print("File has been sent to server.")
s.close()
<Client.py>
import socket
import time
time.sleep(3)
s = socket.socket()
host = "ubuntu"
port = 1717
s.connect((host, port))
print("Connected ....")
filename = "My file name"
file = open(filename, 'wb')
file_data = s.recv(2048)
file.write(file_data)
file.close()
print("File has been received.")
Also, I wrote a shell script to run the server and the client, because I only avoid errors if the server runs before the client. My shell script looks something like this:
python3 ./some_path/server.py &
python3 ./some_path/client.py $n
Notice that I also added the time.sleep(3) at the beginning of my client.py because I found that the shell script does not guarantee that the server runs first. That problem is now resolved; however, I get an 'Address already in use' error from s.bind() in server.py every time I try to run the whole thing a second time.
That is, if I boot my Ubuntu machine and run the shell script, it works and everything is fine as expected. But when it finishes and I run it again, I get 'Address already in use'.
So my questions are:
How do I solve this, so that I can test the functionality without rebooting the whole computer?
Is there a more sophisticated way to make client.py always run after server.py than my time.sleep() approach?
Is there a more sophisticated way to get the hostname instead of specifying it in advance? As you can see from client.py, I basically set the host to "ubuntu" because that's what I get if I print the hostname on the server side.
Thank you so much for reading these long questions... I just want to make things as clear as possible.
I would much appreciate it if you could answer any one of my questions or even give some suggestions.
By the way, I am testing all of this on an Ubuntu 14.04 machine.
Firstly, you need to close the socket in the client as well.
Secondly, you should call shutdown before closing the socket.
Please see this answer: https://stackoverflow.com/a/598759/6625498
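A minimal sketch of what that looks like at the end of client.py, assuming the socket is named s as in the question:
# After the file has been written, shut the connection down, then close the socket.
s.shutdown(socket.SHUT_RDWR)
s.close()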
Try rebooting the system entirely.
The error may mean that the process is still running.
How to solve this, so that I test the functionalities without rebooting the whole computer.
Please run this command in your shell script if you get the 'Address already in use' message:
sudo killall -9 python3
And then run your server and client.
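A less drastic option (not part of this answer, but a common fix for the same error) is to let the server socket rebind to a port that is still in TIME_WAIT by setting SO_REUSEADDR before bind() in server.py:
import socket

s = socket.socket()
# Allow rebinding to a port left in TIME_WAIT by a previous run.
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((socket.gethostname(), 1717))
s.listen(1)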
Are there any more sophisticated ways to make client.py always run after server.py than my time.sleep() way?
Please use this code.
server.py
import socket
import threading
import socketserver
import binascii  # needed for hexlify below (not shown in the original snippet)

socketserver.TCPServer.allow_reuse_address = True

__all__ = ['server']

class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
    def handle(self):
        cur_thread = threading.current_thread()
        requests = self.server.requests
        if self.request not in requests:
            requests[self.request] = {'client_id': cur_thread.ident,
                                      'client_address': self.client_address[0],
                                      'client_port': self.client_address[1]}
        if callable(self.server.onConnected):
            self.server.onConnected(self.request, self.server)
        while True:
            try:
                # my_constant.MSG_MAX_SIZE is assumed to be defined elsewhere.
                buffer = self.request.recv(my_constant.MSG_MAX_SIZE)
                if not buffer:
                    break
                buffer = str(binascii.hexlify(buffer))
                buffer = [buffer[i:i + 2] for i in range(2, len(buffer) - 1, 2)]
                self.server.onData(buffer, self.server, self.request)  # process receive function
            except socket.error:
                print(str(socket.error))
                break
        if callable(self.server.onDisconnected) and (self.request in requests):
            self.server.onDisconnected(self.request, self.server)
        self.request.close()

class server(socketserver.ThreadingTCPServer):
    def __init__(self, host='', port=16838, *args, **kwargs):
        socketserver.ThreadingTCPServer.__init__(self, (host, port), ThreadedTCPRequestHandler)
        self.requests = {}
        self.server_thread = threading.Thread(target=self.serve_forever)
        self.server_thread.setDaemon(True)
        self.server_thread.start()
        self.onConnected = None
        self.onDisconnected = None
        self.onData = None

    def stop(self):
        self.quote_send_thread_stop = True
        for request in list(self.requests):
            self.shutdown_request(request)
            if self.onDisconnected:
                self.onDisconnected(request, self)
        self.shutdown()
        self.server_close()

    def broadcast(self, data):
        for request in list(self.requests):
            try:
                request.sendall(data)
            except socket.error:
                print(str(socket.error))
                del self.requests[request]

    def send(self, request, data):
        try:
            request.sendall(data)
        except socket.error:
            print(str(socket.error))
            del self.requests[request]

    def sendRaw(self, client_id, data):
        pass

    def disconnect(self, client_id):
        for request in list(self.requests):
            if client_id == self.requests[request]['client_id']:
                self.shutdown_request(request)
                if self.onDisconnected:
                    self.onDisconnected(request, self)
                else:
                    del self.requests[request]

def onConnected(request, server):
    try:
        print('[onConnected] client_address: ' + str(server.requests[request]))
    except Exception as e:
        print(str(e))

def onDisconnected(request, server):
    try:
        print('[onDisconnected] client_address: ' + str(server.requests[request]))
        del server.requests[request]
    except Exception as e:
        print(str(e))

def onData(request, server):
    # define your message processing here
    pass
main.py
his_server = server.server(server_host, server_port)
his_server.onConnected = server.onConnected
his_server.onDisconnected = server.onDisconnected
his_server.onData = server.onData
client.py
import socket
import time
from common.constant import *
from threading import Thread
import binascii
from .packet import *
import threading

def recv_msg(sock):
    while True:
        try:
            res = sock.recv(buf_size)
            if not res:
                continue
            buffer = str(binascii.hexlify(res))
            buffer = [buffer[i:i + 2] for i in range(2, len(buffer) - 1, 2)]
            # packet parsing; you may need to change this part.
            packet_parsing(buffer)
            time.sleep(0.100)
        except socket.error:
            print(str(socket.error))
            break

class history_thread(threading.Thread):
    def __init__(self, threadID, name, delay, server, port):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.delay = delay
        self.server = server
        self.port = port
        self.sock = None

    def run(self):
        print("Starting " + self.name)
        while True:
            try:
                self.sock = socket.socket()
                self.sock.connect((self.server, self.port))
                tc = Thread(target=recv_msg, args=(self.sock,))
                tc.start()
                threads = []
                threads.append(tc)
                for pip in threads:
                    pip.join()
                self.sock.close()
                self.sock = None
            except socket.error:
                print(str(socket.error))
                if self.sock is not None:
                    self.sock.close()
                    self.sock = None
            time.sleep(self.delay)

    def send(self, data):
        if self.sock is None:
            return -1
        try:
            self.sock.sendall(data)
        except:
            print(str(socket.error))
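A usage sketch for the client side (my own assumption about how the pieces fit together; the thread id, name, delay, host and port below are placeholders): because run() keeps retrying the connection every delay seconds, the client no longer depends on the server starting first, which removes the need for the time.sleep(3) workaround.
# Hypothetical usage: keeps reconnecting until the server is reachable.
client_thread = history_thread(1, "client-1", 3, "localhost", 16838)
client_thread.start()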
I have a scenario where I need to first respond with HTTP 200 to a server request (due to a time limit) and then continue processing with the actual work.
I also cannot use threads, processes, tasks, queues, or any other method that would allow me to do this by starting a parallel "process".
My approach is to use the built-in "Simple HTTP" server, and I am looking for a way to force the server to respond with HTTP 200 and then be able to continue processing.
The current code receives a POST request and prints its content after 3 seconds. I put a placeholder where I would like to send the response.
from http.server import BaseHTTPRequestHandler, HTTPServer
import time

class MyWebServer(BaseHTTPRequestHandler):
    def do_POST(self):
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        self.send_response_only(200)
        self.end_headers()
        # force server to send the response ???
        time.sleep(3)
        print(post_data)

def run(server_class=HTTPServer, handler_class=MyWebServer, port=8000):
    server_address = ('', port)
    httpd = server_class(server_address, handler_class)
    print('Starting httpd...')
    httpd.serve_forever()

if __name__ == "__main__":
    run()
I figured out a workaround solution. You can force the server to send a 200 OK and continue processing afterwards with these two calls:
self.finish()
self.connection.close()
This solution is from this SO question: SimpleHTTPRequestHandler close connection before returning from do_POST method
However, this will apparently close the internal I/O buffer that the server uses, and it won't be able to serve any additional requests after that.
To avoid running into an exception, it works to terminate the program afterwards (which is fine for my use case). However, this is just a workaround, and I would still be looking for a solution that allows the server to keep processing new requests.
from http.server import BaseHTTPRequestHandler, HTTPServer
import time

class MyHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        self.send_response_only(200)
        self.end_headers()
        self.finish()
        self.connection.close()
        time.sleep(3)
        print(post_data)
        quit()

def run(server_class=HTTPServer, handler_class=MyHandler, port=8000):
    server_address = ('', port)
    httpd = server_class(server_address, handler_class)
    print('Starting httpd...')
    httpd.serve_forever()

if __name__ == "__main__":
    run()
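Another option worth trying (my own sketch, not from the original post): send a response that declares Content-Length, so the client can treat it as complete as soon as the empty body has arrived. The handler can then do its slow work afterwards without closing anything or calling quit(), and the server stays usable for later requests (it still handles them one at a time):
from http.server import BaseHTTPRequestHandler, HTTPServer
import time

class DeferredWorkHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        # A self-delimiting response: with Content-Length: 0 the client
        # does not need to wait for the connection to close.
        self.send_response_only(200)
        self.send_header('Content-Length', '0')
        self.end_headers()
        self.wfile.flush()
        # The slow work happens after the response is already on the wire.
        time.sleep(3)
        print(post_data)

if __name__ == "__main__":
    HTTPServer(('', 8000), DeferredWorkHandler).serve_forever()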
I have a simple multithreaded server, but it creates a new thread for each socket, and I don't want to create a lot of threads. My idea is to receive the messages in another way: when a user sends a message, the server adds it to a queue of messages, and a thread pool handles these requests.
The simple multithreaded server:
import socket
import threading

class ThreadedServer(object):
    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.host, self.port))

    def listen(self):
        self.sock.listen(5)
        while True:
            client, address = self.sock.accept()
            client.settimeout(60)
            threading.Thread(target=self.listenToClient, args=(client, address)).start()

    def listenToClient(self, client, address):
        size = 1024
        while True:
            try:
                data = client.recv(size)
                if data:
                    # Set the response to echo back the received data
                    response = data
                    client.send(response)
                else:
                    raise error('Client disconnected')
            except:
                client.close()
                return False

if __name__ == "__main__":
    port_num = input("Port? ")
    ThreadedServer('', port_num).listen()
How can I implement my idea, or is there a better way to do it?
The question seems to be pretty old, but I also stumbled upon the same issue while working on a socket server, so here is code you can use to make a threaded socket server that doesn't spawn a new thread for every incoming connection.
To give the gist: the ThreadingMixIn class is overridden with a thread pool.
import socket
import threading
import socketserver
from queue import Queue  # imports needed to run this snippet on its own

class ThreadPoolMixIn(socketserver.ThreadingMixIn):
    '''
    use a thread pool instead of a new thread on every request
    '''
    # numThreads = 50
    allow_reuse_address = True  # seems to fix socket.error on server restart

    def serve_forever(self):
        '''
        Handle one request at a time until doomsday.
        '''
        print('[X] Server is Running with No of Threads :- {}'.format(self.numThreads))
        # set up the threadpool
        self.requests = Queue(self.numThreads)
        for x in range(self.numThreads):
            t = threading.Thread(target=self.process_request_thread)
            t.setDaemon(1)
            t.start()
        # server main loop
        while True:
            self.handle_request()
        self.server_close()

    def process_request_thread(self):
        '''
        obtain request from queue instead of directly from server socket
        '''
        while True:
            socketserver.ThreadingMixIn.process_request_thread(self, *self.requests.get())

    def handle_request(self):
        '''
        simply collect requests and put them on the queue for the workers.
        '''
        try:
            request, client_address = self.get_request()
        except socket.error:
            return
        if self.verify_request(request, client_address):
            self.requests.put((request, client_address))
The mix-in is then used by ThreadedTCPServer, together with the ThreadedTCPRequestHandler, and the numThreads parameter is overridden there:
class ThreadedTCPServer(ThreadPoolMixIn, socketserver.TCPServer):
    # Extend the base class and override the thread parameter to control the number of threads.
    def __init__(self, no_of_threads, server_address, ThreadedTCPRequestHandler):
        self.numThreads = no_of_threads
        super().__init__(server_address, ThreadedTCPRequestHandler)
Finally, create the server, which serves forever:
def create_multi_threaded_socket(CONFIG, HandlerClass=ThreadedTCPRequestHandler,
                                 ServerClass=ThreadedTCPServer,
                                 protocol="HTTP/1.0"):
    server_address = ('', CONFIG.port)
    HandlerClass.protocol_version = protocol
    # httpd = ServerClass(server_address, HandlerClass)
    server = ThreadedTCPServer(CONFIG.no_of_threads, server_address, ThreadedTCPRequestHandler)
    sa = server.socket.getsockname()
    print("Serving HTTP on {} port : {}".format(sa[0], sa[1]))
    server.serve_forever()
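Note that the recipe never shows ThreadedTCPRequestHandler itself; a minimal placeholder, assuming a plain echo handler is enough for testing, could look like this:
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
    def handle(self):
        # Minimal echo handler so the thread-pool server has something to run.
        data = self.request.recv(1024)
        if data:
            self.request.sendall(data)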
I got the sample code from :
http://code.activestate.com/recipes/574454-thread-pool-mixin-class-for-use-with-socketservert/
I modified it a bit according to my needs.
Hope this helps :) .
I am trying to learn how to use sockets and a useful asynchronous backend. I've started in Python with asyncore. After reading various online posts I've written a very simple chat server and connection client, reproduced below.
It seems to work. I open a python interactive session and type
> import chatserver
> server = chatserver.EchoServer('localhost', 7667)
> server.serve()
Then I open another IPython interactive session and type
> import chatserver
> cxn = chatserver.Connection()
> cxn._connect('localhost', 7667)
When I do that, I get a log output in the server window indicating that a connection has been made. Good. Then I type
> cxn.say('hi')
Nothing happens for a while, and then log messages show up for the server and client as expected.
Why is this delay occurring?
Am I using the log functionality correctly?
I used threading so that I could use the interactive session while the asyncore loop does its thing for the Connection. Did I do this in a reasonable way?
(optional) If I don't include the line self.out_buffer = "" in the Connection._connect function, I get an error saying that .out_buffer does not exist. What's up with this?
import asyncore
import socket
import logging
import threading

logging.basicConfig(level=logging.DEBUG, format="%(created)-15s %(msecs)d %(levelname)8s %(thread)d %(name)s %(message)s")
log = logging.getLogger(__name__)

class Connection(asyncore.dispatcher_with_send):
    def __init__(self):
        asyncore.dispatcher.__init__(self)

    def _connect(self, host, port, timeout=5, password=None):
        self.host = host
        self.port = port
        self.out_buffer = ""
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect((host, port))
        # Run the asyncore loop in its own thread so that we can use the interactive session
        self.loop = threading.Thread(target=asyncore.loop)
        self.loop.daemon = True
        self.loop.start()

    def say(self, msg):
        self.out_buffer = msg

    def handle_read(self):
        data = self.recv(4096)
        log.debug('Received %s' % data)

class EchoHandler(asyncore.dispatcher_with_send):
    def handle_read(self):
        log.debug("handle_read")
        data = self.recv(1024)
        log.debug("after recv")
        if data:
            log.debug("got data: %s" % data)
            self.out_buffer = data
        else:
            log.debug("got null data")

class EchoServer(asyncore.dispatcher):
    SOCKET_TYPE = socket.SOCK_STREAM
    ADDRESS_FAMILY = socket.AF_INET

    def __init__(self, host, port):
        self.address = (host, port)
        asyncore.dispatcher.__init__(self)
        self.create_socket(self.ADDRESS_FAMILY, self.SOCKET_TYPE)
        log.debug("bind address=%s %s" % (host, port))
        self.bind(self.address)
        self.listen(1)

    def fileno(self):
        return self.socket.fileno()

    def serve(self):
        asyncore.loop()
        # Start asyncore loop in new thread
        # self.loop = threading.Thread(target=asyncore.loop)
        # self.loop.daemon = True
        # self.loop.start()

    def handle_accept(self):
        """Deal with a newly accepted client"""
        (connSock, clientAddress) = self.accept()
        log.info("conn made: clientAddress=%s %s" % (clientAddress[0], clientAddress[1]))
        # Make a handler for this connection
        EchoHandler(connSock)

    def handle_close(self):
        self.close()
Looking at the asyncore docs, you are relying on asyncore.dispatcher_with_send to call send(), and the default timeout for asyncore.loop() is 30 seconds. This may explain the delay.
It turns out the problem was as Eero suggested.
I made two changes:
In EchoServer
asyncore.loop() to asyncore.loop(timeout=0.1)
In Connection
self.loop = threading.Thread(target=asyncore.loop) to self.loop = threading.Thread(target=asyncore.loop, kwargs={'timeout':0.1})
The response is now much faster. This seems like a hack though so if someone can explain a way to get the same effect in a proper way please contribute.
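One guess at a cleaner fix (not from the original answers): asyncore.dispatcher_with_send.send() appends to out_buffer and then immediately tries to transmit it, so calling it from say() should push the data out without waiting for the loop's select() timeout to expire:
def say(self, msg):
    # dispatcher_with_send.send() buffers the data and attempts to send it
    # right away, instead of waiting for the next pass of asyncore.loop().
    self.send(msg)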
I am trying to program a TCPServer with threads (ThreadingMixIn) in Python. The problem is that I can't shut it down properly: I get socket.error: [Errno 48] Address already in use when I try to run it again. This is a minimal example of the Python code that triggers the problem:
import socket
import threading
import SocketServer

class FakeNetio230aHandler(SocketServer.BaseRequestHandler):
    def send(self, message):
        self.request.send(message + N_LINE_ENDING)

    def handle(self):
        self.request.send("Hello\n")

class FakeNetio230a(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    def __init__(self, server_address, RequestHandlerClass):
        self.allow_reuse_address = True
        SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)

if __name__ == '__main__':
    for i in range(2):
        fake_server = FakeNetio230a(("", 1234), FakeNetio230aHandler)
        server_thread = threading.Thread(target=fake_server.serve_forever)
        server_thread.setDaemon(True)
        server_thread.start()
        # might add some client connection here
        fake_server.shutdown()
All the main code should do is start the server, shut it down, and run it again. But it triggers the error stated above because the socket has not been released after the first shutdown.
I thought that setting self.allow_reuse_address = True would solve the problem, but that did not work. When the Python program finishes I can run it again straight away, and it can start the server once (but, again, not twice).
However, the problem goes away when I randomize the port (replace 1234 with 1234+i, for example), because then no other server is listening on that address.
There is a similar SO question, Shutting down gracefully from ThreadingTCPServer, but the solution (setting allow_reuse_address to True) does not work for my code, and I don't use ThreadingTCPServer.
How do I have to modify my code in order to be able to start the server twice within my program?
Some more information: the reason I'm doing this is that I want to run some unit tests for my Python project. This requires providing a (fake) server that my software can connect to.
edit:
I just found the most correct answer to my problem: I have to add fake_server.server_close() at the end of my main execution code (right after fake_server.shutdown()). I found it in the source file of the TCPServer implementation; all it does is call self.socket.close().
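A minimal sketch of that fix applied to the loop from the question:
if __name__ == '__main__':
    for i in range(2):
        fake_server = FakeNetio230a(("", 1234), FakeNetio230aHandler)
        server_thread = threading.Thread(target=fake_server.serve_forever)
        server_thread.setDaemon(True)
        server_thread.start()
        # might add some client connection here
        fake_server.shutdown()
        fake_server.server_close()  # release the listening socket so it can be rebound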
Somehow, fake_server doesn't unbind when you assign to it (in the first line of the for statement).
To fix that, just delete fake_server at the end of the loop:
del fake_server # force server to unbind
This post helped me get over the un-closed socket problem.
I had the same problem and wanted to post my simple implementation of a TCP server class (and a client method) here.
I made a TCPThreadedServer class. To use it, it needs to be inherited from, and the method process(msg) must be overridden. The overridden method is invoked every time the server gets a message msg, and if it returns an object that is not None, that object is sent back to the connected client as a string.
from SocketServer import TCPServer, StreamRequestHandler, ThreadingMixIn
import threading

class TCPThreadedServer(TCPServer, ThreadingMixIn):
    class RequstHandler(StreamRequestHandler):
        def handle(self):
            msg = self.rfile.readline().strip()
            reply = self.server.process(msg)
            if reply is not None:
                self.wfile.write(str(reply) + '\n')

    def __init__(self, host, port, name=None):
        self.allow_reuse_address = True
        TCPServer.__init__(self, (host, port), self.RequstHandler)
        if name is None:
            name = "%s:%s" % (host, port)
        self.name = name
        self.poll_interval = 0.5

    def process(self, msg):
        """
        Should be overridden.
        Process a message.
        msg    - string containing a received message
        return - if a not-None object is returned, it will be sent back
                 to the client.
        """
        raise NotImplementedError

    def serve_forever(self, poll_interval=0.5):
        self.poll_interval = poll_interval
        self.trd = threading.Thread(target=TCPServer.serve_forever,
                                    args=[self, self.poll_interval],
                                    name="PyServer-" + self.name)
        self.trd.start()

    def shutdown(self):
        TCPServer.shutdown(self)
        TCPServer.server_close(self)
        self.trd.join()
        del self.trd
I found it quite easy to use:
class EchoServerExample(TCPThreadedServer):
    def __init__(self):
        TCPThreadedServer.__init__(self, "localhost", 1234, "Server")

    def process(self, data):
        print "EchoServer Got: " + data
        return str.upper(data)

for i in range(10):
    echo = EchoServerExample()
    echo.serve_forever()
    response = client("localhost", 1234, "hi-%i" % i)
    print "Client received: " + response
    echo.shutdown()
I used the method:
import socket

def client(ip, port, msg, recv_len=4096,
           timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
    msg = str(msg)
    response = None
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((ip, port))
        if timeout != socket._GLOBAL_DEFAULT_TIMEOUT:
            sock.settimeout(timeout)
        sock.send(msg + "\n")
        if recv_len > 0:
            response = sock.recv(recv_len)
    finally:
        sock.close()
    return response
Enjoy it!
Change your FakeNetio230a definition to this:
class FakeNetio230a(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    def __init__(self, server_address, RequestHandlerClass):
        self.allow_reuse_address = True
        SocketServer.TCPServer.__init__(self,
                                        server_address,
                                        RequestHandlerClass,
                                        False)  # do not implicitly bind
Then, add these two lines in your entry point below your FakeNetio230a instantiation:
fake_server.server_bind() # explicitly bind
fake_server.server_activate() # activate the server
Here's an example:
if __name__ == '__main__':
    for i in range(2):
        fake_server = FakeNetio230a(("", 1234), FakeNetio230aHandler)
        fake_server.server_bind()      # explicitly bind
        fake_server.server_activate()  # activate the server
        server_thread = threading.Thread(target=fake_server.serve_forever)
        server_thread.setDaemon(True)
        server_thread.start()
        # might add some client connection here
        fake_server.shutdown()