Creating a Delayed Response in a Python 2 HTTP Server - python

I'm creating a very simple HTTP server in Python 2 for testing.
I would like to randomly delay the reply to a GET request by a fixed amount, without closing the connection or shutting down the server first.
Here is the current code I would like to modify:
# Only runs under Python 2
import BaseHTTPServer
import time
from datetime import datetime

class SimpleRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    def do_GET(self):
        print "incoming request: " + self.path
        self.wfile.write('HTTP/1.0 200 OK\r\n\r\n')
        self.wfile.write(modes(self.path))

def run(server_class=BaseHTTPServer.HTTPServer,
        handler_class=SimpleRequestHandler):
    server_address = ('', 80)
    httpd = server_class(server_address, handler_class)
    httpd.serve_forever()

def modes(argument):
    # returns value based on time of day to simulate data
    curr_time = datetime.now()
    seconds = int(curr_time.strftime('%S'))
    if seconds < 20:
        return "reply1"
    elif seconds >= 20 and seconds < 40:
        return "reply2"
    else:
        return "reply3"

run()
The delay would be added every, say, 3rd or 4th time a GET is received, but when the delay times out, a properly formed response would still be sent (roughly along the lines sketched below).
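Something like this is the kind of behavior I have in mind (an untested sketch; the one-in-three probability and the 5-second pause are just placeholders):

import random

class SimpleRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    def do_GET(self):
        # roughly one request in three gets held back by a fixed 5 seconds,
        # then the normal, well-formed response is sent as usual
        if random.random() < 1.0 / 3:
            time.sleep(5)
        self.wfile.write('HTTP/1.0 200 OK\r\n\r\n')
        self.wfile.write(modes(self.path))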
Thanks.

Related

How to fetch date and time from internet [duplicate]

I need to get the time for the UK from an NTP server. I found examples online, but whenever I try the code I always get back a date and time identical to my computer's. I changed the time on my computer to confirm this, and the result followed it, so it's not coming from the NTP server.
import ntplib
from time import ctime
c = ntplib.NTPClient()
response = c.request('uk.pool.ntp.org', version=3)
response.offset
print (ctime(response.tx_time))
print (ntplib.ref_id_to_text(response.ref_id))
x = ntplib.NTPClient()
print ((x.request('ch.pool.ntp.org').tx_time))
This will work (Python 3):
import socket
import struct
import sys
import time

def RequestTimefromNtp(addr='0.de.pool.ntp.org'):
    REF_TIME_1970 = 2208988800  # Reference time
    client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    data = b'\x1b' + 47 * b'\0'
    client.sendto(data, (addr, 123))
    data, address = client.recvfrom(1024)
    if data:
        t = struct.unpack('!12I', data)[10]
        t -= REF_TIME_1970
        return time.ctime(t), t

if __name__ == "__main__":
    print(RequestTimefromNtp())
The NTP server returns the timestamp in seconds.
ctime() formats it using the local machine's timezone settings by default. So for the UK timezone you need to convert tx_time using that timezone. Python's built-in datetime module contains functions for this purpose:
import ntplib
from datetime import datetime, timezone
c = ntplib.NTPClient()
# Provide the respective ntp server ip in below function
response = c.request('uk.pool.ntp.org', version=3)
response.offset
print (datetime.fromtimestamp(response.tx_time, timezone.utc))
The UTC timezone is used here. For working with other timezones you can use the pytz library.
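For instance, a small sketch of converting tx_time to UK local time (assuming pytz is installed; the server name is taken from the question):

import ntplib
import pytz
from datetime import datetime

c = ntplib.NTPClient()
response = c.request('uk.pool.ntp.org', version=3)
# build an aware datetime in the Europe/London zone from the NTP timestamp
uk_time = datetime.fromtimestamp(response.tx_time, pytz.timezone('Europe/London'))
print(uk_time)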
This is basically Ahmad's answer, but working for me on Python 3. I'm currently keen on Arrow for simplifying time handling, and then you get:
import arrow
import socket
import struct
import sys

def RequestTimefromNtp(addr='0.de.pool.ntp.org'):
    REF_TIME_1970 = 2208988800  # Reference time
    client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    data = b'\x1b' + 47 * b'\0'
    client.sendto(data, (addr, 123))
    data, address = client.recvfrom(1024)
    if data:
        t = struct.unpack('!12I', data)[10]
        t -= REF_TIME_1970
        return arrow.get(t)

print(RequestTimefromNtp())
The following function works well on Python 3:
import ntplib
import datetime
from time import ctime

def GetNTPDateTime(server):
    ntpDate = None
    try:
        client = ntplib.NTPClient()
        response = client.request(server, version=3)
        ntpDate = ctime(response.tx_time)
        print (ntpDate)
    except Exception as e:
        print (e)
    return datetime.datetime.strptime(ntpDate, "%a %b %d %H:%M:%S %Y")
I used an ntplib server to get the date and then changed the format to dd-mm-yyyy.
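For example, one way to get the dd-mm-yyyy form from the value returned above (a small illustrative snippet):

dt = GetNTPDateTime('uk.pool.ntp.org')
print(dt.strftime('%d-%m-%Y'))  # e.g. 25-12-2023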

How to run 2 different loops in 2 different threads?

I'm building a telemetry application using Azure IoT Hub, the Azure IoT SDK in Python, and a Raspberry Pi with temperature and humidity sensors.
Humidity + Temperature sensors => Raspberry Pi => Azure IoT Hub
In my first implementation, following the Azure examples, I used one loop that collects data from the temperature sensor and the humidity sensor and sends both to Azure IoT Hub at the same time, every 60 seconds.
>>> 1 Loop every 60s = Collect data & send data of temperature and humidity
Now I would like to send them at different frequencies, I mean:
One loop will collect the data of the temperature sensor and send it to Azure IoT Hub every 60 seconds;
Whereas a second loop will collect the data of the humidity sensor and send it to Azure IoT Hub every 600 seconds.
>>> 1 Loop every 60s= Collect data & send data of temperature
>>> 2 Loop every 600s= Collect data & send data of humidity
I think the tool I need is multi-threading, but I don't understand which library or structure I have to implement in my case.
Here is the code provided by Azure, with one loop that handles temperature and humidity together, reading the data and sending it to Azure every 60 seconds:
import random
import time
import sys

# Using the Python Device SDK for IoT Hub:
from iothub_client import IoTHubClient, IoTHubClientError, \
    IoTHubTransportProvider, IoTHubClientResult
from iothub_client import IoTHubMessage, IoTHubMessageDispositionResult, \
    IoTHubError, DeviceMethodReturnValue

# The device connection string to authenticate the device with your IoT hub.
CONNECTION_STRING = "{Your IoT hub device connection string}"

# Using the MQTT protocol.
PROTOCOL = IoTHubTransportProvider.MQTT
MESSAGE_TIMEOUT = 10000

# Define the JSON message to send to IoT Hub.
TEMPERATURE = 20.0
HUMIDITY = 60
MSG_TXT = "{\"temperature\": %.2f,\"humidity\": %.2f}"

def send_confirmation_callback(message, result, user_context):
    print ( "IoT Hub responded to message with status: %s" % (result) )

def iothub_client_init():
    # Create an IoT Hub client
    client = IoTHubClient(CONNECTION_STRING, PROTOCOL)
    return client

def iothub_client_telemetry_sample_run():
    try:
        client = iothub_client_init()
        print ( "IoT Hub device sending periodic messages, press Ctrl-C to exit" )
        # ******************LOOP*******************************
        while True:
            # Build the message with simulated telemetry values.
            temperature = TEMPERATURE + (random.random() * 15)
            humidity = HUMIDITY + (random.random() * 20)
            msg_txt_formatted = MSG_TXT % (temperature, humidity)
            message = IoTHubMessage(msg_txt_formatted)
            # Send the message.
            print( "Sending message: %s" % message.get_string() )
            client.send_event_async(message, send_confirmation_callback, None)
            time.sleep(60)
    except IoTHubError as iothub_error:
        print ( "Unexpected error %s from IoTHub" % iothub_error )
        return
    except KeyboardInterrupt:
        print ( "IoTHubClient sample stopped" )

if __name__ == '__main__':
    print ( "IoT Hub Quickstart #1 - Simulated device" )
    print ( "Press Ctrl-C to exit" )
    iothub_client_telemetry_sample_run()
I would like to use the same structure of functions, but with two loops that handle temperature and humidity, one every 60 s and one every 600 s:
while True:
    # Build the message with simulated telemetry values.
    temperature = TEMPERATURE + (random.random() * 15)
    msg_txt_formatted1 = MSG_TXT1 % (temperature)
    message1 = IoTHubMessage(msg_txt_formatted1)
    # Send the message.
    print( "Sending message: %s" % message1.get_string() )
    client.send_event_async(message1, send_confirmation_callback, None)
    time.sleep(60)

while True:
    # Build the message with simulated telemetry values.
    humidity = HUMIDITY + (random.random() * 20)
    msg_txt_formatted2 = MSG_TXT2 % (humidity)
    message2 = IoTHubMessage(msg_txt_formatted2)
    # Send the message.
    print( "Sending message: %s" % message2.get_string() )
    client.send_event_async(message2, send_confirmation_callback, None)
    time.sleep(600)
How can I do that? How to call those loops with multi-threading or another method?
It may be simpler to do something like
while True:
    loop_b()
    for _ in range(10):
        loop_a()
        time.sleep(60)
or even
while True:
    time.sleep(1)
    # truncate to whole seconds so the modulo check can actually hit zero
    now = int(time.time())
    if now % 60 == 0:
        loop_a()
    if now % 600 == 0:
        loop_b()
But if you really want to use threads, then:
import threading

class LoopAThread(threading.Thread):
    def run(self):
        loop_a()

class LoopBThread(threading.Thread):
    def run(self):
        loop_b()

...

thread_a = LoopAThread()
thread_b = LoopBThread()
thread_a.start()
thread_b.start()
thread_a.join()
thread_b.join()
Here are two competing approaches to consider
Don't bother with threads at all. Just have one loop that sleeps every 60 seconds like you have now. Keep track of the last time you sent humidity data. If 600 seconds has passed, then send it. Otherwise, skip it and go to sleep for 60 seconds. Something like this:
from datetime import datetime, timedelta

def iothub_client_telemetry_sample_run():
    last_humidity_run = None
    humidity_period = timedelta(seconds=600)
    client = iothub_client_init()
    while True:
        now = datetime.now()
        send_temperature_data(client)
        if not last_humidity_run or now - last_humidity_run >= humidity_period:
            send_humidity_data(client)
            last_humidity_run = now
        time.sleep(60)
Rename iothub_client_telemetry_sample_run to temperature_thread_func or something like it. Create a separate function that looks just like it for humidity. Spawn two threads from the main function of your program. Set them to daemon mode so they shut down when the user exits:
from threading import Thread

def temperature_thread_func():
    client = iothub_client_init()
    while True:
        send_temperature_data(client)
        time.sleep(60)

def humidity_thread_func():
    client = iothub_client_init()
    while True:
        send_humidity_data(client)
        time.sleep(600)

if __name__ == '__main__':
    temp_thread = Thread(target=temperature_thread_func)
    temp_thread.daemon = True
    temp_thread.start()

    humidity_thread = Thread(target=humidity_thread_func)
    humidity_thread.daemon = True
    humidity_thread.start()

    input('Polling for data. Press a key to exit')
Notes:
- If you decide to use threads, consider using an event to terminate them cleanly (a sketch follows below).
- time.sleep is not a precise way to keep time. You might need a different timing mechanism if the samples need to be taken at precise moments.
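A minimal sketch of the event-based shutdown, reusing the hypothetical send_temperature_data and iothub_client_init helpers from above:

from threading import Thread, Event

stop_event = Event()

def temperature_thread_func():
    client = iothub_client_init()
    while not stop_event.is_set():
        send_temperature_data(client)
        # wait() returns early as soon as the event is set,
        # so the thread does not sit out a full 60-second sleep
        stop_event.wait(60)

if __name__ == '__main__':
    temp_thread = Thread(target=temperature_thread_func)
    temp_thread.start()
    try:
        input('Polling for data. Press Enter to exit')
    finally:
        stop_event.set()    # ask the thread to stop
        temp_thread.join()  # wait for it to finish cleanly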

Redis: # of channels degrading latency. How to prevent degradation?

pub.py
import redis
import datetime
import time
import json
import sys
import threading
import gevent
from gevent import monkey
monkey.patch_all()
def main(chan):
redis_host = '10.235.13.29'
r = redis.client.StrictRedis(host=redis_host, port=6379)
while True:
def getpkg():
package = {'time': time.time(),
'signature' : 'content'
}
return package
#test 2: complex data
now = json.dumps(getpkg())
# send it
r.publish(chan, now)
print 'Sending {0}'.format(now)
print 'data type is %s' % type(now)
time.sleep(1)
def zerg_rush(n):
for x in range(n):
t = threading.Thread(target=main, args=(x,))
t.setDaemon(True)
t.start()
if __name__ == '__main__':
num_of_chan = 10
zerg_rush(num_of_chan)
cnt = 0
stop_cnt = 21
while True:
print 'Waiting'
cnt += 1
if cnt == stop_cnt:
sys.exit(0)
time.sleep(30)
sub.py
import redis
import threading
import time
import json
import gevent
from gevent import monkey
monkey.patch_all()

def callback(ind):
    redis_host = '10.235.13.29'
    r = redis.client.StrictRedis(host=redis_host, port=6379)
    sub = r.pubsub()
    sub.subscribe(str(ind))
    start = False
    avg = 0
    tot = 0
    sum = 0
    while True:
        for m in sub.listen():
            if not start:
                start = True
                continue
            got_time = time.time()
            decoded = json.loads(m['data'])
            sent_time = float(decoded['time'])
            dur = got_time - sent_time
            tot += 1
            sum += dur
            avg = sum / tot

            print decoded  # 'Recieved: {0}'.format(m['data'])
            file_name = 'logs/sub_%s' % ind
            f = open(file_name, 'a')
            f.write('processing no. %s' % tot)
            f.write('it took %s' % dur)
            f.write('current avg: %s\n' % avg)
            f.close()

def zerg_rush(n):
    for x in range(n):
        t = threading.Thread(target=callback, args=(x,))
        t.setDaemon(True)
        t.start()

def main():
    num_of_chan = 10
    zerg_rush(num_of_chan)
    while True:
        print 'Waiting'
        time.sleep(30)

if __name__ == '__main__':
    main()
I am testing Redis pub/sub to replace the use of rsh to communicate with remote boxes.
One of the things I tested is how the number of channels affects the latency of publish and pubsub.listen().
Test: one publisher and one subscriber per channel (the publisher publishes every second). I increased the number of channels and observed the latency (the duration from the moment the publisher publishes a message to the moment the subscriber gets the message via listen).
num of chan    avg latency in seconds
 10            0.004453
 50            0.005246
100            0.0155
200            0.0221
300            0.0621
Note: tested on a 2 CPU + 4 GB RAM + 1 NIC RHEL 6.4 VM.
What can I do to maintain low latency with a high number of channels?
Redis is single-threaded, so adding more CPUs won't help. Maybe more RAM? If so, how much more?
Is there anything I can do code-wise, or is the bottleneck in Redis itself?
Maybe the limitation comes from the way my test code is written, with threading?
EDIT:
Redis Cluster vs ZeroMQ in Pub/Sub, for horizontally scaled distributed systems
Accepted answer says "You want to minimize latency, I guess. The number of channels is irrelevant. The key factors are the number of publishers and number of subscribers, message size, number of messages per second per publisher, number of messages received by each subscriber, roughly. ZeroMQ can do several million small messages per second from one node to another; your bottleneck will be the network long before it's the software. Most high-volume pubsub architectures therefore use something like PGM multicast, which ZeroMQ supports."
From my testing, I don't know if this is true (the claim that the number of channels is irrelevant).
For example, I did a test:
1) One channel. 100 publishers publishing to a channel with 1 subscriber listening. Each publisher publishes once per second. Latency was 0.00965 seconds.
2) The same test except with 1000 publishers. Latency was 0.00808 seconds.
Now compare with my channel testing:
300 channels with 1 pub - 1 sub resulted in 0.0621 seconds, and that is only 600 connections, fewer than in the test above, yet the latency is significantly worse.

Benchmarking tool using twisted

I am trying to write a web benchmarking tool based on Twisted. Twisted is a fantastic asynchronous framework for web applications. Because I've only been using it for two weeks, I've run into a problem:
When I compare this benchmarking tool with ApacheBench, the results differ greatly at the same concurrency. Here is the result of my tool:
python pyab.py 50000 50 http://xx.com/a.txt
speed:1063(q/s), worker:50, interval:7, req_made:7493, req_done:7443, req_error:0
And here is the result of ApacheBench:
ab -c 50 -n 50000 http://xx.com/a.txt
Server Software: nginx/1.4.1
Server Hostname: 203.90.245.26
Server Port: 8080
Document Path: /a.txt
Document Length: 6 bytes
Concurrency Level: 50
Time taken for tests: 6.89937 seconds
Complete requests: 50000
Failed requests: 0
Write errors: 0
Total transferred: 12501750 bytes
HTML transferred: 300042 bytes
Requests per second: 8210.27 [#/sec] (mean)
Time per request: 6.090 [ms] (mean)
Time per request: 0.122 [ms] (mean, across all concurrent requests)
Transfer rate: 2004.62 [Kbytes/sec] received
Connection Times (ms)
min mean[+/-sd] median max
Connect: 0 0 0.8 0 4
Processing: 1 5 3.4 5 110
Waiting: 0 2 3.6 2 109
Total: 1 5 3.5 5 110
Percentage of the requests served within a certain time (ms)
50% 5
66% 6
75% 6
80% 6
90% 7
95% 7
98% 8
99% 8
100% 110 (longest request)
On the same URL and concurrency, ApacheBench can go up to 8000 req/s, while pyab reaches only about 1000 req/s.
Here is my code (pyab.py):
from twisted.internet import reactor, threads
from twisted.internet.protocol import Protocol
from twisted.internet.defer import Deferred
from twisted.web.client import Agent
from twisted.web.client import HTTPConnectionPool
from twisted.web.http_headers import Headers
from twisted.python import log
import time, os, stat, logging, sys
from collections import Counter

logging.basicConfig(
    #filename= "/%s/log/%s.%s" % (RUN_DIR,RUN_MODULE,RUN_TIME),
    format="%(asctime)s [%(levelname)s] %(message)s",
    level=logging.WARNING,
    #level=logging.DEBUG,
    stream=sys.stdout
)
#log.startLogging(sys.stdout)
observer = log.PythonLoggingObserver()
observer.start()

class IgnoreBody(Protocol):
    def __init__(self, deferred, tl):
        self.deferred = deferred
        self.tl = tl

    def dataReceived(self, bytes):
        pass

    def connectionLost(self, reason):
        self.deferred.callback(None)

class Pyab:
    def __init__(self, n=50000, concurrency=100, url='http://203.90.245.26:8080/a.txt'):
        self.n = n
        self.url = url
        self.pool = HTTPConnectionPool(reactor, persistent=True)
        self.pool.maxPersistentPerHost = concurrency
        self.agent = Agent(reactor, connectTimeout=5, pool=self.pool)
        #self.agent = Agent(reactor, connectTimeout=5)
        self.time_start = time.time()
        self.max_worker = concurrency
        self.cnt = Counter({
            'worker'    : 0,
            'req_made'  : 0,
            'req_done'  : 0,
            'req_error' : 0,
        })

    def monitor(self):
        interval = int(time.time() - self.time_start)
        speed = 0
        if interval != 0:
            speed = int(self.cnt['req_done'] / interval)
        log.msg("speed:%d(q/s), worker:%d, interval:%d, req_made:%d, req_done:%d, req_error:%d"
                % (speed, self.cnt['worker'], interval, self.cnt['req_made'], self.cnt['req_done'], self.cnt['req_error']), logLevel=logging.WARNING)
        reactor.callLater(1, lambda: self.monitor())

    def start(self):
        self.keeprunning = True
        self.monitor()
        self.readMore()

    def stop(self):
        self.keeprunning = False

    def readMore(self):
        while self.cnt['worker'] < self.max_worker and self.cnt['req_done'] < self.n:
            self.make_request()
        if self.keeprunning and self.cnt['req_done'] < self.n:
            reactor.callLater(0.0001, lambda: self.readMore())
        else:
            reactor.stop()

    def make_request(self):
        d = self.agent.request(
            'GET',
            #'http://examplexx.com/',
            #'http://example.com/',
            #'http://xa.xingcloud.com/v4/qvo/WDCXWD7500AADS-00M2B0_WD-WCAV5E38536685366?update0=ref0%2Ccor&update1=nation%2Ccn&action0=visit&_ts=1376397973636',
            #'http://203.90.245.26:8080/a.txt',
            self.url,
            Headers({'User-Agent': ['Twisted Web Client Example']}),
            None)
        self.cnt['worker'] += 1
        self.cnt['req_made'] += 1

        def cbResponse(resp):
            self.cnt['worker'] -= 1
            self.cnt['req_done'] += 1
            log.msg('response received')
            finished = Deferred()
            resp.deliverBody(IgnoreBody(finished, self))
            return finished

        def cbError(error):
            self.cnt['worker'] -= 1
            self.cnt['req_error'] += 1
            log.msg(error, logLevel=logging.ERROR)

        d.addCallback(cbResponse)
        d.addErrback(cbError)

if __name__ == '__main__':
    if len(sys.argv) < 4:
        print "Usage: %s <n> <concurrency> <url>" % (sys.argv[0])
        sys.exit()
    ab = Pyab(n=int(sys.argv[1]), concurrency=int(sys.argv[2]), url=sys.argv[3])
    ab.start()
    reactor.run()
Is there anything wrong with my code? Thanks!
When I last used it, ab was known to have dozens of serious bugs. Sometimes that would cause it to report massively inflated results. Sometimes it would report negative results. Sometimes it would crash. I'd try another tool, like httperf, as a sanity check.
However, if your server is actually that fast, then you might have another issue.
Even if ab has been fixed, you're talking here about a C program versus a Python program running on CPython. Being 8x slower than C is not all that bad for Python, so I don't expect there is anything wrong with your program, except that it doesn't make use of spawnProcess and multi-core concurrency.
For starters, see if you get any better results on PyPy.
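As an aside, a rough sketch of one way to use more cores without restructuring the Twisted code: split the total request count across several pyab.py processes and run them side by side (an illustrative wrapper, not something the answer prescribes):

# hypothetical wrapper: one pyab.py process per CPU core,
# each handling an equal share of the total request count
import multiprocessing
import subprocess
import sys

def run_parallel(total, concurrency, url):
    cores = multiprocessing.cpu_count()
    procs = [
        subprocess.Popen([sys.executable, 'pyab.py',
                          str(total // cores), str(concurrency), url])
        for _ in range(cores)
    ]
    for p in procs:
        p.wait()  # each process prints its own rate; sum them by hand

if __name__ == '__main__':
    run_parallel(50000, 50, 'http://xx.com/a.txt')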

python chat using poll

I need to build a Python chat and I'm stuck on the very final step. I've built the server and the client, and I have the following problem while running the code:
run server.py 127.0.0.1
- in a separate window, run client.py 127.0.0.1
- start another client
- type the nicknames for both clients; each gets the correct answer 'yuppie', meaning you are connected
- one client tries to speak
- the message is not read by the other client until that client prints something itself; after printing, the message shows up on its screen correctly.
I'd like to get the message without being obliged to print something first; it's pretty unrealistic!!! The code for the client and the server is below, in 2 different classes. Thank you!
#! /usr/bin/env python
import socket,sys,select,re

PORT=1060

class Server():
    def __init__(self,host):
        #building listen_sock
        self.listen_sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        self.listen_sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
        self.listen_sock.bind((host,PORT))
        self.listen_sock.listen(20)
        #building dict for socket and socket state
        self.sockets={self.listen_sock.fileno(): self.listen_sock}
        self.socket_state={self.listen_sock.fileno():''}
        #building poll object
        self.poll=select.poll()
        self.poll.register(self.listen_sock,select.POLLIN)
        #users' list
        self.users_list={}
        #DON'T LOOK HERE
        #initialize the sender
        #self.sender=0
        # self.users=re.compile("\s*\$(get users connected)$\s*",re.IGNORECASE)
        # self.nick=re.compile("\s*\$\$(\w*\d*)\$\$\s*",re.IGNORECASE)
        # self.quit=re.compile("\s*\$(quit)\$\s*",re.IGNORECASE)
        #self.commands=[self.users,self.nick,self.quit]

    #function to receive a message from a client (works well)
    def recv_until(self,fd,suffix):
        self.message=''
        #checking the end of the message
        while not self.message.endswith(suffix):
            data=self.sockets[fd].recv(16)
            if not data:
                raise EOFError('socket closed before we saw %r' % suffix)
            self.message+=data
        self.message=self.message[:-1]

    #delete client (works well)
    def del_client(self,fd):
        del self.users_list[fd]
        del self.socket_state[fd]
        self.poll.unregister(fd)
        #print the remaining active connections
        if not len(self.users_list):
            print 'No one is connected, waiting for new connection'
        else:
            print self.users_list

    #add new client and change the state of the file descriptor for that client (works well)
    def new_client(self,fd):
        newsock, sockname = self.listen_sock.accept()
        print 'new connection from ', newsock.getpeername()
        newsock.setblocking(False)
        #recording the new connection
        fd=newsock.fileno()
        self.sockets[fd]=newsock
        self.poll.register(fd,select.POLLOUT)
        self.socket_state[fd]='ask nick'

    #DON'T LOOK HERE
    # def handle_query(self,fd):
    #     for n,command in enumerate(self.commands):
    #         match=command.search(self.message)
    #         if n==1 and match:
    #             self.users_list[self.sockets[fd].getpeername()]=match.group(1)
    #             print self.users_list
    #             for value in self.users_list.values():
    #                 self.sockets[fd].sendall(value+'\n')

    #starting the main function of the class
    def chat(self):
        while True:
            #here is where the code hangs, waiting and waiting (WORKS BADLY)
            #returns a tuple, identifying where (fd) the event (event) is happening
            for fd,event in self.poll.poll():
                #print the state of each socket and the poll object
                print self.socket_state
                print self.poll.poll()
                #starting the state machine
                #remove closed sockets
                if event & (select.POLLHUP | select.POLLERR |
                            select.POLLNVAL):
                    #deleting the socket closed at fd
                    self.del_client(fd)
                #if the socket referred to is our listen_sock and we have a new connection request
                elif self.sockets[fd] is self.listen_sock:
                    #recording the new entry!
                    self.new_client(fd)
                #managing all the situations where it is necessary to answer a client
                #and changing the state of the socket and that of the sockets[fd]
                elif event & select.POLLOUT:
                    if self.socket_state[fd]=='ask nick':
                        self.sockets[fd].sendall('identify\n')
                        self.poll.modify(self.sockets[fd],select.POLLIN)
                        self.socket_state[fd]='get user'
                    if self.socket_state[fd]=='invalid nick':
                        self.sockets[fd].sendall('invalid nick\n')
                        for value in self.users_list.values():
                            self.sockets[fd].sendall('\n'+value+'\n')
                        self.socket_state[fd]='ask nick'
                    if self.socket_state[fd]=='connected':
                        print '3'
                        self.sockets[fd].sendall('yuppie\n')
                        self.poll.modify(self.sockets[fd],select.POLLIN)
                        self.socket_state[fd]='ready to communicate'
                    if self.socket_state[fd]=='ready to receive':
                        self.sockets[fd].sendall(self.message)
                        print '4'
                        self.poll.modify(self.sockets[fd],select.POLLIN)
                        self.socket_state[fd]='ready to communicate'
                #managing all the situations where it is necessary to get values from clients
                elif event & select.POLLIN:
                    if self.socket_state[fd]=='get user':
                        self.recv_until(fd,'\n')
                        if self.message not in self.users_list.values():
                            self.users_list[fd]=self.message
                            self.poll.modify(self.sockets[fd],select.POLLOUT)
                            self.socket_state[fd]='connected'
                        else:
                            self.poll.modify(self.sockets[fd],select.POLLOUT)
                            self.socket_state[fd]='invalid nick'
                    if self.socket_state[fd]=='ready to communicate':
                        self.recv_until(fd,'\n')
                        print '5'
                        for i in self.users_list.keys():
                            if i!=fd:
                                self.poll.modify(self.sockets[i],select.POLLOUT)
                                self.socket_state[i]='ready to receive'

if __name__ == '__main__':
    se=Server(sys.argv[1])
    se.chat()
#! /usr/bin/env python
import sys,socket,select,threading,time

s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
HOST=sys.argv.pop()
PORT=1060

class Client():
    def setup(self):
        server_address=(HOST,PORT)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect(server_address)

    def chat(self):
        while True:
            time.sleep(1)
            text=raw_input('>>> ')
            self.sock.sendall(text+'\n')

    def rec(self):
        while True:
            mess=self.sock.recv(16)
            if mess:
                print '$$$ ', mess,

    def start(self):
        l=threading.Thread(target=self.rec)
        t=threading.Thread(target=self.chat)
        t.start()
        l.start()

if __name__=='__main__':
    cl=Client()
    cl.setup()
    cl.start()
Next time take a look at http://www.zeromq.org/, it has a nice python binding http://zeromq.github.com/pyzmq/. It's perfect for this kind of stuff.
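For instance, a tiny pyzmq PUB/SUB sketch of the same broadcast idea (illustrative only; the address and port are made up):

# broadcaster.py: every connected subscriber receives each published message
import time
import zmq

ctx = zmq.Context()
pub = ctx.socket(zmq.PUB)
pub.bind('tcp://127.0.0.1:5556')
while True:
    pub.send(b'hello everyone')
    time.sleep(1)

# receiver.py: messages arrive as soon as they are published,
# with no poll()/state-machine bookkeeping on your side
import zmq

ctx = zmq.Context()
sub = ctx.socket(zmq.SUB)
sub.connect('tcp://127.0.0.1:5556')
sub.setsockopt(zmq.SUBSCRIBE, b'')  # subscribe to everything
while True:
    print(sub.recv())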
