I am new to Python and my networking knowledge is at a beginner level. I have an HTTP server running in a VM, and when I curl it from a different terminal on the same machine I get the expected response. I would like to get the same response on my mobile device when I type the IP and port into its browser. My mobile device is connected to the same WiFi. Here's the server code:
import socket
MAX_PACKET = 32768
def recv_all(sock):
    r'''Receive everything from `sock`, until timeout occurs, meaning sender
    is exhausted, return result as string.'''

    # dirty hack to simplify this stuff - you should really use zero timeout,
    # deal with async socket and implement finite automata to handle incoming data
    prev_timeout = sock.gettimeout()
    try:
        sock.settimeout(0.01)
        rdata = []
        while True:
            try:
                rdata.append(sock.recv(MAX_PACKET))
            except socket.timeout:
                return ''.join(rdata)

        # unreachable
    finally:
        sock.settimeout(prev_timeout)

def normalize_line_endings(s):
    r'''Convert string containing various line endings like \n, \r or \r\n,
    to uniform \n.'''
    return ''.join((line + '\n') for line in s.splitlines())
def run():
    r'''Main loop'''

    # Create TCP socket listening on port 80 for all connections,
    # with connection queue of length 1
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                                socket.IPPROTO_TCP)
    server_sock.bind(('10.0.2.15', 80))
    server_sock.listen(1)

    while True:
        # accept connection
        client_sock, client_addr = server_sock.accept()

        # headers and body are divided with \n\n (or \r\n\r\n - that's why we
        # normalize endings). In real application usage, you should handle
        # all variations of line endings not to screw request body
        request = normalize_line_endings(recv_all(client_sock))  # hack again
        request_head, request_body = request.split('\n\n', 1)

        # first line is request headline, and others are headers
        request_head = request_head.splitlines()
        request_headline = request_head[0]

        # headers have their name up to first ': '. In real world uses, they
        # could duplicate, and dict drops duplicates by default, so
        # be aware of this.
        request_headers = dict(x.split(': ', 1) for x in request_head[1:])

        # headline has form of "POST /can/i/haz/requests HTTP/1.0"
        request_method, request_uri, request_proto = request_headline.split(' ', 2)

        response_body = [
            '<html><body><h1>Hello, world!</h1>',
            '<p>This page is in location %(request_uri)r, was requested ' % locals(),
            'using %(request_method)r, and with %(request_proto)r.</p>' % locals(),
            '<p>Request body is %(request_body)r</p>' % locals(),
            '<p>Actual set of headers received:</p>',
            '<ul>',
        ]

        for request_header_name, request_header_value in request_headers.iteritems():
            response_body.append('<li><b>%r</b> == %r</li>' % (request_header_name,
                                                               request_header_value))

        response_body.append('</ul></body></html>')
        response_body_raw = ''.join(response_body)

        # Clearly state that connection will be closed after this response,
        # and specify length of response body
        response_headers = {
            'Content-Type': 'text/html; encoding=utf8',
            'Content-Length': len(response_body_raw),
            'Connection': 'close',
        }
        response_headers_raw = ''.join('%s: %s\n' % (k, v) for k, v in
                                       response_headers.iteritems())

        # Reply as HTTP/1.1 server, saying "HTTP OK" (code 200).
        response_proto = 'HTTP/1.1'
        response_status = '200'
        response_status_text = 'OK'  # this can be random

        # sending all this stuff
        client_sock.send('%s %s %s\n' % (response_proto, response_status,
                                         response_status_text))  # status line ends with its own newline
        client_sock.send(response_headers_raw)
        client_sock.send('\n')  # to separate headers from body
        client_sock.send(response_body_raw)

        # and closing connection, as we stated before
        client_sock.close()

run()
Running curl from a different terminal on the same VM returns the expected response.
I want to reach the server from my mobile device connected to the same WiFi. Thank you!
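A quick sanity check, not from the original post: make sure the server is bound to an address the phone can actually route to. A minimal sketch (the port 8080 here is only an example):

import socket

# Minimal sketch: binding to 0.0.0.0 listens on every interface the VM has,
# so the phone can connect via whatever address the VM exposes on the WiFi.
# Note: 10.0.2.15 looks like a NAT-only guest address (the VirtualBox default);
# if so, other devices cannot reach it directly without bridged networking or
# port forwarding on the VM.
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srv.bind(('0.0.0.0', 8080))  # example port; then browse to http://<VM address>:8080
srv.listen(1)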
I'm trying to write a Python script that sends packets from one host (desktop Ubuntu) to a server (Ubuntu server) using multiple source addresses. I decided it would be best to use raw sockets and define my own TCP and IP headers. I have succeeded in sending the SYN packet and I also get a SYN-ACK response, but then my host doesn't reply with the ACK packet; it replies with an RST packet instead. This is the first problem I ran into. After completing the three-way handshake I would like to send an HTTP GET keep-alive request. Does anyone know how to do this? Or does anyone know a better solution or library to use?
import socket

def send_raw_socket():
    s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_TCP)
    s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)

    # IP header fields
    version_ihl_type_of_service = b'\x45\x00'
    total_length = b'\x00\x3c'
    Indentification = b'\xab\xcd'
    flags_fragment_offset = b'\x00\x00'
    TTL_protocol = b'\x40\x06'
    Dest_add = b'\x0a\x0a'
    Dest_add2 = b'\x1e\x03'
    Src_add = b'\x0a\x0a'
    src_add2 = b'\x0a\x0a'
    # my_ip_header_checksum() and tcp_header_checksum() are defined elsewhere in my code
    Header_Checksum = my_ip_header_checksum(version_ihl_type_of_service, total_length,
                                            Indentification, flags_fragment_offset,
                                            TTL_protocol, Src_add, src_add2,
                                            Dest_add, Dest_add2)

    # TCP header fields
    IP_protocol = b'\x00\x06'
    TCP_header_lenght = b'\x00\x14'
    src_port = (49607).to_bytes(2, byteorder='big')  # ephemeral source port
    dest_port = b'\x1f\x95'                          # Source Port | Destination Port
    seq_n1 = b'\x00\x00'
    seq_n2 = b'\x00\x00'                             # Sequence Number
    ack_n1 = b'\x00\x00'
    ack_n2 = b'\x00\x00'                             # Acknowledgement Number
    do_res = b'\x50\x02'
    flags_win_s = b'\x71\x10'                        # Data Offset, Reserved, Flags | Window Size
    checksum = tcp_header_checksum(IP_protocol, Src_add, src_add2, Dest_add, Dest_add2,
                                   TCP_header_lenght, src_port, dest_port, seq_n1, seq_n2,
                                   ack_n1, ack_n2, do_res, flags_win_s)
    u_pinter = b'\x00\x00'                           # Checksum | Urgent Pointer

    packet = (version_ihl_type_of_service + total_length + Indentification +
              flags_fragment_offset + TTL_protocol + Header_Checksum +
              Src_add + src_add2 + Dest_add + Dest_add2 +
              src_port + dest_port + seq_n1 + seq_n2 + ack_n1 + ack_n2 +
              do_res + flags_win_s + checksum + u_pinter)
    s.sendto(packet, ('10.10.30.3', 8085))
[Wireshark capture of the three-way handshake]
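As a rough sketch (not part of the original code), this is the sequence/acknowledgement bookkeeping needed for the final ACK of the handshake, assuming synack holds the raw bytes of the received SYN-ACK and starts with a 20-byte IP header with no options (the offsets shift otherwise):

# synack: raw bytes of the received SYN-ACK (20-byte IP header assumed)
server_isn = int.from_bytes(synack[24:28], 'big')  # SEQ field = server's initial sequence number
ack_of_us = int.from_bytes(synack[28:32], 'big')   # ACK field = our ISN + 1

ack_field = (server_isn + 1) & 0xFFFFFFFF  # our ACK acknowledges the server's SYN
seq_field = ack_of_us                      # our next sequence number is our ISN + 1
# The same seq/ack values are reused for the first data segment (the HTTP GET),
# since the bare ACK itself carries no payload.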
I need to send a message when my program is done running, but I also want to include variables such as how long the program took to run in the text.
Here is my code for the texting:
import smtplib
carriers = {
    'att': '@mms.att.net',
    'tmobile': '@tmomail.net',
    'verizon': '@vtext.com',
    'sprint': '@page.nextel.com'
}

def send(message):
    # Replace the number with your own, or consider using an argument/dict for multiple people.
    to_number = 'xxxxxxxxxx{}'.format(carriers['verizon'])
    auth = ('xxxxx', 'xxxx')

    # Establish a secure session with gmail's outgoing SMTP server using your gmail account
    server = smtplib.SMTP("smtp.gmail.com", 587)
    server.starttls()
    server.login(auth[0], auth[1])

    # Send text message through SMS gateway of destination number
    server.sendmail(auth[0], to_number, message)
Obviously, I replaced my info with the xxx.
Now, to send my text I'm calling the function using:
found = 'The program is done!'
timetook = "Time took: %s (HOUR:MIN:SEC)" % timedelta(seconds=round(elapsed_time_secs))
send(found)
send(timetook)
It just sends a blank text for timetook, but the "The program is done!" message works fine. How do I send the timetook?
The problem is that you aren't following the rules for SMTP mail: the string you hand to sendmail should be an RFC 822 style message, i.e. header lines, then a blank line, then the body. A bare string like "Time took: ..." starts with something that parses as a header line, so the gateway ends up with an empty body, while "The program is done!" does not look like a header and gets through. Below is the equivalent solution I wrote for my own use many years ago.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""SMS (short message service) functions."""

import logging
import smtplib
import sys

import rate_limiting  # my own helper module, not in the stdlib

SMTP_GATEWAY = 'localhost'
FROM_ADDR = 'me@domain.us'
PHONE_NUMBER = '4081234567'
# T-Mobile SMS email gateway
# TO_ADDR = PHONE_NUMBER + '@tmomail.net'
# Verizon SMS email gateway
TO_ADDR = PHONE_NUMBER + '@vtext.com'

# Allow only three SMS messages per minute and five per hour.
short_term_rate_limiter = rate_limiting.SimpleRateLimiter(3, 60)
long_term_rate_limiter = rate_limiting.SimpleRateLimiter(5, 60 * 60)


def SendSms(msg, why=''):
    """Send a SMS message."""
    short_term_rate_exceeded = short_term_rate_limiter()
    long_term_rate_exceeded = long_term_rate_limiter()
    if short_term_rate_exceeded or long_term_rate_exceeded:
        logging.warning('SMS message rate exceeded, dropping msg: %s', msg)
        return

    smtp_conn = smtplib.SMTP(SMTP_GATEWAY)
    hdrs = 'From: {}\r\nTo: {}\r\n'.format(FROM_ADDR, TO_ADDR)
    if why:
        hdrs += 'Subject: {}\r\n'.format(why[:20])
    hdrs += '\r\n'  # blank line terminates the headers
    max_msg_len = 140 - 3 - min(20, len(why))
    msg = hdrs + msg[:max_msg_len]
    # Make sure the message has only ASCII characters.
    msg = msg.encode('ascii', errors='replace').decode('ascii')
    smtp_conn.sendmail(FROM_ADDR, [TO_ADDR], msg)
    smtp_conn.quit()


if __name__ == '__main__':
    SendSms(' '.join(sys.argv[1:]))
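Applied back to the Gmail-based snippet in the question, a minimal sketch of the same fix might look like this (placeholder number and credentials as in the question; the Subject header and MIMEText wrapper are my additions):

import smtplib
from email.mime.text import MIMEText

def send(message, subject='Status'):
    # Placeholder destination and credentials, as in the question.
    to_number = 'xxxxxxxxxx@vtext.com'
    auth = ('xxxxx', 'xxxx')

    # Wrap the text in a proper message (headers, blank line, then body)
    # so lines like "Time took: ..." are not mistaken for headers.
    msg = MIMEText(message)
    msg['From'] = auth[0]
    msg['To'] = to_number
    msg['Subject'] = subject

    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.starttls()
    server.login(auth[0], auth[1])
    server.sendmail(auth[0], [to_number], msg.as_string())
    server.quit()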
In l3_learning.py, there is a method in class l3_switch named _handle_PacketIn.
I understand that this event fires when a switch contacts the controller because it has received a packet for which it has no entry in its table.
What I don't understand is this:
packet = event.parsed
What does packet.next mean in isinstance(packet.next, ipv4)?
def _handle_PacketIn(self, event):
    dpid = event.connection.dpid
    inport = event.port
    packet = event.parsed
    if not packet.parsed:
        log.warning("%i %i ignoring unparsed packet", dpid, inport)
        return

    if dpid not in self.arpTable:
        # New switch -- create an empty table
        self.arpTable[dpid] = {}
        for fake in self.fakeways:
            self.arpTable[dpid][IPAddr(fake)] = Entry(of.OFPP_NONE,
                                                      dpid_to_mac(dpid))

    if packet.type == ethernet.LLDP_TYPE:
        # Ignore LLDP packets
        return

    if isinstance(packet.next, ipv4):
        log.debug("%i %i IP %s => %s", dpid, inport,
                  packet.next.srcip, packet.next.dstip)

        # Send any waiting packets...
        self._send_lost_buffers(dpid, packet.next.srcip, packet.src, inport)

        # Learn or update port/MAC info
        if packet.next.srcip in self.arpTable[dpid]:
            if self.arpTable[dpid][packet.next.srcip] != (inport, packet.src):
                log.info("%i %i RE-learned %s", dpid, inport, packet.next.srcip)
        else:
            log.debug("%i %i learned %s", dpid, inport, str(packet.next.srcip))
        self.arpTable[dpid][packet.next.srcip] = Entry(inport, packet.src)

        # Try to forward
        dstaddr = packet.next.dstip
        if dstaddr in self.arpTable[dpid]:
            # We have info about what port to send it out on...
            prt = self.arpTable[dpid][dstaddr].port
            mac = self.arpTable[dpid][dstaddr].mac
I think I have figured it out.
packet is the entire frame that the data-link layer hands to the physical layer. packet.next strips off the data-link layer encapsulation and reveals the IP packet (what the IP layer handed down to the data-link layer). So to get the source MAC address we use packet.src, and to get the source IP address we use packet.next.srcip.
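To illustrate the layering, here is a rough sketch in the style of the excerpt above (for illustration only, not part of l3_learning.py):

eth = event.parsed                  # the parsed Ethernet frame (layer 2)
if isinstance(eth.next, ipv4):      # eth.next is the frame's payload, here an IPv4 header object (layer 3)
    ip = eth.next
    log.debug("MACs: %s -> %s", eth.src, eth.dst)    # link-layer addresses
    log.debug("IPs:  %s -> %s", ip.srcip, ip.dstip)  # network-layer addresses
    transport = ip.next             # one layer further in: the TCP/UDP/ICMP object (layer 4)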
I am trying to measure the responses back from DNS servers. Making a sniffer for a typical DNS response that is less than 512 bytes is no big deal. My issue is receiving large 3000+ byte responses - in some cases 5000+ bytes. I haven't been able to get a socket working that can receive that data reliably. Is there a way with Python sockets to receive from a specific source address?
Here is what I have so far:
import socket
import struct
from random import randint  # used for the DNS ID and the IP identification field


def craft_dns(Qdns):
    iden = struct.pack('!H', randint(0, 65535))
    QR_thru_RD = chr(int('00000001', 2))     # '\x01'
    RA_thru_RCode = chr(int('00100000', 2))  # '\x20'
    Qcount = '\x00\x01'   # question count is 1
    ANcount = '\x00\x00'
    NScount = '\x00\x00'
    ARcount = '\x00\x01'  # additional resource count is 1
    pad = '\x00'
    Rtype_ANY = '\x00\xff'  # Request ANY record
    PROtype = '\x00\x01'    # Protocol IN || '\x00\xff' # Protocol ANY
    DNSsec_do = chr(int('10000000', 2))  # flips DNSsec bit to enable
    edns0 = '\x00\x00\x29\x10\x00\x00\x00\x00\x00\x00\x00'  # DNSsec disabled
    domain = Qdns.split('.')
    quest = ''
    for x in domain:
        quest += struct.pack('!B', len(x)) + x
    packet = (iden + QR_thru_RD + RA_thru_RCode + Qcount + ANcount + NScount + ARcount +
              quest + pad + Rtype_ANY + PROtype + edns0)  # remove pad if asking <root>
    return packet


def craft_ip(target, resolv):
    ip_ver_len = int('01000101', 2)  # IPvers: 4, 0100 | IP_hdr len: 5, 0101 = 69
    ip_tos = 0
    ip_len = 0  # socket will put in the right length
    iden = randint(0, 65535)
    ip_frag = 0  # off
    ttl = 255
    ip_proto = socket.IPPROTO_UDP  # dns, brah
    chksm = 0  # socket will do the checksum
    s_addr = socket.inet_aton(target)
    d_addr = socket.inet_aton(resolv)
    ip_hdr = struct.pack('!BBHHHBBH4s4s', ip_ver_len, ip_tos, ip_len, iden,
                         ip_frag, ttl, ip_proto, chksm, s_addr, d_addr)
    return ip_hdr


def craft_udp(sport, dest_port, packet):
    # sport = randint(0, 65535)  # not recommended to do a random port generation
    udp_len = 8 + len(packet)  # calculate length of UDP frame in bytes.
    chksm = 0  # socket fills in
    udp_hdr = struct.pack('!HHHH', sport, dest_port, udp_len, chksm)
    return udp_hdr


def get_len(resolv, domain):
    target = "10.0.0.3"
    d_port = 53
    s_port = 5353
    ip_hdr = craft_ip(target, resolv)
    dns_payload = craft_dns(domain)  # '\x00' for root
    udp_hdr = craft_udp(s_port, d_port, dns_payload)
    packet = ip_hdr + udp_hdr + dns_payload
    buf = bytearray("-" * 60000)

    recvSock = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0800))
    recvSock.settimeout(1)
    sendSock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
    sendSock.settimeout(1)
    sendSock.connect((resolv, d_port))
    sendSock.send(packet)

    msglen = 0
    while True:
        try:
            pkt = recvSock.recvfrom(65535)
            msglen += len(pkt[0])
            print repr(pkt[0])
        except socket.timeout as e:
            break

    sendSock.close()
    recvSock.close()
    return msglen

result = get_len('75.75.75.75', 'isc.org')
print result
For some reason doing
pkt = sendSock.recvfrom(65535)
receives nothing at all. Since I'm using SOCK_RAW the above code is less than ideal, but it works - sort of. If the network is noisy (like on a WLAN), I can end up receiving far more than just the DNS packets, because I have no way of knowing when to stop receiving a multi-packet DNS answer. On a quiet network, like a lab VM, it works.
Is there a better way to use a receiving socket in this case?
Obviously from the code, I'm not that strong with Python sockets.
I have to send with SOCK_RAW because I am constructing the packet in a raw format. If I use SOCK_DGRAM the custom packet will be malformed when sending to a DNS resolver.
The only way I can see is to use the raw receiving socket (recvSock.recv or recvfrom), unpack each packet, check whether the source and destination addresses match what was supplied to get_len(), then check whether the more-fragments bit is set, and record the byte length of each packet with len() - roughly as sketched below. I'd rather not do that; it just seems there should be a better way.
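For what it's worth, a rough sketch of that filtering approach (my own illustration, assuming Ethernet frames from the PF_PACKET socket and a 20-byte IP header with no options):

def is_from(pkt, resolv_ip):
    # pkt: one frame from recvSock.recvfrom(65535)[0], starting with the Ethernet header
    eth_type = struct.unpack('!H', pkt[12:14])[0]
    if eth_type != 0x0800:                  # not IPv4
        return False
    src_ip = socket.inet_ntoa(pkt[26:30])   # IP source address (14-byte Ethernet header + 12)
    return src_ip == resolv_ip

def more_fragments(pkt):
    flags_frag = struct.unpack('!H', pkt[20:22])[0]  # IP flags + fragment offset
    return bool(flags_frag & 0x2000)                 # "more fragments" bit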
OK, I was stupid and didn't look at the protocol of the receiving socket. Receiving gets flaky when you try to read packets on an IPPROTO_RAW socket, so we do need two sockets. By changing the receiver to IPPROTO_UDP and then binding it, the socket is able to follow the complete DNS response across multiple packets. I got rid of the try/except and the while loop, as they are no longer necessary, and I can pull the response length with this block:
recvSock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_UDP)
recvSock.settimeout(.3)
recvSock.bind((target, s_port))
sendSock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
#sendSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sendSock.settimeout(.3)
sendSock.bind((target, s_port))
sendSock.connect((resolv, d_port))
sendSock.send(packet)
pkt = recvSock.recvfrom(65535)
msglen = len(pkt[0])
Now the method will return the exact bytes received from a DNS query. I'll leave this up in case anyone else needs to do something similar :)
I'd like to write a small Bluetooth server application to my Nokia phone in PyS60. It needs to be able to send response to the client's request and be able to push data to the client as well.
option 1:
if I use socket.recv(1024), the program waits until something is received, so the server can't push data to the client. The Python for S60 implementation is missing the socket.settimeout() method, so I couldn't write proper non-blocking code.
option 2:
The socket.makefile() approach looked promising, but I couldn't make it work. When I replaced conn.recv(1024) with fd = conn.makefile(); fd.readline(), it didn't read a thing.
option 3:
Looked into the select() function, but had no luck with it. When I changed conn.recv() to r, w, e = select.select([conn], [], []) as has been suggested, the client doesn't even connect; it hangs at "Waiting for the client...". Strange...
I know that there are pretty nice server implementations and asynchronous API-s as well, but I only need a really basic stuff here. Thanks in advance!
here's what I have:
sock = btsocket.socket(btsocket.AF_BT, btsocket.SOCK_STREAM)
channel = btsocket.bt_rfcomm_get_available_server_channel(sock)
sock.bind(("", channel))
sock.listen(1)
btsocket.bt_advertise_service(u"name", sock, True, btsocket.RFCOMM)

print "Waiting for the client..."
conn, client_mac = sock.accept()
print "connected: " + client_mac

while True:
    try:
        data = conn.recv(1024)
        if len(data) != 0:
            print "received [%s]" % data
            if data.startswith("something"):
                conn.send("something\r\n")
        else:
            conn.send("some other data \r\n")
    except:
        pass
It's obviously blocking, so the "some other data" is never sent, but it's the best I've got so far. At least I can send something in reply to the client.
Found the solution finally!
The select function wasn't working with the btsocket module of the newer PyS60 ports.
Someone wrote a new_btsocket (available here) with a working select function.
Here is a simple example based on an echo server
#!/usr/bin/python

import socket
import select

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('localhost', 12556))
server.listen(5)

toread = [server]
running = 1

# we will shut down when all clients disconnect
while running:
    rready, wready, err = select.select(toread, [], [])
    for s in rready:
        if s == server:
            # accepting the socket, which the OS passes off to another
            # socket so we can go back to selecting. We'll append this
            # new socket to the read list we select on next pass
            client, address = server.accept()
            toread.append(client)  # select on this socket next time
        else:
            # Not the server's socket, so we'll read
            data = s.recv(1024)
            if data:
                print "Received %s" % (data)
            else:
                print "Client disconnected"
                s.close()
                # remove socket so we don't watch an invalid
                # descriptor, decrement client count
                toread.remove(s)
                running = len(toread) - 1

# clean up
server.close()
That said, I still find SocketServer (socketserver in Python 3) cleaner and easier: subclass a request handler, implement its handle() method, and call serve_forever().
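A minimal sketch of that approach (SocketServer is the Python 2 module name to match the code above; the host and port are only examples):

import SocketServer

class EchoHandler(SocketServer.StreamRequestHandler):
    def handle(self):
        # rfile/wfile wrap the accepted connection; echo lines back until EOF
        for line in self.rfile:
            self.wfile.write(line)

if __name__ == '__main__':
    server = SocketServer.TCPServer(('localhost', 12556), EchoHandler)
    server.serve_forever()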
Here's an Epoll Server Implementation (non-blocking)
http://pastebin.com/vP6KPTwH (same thing as below, felt this might be easier to copy)
use python epollserver.py to start the server.
Test it using wget localhost:8888
import sys
import socket, select
import fcntl
import email.parser
import StringIO
import datetime

"""
See:
http://docs.python.org/library/socket.html
"""

__author__ = ['Caleb Burns', 'Ben DeMott']


def main(argv=None):
    EOL1 = '\n\n'
    EOL2 = '\n\r\n'
    response = 'HTTP/1.0 200 OK\r\nDate: Mon, 1 Jan 1996 01:01:01 GMT\r\n'
    response += 'Content-Type: text/plain\r\nContent-Length: 13\r\n\r\n'
    response += 'Hello, world!'

    serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    # Tell the server socket file descriptor to destroy itself when this program ends.
    socketFlags = fcntl.fcntl(serversocket.fileno(), fcntl.F_GETFD)
    socketFlags |= fcntl.FD_CLOEXEC
    fcntl.fcntl(serversocket.fileno(), fcntl.F_SETFD, socketFlags)

    serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    serversocket.bind(('0.0.0.0', 8888))
    # Use asynchronous sockets.
    serversocket.setblocking(0)
    # Allow a queue of up to 128 requests (connections).
    serversocket.listen(128)

    # Listen to socket events on the server socket defined by the above bind() call.
    epoll = select.epoll()
    epoll.register(serversocket.fileno(), select.EPOLLIN)

    print "Epoll Server Started..."

    try:
        # The connection dictionary maps file descriptors (integers) to their corresponding network connection objects.
        connections = {}
        requests = {}
        responses = {}
        while True:
            # Ask epoll if any sockets have events and wait up to 1 second if no events are present.
            events = epoll.poll(1)
            # fileno is a file descriptor.
            # event is the event code (type).
            for fileno, event in events:
                # Check for a read event on the socket because a new connection may be present.
                if fileno == serversocket.fileno():
                    # connection is a new socket object.
                    # address is client IP address. The format of address depends on the address family of the socket (i.e., AF_INET).
                    connection, address = serversocket.accept()
                    # Set new socket-connection to non-blocking mode.
                    connection.setblocking(0)
                    # Listen for read events on the new socket-connection.
                    epoll.register(connection.fileno(), select.EPOLLIN)
                    connections[connection.fileno()] = connection
                    requests[connection.fileno()] = b''
                    responses[connection.fileno()] = response
                # If a read event occurred, then read the new data sent from the client.
                elif event & select.EPOLLIN:
                    requests[fileno] += connections[fileno].recv(1024)
                    # Once we're done reading, stop listening for read events and start listening for EPOLLOUT events (this will tell us when we can start sending data back to the client).
                    if EOL1 in requests[fileno] or EOL2 in requests[fileno]:
                        epoll.modify(fileno, select.EPOLLOUT)
                        # Print request data to the console.
                        data = requests[fileno]
                        eol = data.find("\r\n")  # this is the end of the FIRST line
                        start_line = data[:eol]  # get the contents of the first line (which is the protocol information)
                        # method is POST|GET, etc
                        method, uri, http_version = start_line.split(" ")
                        # re-used facebook's httputil library (works well to normalize and parse headers)
                        headers = HTTPHeaders.parse(data[eol:])
                        print "\nCLIENT: FD:%s %s: '%s' %s" % (fileno, method, uri, datetime.datetime.now())
                # If the client is ready to receive data, send it our response.
                elif event & select.EPOLLOUT:
                    # Send the response a chunk at a time until the complete response is sent.
                    # NOTE: This is where we are going to use sendfile().
                    byteswritten = connections[fileno].send(responses[fileno])
                    responses[fileno] = responses[fileno][byteswritten:]
                    if len(responses[fileno]) == 0:
                        # Tell the socket we are no longer interested in read/write events.
                        epoll.modify(fileno, 0)
                        # Tell the client we are done sending data and it can close the connection. (good form)
                        connections[fileno].shutdown(socket.SHUT_RDWR)
                # EPOLLHUP (hang-up) events mean the client has disconnected so clean-up/close the socket.
                elif event & select.EPOLLHUP:
                    epoll.unregister(fileno)
                    connections[fileno].close()
                    del connections[fileno]
    finally:
        # Close remaining open sockets upon program completion.
        epoll.unregister(serversocket.fileno())
        epoll.close()
        serversocket.close()
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""HTTP utility code shared by clients and servers."""
class HTTPHeaders(dict):
    """A dictionary that maintains Http-Header-Case for all keys.

    Supports multiple values per key via a pair of new methods,
    add() and get_list(). The regular dictionary interface returns a single
    value per key, with multiple values joined by a comma.

    >>> h = HTTPHeaders({"content-type": "text/html"})
    >>> h.keys()
    ['Content-Type']
    >>> h["Content-Type"]
    'text/html'
    >>> h.add("Set-Cookie", "A=B")
    >>> h.add("Set-Cookie", "C=D")
    >>> h["set-cookie"]
    'A=B,C=D'
    >>> h.get_list("set-cookie")
    ['A=B', 'C=D']
    >>> for (k,v) in sorted(h.get_all()):
    ...    print '%s: %s' % (k,v)
    ...
    Content-Type: text/html
    Set-Cookie: A=B
    Set-Cookie: C=D
    """

    def __init__(self, *args, **kwargs):
        # Don't pass args or kwargs to dict.__init__, as it will bypass
        # our __setitem__
        dict.__init__(self)
        self._as_list = {}
        self.update(*args, **kwargs)

    # new public methods

    def add(self, name, value):
        """Adds a new value for the given key."""
        norm_name = HTTPHeaders._normalize_name(name)
        if norm_name in self:
            # bypass our override of __setitem__ since it modifies _as_list
            dict.__setitem__(self, norm_name, self[norm_name] + ',' + value)
            self._as_list[norm_name].append(value)
        else:
            self[norm_name] = value

    def get_list(self, name):
        """Returns all values for the given header as a list."""
        norm_name = HTTPHeaders._normalize_name(name)
        return self._as_list.get(norm_name, [])

    def get_all(self):
        """Returns an iterable of all (name, value) pairs.

        If a header has multiple values, multiple pairs will be
        returned with the same name.
        """
        for name, list in self._as_list.iteritems():
            for value in list:
                yield (name, value)

    def items(self):
        return [{key: value[0]} for key, value in self._as_list.iteritems()]

    def get_content_type(self):
        return dict.get(self, HTTPHeaders._normalize_name('content-type'), None)

    def parse_line(self, line):
        """Updates the dictionary with a single header line.

        >>> h = HTTPHeaders()
        >>> h.parse_line("Content-Type: text/html")
        >>> h.get('content-type')
        'text/html'
        """
        name, value = line.split(":", 1)
        self.add(name, value.strip())

    @classmethod
    def parse(cls, headers):
        """Returns a dictionary from HTTP header text.

        >>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n")
        >>> sorted(h.iteritems())
        [('Content-Length', '42'), ('Content-Type', 'text/html')]
        """
        h = cls()
        for line in headers.splitlines():
            if line:
                h.parse_line(line)
        return h

    # dict implementation overrides

    def __setitem__(self, name, value):
        norm_name = HTTPHeaders._normalize_name(name)
        dict.__setitem__(self, norm_name, value)
        self._as_list[norm_name] = [value]

    def __getitem__(self, name):
        return dict.__getitem__(self, HTTPHeaders._normalize_name(name))

    def __delitem__(self, name):
        norm_name = HTTPHeaders._normalize_name(name)
        dict.__delitem__(self, norm_name)
        del self._as_list[norm_name]

    def get(self, name, default=None):
        return dict.get(self, HTTPHeaders._normalize_name(name), default)

    def update(self, *args, **kwargs):
        # dict.update bypasses our __setitem__
        for k, v in dict(*args, **kwargs).iteritems():
            self[k] = v

    @staticmethod
    def _normalize_name(name):
        """Converts a name to Http-Header-Case.

        >>> HTTPHeaders._normalize_name("coNtent-TYPE")
        'Content-Type'
        """
        return "-".join([w.capitalize() for w in name.split("-")])


if __name__ == '__main__':
    sys.exit(main(sys.argv))