I'm trying to build my own layer in Scapy which contains a "LengthField". This field should contain the total size of this layer plus everything that follows.
I know that the post_build method is what I need to achieve this, and I have tried to understand how it is used from the Scapy documentation, but I do not understand it.
Let's say I have a layer
class MyLayer(Packet):
    name = "MyLayer"
    fields_desc = [
        ByteField("SomeField", 1),
        IntField("LengthField", 0)
    ]
How do I need to modify this class so it automatically updates the LengthField upon building?
def post_build(self, p, pay):
    p += pay
    if self.LengthField is None:
        # pack in network byte order to match IntField's encoding
        p = p[:1] + struct.pack("!I", len(p)) + p[5:]
    hexdump(p)
    return p
You can refer to some examples in the source code:
class HCI_Command_Hdr(Packet):
    name = "HCI Command header"
    fields_desc = [XLEShortField("opcode", 0),
                   ByteField("len", None), ]

    def post_build(self, p, pay):
        p += pay
        if self.len is None:
            p = p[:2] + struct.pack("B", len(pay)) + p[3:]
        return p
The result is:
>>> (HCI_Command_Hdr()/"1234567").show2()
###[ HCI Command header ]###
  opcode= 0x0
  len= 7
###[ Raw ]###
     load= '1234567'
You can print p and pay to understand the code:
p[:2] + struct.pack("B", len(pay)) + p[3:]
Then, modify it to achieve your needs.
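For example, a minimal sketch of the modified MyLayer (an assumption on my part: the LengthField default is changed to None so the check can fire, and the value is packed in network byte order to match IntField):

from scapy.all import Packet, ByteField, IntField
import struct

class MyLayer(Packet):
    name = "MyLayer"
    fields_desc = [
        ByteField("SomeField", 1),
        IntField("LengthField", None),   # None so post_build can fill it in
    ]

    def post_build(self, p, pay):
        p += pay
        if self.LengthField is None:
            # total size = this layer (1 + 4 bytes) plus everything that follows
            p = p[:1] + struct.pack("!I", len(p)) + p[5:]
        return p

With this, (MyLayer()/"1234567").show2() should show LengthField= 12, i.e. 1 + 4 bytes of this layer plus 7 bytes of payload.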
Fix the field's value by adding the size of a new MyLayer in post_dissect. Note that you cannot use len(str(self)) for the length calculation, as you'll end up in an endless recursion because str() calls do_dissect.
class MyLayer(Packet):
    name = "MyLayer"
    fields_desc = [
        ByteField("SomeField", 1),
        LenField("LengthField", None),  # calcs len(pkt.payload)
    ]

    def post_dissect(self, s):
        # add this layer's size by creating a new temporary layer and calculating its size
        self.LengthField += len(str(self.__class__()))
        return Packet.post_dissect(self, s)
The value will be calculated upon show2():
(MyLayer()/Raw(load='hihiii')).show2()
results in
###[ MyLayer ]###
  SomeField = 1
  LengthField= 9
###[ Raw ]###
     load = 'hihiii'
where 9 = 6 bytes payload (Raw) + 1 byte (SomeField) + 2 bytes (LenField with fmt="H", i.e. a 2-byte short).
I'm struggling with serial write communication. Basically, I don't know which option to choose to combine both integer and float values in the correct way for a serial write.
The problem: I need to send data values (chars, ints, floats) to a microcontroller (ARM8 processor) to control the wheels of a robot platform. By means of an RS422-to-USB converter I'm able to read data values from the same microcontroller with this Python code:
import serial
from struct import unpack

# Initialization
counter = 0

# open serial for reading (BMW to PC communication)
s_port = 'COM8'
b_rate = 460800
ser = serial.Serial(port=s_port, baudrate=b_rate, timeout=0.01)

# method for reading incoming bytes on serial
while counter < 20:
    data = ser.readline()
    data = data.encode("hex")
    strlen = len(data)
    rev_data = "".join(reversed([data[i:i+2] for i in range(0, len(data), 2)]))
    if strlen == 80:
        Soh = data[0:2]
        Nob = data[2:4]
        Adr = data[4:6]
        Cmd = data[6:8]
        Hrt = data[8:12]
        Po1 = data[12:28]
        Po2 = data[28:44]
        En1 = data[44:60]
        En2 = data[60:76]
        Crc = data[76:78]
        Eot = data[78:80]
        So1 = unpack('B', Soh.decode("hex"))  # unsigned char
        No1 = unpack('B', Nob.decode("hex"))  # unsigned char
        Ad1 = unpack('B', Adr.decode("hex"))  # unsigned char
        Cm1 = unpack('B', Cmd.decode("hex"))  # unsigned char
        Hr1 = unpack('h', Hrt.decode("hex"))  # short
        Po1 = unpack('d', Po1.decode("hex"))  # double
        Po2 = unpack('d', Po2.decode("hex"))  # double
        En1 = unpack('d', En1.decode("hex"))  # double
        En2 = unpack('d', En2.decode("hex"))  # double
        Cr1 = unpack('B', Crc.decode("hex"))  # unsigned char
        Eo1 = unpack('B', Eot.decode("hex"))  # unsigned char
        StartOfHeader = So1[0]
        NoOfBytes = No1[0]
        Address = Ad1[0]
        Command = Cm1[0]
        Heartbeat = Hr1[0]
        Potentiometer1 = Po1[0]
        Potentiometer2 = Po2[0]
        Encoder1 = En1[0]
        Encoder2 = En2[0]
        CRC = Cr1[0]
        EndOfTransmission = Eo1[0]
    counter = counter + 1
ser.close()
In LabVIEW the serial write communication is already working, so that is my starting point.
But as I need to work with Python, the trick is to make it work in Python.
To my knowledge the data is converted to ASCII, based on the "Type Cast" function in LabVIEW (http://digital.ni.com/public.nsf/allkb/287D59BAF21F58C786256E8A00520ED5).
Honestly, I don't know anything about ASCII messages, so I'm not 100% sure.
Now I want to do the same trick for the serial write in Python 2.7: starting from char, int and float values, converting to a hexadecimal string, and then writing to the serial port. I suppose I have to use an ASCII conversion at the end, but I don't know whether it really is ASCII, nor which Python command does the conversion the right way.
This is my serial write code in Python so far (excuse me for the long and inefficient lines):
import serial
import struct

# Initialization
counter = 0

# open serial for writing (PC to BMW communication)
s_port = 'COM9'
b_rate = 460800
ser = serial.Serial(port=s_port, baudrate=b_rate, timeout=0.05)

# method for writing to serial
while counter < 100:
    Soh = chr(1)
    Nob = chr(46)
    Adr = chr(49)
    Cmd = chr(32)
    DFS = float(1)
    DRS = float(2)
    DFD = int(3)
    DRD = int(4)
    DFC = int(5)
    DRC = int(6)
    SFCP = float(7)
    SRCP = float(8)
    SFC = int(9)
    SRC = int(10)
    CRC = chr(77)
    EOT = chr(4)
    S1 = Soh.encode("hex")
    N1 = Nob.encode("hex")
    A1 = Adr.encode("hex")
    C1 = Cmd.encode("hex")
    D11 = hex(struct.unpack('<I', struct.pack('<f', DFS))[0])
    D1 = D11[2:]
    D12 = hex(struct.unpack('<I', struct.pack('<f', DRS))[0])
    D2 = D12[2:]
    D3 = '{0:08x}'.format(DFD)
    D4 = '{0:08x}'.format(DRD)
    D5 = '{0:08x}'.format(DFC)
    D6 = '{0:08x}'.format(DRC)
    S11 = hex(struct.unpack('<I', struct.pack('<f', SFCP))[0])
    S2 = S11[2:]
    S12 = hex(struct.unpack('<I', struct.pack('<f', SRCP))[0])
    S3 = S12[2:]
    S4 = '{0:08x}'.format(SFC)
    S5 = '{0:08x}'.format(SRC)
    C2 = CRC.encode("hex")
    E1 = EOT.encode("hex")
    hex_string = E1 + C2 + S5 + S4 + S3 + S2 + D6 + D5 + D4 + D3 + D2 + D1 + C1 + A1 + N1 + S1
    rev_hex_string = "".join(reversed([hex_string[i:i+2] for i in range(0, len(hex_string), 2)]))
    ##command = ...
    ser.write(command)
    counter = counter + 1
ser.close()
This Python code generates the same hexadecimal string (called rev_hex_string) as the LabVIEW program, but so far I have been unable to do the serial write with Python.
I don't know how to proceed. Do I need ASCII for the serial write? And which Python calls should I use: encode("ascii"), decode("hex")? I'm totally lost in the many possibilities.
In case I use encode("ascii") or decode("hex") in Python, it only generates a " .01" (ASCII?) message.
When I switch to the normal view in LabVIEW it has a much longer message:
" .01 €? # | - à# A M"
(I'm still missing some characters, but this is most of it.)
Besides that: do I need carriage returns and/or buffer flushing?
I have to say I'm new to serial write communication. I hope you can help me.
Sorry for the long post; I tried to be as precise as possible.
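One possible simplification, sketched below: ser.write() takes a raw byte string, so there is no need for a hex-string or ASCII round trip at all; struct.pack already produces the bytes. The field order and little-endian layout here are assumptions taken from the hex-string code above, not from the device documentation, so adjust them to the real protocol.

import struct
import serial

# assumed frame layout: 4 single bytes, 2 floats, 4 ints, 2 floats, 2 ints, 2 single bytes
frame = struct.pack('<4B2f4i2f2iBB',
                    1, 46, 49, 32,        # Soh, Nob, Adr, Cmd
                    1.0, 2.0,             # DFS, DRS
                    3, 4, 5, 6,           # DFD, DRD, DFC, DRC
                    7.0, 8.0,             # SFCP, SRCP
                    9, 10,                # SFC, SRC
                    77, 4)                # CRC, EOT

ser = serial.Serial(port='COM9', baudrate=460800, timeout=0.05)
ser.write(frame)   # write the raw bytes directly
ser.close()

Note that this frame happens to be 46 bytes long, which matches the Nob = chr(46) "number of bytes" value in the code above; if the device expects the reversed byte order that rev_hex_string builds, reverse the packed bytes before writing.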
Whenever I send a SYN packet using my program, I get no reply. I know the server is working because I can connect to it using the normal socket connect() method, but when I try to do it using raw sockets I get no reply, not even an RST.
Here is my packet according to Wireshark
Transmission Control Protocol, Src Port: 5173 (5173), Dst Port: 5005 (5005), Seq: 0, Len: 0
    Source Port: 5173
    Destination Port: 5005
    [Stream index: 15]
    [TCP Segment Len: 0]
    Sequence number: 0    (relative sequence number)
    Acknowledgment number: 0
    Header Length: 40 bytes
    Flags: 0x002 (SYN)
        000. .... .... = Reserved: Not set
        ...0 .... .... = Nonce: Not set
        .... 0... .... = Congestion Window Reduced (CWR): Not set
        .... .0.. .... = ECN-Echo: Not set
        .... ..0. .... = Urgent: Not set
        .... ...0 .... = Acknowledgment: Not set
        .... .... 0... = Push: Not set
        .... .... .0.. = Reset: Not set
        .... .... ..1. = Syn: Set
        .... .... ...0 = Fin: Not set
        [TCP Flags: **********S*]
    Window size value: 53270
    [Calculated window size: 53270]
    Checksum: 0x9f18 [incorrect, should be 0x90ae (maybe caused by "TCP checksum offload"?)]
    Urgent pointer: 0
    Options: (20 bytes), Maximum segment size, SACK permitted, Timestamps, No-Operation (NOP), Window scale
        Maximum segment size: 65495 bytes
            Kind: Maximum Segment Size (2)
            Length: 4
            MSS Value: 65495
        TCP SACK Permitted Option: True
            Kind: SACK Permitted (4)
            Length: 2
        Timestamps: TSval 378701, TSecr 0
            Kind: Time Stamp Option (8)
            Length: 10
            Timestamp value: 378701
            Timestamp echo reply: 0
        No-Operation (NOP)
            Type: 1
                0... .... = Copy on fragmentation: No
                .00. .... = Class: Control (0)
                ...0 0001 = Number: No-Operation (NOP) (1)
        Window scale: 7 (multiply by 128)
    [SEQ/ACK analysis]
And here is my Python code
#!/usr/bin/python
import socket
import sys
from struct import *
import random

s = socket.socket()
host = "127.0.0.1"
destination = "127.0.0.1"
CLRF = '\r\n'
#socket.gethostname()
print destination
port = 5173

#s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#s.connect((host, 5005))
try:
    s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
except socket.error, msg:
    print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
    sys.exit()
ipSource = '192.168.0.106'
#IP header
ipIHL = 5 # Header Length
ipVersion = 4 # ipv4/v6
ipTOS = 0 # type of service
ipTotalLen = 0 ## Kernel will fill correct length apparently
ipPackID = random.randint(1,1000)
#Flags
ipReserved = 0
ipNoFrag = 1
ipMoreFrags = 0
ipFragOffset = 0 #Fragment offset
ipTTL = 64
ipProtocol = socket.IPPROTO_TCP
ipChecksum = 0 # Magic kernel filling in at work again
ipSource = socket.inet_aton (host)
ipDest = socket.inet_aton (destination)
#Packing IP flags
ipFlags = ipMoreFrags + (ipNoFrag << 1) + (ipReserved << 2)
ipFragOffset = (ipFlags << 13) + ipFragOffset
ipIHLVersion = (ipVersion << 4) + ipIHL
headerIP = pack('!BBHHHBBH4s4s',ipIHLVersion, ipTOS, ipTotalLen, ipPackID, ipFragOffset, ipTTL, ipProtocol, ipChecksum, ipSource, ipDest)
#Checksum function
def carry_around_add(a, b):
    c = a + b
    return (c & 0xffff) + (c >> 16)

def checksum(msg):
    s = 0
    for i in range(0, len(msg), 2):
        w = ord(msg[i]) + (ord(msg[i+1]) << 8)
        s = carry_around_add(s, w)
    return ~s & 0xffff
#TCP Header
tcpSourcePort = port #Source Port
tcpDestPort = 5005 #Destination Port
tcpSeqNum = 0 #Packet sequence
tcpAckNum = 0 #Ackknowledge Number
tcpOffset = 10 #Size of tcp header in 32-bit words (40 bytes including options)
#tcpReserved = 0
#tcpECN = 0
#Control Flags
tcpURG = 0
tcpACK = 0
tcpPSH = 0
tcpRST = 0
tcpSYN = 1
tcpFIN = 0
tcpWindow = socket.htons (5840) #Dunno how this works
tcpChecksum = 0
tcpUrgentPointer = 0
#TCP Options
tcpMaxSegmentSize = (2 << 24) + (4 << 16) + 65495 # Kind + Length + Max Segment Size
tcpSACKPermitted = (4 << 8) + 2#Kind + Length
#Split TCP TImestamps into 2 because too large
tcpTimestampPartOne = (8 << 8) + (10) #Kind + Length
tcpTimestampPartTwo = (378701 << 32) + 0 #Timestamp Value + Timestamp echo reply
tcpNoOp = (0 << 7) + (0 << 5) + 1 #Copy on fragmentation + Class + Number
tcpWindowScale = (3 << 16)+ (3 << 8) + 7 #Kind + Length(Bytes) +Shift CountS
#Combine both due to length issues
tcpNoOpAndWindowScale = (tcpNoOp << 24) + tcpWindowScale
tcpOffsetResult = (tcpOffset << 4) + 0 #Shift 4 bits to the left (data offset occupies the high nibble)
#Putting together all the TCP Control Flags
tcpFlags = tcpFIN + (tcpSYN << 1) + (tcpRST << 2) + (tcpPSH << 3) + (tcpACK << 4) + (tcpURG << 5)
#Packing the pseudo TCP header
headerTCP = pack('!HHLLBBHHHLHHQL', tcpSourcePort, tcpDestPort, tcpSeqNum, tcpAckNum, tcpOffsetResult, tcpFlags, tcpWindow, tcpChecksum, tcpUrgentPointer, tcpMaxSegmentSize, tcpSACKPermitted, tcpTimestampPartOne, tcpTimestampPartTwo, tcpNoOpAndWindowScale)
#headerTCP = pack('!HHLLBBHHH', tcpSourcePort, tcpDestPort, tcpSeqNum, tcpAckNum, tcpOffsetResult, tcpFlags, tcpWindow, tcpChecksum, tcpUrgentPointer)
#data = 'GET ./asd HTTP/1.1'
data = ''
#Checksum Calculation
#Pseudo Header Fields
sourceAddr = socket.inet_aton(host)
destAddr = socket.inet_aton(destination)
placeholder = 0
protocol = socket.IPPROTO_TCP
tcpLen = len(headerTCP) + len(data)
psh = pack('!4s4sBBH', sourceAddr, destAddr, placeholder, protocol, tcpLen);
psh = psh + headerTCP + data;
#Calc checksum
tcpChecksumReal = (checksum(psh) << 1)
print(tcpChecksumReal)
#Pack actual tcp header with checksum
headerTCP = pack('!HHLLBBH', tcpSourcePort, tcpDestPort, tcpSeqNum, tcpAckNum, tcpOffsetResult, tcpFlags, tcpWindow) + pack('!H', 40728) + pack ('!H', tcpUrgentPointer) + pack('!LHHQL', tcpMaxSegmentSize, tcpSACKPermitted, tcpTimestampPartOne, tcpTimestampPartTwo, tcpNoOpAndWindowScale)
#Build full packet / ip with tcp with data
packet = headerIP + headerTCP + data
#print [hex(ord(c)) for c in packet]
s.sendto(packet, (destination,0))
Any help would be appreciated, thanks in advance.
Credits to @KenCheung for the answer.
It turns out it was the checksum; the checksums in the headers I used as a reference were also incorrect, but the network card was offloading them.
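For reference, a conventional Internet checksum (RFC 1071) sketch is shown below; this is an illustration of the usual algorithm, not necessarily the exact fix applied here. It sums big-endian 16-bit words, which matches the '!' (network order) packing used for the headers above, whereas the checksum() in the question assembles words least-significant-byte first and the final value is then shifted and replaced by a hard-coded constant.

import struct

def inet_checksum(data):
    """One's-complement sum of big-endian 16-bit words (RFC 1071)."""
    if len(data) % 2:
        data += '\x00'                              # pad odd-length input
    total = 0
    for i in range(0, len(data), 2):
        word, = struct.unpack('!H', data[i:i + 2])
        total += word
        total = (total & 0xffff) + (total >> 16)    # fold the carry back in
    return ~total & 0xffff

The checksum is computed over the pseudo header plus the TCP header (with the checksum field set to zero) plus the data, and the result is packed into the header with pack('!H', ...) rather than shifted or hard-coded.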
I am trying to parse a string as below using PyParsing.
R1# show ip bgp
BGP table version is 2, local router ID is 1.1.1.1
Status codes: s suppressed, d damped, h history, * valid, > best, i - internal,
r RIB-failure, S Stale
Origin codes: i - IGP, e - EGP, ? - incomplete
   Network          Next Hop            Metric LocPrf Weight Path
>  10.1.1.0/24      192.168.1.2              0             0 200 i
Note that LocPrf value is empty but it can be a number.
ipField = Word(nums, max=3)
ipAddr = Combine(ipField + "." + ipField + "." + ipField + "." + ipField)
status_code = Combine(Optional(oneOf("s d h * r")) + ">" + Optional(Literal("i")))
prefix = Combine(ipAddr + Optional(Literal("/") + Word(nums,max=2)))
next_hop = ipAddr
med = Word(nums)
local_pref = Word(nums) | White()
path = Group(OneOrMore(Word(nums)))
origin = oneOf("i e ?")
This is the grammar.
g = status_code + prefix + next_hop + med + local_pref + Suppress(Word(nums)) + Optional(path) + origin
I just need to parse the route line (the last line shown above), but this is not parsing it properly: it assigns the Weight value to LocPrf.
Please look over the following code example (which I will include in the next pyparsing release). You should be able to adapt it to your application:
from pyparsing import col, Word, Optional, alphas, nums, ParseException

table = """\
12345678901234567890
COLOR      S   M   L
RED       10   2   2
BLUE       5      10
GREEN          3   5
PURPLE             8"""
# function to create column-specific parse actions
def mustMatchCols(startloc, endloc):
    def pa(s, l, t):
        if not startloc <= col(l, s) <= endloc:
            raise ParseException(s, l, "text not in expected columns")
    return pa

# helper to define values in a space-delimited table
def tableValue(expr, colstart, colend):
    return Optional(expr.copy().addParseAction(mustMatchCols(colstart, colend)))

# define the grammar for this simple table
colorname = Word(alphas)
integer = Word(nums).setParseAction(lambda t: int(t[0])).setName("integer")
row = (colorname("name") +
       tableValue(integer, 11, 12)("S") +
       tableValue(integer, 15, 16)("M") +
       tableValue(integer, 19, 20)("L"))

# parse the sample text - skip over the header and counter lines
for line in table.splitlines()[2:]:
    print
    print line
    print row.parseString(line).dump()
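Applied to the BGP output, the idea is to wrap the column-dependent fields (Metric, LocPrf, Weight) in the same kind of column-constrained Optional, so an empty LocPrf cell is skipped instead of swallowing the Weight value. A rough sketch along those lines follows; the column ranges are made-up placeholders and must be measured from the real "show ip bgp" output.

from pyparsing import (Combine, Group, Literal, OneOrMore, Optional,
                       ParseException, Word, col, nums, oneOf)

# column-checking helpers, as in the example above
def mustMatchCols(startloc, endloc):
    def pa(s, l, t):
        if not startloc <= col(l, s) <= endloc:
            raise ParseException(s, l, "text not in expected columns")
    return pa

def tableValue(expr, colstart, colend):
    return Optional(expr.copy().addParseAction(mustMatchCols(colstart, colend)))

# field definitions from the question
ipField = Word(nums, max=3)
ipAddr = Combine(ipField + "." + ipField + "." + ipField + "." + ipField)
status_code = Combine(Optional(oneOf("s d h * r")) + ">" + Optional(Literal("i")))
prefix = Combine(ipAddr + Optional(Literal("/") + Word(nums, max=2)))
origin = oneOf("i e ?")

# constrain the numeric columns; the ranges below are placeholders --
# measure the real column positions from the router output
med        = tableValue(Word(nums), 62, 68)("Metric")
local_pref = tableValue(Word(nums), 69, 75)("LocPrf")
weight     = tableValue(Word(nums), 76, 82)("Weight")

g = (status_code("status") + prefix("network") + ipAddr("nexthop") +
     med + local_pref + weight + Group(OneOrMore(Word(nums)))("path") + origin)

# usage: print g.parseString(route_line).dump() for each route line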
Hi, a sample output from running this code is shown below. The script runs as-is; you can run it to get a sense of what it does. My problem: given the three different calls to the routes() function, what code can I add to get a list of all previous objective function values, i.e. the obj variable inside routes()?
desired output = obj_rslt = [20.989285714285714, 21.166176470588233, 25.8656]
I have tried to use copy.copy() but it does not work. I need a list of all values as shown above for further work in another function. Thank you.
#import statements
import copy
import networkx as nx
import random as rand
#Define nodes and Edges
pos = {1001:(-42503,-3748871),1002:(-42267,-3749806),1003:(-40938,-3750235),1004: (-39452,-3750624),1005:(-39985,-3749564),1006:(-38473,-3749615),1007:(-41714,-3747171),1008:(-42279,-3745275),1009:(-41853,-3744185),1010:(-42000,-3746561),1011:(-42651,-3746188),1012:(-42195,-3747788),1013:(-41498,-3748890),1014:(-40366,-3748684),1015:(-43036,-3750284)}
edge = [(1001, 1003,{'length':0.35}),(1001, 1004,{'length':0.46}),(1001, 1009,{'length':0.49}),(1002, 1007,{'length':0.22}),(1002, 9972,{'length':0.54}),(1002, 1013,{'length':0.59}),(1003, 1014,{'length':0.25}),(1004, 1010,{'length':0.29}),(1004, 1013,{'length':0.57}),(1004, 1003,{'length':0.43}),(1004, 1006,{'length':0.37}),(1005, 1002,{'length':0.23}),(1005, 14566,{'length':0.72}),(1006, 1005,{'length':0.6}),(1007, 1003,{'length':0.39}),(1007, 1010,{'length':0.11}),(1009, 1001,{'length':0.51}),(1010, 1005,{'length':0.2}),(1011, 1004,{'length':0.37}),(1012, 1006,{'length':0.17}),(1013, 1005,{'length':0.19}),(1013, 1007,{'length':0.21}),(1014, 1005,{'length':0.35}),(1014, 1009,{'length':0.51})]
#Create the graph and add the nodes and edges
X = nx.MultiDiGraph()
X.add_nodes_from(pos.keys())
X.add_edges_from(edge)
def routes():
    """ This function creates bus routes """
    individual = []
    shortest_path_length = []
    num_routes = 3
    # Generate the bus routes
    for i in xrange(num_routes):
        while True:
            try:
                A = int(rand.choice(pos.keys()))
                B = int(rand.choice(pos.keys()))
                path = nx.dijkstra_path(X, A, B, weight='length')
                individual.append(path)
                pathlength = round(nx.dijkstra_path_length(X, A, B), 2)
                if pathlength > 1:
                    shortest_path_length.append(pathlength)
                    break
                else: pathlength
            except: pass
    # Loop through the list of shortest path nodes to get the nodes for the bus route
    #bus_route_nodes = [map(lambda x: str(x) + '.0', l) for l in individual]
    veh_spid = []
    veh_hdw = []
    obj_rslt = []
    for ind in individual:
        try:
            headway = rand.randint(2, 30)
            veh_hdw.append(headway)
            speed = rand.choice([2, 15, 66])
            veh_spid.append(speed)
        except: pass
    # Print bus route features
    print 'OUTPUTS:'
    print 'Route Name:', str(ind[0]) + ' ' + '-' + ' ' + str(ind[-1])
    print 'Route Number:', individual.index(ind) + 1
    print 'Route headway = ', headway
    print 'Route speed = ', speed
    print 'shortest path length', shortest_path_length
    # Average network characteristics gotten by taking the mean of the individual characteristics that make up each network
    ntwk_len = sum(shortest_path_length)
    ntwk_spid = sum(veh_spid) / len(veh_spid)
    ntwk_hdwy = sum(veh_hdw) / len(veh_hdw)
    # Calculate objective function values
    obj = [0]
    obj = copy.copy(obj)
    obj = ((ntwk_len / ntwk_spid) + 5 * (60 / ntwk_hdwy) + (ntwk_len))
    obj_rslt.append(obj)
    print 'obj_rslt', obj_rslt
    print
    return individual
#Three distinct method calls
routes()
routes()
routes()
OUTPUTS:
Route Name: 1014 - 1001
Route Number: 6
Route headway = 29
Route speed = 2
shortest path length [1.8, 2.77, 1.02]
obj_rslt [20.989285714285714]
OUTPUTS:
Route Name: 1003 - 1007
Route Number: 9
Route headway = 5
Route speed = 66
shortest path length [2.37, 2.57, 1.05]
obj_rslt [21.166176470588233]
OUTPUTS:
Route Name: 1012 - 1013
Route Number: 6
Route headway = 6
Route speed = 66
shortest path length [2.2, 1.85, 1.59]
obj_rslt [25.8656]
desired output = obj_rslt = [20.989285714285714, 21.166176470588233, 25.8656 ]
The problem is that you always reset obj_rslt to [] inside routes().
Move the assignment obj_rslt = [] out of the method, to module level after the definitions of pos and edge, and you should be fine:
# ...your code
obj_results = []

def routes():
    # do some computation and assume you store it in
    # a variable called res
    obj_results.append(res)
    return res

routes()
routes()
routes()
print obj_results
The output will be a list with all three results.
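For illustration, here is a tiny self-contained version of that pattern (the function name and values are stand-ins, not the real routes() computation): the accumulator lives at module level, so it survives across calls, and each call appends its objective value.

import random as rand

obj_rslt = []                     # module level: survives across calls

def routes_demo():
    # stand-in for the objective calculation done inside routes()
    obj = rand.uniform(20, 26)
    obj_rslt.append(obj)
    return obj

routes_demo()
routes_demo()
routes_demo()
print obj_rslt                    # all three objective values, as desired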
I want to read a binary file (K7120127.SRS) whose characteristics are detailed in chapter 2.2 of the Word file (Documentacion SRS DATA.doc) included at the following link:
https://drive.google.com/folderview?id=0B_NlxFaQkpgHb00yTm5kU0MyaUU&usp=sharing
The link also includes a viewer for that data (Srsdisp.exe), but I want to process this data, not only view it; that's why I'd like to read it in Python.
I know how to plot using matplotlib, but working with binary files is new to me. I'd like to plot something like this (that plot was made using the viewer included in the link).
Try this:
from struct import unpack

# constants from the file spec
RECORD_SIZE = 826
RECORD_HEADER_SIZE = 24
RECORD_ARRAY_SIZE = 401

# verbosity values
VERBOSITY_ALL = 2     # print warnings and errors
VERBOSITY_ERRORS = 1  # print errors
VERBOSITY_NONE = 0    # print nothing

class SRSRecord:
    """Holds one 826 byte SRS Record."""

    _site_to_name = {
        1: "Palehua",
        2: "Holloman",
        3: "Learmonth",
        4: "San Vito",
        # add new site names here ..
    }

    def __init__(self):
        self.year = None
        self.month = None
        self.day = None
        self.hour = None
        self.minute = None
        self.seconds = None
        self.site_number = None
        self.site_name = None
        self.n_bands_per_record = None
        self.a_start_freq = None
        self.a_end_freq = None
        self.a_num_bytes = None
        self.a_analyser_reference_level = None
        self.a_analyser_attenuation = None
        self.b_start_freq = None
        self.b_end_freq = None
        self.b_num_bytes = None
        self.b_analyser_reference_level = None
        self.b_analyser_attenuation = None
        # dictionary that maps frequency in mega hertz to level
        self.a_values = {}
        # dictionary that maps frequency in mega hertz to level
        self.b_values = {}
        return
    def _parse_srs_file_header(self, header_bytes, verbosity = VERBOSITY_ALL):
        fields = unpack(
            # General header information
            '>'   # (data packed in big endian format)
            'B'   # 1 Year (last 2 digits)              Byte integer (unsigned)
            'B'   # 2 Month number (1 to 12)            "
            'B'   # 3 Day (1 to 31)                     "
            'B'   # 4 Hour (0 to 23 UT)                 "
            'B'   # 5 Minute (0 to 59)                  "
            'B'   # 6 Second at start of scan (0 to 59) "
            'B'   # 7 Site Number (0 to 255)            "
            'B'   # 8 Number of bands in the record (2) "
            # Band 1 (A-band) header information
            'h'   # 9,10 Start Frequency (MHz)          Word integer (16 bits)
            'H'   # 11,12 End Frequency (MHz)           "
            'H'   # 13,14 Number of bytes in data record (401) "
            'B'   # 15 Analyser reference level         Byte integer
            'B'   # 16 Analyser attenuation (dB)        "
            # Band 2 (B-band) header information
            # 17-24 As for band 1
            'H'   # 17,18 Start Frequency (MHz)         Word integer (16 bits)
            'H'   # 19,20 End Frequency (MHz)           "
            'H'   # 21,22 Number of bytes in data record (401) "
            'B'   # 23 Analyser reference level         Byte integer
            'B',  # 24 Analyser attenuation (dB)        "
            header_bytes)

        self.year = fields[0]
        self.month = fields[1]
        self.day = fields[2]
        self.hour = fields[3]
        self.minute = fields[4]
        self.seconds = fields[5]

        # read the site number and work out the site name
        self.site_number = fields[6]
        if self.site_number not in SRSRecord._site_to_name.keys():
            # got an unknown site number.. complain a bit..
            if verbosity >= VERBOSITY_ALL:
                print("Unknown site number: %s" % self.site_number)
                print("A list of known site numbers follows:")
                for site_number, site_name in SRSRecord._site_to_name.items():
                    print("\t%s: %s" % (site_number, site_name))
            # then set the site name to unknown.
            self.site_name = "UnknownSite"
        else:
            # otherwise look up the site using our lookup table
            self.site_name = SRSRecord._site_to_name[self.site_number]

        # read the number of bands
        self.n_bands_per_record = fields[7]  # should be 2
        if self.n_bands_per_record != 2 and verbosity >= VERBOSITY_ERRORS:
            print("Warning.. record has %s bands, expecting 2!" % self.n_bands_per_record)

        # read the a record meta data
        self.a_start_freq = fields[8]
        self.a_end_freq = fields[9]
        self.a_num_bytes = fields[10]
        if self.a_num_bytes != 401 and verbosity >= VERBOSITY_ERRORS:
            print("Warning.. record has %s bytes in the a array, expecting 401!" %
                  self.a_num_bytes)
        self.a_analyser_reference_level = fields[11]
        self.a_analyser_attenuation = fields[12]

        # read the b record meta data
        self.b_start_freq = fields[13]
        self.b_end_freq = fields[14]
        self.b_num_bytes = fields[15]
        if self.b_num_bytes != 401 and verbosity >= VERBOSITY_ERRORS:
            print("Warning.. record has %s bytes in the b array, expecting 401!" %
                  self.b_num_bytes)
        self.b_analyser_reference_level = fields[16]
        self.b_analyser_attenuation = fields[17]
        return
    def _parse_srs_a_levels(self, a_bytes):
        # unpack the frequency/levels from the first array
        for i in range(401):
            # freq equation from the srs file format spec
            freq_a = 25 + 50 * i / 400.0
            level_a = unpack('>B', a_bytes[i])[0]
            self.a_values[freq_a] = level_a
        return

    def _parse_srs_b_levels(self, b_bytes):
        for i in range(401):
            # freq equation from the srs file format spec
            freq_b = 75 + 105 * i / 400.0
            level_b = unpack('>B', b_bytes[i])[0]
            self.b_values[freq_b] = level_b
        return

    def __str__(self):
        return ("%s/%s/%s, %s:%s:%s site: %s/%s bands: %s "
                "[A %s->%s MHz ref_level: %s atten: %s dB], "
                "[B %s->%s MHz ref_level: %s atten: %s dB]"
                ) % (
            self.day, self.month, self.year,
            self.hour, self.minute, self.seconds,
            self.site_number, self.site_name,
            self.n_bands_per_record,
            self.a_start_freq, self.a_end_freq,
            self.a_analyser_reference_level, self.a_analyser_attenuation,
            self.b_start_freq, self.b_end_freq,
            self.b_analyser_reference_level, self.b_analyser_attenuation,
        )

    def _dump(self, values):
        freqs = values.keys()
        freqs.sort()
        for freq in freqs:
            print "%5s %s" % (freq, values[freq])
        return

    def dump_a(self):
        self._dump(self.a_values)
        return

    def dump_b(self):
        self._dump(self.b_values)
        return
def read_srs_file(fname):
    """Parses an srs file and returns a list of SRSRecords."""
    # keep the records we read in here
    srs_records = []

    f = open(fname, "rb")
    while True:
        # read raw record data
        record_data = f.read(RECORD_SIZE)

        # if the length of the record data is zero we've reached the end of the data
        if len(record_data) == 0:
            break

        # break up the record bytes into header, array a and array b bytes
        header_bytes = record_data[:RECORD_HEADER_SIZE]
        a_bytes = record_data[RECORD_HEADER_SIZE : RECORD_HEADER_SIZE + RECORD_ARRAY_SIZE]
        b_bytes = record_data[RECORD_HEADER_SIZE + RECORD_ARRAY_SIZE :
                              RECORD_HEADER_SIZE + 2 * RECORD_ARRAY_SIZE]

        # make a new srs record
        record = SRSRecord()
        record._parse_srs_file_header(header_bytes, verbosity = VERBOSITY_ERRORS)
        record._parse_srs_a_levels(a_bytes)
        record._parse_srs_b_levels(b_bytes)
        srs_records.append(record)
    return srs_records
if __name__ == "__main__":
    # parse the file.. (this is where the magic happens ;)
    srs_records = read_srs_file(fname = "K7120127.SRS")

    # play with the data
    for i in range(3):
        print srs_records[i]

    r0 = srs_records[0]
    r0.dump_a()
    r0.dump_b()
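Since the goal is to plot something like the viewer output, here is a minimal plotting sketch (an addition of mine, not part of the parser above): it stacks the A-band levels of all records, as returned by read_srs_file() above, into a 2-D array and shows it as a spectrogram-style image with matplotlib.

import numpy as np
import matplotlib.pyplot as plt

records = read_srs_file("K7120127.SRS")
freqs = sorted(records[0].a_values.keys())            # A-band frequency axis (MHz)
levels = np.array([[rec.a_values[f] for f in freqs]   # one row per record
                   for rec in records]).T             # transpose: frequency vs time

plt.imshow(levels, aspect='auto', origin='lower',
           extent=[0, len(records), freqs[0], freqs[-1]])
plt.xlabel("record number (time)")
plt.ylabel("frequency (MHz)")
plt.colorbar(label="level")
plt.show()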