Related
The original code, which only supports Python 2, is here:
link to thinkgear.py
I'm trying to edit it to support Python 3. The edited code is here:
import sys
import serial
from io import BytesIO
import struct
from collections import namedtuple
import logging
import logging.handlers
import sys
import time
import datetime
# Module-level scratch list (currently unused in this file).
# NOTE(review): `global` at module scope is a no-op — the statement could be dropped.
global delta
delta = []
# Packet-level logger, plus a separate '.bytes' child for raw hex dumps;
# propagation is disabled so byte dumps stay out of the root handlers.
_log = logging.getLogger(__name__)
_bytelog = logging.getLogger(__name__+'.bytes')
_bytelog.propagate = False
# Also persist debug output to a file, independent of basicConfig().
fh = logging.FileHandler('spam.log')
fh.setLevel(logging.DEBUG)
_log.addHandler(fh)
class ThinkGearProtocol(object):
    """Read and decode ThinkGear (NeuroSky) packets from a serial port.

    Packets are framed as AA AA <plen> <payload...> <checksum>; payloads
    that fail the checksum are pushed back and re-scanned for sync bytes.
    """

    def __init__(self, port):
        self.serial = serial.Serial(port, 57600)
        # Buffer of bytes pushed back after a failed checksum.
        self.preread = BytesIO()
        self.io = self.serial

    @staticmethod  # was '#staticmethod' (a comment, not a decorator): self._chksum(packet) then raised TypeError
    def _chksum(packet):
        """Return the one's complement of the low byte of the payload sum."""
        return ~sum(c for c in packet) & 0xff

    def _read(self, n):
        """Read up to n bytes, draining the push-back buffer first."""
        buf = self.io.read(n)
        if len(buf) < n:
            _log.debug('incomplete read, short %s bytes', n - len(buf))
            if self.io == self.preread:
                # Push-back buffer exhausted; fall back to the serial port.
                _log.debug('end of preread buffer')
                self.io = self.serial
                buf += self.io.read(n - len(buf))
                if len(buf) < n:
                    _log.debug('incomplete read, short %s bytes', n - len(buf))
        # Hex-dump what was read, 16 bytes per line, to the byte logger.
        for o in range(0, len(buf), 16):
            _bytelog.debug('%04X ' + ' '.join(('%02X',) * len(buf[o:o+16])), o, *(c for c in buf[o:o+16]))
        return buf

    def _deread(self, buf):
        """Push bytes back so the next _read() sees them again."""
        _log.debug('putting back %s bytes', len(buf))
        pos = self.preread.tell()
        self.preread.seek(0, 2)  # append at the end...
        self.preread.write(buf)
        self.preread.seek(pos)   # ...but keep the current read position
        self.io = self.preread

    def get_packets(self):
        """Yield a list of decoded data values for each valid packet."""
        last_two = ()
        while True:
            last_two = last_two[-1:] + (self._read(1),)
            if last_two == (b'\xAA', b'\xAA'):
                plen = self._read(1)
                if plen >= b'\xAA':
                    # Bogus length byte; keep scanning for a sync pair.
                    _log.debug('discarding %r while syncing', last_two[0])
                    last_two = last_two[-1:] + (plen,)
                else:
                    last_two = ()
                    packet = self._read(int.from_bytes(plen, byteorder='big'))
                    checksum = self._read(1)
                    if ord(checksum) == self._chksum(packet):
                        yield self._decode(packet)
                    else:
                        _log.debug('bad checksum')
                        self._deread(packet + checksum)
            elif len(last_two) == 2:
                _log.debug('discarding %r while syncing', last_two[0])

    def _decode(self, packet):
        """Decode one payload into a list of ThinkGearData instances."""
        decoded = []
        while packet:
            extended_code_level = 0
            # Python 3 fix: indexing bytes yields ints, so compare against
            # 0x55, not the str '\x55' (which could never be equal).
            while len(packet) and packet[0] == 0x55:
                extended_code_level += 1
                packet = packet[1:]
            if len(packet) < 2:
                _log.debug('ran out of packet: %r', b'\x55' * extended_code_level + packet)
                break
            code = packet[0]
            if code < 0x80:
                # Codes below 0x80 carry a single-byte value.
                value = packet[1]
                packet = packet[2:]
            else:
                # Codes from 0x80 carry a length-prefixed multi-byte value.
                vlen = packet[1]
                if len(packet) < 2 + vlen:
                    _log.debug('ran out of packet: %r', b'\x55' * extended_code_level + bytes([code, vlen]) + packet)
                    break
                value = packet[2:2 + vlen]
                packet = packet[2 + vlen:]
            # Fix: the original `not bool(extended_code_level and code in
            # data_types)` also took this branch for unregistered codes and
            # then raised KeyError; dispatch only on a registered plain code.
            if not extended_code_level and code in data_types:
                data = data_types[code](extended_code_level, code, value)
            elif (extended_code_level, code) in data_types:
                data = data_types[(extended_code_level, code)](extended_code_level, code, value)
            else:
                data = ThinkGearUnknownData(extended_code_level, code, value)
            decoded.append(data)
        return decoded
# Registry of decoder classes, keyed by code (and by (extended_level, code)).
data_types = {}


class ThinkGearMetaClass(type):
    """Metaclass that auto-registers ThinkGearData subclasses.

    Any subclass declaring a truthy `code` attribute is entered into
    `data_types` under that code; if it also declares a truthy
    `extended_code_level`, it is registered under the (level, code) pair too.
    """

    def __new__(mcls, name, bases, namespace):
        new_cls = super().__new__(mcls, name, bases, namespace)
        registered_code = getattr(new_cls, 'code', None)
        if registered_code:
            data_types[registered_code] = new_cls
            ext_level = getattr(new_cls, 'extended_code_level', None)
            if ext_level:
                data_types[(ext_level, registered_code)] = new_cls
        return new_cls
class ThinkGearData(object, metaclass=ThinkGearMetaClass):
    """Base class for decoded ThinkGear data values.

    Subclasses set `code` (and optionally `extended_code_level`) and are
    auto-registered in `data_types` by ThinkGearMetaClass. Each subclass
    supplies `_strfmt` for its string representation.
    """

    def __init__(self, extended_code_level, code, value):
        self.extended_code_level = extended_code_level
        self.code = code
        # NOTE(review): the Python 2 original did
        # `self.value = self._decode(value)`; this port stores the raw value,
        # so the subclass `_decode` hooks are currently bypassed. Restoring
        # the call requires the decoders to accept the int/bytes values that
        # Python 3 indexing produces — TODO confirm before re-enabling.
        self.value = value
        if self._log:
            _log.log(self._log, '%s', self)

    @staticmethod  # was '#staticmethod' (a comment, not a decorator)
    def _decode(v):
        """Identity decode; subclasses override to parse raw payload values."""
        return v

    def __str__(self):
        # _strfmt is supplied by each subclass.
        return self._strfmt % vars(self)

    # Default log level for decoded values; subclasses may set False to silence.
    _log = logging.DEBUG
class ThinkGearUnknownData(ThinkGearData):
    '''Fallback for data values whose code is not registered in data_types.'''
    _strfmt = 'Unknown: code=%(code)02X extended_code_level=%(extended_code_level)s %(value)r'
class ThinkGearPoorSignalData(ThinkGearData):
    '''POOR_SIGNAL Quality (0-255)'''
    code = 0x02
    _strfmt = 'POOR SIGNAL: %(value)s'
    # Python 3 fix: indexing a bytes packet already yields an int, and
    # ord(int) raises TypeError. Accept either an int or a 1-byte string.
    _decode = staticmethod(lambda v: v if isinstance(v, int) else ord(v))
class ThinkGearAttentionData(ThinkGearData):
    '''ATTENTION eSense (0 to 100)'''
    code = 0x04
    _strfmt = 'ATTENTION eSense: %(value)s'
    # Python 3 fix: the value arrives as an int (bytes indexing), and
    # ord(int) raises TypeError. Accept either an int or a 1-byte string.
    _decode = staticmethod(lambda v: v if isinstance(v, int) else ord(v))
class ThinkGearMeditationData(ThinkGearData):
    '''MEDITATION eSense (0 to 100)'''
    code = 0x05
    _strfmt = 'MEDITATION eSense: %(value)s'
    # Python 3 fix: the value arrives as an int (bytes indexing), and
    # ord(int) raises TypeError. Accept either an int or a 1-byte string.
    _decode = staticmethod(lambda v: v if isinstance(v, int) else ord(v))
class ThinkGearRawWaveData(ThinkGearData):
    '''RAW Wave Value (-32768 to 32767)'''
    code = 0x80
    _strfmt = 'Raw Wave: %(value)s'
    # '>h' = big-endian signed 16-bit; v must be a 2-byte buffer.
    # This works unchanged on Python 3 since the payload slice is bytes.
    _decode = staticmethod(lambda v: struct.unpack('>h', v)[0])
    # There are lots of these, don't log them by default
    _log = False
# Eight-band EEG power record, in the order the headset transmits them.
EEGPowerData = namedtuple('EEGPowerData', 'delta theta lowalpha highalpha lowbeta highbeta lowgamma midgamma')
# Single-band record. Fix: the typename now matches the variable name — it
# was mistakenly 'EEGPowerData', which made repr() and pickling misleading.
delta_value = namedtuple('delta_value', 'delta')
class ThinkGearEEGPowerData(ThinkGearData):
    '''Eight EEG band power values (0 to 16777215).

    delta, theta, low-alpha, high-alpha, low-beta, high-beta, low-gamma, and
    mid-gamma EEG band power values.
    '''
    code = 0x83
    _strfmt = 'ASIC EEG Power: %(value)r'
    # Python 3 fix: the payload `v` is bytes, so the zero pad and the join
    # must be bytes too (the str form raised TypeError: can't concat str to
    # bytes). Each band arrives as a 3-byte big-endian value; prepending
    # b'\x00' widens it to the 4 bytes that struct's '>L' expects.
    _decode = staticmethod(lambda v: EEGPowerData(
        *struct.unpack('>8L', b''.join(b'\x00' + v[o:o + 3] for o in range(0, 24, 3)))))
def main(port='COM3'):
    """Collect every packet read from the headset on `port` until interrupted.

    The serial port name is now a parameter (previously hard-coded) so the
    script also works on non-Windows hosts, e.g. main('/dev/rfcomm0');
    the default preserves the original behavior.
    """
    global packet_log
    packet_log = []
    logging.basicConfig(level=logging.DEBUG)
    for pkt in ThinkGearProtocol(port).get_packets():
        packet_log.append(pkt)


if __name__ == '__main__':
    main()
When running it under Python 2, I get output like this:
DEBUG:__main__:ASIC EEG Power: EEGPowerData(delta=7784, theta=7734, lowalpha=2035, highalpha=1979, lowbeta=2914, highbeta=3996, lowgamma=1944, midgamma=1847
When running it under Python 3, the result looks like this instead:
DEBUG:__main__:ASIC EEG Power: b'\x00\xa9\xf1\x00t%\x00\rK\x00\x18"\x00\x16%\x00\x1d6\x00OT\x00\x17\x84'
Does anyone know how I should edit this line of code to make it work in Python 3? Thank you.
_decode = staticmethod(lambda v: EEGPowerData(*struct.unpack('>8L', ''.join( '\x00'+v[o:o+3] for o in range(0, 24, 3)))))
I want to write an HMAC (hash-based message authentication code) in Python. So far I have managed to write the basic HMAC, but I want to add another parameter to the message. For example, message=(mac_address || index_value). Can somebody show me how to do it? And how can I save the output in another list (e.g. digest_hmac_list)?
from hashlib import shake_256
from zlib import crc32, adler32
class HMAC:
    """HMAC (RFC 2104 construction) over a configurable hash.

    `key` and `message` must be bytes-like. `hash_h` is normally a hashlib
    constructor (default shake_256, a variable-length hash from which
    DIGEST_SIZE bytes are requested); zlib.crc32/adler32 are also accepted,
    in which case digests are plain ints rather than byte strings.
    """

    # Output length (bytes) requested from variable-length hashes (shake_256).
    DIGEST_SIZE = 32

    def __init__(self, key, message, hash_h=shake_256):
        self.i_key_pad = bytearray()
        self.o_key_pad = bytearray()
        self.key = key
        self.message = message
        self.blocksize = 64
        self.hash_h = hash_h
        self.init_flag = False

    def init_pads(self):
        """Create the inner (0x36) and outer (0x5c) padded keys."""
        for i in range(self.blocksize):
            self.i_key_pad.append(0x36 ^ self.key[i])
            self.o_key_pad.append(0x5c ^ self.key[i])

    def init_key(self):
        """Hash down or zero-pad the key to exactly one block."""
        if len(self.key) > self.blocksize:
            # Fixes two bugs: `shake_256(key)` referenced an undefined
            # global (should be self.key), and shake digests require an
            # explicit output length.
            self.key = bytearray(shake_256(self.key).digest(self.blocksize))
        elif len(self.key) < self.blocksize:
            self.key = self.key + b"\x00" * (self.blocksize - len(self.key))

    def _ensure_init(self):
        """Run key/pad setup exactly once, before any digest is computed."""
        # Fix: the crc32/adler32 branch previously ran *before* this setup,
        # so the first call hashed against empty pads.
        if not self.init_flag:
            self.init_key()
            self.init_pads()
            self.init_flag = True

    def digest(self):
        """Return the HMAC digest: bytes normally, an int for crc32/adler32."""
        self._ensure_init()
        if self.hash_h in (adler32, crc32):
            inner = self.hash_h(bytes(self.i_key_pad) + self.message)
            return self.hash_h(bytes(self.o_key_pad) + str(inner).encode())
        # Fix: shake digests need a length argument; the original .digest()
        # call raised TypeError.
        inner = self.hash_h(bytes(self.i_key_pad) + self.message).digest(self.DIGEST_SIZE)
        return self.hash_h(bytes(self.o_key_pad) + inner).digest(self.DIGEST_SIZE)

    def hexdigest(self):
        """Return the HMAC digest as a hex string."""
        self._ensure_init()
        if self.hash_h in (adler32, crc32):
            return hex(self.digest())[2:]
        # Fix: the original returned nothing for the default (shake) path.
        return self.digest().hex()
I fixed some small issues in your code and made it so you can hash two different messages (mac || index) with the same key and save them in self.digest_hmac_list. I commented all of these changes in the code.
from hashlib import shake_256
from zlib import crc32, adler32
class HMAC:
    """HMAC over a configurable hash, recording every digest produced.

    `key` and the initial `message` are bytes-like; `hash_h` defaults to
    hashlib.shake_256 (variable-length output; one block, 64 bytes, is
    requested). zlib.crc32/adler32 are also accepted and yield int digests.
    digest()/hexdigest() optionally take a new str message — e.g.
    mac_address + index — reusing the same key, and shake results are
    appended to self.digest_hmac_list.
    """

    def __init__(self, key, message, hash_h=shake_256):
        self.i_key_pad = bytearray()
        self.o_key_pad = bytearray()
        self.key = key
        self.message = message
        self.blocksize = 64
        self.hash_h = hash_h
        self.init_flag = False
        # This will contain all hashed messages.
        self.digest_hmac_list = []

    def init_pads(self):
        """Create the inner (0x36) and outer (0x5c) padded keys."""
        for i in range(self.blocksize):
            self.i_key_pad.append(0x36 ^ self.key[i])
            self.o_key_pad.append(0x5c ^ self.key[i])

    def init_key(self):
        """Hash down or zero-pad the key to exactly one block."""
        if len(self.key) > self.blocksize:
            # shake_256 has arbitrary output length; request one block.
            self.key = bytearray(shake_256(self.key).digest(self.blocksize))
        elif len(self.key) < self.blocksize:
            self.key = self.key + b"\x00" * (self.blocksize - len(self.key))

    def _ensure_init(self):
        """Run key/pad setup exactly once, before any digest is computed."""
        # Fix: digest()'s crc32/adler32 branch previously ran *before* this
        # setup, so the first call hashed against empty pads.
        if not self.init_flag:
            self.init_key()
            self.init_pads()
            self.init_flag = True

    def digest(self, message=None):
        """Return the HMAC digest (bytes; an int for crc32/adler32).

        Passing `message` (str) replaces self.message for this and
        subsequent calls, so one instance can hash mac||index variants.
        """
        if message:
            self.message = bytearray(message, encoding="ascii")
        self._ensure_init()
        if self.hash_h == adler32 or self.hash_h == crc32:
            # Int digest; kept out of digest_hmac_list as in the original.
            return self.hash_h(bytes(self.o_key_pad) + str(self.hash_h(bytes(self.i_key_pad) + self.message)).encode())
        inner = self.hash_h(bytes(self.i_key_pad) + self.message).digest(self.blocksize)
        self.digest_hmac_list.append(
            self.hash_h(bytes(self.o_key_pad) + inner).digest(self.blocksize))
        return self.digest_hmac_list[-1]

    def hexdigest(self, message=None):
        """Return the HMAC as a hex string and record it in digest_hmac_list."""
        if message:
            self.message = bytearray(message, encoding="ascii")
        self._ensure_init()
        if self.hash_h == adler32 or self.hash_h == crc32:
            # Fix: the slice [2:] was inside the hash call, so the int result
            # was being sliced (TypeError) instead of the hex string.
            self.digest_hmac_list.append(
                hex(self.hash_h(bytes(self.o_key_pad) + str(self.hash_h(bytes(self.i_key_pad) + self.message)).encode()))[2:])
            return self.digest_hmac_list[-1]
        # Fix: the shake_256 path previously computed and returned nothing;
        # mirror digest() with hex output.
        inner = self.hash_h(bytes(self.i_key_pad) + self.message).digest(self.blocksize)
        self.digest_hmac_list.append(
            self.hash_h(bytes(self.o_key_pad) + inner).hexdigest(self.blocksize))
        return self.digest_hmac_list[-1]
# The message is the MAC address post-pended with the index.
index = "0"
mac = "FF0A8CD1DAAB"
key = "This is key"
# One instance, one key; successive calls hash different (mac || index).
cl = HMAC(bytearray(key, encoding="ascii"), bytearray(mac + index, encoding="ascii"), shake_256)
print(cl.digest())
print("=="*10)
index = "1"
# Re-digest with the new message, reusing the already-initialized pads.
print(cl.digest(mac + index))
print("=="*10)
# Every digest computed above was also recorded here.
print(cl.digest_hmac_list)
I would like to parse Python code that contains semicolons ; for separating commands and produce code that replaces those by newlines \n. E.g., from
def main():
a = "a;b"; return a
I'd like to produce
def main():
a = "a;b"
return a
Any hints?
Use the tokenize library to look for token.OP tokens, where the second element is a ; *. Replace these tokens with a token.NEWLINE token.
You'd need to adjust your token offsets and generate matching indent too however; so after a NEWLINE you'd need to adjust line numbers (increment by an offset you increase for every NEWLINE you insert) and the 'next' line (remainder of the current line) would have to have the indices adjusted to match the current indentation level:
import tokenize

# Python 2's tokenize yields plain tuples; fall back to a no-op constructor there.
TokenInfo = getattr(tokenize, 'TokenInfo', lambda *a: a) # Python 3 compat


def semicolon_to_newline(tokens):
    """Yield `tokens` with every ';' OP token replaced by a NEWLINE token.

    Because each inserted newline pushes subsequent tokens down one line,
    line numbers are shifted by a running offset, and the tokens on the
    remainder of a split line are re-anchored at the current block's
    indentation column so tokenize.untokenize() emits well-formed code.
    """
    line_offset = 0      # how many synthetic newlines have been inserted so far
    last_indent = None   # column of the current block's indentation
    col_offset = None # None or an integer
    for ttype, tstr, (slno, scol), (elno, ecol), line in tokens:
        slno, elno = slno + line_offset, elno + line_offset
        if ttype in (tokenize.INDENT, tokenize.DEDENT):
            last_indent = ecol # block is indented to this column
        elif ttype == tokenize.OP and tstr == ';':
            # swap out semicolon with a newline
            ttype = tokenize.NEWLINE
            tstr = '\n'
            line_offset += 1
            if col_offset is not None:
                scol, ecol = scol - col_offset, ecol - col_offset
            col_offset = 0 # next tokens should start at the current indent
        elif col_offset is not None:
            if not col_offset:
                # adjust column by starting column of next token
                col_offset = scol - last_indent
            scol, ecol = scol - col_offset, ecol - col_offset
            if ttype == tokenize.NEWLINE:
                # a real newline ends the re-anchoring for this line
                col_offset = None
        yield TokenInfo(
            ttype, tstr, (slno, scol), (elno, ecol), line)
# Driver: rewrite `sourcefile` into `destination` with semicolons expanded.
# NOTE(review): `sourcefile` and `destination` are assumed to be defined by
# the surrounding session — bind them to real paths before running.
with open(sourcefile, 'r') as source, open(destination, 'w') as dest:
    generator = tokenize.generate_tokens(source.readline)
    dest.write(tokenize.untokenize(semicolon_to_newline(generator)))
Note that I don't bother to correct the line value; it is informative only, the data that was read from the file is not actually used when un-tokenizing.
Demo:
>>> from io import StringIO
>>> source = StringIO('''\
... def main():
... a = "a;b"; return a
... ''')
>>> generator = tokenize.generate_tokens(source.readline)
>>> result = tokenize.untokenize(semicolon_to_newline(generator))
>>> print(result)
def main():
a = "a;b"
return a
and slightly more complex:
>>> source = StringIO('''\
... class Foo(object):
... def bar(self):
... a = 10; b = 11; c = 12
... if self.spam:
... x = 12; return x
... x = 15; return y
...
... def baz(self):
... return self.bar;
... # note, nothing after the semicolon
... ''')
>>> generator = tokenize.generate_tokens(source.readline)
>>> result = tokenize.untokenize(semicolon_to_newline(generator))
>>> print(result)
class Foo(object):
def bar(self):
a = 10
b = 11
c = 12
if self.spam:
x = 12
return x
x = 15
return y
def baz(self):
return self.bar
# note, nothing after the semicolon
>>> print(result.replace(' ', '.'))
class.Foo(object):
....def.bar(self):
........a.=.10
........b.=.11
........c.=.12
........if.self.spam:
............x.=.12
............return.x
........x.=.15
........return.y
....def.baz(self):
........return.self.bar
........
........#.note,.nothing.after.the.semicolon
* The Python 3 version of tokenize outputs more informative TokenInfo named tuples, which have an extra exact_type attribute that can be used instead of doing a text match: tok.exact_type == tokenize.SEMI. I kept the above compatible with Python 2 and 3 however.
Here's a pyparsing solution - see comments in the code below:
from pyparsing import Literal, restOfLine, quotedString, pythonStyleComment, line

# Grammar: a ';' plus the rest of its line. Matches inside quoted strings
# and Python comments are suppressed, so those semicolons are left alone.
SEMI = Literal(';')
patt = SEMI + restOfLine
patt.ignore(quotedString)
patt.ignore(pythonStyleComment)
def split_at(s, locs):
    """
    Cut `s` at each index in `locs`, dropping the break character itself
    and left-stripping every resulting piece.
    """
    # Pair up each segment's start (just past the previous break) and stop.
    starts = [0] + [loc + 1 for loc in locs]
    stops = list(locs) + [len(s)]
    return [s[begin:end].lstrip() for begin, end in zip(starts, stops)]
def split_on_semicolon(s,l,tokens):
    """
    Parse-time callback, fired on the first unquoted ';' of a line.

    Rebuilds the remainder of the line as one statement per line, each
    re-indented to match the original line's leading whitespace.
    """
    # full text of the line containing the match
    current_line = line(l,s)
    line_body = current_line.lstrip()
    indent = current_line.index(line_body)
    indent = current_line[:indent]  # the literal leading-whitespace string
    # may be more than one ';' on this line, find them all
    # (the second token contains everything after the ';')
    remainder = tokens[1]
    if remainder.strip():
        all_semis = [s for _,s,_ in SEMI.scanString(remainder)]
        # break line into pieces
        pieces = split_at(remainder, all_semis)
        # rejoin pieces, with leading indents
        return '\n'+'\n'.join(indent+piece for piece in pieces)
    else:
        # a trailing ';' with nothing after it: simply delete it
        return ''
patt.addParseAction(split_on_semicolon)
sample = """
def main():
this_semi_does_nothing();
neither_does_this_but_there_are_spaces_afterward();
a = "a;b"; return a # this is a comment; it has a semicolon!
def b():
if False:
z=1000;b("; in quotes"); c=200;return z
return ';'
class Foo(object):
def bar(self):
'''a docstring; with a semicolon'''
a = 10; b = 11; c = 12
# this comment; has several; semicolons
if self.spam:
x = 12; return x # so; does; this; one
x = 15;;; y += x; return y
def baz(self):
return self.bar
"""
print(patt.transformString(sample))
Gives:
def main():
this_semi_does_nothing()
neither_does_this_but_there_are_spaces_afterward()
a = "a;b"
return a # this is a comment; it has a semicolon!
def b():
if False:
z=1000
b("; in quotes")
c=200
return z
return ';'
class Foo(object):
def bar(self):
'''a docstring; with a semicolon'''
a = 10
b = 11
c = 12
# this comment; has several; semicolons
if self.spam:
x = 12
return x # so; does; this; one
x = 15
y += x
return y
def baz(self):
return self.bar
The code is supposed to decrypt and encrypt a random file, but I can't figure out how to provide the input.
import array
import hashlib
import random
from Crypto.Cipher import Blowfish
# Game/region identifiers accepted by SavedataCipher. Only the MH4G
# variants have a cipher key wired up below.
MH3G_JP = 0
MH3G_NA = 1
MH3G_EU = 2
MH4_JP = 3
MH4_NA = 4
MH4_EU = 5
MH4G_JP = 6
MH4G_NA = 7
MH4G_EU = 8
class SavedataCipher:
    """Encrypt/decrypt Monster Hunter save data (Blowfish + XOR stream).

    Layout: a 32-bit checksum word is prepended to the payload, the result
    is XORed with a 16-bit LCG keystream seeded randomly, a header word
    carrying the seed is prepended, and the whole buffer is Blowfish
    encrypted over byteswapped 32-bit words.
    """

    def __init__(self, game):
        if game in (MH4G_JP, MH4G_NA, MH4G_EU):
            # Fix: the key literal was split across two source lines (a
            # syntax error); rejoined here. NOTE(review): confirm the exact
            # key string against the original tool.
            self._cipher = Blowfish.new(
                b'blowfish key iorajegqmrna4itjeangmb agmwgtobjteowhv9mope')
        else:
            raise ValueError('Invalid game selected.')  # typo fix: was 'Ivalid'

    def _xor(self, buff, key):
        """XOR `buff` (bytes, even length) with a 16-bit LCG keystream."""
        words = array.array('H', buff)
        for i in range(len(words)):
            if key == 0:
                key = 1
            key = key * 0xb0 % 0xff53
            words[i] ^= key
        # tobytes(): array.tostring() was removed in Python 3.9.
        return words.tobytes()

    def encrypt(self, buff):
        """Return `buff` encrypted, with checksum and seed header prepended."""
        csum = sum(buff) & 0xffffffff
        words = array.array('I', buff)
        words.insert(0, csum)
        seed = random.getrandbits(16)
        words = array.array('I', self._xor(words.tobytes(), seed))
        words.insert(0, (seed << 16) + 0x10)
        words.byteswap()
        words = array.array('I', self._cipher.encrypt(words.tobytes()))
        words.byteswap()
        return words.tobytes()

    def decrypt(self, buff):
        """Reverse encrypt(); raise ValueError on checksum mismatch."""
        words = array.array('I', buff)
        words.byteswap()
        words = array.array('I', self._cipher.decrypt(words.tobytes()))
        words.byteswap()
        seed = words.pop(0) >> 16
        words = array.array('I', self._xor(words.tobytes(), seed))
        csum = words.pop(0)
        data = words.tobytes()
        if csum != (sum(data) & 0xffffffff):
            raise ValueError('Invalid checksum in header.')
        return data

    def encrypt_file(self, savedata_file, out_file):
        """Encrypt the file at `savedata_file` into `out_file`."""
        # with-blocks close the handles (the originals were leaked).
        with open(savedata_file, 'rb') as f:
            savedata = self.encrypt(f.read())
        with open(out_file, 'wb') as f:
            f.write(savedata)

    def decrypt_file(self, savedata_file, out_file):
        """Decrypt the file at `savedata_file` into `out_file`."""
        with open(savedata_file, 'rb') as f:
            savedata = self.decrypt(f.read())
        with open(out_file, 'wb') as f:
            f.write(savedata)
This code is a class; you should create a function which uses this class. You can do that in the same file, or in a different file and import the class.
# Example entry point: pick the region constant that matches your save file.
if __name__ == "__main__":
    cipher = SavedataCipher(MH4G_JP) # or use different parameter
    # do something else
If you use string.split() on a Python string, it returns a list of strings. These substrings that have been split-out are copies of their part of the parent string.
Is it possible to instead get some cheaper slice object that holds only a reference, offset and length to the bits split out?
And is it possible to have some 'string view' to extract and treat these sub-strings as if they are strings yet without making a copy of their bytes?
(I ask as I have very large strings I want to slice and am running out of memory occasionally; removing the copies would be a cheap profile-guided win.)
buffer will give you a read-only view on a string.
>>> s = 'abcdefghijklmnopqrstuvwxyz'
>>> b = buffer(s, 2, 10)
>>> b
<read-only buffer for 0x7f935ee75d70, size 10, offset 2 at 0x7f935ee5a8f0>
>>> b[:]
'cdefghijkl'
String objects always point to a NUL-terminated buffer in Python, so substrings must be copied. As Ignacio pointed out, you can use buffer() to get a read-only view on the string memory. The buffer() built-in function has been superseded by the more versatile memoryview objects, though, which are available in Python 2.7 and 3.x (buffer() is gone in Python 3.x).
# Python 2 example: str supports the buffer protocol there, so memoryview
# slices it without copying. NOTE(review): on Python 3 this needs bytes
# (memoryview(b"abcd" * 50)) and print() as a function.
s = "abcd" * 50
view = memoryview(s)
subview = view[10:20]
print subview.tobytes()
This code prints
cdabcdabcd
As soon as you call tobytes(), a copy of the string will be created, but the same happens when slicing the old buffer objects as in Ignacio's answer.
Here's the quick string-like buffer wrapper I came up with; I was able to use this in place of classic strings without changing the code that expected to consume strings.
class StringView:
    """Read-only, zero-copy view over a slice of a (Python 2) str.

    Mimics part of the str API (find/split/len/comparison) while holding
    only a reference to the parent string plus an offset and length, via
    the Python 2 buffer() built-in — no substring bytes are copied.
    """

    def __init__(self,s,start=0,size=sys.maxint):
        # Clamp the window to the parent string's bounds.
        self.s, self.start, self.stop = s, start, min(start+size,len(s))
        self.size = self.stop - self.start
        self._buf = buffer(s,start,self.size)

    def find(self,sub,start=0,stop=None):
        """str.find() semantics; offsets are relative to the view."""
        assert start >= 0, start
        assert (stop is None) or (stop <= self.size), stop
        # Search within the parent string, then translate the hit back
        # to a view-relative offset (-1 passes through unchanged).
        ofs = self.s.find(sub,self.start+start,self.stop if (stop is None) else (self.start+stop))
        if ofs != -1: ofs -= self.start
        return ofs

    def split(self,sep=None,maxsplit=sys.maxint):
        """str.split() semantics, returning child StringViews (no copies)."""
        assert maxsplit > 0, maxsplit
        ret = []
        if sep is None: #whitespace logic
            # pos[0]/pos[1] bracket the current word (absolute offsets).
            pos = [self.start,self.start] # start and stop
            def eat(whitespace=False):
                # Advance pos[1] while characters match the requested class
                # (ord(c) <= 32 counts as whitespace, like str.split()).
                while (pos[1] < self.stop) and (whitespace == (ord(self.s[pos[1]])<=32)):
                    pos[1] += 1
            def eat_whitespace():
                eat(True)
                pos[0] = pos[1]
            eat_whitespace()
            while pos[1] < self.stop:
                eat()
                ret.append(self.__class__(self.s,pos[0],pos[1]-pos[0]))
                eat_whitespace()
                if len(ret) == maxsplit:
                    # Hit the split limit: the rest becomes one final view.
                    ret.append(self.__class__(self.s,pos[1]))
                    break
        else:
            start = stop = 0
            while len(ret) < maxsplit:
                stop = self.find(sep,start)
                if -1 == stop:
                    break
                ret.append(self.__class__(self.s,self.start+start,stop-start))
                start = stop + len(sep)
            # Remainder after the last separator (or the whole view).
            ret.append(self.__class__(self.s,self.start+start,self.size-start))
        return ret

    def split_str(self,sep=None,maxsplit=sys.maxint):
        "if you really want strings and not views"
        return [str(sub) for sub in self.split(sep,maxsplit)]

    def __cmp__(self,s):
        # Compare buffer-to-buffer for other views, buffer-to-str otherwise.
        if isinstance(s,self.__class__):
            return cmp(self._buf,s._buf)
        assert isinstance(s,str), type(s)
        return cmp(self._buf,s)

    def __len__(self):
        return self.size

    def __str__(self):
        # Materializes a copy — only here does the substring get copied.
        return str(self._buf)

    def __repr__(self):
        return "'%s'"%self._buf
if __name__=="__main__":
test_str = " this: is: a: te:st str:ing :"
test = Envelope.StringView(test_str)
print "find('is')"
print "\t",test_str.find("is")
print "\t",test.find("is")
print "find('is',4):"
print "\t",test_str.find("is",4)
print "\t",test.find("is",4)
print "find('is',4,7):"
print "\t",test_str.find("is",4,7)
print "\t",test.find("is",4,7)
print "split():"
print "\t",test_str.split()
print "\t",test.split()
print "split(None,2):"
print "\t",test_str.split(None,2)
print "\t",test.split(None,2)
print "split(':'):"
print "\t",test_str.split(":")
print "\t",test.split(":")
print "split('x'):"
print "\t",test_str.split("x")
print "\t",test.split("x")
print "''.split('x'):"
print "\t","".split("x")
print "\t",Envelope.StringView("").split("x")