(I've edited this for clarity, and changed the actual question a bit based on EOL's answer)
I'm trying to translate the following C function to Python but failing miserably (see the C code below). As I understand it, it takes four 1-byte chars starting from the memory location pointed to by from, treats them as unsigned long ints in order to give each one 4 bytes of space, and does some bit shifting to arrange them as a big-endian 32-bit integer. The result is then used in an algorithm that checks file validity. (The code is from the Treaty of Babel.)
static int32 read_alan_int(unsigned char *from)
{
    return ((unsigned long int) from[3])       | ((unsigned long int) from[2] << 8) |
           ((unsigned long int) from[1] << 16) | ((unsigned long int) from[0] << 24);
}
/*
The claim algorithm for Alan files is:
* For Alan 3, check for the magic word
* load the file length in blocks
* check that the file length is correct
* For alan 2, each word between byte address 24 and 81 is a
word address within the file, so check that they're all within
the file
* Locate the checksum and verify that it is correct
*/
static int32 claim_story_file(void *story_file, int32 extent)
{
    unsigned char *sf = (unsigned char *) story_file;
    int32 bf, i, crc = 0;

    if (extent < 160) return INVALID_STORY_FILE_RV;
    if (memcmp(sf, "ALAN", 4))
    {   /* Identify Alan 2.x */
        bf = read_alan_int(sf + 4);
        if (bf > extent / 4) return INVALID_STORY_FILE_RV;
        for (i = 24; i < 81; i += 4)
            if (read_alan_int(sf + i) > extent / 4) return INVALID_STORY_FILE_RV;
        for (i = 160; i < (bf * 4); i++)
            crc += sf[i];
        if (crc != read_alan_int(sf + 152)) return INVALID_STORY_FILE_RV;
        return VALID_STORY_FILE_RV;
    }
    else
    {   /* Identify Alan 3 */
        bf = read_alan_int(sf + 12);
        if (bf > (extent / 4)) return INVALID_STORY_FILE_RV;
        for (i = 184; i < (bf * 4); i++)
            crc += sf[i];
        if (crc != read_alan_int(sf + 176)) return INVALID_STORY_FILE_RV;
    }
    return VALID_STORY_FILE_RV;
}
I'm trying to reimplement this in Python. For implementing the read_alan_int function, I would think that importing struct and doing struct.unpack_from('>L', data, offset) would work. However, on valid files, this always returns 24 for the value bf, which means that the for loop is skipped.
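For reference, by the struct-based version I mean simply the following (a sketch; unpack_from returns a tuple, hence the [0]):

import struct
bf = struct.unpack_from('>L', data, offset)[0]  # '>L' = big-endian unsigned 32-bit

My manual implementation is: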
def read_alan_int(file_buffer, i):
    i0 = ord(file_buffer[i]) * (2 ** 24)
    i1 = ord(file_buffer[i + 1]) * (2 ** 16)
    i2 = ord(file_buffer[i + 2]) * (2 ** 8)
    i3 = ord(file_buffer[i + 3])
    return i0 + i1 + i2 + i3
def is_a(file_buffer):
    crc = 0
    if len(file_buffer) < 160:
        return False
    if file_buffer[0:4] == 'ALAN':
        # Identify Alan 2.x
        bf = read_alan_int(file_buffer, 4)
        if bf > len(file_buffer) / 4:
            return False
        for i in range(24, 81, 4):
            if read_alan_int(file_buffer, i) > len(file_buffer) / 4:
                return False
        for i in range(160, bf * 4):
            crc += ord(file_buffer[i])
        if crc != read_alan_int(file_buffer, 152):
            return False
        return True
    else:
        # Identify Alan 3.x
        #bf = read_long(file_buffer, 12, '>')
        bf = read_alan_int(file_buffer, 12)
        print bf
        if bf > len(file_buffer) / 4:
            return False
        for i in range(184, bf * 4):
            crc += ord(file_buffer[i])
        if crc != read_alan_int(file_buffer, 176):
            return False
        return True
    return False

if __name__ == '__main__':
    import sys, struct
    data = open(sys.argv[1], 'rb').read()
    print is_a(data)
...but the damn thing still returns 24. Unfortunately, my C skills are non-existent so I'm having trouble getting the original program to print some debug output so I can know what bf is supposed to be.
What am I doing wrong?
Ok, so I'm apparently doing read_alan_int correctly. However, what's failing for me is the check that the first 4 characters are "ALAN". All of my test files fail this test. I've changed the code to remove this if/else statement and to instead just take advantage of early returns, and now all of my unit tests pass. So, on a practical level, I'm done. However, I'll keep the question open to address the new problem: how can I possibly wrangle the bits to get "ALAN" out of the first 4 chars?
def is_a(file_buffer):
    crc = 0
    if len(file_buffer) < 160:
        return False

    #if file_buffer.startswith('ALAN'):
    # Identify Alan 2.x
    bf = read_long(file_buffer, 4)
    if bf > len(file_buffer) / 4:
        return False
    for i in range(24, 81, 4):
        if read_long(file_buffer, i) > len(file_buffer) / 4:
            return False
    for i in range(160, bf * 4):
        crc += ord(file_buffer[i])
    if crc == read_long(file_buffer, 152):
        return True

    # Identify Alan 3.x
    crc = 0
    bf = read_long(file_buffer, 12)
    if bf > len(file_buffer) / 4:
        return False
    for i in range(184, bf * 4):
        crc += ord(file_buffer[i])
    if crc == read_long(file_buffer, 176):
        return True

    return False
Ah, I think I've got it. Note that the description says
/*
The claim algorithm for Alan files is:
* For Alan 3, check for the magic word
* load the file length in blocks
* check that the file length is correct
* For alan 2, each word between byte address 24 and 81 is a
word address within the file, so check that they're all within
the file
* Locate the checksum and verify that it is correct
*/
which I read as saying that there's a magic word in Alan 3, but not in Alan 2. However, your code tests the other way around, even though the C code only expects "ALAN" to be present for Alan 3 files.
Why? Because you don't speak C, so you guessed -- naturally enough! -- that memcmp would return (the equivalent of a Python) True if the first four characters of sf and "ALAN" are equal... but it doesn't. memcmp returns 0 if the contents are equal, and nonzero if they differ.
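In Python terms, then, the C test if (memcmp(sf, "ALAN", 4)) succeeds when the magic word is absent, so a minimal sketch of the correct translation is:

if file_buffer[0:4] != 'ALAN':
    pass  # memcmp nonzero: no magic word, so this is the Alan 2.x branch
else:
    pass  # memcmp zero: magic word present, so this is the Alan 3 branch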
And that seems to be the way it works:
>>> import urllib2
>>>
>>> alan2 = urllib2.urlopen("http://ifarchive.plover.net/if-archive/games/competition2001/alan/chasing/chasing.acd").read(4)
>>> alan3 = urllib2.urlopen("http://mirror.ifarchive.org/if-archive/games/competition2006/alan/enterthedark/EnterTheDark.a3c").read(4)
>>>
>>> alan2
'\x02\x08\x01\x00'
>>> alan3
'ALAN'
Hypothesis 1: You are running on Windows, and you haven't opened your file in binary mode.
Your Python version looks fine to me.
PS: I missed the "memcmp() catch" that DSM found, so the Python code for `if memcmp(…)…` should actually be `if file_buffer[0:4] != 'ALAN':`.
As far as I can see from the C code and from the sample file you give in the comments to the original question, the sample file is indeed invalid; here are the values:
read_alan_int(sf+12) == 24 # 0, 0, 0, 24 in file sf, big endian
crc = 0
read_alan_int(sf+176) == 46 # 0, 0, 0, 46 in file sf, big endian
So, crc != read_alan_int(sf+176), indeed.
Are you sure that the sample file is a valid file? Or is part of the calculation of crc missing from the original post?
Related
Problem:
I made an ELF executable that self-modifies one of its bytes. It simply changes a 0 to a 1. When I run the executable normally, I can see that the change was successful, because it runs exactly as expected (more on that further down). The problem arises when debugging it: the debugger (radare2) returns the wrong value when looking at the modified byte.
Context:
I made a reverse engineering challenge, inspired by Smallest elf. You can see the "source code" (if you can even call it that) here: https://pastebin.com/Yr1nFX8W.
To assemble and execute:
nasm -f bin -o tinyelf tinyelf.asm
chmod +x tinyelf
./tinyelf [flag]
If the flag is right, it returns 0. Any other value means your answer is wrong.
./tinyelf FLAG{wrong-flag}; echo $?
... outputs "255".
!Solution SPOILERS!
It's possible to reverse it statically. Once that is done, you will find out that each character in the flag is found by doing this calculation:
flag[i] = b[i] + b[i+32] + b[i+64] + b[i+96];
...where i is the index of the character and b is the bytes of the executable itself. Here is a C program that solves the challenge without a debugger:
#include <stdio.h>

int main(void)
{
    char buffer[128];
    FILE *fp = fopen("tinyelf", "rb");  /* binary mode */
    fread(buffer, 128, 1, fp);
    fclose(fp);

    int i;
    char c = 0;
    for (i = 0; i < 32; i++) {
        c = buffer[i];
        // handle self-modifying code
        if (i == 10) {
            c = 0;
        }
        c += buffer[i+32] + buffer[i+64] + buffer[i+96];
        printf("%c", c);
    }
    printf("\n");
    return 0;
}
You can see that my solver handles a special case: when i == 10, c = 0. That's because it's the index of the byte that is modified during execution. Running the solver and calling tinyelf with its output, I get:
FLAG{Wh3n0ptiMizaTioNGOesT00F4r}
./tinyelf FLAG{Wh3n0ptiMizaTioNGOesT00F4r} ; echo $?
Output: 0. Success!
Great, let's try to solve it dynamically now, using Python and radare2:
import r2pipe
r2 = r2pipe.open('./tinyelf')
r2.cmd('doo FLAG{AAAAAAAAAAAAAAAAAAAAAAAAAA}')
r2.cmd('db 0x01002051')
flag = ''
for i in range(0, 32):
    r2.cmd('dc')
    eax = r2.cmd('dr? al')
    c = int(eax, 16)
    flag += chr(c)

print('\n\n' + flag)
It puts a breakpoint on the instruction that compares an input character with the expected character, then reads what it is compared against (al). This SHOULD work. Yet, here is the output:
FLAG{Wh3n0�tiMiza�ioNGOesT00F4r}
2 incorrect values, one of which is at index 10 (the modified byte). Weird; maybe a bug in radare2? Let's try Unicorn (a CPU emulator) next:
from unicorn import *
from unicorn.x86_const import *
from pwn import *
ADDRESS = 0x01002000
mu = Uc(UC_ARCH_X86, UC_MODE_32)
code = bytearray(open('./tinyelf').read())
mu.mem_map(ADDRESS, 20 * 1024 * 1024)
mu.mem_write(ADDRESS, str(code))
mu.reg_write(UC_X86_REG_ESP, ADDRESS + 0x2000)
mu.reg_write(UC_X86_REG_EBP, ADDRESS + 0x2000)
mu.mem_write(ADDRESS + 0x2000, p32(2)) # argc
mu.mem_write(ADDRESS + 0x2000 + 4, p32(ADDRESS + 0x5000)) # argv[0]
mu.mem_write(ADDRESS + 0x2000 + 8, p32(ADDRESS + 0x5000)) # argv[1]
mu.mem_write(ADDRESS + 0x5000, "x" * 32)
flag = ''
def hook_code(uc, address, size, user_data):
    global flag
    eip = uc.reg_read(UC_X86_REG_EIP)
    if eip == 0x01002051:
        c = uc.reg_read(UC_X86_REG_EAX) & 0x7f
        #print(str(c) + " " + chr(c))
        flag += chr(c)

mu.hook_add(UC_HOOK_CODE, hook_code)

try:
    mu.emu_start(0x01002004, ADDRESS + len(code))
except Exception:
    print flag
This time the solver outputs: FLAG{Wh3n0otiMizaTioNGOesT00F4r}
Notice index 10: 'o' instead of 'p'. That's an off-by-one mistake exactly where the byte is modified. That can't be a coincidence, right?
Does anyone have an idea why neither of these scripts works? Thank you.
There is no issue with radare2; your analysis of the program is incorrect, and so the code you wrote handles this RE challenge incorrectly.
Let's start with:
When i == 10, c = 0. That's because it's the index of the byte that is modified during execution.
That is partially true. The byte is set to zero at the beginning, but then after each round there is this code:
xor al, byte [esi]
or byte [ebx + 0xa], al
So let's understand what's happening here. al is the currently calculated char of the flag, esi points to the flag that was entered as an argument, and [ebx + 0xa] currently holds 0 (set at the beginning). The char at index 0xa will stay zero only if each calculated flag char is equal to the corresponding one in the argument. Since you are running r2 with a fake flag, that starts to be a problem from the 6th char on, and the result is the first � at index 10. To mitigate that, we need to update your script a little bit.
eax = r2.cmd('dr? al')
c = int(eax, 16)
r2.cmd("ds 2")
r2.cmd("dr al = 0x0")
What we do here: after the breakpoint is hit and we have read the calculated flag char, we step two instructions further (to reach 0x01002054) and then set al to 0x0, to emulate that our char at [esi] was actually the same as the calculated one (so the xor returns 0). By doing this we keep the value at 0xa zero.
Now the second broken character. This RE is tricky ;) - it reads itself, and if you forget about that you might end up with a case like this. Let's analyze why this character is off. It is the 18th character of the flag (so index 17, as we start from 0). If we apply the formula for the indexes read from the binary, we get: 17 (dec) = 11 (hex), 17 + 32 = 49 (dec) = 31 (hex), 17 + 64 = 81 (dec) = 51 (hex), 17 + 96 = 113 (dec) = 71 (hex). But doesn't that 51 (hex) look oddly familiar? Didn't we see it somewhere before? Yup, it's the offset at which you set your breakpoint to read the al value.
This is the code that breaks your second char:
r2.cmd('db 0x01002051')
Yup - your breakpoint. You set it to break at that address, and a software breakpoint works by putting a 0xcc byte at the target memory address. So when the opcode that reads the 3rd byte of the 18th char hits that spot, it does not get 0x5b (the original value); it gets 0xcc. To fix that we need to correct the calculation. It could probably be done in a smarter/more elegant way, but I went for a simple solution:
if i == 17:
    c -= (0xcc - 0x5b)
Just subtract what was unintentionally added by putting a breakpoint in the code.
The final code:
import r2pipe
r2 = r2pipe.open('./tinyelf')
print r2
r2.cmd("doo FLAG{AAAAAAAAAAAAAAAAAAAAAAAAAA}")
r2.cmd("db 0x01002051")
flag = ''
for i in range(0, 32):
    r2.cmd("dc")
    eax = r2.cmd('dr? al')
    c = int(eax, 16)
    if i == 17:
        c -= (0xcc - 0x5b)
    r2.cmd("ds 2")
    r2.cmd("dr al = 0x0")
    flag += chr(c)

print('\n\n' + flag)
That prints the correct flag:
FLAG{Wh3n0ptiMizaTioNGOesT00F4r}
As for Unicorn: you are not setting a breakpoint there, so the second problem goes away, and the off-by-one at index 10 is due to the same reason as with r2.
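For completeness, the same workaround can be mirrored in the Unicorn hook (a sketch; it assumes, per the walkthrough above, that 0x01002054 is the instruction two steps past the compare at 0x01002051, i.e. where the r2 script lands after ds 2):

def hook_code(uc, address, size, user_data):
    global flag
    eip = uc.reg_read(UC_X86_REG_EIP)
    if eip == 0x01002051:
        c = uc.reg_read(UC_X86_REG_EAX) & 0x7f
        flag += chr(c)
    elif eip == 0x01002054:
        # force AL = 0, as if the calculated char matched the input char,
        # so the self-modified byte at offset 0xa stays zero
        eax = uc.reg_read(UC_X86_REG_EAX)
        uc.reg_write(UC_X86_REG_EAX, eax & ~0xff)

No i == 17 correction is needed here, since no breakpoint byte is ever written into the emulated code.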
crc_table = None

def make_crc_table():
    global crc_table
    crc_table = [0] * 256
    for n in xrange(256):
        c = n
        for k in xrange(8):
            if c & 1:
                c = 0xedb88320L ^ (c >> 1)
            else:
                c = c >> 1
        crc_table[n] = c

make_crc_table()

"""
/* Update a running CRC with the bytes buf[0..len-1]--the CRC
   should be initialized to all 1's, and the transmitted value
   is the 1's complement of the final running CRC (see the
   crc() routine below)). */
"""
def update_crc(crc, buf):
    c = crc
    for byte in buf:
        c = crc_table[int((c ^ ord(byte)) & 0xff)] ^ (c >> 8)
    return c

# /* Return the CRC of the bytes buf[0..len-1]. */
def crc(buf):
    return update_crc(0xffffffffL, buf) ^ 0xffffffffL
I used this code to calculate a PNG CRC value. My IHDR chunk data is 000008A0 000002FA 08020000 00, and the result of that code was 0xa1565b1L. However, the real CRC is 0x84E42B87; I checked this value with a well-known PNG checker tool, and the correct CRC is indeed 0x84E42B87. I can't understand how the correct value is calculated.
The CRC is calculated over the chunk type and the data, not just the data. So those bytes would be preceded by the four bytes IHDR. Then you get the correct CRC.
As an aside, I have no idea how you got 0xa1565b1L from 000008A0 000002FA 08020000 00. I get 0xa500050a as the CRC of those bytes. There must be something else that you're doing wrong as well. You would need to provide a complete example for us to be able to tell.
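To make that concrete with the code above (a sketch; the bytes are decoded from the hex dump quoted in the question):

chunk_type = 'IHDR'
chunk_data = '\x00\x00\x08\xa0\x00\x00\x02\xfa\x08\x02\x00\x00\x00'
print hex(crc(chunk_type + chunk_data))  # 0x84e42b87L, the expected CRC
print hex(crc(chunk_data))               # 0xa500050aL: the data alone is not enough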
I have been trying to get my head around CRC32 calculations without much success; the values that I get do not match what I should get.
I am aware that Python has libraries capable of generating these checksums (namely zlib and binascii), but I do not have the luxury of being able to use them, as the CRC functionality does not exist in MicroPython.
So far I have the following code:
import binascii
import zlib
from array import array
poly = 0xEDB88320
table = array('L')
for byte in range(256):
    crc = 0
    for bit in range(8):
        if (byte ^ crc) & 1:
            crc = (crc >> 1) ^ poly
        else:
            crc >>= 1
        byte >>= 1
    table.append(crc)

def crc32(string):
    value = 0xffffffffL
    for ch in string:
        value = table[(ord(ch) ^ value) & 0x000000ffL] ^ (value >> 8)
    return value
teststring = "test"
print "binascii calc: 0x%08x" % (binascii.crc32(teststring) & 0xffffffff)
print "zlib calc: 0x%08x" % (zlib.crc32(teststring) & 0xffffffff)
print "my calc: 0x%08x" % (crc32(teststring))
Then I get the following output:
binascii calc: 0xd87f7e0c
zlib calc: 0xd87f7e0c
my calc: 0x2780810c
The binascii and zlib calculations agree, whereas mine doesn't. I believe the calculated table of bytes is correct, as I have compared it to examples available on the net. So the issue must be in the routine where each byte is processed; could anyone point me in the right direction?
Thanks in advance!
I haven't looked closely at your code, so I can't pinpoint the exact source of the error, but you can easily tweak it to get the desired output:
import binascii
from array import array
poly = 0xEDB88320
table = array('L')
for byte in range(256):
    crc = 0
    for bit in range(8):
        if (byte ^ crc) & 1:
            crc = (crc >> 1) ^ poly
        else:
            crc >>= 1
        byte >>= 1
    table.append(crc)

def crc32(string):
    value = 0xffffffffL
    for ch in string:
        value = table[(ord(ch) ^ value) & 0xff] ^ (value >> 8)
    return -1 - value
# test
data = (
    '',
    'test',
    'hello world',
    '1234',
    'A long string to test CRC32 functions',
)

for s in data:
    print repr(s)
    a = binascii.crc32(s)
    print '%08x' % (a & 0xffffffffL)
    b = crc32(s)
    print '%08x' % (b & 0xffffffffL)
    print
output
''
00000000
00000000
'test'
d87f7e0c
d87f7e0c
'hello world'
0d4a1185
0d4a1185
'1234'
9be3e0a3
9be3e0a3
'A long string to test CRC32 functions'
d2d10e28
d2d10e28
Here are a couple more tests that verify that the tweaked crc32 gives the same result as binascii.crc32.
from random import seed, randrange

print 'Single byte tests...',
for i in range(256):
    s = chr(i)
    a = binascii.crc32(s) & 0xffffffffL
    b = crc32(s) & 0xffffffffL
    assert a == b, (repr(s), a, b)
print('ok')

seed(42)
print 'Multi-byte tests...'
for width in range(2, 20):
    print 'Width', width
    r = range(width)
    for n in range(1000):
        s = ''.join([chr(randrange(256)) for i in r])
        a = binascii.crc32(s) & 0xffffffffL
        b = crc32(s) & 0xffffffffL
        assert a == b, (repr(s), a, b)
print('ok')
output
Single byte tests... ok
Multi-byte tests...
Width 2
Width 3
Width 4
Width 5
Width 6
Width 7
Width 8
Width 9
Width 10
Width 11
Width 12
Width 13
Width 14
Width 15
Width 16
Width 17
Width 18
Width 19
ok
As discussed in the comments, the source of the error in the original code is that this CRC-32 algorithm inverts the initial crc buffer, and then inverts the final buffer contents. So value is initialised to 0xffffffff instead of zero, and we need to return value ^ 0xffffffff, which can also be written as ~value & 0xffffffff, i.e. invert value and then select the low-order 32 bits of the result.
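A minimal sketch of that fix applied to the original routine (same table as above, Python 2; crc32_fixed is just an illustrative name):

def crc32_fixed(string):
    value = 0xffffffffL  # initialize the CRC buffer to all 1's
    for ch in string:
        value = table[(ord(ch) ^ value) & 0xff] ^ (value >> 8)
    return ~value & 0xffffffffL  # 1's complement of the final running CRC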
If using binary data where the CRC is chained over multiple buffers, I used the following (using the OP's table):
def crc32(data, crc=0xffffffff):
    for b in data:
        crc = table[(b ^ crc) & 0xff] ^ (crc >> 8)
    return crc
One can XOR the final result with 0xffffffff to agree with the online calculators.
crc = crc32(b'test')
print('0x{:08x}'.format(crc))
crc = crc32(b'te')
crc = crc32(b'st', crc)
print('0x{:08x}'.format(crc))
print('xor: 0x{:08x}'.format(crc ^ 0xffffffff))
output
0x278081f3
0x278081f3
xor: 0xd87f7e0c
I have a string that is packed such that each character was originally an unsigned byte, but is stored as 7 bits and then packed into an unsigned byte array. I'm trying to find a quick way to unpack this string in Python, but the function I wrote using the bitstring module, while it works, is very slow. It seems like something like this should not be so slow, but I'm probably doing it very inefficiently...
This seems like something that is probably trivial but I just don't know what to use, maybe there is already a function that will unpack the string?
from bitstring import BitArray
def unpackString(raw):
    msg = ''
    bits = BitArray(bytes=raw)
    mask = BitArray('0b01111111')
    i = 0
    while 1:
        try:
            iByte = (bits[i:i + 8] & mask).int
            # value of 0 denotes a line break
            if iByte == 0:
                msg += '\n'
            elif iByte >= 32 and iByte <= 126:
                msg += chr(iByte)
            i += 7
        except:
            break
    return msg
This took me a while to figure out, as your solution seems to ignore the first bit of data. Given the input byte of 129 (0b10000001) I would expect to see 64 '1000000' printed by the following, but your code produces 1 '0000001' -- ignoring the first bit.
bs = b'\x81' # one byte string, whose value is 129 (0x81)
arr = BitArray(bs)
mask = BitArray('0b01111111')
byte = (arr[0:8] & mask).int
print(byte, repr("{:07b}".format(byte)))
Simplest solution would be to modify your solution to use bitstring.ConstBitStream -- I got an order of magnitude speed increase with the following.
from bitstring import ConstBitStream
def unpack_bitstream(raw):
    num_bytes, remainder = divmod(len(raw) * 8 - 1, 7)
    bitstream = ConstBitStream(bytes=raw, offset=1)  # use offset to ignore leading bit
    msg = b''
    for _ in range(num_bytes):
        byte = bitstream.read("uint:7")
        if not byte:
            msg += b'\n'
        elif 32 <= byte <= 126:
            msg += bytes((byte,))
            # msg += chr(byte) # python 2
    return msg
However, this can be done quite easily using only the standard library. This makes the solution more portable and, in the instances I tried, faster by another order of magnitude (I didn't try the cythonised version of bitstring).
def unpack_bytes(raw, zero_replacement=ord("\n")):
    # use - 1 to ignore leading bit
    num_bytes, remainder = divmod(len(raw) * 8 - 1, 7)
    i = int.from_bytes(raw, byteorder="big")
    # i = int(raw.encode("hex"), 16) # python 2
    if remainder:
        # remainder means there are unused trailing bits, so remove these
        i >>= remainder
    msg = []
    for _ in range(num_bytes):
        byte = i & 127
        if not byte:
            msg.append(zero_replacement)
        elif 32 <= byte <= 126:
            msg.append(byte)
        i >>= 7
    msg.reverse()
    return bytes(msg)
    # return b"".join(chr(c) for c in msg) # python 2
I've used python 3 to create these methods. If you're using python 2 then there are a number of adjustments you'll need to make. I've added these as comments after the line they are intended to replace and marked them python 2.
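As a quick round-trip check of unpack_bytes (an assumed example: the 7-bit codes for 'A' and 'B' packed behind a single leading pad bit, leaving one unused trailing bit):

packed = bytes([0b01000001, 0b10000100])  # bits: 0 1000001 1000010 0
print(unpack_bytes(packed))               # b'AB'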
How do I convert a hex string to a signed int in Python 3?
The best I can come up with is
h = '9DA92DAB'
b = bytes(h, 'utf-8')
ba = binascii.a2b_hex(b)
print(int.from_bytes(ba, byteorder='big', signed=True))
Is there a simpler way? Unsigned is so much easier: int(h, 16)
BTW, the origin of the question is itunes persistent id - music library xml version and iTunes hex version
In n-bit two's complement, the bits have these values:

bit 0 = 2^0
bit 1 = 2^1
...
bit n-2 = 2^(n-2)
bit n-1 = -2^(n-1)

But bit n-1 has value 2^(n-1) when unsigned, so the unsigned reading of the number is 2^n too high. Subtract 2^n if bit n-1 is set:
def twos_complement(hexstr, bits):
    value = int(hexstr, 16)
    if value & (1 << (bits - 1)):
        value -= 1 << bits
    return value
print(twos_complement('FFFE', 16))
print(twos_complement('7FFF', 16))
print(twos_complement('7F', 8))
print(twos_complement('FF', 8))
Output:
-2
32767
127
-1
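Applied to the hex string from the question (a usage sketch):

print(twos_complement('9DA92DAB', 32))  # -1649857109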
import struct
For Python 3 (with help from the comments):
h = '9DA92DAB'
struct.unpack('>i', bytes.fromhex(h))
For Python 2:
h = '9DA92DAB'
struct.unpack('>i', h.decode('hex'))
or if it is little endian:
h = '9DA92DAB'
struct.unpack('<i', h.decode('hex'))
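And, for completeness, the little-endian case for Python 3 (a sketch combining the two forms above):

h = '9DA92DAB'
struct.unpack('<i', bytes.fromhex(h))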
Here's a general function you can use for hex of any size:
import math
# hex string to signed integer
def htosi(val):
    uintval = int(val, 16)
    bits = 4 * (len(val) - 2)
    if uintval >= math.pow(2, bits - 1):
        uintval = int(0 - (math.pow(2, bits) - uintval))
    return uintval
And to use it:
h = str(hex(-5))
h2 = str(hex(-13589))
x = htosi(h)
x2 = htosi(h2)
This works for 16-bit signed ints; you can extend it for 32-bit ints, as sketched after the code. It uses the basic definition of two's complement signed numbers. Also note that XOR with a mask of all 1 bits is the same as a bitwise negation.
# convert to unsigned
x = int('ffbf', 16)  # example (-65)
# check sign bit
if (x & 0x8000) == 0x8000:
    # if set, invert and add one to get the negative value, then add the negative sign
    x = -((x ^ 0xffff) + 1)
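The same idea extended to 32 bits (a sketch; only the masks widen):

# convert to unsigned
x = int('ffffffbf', 16)  # example (-65 as a 32-bit value)
# check the 32-bit sign bit
if (x & 0x80000000) == 0x80000000:
    # if set, invert and add one to get the negative value, then add the negative sign
    x = -((x ^ 0xffffffff) + 1)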
It's a very late answer, but here's a function to do the above. This will extend for whatever length you provide. Credit for portions of this to another SO answer (I lost the link, so please provide it if you find it).
def hex_to_signed(source):
    """Convert a string hex value to a signed hexadecimal value.

    This assumes that source is the proper length, and the sign bit
    is the first bit in the first byte of the correct length.

    hex_to_signed("F") should return -1.
    hex_to_signed("0F") should return 15.
    """
    if not isinstance(source, str):
        raise ValueError("string type required")
    if 0 == len(source):
        raise ValueError("string is empty")
    sign_bit_mask = 1 << (len(source) * 4 - 1)
    other_bits_mask = sign_bit_mask - 1
    value = int(source, 16)
    return -(value & sign_bit_mask) | (value & other_bits_mask)
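A quick check against the docstring's examples:

print(hex_to_signed("F"))   # -1
print(hex_to_signed("0F"))  # 15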