Rice coding in Cython - python

Here is a Python implementation of the well-known Rice coding (a Golomb code with M = 2^k, http://en.wikipedia.org/wiki/Golomb_coding), widely used in compression algorithms.
Unfortunately it is rather slow. What could be the cause of this low speed? (StringIO? The fact that data is written byte after byte?)
What would you recommend in order to speed up the encoding? What trick would you use to speed it up with Cython?
import struct
import StringIO

def put_bit(f, b):
    global buff, filled
    buff = buff | (b << (7-filled))
    if (filled == 7):
        f.write(struct.pack('B',buff))
        buff = 0
        filled = 0
    else:
        filled += 1

def rice_code(f, x, k):
    q = x / (1 << k)
    for i in range(q):
        put_bit(f, 1)
    put_bit(f, 0)
    for i in range(k-1, -1, -1):
        put_bit(f, (x >> i) & 1)

def compress(L, k):
    f = StringIO.StringIO()
    global buff, filled
    buff = 0
    filled = 0
    for x in L:                  # encode all numbers
        rice_code(f, x, k)
    for i in range(8-filled):    # write the last byte (if necessary pad with 1111...)
        put_bit(f, 1)
    return f.getvalue()

if __name__ == '__main__':
    print struct.pack('BBB', 0b00010010, 0b00111001, 0b01111111)  # see http://fr.wikipedia.org/wiki/Codage_de_Rice#Exemples
    print compress([1,2,3,10], k = 3)
PS: Should this question be moved to https://codereview.stackexchange.com/?

I would use a C-style buffer instead of StringIO when building the compressed result, and I would attempt to use only C-style temporaries in the encoding loop. I also noticed that you can pre-initialize your buffer to be filled with set bits ('1' bits); this makes encoding values with a large quotient faster, because you can simply skip over those bits in the output buffer.
I rewrote the compress function with those things in mind and measured the speed of the result; my version is more than ten times faster than your encoder, but the resulting code is less readable.
Here is my version:
cimport cpython.string
cimport libc.stdlib
cimport libc.string

import struct

cdef int BUFFER_SIZE = 4096

def compress(L, k):
    result = ''

    cdef unsigned cvalue
    cdef char *position
    cdef int bit, nbit
    cdef unsigned q, r
    cdef unsigned ck = k
    cdef unsigned mask = (1 << ck) - 1

    cdef char *buff = <char *>libc.stdlib.malloc(BUFFER_SIZE)
    if buff is NULL:
        raise MemoryError

    try:
        # Initialize the buffer; space is assumed to contain all set bits
        libc.string.memset(buff, 0xFF, BUFFER_SIZE)

        position = buff
        bit = 7

        for value in L:
            cvalue = value
            q = cvalue >> ck
            r = cvalue & mask

            # Skip ahead some number of pre-set one bits for the quotient
            position += q / 8
            bit -= q % 8
            if bit < 0:
                bit += 8
                position += 1

            # If we have gone off the end of the buffer, extract
            # the result and reset buffer pointers
            while position - buff >= BUFFER_SIZE:
                block = cpython.string.PyString_FromStringAndSize(
                    buff, BUFFER_SIZE)
                result = result + block
                libc.string.memset(buff, 0xFF, BUFFER_SIZE)
                position = position - BUFFER_SIZE

            # Clear the final bit to indicate the end of the quotient
            position[0] = position[0] ^ (1 << bit)
            if bit > 0:
                bit = bit - 1
            else:
                position += 1
                bit = 7

                # Check for buffer overflow
                if position - buff >= BUFFER_SIZE:
                    block = cpython.string.PyString_FromStringAndSize(
                        buff, BUFFER_SIZE)
                    result = result + block
                    libc.string.memset(buff, 0xFF, BUFFER_SIZE)
                    position = buff

            # Encode the remainder bits one by one
            for nbit in xrange(k - 1, -1, -1):
                position[0] = (position[0] & ~(1 << bit)) | \
                    (((r >> nbit) & 1) << bit)
                if bit > 0:
                    bit = bit - 1
                else:
                    position += 1
                    bit = 7

                    # Check for buffer overflow
                    if position - buff >= BUFFER_SIZE:
                        block = cpython.string.PyString_FromStringAndSize(
                            buff, BUFFER_SIZE)
                        result = result + block
                        libc.string.memset(buff, 0xFF, BUFFER_SIZE)
                        position = buff

        # Advance if we have partially used the last byte
        if bit < 7:
            position = position + 1

        # Extract the used portion of the buffer
        block = cpython.string.PyString_FromStringAndSize(
            buff, position - buff)
        result = result + block

        return result
    finally:
        libc.stdlib.free(buff)


def test():
    a = struct.pack('BBB', 0b00010010, 0b00111001, 0b01111111)  # see http://fr.wikipedia.org/wiki/Codage_de_Rice#Exemples
    b = compress([1,2,3,10], k = 3)
    assert a == b
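To actually try the Cython version you have to compile it first. A minimal build sketch, assuming the code above is saved as rice.pyx (the file name is my choice, not from the original answer):

# setup.py -- build the extension with: python setup.py build_ext --inplace
from distutils.core import setup
from Cython.Build import cythonize

setup(ext_modules=cythonize("rice.pyx"))

After building, import rice and call rice.compress(...) exactly like the pure-Python version.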

Related

Bit operation Left Shift Python on big integers

I am trying to implement a left bit shift in Python on big integers. Because of their size, I want to store their bit values in a file and work on the file afterwards, since the bit string is too big for my RAM.
However, I am having trouble deleting the first N bits of the int without using its binary representation, since I can't afford to build that.
Here is what I did so far:
def __lshift(self, n, d):
    N = n.bit_length()
    if N > 2**20:  # n is big and we have to use files
        temp = open('bin.tmp','w')
        while N > 2**20:
            n_ = n >> 20  # Take the 20 first bits of n
            temp.write(bin(n)[2:])
            # Here I would like to delete 20 first bits of n
    else:
        bin_ = bin(n)[2:]
        bin_ = bin_[:N-d] + bin_[d:]
        return int(bin_,2)
Thanks for your help!
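As an aside, the "delete the 20 first bits" step can be done with a mask instead of a string round-trip. A minimal sketch, assuming "the 20 first bits" means the 20 most significant bits (variable names follow the question):

N = n.bit_length()
top = n >> (N - 20)         # the 20 most significant bits of n
n &= (1 << (N - 20)) - 1    # drop them, keeping only the lower N-20 bits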
Here is the solution I finally found:
def lshift(n, d):
    def one_turn(n):
        N = n.bit_length()
        end = n >> N-1
        begin = (n & ((1 + (1 << N-1) - 1) ^ ((1 << N) - 1))) << 1
        return begin + end
    for i in range(d):
        n = one_turn(n)
    return n
Finally easier than what I was trying to do :)
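A quick sanity check of the rotation (a small worked example, not from the original post): the function rotates within the current bit length, so the leading 1 of 0b1011 wraps around to the bottom.

print lshift(0b1011, 1)   # prints 7, i.e. 0b0111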

C function to Python (different results)

I am trying to port this snippet of code from C to Python. The outputs are different even though it should be doing the same thing.
This is the C version of the code which works:
int main(void)
{
    uint8_t pac[] = {0x033,0x55,0x22,0x65,0x76};
    uint8_t len = 5;
    uint8_t chan = 0x64;

    btLeWhiten(pac, len, chan);

    for(int i = 0;i<=len;i++)
    {
        printf("Whiten %02d \r\n",pac[i]);
    }

    while(1)
    {
    }
    return 0;
}

void btLeWhiten(uint8_t* data, uint8_t len, uint8_t whitenCoeff)
{
    uint8_t m;

    while(len--){
        for(m = 1; m; m <<= 1){
            if(whitenCoeff & 0x80){
                whitenCoeff ^= 0x11;
                (*data) ^= m;
            }
            whitenCoeff <<= 1;
        }
        data++;
    }
}
What I currently have in Python is:
def whiten(data, len, whitenCoeff):
    idx = len
    while(idx > 0):
        m = 0x01
        for i in range(0,8):
            if(whitenCoeff & 0x80):
                whitenCoeff ^= 0x11
                data[len - idx -1 ] ^= m
                whitenCoeff <<= 1
            m <<= 0x01
        idx = idx - 1

pac = [0x33,0x55,0x22,0x65,0x76]
len = 5
chan = 0x64

def main():
    whiten(pac,5,chan)
    print pac

if __name__=="__main__":
    main()
The problem I see is that whitenCoeff always remains 8 bits wide in the C snippet, but it grows beyond 8 bits in Python on each loop pass.
You've got a few more problems.
whitenCoeff <<= 1; is outside of the if block in your C code, but it's inside of the if block in your Python code.
data[len - idx -1 ] ^= m wasn't translated correctly; it works backwards from the C code.
This code produces the same output as your C code:
def whiten(data, whitenCoeff):
    for index in range(len(data)):
        for i in range(8):
            if (whitenCoeff & 0x80):
                whitenCoeff ^= 0x11
                data[index] ^= (1 << i)
            whitenCoeff = (whitenCoeff << 1) & 0xff
    return data

if __name__=="__main__":
    print whiten([0x33,0x55,0x22,0x65,0x76], 0x64)
In C you are writing data from 0 to len-1 but in Python you are writing data from -1 to len-2. Remove the -1 from this line:
data[len - idx -1 ] ^= m
like this
data[len - idx] ^= m
You also need to put this line outside the if:
whitenCoeff <<= 1
whitenCoeff <<= 1 in C becomes 0 after a while because it is 8-bit data.
In Python, there's no such limit, so you have to write:
whitenCoeff = (whitenCoeff<<1) & 0xFF
to mask the higher bits out.
(Don't forget to check vz0's remark on the array boundary.)
Plus, there was an indentation issue.
Rewritten code which gives the same result:
def whiten(data, whitenCoeff):
    idx = len(data)
    while(idx > 0):
        m = 0x01
        for i in range(0,8):
            if(whitenCoeff & 0x80):
                whitenCoeff ^= 0x11
                data[-idx] ^= m
            whitenCoeff = (whitenCoeff<<1) & 0xFF
            m <<= 0x01
        idx = idx - 1

pac = [0x33,0x55,0x22,0x65,0x76]
chan = 0x64

def main():
    whiten(pac,chan)
    print(pac)

if __name__=="__main__":
    main()
Slightly off-topic: Note that the C version already has problems:
for(int i = 0;i<=len;i++)
should be
for(int i = 0;i<len;i++)
I solved it by ANDing the value in the Python code with 0xFF. That keeps the variable from growing beyond 8 bits.
Your code in C does not appear to work as intended since it displays one more value than is available in pac. Correcting for this should cause 5 values to be displayed instead of 6 values. To copy the logic from C over to Python, the following was written in an attempt to duplicate the results:
#! /usr/bin/env python3

def main():
    pac = bytearray(b'\x33\x55\x22\x65\x76')
    chan = 0x64
    bt_le_whiten(pac, chan)
    print('\n'.join(map('Whiten {:02}'.format, pac)))

def bt_le_whiten(data, whiten_coeff):
    for offset in range(len(data)):
        m = 1
        while m & 0xFF:
            if whiten_coeff & 0x80:
                whiten_coeff ^= 0x11
                data[offset] ^= m
            whiten_coeff <<= 1
            whiten_coeff &= 0xFF
            m <<= 1

if __name__ == '__main__':
    main()
To simulate 8-bit unsigned integers, the snippet & 0xFF is used in several places to truncate numbers to the proper size. The bytearray data type is used to store pac since that appears to be the most appropriate storage method in this case. The code still needs documentation to be properly understood.

writing a 1024 bits length decimal number in a binary file with python

I have obtained a 1024-bit number in decimal form (representing half of a 2048-bit RSA key).
I would then like to write this number into a binary file, i.e. the bytes of the file directly represent the number.
I figured out how to do so with a normal-size number, 4444 for example:
import struct

with open('test','wb') as f:
    for b in struct.pack('>L',4444):
        f.write(b)
I then get these bytes in the binary file: 00 00 11 5c
However, how can I do this for long numbers, like a 1024-bit one?
Thanks for any answer!
>>> binascii.unhexlify('{:0{}x}'.format(19, int(1024/4)))
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13'
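In Python 3 the same job can be done directly with int.to_bytes, which turns an arbitrarily large non-negative integer into a fixed-length big-endian byte string. A minimal sketch (128 bytes holds a 1024-bit value):

n = 4444                     # stand-in for the 1024-bit integer
with open('test', 'wb') as f:
    f.write(n.to_bytes(128, 'big'))   # 128 bytes == 1024 bits, big-endian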
You can treat any positive Python integer as a base-256 encoded bytestring and encode and decode one like this:
import math

def hexrepr(b):  # bytearray arg
    return ' '.join(("%02x" % i) for i in b)

def hexstr(b):  # bytearray arg
    return "'%s'" % ''.join('\\x'+("%02x" % i) for i in b)

def roundup(n, m):  # round n up to nearest whole m
    if m == 0:
        return 0
    else:
        add = m // abs(m)
        return ((n+m-add) // m) * m

def base256_encode(n, minlen=0):  # int/long to byte array
    if n > 0:
        arr = []
        while n:
            n, rem = divmod(n, 256)
            arr.append(rem)
        b = bytearray(reversed(arr))
    elif n == 0:
        b = bytearray(b'\x00')
    else:
        raise ValueError
    if minlen > 0 and len(b) < minlen:  # zero padding needed?
        b = (minlen-len(b)) * '\x00' + b
    return b

def base256_decode(a_bytearray):  # bytearray to number
    return reduce(lambda a,i: a*256 + i, a_bytearray, 0)

n = 4444  # must be unsigned integer
nbits = int(math.floor(math.log(n, 2)) + 1)  # number of bits needed to represent n

print 'n:', format(n, ',d')
print('nbits: {}'.format(nbits))
print('roundup(nbits, 8): {}'.format(roundup(nbits, 8)))

encoded = base256_encode(n, roundup(nbits, 8)/8)
print 'encoded = base256_encode(n, roundup(nbits, 8)/8)'
print 'hexstr(encoded):', hexstr(encoded)
print 'encoded:', hexrepr(encoded)
print 'decoded:', format(base256_decode(encoded), ',d')
Which produces the following for n = 4444:
Output:
n: 4,444
nbits: 13
roundup(nbits, 8): 16
encoded = base256_encode(n, roundup(nbits, 8)/8)
hexstr(encoded): '\x11\x5c'
encoded: 11 5c
decoded: 4,444

Need help porting C function to Python

I'm trying to port a C function which calculates a GPS checksum over to Python. According to the receiving end, I am sometimes miscalculating the checksum, so I must still have a bug in there.
C code is
void ComputeAsciiChecksum(unsigned char *data, unsigned int len,
                          unsigned char *p1, unsigned char *p2)
{
    unsigned char c,h,l;

    assert(Stack_Low());

    c = 0;
    while (len--) {
        c ^= *data++;
    }

    h = (c>>4);
    l = c & 0xf;

    h += '0';
    if (h > '9') {
        h += 'A'-'9'-1;
    }

    l += '0';
    if (l > '9') {
        l += 'A'-'9'-1;
    }

    *p1 = h;
    *p2 = l;
}
My attempt at a Python function is
def calcChecksum(line):
    c = 0
    i = 0
    while i < len(line):
        c ^= ord(line[i]) % 256
        i += 1
    return '%02X' % c
Here is how you can set up a testing environment to diagnose your problem.
Copy the above C function to a file, remove the assert() line, and compile it to a shared library with
gcc -shared -o checksum.so checksum.c
(If you are on Windows or whatever, do the equivalent of the above.)
Copy this code to a Python file:
import ctypes
import random

c = ctypes.CDLL("./checksum.so")
c.ComputeAsciiChecksum.restype = None
c.ComputeAsciiChecksum.argtypes = [ctypes.c_char_p, ctypes.c_uint,
                                   ctypes.c_char_p, ctypes.c_char_p]

def compute_ascii_checksum_c(line):
    p1 = ctypes.create_string_buffer(1)
    p2 = ctypes.create_string_buffer(1)
    c.ComputeAsciiChecksum(line, len(line), p1, p2)
    return p1.value + p2.value

def compute_ascii_checksum_py(line):
    c = 0
    i = 0
    while i < len(line):
        c ^= ord(line[i]) % 256
        i += 1
    return '%02X' % c
Now you have access to both versions of the checksum function and can compare the results. I wasn't able to find any differences.
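For example, a quick fuzz loop over random printable payloads (a sketch that assumes the definitions above live in the same file):

if __name__ == "__main__":
    for _ in range(10000):
        line = ''.join(chr(random.randint(0x20, 0x7e))
                       for _ in range(random.randint(1, 82)))
        assert compute_ascii_checksum_c(line) == compute_ascii_checksum_py(line), line
    print "no mismatches found"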
(BTW, how are you computing the length of the string in C? If you are using strlen(), this would stop at NUL bytes.)
As a side note, your Python version isn't really idiomatic Python. Here are two more idiomatic versions:
def compute_ascii_checksum_py(line):
    checksum = 0
    for c in line:
        checksum ^= ord(c)
    return "%02X" % checksum
or
import operator

def compute_ascii_checksum_py(line):
    return "%02X" % reduce(operator.xor, map(ord, line))
Note that these implementations should do exactly the same as yours.
Have you checked out this cookbook recipe? It hints at what input you should include in "line", returns an asterisk in front of the checksum, and gives one (input, output) data pair that you can use as test data.
Are you sure that "the receiver" is working correctly? Is the problem due to upper vs lower case hex letters?

Python TEA implementation

Does anybody know of a proper Python implementation of TEA (Tiny Encryption Algorithm)? I tried the one I found here: http://sysadminco.com/code/python-tea/ - but it does not seem to work properly.
It returns different results than other implementations in C or Java. I guess it's caused by the completely different data types in Python (or rather the lack of fixed-width integer types).
Here's the code and an example:
def encipher(v, k):
    y=v[0];z=v[1];sum=0;delta=0x9E3779B9;n=32
    w=[0,0]
    while(n>0):
        y += (z << 4 ^ z >> 5) + z ^ sum + k[sum & 3]
        y &= 4294967295L  # maxsize of 32-bit integer
        sum += delta
        z += (y << 4 ^ y >> 5) + y ^ sum + k[sum>>11 & 3]
        z &= 4294967295L
        n -= 1
    w[0]=y; w[1]=z
    return w

def decipher(v, k):
    y=v[0]
    z=v[1]
    sum=0xC6EF3720
    delta=0x9E3779B9
    n=32
    w=[0,0]
    # sum = delta<<5, in general sum = delta * n
    while(n>0):
        z -= (y << 4 ^ y >> 5) + y ^ sum + k[sum>>11 & 3]
        z &= 4294967295L
        sum -= delta
        y -= (z << 4 ^ z >> 5) + z ^ sum + k[sum&3]
        y &= 4294967295L
        n -= 1
    w[0]=y; w[1]=z
    return w
Python example:
>>> import tea
>>> key = [0xbe168aa1, 0x16c498a3, 0x5e87b018, 0x56de7805]
>>> v = [0xe15034c8, 0x260fd6d5]
>>> res = tea.encipher(v, key)
>>> "%X %X" % (res[0], res[1])
'70D16811 F935148F'
C example:
#include <unistd.h>
#include <stdio.h>

void encipher(unsigned long *const v,unsigned long *const w,
              const unsigned long *const k)
{
    register unsigned long y=v[0],z=v[1],sum=0,delta=0x9E3779B9,
                           a=k[0],b=k[1],c=k[2],d=k[3],n=32;

    while(n-->0)
    {
        sum += delta;
        y += (z << 4)+a ^ z+sum ^ (z >> 5)+b;
        z += (y << 4)+c ^ y+sum ^ (y >> 5)+d;
    }

    w[0]=y; w[1]=z;
}

int main()
{
    unsigned long v[] = {0xe15034c8, 0x260fd6d5};
    unsigned long key[] = {0xbe168aa1, 0x16c498a3, 0x5e87b018, 0x56de7805};
    unsigned long res[2];

    encipher(v, res, key);

    printf("%X %X\n", res[0], res[1]);
    return 0;
}
$ ./tea
D6942D68 6F87870D
Please note that both examples were run with the same input data (v and key), but the results were different. I'm pretty sure the C implementation is correct - it comes from a site referenced by Wikipedia (I couldn't post a link to it because I don't have enough reputation points yet - some antispam thing).
I fixed it. Here is a working TEA implementation in Python:
#!/usr/bin/env python
#-*- coding: utf-8 -*-

import sys
from ctypes import *

def encipher(v, k):
    y = c_uint32(v[0])
    z = c_uint32(v[1])
    sum = c_uint32(0)
    delta = 0x9e3779b9
    n = 32
    w = [0,0]

    while(n>0):
        sum.value += delta
        y.value += ( z.value << 4 ) + k[0] ^ z.value + sum.value ^ ( z.value >> 5 ) + k[1]
        z.value += ( y.value << 4 ) + k[2] ^ y.value + sum.value ^ ( y.value >> 5 ) + k[3]
        n -= 1

    w[0] = y.value
    w[1] = z.value
    return w

def decipher(v, k):
    y = c_uint32(v[0])
    z = c_uint32(v[1])
    sum = c_uint32(0xc6ef3720)
    delta = 0x9e3779b9
    n = 32
    w = [0,0]

    while(n>0):
        z.value -= ( y.value << 4 ) + k[2] ^ y.value + sum.value ^ ( y.value >> 5 ) + k[3]
        y.value -= ( z.value << 4 ) + k[0] ^ z.value + sum.value ^ ( z.value >> 5 ) + k[1]
        sum.value -= delta
        n -= 1

    w[0] = y.value
    w[1] = z.value
    return w

if __name__ == "__main__":
    key = [1,2,3,4]
    v = [1385482522,639876499]
    enc = encipher(v,key)
    print enc
    print decipher(enc,key)
And a small sample:
>>> v
[1385482522, 639876499]
>>> tea.decipher(tea.encipher(v,key),key)
[1385482522L, 639876499L]
Since TEA is a block cipher and your v is a very small block, I'd guess there may be block padding differences, or as Wikipedia notes:
http://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm:
Note that the reference implementation is bound to a specific microprocessor architecture, meaning that byte order considerations are important when cyphertext is shared and processed on different systems. The original paper does not specify any details about microprocessor architecture, and so anyone implementing a system using TEA would need to make those specifications for themselves.
I didn't inspect either implementation in detail. Your &= statements feel suspicious, too.
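To illustrate the byte-order point, here is a small sketch showing how the same two 32-bit words (the output of the C example above) serialize differently depending on endianness:

import struct, binascii

w = [0xD6942D68, 0x6F87870D]
print binascii.hexlify(struct.pack('>2I', *w))   # big-endian:    d6942d686f87870d
print binascii.hexlify(struct.pack('<2I', *w))   # little-endian: 682d94d60d87876f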
TEA is broken, do not use it.
XXTEA, which is secure, does not define endianness and similar details, and you shouldn't reinvent the wheel when you can use AES.
There is no point in using insecure cryptography.
I strongly advise you to use AES; it can be implemented on 8-bit microcontrollers with just a few kB of code.
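If you do go the AES route, a minimal sketch with the PyCryptodome package (my choice of library, not mentioned in the thread):

# pip install pycryptodome
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

key = get_random_bytes(16)                 # AES-128 key
cipher = AES.new(key, AES.MODE_EAX)        # EAX mode: confidentiality plus integrity
ciphertext, tag = cipher.encrypt_and_digest(b"attack at dawn")

# Decryption needs the key, the nonce and the tag.
plain = AES.new(key, AES.MODE_EAX, nonce=cipher.nonce).decrypt_and_verify(ciphertext, tag)
assert plain == b"attack at dawn"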
EDIT
Did you check this code?
http://sysadminco.com/code/python-tea/
