I'm a newbie in this field and am trying to learn a bit about how to write cryptographic hash functions.
To get some hands-on, I tried updating the PySHA2 algorithm for Python 3.6 and up (the original version doesn't work on Python 2.5+ and the author says he won't fix this). I don't intend to use this algorithm for any work, just coding this for the sake of knowledge.
I've reached this far:
import copy
import struct
_initial_hashes = [0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179]
_round_constants = [0x428a2f98d728ae22, 0x7137449123ef65cd, 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc,
0x3956c25bf348b538, 0x59f111f1b605d019, 0x923f82a4af194f9b, 0xab1c5ed5da6d8118,
0xd807aa98a3030242, 0x12835b0145706fbe, 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2,
0x72be5d74f27b896f, 0x80deb1fe3b1696b1, 0x9bdc06a725c71235, 0xc19bf174cf692694,
0xe49b69c19ef14ad2, 0xefbe4786384f25e3, 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65,
0x2de92c6f592b0275, 0x4a7484aa6ea6e483, 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5,
0x983e5152ee66dfab, 0xa831c66d2db43210, 0xb00327c898fb213f, 0xbf597fc7beef0ee4,
0xc6e00bf33da88fc2, 0xd5a79147930aa725, 0x06ca6351e003826f, 0x142929670a0e6e70,
0x27b70a8546d22ffc, 0x2e1b21385c26c926, 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df,
0x650a73548baf63de, 0x766a0abb3c77b2a8, 0x81c2c92e47edaee6, 0x92722c851482353b,
0xa2bfe8a14cf10364, 0xa81a664bbc423001, 0xc24b8b70d0f89791, 0xc76c51a30654be30,
0xd192e819d6ef5218, 0xd69906245565a910, 0xf40e35855771202a, 0x106aa07032bbd1b8,
0x19a4c116b8d2d0c8, 0x1e376c085141ab53, 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8,
0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb, 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3,
0x748f82ee5defb2fc, 0x78a5636f43172f60, 0x84c87814a1f0ab72, 0x8cc702081a6439ec,
0x90befffa23631e28, 0xa4506cebde82bde9, 0xbef9a3f7b2c67915, 0xc67178f2e372532b,
0xca273eceea26619c, 0xd186b8c721c0c207, 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178,
0x06f067aa72176fba, 0x0a637dc5a2c898a6, 0x113f9804bef90dae, 0x1b710b35131c471b,
0x28db77f523047d84, 0x32caab7b40c72493, 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c,
0x4cc5d4becb3e42b6, 0x597f299cfc657e2a, 0x5fcb6fab3ad6faec, 0x6c44198c4a475817]
def _rit_rot(on: int, by: int) -> int:
"""
    helper function for right rotation, since Python has no built-in operator for it (xor, by contrast, is simply '^')
:param on: value to be rotated
:param by: value by which to rotate
:return: right rotated 'on'
"""
return ((on >> by) | (on << (64 - by))) & 0xFFFFFFFFFFFFFFFF
def hash_main(chunk):
global _initial_hashes, _round_constants
# start the hashing process
# to begin, create a place to store the 80 words that we'll make
words = [0] * 80
# first 16 words will be saved without any changes
words[:16] = struct.unpack('!16Q', chunk)
# extend these 16 words into the remaining 64 words of 'message schedule array'
for i in range(16, 80):
part_1 = _rit_rot(words[i - 15], 1) ^ _rit_rot(words[i - 15], 8) ^ (words[i - 15] >> 7)
part_2 = _rit_rot(words[i - 2], 19) ^ _rit_rot(words[i - 2], 61) ^ (words[i - 2] >> 6)
words[i] = (words[i - 16] + part_1 + words[i - 7] + part_2) & 0xFFFFFFFFFFFFFFFF
# create the working variables
a, b, c, d, e, f, g, h = _initial_hashes
# start the compression function
for z in range(80):
var_1 = _rit_rot(a, 28) ^ _rit_rot(a, 34) ^ _rit_rot(a, 39)
var_2 = _rit_rot(e, 14) ^ _rit_rot(e, 18) ^ _rit_rot(e, 41)
var_3 = (a & b) ^ (a & c) ^ (b & c)
var_4 = (e & f) ^ ((~e) & g)
temp_1 = var_1 + var_3
temp_2 = h + var_2 + var_4 + _round_constants[z] + words[z]
# remix the hashes
h = g
g = f
f = e
e = (d + temp_2) & 0xFFFFFFFFFFFFFFFF
d = c
c = b
b = a
a = (temp_1 + temp_2) & 0xFFFFFFFFFFFFFFFF
# add this chunk to initial hashes
_initial_hashes = [(x + y) & 0xFFFFFFFFFFFFFFFF for x, y in zip(_initial_hashes,
[a, b, c, d, e, f, g, h])]
def _sha_backend_update(text_copy, _buffer, _counter):
"""
backend function that hashes given string
"""
global _initial_hashes, _round_constants
# create variables for cycling
_buffer += text_copy
_counter += len(text_copy)
# assert the variables are correct
if not text_copy:
return
if type(text_copy) is not str:
raise TypeError("Invalid Object! Please enter a valid string for hashing!")
    # break the buffer into 128-byte (1024-bit) chunks
while len(_buffer) >= 128:
chunk = _buffer[:128].encode()[1:]
hash_main(chunk)
_buffer = _buffer[128:]
def sha_backend_digest(text_to_hash: str, _buffer: str, _counter: int,
_output_size: int, hex_output: bool = False):
# initialize variables
variable_x = _counter & 0x7F
length = str(struct.pack('!Q', _counter << 3))
# set the thresholds
if variable_x < 112:
padding_len = 111 - variable_x
else:
padding_len = 239 - variable_x
# make a copy of the text_to_hash before starting hashing
text_copy = copy.deepcopy(text_to_hash)
m = '\x80' + ('\x00' * (padding_len + 8)) + length
# run the update function
_sha_backend_update(text_copy, _buffer, _counter)
# return the hash value
return_val = [hex(stuff) for stuff in _initial_hashes[:_output_size]]
if hex_output is True:
return_val = [int(stuff, base=16) for stuff in return_val]
return return_val
return ''.join(return_val)
def sha_512(text_to_hash: str, hex_digest: bool = False) -> str:
"""
frontend function for SHA512 hashing
:return: hashed string
"""
# before anything, check if the input is correct
if not text_to_hash:
return ""
if type(text_to_hash) is not str:
raise TypeError("Invalid content! Please provide content in correct format for hashing!")
# initialize default variables
_buffer = ''
_counter = 0
_output_size = 8
# start the backend function
return sha_backend_digest(text_to_hash, _buffer, _counter, _output_size, hex_output=hex_digest)
message = "This is a string to be hashed"
from hashlib import sha512
print("hashlib gives: ", sha512(message.encode()).hexdigest())
print("I give: ", sha_512(message))
As is obvious, I don't understand a lot of things in this algorithm and have literally copied many parts from the original code (also, I know it isn't good practice to write everything in a single function but I find it easier when trying to understand something).
But the biggest problem I have right now is it doesn't work! Whatever input message I provide to my function, it gives the same output:
0x6a09e667f3bcc9080xbb67ae8584caa73b0x3c6ef372fe94f82b0xa54ff53a5f1d36f1
0x510e527fade682d10x9b05688c2b3e6c1f0x1f83d9abfb41bd6b0x5be0cd19137e2179
I wrote code at the bottom to compare my output with Python's hashlib module.
Where am I going wrong in this and how do I fix this?
EDIT: As mentioned in the comments, I tried to feed in a longer message string and the code seems to be working (it still gives longer output than hashlib though):
message = "This is a string to be hashed. I'll try to make this string as long as possible by adding" \
"as much information to it as I can, in the hopes that this string would somehow become longer than" \
"128 bits and my code can run properly. Hopefully, this is already longer than 128 bits, so lets see" \
"how it works..."
hash: 0x6fcc0f346f2577800x334bd9b6c1178a970x90964a3f45f7b5bb0xc14033d12f6607e60xb598bea0a8b0ac1e0x116b0e134691ab540x73d88e77e5b862ba0x89181da7462c5574
message = "This is a string to be hashed. I'll try to make this string as long as possible by adding" \
"as much information to it as I can, in the hopes that this string would somehow become longer than"
hash: 0x166e40ab03bc98750xe81fe34168b6994f0xe56b81bd5972b5560x8789265c3a56b30b0x2c810d652ea7b1550xa23ca2704602a8240x12ffb1ec8f3dd6d10x88c29f84cbef8988
You'll always have to pad the message. Padding and adding the length are always required as the last step of the SHA-2 process. Currently you aren't performing that last step (to completion).
Here are my last two comments that pointed you in the right direction:
So generally you take one 128-byte block from the binary message, update the hash state using the information in that block, then move to the next one, until you are left with a partial (or zero-byte) block. That final block you need to pad and extend with the size indication (in bits) before processing. If there isn't enough space left for the padding and size indication, you need yet another block consisting entirely of padding and the size indication. If you read carefully, you'll see that you always process at least one block.
and
Hmm, it is already in sha_backend_digest: the 0x80 followed by zero bytes and the length, which is the input size * 8 (_counter << 3).
But of course you do need to perform that and not skip any step.
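To make that concrete, here is a minimal sketch of the SHA-512 finalization padding (pad_message is my own helper, not part of the code above): append 0x80, zero-fill until the length is 112 modulo 128, then append the message length in bits as a 128-bit big-endian integer:
import struct

def pad_message(message: bytes) -> bytes:
    # every message gets at least this treatment before hashing
    bit_length = len(message) * 8
    padded = message + b'\x80'
    # zero-fill so exactly 16 bytes remain for the length field
    padded += b'\x00' * ((112 - len(padded) % 128) % 128)
    # 128-bit big-endian bit count, packed as two 64-bit words
    padded += struct.pack('!QQ', bit_length >> 64, bit_length & 0xFFFFFFFFFFFFFFFF)
    assert len(padded) % 128 == 0
    return padded
With that in place, the digest routine would pad first and then feed every 128-byte block of the padded message to hash_main.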
I'm trying to get a quick implementation of the following problem, ideally such that it would work in a numba function. The problem is the following: I have two random integers a & b and consider their binary representation of length L, e.g.
L=4: a=10->1010, b=6->0110.
This is the information that is fed into the function. Then I cut both binary representations in two at the same random position and fuse the left part of one with the right part of the other, e.g.
L=4: a=1|010, b=0|110 ---> c=1110 or 0010.
One of the two outcomes is chosen with equal probability, and that is the output of the function. The cut occurs between the first and the last bit of the binary representation.
This is currently my code:
def func(a,b,l):
bin_a = [int(i) for i in str(bin(a))[2:].zfill(l)]
bin_b = [int(i) for i in str(bin(b))[2:].zfill(l)]
randint = random.randint(1, l - 1)
print("randint", randint)
if random.random() < 0.5:
result = bin_a[0:randint]+bin_b[randint:l]
else:
result = bin_b[0:randint] + bin_a[randint:l]
return result
I have the feeling that there are possibly many shortcuts to this problem that I have not come up with. Also, my code does not work in numba :/. Thanks for any help!
Edit: This is an update of my code, thanks to Prune's help! It also works as a numba function. If there are no further improvements, I will close the question.
def func2(a,b,l):
randint = random.randint(1, l - 1)
print("randint", randint)
bitlist_l = [1]*randint+[0]*(l-randint)
bitlist_r = [0]*randint+[1]*(l-randint)
print("bitlist_l", bitlist_l)
print("bitlist_r", bitlist_r)
l_mask = 0
r_mask = 0
for i in range(l):
l_mask = (l_mask << 1) | bitlist_l[i]
r_mask = (r_mask << 1) | bitlist_r[i]
print("l_mask", l_mask)
print("r_mask", r_mask)
if random.random() < 0.5:
c = (a & l_mask) | (b & r_mask)
else:
c = (b & l_mask) | (a & r_mask)
return c
You lose a lot of time converting between string and int. Try bit operations instead. Mask the items you want and construct the output without all the conversions. Try these steps:
size = [length of the larger number in bits]; there are many ways to get this (e.g. max(a, b).bit_length()).
Make a mask template of size one-bits (i.e. mask = (1 << size) - 1).
Pick your random position, pos. (randint is a poor name, as it shadows the function you're using.)
Make two masks that split the template at pos: r_mask = mask >> pos for the low bits, and l_mask = mask ^ r_mask for the high bits. This gives you two mutually exclusive and exhaustive bit-masks for your inputs.
Flip your random coin, the 50-50 chance. The < 0.5 result would be ...
(a & l_mask) | (b & r_mask)
For the >= 0.5 result, switch a and b in that expression.
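Put together, the steps might look like this (a sketch using my own names; the second mask is derived as the complement of the first, so the two are guaranteed to be mutually exclusive and exhaustive):
import random

def crossover(a, b, l):
    # cut position, counted from the most-significant bit (as in func2)
    pos = random.randint(1, l - 1)
    r_mask = (1 << (l - pos)) - 1     # the low l - pos bits
    l_mask = ((1 << l) - 1) ^ r_mask  # the complementary high pos bits
    if random.random() < 0.5:
        return (a & l_mask) | (b & r_mask)
    return (b & l_mask) | (a & r_mask)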
You can improve your code by realizing that you do not need a "human readable" binary representation to do binary operations.
For example, creating the mask:
m = (1<<randompos) - 1
The crossover can be done like so:
c = (a if coinflip else b) ^ ((a^b)&m)
And that's all: (a^b) & m is set exactly at the masked positions where a and b differ, so XOR-ing it into either operand swaps in the other operand's bits at those positions.
Full example:
import numpy as np

# create random sample
a, b = np.random.randint(1 << 32, size=2)
randompos = np.random.randint(1,32)
coinflip = np.random.randint(2)
randompos
# 12
coinflip
# 0
# do the crossover
m = (1<<randompos) - 1
c = (a if coinflip else b) ^ ((a^b)&m)
# check
for i in (a,b,m,c):
print(f"{i:032b}")
# 11100011110111000001001111100011
# 11010110110000110010101001111011
# 00000000000000000000111111111111
# 11010110110000110010001111100011
The usual saying is that string comparison must be done in constant time when checking things like passwords or hashes, and thus it is recommended to avoid a == b.
However, I ran the following script, and the results don't support the hypothesis that a == b short-circuits on the first non-identical character.
from time import perf_counter_ns
import random
def timed_cmp(a, b):
start = perf_counter_ns()
a == b
end = perf_counter_ns()
return end - start
def n_timed_cmp(n, a, b):
"average time for a==b done n times"
ts = [timed_cmp(a, b) for _ in range(n)]
return sum(ts) / len(ts)
def check_cmp_time():
random.seed(123)
# generate a random string of n characters
n = 2 ** 8
s = "".join([chr(random.randint(ord("a"), ord("z"))) for _ in range(n)])
# generate a list of strings, which all differs from the original string
# by one character, at a different position
# only do that for the first 50 char, it's enough to get data
diffs = [s[:i] + "A" + s[i+1:] for i in range(min(50, n))]
timed = [(i, n_timed_cmp(10000, s, d)) for (i, d) in enumerate(diffs)]
sorted_timed = sorted(timed, key=lambda t: t[1])
# print the 10 fastest
for x in sorted_timed[:10]:
i, t = x
print("{}\t{:3f}".format(i, t))
print("---")
i, t = timed[0]
print("{}\t{:3f}".format(i, t))
i, t = timed[1]
print("{}\t{:3f}".format(i, t))
if __name__ == "__main__":
check_cmp_time()
Here is the result of a run, re-running the script gives slightly different results, but nothing satisfactory.
# ran with cpython 3.8.3
6 78.051700
1 78.203200
15 78.222700
14 78.384800
11 78.396300
12 78.441800
9 78.476900
13 78.519000
8 78.586200
3 78.631500
---
0 80.691100
1 78.203200
I would've expected that the fastest comparison would be where the first differing character is at the beginning of the string, but it's not what I get.
Any idea what's going on?
There's a difference; you just don't see it on such small strings. Here's a small patch to apply to your code: it uses longer strings and does 10 checks, placing the A at positions evenly spaced through the original string, from the beginning to the end, like this:
A_______________________________________________________________
______A_________________________________________________________
____________A___________________________________________________
__________________A_____________________________________________
________________________A_______________________________________
______________________________A_________________________________
____________________________________A___________________________
__________________________________________A_____________________
________________________________________________A_______________
______________________________________________________A_________
____________________________________________________________A___
@@ -15,13 +15,13 @@ def n_timed_cmp(n, a, b):
def check_cmp_time():
random.seed(123)
# generate a random string of n characters
- n = 2 ** 8
+ n = 2 ** 16
s = "".join([chr(random.randint(ord("a"), ord("z"))) for _ in range(n)])
# generate a list of strings, which all differs from the original string
# by one character, at a different position
# only do that for the first 50 char, it's enough to get data
- diffs = [s[:i] + "A" + s[i+1:] for i in range(min(50, n))]
+ diffs = [s[:i] + "A" + s[i+1:] for i in range(0, n, n // 10)]
timed = [(i, n_timed_cmp(10000, s, d)) for (i, d) in enumerate(diffs)]
sorted_timed = sorted(timed, key=lambda t: t[1])
and you'll get:
0 122.621000
1 213.465700
2 380.214100
3 460.422000
5 694.278700
4 722.010000
7 894.630300
6 1020.722100
9 1149.473000
8 1341.754500
---
0 122.621000
1 213.465700
Note that with your example, with only 2**8 characters, it's already noticeable; apply this patch:
@@ -21,7 +21,7 @@ def check_cmp_time():
# generate a list of strings, which all differs from the original string
# by one character, at a different position
# only do that for the first 50 char, it's enough to get data
- diffs = [s[:i] + "A" + s[i+1:] for i in range(min(50, n))]
+ diffs = [s[:i] + "A" + s[i+1:] for i in [0, n - 1]]
timed = [(i, n_timed_cmp(10000, s, d)) for (i, d) in enumerate(diffs)]
sorted_timed = sorted(timed, key=lambda t: t[1])
to only keep the two extreme cases (first letter change vs last letter change) and you'll get:
$ python3 cmp.py
0 124.131800
1 135.566000
Numbers may vary, but most of the time test 0 is a tad faster than test 1.
Isolating more precisely which character is modified is possible as long as memcmp compares character by character, i.e. as long as it does not use integer comparisons; that's typically the case on the last characters if the strings get misaligned, or on really short strings, like the 8-character string I demo here:
from time import perf_counter_ns
from statistics import median
import random
def check_cmp_time():
random.seed(123)
# generate a random string of n characters
n = 8
s = "".join([chr(random.randint(ord("a"), ord("z"))) for _ in range(n)])
# generate a list of strings, which all differs from the original string
# by one character, at a different position
# only do that for the first 50 char, it's enough to get data
diffs = [s[:i] + "A" + s[i + 1 :] for i in range(n)]
values = {x: [] for x in range(n)}
for _ in range(10_000_000):
for i, diff in enumerate(diffs):
start = perf_counter_ns()
s == diff
values[i].append(perf_counter_ns() - start)
timed = [[k, median(v)] for k, v in values.items()]
sorted_timed = sorted(timed, key=lambda t: t[1])
# print the 10 fastest
for x in sorted_timed[:10]:
i, t = x
print("{}\t{:3f}".format(i, t))
print("---")
i, t = timed[0]
print("{}\t{:3f}".format(i, t))
i, t = timed[1]
print("{}\t{:3f}".format(i, t))
if __name__ == "__main__":
check_cmp_time()
Which gives me:
1 221.000000
2 222.000000
3 223.000000
4 223.000000
5 223.000000
6 223.000000
7 223.000000
0 241.000000
The differences are so small, Python and perf_counter_ns may no longer be the right tools here.
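As an aside, if the goal is an actual constant-time comparison of secrets rather than timing ==, the standard library already provides one:
import hmac

# runs in time independent of where the inputs first differ
# (for inputs of equal length)
hmac.compare_digest(b"secret-token-a", b"secret-token-b")  # False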
See, to know why it doesn't short circuit, you'll have to do some digging. The simple answer is, of course, that it doesn't short circuit because the standard doesn't specify so. But you might think, "Why wouldn't the implementations choose to short circuit? Surely, it must be faster!". Not quite.
Let's take a look at cpython, for obvious reasons. Look at the code for unicode_compare_eq function defined in unicodeobject.c
static int
unicode_compare_eq(PyObject *str1, PyObject *str2)
{
int kind;
void *data1, *data2;
Py_ssize_t len;
int cmp;
len = PyUnicode_GET_LENGTH(str1);
if (PyUnicode_GET_LENGTH(str2) != len)
return 0;
kind = PyUnicode_KIND(str1);
if (PyUnicode_KIND(str2) != kind)
return 0;
data1 = PyUnicode_DATA(str1);
data2 = PyUnicode_DATA(str2);
cmp = memcmp(data1, data2, len * kind);
return (cmp == 0);
}
(Note: This function is actually called after deducing that str1 and str2 are not the same object - if they are - well that's just a simple True immediately)
Focus on this line specifically-
cmp = memcmp(data1, data2, len * kind);
Ahh, we're back at another crossroads. Does memcmp short circuit? The C standard does not specify such a requirement. As seen in the opengroup docs and also in Section 7.24.4.1 of the C Standard Draft:
7.24.4.1 The memcmp function
Synopsis
#include <string.h>
int memcmp(const void *s1, const void *s2, size_t n);
Description
The memcmp function compares the first n characters of the object pointed to by s1 to
the first n characters of the object pointed to by s2.
Returns
The memcmp function returns an integer greater than, equal to, or less than zero,
accordingly as the object pointed to by s1 is greater than, equal to, or less than the object pointed to by s2.
Some C implementations (including glibc) choose not to short circuit. But why? Are we missing something? Why would you not short circuit?
Because the comparison they use might not be as naive as a byte-by-byte check. The standard does not require the objects to be compared byte by byte. Therein lies the chance for optimization.
What glibc does is compare elements of type unsigned long int instead of singular bytes represented by unsigned char. Check out the implementation.
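As a toy illustration of the word-at-a-time idea (this is not glibc's actual code, which works on raw, aligned memory):
def equal_wordwise(a: bytes, b: bytes) -> bool:
    # compare 8 bytes per step instead of one;
    # assumes len(a) == len(b), which unicode_compare_eq already checked
    for i in range(0, len(a), 8):
        if int.from_bytes(a[i:i + 8], "little") != int.from_bytes(b[i:i + 8], "little"):
            return False
    return True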
There's a lot more going on under the hood, a discussion far outside the scope of this question; after all, this isn't even tagged as a C question ;). Though I found that this answer may be worth a look. But just know, the optimization is there, just in a much different form than the approach that may come to mind at first glance.
Edit: Fixed wrong function link
Edit: As @Konrad Rudolph has stated, glibc memcmp does apparently short circuit. I've been misinformed.
If I have several binary strings with compressed zlib data, is there a way to efficiently combine them into a single compressed string without decompressing everything?
Example of what I have to do now:
c1 = zlib.compress(b"The quick brown fox jumped over the lazy dog. ")
c2 = zlib.compress(b"We ride at dawn! ")
c = zlib.compress(zlib.decompress(c1)+zlib.decompress(c2)) # Warning: Inefficient!
d1 = zlib.decompress(c1)
d2 = zlib.decompress(c2)
d = zlib.decompress(c)
assert d1+d2 == d # This will pass!
Example of what I want:
c1 = zlib.compress(b"The quick brown fox jumped over the lazy dog. ")
c2 = zlib.compress(b"We ride at dawn! ")
c = magic_zlib_add(c1+c2) # Magical method of combining compressed streams
d1 = zlib.decompress(c1)
d2 = zlib.decompress(c2)
d = zlib.decompress(c)
assert d1+d2 == d # This should pass!
I don't know too much about zlib and the DEFLATE algorithm, so this may be entirely impossible from a theoretical point of view. Also, I must use zlib, so I can't wrap zlib and come up with my own protocol that transparently handles concatenated streams.
NOTE: I don't really mind if the solution is not trivial in Python. I'm willing to write some C code and use ctypes in Python.
Since you don't mind venturing into C, you can start by looking at the code for gzjoin.
Note, the gzjoin code has to decompress to find the parts that have to change when merged, but it doesn't have to recompress. That's not too bad because decompression is typically faster than compression.
In addition to gzjoin which requires decompression of the first deflate stream, you can take a look at gzlog.h and gzlog.c, which efficiently appends short strings to a gzip file without having to decompress the deflate stream each time. (It can be easily modified to operate on zlib-wrapped deflate data instead of gzip-wrapped deflate data.) You would use this approach if you are in control of the creation of the first deflate stream. If you are not creating the first deflate stream, then you would have to use the approach of gzjoin which requires decompression.
None of the approaches require recompression.
I'm just turning @zorlak's comment into an answer and adding some code so I can find it later.
For the first stream, call deflate() with a Z_SYNC_FLUSH. For each subsequent stream, call deflate() with Z_SYNC_FLUSH, strip the first two bytes, and concatenate, also collecting the adler32 value and uncompressed length. Then for the last stream, [call] deflate() with a Z_FINISH, strip off the 4 byte checksum, and replace it with adler32_combine() of all the checksums.
If you can control the initial compression of your streams, you can store the length of the uncompressed data, its Adler-32 checksum, and the compressed data somewhere. Later you can then concatenate the individual streams in an arbitrary order.
Note that I am not sure if the individual streams can have different compression levels, compression strategies, or window sizes since the concatenate function strips the zlib header of all but the first stream...
from typing import Tuple
import zlib
def prepare(data: bytes) -> Tuple[int, bytes, int]:
deflate = zlib.compressobj()
result = deflate.compress(data)
result += deflate.flush(zlib.Z_SYNC_FLUSH)
return len(data), result, zlib.adler32(data)
def concatenate(*chunks: Tuple[int, bytes, int]) -> bytes:
if not chunks:
return b''
_, result, final_checksum = chunks[0]
for length, chunk, checksum in chunks[1:]:
result += chunk[2:] # strip the zlib header
final_checksum = adler32_combine(final_checksum, checksum, length)
result += b'\x03\x00' # insert a final empty block
result += final_checksum.to_bytes(4, byteorder='big')
return result
def adler32_combine(adler1: int, adler2: int, length2: int) -> int:
# Python implementation of adler32_combine
    # The original C implementation is Copyright (C) 1995-2011, 2016 Mark Adler
# see https://github.com/madler/zlib/blob/master/adler32.c#L143
BASE = 65521
WORD = 0xffff
DWORD = 0xffffffff
if adler1 < 0 or adler1 > DWORD:
raise ValueError('adler1 must be between 0 and 2^32')
if adler2 < 0 or adler2 > DWORD:
raise ValueError('adler2 must be between 0 and 2^32')
if length2 < 0:
raise ValueError('length2 must not be negative')
remainder = length2 % BASE
sum1 = adler1 & WORD
sum2 = (remainder * sum1) % BASE
sum1 += (adler2 & WORD) + BASE - 1
sum2 += ((adler1 >> 16) & WORD) + ((adler2 >> 16) & WORD) + BASE - remainder
if sum1 >= BASE:
sum1 -= BASE
if sum1 >= BASE:
sum1 -= BASE
if sum2 >= (BASE << 1):
sum2 -= (BASE << 1)
if sum2 >= BASE:
sum2 -= BASE
return (sum1 | (sum2 << 16))
A quick example:
hello = prepare(b'Hello World! ')
test = prepare(b'This is a test. ')
fox = prepare(b'The quick brown fox jumped over the lazy dog. ')
dawn = prepare(b'We ride at dawn! ')
# these all print what you would expect
print(zlib.decompress(concatenate(hello, test, fox, dawn)))
print(zlib.decompress(concatenate(dawn, fox, test, hello)))
print(zlib.decompress(concatenate(fox, hello, dawn, test)))
print(zlib.decompress(concatenate(test, dawn, hello, fox)))
From Section 15.2 of Programming Pearls
The C code can be viewed here: http://www.cs.bell-labs.com/cm/cs/pearls/longdup.c
When I implement it in Python using a suffix array:
example = open("iliad10.txt").read()
def comlen(p, q):
i = 0
for x in zip(p, q):
if x[0] == x[1]:
i += 1
else:
break
return i
suffix_list = []
example_len = len(example)
idx = list(range(example_len))
idx.sort(cmp = lambda a, b: cmp(example[a:], example[b:])) #VERY VERY SLOW
max_len = -1
for i in range(example_len - 1):
this_len = comlen(example[idx[i]:], example[idx[i+1]:])
print this_len
if this_len > max_len:
max_len = this_len
maxi = i
I found the idx.sort step very slow. I think it's slow because Python needs to pass the substring by value instead of by reference (as the C code above does).
The tested file can be downloaded from here
The C code needs only 0.3 seconds to finish.
time cat iliad10.txt |./longdup
On this the rest of the Achaeans with one voice were for
respecting the priest and taking the ransom that he offered; but
not so Agamemnon, who spoke fiercely to him and sent him roughly
away.
real 0m0.328s
user 0m0.291s
sys 0m0.006s
But the Python code never ends on my computer (I waited for 10 minutes and then killed it).
Does anyone have ideas for how to make the code more efficient? (For example, under 10 seconds.)
My solution is based on suffix arrays, constructed by prefix doubling together with the longest common prefix (LCP) array. The worst-case complexity is O(n (log n)^2). The file "iliad.mb.txt" takes 4 seconds on my laptop. The longest_common_substring function is short and can be easily modified, e.g. to search for the 10 longest non-overlapping substrings. This Python code is faster than the original C code from the question if duplicate strings are longer than 10000 characters.
from itertools import groupby
from operator import itemgetter
def longest_common_substring(text):
"""Get the longest common substrings and their positions.
>>> longest_common_substring('banana')
{'ana': [1, 3]}
>>> text = "not so Agamemnon, who spoke fiercely to "
>>> sorted(longest_common_substring(text).items())
[(' s', [3, 21]), ('no', [0, 13]), ('o ', [5, 20, 38])]
    This function can be easily modified for other criteria, e.g. to search for
    the ten longest non-overlapping repeated substrings.
"""
sa, rsa, lcp = suffix_array(text)
maxlen = max(lcp)
result = {}
for i in range(1, len(text)):
if lcp[i] == maxlen:
j1, j2, h = sa[i - 1], sa[i], lcp[i]
assert text[j1:j1 + h] == text[j2:j2 + h]
substring = text[j1:j1 + h]
if not substring in result:
result[substring] = [j1]
result[substring].append(j2)
return dict((k, sorted(v)) for k, v in result.items())
def suffix_array(text, _step=16):
"""Analyze all common strings in the text.
    Short substrings of length _step are first pre-sorted. The results are
    then repeatedly merged so that the guaranteed number of compared
    characters is doubled in every iteration, until all substrings are
    sorted exactly.
Arguments:
text: The text to be analyzed.
_step: Is only for optimization and testing. It is the optimal length
            of substrings used for initial pre-sorting. A bigger value is
faster if there is enough memory. Memory requirements are
approximately (estimate for 32 bit Python 3.3):
len(text) * (29 + (_size + 20 if _size > 2 else 0)) + 1MB
Return value: (tuple)
(sa, rsa, lcp)
        sa:  Suffix array. For i in range(1, size):
                 assert text[sa[i-1]:] < text[sa[i]:]
        rsa: Reverse suffix array. For i in range(size):
                 assert rsa[sa[i]] == i
        lcp: Longest common prefix. For i in range(1, size):
                 assert text[sa[i-1]:sa[i-1]+lcp[i]] == text[sa[i]:sa[i]+lcp[i]]
                 if sa[i-1] + lcp[i] < len(text):
                     assert text[sa[i-1] + lcp[i]] < text[sa[i] + lcp[i]]
>>> suffix_array(text='banana')
([5, 3, 1, 0, 4, 2], [3, 2, 5, 1, 4, 0], [0, 1, 3, 0, 0, 2])
Explanation: 'a' < 'ana' < 'anana' < 'banana' < 'na' < 'nana'
The Longest Common String is 'ana': lcp[2] == 3 == len('ana')
It is between tx[sa[1]:] == 'ana' < 'anana' == tx[sa[2]:]
"""
tx = text
size = len(tx)
step = min(max(_step, 1), len(tx))
sa = list(range(len(tx)))
sa.sort(key=lambda i: tx[i:i + step])
grpstart = size * [False] + [True] # a boolean map for iteration speedup.
    # It helps to skip already resolved values. The last value True is a sentinel.
rsa = size * [None]
stgrp, igrp = '', 0
for i, pos in enumerate(sa):
st = tx[pos:pos + step]
if st != stgrp:
grpstart[igrp] = (igrp < i - 1)
stgrp = st
igrp = i
rsa[pos] = igrp
sa[i] = pos
grpstart[igrp] = (igrp < size - 1 or size == 0)
while grpstart.index(True) < size:
# assert step <= size
nextgr = grpstart.index(True)
while nextgr < size:
igrp = nextgr
nextgr = grpstart.index(True, igrp + 1)
glist = []
for ig in range(igrp, nextgr):
pos = sa[ig]
if rsa[pos] != igrp:
break
newgr = rsa[pos + step] if pos + step < size else -1
glist.append((newgr, pos))
glist.sort()
for ig, g in groupby(glist, key=itemgetter(0)):
g = [x[1] for x in g]
sa[igrp:igrp + len(g)] = g
grpstart[igrp] = (len(g) > 1)
for pos in g:
rsa[pos] = igrp
igrp += len(g)
step *= 2
del grpstart
# create LCP array
lcp = size * [None]
h = 0
for i in range(size):
if rsa[i] > 0:
j = sa[rsa[i] - 1]
while i != size - h and j != size - h and tx[i + h] == tx[j + h]:
h += 1
lcp[rsa[i]] = h
if h > 0:
h -= 1
if size > 0:
lcp[0] = 0
return sa, rsa, lcp
I prefer this solution over a more complicated O(n log n) one because Python has a very fast list sorting algorithm (Timsort). Python's sort is probably faster than the linear-time operations in the method from that article, which is O(n) only under very special presumptions of random strings with a small alphabet (typical for DNA genome analysis). I read in Gog 2011 that the worst-case O(n log n) of my algorithm can in practice be faster than many O(n) algorithms that cannot use the CPU memory cache.
The code in another answer based on grow_chains is 19 times slower than the original example from the question, if the text contains a repeated string 8 kB long. Long repeated texts are not typical for classical literature, but they are frequent e.g. in "independent" school homework collections. The program should not freeze on it.
I wrote an example and tests with the same code for Python 2.7, 3.3 - 3.6.
The translation of the algorithm into Python:
from itertools import imap, izip, starmap, tee
from os.path import commonprefix
def pairwise(iterable): # itertools recipe
a, b = tee(iterable)
next(b, None)
return izip(a, b)
def longest_duplicate_small(data):
suffixes = sorted(data[i:] for i in xrange(len(data))) # O(n*n) in memory
return max(imap(commonprefix, pairwise(suffixes)), key=len)
buffer() allows getting a substring without copying:
def longest_duplicate_buffer(data):
n = len(data)
sa = sorted(xrange(n), key=lambda i: buffer(data, i)) # suffix array
def lcp_item(i, j): # find longest common prefix array item
start = i
while i < n and data[i] == data[i + j - start]:
i += 1
return i - start, start
size, start = max(starmap(lcp_item, pairwise(sa)), key=lambda x: x[0])
return data[start:start + size]
It takes 5 seconds on my machine for the iliad.mb.txt.
In principle it is possible to find the duplicate in O(n) time and O(n) memory using a suffix array augmented with a lcp array.
Note: the *_memoryview() version below is superseded by the *_buffer() version above.
A more memory-efficient version (compared to longest_duplicate_small()):
def cmp_memoryview(a, b):
for x, y in izip(a, b):
if x < y:
return -1
elif x > y:
return 1
return cmp(len(a), len(b))
def common_prefix_memoryview((a, b)):
for i, (x, y) in enumerate(izip(a, b)):
if x != y:
return a[:i]
return a if len(a) < len(b) else b
def longest_duplicate(data):
mv = memoryview(data)
suffixes = sorted((mv[i:] for i in xrange(len(mv))), cmp=cmp_memoryview)
result = max(imap(common_prefix_memoryview, pairwise(suffixes)), key=len)
return result.tobytes()
It takes 17 seconds on my machine for the iliad.mb.txt. The result is:
On this the rest of the Achaeans with one voice were for respecting
the priest and taking the ransom that he offered; but not so Agamemnon,
who spoke fiercely to him and sent him roughly away.
I had to define custom functions to compare memoryview objects because memoryview comparison either raises an exception in Python 3 or produces a wrong result in Python 2:
>>> s = b"abc"
>>> memoryview(s[0:]) > memoryview(s[1:])
True
>>> memoryview(s[0:]) < memoryview(s[1:])
True
Related questions:
Find the longest repeating string and the number of times it repeats in a given string
finding long repeated substrings in a massive string
The main problem seems to be that Python does slicing by copy: https://stackoverflow.com/a/5722068/538551
You'll have to use a memoryview to get a reference instead of a copy. When I did this, the program hung after the idx.sort function (which was very fast).
I'm sure with a little work, you can get the rest working.
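For example, slicing a memoryview yields a view into the same buffer instead of a copy (a quick illustration):
data = b"not so Agamemnon, who spoke fiercely"
mv = memoryview(data)
suffix = mv[7:]           # a view into the same buffer; nothing is copied
print(bytes(suffix[:9]))  # b'Agamemnon'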
Edit:
The above change will not work as a drop-in replacement because cmp does not work the same way as strcmp. For example, try the following C code:
#include <stdio.h>
#include <string.h>
int main() {
char* test1 = "ovided by The Internet Classics Archive";
char* test2 = "rovided by The Internet Classics Archive.";
printf("%d\n", strcmp(test1, test2));
}
And compare the result to this Python:
test1 = "ovided by The Internet Classics Archive";
test2 = "rovided by The Internet Classics Archive."
print(cmp(test1, test2))
The C code prints -3 on my machine while the Python version prints -1. It looks like the example C code is abusing the return value of strcmp (it IS used in qsort, after all). I couldn't find any documentation on when strcmp will return something other than [-1, 0, 1], but adding a printf to pstrcmp in the original code showed a lot of values outside of that range (3, -31, 5 were the first 3 values).
To make sure that -3 wasn't some error code, if we reverse test1 and test2, we'll get 3.
Edit:
The above is interesting trivia, but not actually correct in terms of affecting either chunk of code. I realized this just as I shut my laptop and left the wifi zone... Really should double-check everything before I hit Save.
FWIW, cmp most certainly works on memoryview objects (prints -1 as expected):
print(cmp(memoryview(test1), memoryview(test2)))
I'm not sure why the code isn't working as expected. Printing out the list on my machine doesn't look as expected either. I'll look into this and try to find a better solution instead of grasping at straws.
This version takes about 17 secs on my circa-2007 desktop, using a totally different algorithm:
#!/usr/bin/env python
ex = open("iliad.mb.txt").read()
chains = dict()
# populate initial chains dictionary
for (a,b) in enumerate(zip(ex,ex[1:])) :
s = ''.join(b)
if s not in chains :
chains[s] = list()
chains[s].append(a)
def grow_chains(chains) :
new_chains = dict()
for (string,pos) in chains :
offset = len(string)
for p in pos :
if p + offset >= len(ex) : break
# add one more character
s = string + ex[p + offset]
if s not in new_chains :
new_chains[s] = list()
new_chains[s].append(p)
return new_chains
# grow and filter, grow and filter
while len(chains) > 1 :
print 'length of chains', len(chains)
# remove chains that appear only once
chains = [(i,chains[i]) for i in chains if len(chains[i]) > 1]
print 'non-unique chains', len(chains)
print [i[0] for i in chains[:3]]
chains = grow_chains(chains)
The basic idea is to create a list of substrings and the positions where they occur, thus eliminating the need to compare the same strings again and again. The resulting list looks like [('ind him, but', [466548, 739011]), (' bulwark bot', [428251, 428924]), (' his armour,', [121559, 124919, 193285, 393566, 413634, 718953, 760088])]. Unique strings are removed. Then every list member grows by 1 character and a new list is created. Unique strings are removed again. And so on and so forth...
I have this Python code to do this:
from struct import pack as _pack
def packl(lnum, pad = 1):
if lnum < 0:
raise RangeError("Cannot use packl to convert a negative integer "
"to a string.")
count = 0
l = []
while lnum > 0:
l.append(lnum & 0xffffffffffffffffL)
count += 1
lnum >>= 64
if count <= 0:
return '\0' * pad
elif pad >= 8:
lens = 8 * count % pad
pad = ((lens != 0) and (pad - lens)) or 0
l.append('>' + 'x' * pad + 'Q' * count)
l.reverse()
return _pack(*l)
else:
l.append('>' + 'Q' * count)
l.reverse()
s = _pack(*l).lstrip('\0')
lens = len(s)
if (lens % pad) != 0:
return '\0' * (pad - lens % pad) + s
else:
return s
This takes approximately 174 usec to convert 2**9700 - 1 to a string of bytes on my machine. If I'm willing to use the Python 2.7 and Python 3.x specific bit_length method, I can shorten that to 159 usecs by pre-allocating the l array to be the exact right size at the very beginning and using l[something] = syntax instead of l.append.
Is there anything I can do that will make this faster? This will be used to convert large prime numbers used in cryptography as well as some (but not many) smaller numbers.
Edit
This is currently the fastest option in Python < 3.2; it takes about half the time in either direction compared to the accepted answer:
def packl(lnum, padmultiple=1):
"""Packs the lnum (which must be convertable to a long) into a
byte string 0 padded to a multiple of padmultiple bytes in size. 0
means no padding whatsoever, so that packing 0 result in an empty
string. The resulting byte string is the big-endian two's
complement representation of the passed in long."""
if lnum == 0:
return b'\0' * padmultiple
elif lnum < 0:
raise ValueError("Can only convert non-negative numbers.")
s = hex(lnum)[2:]
s = s.rstrip('L')
if len(s) & 1:
s = '0' + s
s = binascii.unhexlify(s)
if (padmultiple != 1) and (padmultiple != 0):
filled_so_far = len(s) % padmultiple
if filled_so_far != 0:
s = b'\0' * (padmultiple - filled_so_far) + s
return s
def unpackl(bytestr):
"""Treats a byte string as a sequence of base 256 digits
representing an unsigned integer in big-endian format and converts
that representation into a Python integer."""
return int(binascii.hexlify(bytestr), 16) if len(bytestr) > 0 else 0
In Python 3.2 the int class has to_bytes and from_bytes functions that can accomplish this much more quickly than the method given above.
Here is a solution calling the Python/C API via ctypes. Currently, it uses NumPy, but if NumPy is not an option, it could be done purely with ctypes.
import numpy
import ctypes
PyLong_AsByteArray = ctypes.pythonapi._PyLong_AsByteArray
PyLong_AsByteArray.argtypes = [ctypes.py_object,
numpy.ctypeslib.ndpointer(numpy.uint8),
ctypes.c_size_t,
ctypes.c_int,
ctypes.c_int]
def packl_ctypes_numpy(lnum):
a = numpy.zeros(lnum.bit_length()//8 + 1, dtype=numpy.uint8)
PyLong_AsByteArray(lnum, a, a.size, 0, 1)
return a
On my machine, this is 15 times faster than your approach.
Edit: Here is the same code using ctypes only and returning a string instead of a NumPy array:
import ctypes
PyLong_AsByteArray = ctypes.pythonapi._PyLong_AsByteArray
PyLong_AsByteArray.argtypes = [ctypes.py_object,
ctypes.c_char_p,
ctypes.c_size_t,
ctypes.c_int,
ctypes.c_int]
def packl_ctypes(lnum):
a = ctypes.create_string_buffer(lnum.bit_length()//8 + 1)
PyLong_AsByteArray(lnum, a, len(a), 0, 1)
return a.raw
This is another two times faster, totalling to a speed-up factor of 30 on my machine.
For completeness and for future readers of this question:
Starting in Python 3.2, there are functions int.from_bytes() and int.to_bytes() that perform the conversion between bytes and int objects in a choice of byte orders.
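A quick illustration of the round trip:
n = 2 ** 9700 - 1
# big-endian, using exactly as many bytes as the value needs
b = n.to_bytes((n.bit_length() + 7) // 8, byteorder='big')
assert int.from_bytes(b, byteorder='big') == n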
I suppose you really should just be using numpy, which I'm sure has something or other built in for this. It might also be faster to hack around with the array module. But I'll take a stab at it anyway.
IMX, creating a generator and using a list comprehension and/or built-in summation is faster than a loop that appends to a list, because the appending can be done internally. Oh, and 'lstrip' on a large string has got to be costly.
Also, some style points: special cases aren't special enough; and you appear not to have gotten the memo about the new x if y else z construct. :) Although we don't need it anyway. ;)
from struct import pack as _pack
Q_size = 64
Q_bitmask = (1L << Q_size) - 1L
def quads_gen(a_long):
while a_long:
yield a_long & Q_bitmask
a_long >>= Q_size
def pack_long_big_endian(a_long, pad = 1):
    if a_long < 0:
        raise ValueError("Cannot use packl to convert a negative integer "
                         "to a string.")
    qs = list(quads_gen(a_long))[::-1]  # reversed() needs a sequence, not a generator
    if not qs:  # a_long == 0
        return '\x00' * pad
    # Pack the first one separately so we can lstrip nicely.
    first = _pack('>Q', qs[0]).lstrip('\x00')
    rest = _pack('>%dQ' % (len(qs) - 1), *qs[1:])
count = len(first) + len(rest)
# A little math trick that depends on Python's behaviour of modulus
# for negative numbers - but it's well-defined and documented
return '\x00' * (-count % pad) + first + rest
Just wanted to post a follow-up to Sven's answer (which works great). The opposite operation, going from an arbitrarily long bytes object to a Python integer object, requires the following (because there is no PyLong_FromByteArray() C API function that I can find):
import binascii
def unpack_bytes(stringbytes):
    # binascii.hexlify still works; Python 3.5+ also added bytes.hex()
    # (see issue 3532 on bugs.python.org)
return int(binascii.hexlify(stringbytes), 16)