Count number of comparisons for QuickSort - python

I want to count the number of comparisons in quicksort. In order to do so, I introduced a counting variable c. Although I think the implementation is correct, the counter is significantly higher than with insertion sort, which should not be the case. Have I done something wrong?
Here is my code.
def quick_sort(a):
    c = 0
    c = quickSortImpl(a, 0, len(a) - 1, c)
    return c

def quickSortImpl(a, l, r, c):
    if r > l:
        k, c = partition(a, l, r, c)
        c = quickSortImpl(a, l, k - 1, c)
        c = quickSortImpl(a, k + 1, r, c)
    return c

def partition(a, l, r, c):
    pivot = a[r]
    i = l
    j = r - 1
    while True:
        c += 1
        while i < r and a[i] <= pivot:
            c += 1
            i += 1
        c += 1
        while j > l and a[j] >= pivot:
            c += 1
            j -= 1
        if i < j:
            a[i], a[j] = a[j], a[i]
        else:
            break
    a[r] = a[i]
    a[i] = pivot
    return i, c
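As an aside, a less error-prone way to count comparisons is to let the elements count themselves, rather than threading a counter through every call. A minimal sketch (my own illustration, not part of the question's code), using a hypothetical wrapper class:

# Hypothetical helper, not the OP's code: every < / <= / >= on wrapped
# values bumps a shared counter, so the sort itself needs no changes.
class Counted:
    count = 0  # class-wide comparison counter

    def __init__(self, value):
        self.value = value

    def __lt__(self, other):
        Counted.count += 1
        return self.value < other.value

    def __le__(self, other):
        Counted.count += 1
        return self.value <= other.value

    def __ge__(self, other):
        Counted.count += 1
        return self.value >= other.value

# Usage sketch:
#   a = [Counted(x) for x in [3, 1, 4, 1, 5]]
#   quick_sort(a)
#   print(Counted.count)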

Related

Sorting multiple lists together in place

I have lists a, b, c, ... of equal length. I'd like to sort all of them in the order obtained by sorting a, e.g., using the decorate-sort-undecorate pattern:
a, b, c = map(list, zip(*sorted(zip(a, b, c))))
or something like that. However, I'd like the lists to be sorted in place (I assume that sorted pulls everything from the temporary iterator passed to it into a temporary list, and zip then copies that into three output lists, so every datum in the input is copied twice unnecessarily) without creating temporary objects. So what I don't mean is:
a_sorted, b_sorted, c_sorted = map(list, zip(*sorted(zip(a, b, c))))
a[:] = a_sorted
b[:] = b_sorted
c[:] = c_sorted
How can I achieve that?
I think "without creating temporary objects" is impossible, especially since "everything is an object" in Python.
You could get O(1) space / number of objects if you implement some sorting algorithm yourself, though if you want O(n log n) time and stability, it's difficult. If you don't care about stability (seems likely, since you say you want to sort by a but then actually sort by a, b and c), heapsort is reasonably easy:
def sort_together_heapsort(a, b, c):
    n = len(a)
    def swap(i, j):
        a[i], a[j] = a[j], a[i]
        b[i], b[j] = b[j], b[i]
        c[i], c[j] = c[j], c[i]
    def siftdown(i):
        while (kid := 2*i+1) < n:
            imax = kid if a[kid] > a[i] else i
            kid += 1
            if kid < n and a[kid] > a[imax]:
                imax = kid
            if imax == i:
                return
            swap(i, imax)
            i = imax
    for i in range(n // 2)[::-1]:
        siftdown(i)
    while n := n - 1:
        swap(0, n)
        siftdown(0)
Anyway, if someone's interested in just saving some amount of memory, that can be done by decorating in-place (building tuples and storing them in a):
def sort_together_decorate_in_a(a, b, c):
    for i, a[i] in enumerate(zip(a, b, c)):
        pass
    a.sort()
    for i, [a[i], b[i], c[i]] in enumerate(a):
        pass
Or, if you trust that list.sort will ask for the keys of the elements in order (at least CPython does, has done so ever since the key parameter was introduced 18 years ago, and I suspect it will keep doing so):
def sort_together_iter_key(a, b, c):
    it = iter(a)
    b.sort(key=lambda _: next(it))
    it = iter(a)
    c.sort(key=lambda _: next(it))
    a.sort()
Testing memory and time with three lists of 100,000 elements:
15,072,520 bytes 152 ms sort_together_sorted_zip
15,072,320 bytes 166 ms sort_together_sorted_zip_2
14,272,576 bytes 152 ms sort_together_sorted_zip_X
6,670,708 bytes 126 ms sort_together_decorate_in_a
6,670,772 bytes 177 ms sort_together_decorate_in_first_X
5,190,212 bytes 342 ms sort_multi_by_a_guest_X
1,597,400 bytes 100 ms sort_together_iter_key
1,597,448 bytes 102 ms sort_together_iter_key_X
744 bytes 1584 ms sort_together_heapsort
704 bytes 1663 ms sort_together_heapsort_X
168 bytes 1326 ms sort_together_heapsort_opti
188 bytes 1512 ms sort_together_heapsort_opti_X
Note:
The second solution is a shortened/improved version of yours, no need for temporary variables and conversions to lists.
The solutions with _X suffix are versions that take arbitrarily many lists as parameters.
The sort_multi_by_a_guest_X function is from a_guest's answer (included below). Runtime-wise it currently benefits from my data being random, as that doesn't expose that solution's worst-case complexity of O(m * n²), where m is the number of lists and n is the length of each list.
Testing memory and time with ten lists of 100,000 elements:
19,760,808 bytes 388 ms sort_together_sorted_zip_X
12,159,100 bytes 425 ms sort_together_decorate_in_first_X
5,190,292 bytes 1249 ms sort_multi_by_a_guest_X
1,597,528 bytes 393 ms sort_together_iter_key_X
704 bytes 4186 ms sort_together_heapsort_X
188 bytes 4032 ms sort_together_heapsort_opti_X
The whole code (Try it online!):
import tracemalloc as tm
from random import random
from timeit import timeit

def sort_together_sorted_zip(a, b, c):
    a_sorted, b_sorted, c_sorted = map(list, zip(*sorted(zip(a, b, c))))
    a[:] = a_sorted
    b[:] = b_sorted
    c[:] = c_sorted

def sort_together_sorted_zip_2(a, b, c):
    a[:], b[:], c[:] = zip(*sorted(zip(a, b, c)))

def sort_together_sorted_zip_X(*lists):
    sorteds = zip(*sorted(zip(*lists)))
    for lst, lst[:] in zip(lists, sorteds):
        pass

def sort_together_decorate_in_a(a, b, c):
    for i, a[i] in enumerate(zip(a, b, c)):
        pass
    a.sort()
    for i, [a[i], b[i], c[i]] in enumerate(a):
        pass

def sort_together_decorate_in_first_X(*lists):
    first = lists[0]
    for i, first[i] in enumerate(zip(*lists)):
        pass
    first.sort()
    for i, values in enumerate(first):
        for lst, lst[i] in zip(lists, values):
            pass

def sort_together_iter_key(a, b, c):
    it = iter(a)
    b.sort(key=lambda _: next(it))
    it = iter(a)
    c.sort(key=lambda _: next(it))
    a.sort()

def sort_together_iter_key_X(*lists):
    for lst in lists[1:]:
        it = iter(lists[0])
        lst.sort(key=lambda _: next(it))
    lists[0].sort()

def sort_together_heapsort(a, b, c):
    n = len(a)
    def swap(i, j):
        a[i], a[j] = a[j], a[i]
        b[i], b[j] = b[j], b[i]
        c[i], c[j] = c[j], c[i]
    def siftdown(i):
        while (kid := 2*i+1) < n:
            imax = kid if a[kid] > a[i] else i
            kid += 1
            if kid < n and a[kid] > a[imax]:
                imax = kid
            if imax == i:
                return
            swap(i, imax)
            i = imax
    for i in range(n // 2)[::-1]:
        siftdown(i)
    while n := n - 1:
        swap(0, n)
        siftdown(0)

def sort_together_heapsort_X(*lists):
    a = lists[0]
    n = len(a)
    def swap(i, j):
        for lst in lists:
            lst[i], lst[j] = lst[j], lst[i]
    def siftdown(i):
        while (kid := 2*i+1) < n:
            imax = kid if a[kid] > a[i] else i
            kid += 1
            if kid < n and a[kid] > a[imax]:
                imax = kid
            if imax == i:
                return
            swap(i, imax)
            i = imax
    for i in range(n // 2)[::-1]:
        siftdown(i)
    while n := n - 1:
        swap(0, n)
        siftdown(0)

def sort_together_heapsort_opti(a, b, c):
    # Avoid inner functions and range-loop to minimize memory.
    # Makes it faster, too. But duplicates code. Not recommended.
    n = len(a)
    i0 = n // 2 - 1
    while i0 >= 0:
        i = i0
        while (kid := 2*i+1) < n:
            imax = kid if a[kid] > a[i] else i
            kid += 1
            if kid < n and a[kid] > a[imax]:
                imax = kid
            if imax == i:
                break
            a[i], a[imax] = a[imax], a[i]
            b[i], b[imax] = b[imax], b[i]
            c[i], c[imax] = c[imax], c[i]
            i = imax
        i0 -= 1
    while n := n - 1:
        a[0], a[n] = a[n], a[0]
        b[0], b[n] = b[n], b[0]
        c[0], c[n] = c[n], c[0]
        i = 0
        while (kid := 2*i+1) < n:
            imax = kid if a[kid] > a[i] else i
            kid += 1
            if kid < n and a[kid] > a[imax]:
                imax = kid
            if imax == i:
                break
            a[i], a[imax] = a[imax], a[i]
            b[i], b[imax] = b[imax], b[i]
            c[i], c[imax] = c[imax], c[i]
            i = imax

def sort_together_heapsort_opti_X(*lists):
    # Avoid inner functions and range-loop to minimize memory.
    # Makes it faster, too. But duplicates code. Not recommended.
    a = lists[0]
    n = len(a)
    i0 = n // 2 - 1
    while i0 >= 0:
        i = i0
        while (kid := 2*i+1) < n:
            imax = kid if a[kid] > a[i] else i
            kid += 1
            if kid < n and a[kid] > a[imax]:
                imax = kid
            if imax == i:
                break
            for lst in lists:
                lst[i], lst[imax] = lst[imax], lst[i]
            i = imax
        i0 -= 1
    while n := n - 1:
        for lst in lists:
            lst[0], lst[n] = lst[n], lst[0]
        i = 0
        while (kid := 2*i+1) < n:
            imax = kid if a[kid] > a[i] else i
            kid += 1
            if kid < n and a[kid] > a[imax]:
                imax = kid
            if imax == i:
                break
            for lst in lists:
                lst[i], lst[imax] = lst[imax], lst[i]
            i = imax

def sort_multi_by_a_guest_X(a, *lists):
    indices = list(range(len(a)))
    indices.sort(key=lambda i: a[i])
    a.sort()
    for lst in lists:
        for i, j in enumerate(indices):
            while j < i:
                j = indices[j]
            lst[i], lst[j] = lst[j], lst[i]

funcs = [
    sort_together_sorted_zip,
    sort_together_sorted_zip_2,
    sort_together_sorted_zip_X,
    sort_together_decorate_in_a,
    sort_together_decorate_in_first_X,
    sort_multi_by_a_guest_X,
    sort_together_iter_key,
    sort_together_iter_key_X,
    sort_together_heapsort,
    sort_together_heapsort_X,
    sort_together_heapsort_opti,
    sort_together_heapsort_opti_X,
]

n = 100000
a0 = [random() for _ in range(n)]
b0 = [x + 1 for x in a0]
c0 = [x + 2 for x in a0]

for _ in range(3):
    for func in funcs:
        a, b, c = a0[:], b0[:], c0[:]
        time = timeit(lambda: func(a, b, c), number=1)
        assert a == sorted(a0)
        assert b == sorted(b0)
        assert c == sorted(c0)
        a, b, c = a0[:], b0[:], c0[:]
        tm.start()
        func(a, b, c)
        memory = tm.get_traced_memory()[1]
        tm.stop()
        print(f'{memory:10,} bytes {int(time * 1e3):4} ms {func.__name__}')
    print()
The following function uses a memory overhead that is independent of the number of lists to sort. It is stable w.r.t. the first list.
def sort_multi(a, *lists):
    indices = list(range(len(a)))
    indices.sort(key=lambda i: a[i])
    a.sort()
    for lst in lists:
        for i, j in enumerate(indices):
            while j < i:
                j = indices[j]
            lst[i], lst[j] = lst[j], lst[i]
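A quick usage check (my own example, not from the answer):

a = [3, 1, 2]
b = ['c', 'a', 'b']
c = [30, 10, 20]
sort_multi(a, b, c)
print(a, b, c)  # [1, 2, 3] ['a', 'b', 'c'] [10, 20, 30]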

Key comparisons in a merge-insertion hybrid sort

I was given a task involving a merge-insertion sort described as (paraphrased):
Starting from merge sort, once a threshold S (a small positive integer) is reached, the algorithm sorts the subarrays with insertion sort instead.
We are tasked to find the optimal value of S for inputs of varying length so as to minimize the number of key comparisons. I implemented the code by modifying what was available online to get:
def mergeSort(arr, l, r, cutoff):
    if l < r:
        m = l + (r - l) // 2
        if len(arr[l:r+1]) > cutoff:
            return mergeSort(arr, l, m, cutoff) + mergeSort(arr, m+1, r, cutoff) + merge(arr, l, m, r)
        else:
            return insertionSort(arr, l, r+1)
    return 0

def merge(arr, l, m, r):
    comp = 0
    n1 = m - l + 1
    n2 = r - m
    L = [0] * n1
    R = [0] * n2
    for i in range(0, n1):
        L[i] = arr[l + i]
    for j in range(0, n2):
        R[j] = arr[m + 1 + j]
    i = 0
    j = 0
    k = l
    while i < n1 and j < n2:
        if L[i] <= R[j]:
            arr[k] = L[i]
            i += 1
        else:
            arr[k] = R[j]
            j += 1
        k += 1
        comp += 1
    while i < n1:
        arr[k] = L[i]
        i += 1
        k += 1
    while j < n2:
        arr[k] = R[j]
        j += 1
        k += 1
    return comp

def insertionSort(arr, l, r):
    comp = 0
    for i in range(l+1, r):
        key = arr[i]
        j = i - 1
        while j >= l:
            if key >= arr[j]:
                comp += 1
                break
            arr[j + 1] = arr[j]
            j -= 1
            comp += 1
        arr[j + 1] = key
    return comp
However, the graph I get of the minimum-comparison S against input length (not reproduced here) implies that a near-pure merge sort is almost always preferred over the hybrid. This contradicts what is available online, which says that insertion sort outperforms merge sort for small subarrays (S around 10-25). I can't seem to find any error in my code, so is the hybrid sort really better than plain merge sort?
IMO the question is flawed.
Mergesort always performs about N lg(N) key comparisons, while insertion sort takes about N²/2 of them in the worst case. Hence from about N = 2 onward, the comparison count favors mergesort in all cases. (This is only approximate, as N does not always divide evenly.)
But the number of moves as well as the loop overhead tend to favor insertion sort on small inputs. So a more relevant metric is the actual running time which, unfortunately, will depend on the key length and type.
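For a rough empirical feel for these magnitudes, here is a small sketch of mine (not part of the answer) that counts key comparisons for a plain insertion sort and a plain merge sort on the same random input:

import random

def insertion_comparisons(a):
    a = a[:]
    comp = 0
    for i in range(1, len(a)):
        key = a[i]
        j = i - 1
        while j >= 0:
            comp += 1          # key vs a[j]
            if a[j] <= key:
                break
            a[j + 1] = a[j]
            j -= 1
        a[j + 1] = key
    return comp

def merge_comparisons(a):
    if len(a) < 2:
        return a[:], 0
    mid = len(a) // 2
    left, cl = merge_comparisons(a[:mid])
    right, cr = merge_comparisons(a[mid:])
    merged, comp = [], cl + cr
    i = j = 0
    while i < len(left) and j < len(right):
        comp += 1              # left[i] vs right[j]
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged += left[i:] + right[j:]
    return merged, comp

data = [random.random() for _ in range(1000)]
print(insertion_comparisons(data))   # roughly N**2 / 4 on random input (N**2 / 2 worst case)
print(merge_comparisons(data)[1])    # roughly N * log2(N)

For N = 1000 this typically prints on the order of 250,000 comparisons for insertion sort versus roughly 8,700 for merge sort, which is the gap the answer describes.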

Merge sort in python: slicing vs iterating - impact on complexity

I want to check that my understanding of how python handles slices is correct.
Here's my implementation of merge sort:
def merge_sort(L):
    def merge(a, b):
        i, j = 0, 0
        c = []
        while i < len(a) and j < len(b):
            if a[i] < b[j]:
                c.append(a[i])
                i += 1
            elif b[j] < a[i]:
                c.append(b[j])
                j += 1
        if a[i:]:
            c.extend(a[i:])
        if b[j:]:
            c.extend(b[j:])
        return c
    if len(L) <= 1:
        return L
    else:
        mid = len(L) // 2
        left = merge_sort(L[:mid])
        right = merge_sort(L[mid:])
        return merge(left, right)
Am I right in thinking that I could replace this:
if a[i:]:
    c.extend(a[i:])
if b[j:]:
    c.extend(b[j:])
With this:
while i < len(a):
    c.append(a[i])
    i += 1
while j < len(b):
    c.append(b[j])
    j += 1
And have the exact same complexity? My understanding is that a slice costs time proportional to its length. Is that correct?
And does taking the slice twice (once in the condition, once inside it) double that cost?
Your implementation of mergesort has problems:
in the merge function's main loop, you do nothing if the values a[i] and b[j] are equal, or more precisely if you have neither a[i] < b[j] nor b[j] < a[i]. This causes an infinite loop.
there is no need to define merge as a local function; in fact there is no need to make it a separate function at all, you could inline the code and save the overhead of a function call.
Here is a modified version:
def merge_sort(L):
    if len(L) <= 1:
        return L
    else:
        mid = len(L) // 2
        a = merge_sort(L[:mid])
        b = merge_sort(L[mid:])
        i, j = 0, 0
        c = []
        while i < len(a) and j < len(b):
            if a[i] <= b[j]:
                c.append(a[i])
                i += 1
            else:
                c.append(b[j])
                j += 1
        if a[i:]:
            c.extend(a[i:])
        else:
            c.extend(b[j:])
        return c
Regarding performance, slicing or iterating has no impact on complexity since both operations have linear time cost.
Regarding performance, here are directions to try:
replace the test if a[i:] with if i < len(a). Creating the slice twice is costly.
perform the sort in place, avoiding the append operations
restructure the main loop to have a single test per iteration
Here is a modified version:
def merge_sort(L):
    if len(L) <= 1:
        return L
    else:
        mid = len(L) // 2
        a = merge_sort(L[:mid])
        b = merge_sort(L[mid:])
        i, j, k = 0, 0, 0
        while True:
            if a[i] <= b[j]:
                L[k] = a[i]
                k += 1
                i += 1
                if i == len(a):
                    L[k:] = b[j:]
                    return L
            else:
                L[k] = b[j]
                k += 1
                j += 1
                if j == len(b):
                    L[k:] = a[i:]
                    return L

Quick Sort algorithm with three way partition

I am new to algorithms and was working on implementing the Quick Sort algorithm with a 3-way partition such that it works fast even on sequences containing many equal elements. The following was my implementation:
import random

def randomized_quick_sort(a, l, r):
    if l >= r:
        return
    k = random.randint(l, r)
    a[l], a[k] = a[k], a[l]
    # use partition3
    m1, m2 = partition3(a, l, r)
    randomized_quick_sort(a, l, m1 - 1)
    randomized_quick_sort(a, m2 + 1, r)

def partition3(a, l, r):
    x, j, t = a[l], l, r
    for i in range(l + 1, t + 1):
        if a[i] < x:
            j += 1
            a[i], a[j] = a[j], a[i]
        elif a[i] > x:
            a[i], a[t] = a[t], a[i]
            i -= 1
            t -= 1
    a[l], a[j] = a[j], a[l]
    return j, t
It does not produce correctly sorted lists. I then found a correct implementation of the partition code here on Stack Overflow:
def partition3(a, l, r):
    x, j, t = a[l], l, r
    i = j
    while i <= t:
        if a[i] < x:
            a[j], a[i] = a[i], a[j]
            j += 1
        elif a[i] > x:
            a[t], a[i] = a[i], a[t]
            t -= 1
            i -= 1  # remain at the same i in this case
        i += 1
    return j, t
Can someone please explain to me how the incorrect partition implementation was failing?
Thanks in advance
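One likely culprit (my own observation, not a posted answer): in Python, a for loop rebinds its loop variable from the iterator at the top of every pass, so the i -= 1 in the incorrect partition3 never makes the loop re-examine the element just swapped in from position t, and range(l + 1, t + 1) is evaluated once, so shrinking t does not shorten the loop either. A tiny demonstration:

# The for statement rebinds i from range() on every pass, so decrementing
# i inside the body cannot repeat an index the way the while version does.
for i in range(5):
    if i == 2:
        i -= 1  # no effect on the next iteration
    print(i, end=' ')
# prints: 0 1 1 3 4 -- index 2 is never revisited

The corrected version uses a while loop precisely so that both i and the bound t can be adjusted mid-loop.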

counting inversions of mergesort

I have implemented a mergesort function which works correctly. However, I'm having a hard time modifying it to count the number of inversions in the original array before it is sorted.
An inversion is a pair of positions i < j with a[i] > a[j]. For example, a = [5,2,1] has 3 inversions: (5,2), (5,1), (2,1).
def mergeSort(a):
    mid = len(a) // 2
    if len(a) < 2:
        return
    l = a[:mid]
    r = a[mid:]
    mergeSort(l)
    mergeSort(r)
    return merge(l, r, a)

def merge(l, r, a):
    i = 0
    j = 0
    k = 0
    inv = 0
    while i < len(l) and j < len(r):
        if l[i] < r[j]:
            a[k] = l[i]
            i = i + 1
        else:
            a[k] = r[j]
            inv = inv + 1
            j = j + 1
        k = k + 1
    while i < len(l):
        a[k] = l[i]
        i = i + 1
        k = k + 1
    while j < len(r):
        a[k] = r[j]
        j = j + 1
        k = k + 1
        inv = inv + 1
    return [a, inv]

a = [6, 5, 4, 3, 2, 1]
print(mergeSort(a))
The above example should report 15 inversions, since n(n-1)/2 is the inversion count for an array in descending order.
Can someone explain how to count them?
L[i] > R[j] is a single inversion, but note that since the arrays are sorted, if L[i] > R[j], then L[k] > R[j] for every i <= k < |L|. So whenever you take an element from R, you can add len(L) - i to the total number of inversions, instead of just 1.
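Applied to the code above, that means adding len(l) - i whenever an element is taken from r, dropping the stray count in the tail-drain loop, and summing the counts from the recursive calls (which the original mergeSort discards). A sketch of a corrected version, keeping the same function signatures:

def mergeSort(a):
    if len(a) < 2:
        return 0
    mid = len(a) // 2
    l, r = a[:mid], a[mid:]
    inv = mergeSort(l) + mergeSort(r)  # inversions inside each half
    return inv + merge(l, r, a)        # plus inversions across the halves

def merge(l, r, a):
    i = j = k = 0
    inv = 0
    while i < len(l) and j < len(r):
        if l[i] <= r[j]:               # equal elements are not inversions
            a[k] = l[i]
            i += 1
        else:
            a[k] = r[j]
            inv += len(l) - i          # r[j] is smaller than every remaining l[i:]
            j += 1
        k += 1
    a[k:] = l[i:] + r[j:]              # drain whichever side is left
    return inv

print(mergeSort([6, 5, 4, 3, 2, 1]))   # 15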
