Is there a way to make collections.Counter ignore a given value (here 0) when counting:
from collections import Counter
import numpy as np
idx = np.random.randint(4, size=(100,100))
most_common = np.zeros(100)
num_most_common = np.zeros(100)
for i in range(100):
    most_common[i], num_most_common[i] = Counter(idx[i, :]).most_common(1)[0]
So if 0 is the most common value it should give the second most common value. In addition, is there a way to avoid the for loop in this case?
For non-negative integers, we can use a vectorized 2D bincount - bincount2D_vectorized -
# https://stackoverflow.com/a/46256361/ by @Divakar
def bincount2D_vectorized(a):
    N = a.max() + 1
    a_offs = a + np.arange(a.shape[0])[:, None] * N
    return np.bincount(a_offs.ravel(), minlength=a.shape[0] * N).reshape(-1, N)
# Get binned counts per row, with each number representing a bin
c = bincount2D_vectorized(idx)
# Skip the first element, as that represents counts for 0s.
# Get most common element and count per row
most_common = c[:,1:].argmax(1)+1
num_most_common = c[:,1:].max(1)
# faster : num_most_common = c[np.arange(len(most_common)),most_common]
For generic int numbers, we could extend like so -
s = idx.min()
c = bincount2D_vectorized(idx-s)
c[:,-s] = 0
most_common = c.argmax(1)
num_most_common = c[np.arange(len(most_common)),most_common]
most_common += s
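As a quick sanity check (my addition, reusing bincount2D_vectorized and the numpy import from above), the same recipe applied to a tiny array that contains negative values:
# Small, hedged sanity check (not part of the original answer); the value 0 is
# excluded from the per-row mode by zeroing out its column.
idx_small = np.array([[-2, -2, 0, 0, 1],
                      [ 3,  3, 3, 0, -1]])
s = idx_small.min()                        # -2
c = bincount2D_vectorized(idx_small - s)   # per-row counts of the shifted values
c[:, -s] = 0                               # column -s holds the counts for value 0
col = c.argmax(1)
most_common = col + s
num_most_common = c[np.arange(len(col)), col]
print(most_common, num_most_common)        # [-2  3] [2 3]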
You can do the following, using a generator to only count something if it is not 0.
most_common = np.array([Counter(x for x in r if x).most_common(1)[0][0] for r in idx])
num_most_common = np.array([Counter(x for x in r if x).most_common(1)[0][1] for r in idx])
or even
count = np.array([Counter(x for x in r if x).most_common(1)[0] for r in idx])
most_common = count[:,0]
num_most_common = count[:,1]
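One caveat worth noting (my addition, not part of the answer): if a row is entirely zero, the Counter is empty and most_common(1)[0] raises an IndexError. A small hedged guard, reusing Counter and numpy from above:
# Guard against rows that contain only zeros; the (0, 0) fallback is an
# arbitrary assumption, pick whatever sentinel suits your data.
def most_common_nonzero(row):
    c = Counter(x for x in row if x != 0)
    return c.most_common(1)[0] if c else (0, 0)

count = np.array([most_common_nonzero(r) for r in idx])
most_common, num_most_common = count[:, 0], count[:, 1]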
Suppose I have a huge array of data, a sample of which is:
x= [ 511.31, 512.24, 571.77, 588.35, 657.08, 665.49, -1043.45, -1036.56,-969.39, -955.33]
I used the following code to generate all possible pairs
Pairs=[(x[i],x[j]) for i in range(len(x)) for j in range(i+1, len(x))]
which gave me all possible pairs. Now I would like to group these pairs whose elements lie within ±25 of each other and label them accordingly.
Any idea or advice on how to do this? Thanks in advance.
If I understood your problem correctly, the code below should do the trick. The idea is to build a dictionary whose keys are the pair means, and to keep appending the matching pairs to it:
import numpy as np #I use numpy for the mean.
#Your threshold
threshold = 25
#A dictionary will hold the relevant pairs
mylist = {}
for i in Pairs:
    # Check the threshold and discard otherwise
    diff = abs(i[1] - i[0])
    if diff < threshold:
        # Name of the entry in the dictionary
        entry = str('%d' % int(np.mean(i)))
        # If the entry already exists, append. Otherwise, create a container list
        if entry in mylist:
            mylist[entry].append(i)
        else:
            mylist[entry] = [i]
which (run on a slightly larger sample than the one quoted above) results in the following output:
{'-1040': [(-1043.45, -1036.56)],
'-962': [(-969.39, -955.33)],
'511': [(511.1, 511.31),
(511.1, 512.24),
(511.1, 512.35),
(511.31, 512.24),
(511.31, 512.35)],
'512': [(511.1, 513.35),
(511.31, 513.35),
(512.24, 512.35),
(512.24, 513.35),
(512.35, 513.35)],
'580': [(571.77, 588.35)],
'661': [(657.08, 665.49)]}
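The same grouping can be written a bit more compactly with collections.defaultdict, which removes the explicit key check. A sketch equivalent to the loop above (it assumes Pairs and the threshold from the question):
# Equivalent grouping with defaultdict; behaviour matches the loop above.
from collections import defaultdict
import numpy as np

threshold = 25
groups = defaultdict(list)
for lo, hi in Pairs:
    if abs(hi - lo) < threshold:
        groups['%d' % int(np.mean((lo, hi)))].append((lo, hi))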
This should be a fast way to do that:
import numpy as np
from scipy.spatial.distance import pdist
# Input data
x = np.array([511.31, 512.24, 571.77, 588.35, 657.08,
              665.49, -1043.45, -1036.56, -969.39, -955.33])
thres = 25.0
# Compute pairwise distances
# the default distance metric is 'euclidean', which would
# be equivalent here but more expensive to compute
d = pdist(x[:, np.newaxis], 'cityblock')
# Find distances within threshold
d_idx = np.where(d <= thres)[0]
# Convert "condensed" distance indices to pair of indices
r = np.arange(len(x))
c = np.zeros_like(r, dtype=np.int32)
np.cumsum(r[:0:-1], out=c[1:])
i = np.searchsorted(c[1:], d_idx, side='right')
j = d_idx - c[i] + r[i] + 1
# Get pairs of values
v_i = x[i]
v_j = x[j]
# Find means
m = np.round((v_i + v_j) / 2).astype(np.int32)
# Print result
for idx in range(len(m)):
    print(f'{m[idx]}: ({v_i[idx]}, {v_j[idx]})')
Output
512: (511.31, 512.24)
580: (571.77, 588.35)
661: (657.08, 665.49)
-1040: (-1043.45, -1036.56)
-962: (-969.39, -955.33)
I was given a task to create the smallest possible number from the digits of two numbers while keeping all zeros.
But I cannot solve the task because my code does not keep all the zeros in the right place. If the input is
245
36
the output is 23456 and that's correct. But with input:
40
305
it outputs [0, 0, 3, 4, 5], but it should be 30045.
Here's my code:
f1 = [int(x) for x in input()]
f2 = [int(y) for y in input()]
f = f1+f2
for each in range(len(f)):
    for eacc in range(each + 1, len(f)):
        if f[each] > f[eacc]:
            f[each], f[eacc] = f[eacc], f[each]
for zero in range(len(f)):
    if f[zero] == 0 and f[0] > 0:
        f.remove(0)
        f.insert(zero + 1, 0)
        break
print(f)
n1 = 40
n2 = 305
# sort the digits lexicographically
ns = sorted(str(n1) + str(n2))
# move the first non-zero digit to the start
i = ns.count('0')
if 0 < i < len(ns):
    ns[0:0] = ns.pop(i)
print(''.join(ns))  # 30045
Remove all the zeros, get all the permutations, and find the min. Then insert the zeros back at index 1.
from itertools import permutations
a = list('40') + list('305')
a = list(map(int, a))
num_of_zero = a.count(0)  # get the count of zeros
for i in range(num_of_zero):
    a.pop(a.index(0))
new_list = list(min(permutations(a)))  # smallest of all the permutations
for i in range(num_of_zero):
    new_list.insert(1, 0)  # insert zeros at index 1, shifting all elements to the right
print(''.join(map(str, new_list)))  # 30045
Without permutations, sorted will also work:
a = list('40') + list('305')
a = list(map(int, a))
num_of_zero = a.count(0)
for i in range(num_of_zero):
    a.pop(a.index(0))
new_list = sorted(a)
for i in range(num_of_zero):
    new_list.insert(1, 0)
print(''.join(map(str, new_list)))  # 30045
Using numpy
import numpy as np
a = list('40') + list('305')
a = list(map(int, a))
num_of_zero = a.count(0)
new_list = sorted(a)  # sorted will return [0, 0, 3, 4, 5]
I = np.nonzero(new_list)  # indices of the non-zero elements
if len(I[0]) > 0:
    first_non_zero_value = new_list.pop(I[0][0])  # pop the first non-zero element
    new_list.insert(0, first_non_zero_value)
print(''.join(map(str, new_list)))  # 30045
Here you could use itertools.permutations. First I would use map to turn each int into a list of its digits, then concatenate them into one list of 5 ints. Using permutations we can generate every number that could be made from these 5 digits. From that list we take the min, using a list comprehension to filter out any candidate that begins with 0 (if i[0]). Since the result is a tuple of ints, we convert the elements to str, join them, and print.
from itertools import permutations
a = 40
b = 305
a = [*map(int, str(a))]
b = [*map(int, str(b))]
c = a + b
combo = list(permutations(c, len(c)))
res = min([i for i in combo if i[0]])
res = [str(i) for i in res]
print(''.join(res))
# 30045
If a = 0, b = 0 is a potential input, a try/except block would be necessary:
try:
    res = min([i for i in combo if i[0]])
    res = [str(i) for i in res]
    print(int(''.join(res)))
except ValueError:
    res = 0
    print(res)
import numpy as np
x = ([1,2,3,3])
y = ([1,2,3])
z = ([6,6,1,2,9,9])
(only positive values)
For each array I need to return the most common value, or, if several values occur the same number of times, the minimum of those.
This is a homework assignment and I can't use anything but numpy.
outputs:
f(x) = 3,
f(y) = 1,
f(z) = 6
For a numpy-exclusive solution, something like this will work:
occurrences = np.bincount(x)
print(np.argmax(occurrences))
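Since np.argmax returns the first maximum and np.bincount indexes counts by value, ties are resolved in favour of the smallest value, which is exactly the tie-break asked for. A quick check against the expected outputs (my addition):
import numpy as np

for arr, expected in [([1, 2, 3, 3], 3), ([1, 2, 3], 1), ([6, 6, 1, 2, 9, 9], 6)]:
    occurrences = np.bincount(arr)
    print(np.argmax(occurrences), expected)  # argmax picks the smallest value on ties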
The above method won't work if there is a negative number in the list, since np.bincount only accepts non-negative integers. To account for that case, use np.unique instead:
values, counts = np.unique(x, return_counts=True)
# np.unique returns the values in sorted order, so on tied counts
# argmax picks the smallest value
print(values[np.argmax(counts)])
It's called a mode function. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mode.html
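A minimal sketch of that (note the question only allows numpy, and the exact return shape of scipy.stats.mode has changed between SciPy versions, so treat this as illustrative):
# scipy.stats.mode returns the smallest value on ties; the result is a scalar
# or a 1-element array depending on the SciPy version.
from scipy.stats import mode

res = mode([1, 2, 3])
print(res.mode, res.count)  # smallest tied value (1) with count 1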
Without numpy
n_dict = {}
for k in x:
    try:
        n_dict[k] += 1
    except KeyError:
        n_dict[k] = 1
rev_n_dict = {}
for k in n_dict:
    if n_dict[k] not in rev_n_dict:
        rev_n_dict[n_dict[k]] = [k]
    else:
        rev_n_dict[n_dict[k]].append(k)
local_max = 0
for k in rev_n_dict:
    if k > local_max:
        local_max = k
if len(rev_n_dict[local_max]) > 0:
    print(min(rev_n_dict[local_max]))
else:
    print(rev_n_dict[local_max])
To add to the previous results, you could use a collections.Counter object:
my_array = [3,24,543,3,1,6,7,8,....,223213,13213]
from collections import Counter
my_counter = Counter( my_array)
most_common_value = my_counter.most_common(1)[0][0]
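One caveat (my addition): most_common() does not break ties by value, so to honour the question's "return the minimum on equal counts" requirement you could add something like:
# Break ties by taking the smallest value among those with the maximal count.
max_count = max(my_counter.values())
most_common_value = min(v for v, c in my_counter.items() if c == max_count)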
It is quite simple but certainly not pretty. I have used variable names that should be self-explanatory, along with comments. Feel free to ask if anything is unclear.
import numpy as np

x = [6, 6, 1, 2, 9, 9]

def tester(x):
    not_required, counts = np.unique(x, return_counts=True)
    x = np.array(x)
    if (x >= 0).all():
        highest_occurance = [not_required[np.argmax(counts)]]
        number_of_counts = np.max(counts)
    else:
        highest_occurance = not_required[np.argmax(counts)]
        number_of_counts = np.max(counts)
    return highest_occurance, number_of_counts

most_abundant, first_test_counts = tester(x)
new_x = [vals for vals in x if vals not in most_abundant]
second_most_abundant, second_test_counts = tester(new_x)

if second_test_counts == first_test_counts:
    print("At least two elements have the same number of counts:", most_abundant, "and", second_most_abundant, "have %s" % first_test_counts, "occurrences")
else:
    print("%s occurs for the max of %s times" % (most_abundant, first_test_counts))
We could also loop this to check whether more than two elements share the same occurrence count, instead of using an if/else that only looks at two elements; a sketch of that idea follows.
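A hedged sketch of that idea (my addition) using np.unique directly: collect every value whose count equals the maximum and report the smallest, however many ties there are:
# Handle any number of tied values in one pass with np.unique.
import numpy as np

def most_common_min_tie(arr):
    values, counts = np.unique(arr, return_counts=True)
    tied = values[counts == counts.max()]  # all values sharing the maximal count
    return tied.min(), counts.max()

print(most_common_min_tie([6, 6, 1, 2, 9, 9]))  # (6, 2)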
I have a very large ndarray A, and a sorted list of points k (a small list, about 30 points).
For every element of A, I want to determine the closest element in the list of points k, together with the index. So something like:
>>> A = np.asarray([3, 4, 5, 6])
>>> k = np.asarray([4.1, 3])
>>> values, indices
[3, 4.1, 4.1, 4.1], [1, 0, 0, 0]
Now, the problem is that A is very, very large, so I can't do something inefficient like adding one dimension to A, taking the absolute difference to k, and then taking the minimum over each column.
For now I have been using np.searchsorted, as shown in the second answer here: Find nearest value in numpy array but even this is too slow. This is the code I used (modified to work with multiple values):
def find_nearest(A, k):
    indicesClosest = np.searchsorted(k, A)
    flagToReduce = indicesClosest == k.shape[0]
    modifiedIndicesToAvoidOutOfBoundsException = indicesClosest.copy()
    modifiedIndicesToAvoidOutOfBoundsException[flagToReduce] -= 1
    flagToReduce = np.logical_or(flagToReduce,
                                 np.abs(A - k[indicesClosest-1]) <
                                 np.abs(A - k[modifiedIndicesToAvoidOutOfBoundsException]))
    flagToReduce = np.logical_and(indicesClosest > 0, flagToReduce)
    indicesClosest[flagToReduce] -= 1
    valuesClosest = k[indicesClosest]
    return valuesClosest, indicesClosest
I then thought of using scipy.spatial.KDTree:
>>> d = scipy.spatial.KDTree(k)
>>> d.query(A)
This turns out to be much slower than the searchsorted solution.
On the other hand, the array A is always the same, only k changes. So it would be beneficial to use some auxiliary structure (like an "inverse KDTree") on A, and then query the results on the small array k.
Is there something like that?
Edit
At the moment I am using a variant of np.searchsorted that requires the array A to be sorted. We can do this in advance as a pre-processing step, but we still have to restore the original order after computing the indices. This variant is about twice as fast as the one above.
A = np.random.random(3000000)
k = np.random.random(30)
indices_sort = np.argsort(A)
sortedA = A[indices_sort]
inv_indices_sort = np.argsort(indices_sort)
k.sort()
def find_nearest(sortedA, k):
    midpoints = k[:-1] + np.diff(k)/2
    idx_aux = np.searchsorted(sortedA, midpoints)
    idx = []
    count = 0
    final_indices = np.zeros(sortedA.shape, dtype=int)
    old_obj = None
    for obj in idx_aux:
        if obj != old_obj:
            idx.append((obj, count))
            old_obj = obj
        count += 1
    old_idx = 0
    for idx_A, idx_k in idx:
        final_indices[old_idx:idx_A] = idx_k
        old_idx = idx_A
    final_indices[old_idx:] = len(k) - 1
    indicesClosest = final_indices[inv_indices_sort]  # <- this takes 90% of the time
    return k[indicesClosest], indicesClosest
The line that takes so much time is the line that brings the indices back to their original order.
Update:
The builtin function numpy.digitize can actually do exactly what you need. Only a small trick is required: digitize assigns values to bins. We can convert k to bins by sorting the array and setting the bin borders exactly in the middle between adjacent elements.
import numpy as np
A = np.asarray([3, 4, 5, 6])
k = np.asarray([4.1, 3, 1]) # added another value to show that sorting/binning works
ki = np.argsort(k)
ks = k[ki]
i = np.digitize(A, (ks[:-1] + ks[1:]) / 2)
indices = ki[i]
values = ks[i]
print(values, indices)
# [ 3. 4.1 4.1 4.1] [1 0 0 0]
Old answer:
I would take a brute-force approach to perform one vectorized pass over A for each element in k and update those locations where the current element improves the approximation.
import numpy as np
A = np.asarray([3, 4, 5, 6])
k = np.asarray([4.1, 3])
err = np.zeros_like(A) + np.inf # keep track of error over passes
values = np.empty_like(A, dtype=k.dtype)
indices = np.empty_like(A, dtype=int)
for i, v in enumerate(k):
    d = np.abs(A - v)
    mask = d < err  # only update where v is closer to A
    values[mask] = v
    indices[mask] = i
    err[mask] = d[mask]
print(values, indices)
# [ 3. 4.1 4.1 4.1] [1 0 0 0]
This approach requires three temporary variables of the same size as A, so it will fail if not enough memory is available.
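If those temporaries don't fit, one possible mitigation (my sketch, not part of the answer) is to run the same passes over A chunk by chunk, so only chunk-sized temporaries are alive at any time:
# Same brute-force passes, applied chunk by chunk to bound peak memory.
chunk = 1_000_000  # chunk size is an arbitrary assumption, tune to available RAM
values = np.empty_like(A, dtype=k.dtype)
indices = np.empty_like(A, dtype=int)
for start in range(0, len(A), chunk):
    part = A[start:start + chunk]
    err = np.full(part.shape, np.inf)
    for i, v in enumerate(k):
        d = np.abs(part - v)
        mask = d < err
        values[start:start + chunk][mask] = v   # the slice is a view, so this writes through
        indices[start:start + chunk][mask] = i
        err[mask] = d[mask]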
So, after some work and an idea from the scipy mailing list, I think that in my case (with a constant A and slowly varying k), the best way to do this is to use the following implementation.
class SearchSorted:
    def __init__(self, tensor, use_k_optimization=True):
        '''
        use_k_optimization requires storing 4x the size of the tensor.
        If use_k_optimization is True, the class will assume that successive calls will be made with similar k.
        When this happens, we can cut the running time significantly by storing additional variables. If it won't
        be called with successive k, set the flag to False, as it would otherwise just consume more memory for
        no good reason.
        '''
        self.indices_sort = np.argsort(tensor)
        self.sorted_tensor = tensor[self.indices_sort]
        self.inv_indices_sort = np.argsort(self.indices_sort)
        self.use_k_optimization = use_k_optimization

        self.previous_indices_results = None
        self.prev_idx_A_k_pair = None

    def query(self, k):
        midpoints = k[:-1] + np.diff(k) / 2
        idx_count = np.searchsorted(self.sorted_tensor, midpoints)

        idx_A_k_pair = []
        count = 0
        old_obj = 0
        for obj in idx_count:
            if obj != old_obj:
                idx_A_k_pair.append((obj, count))
                old_obj = obj
            count += 1

        if not self.use_k_optimization or self.previous_indices_results is None:
            # creates the index matrix in the sorted case
            final_indices = self._create_indices_matrix(idx_A_k_pair, self.sorted_tensor.shape, len(k))
            # and now unsort it to match the original tensor position
            indicesClosest = final_indices[self.inv_indices_sort]
            if self.use_k_optimization:
                self.prev_idx_A_k_pair = idx_A_k_pair
                self.previous_indices_results = indicesClosest
            return indicesClosest

        old_indices_unsorted = self._create_indices_matrix(self.prev_idx_A_k_pair, self.sorted_tensor.shape, len(k))
        new_indices_unsorted = self._create_indices_matrix(idx_A_k_pair, self.sorted_tensor.shape, len(k))
        mask = new_indices_unsorted != old_indices_unsorted

        self.prev_idx_A_k_pair = idx_A_k_pair
        self.previous_indices_results[self.indices_sort[mask]] = new_indices_unsorted[mask]
        indicesClosest = self.previous_indices_results

        return indicesClosest

    @staticmethod
    def _create_indices_matrix(idx_A_k_pair, matrix_shape, len_quant_points):
        old_idx = 0
        final_indices = np.zeros(matrix_shape, dtype=int)
        for idx_A, idx_k in idx_A_k_pair:
            final_indices[old_idx:idx_A] = idx_k
            old_idx = idx_A
        final_indices[old_idx:] = len_quant_points - 1
        return final_indices
The idea is to sort the array A beforehand, then use searchsorted of A on the midpoints of k. This gives the same information as before, in that it tells us exactly which points of A are closer to which points of k. The method _create_indices_matrix will create the full indices array from this information, and then we unsort it to recover the original order of A. To take advantage of slowly varying k, we save the last indices and determine which indices have to change; we then change only those. For slowly varying k, this produces superior performance (at a considerably higher memory cost, however).
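For reference, usage of the class looks roughly like this (mirroring how it is driven in the timing code below; query assumes k is sorted):
searcher = SearchSorted(A, use_k_optimization=True)  # build once for the fixed A
k.sort()
indices = searcher.query(k)  # index into k of the nearest point, per element of A
values = k[indices]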
For a random array A of 5 million elements and k of about 30 elements, repeating the experiment 60 times, we get
Function search_sorted1; 15.72285795211792s
Function search_sorted2; 13.030786037445068s
Function query; 2.3306031227111816s <- the one with use_k_optimization = True
Function query; 4.81286096572876s <- with use_k_optimization = False
scipy.spatial.KDTree.query is too slow, so I don't time it (it takes above 1 minute, though). This is the code used to do the timing; it also contains the implementations of search_sorted1 and search_sorted2.
import numpy as np
import scipy
import scipy.spatial
import time
A = np.random.rand(10000*500) #5 million elements
k = np.random.rand(32)
k.sort()
#first attempt, detailed in the answer, too
def search_sorted1(A, k):
    indicesClosest = np.searchsorted(k, A)
    flagToReduce = indicesClosest == k.shape[0]
    modifiedIndicesToAvoidOutOfBoundsException = indicesClosest.copy()
    modifiedIndicesToAvoidOutOfBoundsException[flagToReduce] -= 1
    flagToReduce = np.logical_or(flagToReduce,
                                 np.abs(A - k[indicesClosest-1]) <
                                 np.abs(A - k[modifiedIndicesToAvoidOutOfBoundsException]))
    flagToReduce = np.logical_and(indicesClosest > 0, flagToReduce)
    indicesClosest[flagToReduce] -= 1
    return indicesClosest
# taken from @Divakar's answer linked in the comments under the question
def search_sorted2(A, k):
    indicesClosest = np.searchsorted(k, A, side="left").clip(max=k.size - 1)
    mask = (indicesClosest > 0) & \
           ((indicesClosest == len(k)) | (np.fabs(A - k[indicesClosest - 1]) < np.fabs(A - k[indicesClosest])))
    indicesClosest = indicesClosest - mask
    return indicesClosest
def kdquery1(A, k):
    # cKDTree expects 2-D points, so reshape the 1-D inputs to (n, 1)
    d = scipy.spatial.cKDTree(k[:, None], compact_nodes=False, balanced_tree=False)
    _, indices = d.query(A[:, None])
    return indices
# After an idea from the scipy mailing list
class SearchSorted:
    def __init__(self, tensor, use_k_optimization=True):
        '''
        Using this requires storing 4x the size of the tensor.
        If use_k_optimization is True, the class will assume that successive calls will be made with similar k.
        When this happens, we can cut the running time significantly by storing additional variables. If it won't
        be called with successive k, set the flag to False, as it would otherwise just consume more memory for
        no good reason.
        '''
        self.indices_sort = np.argsort(tensor)
        self.sorted_tensor = tensor[self.indices_sort]
        self.inv_indices_sort = np.argsort(self.indices_sort)
        self.use_k_optimization = use_k_optimization

        self.previous_indices_results = None
        self.prev_idx_A_k_pair = None

    def query(self, k):
        midpoints = k[:-1] + np.diff(k) / 2
        idx_count = np.searchsorted(self.sorted_tensor, midpoints)

        idx_A_k_pair = []
        count = 0
        old_obj = 0
        for obj in idx_count:
            if obj != old_obj:
                idx_A_k_pair.append((obj, count))
                old_obj = obj
            count += 1

        if not self.use_k_optimization or self.previous_indices_results is None:
            # creates the index matrix in the sorted case
            final_indices = self._create_indices_matrix(idx_A_k_pair, self.sorted_tensor.shape, len(k))
            # and now unsort it to match the original tensor position
            indicesClosest = final_indices[self.inv_indices_sort]
            if self.use_k_optimization:
                self.prev_idx_A_k_pair = idx_A_k_pair
                self.previous_indices_results = indicesClosest
            return indicesClosest

        old_indices_unsorted = self._create_indices_matrix(self.prev_idx_A_k_pair, self.sorted_tensor.shape, len(k))
        new_indices_unsorted = self._create_indices_matrix(idx_A_k_pair, self.sorted_tensor.shape, len(k))
        mask = new_indices_unsorted != old_indices_unsorted

        self.prev_idx_A_k_pair = idx_A_k_pair
        self.previous_indices_results[self.indices_sort[mask]] = new_indices_unsorted[mask]
        indicesClosest = self.previous_indices_results

        return indicesClosest

    @staticmethod
    def _create_indices_matrix(idx_A_k_pair, matrix_shape, len_quant_points):
        old_idx = 0
        final_indices = np.zeros(matrix_shape, dtype=int)
        for idx_A, idx_k in idx_A_k_pair:
            final_indices[old_idx:idx_A] = idx_k
            old_idx = idx_A
        final_indices[old_idx:] = len_quant_points - 1
        return final_indices
mySearchSorted = SearchSorted(A, use_k_optimization=True)
mySearchSorted2 = SearchSorted(A, use_k_optimization=False)
allFunctions = [search_sorted1, search_sorted2,
                mySearchSorted.query,
                mySearchSorted2.query]
print(np.array_equal(mySearchSorted.query(k), kdquery1(A, k)))
print(np.array_equal(mySearchSorted.query(k), search_sorted2(A, k)))
print(np.array_equal(mySearchSorted2.query(k), search_sorted2(A, k)))
if __name__ == '__main__':
    num_to_average = 3
    for func in allFunctions:
        if func.__name__ == 'search_sorted3':
            indices_sort = np.argsort(A)
            sA = A[indices_sort].copy()
            inv_indices_sort = np.argsort(indices_sort)
        else:
            sA = A.copy()
        if func.__name__ != 'query':
            func_to_use = lambda x: func(sA, x)
        else:
            func_to_use = func
        k_to_use = k
        start_time = time.time()
        for idx_average in range(num_to_average):
            for idx_repeat in range(10):
                k_to_use += (2*np.random.rand(*k.shape) - 1)/100  # uniform between (-1/100, 1/100)
                k_to_use.sort()
                indices = func_to_use(k_to_use)
                if func.__name__ == 'search_sorted3':
                    indices = indices[inv_indices_sort]
                val = k[indices]
        end_time = time.time()
        total_time = end_time - start_time
        print('Function {}; {}s'.format(func.__name__, total_time))
I'm sure it is still possible to do better (I use a lot of space for the SearchSorted class, so we could probably save something). If you have any ideas for an improvement, please let me know!
I have several arrays of size (20000, 1) with different contents. I'd like to randomly delete 25% of all rows per array, in such a way that the same rows are deleted from each array.
A rather tedious way I found is the following:
import numpy as np
a=np.array(range(1000))
b=np.array(np.random.rand(1000))
seed=np.random.randint(0,100000000) #picking a random seed
np.random.seed(seed) #Setting the same seed for each deletion
a[np.random.rand(*a.shape) < .25] = 0
np.random.seed(seed)
b[np.random.rand(*b.shape) < .25] = 0
a=a[a !=0]
b=b[b !=0]
There are several problems with this approach, such as what if an array already contains zeros?
Is there a better way of doing this?
based on and extended from Joel Cornett's solution:
import numpy as np
length = 20000
limit = int(0.75*length)
keep = np.random.permutation(length)[:limit]
newArray = oldArray[keep]
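The point of generating keep once is that the very same index array can then be applied to every array, so identical rows are dropped everywhere; for example, with the a and b from the question:
a = np.arange(1000)
b = np.random.rand(1000)
keep = np.random.permutation(len(a))[:int(0.75 * len(a))]  # one selection shared by all arrays
a_new, b_new = a[keep], b[keep]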
Here is a non-numpy solution in very general terms:
import random
import numpy as np

to_keep = set(random.sample(range(total_rows), int(keep_ratio * total_rows)))

# do this for each array:
new_array = np.array([item for index, item in enumerate(old_array) if index in to_keep])
total_rows is the number of rows in each array (I think you said this was 20,000)
keep_ratio is the percentage of rows to keep, which according to you is 1 - 0.25 = 0.75
EDIT
You can also use numpy's compress() method.
import random
import numpy as np

to_keep = set(random.sample(range(total_rows), int(keep_ratio * total_rows)))
kompressor = [1 if i in to_keep else 0 for i in range(total_rows)]
new_array = np.compress(kompressor, old_array, axis=0)  # axis=0 selects rows
kompressor is simply a sequence of ones and zeros that marks which rows survive.
Similar to Theodros's answer, but preserves the original ordering of elements:
import numpy as np
mask = np.ones(len(a), dtype=bool)
mask[:len(a)//4] = 0
np.random.shuffle(mask)
a = a[mask]
b = b[mask]
I have no idea how well this works with numpy, but this is what I'd use in pure Python:
import random

total = len(a)
toss = int(0.25 * total)
keeping = [False] * toss + [True] * (total - toss)
random.shuffle(keeping)
a = [value for value, flag in zip(a, keeping) if flag]
b = [value for value, flag in zip(b, keeping) if flag]