I have this code that generates all 2**40 possible binary numbers, and from these binary numbers I want to extract all the vectors that satisfy my objective function's conditions, which are:
1- each vector in the matrix must contain exactly 20 ones (1);
2- the sum s = s + (index of the one + 1) * (rank of the one) must equal 4970.
I wrote the code below, but it would take a very long time (maybe months) to produce the results. I am looking for an alternative approach or an optimization of this code, if possible.
import time
from multiprocessing import Process
from multiprocessing import Pool
import numpy as np
import itertools
CC = 20
#test whether the vector contains exactly 20 ones
def test1numebers(v,x=1,x_l=CC):
c = 0
for i in range(len(v)):
if(v[i]==x):
c+=1
if c == x_l:
return True
else:
return False
#s = s + (index + 1) * (rank of the 1 within the vector)
def objectif_function(v,x=1):
s = 0
for i in range(len(v)):
if(v[i]==x):
s = s+((i+1)*nthi(v,i))
return s
#count how many 1s appear up to and including position i (the rank of the 1 at i)
def nthi(v,i):
c = 0
for j in range(0,i+1):
if(v[j] == 1):
c+=1
return c
#generate all 2**40 possible binary vectors (lazily)
def generateMatrix(N):
l = itertools.product([0, 1], repeat=N)
return l
#count the vectors that satisfy the objective function
def main_algo(N=40, S=4970):
    t_start = time.time()
    m = generateMatrix(N)
    c = 0
    ii = 0
for i in m:
ii+=1
print("\n count:",ii)
xx = i
if(test1numebers(xx)):
if(objectif_function(xx)==S):
c+=1
print('found one')
print('\n',xx,'\n')
if ii>=1000000:
break
t_end = time.time()
print('time taken for 10**6 is: ',t_end-t_start)
print(c)
#main_algo()
if __name__ == '__main__':
'''p = Process(target=main_algo, args=(40,4970,))
p.start()
p.join()'''
    # map() would call main_algo(40) and main_algo(4970) as two separate tasks;
    # starmap() passes both arguments to a single call
    p = Pool()
    print(p.starmap(main_algo, [(40, 4970)]))
You could make a lot of improvements in readability and make your code more Pythonic, but the biggest gain is in how the computation is done.
I recommend that you use NumPy, which is the fastest way of working with matrices in Python.
Avoid processing matrices element by element in a loop. With NumPy you can perform those calculations on all of the data at once, which is much faster.
NumPy can also generate matrices very quickly; you could build a random [0, 1] matrix in fewer lines of code and considerably faster.
I also recommend installing OpenBLAS, ATLAS or LAPACK, which make linear algebra calculations considerably faster.
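As a rough illustration (not a drop-in replacement for your functions), the per-vector loops can be expressed as whole-array operations; this sketch assumes the candidate vectors are stacked as rows of a 2D NumPy array:
import numpy as np
def batch_objective(vectors):
    # vectors: 2D array of 0/1 rows, shape (batch, N)
    ranks = np.cumsum(vectors, axis=1)             # rank of each 1 within its row
    weights = np.arange(1, vectors.shape[1] + 1)   # position index + 1
    return np.sum(vectors * ranks * weights, axis=1)
def batch_has_20_ones(vectors):
    return vectors.sum(axis=1) == 20
Checking a whole batch then becomes two array expressions, e.g. np.where(batch_has_20_ones(vs) & (batch_objective(vs) == 4970))[0], instead of a Python-level loop per vector.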
I hope this helps you.
I have two ordered lists of consecutive integers m = 0, 1, ..., M and n = 0, 1, 2, ..., N. Each value of m has a probability pm, and each value of n has a probability pn. I am trying to find the ordered list of unique values r = m/n and their probabilities pr. I am aware that r is infinite if n = 0 and can even be undefined if m = n = 0.
In practice, I would like M and N to each be of the order of 2E4, meaning up to 4E8 values of r, which would be about 3 GB of floats (assuming 8 bytes per float).
For this calculation, I have written the Python code below.
The idea is to iterate over m and n, and for each new m/n, insert it in the right place with its probability if it isn't there yet, or otherwise add its probability to the existing entry. My assumption is that it is easier to sort as I go rather than waiting until the end.
The cases related to 0 are added at the end of the loop.
I am using the Fraction class since we are dealing with fractions.
The code also tracks the multiplicity of each unique value of m/n.
I have tested up to M=N=100, and things are quite slow. Are there better approaches to the question, or more efficient ways to tackle the code?
Timing:
M=N=30: 1 s
M=N=50: 6 s
M=N=80: 30 s
M=N=100: 82 s
import numpy as np
from fractions import Fraction
import time # For timing
start_time = time.time() # Timing
M, N = 6, 4
mList, nList = np.arange(1, M+1), np.arange(1, N+1) # From 1 to M inclusive, deal with 0 later
mProbList, nProbList = [1/(M+1)]*(M), [1/(N+1)]*(N) # Probabilities, here assumed equal (not general case)
# Deal with mn=0 later
pmZero, pnZero = 1/(M+1), 1/(N+1) # P(m=0) and P(n=0)
pNaN = pmZero * pnZero # P(0/0) = P(m=0)P(n=0)
pZero = pmZero * (1 - pnZero) # P(0) = P(m=0)P(n!=0)
pInf = pnZero * (1 - pmZero) # P(inf) = P(m!=0)P(n=0)
# Main list of r=m/n, P(r) and mult(r)
# Start with first line, m=1
rList = [Fraction(mList[0], n) for n in nList[::-1]] # Smallest first
rProbList = [mProbList[0] * nP for nP in nProbList[::-1]] # Start with first line
rMultList = [1] * len(rList) # Multiplicity of each element
# Main loop
for m, mP in zip(mList[1:], mProbList[1:]):
for n, nP in zip(nList[::-1], nProbList[::-1]): # Pick an n value
r, rP, rMult = Fraction(m, n), mP*nP, 1
for i in range(len(rList)-1): # See where it fits in existing list
if r < rList[i]:
rList.insert(i, r)
rProbList.insert(i, rP)
rMultList.insert(i, 1)
break
elif r == rList[i]:
rProbList[i] += rP
rMultList[i] += 1
break
elif r < rList[i+1]:
rList.insert(i+1, r)
rProbList.insert(i+1, rP)
rMultList.insert(i+1, 1)
break
elif r == rList[i+1]:
rProbList[i+1] += rP
rMultList[i+1] += 1
break
if r > rList[-1]:
rList.append(r)
rProbList.append(rP)
rMultList.append(1)
break
# Deal with 0
rList.insert(0, Fraction(0, 1))
rProbList.insert(0, pZero)
rMultList.insert(0, N)
# Deal with infty
rList.append(np.Inf)
rProbList.append(pInf)
rMultList.append(M)
# Deal with undefined case
rList.append(np.NAN)
rProbList.append(pNaN)
rMultList.append(1)
print(".... done in %s seconds." % round(time.time() - start_time, 2))
print("************** Final list\nr", 'Prob', 'Mult')
for r, rP, rM in zip(rList, rProbList, rMultList): print(r, rP, rM)
print("************** Checks")
print("mList", mList, 'nList', nList)
print("Sum of proba = ", np.sum(rProbList))
print("Sum of multi = ", np.sum(rMultList), "\t(M+1)*(N+1) = ", (M+1)*(N+1))
Based on the suggestion of @Prune, and on this thread about merging lists of tuples, I have modified the code as below. It is a lot easier to read and runs about an order of magnitude faster for N=M=80 (I have omitted dealing with 0; it would be done the same way as in the original post). I assume there may be ways to tweak the merge and the conversion back to lists further still.
# Do calculations
data = [(Fraction(m, n), mProb(m) * nProb(n)) for n in range(1, N+1) for m in range(1, M+1)]
data.sort()
# Merge duplicates using a dictionary
d = {}
for r, p in data:
if not (r in d): d[r] = [0, 0]
d[r][0] += p
d[r][1] += 1
# Convert back to lists
rList, rProbList, rMultList = [], [], []
for k in d:
rList.append(k)
rProbList.append(d[k][0])
rMultList.append(d[k][1])
I expect that "things are quite slow" because you've chosen a sorting approach that is known to be inefficient. A single list insertion is O(K) (later list elements have to be shifted over, and storage is reallocated regularly), so a full insertion sort over the list is O(K^2). In your notation, that is O((M*N)^2).
If you want reasonable performance, research and use the best-known methods. The most straightforward one here is to build your non-exception results with a simple list comprehension and use the built-in sort on that list. Then simply append your zero/infinity cases, and you're done in O(K log K) time.
In the expression below, I've assumed functions for the m and n probabilities.
This is a notational convenience; you know how to compute them directly and can substitute those expressions if you wish.
data = [ (Fraction(m, n), mProb(m) * nProb(n))
         for n in range(1, N+1)
         for m in range(0, M+1) ]
data.sort()
data.extend([])  # generate your "zero" / infinity cases here
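If you also need the merged probabilities and multiplicities (as in the question), here is a sketch of that step using itertools.groupby, assuming data is sorted by the Fraction as above:
from itertools import groupby
rList, rProbList, rMultList = [], [], []
for r, group in groupby(data, key=lambda pair: pair[0]):
    probs = [p for _, p in group]
    rList.append(r)
    rProbList.append(sum(probs))
    rMultList.append(len(probs))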
I need to generate 20 million non-repeating random numbers, each 8 digits long, and save them in an array.
I have tried multiprocessing and threading, but it stays slow.
My attempt with multiprocessing:
from numpy.random import default_rng
from multiprocessing import Process,Queue
import os,time
import numpy as np
rng = default_rng()
f=np.array([],dtype=np.int64)
def generate(q,start,stop):
numbers=[rng.choice(range(start,stop),replace=False) for _ in range(1000)]
q.put(numbers)
if __name__ == '__main__':
timeInit = time.time()
for x in range(20000):
q=Queue()
p = Process(target=generate,args=(q,11111111,99999999,))
p.start()
f=np.append(f,q.get())
p.join()
print(f)
timeStop = time.time()
    print('[TIME EXECUTED] ' + str(timeStop - timeInit) + ' secs')
This took less than 30 seconds on my personal laptop; see if it works for you:
import random
candidates = list(range(10**7, 10**8)) # all numbers from 10000000 to 99999999
random.shuffle(candidates)
result = candidates[:20* 10**6] # take first 20 million
You haven't explained why you're doing all of that overhead. I simply took a random sample from the candidate numbers:
from random import sample
result = sample(
list(range(10**7, 10**8)),
2*10**7
)
51 seconds on my laptop, with interference from other jobs.
I just ran a more controlled test on both solutions. The one in this post took 48.5 seconds; the one from naicolas took 81.6 seconds, likely due to the extra list creation.
I hope I understood your idea correctly. The random numbers you are trying to generate are actually a bit tricky: basically, we are looking for a set of unique (non-repeating) but random numbers. We cannot simply draw them from a uniform distribution, because there is no guarantee that the numbers are unique.
There are two possible algorithms. The first is to generate many more random numbers than needed and remove the repeated ones. For instance:
import numpy as np
N = 20_000_000
L0 = 11_111_111 # legitimate int in Python
L1 = L0 * 9
not_enough_unique = True
while not_enough_unique:
X = np.random.uniform(L0, L1, int(N * 2)).astype(int)
X_unique = np.unique(X) # remove repeated numbers
not_enough_unique = len(X_unique) < N
random_numbers = X_unique[:N]
np.random.shuffle(random_numbers)
There is also another, more "physics" approach: start with equally spaced numbers and move each number a little bit. The result will not be as random as the first one, but it is much faster and quite fun.
import numpy as np
N = 20_000_000
L0 = 11_111_111 # legitimate int in Python
L1 = L0 * 9
lattice = np.linspace(L0, L1, N) # all numbers have equal spacing
perturbation = np.random.normal(0, 0.4, N)  # move every number left/right a little bit
random_numbers = (lattice + perturbation).astype(int)
# check the minimum distance between two successive numbers,
# i.e. whether all numbers are unique
min_dist = np.abs(np.diff(random_numbers)).min()
print(f"generating random numbers with minimum separation of {min_dist}")
print("(if it is > 1 you are good)")
np.random.shuffle(random_numbers)
(Both algorithms generate the result within 10s on my laptop)
Consider the following problem: Given a set of n intervals and a set of m floating-point numbers, determine, for each floating-point number, the subset of intervals that contain the floating-point number.
This problem has been addressed by constructing an interval tree (also called a range tree or segment tree). Implementations exist for the one-dimensional case, e.g. Python's intervaltree package. Usually, these implementations consider only one or a few floating-point numbers, i.e. a small m above.
In my problem setting, both n and m are extremely large (they come from an image processing problem). Further, I need to consider N-dimensional intervals (cuboids when N=3; I am modeling human brains with the Finite Element Method). I have implemented a simple N-dimensional interval tree in Python, but it runs in a loop and can only take one floating-point number at a time. Can anyone help improve the implementation in terms of efficiency? You can change the data structure freely.
import sys
import time
import numpy as np
# find the index of a satisfying x > a in one dimension
def find_index_smaller(a, x):
idx = np.argsort(a)
ss = np.searchsorted(a, x, sorter=idx)
res = idx[0:ss]
return res
# find the index of a satisfying x < a in one dimension
def find_index_larger(a, x):
return find_index_smaller(-a, -x)
# find the index of a satisfying amin < x < amax in one dimension
def find_intv_at(amin, amax, x):
idx = find_index_smaller(amin, x)
idx2 = find_index_larger(amax[idx], x)
res = idx[idx2]
return res
# find the index of a satisfying amin < x < amax in N dimensions
def find_intv_at_nd(amin, amax, x):
dim = amin.shape[0]
res = np.arange(amin.shape[-1])
for i in range(dim):
idx = find_intv_at(amin[i, res], amax[i, res], x[i])
res = res[idx]
return res
I also have two test examples for sanity check and performance testing:
def demo1():
print ("By default, we do a correctness test")
n_intv = 2
n_point = 2
# generate the test data
point = np.random.rand(3, n_point)
intv_min = np.random.rand(3, n_intv)
intv_max = intv_min + np.random.rand(3, n_intv)*8
print ("point ")
print (point)
print ("intv_min")
print (intv_min)
print ("intv_max")
print (intv_max)
print ("===Indexes of intervals that contain the point===")
for i in range(n_point):
print (find_intv_at_nd(intv_min,intv_max, point[:, i]))
def demo2():
print ("Performance:")
n_points=100
n_intv = 1000000
# generate the test data
points = np.random.rand(n_points, 3)*512
intv_min = np.random.rand(3, n_intv)*512
intv_max = intv_min + np.random.rand(3, n_intv)*8
print ("point.shape = "+str(points.shape))
print ("intv_min.shape = "+str(intv_min.shape))
print ("intv_max.shape = "+str(intv_max.shape))
starttime = time.time()
for point in points:
tmp = find_intv_at_nd(intv_min, intv_max, point)
print("it took this long to run {} points, with {} interva: {}".format(n_points, n_intv, time.time()-starttime))
My ideas would be:
1- Remove np.argsort() from the algorithm: the intervals do not change, so the sorting could be done once in pre-processing (a sketch of this is shown below).
2- Vectorize over x. The algorithm currently runs a loop for each x; it would be nice to get rid of that loop.
Any contribution would be appreciated.
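For idea 1, a minimal sketch of what pre-sorting could look like, computing the sort order of each dimension once and reusing it for every query point (the helper names build_sorted and query are illustrative, not part of the original code):
import numpy as np
def build_sorted(amin, amax):
    # amin, amax: arrays of shape (dim, n_intv); sort each dimension once
    return np.argsort(amin, axis=1), np.argsort(amax, axis=1)
def query(amin, amax, amin_order, amax_order, x):
    dim, n_intv = amin.shape
    alive = np.ones(n_intv, dtype=bool)
    for i in range(dim):
        # intervals whose lower bound lies below x[i]
        lo = np.searchsorted(amin[i], x[i], sorter=amin_order[i])
        below = np.zeros(n_intv, dtype=bool)
        below[amin_order[i][:lo]] = True
        # intervals whose upper bound lies above x[i]
        hi = np.searchsorted(amax[i], x[i], sorter=amax_order[i], side='right')
        above = np.zeros(n_intv, dtype=bool)
        above[amax_order[i][hi:]] = True
        alive &= below & above
    return np.nonzero(alive)[0]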
I want to make a plot where I show several pulses at different times.
I have the list l with the time of each pulse, for example
l=[1.,2.,24.]
and I have the duration d of the pulse, for example d=0.2.
I can do the plot in the following way:
import numpy as np
import matplotlib.pyplot as plt
t=np.linspace(0.,30,1000)
l=[1.,2.,24.]
d=0.2
def pulse(t):
if t<l[0]:
L = 0.
elif l[0]<=t<l[0]+d or l[1]<=t<l[1]+d or l[2]<=t<l[2]+d:
L = 1
else:
L=0.
return L
plt.figure(1)
P = list(map(pulse, t))
plt.plot(t,P)
plt.show()
Of course, if len(l) becomes very large I cannot use this procedure; that is, I cannot write a very long chain of or conditions by hand. How can I write the algorithm in a more concise way?
You can test several different conditions by using a for loop. For example:
def pulse(t):
L = 0
for i in range(len(l)):
if l[i] <= t < l[i] + d:
L += 1
return L
I think you can use numpy indexing for efficiency, together with a for loop over the pulse times instead of an unknown number of if statements.
p = np.zeros(1000)
for i in l:
p[(0<=t-i) & (t-i<d)] = 1
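Plotting then works just as in the question (assuming the same t, l, d and matplotlib import as above):
plt.plot(t, p)
plt.show()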
I want to write simple toy code for the implicitly restarted Lanczos method.
Without implicit restarting the code works perfectly, but when I turn on restarting I cannot get a proper solution.
To my knowledge, the newly constructed w should be orthogonal to all of the new Lanczos vectors. On the first restart the orthogonality is well preserved, but from the second restart onward it breaks down significantly and the program does not find the proper eigenvalues.
I have already spent tens of hours trying to fix it and have almost given up. Here is my Python code.
"""
Author: Sunghwan Choi
Date Created: June 19, 2017
Python Version: 2.7 or 3.5
Reference for Lanczos algorithm
http://www.netlib.org/utk/people/JackDongarra/etemplates/node104.html
Reference for implicit restart
http://www.netlib.org/utk/people/JackDongarra/etemplates/node118.html
"""
import numpy as np
from scipy.sparse.linalg import eigsh
#from scipy.sparse import eye
from scipy.sparse import coo_matrix
from numpy import eye
def clustering(eigvals,eigvecs,tol=1e-2):
ret_eigvals=[]
ret_eigvecs=[]
for i in range(len(eigvals)):
for ret_eigval, ret_eigvec in zip (ret_eigvals,ret_eigvecs):
if (np.abs(eigvals[i]/ret_eigval-1.0)<tol ):
break
else:
ret_eigvals.append(eigvals[i])
ret_eigvecs.append(eigvecs[:,i])
ret_eigvals=np.array(ret_eigvals)
ret_eigvecs=np.array(ret_eigvecs).T
return ret_eigvals,ret_eigvecs
def check_conv(matrix, cal_eigval, cal_eigvec, tol):
indices=[]
for i in range(len(cal_eigval)):
if(np.linalg.norm(matrix.dot(cal_eigvec[:,i]) - cal_eigval[i]*cal_eigvec[:,i])< tol):
indices.append(i)
return indices
################ input
size=1600
max_step=20000
which='SA'
#implicit=False
implicit=True
energy_range=[0.0,6.0]
tol = 1e-5
n_eig=6
n_tol_check=40 # n_tol_check>n_eig ==0
######################
# generate 1D harmonic oscillator
h=0.1
matrix=-5/2*eye(size)
matrix+=4/3*(eye(size,k=1)+eye(size,k=-1))
matrix+=-1/12*(eye(size,k=2)+eye(size,k=-2))
matrix=-0.5*matrix/(h*h)
distance =lambda index: (index-size/2)*h
matrix+=np.diagflat( list(map( lambda i: 0.5*distance(i)**2, range(size))))
# solve eigenvalue problem to check validity
true_eigval,true_eigvec = eigsh(matrix,k=50,which=which)
indices = np.all([true_eigval>energy_range[0], true_eigval<energy_range[1]],axis=0)
true_eigval = true_eigval[indices]
true_eigvec = true_eigvec[:,indices]
#initialize variables
alpha=[]; beta=[]
index_v=0
restart_interval = n_tol_check+n_eig if implicit is not False else max_step
T = np.zeros((restart_interval,restart_interval))
v = np.zeros((size,restart_interval))
#Q=np.eye(restart_interval)
#generate initial vector
np.random.seed(1)
initial_vec = np.random.random(size)
#initial_vec = np.loadtxt("tmp")
w = v[:,index_v] = initial_vec/np.linalg.norm(initial_vec)
init_beta = np.linalg.norm(w)
# start Lanczos i_step
for i_step in range(max_step):
    if (i_step == 0):
v[:,index_v] = w/init_beta
else:
v[:,index_v] = w/T[index_v,index_v-1]
w=matrix.dot(v[:,index_v])
    if (i_step == 0):
w=w-init_beta*v[:,index_v-1]
else:
w=w-T[index_v,index_v-1]*v[:,index_v-1]
T[index_v,index_v]=np.dot(w,v[:,index_v])
w -=T[index_v,index_v]*v[:,index_v]
#check convergence
if ((i_step+1)%n_tol_check==n_eig and i_step>n_eig):
# calculate eigenval of T matrix
cal_eigval, cal_eigvec_= np.linalg.eigh(T[:index_v+1,:index_v+1])
cal_eigvec = np.dot(v[:,:index_v+1],cal_eigvec_)
#check tolerance
conv_indices = check_conv(matrix, cal_eigval, cal_eigvec,tol)
#filter energy_range
indices = np.all([cal_eigval[conv_indices]>energy_range[0], cal_eigval[conv_indices]<energy_range[1]],axis=0)
#check clustering
conv_cal_eigval,conv_cal_eigvec = clustering((cal_eigval[conv_indices])[indices], (cal_eigvec[conv_indices])[indices])
if (len(conv_cal_eigval)>=n_eig):
break
# implicit restarting
if (implicit is True):
Q=np.eye(restart_interval)
# do shift & QR decomposition
indices = np.argsort(np.abs(cal_eigval-np.mean(energy_range)))
for index in indices[n_eig:]:
new_Q,new_R = np.linalg.qr(T-cal_eigval[index]*np.eye(len(T)))
T = np.dot(new_Q.T,np.dot(T,new_Q))
v = np.dot(v,new_Q)
Q = np.dot(Q,new_Q)
w=v[:,n_eig]*T[n_eig,n_eig-1]+w*Q[-1,n_eig-1]
v[:,n_eig:]=0.0
T[:,n_eig:] = 0.0
T[n_eig:,:] = 0.0
#for debug
#print(np.dot(w.T, v))
# reset index
index_v=n_eig-1
index_v+=1
T[index_v,index_v-1]=np.linalg.norm(w)
T[index_v-1,index_v]=np.linalg.norm(w)
else:
print("not converged")
exit(-1)
print ("energy window: (", energy_range[0],",",energy_range[1],")")
print ("true eigenvalue")
print(true_eigval)
print ("eigenvalue from Lanczos w/ implicit restart (",i_step+1,")")
print(conv_cal_eigval)
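For reference, a small diagnostic in the spirit of the commented-out debug print in the restart block, to quantify how far the new w drifts from orthogonality against the kept Lanczos vectors (a sketch; the helper name max_overlap is mine, and w, v, np are the objects from the code above):
def max_overlap(w, V, k):
    # largest |<w, v_i>| over the first k Lanczos vectors (columns of V)
    w_hat = w / np.linalg.norm(w)
    return np.max(np.abs(V[:, :k].T.dot(w_hat)))
In exact arithmetic this should stay near machine precision after every restart; printing it after each restart makes the breakdown visible.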