Sample from an array of Beta distributions in TensorFlow 2 - python

I have an array params with shape (N, 2), where each pair corresponds to a Beta distribution, e.g. (alpha, beta). I need to sample a value from each of these distributions; is there a way to do this without looping in Python? I know I can do this:
u = [tfp.distributions.Beta(a, b).sample() for (a, b) in params]
# Or
u = [np.random.beta(a, b) for (a, b) in params]
but I imagine this would be very slow. I am using tensorflow, tensorflow_probability and numpy, by the way.
EDIT:
Thanks to JST99's answer, I found out that it is possible to pass a vector of parameters to tfp.distributions.Beta as well:
u = tfp.distributions.Beta(params[:, 0], params[:, 1]).sample()
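For illustration, a minimal self-contained sketch of the batched approach (the dummy params array and the shape comments are mine, not from the original post):

import numpy as np
import tensorflow_probability as tfp

params = np.random.uniform(1.0, 5.0, size=(100, 2)).astype(np.float32)
dist = tfp.distributions.Beta(params[:, 0], params[:, 1])  # a batch of 100 Beta distributions
u = dist.sample()       # shape (100,): one draw per distribution
many = dist.sample(10)  # shape (10, 100): ten draws per distribution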

You can separate out the alpha and the beta parameters using zip, then use np.random.beta in a vectorized fashion.
alpha, beta = zip(*params)
u = np.random.beta(alpha, beta)
Here is an example with some dummy parameters.
>>> params = [(i, i) for i in range(1, 100)]
>>> alpha, beta = zip(*params)
>>> u = np.random.beta(alpha, beta)
>>> u
array([0.88027947, 0.22507079, 0.46668932, 0.65091097, 0.62278597,
0.37450051, 0.53237829, 0.5589561 , 0.55190015, 0.64352003,
0.61396155, 0.58559066, 0.59525124, 0.49827492, 0.45065234,
0.53716919, 0.52950708, 0.41751582, 0.44912503, 0.57043946,
0.51909876, 0.34834858, 0.55753122, 0.41586101, 0.46762533,
0.4905744 , 0.53927006, 0.5234163 , 0.56215437, 0.38265575,
0.4940874 , 0.45066854, 0.53654453, 0.40955841, 0.49478651,
0.52974175, 0.43218663, 0.49791192, 0.47176042, 0.46717939,
0.45576387, 0.58941562, 0.44112651, 0.45401485, 0.48990107,
0.5640564 , 0.46720441, 0.439157 , 0.56098725, 0.43914691,
0.44654769, 0.5639682 , 0.41962566, 0.53689739, 0.46501042,
0.52775508, 0.55992535, 0.4948104 , 0.54856768, 0.4711496 ,
0.44694159, 0.54769584, 0.51792418, 0.48669042, 0.51969972,
0.51599904, 0.4818758 , 0.47555456, 0.47581746, 0.43417686,
0.49156854, 0.51359563, 0.52830314, 0.50988281, 0.47357164,
0.47619267, 0.52755645, 0.50141785, 0.48280575, 0.47817313,
0.47954096, 0.53885494, 0.5218641 , 0.50253071, 0.58804552,
0.50788384, 0.49429312, 0.47677202, 0.45542669, 0.47169082,
0.58838068, 0.4992328 , 0.5098452 , 0.44761298, 0.45971338,
0.4841432 , 0.47673295, 0.48205439, 0.4799415 ])
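Incidentally, if params is already a NumPy array of shape (N, 2) rather than a list of tuples, the zip step can be skipped entirely (a small variant of the same method):

params = np.asarray(params)  # ensure an (N, 2) ndarray
u = np.random.beta(params[:, 0], params[:, 1])  # one sample per row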

Related

How can I store the optimized parameters corresponding to a local maximum of a function computed inside multiple loops?

I want to find the optimal parameters corresponding to a local maximum of a 2D array for each iteration of a third parameter in an outer loop.
Nstep1 = 5
l2 = linspace(0.01, 2, Nstep1)
EP_opt = zeros(Nstep1)
Nstep = 5
for l in range(Nstep1):
    Vp = zeros((Nstep, Nstep))
    g1 = linspace(0.1, 0.5, Nstep)
    g2 = linspace(0.1, 0.5, Nstep)
    for j in range(Nstep):
        for k in range(Nstep):
            def Ep(pr):
                a, b, c = pr
                return -(a*l2[l] + b*g1[j]*g1[j] - c*g2[k])
            x0 = [0, 1, 1]
            bnds = ((0, 1), (0, 1), (0, 1))
            res = minimize(Ep, x0, bounds=bnds)
            # Vp[j,k,l] = -res.fun  # will it help to find local maximum of `Vp` for each `l`?
            Vp[j, k] = -res.fun
            x1 = res.x
    EP_opt[l] = Vp.max()  # local maximum for each `l2`
How do I find the optimized parameters (x1[0], x1[1], x1[2], g1[j] and g2[k]) corresponding to Vp.max() for each l2? Thanks.
Your optimisation doesn't make sense, because, given your objective function, the best parameters remain the same for every index of l. Run this:
import numpy as np
from scipy.optimize import minimize

def Ep(pr: np.ndarray, l: float) -> float:
    a, b, c, g1, g2 = pr
    return -(a*l + b*g1*g1 - c*g2)

def main() -> None:
    Nstep1 = 5
    Nstep = 5
    l2 = np.linspace(0.01, 2, Nstep1)  # 5-vector of coefficients
    g1 = np.linspace(0.1, 0.5, Nstep)  # 5-vector of coefficients
    g2 = g1                            # 5-vector of coefficients

    # For every l, j, k and parameters x1, x2, x3 there is a value of Ep.
    # The optimisation parameters exclude l and include j, k, x1, x2, x3;
    # over those optimisation parameters, there is some maximum Ep.
    initial = np.zeros(5)
    for l, l_val in enumerate(l2):
        res = minimize(
            Ep, x0=initial,
            bounds=(
                (0, 1), (0, 1), (0, 1),
                (g1[0], g1[-1]),
                (g2[0], g2[-1]),
            ),
            args=(l_val,),
        )
        print(res)

if __name__ == '__main__':
    main()
It will show you that in all five cases of l, the best parameter set (a, b, c, g1, g2) is
[1. , 1. , 0. , 0.5, 0.1]
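If you nevertheless want to record which (x1, g1[j], g2[k]) produced Vp.max() for each l within the original loop structure, here is a minimal sketch (the tracking logic is an illustration added here, keeping the question's variable names):

import numpy as np
from scipy.optimize import minimize

Nstep1, Nstep = 5, 5
l2 = np.linspace(0.01, 2, Nstep1)
g1 = np.linspace(0.1, 0.5, Nstep)
g2 = np.linspace(0.1, 0.5, Nstep)
EP_opt = np.zeros(Nstep1)
best_params = []  # one entry per l: (x, g1[j], g2[k]) at the maximum

for l in range(Nstep1):
    best_val, best = -np.inf, None
    for j in range(Nstep):
        for k in range(Nstep):
            def Ep(pr, j=j, k=k, l=l):  # bind loop indices via default args
                a, b, c = pr
                return -(a*l2[l] + b*g1[j]*g1[j] - c*g2[k])
            res = minimize(Ep, [0, 1, 1], bounds=((0, 1),)*3)
            if -res.fun > best_val:
                best_val, best = -res.fun, (res.x, g1[j], g2[k])
    EP_opt[l] = best_val
    best_params.append(best)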

Constrained optimisation in scipy enters restricted area

I am trying to solve a multivariate optimisation problem using Python with scipy.
Let me define the environment I am working in:
searched parameters: lambda and gamma (d entries each) and alpha and beta (d² entries each), as in the code below [formula image from the original post omitted],
and the problem itself: maximise logL subject to linear inequality constraints of the form lambda_i + gamma_i·(tmax + 1) ≥ 0 [formula image from the original post omitted].
(In my case the logL function is complicated, so I will substitute it with a trivial one that produces a similar issue. Therefore, in this example I am not using the function parameters fully, but I include them for consistency with the problem.)
I am using the following convention for storing the parameters in a single, flat array: x = (lambda_1, gamma_1, ..., lambda_d, gamma_d, alpha, beta) [formula image from the original post omitted].
Here is the script that was supposed to solve my problem:
import numpy as np
from scipy import optimize as opt
from pprint import pprint
from typing import List

_d = 2
_tmax = 500.0
_T = [[1, 2, 3, 4, 5], [6, 7, 8, 9]]

def logL(args: List[float], T: List[List[float]], tmax: float):
    # simplified - normally T would be used in the computation; here it only determines the dimension
    d = len(T)
    # trivially forcing args to go 'out of constraints'
    return -sum([(args[2 * i] + args[2 * i + 1] * tmax)**2 for i in range(d)])

def gradientForIthDimension(i, d, t_max):
    g = np.zeros(2 * d + 2 * d**2)
    g[2 * i] = 1.0
    g[2 * i + 1] = t_max + 1.0
    return g

def zerosWithOneOnJth(j, l):
    r = [0.0 for _ in range(l)]
    r[j] = 1.0
    return r

new_lin_const = {
    'type': 'ineq',
    'fun': lambda x: np.array(
        [x[2 * i] + x[2 * i + 1] * (_tmax + 1.0) for i in range(_d)]
        + [x[j] for j in range(2*_d + 2*_d**2) if j not in [2 * i + 1 for i in range(_d)]]
    ),
    'jac': lambda x: np.array(
        [gradientForIthDimension(i, _d, _tmax) for i in range(_d)]
        + [zerosWithOneOnJth(j, 2*_d + 2*_d**2) for j in range(2*_d + 2*_d**2) if j not in [2 * i + 1 for i in range(_d)]]
    )
}
and finally the optimisation:
logArgs = [2 for _ in range(2 * (_d ** 2) + 2 * _d)]
# additional bounds, not mentioned in the problem, but assuming a priori knowledge
bds = [(0.0, 10.0) for _ in range(2 * (_d ** 2) + 2 * _d)]
for i in range(_d):
    bds[2*i + 1] = (-10.0, 10.0)

res = opt.minimize(lambda x, args: -logL(x, args[0], args[1]),
                   constraints=new_lin_const, x0=logArgs, args=([_T, _tmax]),
                   method='SLSQP', options={'disp': True}, bounds=bds)
But when checking the result, I am getting:
pprint(res)
# fun: 2.2124712864600578e-05
# jac: array([0.00665204, 3.32973738, 0.00665204, 3.32973738, 0. ,
# 0. , 0. , 0. , 0. , 0. ,
# 0. , 0. ])
# message: 'Optimization terminated successfully'
# nfev: 40
# nit: 3
# njev: 3
# status: 0
# success: True
# x: array([ 1.66633206, -0.00332601, 1.66633206, -0.00332601, 2. ,
# 2. , 2. , 2. , 2. , 2. ,
# 2. , 2. ])
in particular:
print(res.x[0] + res.x[1]*(501.0))
# -3.2529534621517087e-13
so the result is outside the constrained area...
I was trying to follow the documentation, but it does not work for me. I would be happy to hear any advice on what is wrong.
First of all, please stop posting the same question multiple times. This question is basically the same as your other one here. Next time, just edit your question instead of posting a new one.
That being said, your code is needlessly complicated given that your optimization problem is quite simple. It should be your goal that reading your code is as simple as reading the mathematical optimization problem. A more than welcome side effect is that it's much easier to debug your code when it's not working as expected.
For this purpose, it's highly recommended that you make yourself familiar with numpy and its vectorized operations (as already mentioned in the comments of your previous question). For instance, you don't need loops to implement your objective, the constraint function or the jacobian. Packing all the optimization variables into one large vector x is the right approach. However, you can simply unpack x into its components lambda, gamma, alpha and beta again. This makes it easier for you to write your functions and easier to read, too.
Well, instead of cutting my way through your code, you can find a simplified and working implementation below. By evaluating the functions and comparing the outputs to the evaluated functions in your code snippet, you should get an idea of what's going wrong on your side.
Edit: It seems like most of the algorithms under the hood of scipy.minimize fail to converge to a local minimizer while preserving strict feasibility of the constraints. If you're open to using another package, I'd recommend using the state-of-the-art NLP solver Ipopt. You can use it by means of the cyipopt package and thanks to its minimize_ipopt method, you can use it similar to scipy.optimize.minimize:
import numpy as np
#from scipy.optimize import minimize
from cyipopt import minimize_ipopt as minimize

d = 2
tmax = 500.0
N = 2*d + 2*d**2

def logL(x, d, tmax):
    # split the packed variable x = (lambda_, gamma, alpha, beta)
    lambda_, gamma, alpha, beta = np.split(x, np.cumsum([d, d, d**2]))
    return np.sum((lambda_ + tmax*gamma)**2)

def con_fun(x, d, tmax):
    # split the packed variable x = (lambda_, gamma, alpha, beta)
    lambda_, gamma, alpha, beta = np.split(x, np.cumsum([d, d, d**2]))
    return lambda_ + (tmax + 1.0) * gamma

def con_jac(x, d, tmax):
    jac = np.block([np.eye(d), (tmax + 1.0)*np.eye(d), np.zeros((d, 2*d**2))])
    return jac

constr = {
    'type': 'ineq',
    'fun': lambda x: con_fun(x, d, tmax),
    'jac': lambda x: con_jac(x, d, tmax)
}

# d bounds for lambda, d for gamma, 2*d**2 for alpha and beta (N in total)
bounds = [(0.0, 10.0)]*d + [(-10.0, 10.0)]*d + [(0.0, 10.0)]*(2*d**2)
x0 = np.full(N, 2.0)

res = minimize(lambda x: logL(x, d, tmax), x0=x0, constraints=constr,
               method='SLSQP', options={'disp': True}, bounds=bounds)
print(res)
yields
******************************************************************************
This program contains Ipopt, a library for large-scale nonlinear optimization.
Ipopt is released as open source code under the Eclipse Public License (EPL).
For more information visit https://github.com/coin-or/Ipopt
******************************************************************************
fun: 0.00014085582293562834
info: {'x': array([ 2.0037865 , 2.0037865 , -0.00399079, -0.00399079, 2.00700641,
2.00700641, 2.00700641, 2.00700641, 2.00700641, 2.00700641,
2.00700641, 2.00700641]), 'g': array([0.00440135, 0.00440135]), 'obj_val': 0.00014085582293562834, 'mult_g': array([-0.01675576, -0.01675576]), 'mult_x_L': array([5.00053270e-08, 5.00053270e-08, 1.00240003e-08, 1.00240003e-08,
4.99251018e-08, 4.99251018e-08, 4.99251018e-08, 4.99251018e-08,
4.99251018e-08, 4.99251018e-08, 4.99251018e-08, 4.99251018e-08]), 'mult_x_U': array([1.25309309e-08, 1.25309309e-08, 1.00160027e-08, 1.00160027e-08,
1.25359789e-08, 1.25359789e-08, 1.25359789e-08, 1.25359789e-08,
1.25359789e-08, 1.25359789e-08, 1.25359789e-08, 1.25359789e-08]), 'status': 0, 'status_msg': b'Algorithm terminated successfully at a locally optimal point, satisfying the convergence tolerances (can be specified by options).'}
message: b'Algorithm terminated successfully at a locally optimal point, satisfying the convergence tolerances (can be specified by options).'
nfev: 15
nit: 14
njev: 16
status: 0
success: True
x: array([ 2.0037865 , 2.0037865 , -0.00399079, -0.00399079, 2.00700641,
2.00700641, 2.00700641, 2.00700641, 2.00700641, 2.00700641,
2.00700641, 2.00700641])
and evaluating the constraint function at the found solution yields
In [17]: print(constr['fun'](res.x))
[0.00440135 0.00440135]
Consequently, the constraints are fulfilled.
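A side note for those staying within scipy: SLSQP enforces inequality constraints only up to its convergence tolerance, so violations on the order of 1e-13, as in the question, are numerical rather than logical. Tightening the documented ftol option may shrink them, though it cannot guarantee strict feasibility (a sketch against the question's code):

res = opt.minimize(lambda x, args: -logL(x, args[0], args[1]),
                   x0=logArgs, args=([_T, _tmax]), method='SLSQP',
                   constraints=new_lin_const, bounds=bds,
                   options={'disp': True, 'ftol': 1e-12})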

Incorrect output when calculating the compact SVD

I am trying to calculate the truncated SVD for a given matrix. I have written the code, but when I test it, it produces incorrect output. I'm not sure where I am going wrong. I think I may be calculating my pos_v incorrectly, but I can't seem to find the issue. Can anyone give any guidance?
Here is my code:
import numpy as np
import scipy as sp
import scipy.linalg  # makes sp.linalg available

def compact_svd(A, tol=1e-6):
    """Compute the truncated SVD of A.

    Parameters:
        A ((m,n) ndarray): The matrix (of rank r) to factor.
        tol (float): The tolerance for excluding singular values.
    Returns:
        ((m,r) ndarray): The orthonormal matrix U in the SVD.
        ((r,) ndarray): The singular values of A as a 1-D array.
        ((r,n) ndarray): The orthonormal matrix V^H in the SVD.
    """
    lambda_, v = sp.linalg.eig(A.conj().T @ A)
    lambda_ = lambda_.real
    sigma = np.sqrt(lambda_)
    indices = np.argsort(sigma)[::-1]
    v = v[:, indices]
    r = 0
    for i in range(len(sigma)):
        if sigma[i] > tol:
            r = r + 1
    pos_sigma = sigma[:r]
    pos_v = v[:, :r]
    U = (A @ pos_v) / pos_sigma
    return U, pos_sigma, pos_v.conj().T
Here is my test matrix:
A = np.array([[9,9,9,3,2,9,3,7,7,8],
[4,4,7,4,2,4,8,7,1,8],
[1,4,7,4,5,6,8,4,1,6],
[5,5,1,8,9,4,9,4,2,7],
[7,7,7,9,4,7,4,3,7,1]],dtype = float)
print(compact_svd(A))
The correct output:
(array([[ 0.54036027+0.j, 0.58805563+0.j, -0.29423603+0.j,
-0.4346745 +0.j, -0.29442248+0.j],
[ 0.41227593+0.j, -0.21929894+0.j, -0.51747179+0.j,
0.08375491+0.j, 0.71214086+0.j],
[ 0.38514303+0.j, -0.32015959+0.j, -0.24745912+0.j,
0.60060756+0.j, -0.57201156+0.j],
[ 0.43356274+0.j, -0.61204283+0.j, 0.41057641+0.j,
-0.51216171+0.j, -0.080897 +0.j],
[ 0.44914089+0.j, 0.35916564+0.j, 0.64485588+0.j,
0.42544582+0.j, 0.26912684+0.j]]),
array([39.03360665, 11.91940614, 9.3387396 , 5.38285176, 3.33439025]),
array([[ 0.31278916-0.j, 0.34239004-0.j, 0.35924746-0.j,
0.31566457-0.j, 0.24413875-0.j, 0.35101654-0.j,
0.35095554-0.j, 0.28925585-0.j, 0.22009374-0.j,
0.34370454-0.j],
[ 0.29775734-0.j, 0.21717625-0.j, 0.28679345-0.j,
-0.17261926-0.j, -0.41403132-0.j, 0.21480395-0.j,
-0.5556673 -0.j, -0.00587411-0.j, 0.40832611-0.j,
-0.24296833-0.j],
[ 0.17147953-0.j, 0.09198514-0.j, -0.32960263-0.j,
0.55102537-0.j, 0.36556324-0.j, -0.00497598-0.j,
-0.07790604-0.j, -0.33140639-0.j, 0.26883294-0.j,
-0.47752981-0.j],
[-0.47542292-0.j, -0.14068908-0.j, 0.62131114-0.j,
0.21645498-0.j, -0.11266769-0.j, 0.17761373-0.j,
0.23467192-0.j, -0.15350902-0.j, -0.07515751-0.j,
-0.43906049-0.j],
[ 0.33174054-0.j, -0.18290668-0.j, 0.04021533-0.j,
0.43552649-0.j, -0.50269662-0.j, -0.50174342-0.j,
0.17580464-0.j, 0.33582599-0.j, -0.05960136-0.j,
-0.1162055 -0.j]]))
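One detail worth checking (an observation added for reference, not a verified fix): indices is used to reorder the columns of v, but sigma itself is never reordered, so pos_sigma = sigma[:r] may not line up with the reordered eigenvectors. A minimal sketch of the adjustment:

indices = np.argsort(sigma)[::-1]
sigma = sigma[indices]  # keep the singular values aligned with the reordered columns of v
v = v[:, indices]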

Python: Replacing every imaginary value in an array by a random value

I have the following array:
array([[ 0.01454911+0.j, 0.01392502+0.00095922j,
0.00343284+0.00036535j, 0.00094982+0.0019255j ,
0.00204887+0.0039264j , 0.00112154+0.00133549j, 0.00060697+0.j],
[ 0.02179418+0.j, 0.01010125-0.00062646j,
0.00086327+0.00495717j, 0.00204473-0.00584213j,
0.00159394-0.00678094j, 0.00121372-0.0043044j , 0.00040639+0.j]])
I need a way to replace just the imaginary components with random values generated by:
numpy.random.vonmises(mu, kappa, size=size)
The resulting array needs to be in the same form as the first one.
Loop over the numbers and just set them to a value you like. The parameters mu and kappa for the numpy.random.vonmises function need to be defined; placeholder values are used in the example below.
import numpy as np

mu, kappa = 0.0, 1.0  # placeholders; set these to the von Mises parameters you need

data = np.array([[0.01454911+0.j, 0.01392502+0.00095922j,
                  0.00343284+0.00036535j, 0.00094982+0.0019255j,
                  0.00204887+0.0039264j, 0.00112154+0.00133549j, 0.00060697+0.j],
                 [0.02179418+0.j, 0.01010125-0.00062646j,
                  0.00086327+0.00495717j, 0.00204473-0.00584213j,
                  0.00159394-0.00678094j, 0.00121372-0.0043044j, 0.00040639+0.j]])

def setRandomImag(c):
    # one von Mises draw per element of the row
    c.imag = np.random.vonmises(mu, kappa, size=c.shape)
    return c

data = [setRandomImag(i) for i in data]
n_epochs = 2
n_freqs = 7

# form giving parameters for the array
data2 = np.zeros((n_epochs, n_freqs), dtype=complex)
for i in range(0, n_epochs):
    data2[i] = np.real(data[i]) + np.random.vonmises(mu, kappa) * complex(0, 1)
This gives each epoch a single shared imaginary value. Not exactly what I was asking for, but it solves my problem.
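A small tweak to the loop above would draw one value per frequency instead of one per epoch (an adjusted sketch, not from the original post):

for i in range(n_epochs):
    # size=n_freqs gives a distinct imaginary value for every frequency bin
    data2[i] = np.real(data[i]) + 1j * np.random.vonmises(mu, kappa, size=n_freqs)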
Try using this approach: store your numbers in a 2-D structure holding the real part and the imaginary part separately, then replace the imaginary part with the randomly chosen numbers, as sketched below.
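A minimal self-contained sketch of that idea (the mu and kappa values here are placeholders, not from the original post):

import numpy as np

mu, kappa = 0.0, 4.0  # placeholder von Mises parameters
data = np.array([[0.1 + 0.2j, 0.3 - 0.1j],
                 [0.5 + 0.0j, 0.2 + 0.4j]])

# keep the real part, draw a fresh imaginary part for every element
new_imag = np.random.vonmises(mu, kappa, size=data.shape)
result = data.real + 1j * new_imag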

How should I multiply scipy.fftpack output vectors together?

The scipy.fftpack.rfft function returns the DFT as a vector of floats, alternating between the real and imaginary parts. This means that to multiply two DFTs together (for convolution) I will have to do the complex multiplication "manually", which seems quite tricky. This must be something people do often - I presume/hope there is a simple trick to do this efficiently that I haven't spotted?
Basically I want to fix this code so that both methods give the same answer:
import numpy as np
import scipy.fftpack as sfft
X = np.random.normal(size = 2000)
Y = np.random.normal(size = 2000)
NZ = np.fft.irfft(np.fft.rfft(Y) * np.fft.rfft(X))
SZ = sfft.irfft(sfft.rfft(Y) * sfft.rfft(X)) # This multiplication is wrong
NZ
array([-43.23961083, 53.62608086, 17.92013729, ..., -16.57605207,
8.19605764, 5.23929023])
SZ
array([-19.90115323, 16.98680347, -8.16608202, ..., -47.01643274,
-3.50572376, 58.1961597 ])
N.B. I am aware that fftpack contains a convolve function, but I only need to fft one half of the transform - my filter can be fft'd once in advance and then used over and over again.
You don't have to flip back to np.float64 and hstack. You can create an empty destination array, the same shape as sfft.rfft(Y) and sfft.rfft(X), then create a np.complex128 view of it and fill this view with the result of the multiplication. This will automatically fill the destination array as wanted.
If I take your example again:
import numpy as np
import scipy.fftpack as sfft

X = np.random.normal(size=2000)
Y = np.random.normal(size=2000)

Xf = sfft.rfft(X)                      # packed float format
Xf_cpx = Xf[1:-1].view(np.complex128)  # complex view of the paired entries

Yf = sfft.rfft(Y)
Yf_cpx = Yf[1:-1].view(np.complex128)

Zf = np.empty(X.shape)
Zf_cpx = Zf[1:-1].view(np.complex128)

Zf[0] = Xf[0]*Yf[0]
# the [...] is important to use the view as a reference to Zf and not overwrite it
Zf_cpx[...] = Xf_cpx * Yf_cpx
Zf[-1] = Xf[-1]*Yf[-1]

Z = sfft.irfft(Zf)
and that's it!
You can use a simple if statement if you want your code to be more general and handle odd lengths as explained in Jaime's answer.
Here is a function that does what you want:
def rfft_mult(a, b):
    """Multiplies two outputs of scipy.fftpack.rfft"""
    assert a.shape == b.shape
    c = np.empty(a.shape)
    c[..., 0] = a[..., 0]*b[..., 0]
    # To comply with the rfft support of multi-dimensional arrays
    ar = a.reshape(-1, a.shape[-1])
    br = b.reshape(-1, b.shape[-1])
    cr = c.reshape(-1, c.shape[-1])
    # Note that we cannot use ellipses to achieve that because of
    # the way `view` works. If there are many dimensions, one should
    # consider manually performing the complex multiplication with slices.
    if c.shape[-1] & 0x1:  # if odd
        for i in range(len(ar)):
            ac = ar[i, 1:].view(np.complex128)
            bc = br[i, 1:].view(np.complex128)
            cc = cr[i, 1:].view(np.complex128)
            cc[...] = ac*bc
    else:
        for i in range(len(ar)):
            ac = ar[i, 1:-1].view(np.complex128)
            bc = br[i, 1:-1].view(np.complex128)
            cc = cr[i, 1:-1].view(np.complex128)
            cc[...] = ac*bc
        # only the even case has a lone real entry at the end
        c[..., -1] = a[..., -1]*b[..., -1]
    return c
You can take a view of a slice of your return array, e.g.:
>>> scipy.fftpack.fft(np.arange(8))
array([ 28.+0.j , -4.+9.65685425j, -4.+4.j ,
-4.+1.65685425j, -4.+0.j , -4.-1.65685425j,
-4.-4.j , -4.-9.65685425j])
>>> a = scipy.fftpack.rfft(np.arange(8))
>>> a
array([ 28. , -4. , 9.65685425, -4. ,
4. , -4. , 1.65685425, -4. ])
>>> a.dtype
dtype('float64')
>>> a[1:-1].view(np.complex128) # First and last entries are real
array([-4.+9.65685425j, -4.+4.j , -4.+1.65685425j])
You will need to handle even or odd sized FFTs differently:
>>> scipy.fftpack.fft(np.arange(7))
array([ 21.0+0.j , -3.5+7.26782489j, -3.5+2.79115686j,
-3.5+0.79885216j, -3.5-0.79885216j, -3.5-2.79115686j,
-3.5-7.26782489j])
>>> a = scipy.fftpack.rfft(np.arange(7))
>>> a
array([ 21. , -3.5 , 7.26782489, -3.5 ,
2.79115686, -3.5 , 0.79885216])
>>> a.dtype
dtype('float64')
>>> a[1:].view(np.complex128)
array([-3.5+7.26782489j, -3.5+2.79115686j, -3.5+0.79885216j])
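For what it's worth, the newer scipy.fft interface returns ordinary complex arrays, which sidesteps the packed-float format entirely (a sketch using the modern API rather than scipy.fftpack):

import numpy as np
from scipy.fft import rfft, irfft

X = np.random.normal(size=2000)
Y = np.random.normal(size=2000)

# rfft returns complex bins directly, so the product is a plain complex multiply
Z = irfft(rfft(X) * rfft(Y), n=2000)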
