Using scipy.optimize.broyden2 with a function which receives several parameters - python

I want to use scipy.optimize.broyden2; the problem is that my function doesn't just take an array as its argument, but several more parameters as well.
What should I do? Define global variables?
These are my functions:
def F(S, I, R, alpha, beta):
    return [- beta * S * I, beta * S * I - alpha * R, alpha * R]

def euler(xi, xf, m, F, initial_values, alpha, beta):
    h = (xf - xi) / m
    t = np.linspace(xi, xf, m + 1)
    t = np.delete(t, 0)
    vect_y = [initial_values[0], initial_values[1], initial_values[2]]
    for i in range(len(t)):
        y_actual = [sum(x) for x in zip(vect_y, [element * h for element in F(vect_y[0], vect_y[1], vect_y[2], alpha, beta)])]
        vect_y = y_actual
    return vect_y
I want to use broyden2 with euler, where x0 would be initial_values.

As was suggested in comments, you can use an auxiliary function that unpacks the list of arguments using *list syntax, and calls your main function with that. A minimal example is shown below, where f is the function whose root is being found.
from scipy.optimize import broyden2

def f(x, y, z):
    return [x - 1, y - 2, z - 3]

broyden2(lambda X: f(*X), [0, 0, 0])
Output: array([ 1., 2., 3.])
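Applied to the euler function from the question, the same pattern might look like the sketch below. Since the question does not say which equations the root-finder should satisfy, the target vector, time span, step count and parameter values here are hypothetical placeholders.
from scipy.optimize import broyden2

alpha, beta = 0.1, 0.5     # hypothetical model parameters
target = [0.2, 0.3, 0.5]   # hypothetical values euler() should reach

def residual(initial_values):
    # Wrap euler() so broyden2 only sees the unknown initial values;
    # every other parameter is closed over.
    end_values = euler(0.0, 10.0, 100, F, initial_values, alpha, beta)
    return [a - b for a, b in zip(end_values, target)]

roots = broyden2(residual, [0.9, 0.05, 0.05])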

Related

Non-hashable static arguments are not supported in Jax when using vmap

This is related to this question. After some work, I managed to narrow it down to one last error. The code looks like this now.
import jax.numpy as jnp
from jax import grad, jit, value_and_grad
from jax import vmap, pmap
from jax import random
import jax
from jax import lax
from jax import custom_jvp


def p_tau(z, tau, alpha=1.5):
    return jnp.clip((alpha - 1) * z - tau, 0) ** (1 / (alpha - 1))


def get_tau(tau, tau_max, tau_min, z_value):
    return lax.cond(z_value < 1,
                    lambda _: (tau, tau_min),
                    lambda _: (tau_max, tau),
                    operand=None
                    )


def body(kwargs, x):
    tau_min = kwargs['tau_min']
    tau_max = kwargs['tau_max']
    z = kwargs['z']
    alpha = kwargs['alpha']

    tau = (tau_min + tau_max) / 2
    z_value = p_tau(z, tau, alpha).sum()
    taus = get_tau(tau, tau_max, tau_min, z_value)
    tau_max, tau_min = taus[0], taus[1]
    return {'tau_min': tau_min, 'tau_max': tau_max, 'z': z, 'alpha': alpha}, None


@jax.partial(jax.jit, static_argnums=(2,))
def map_row(z_input, alpha, T):
    z = (alpha - 1) * z_input

    tau_min, tau_max = jnp.min(z) - 1, jnp.max(z) - z.shape[0] ** (1 - alpha)
    result, _ = lax.scan(body, {'tau_min': tau_min, 'tau_max': tau_max, 'z': z, 'alpha': alpha}, xs=None,
                         length=T)
    tau = (result['tau_max'] + result['tau_min']) / 2
    result = p_tau(z, tau, alpha)
    return result / result.sum()


@jax.partial(jax.jit, static_argnums=(1, 3,))
def _entmax(input, axis=-1, alpha=1.5, T=20):
    result = vmap(jax.partial(map_row, alpha, T), axis)(input)
    return result


@jax.partial(custom_jvp, nondiff_argnums=(1, 2, 3,))
def entmax(input, axis=-1, alpha=1.5, T=10):
    return _entmax(input, axis, alpha, T)


@jax.partial(jax.jit, static_argnums=(0, 2,))
def _entmax_jvp_impl(axis, alpha, T, primals, tangents):
    input = primals[0]
    Y = entmax(input, axis, alpha, T)
    gppr = Y ** (2 - alpha)
    grad_output = tangents[0]
    dX = grad_output * gppr
    q = dX.sum(axis=axis) / gppr.sum(axis=axis)
    q = jnp.expand_dims(q, axis=axis)
    dX -= q * gppr
    return Y, dX


@entmax.defjvp
def entmax_jvp(axis, alpha, T, primals, tangents):
    return _entmax_jvp_impl(axis, alpha, T, primals, tangents)


import numpy as np

input = jnp.array(np.random.randn(64, 10)).block_until_ready()
weight = jnp.array(np.random.randn(64, 10)).block_until_ready()

def toy(input, weight):
    return (weight * entmax(input, 0, 1.5, 20)).sum()

jax.jit(value_and_grad(toy))(input, weight)
This leads to what I hope is the final error:
Non-hashable static arguments are not supported, as this can lead to unexpected cache-misses. Static argument (index 2) of type <class 'jax.interpreters.batching.BatchTracer'> for function map_row is non-hashable.
This is very strange, as I think I have marked axis as static everywhere it appears, yet it still tells me that it is traced.
When you write a partial function with positional arguments, those arguments are passed first. So this:
jax.partial(map_row, alpha, T)
is essentially equivalent to this:
lambda z_input: map_row(alpha, T, z_input)
Notice the incorrect order of the arguments – this is what's causing your error: you're passing z_input, a non-hashable tracer, to an argument that is expected to be static.
You can fix this by replacing the partial statement above with:
lambda z: map_row(z, alpha, T)
and then your code will run correctly.
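To see the binding order concretely, here is a small sketch with functools.partial (which, to my understanding, is what jax.partial re-exports); map_row_demo is a hypothetical stand-in for map_row:
from functools import partial

def map_row_demo(z_input, alpha, T):
    return ("z_input:", z_input, "alpha:", alpha, "T:", T)

# Positional arguments given to partial fill the leftmost parameters, so the
# argument supplied at call time lands in the T slot:
print(partial(map_row_demo, 1.5, 20)("my_z"))
# -> ('z_input:', 1.5, 'alpha:', 20, 'T:', 'my_z')

# A lambda keeps z_input in its intended position:
print((lambda z: map_row_demo(z, 1.5, 20))("my_z"))
# -> ('z_input:', 'my_z', 'alpha:', 1.5, 'T:', 20)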

How to solve the differential equation?

I want to solve the differential equation
dydt = r * (Y ** p) * (1 - (Y / K) ** alpha)
I tried to write the code like this:
def func(Y, r, p, K, alpha):
    dydt = r * (Y ** p) * (1 - (Y / K) ** alpha)
    return dydt

t = np.linspace(0, len(df), len(df))

# I used 1 to initialize my parameters (is there a better way?)
r = 1; p = 1; K = 1; alpha = 1
y0 = r, p, K, alpha

ret = odeint(func, y0, t)
but when I try to execute the third block I get
TypeError: func() missing 3 required positional arguments: 'p', 'K', and 'alpha'
I then tried ret = odeint(func, y0, t, args=(p, K, alpha)),
but this resulted in three straight lines, when the equation is supposed to produce a logistic curve.
How can I pass r as an argument, why do I need to specify the arguments, and how can I get the expected shape (a logistic curve)?
Note, to understand the parameters: Y represents the cumulative number of cases at time t, r is the growth rate at the early stage, and K is the final epidemic size. p ∈ [0, 1] is a parameter that allows the model to capture different growth profiles, including constant incidence (p = 0), sub-exponential growth (0 < p < 1) and exponential growth (p = 1).
def func(Y, t, r, p, K, alpha):
    return r * (Y ** p) * (1 - (Y / K) ** alpha)
You must include the t parameter, since odeint calls the right-hand-side function as func(y, t, ...).
y0 = 0.5                # Your initial condition.
params = (1, 1, 1, 1)   # r, p, K, alpha
sol = odeint(func, y0, t, args=params)
See the SciPy odeint documentation.
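A self-contained sketch of the whole thing might look like the following; the parameter values here are illustrative placeholders, not fitted values, chosen only so the logistic shape is visible:
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint

def func(Y, t, r, p, K, alpha):
    return r * (Y ** p) * (1 - (Y / K) ** alpha)

t = np.linspace(0, 60, 600)
y0 = 0.5                                # initial cumulative number of cases
r, p, K, alpha = 0.3, 1.0, 100.0, 1.0   # hypothetical values; fit these to your data
sol = odeint(func, y0, t, args=(r, p, K, alpha))

plt.plot(t, sol)
plt.xlabel("t")
plt.ylabel("Y(t)")
plt.show()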

How to compute the integral of a function which depends on the integral of another function

from scipy.integrate import quad
from math import sqrt
f = lambda x, a: a**2 * x # here a is a constant.
F = lambda x, a: quad(f, 0, x, args=(a,))[0]
rho = 5
I need to compute the integral of
1 / sqrt(F(rho, a) - F(s, a))
with respect to s, from 0 (lower limit) to rho (upper limit).
I think your question is missing some information (about a, for example) from your previous post, "How to use `scipy.integrate.quad` to compute integral of a function which depends on the integral of another function". You should probably fix that by editing your question text.
Regarding the current issue: Why don't you just define a new function g and then refer to the other function and integrate the same way as in F?
from scipy.integrate import quad
from math import sqrt
f = lambda x, a: a**2 * x
F = lambda x, a: quad(f, 0, x, args=(a,))[0]
rho = 5
g = lambda x, a: 1 / sqrt(F(rho, a) - F(x, a))
I = quad(g, 0, rho, args=(4,))
print(I)
This prints:
(0.5553603672694568, 1.9614421198355103e-11)
All integrals can be computed symbolically, no need for quad. F is
def F(x):
    return a**2 * x**2 / 2
and
def g(s):
    return 1 / sqrt(a**2 / 2 * (rho**2 - s**2))

val = quad(g, 0, rho)
is
pi / sqrt(2) / a
(independent of rho).
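As a quick numerical check of this closed form with the concrete numbers from the first answer (a = 4, rho = 5):
from math import pi, sqrt
from scipy.integrate import quad

a, rho = 4, 5
g = lambda s: 1 / sqrt(a**2 / 2 * (rho**2 - s**2))
print(quad(g, 0, rho)[0])   # ~0.55536, the same value the nested-quad version gives
print(pi / sqrt(2) / a)     # ~0.55536, independent of rho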
Besides, you can simply define a outside of the functions. (I never understood why it's necessary for quad to have an args argument.)
from scipy.integrate import quad
from math import sqrt
a = 3.14
f = lambda x: a**2 * x
F = lambda x: quad(f, 0, x)[0]
rho = 5

Contour plot with function receiving lists

I have some function f(list) which receives as its argument a list of length 2, i.e. list = [entry_1, entry_2]. I need to do a contour plot of this function:
x = np.linspace(0, 2, 1000+1)
y = np.linspace(0, 2, 1000+1)
X, Y = np.meshgrid(x, y)
Z = ?
plt.contour(X, Y, Z)
plt.show()
The problem is: I don't know how to pass the arguments. If the function were of the form f(x, y), then
Z = f(X, Y)
would do the job. But
Z = f([X, Y])
fails: it receives too many arguments. How can I do this?
EDIT: Here are the functions of the program:
from scipy.optimize import minimize

def c_Gamma_gamma_fv(cf, cv):
    return np.abs((4 * eta_gamma * charges**2 * a_q * cf).sum() + 4. * cf * a_tau / 3. + a_w * cv)**2 / Gamma_gamma

def mu_fv(cf, cv):
    return np.array([cf**4,
                     cf**2 * cv**2,
                     cf**2 * c_Gamma_gamma_fv(cf, cv),
                     cv**2 * c_Gamma_gamma_fv(cf, cv),
                     cf**4,
                     cv**2 * cf**2,
                     cf**2 * cv**2,
                     cv**4,
                     cv**2 * cf**2,
                     cv**4])

def chi_square_fv(clist):
    cf, cv = clist
    return ((mu_fv(cf, cv) - mu_data) @ inv_cov @ (mu_fv(cf, cv) - mu_data))

x0 = [1., 1.]
res_fv = minimize(chi_square_fv, x0)
print(res_fv)

def delta_chi_fv(clist):
    return chi_square_fv(clist) - chi_square_fv([res_fv.x[0], res_fv.x[1]])
All variables not explicit are constants. The function I want to plot is delta_chi_fv.
The solution I found (with a little help from a professor at my university), though not the fastest one, works:
Z = np.zeros((len(x), len(y)))
for i in range(len(x)):
    for j in range(len(y)):
        z = delta_chi_fv([x[i], y[j]])
        Z[i, j] = z
With this construction of Z, then
plt.contour(X,Y,Z)
works fine. If anyone knows another approach, it would be great to learn more about this language.
Cheers,
Gabriel.
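Another option along the same lines (a sketch, still calling delta_chi_fv one point at a time as above) is to build Z directly in the row/column order that np.meshgrid uses, so that Z[i, j] corresponds to (X[i, j], Y[i, j]) without any transpose:
# Rows follow y, columns follow x, matching the default meshgrid(indexing='xy') layout.
Z = np.array([[delta_chi_fv([xv, yv]) for xv in x] for yv in y])
plt.contour(X, Y, Z)
plt.show()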

Explicit Euler method doesn't behave how I expect

I have implemented the following explicit Euler method in Python:
def explicit_euler(df, x0, h, N):
    """Solves an ODE IVP using the Explicit Euler method.

    Keyword arguments:
    df - The derivative of the system you wish to solve.
    x0 - The initial value of the system you wish to solve.
    h  - The step size.
    N  - The number of steps.
    """
    x = np.zeros(N)
    x[0] = x0
    for i in range(0, N-1):
        x[i+1] = x[i] + h * df(x[i])
    return x
Following the article on Wikipedia, I can plot the function and verify that I get the same plot, so I believe the method I have written is working correctly.
Next I tried to use it to solve the last system given on this page, but the plot I obtain does not look anything like the one shown there.
I am not sure why my plot doesn't match the one shown on the webpage. The explicit Euler method seems to work fine when I use it to solve systems where the slope doesn't change, but for an oscillating function it never seems to mimic it at all, not even showing the expected error growth indicated on the linked webpage. I am not sure what is wrong with the method I have implemented.
Here is the code used for plotting and the derivative:
def g(t):
    return -0.5 * np.exp(t * 0.5) * np.sin(5 * t) + 5 * np.exp(t * 0.5) * np.cos(5 * t)

h = 0.001
x0 = 0
tn = 4
N = int(tn / h)

x = ee.explicit_euler(f, x0, h, N)
t = np.arange(0, tn, h)

fig = plt.figure()
plt.plot(t, x, label="Explicit Euler")
plt.plot(t, (np.exp(0.5 * t) * np.sin(5 * t)), label="Analytical solution")
# plt.plot(t, np.exp(0.5 * t), label="Analytical solution")
plt.xlabel('Timesteps t')
plt.ylabel('x(t)=e^(0.5*t) * sin(5*t)')
plt.legend()
plt.grid()
plt.show()
Edit:
As requested here is the current equation I am applying the method to:
y' - y = -0.5*e^(t/2)*sin(5t) + 5*e^(t/2)*cos(5t)
where y(0) = 0.
I would like to make clear, however, that this behaviour doesn't occur just for this equation, but for all equations where the slope changes sign or oscillates.
Edit 2:
Ok, thanks. Yes, the code below does indeed work. But I have one further question. In the simple example I had for the exponential function, I had defined a method:
def f(x):
    return x
for the system f'(x) = x. This gave the output of my first graph, which looks correct. I then defined another function:
def k(x):
    return cos(x)
for the system f'(x) = cos(x); this does not give the expected output. But when I change the function definition to
def k(t, x):
    return cos(t)
I get the expected output. Whereas if I change my function to
def f(t, x):
    return t
I get an incorrect output. Am I always actually evaluating the function at a time step, and is it just by chance for the system x' = x that at each time step the value happens to equal the value of x?
I had understood that the Euler method used the previously calculated value in order to get the next value. But if I run the code for my function k(x) = cos(x), using the updated code you provided, I get output that must be incorrect.
def k(t, x):
    return np.cos(x)

h = 0.1          # Step size
x0 = (0, 0)      # Initial point of iteration
tn = 10          # Time step to iterate to
N = int(tn / h)  # Number of steps

x = ee.explicit_euler(k, x0, h, N)
t = np.arange(0, tn, h)
The problem is that you have set up the function g incorrectly. The equation you want to solve is
y' - y = -0.5*e^(t/2)*sin(5t) + 5*e^(t/2)*cos(5t),
from which we observe that:
y' = y - 0.5*e^(t/2)*sin(5t) + 5*e^(t/2)*cos(5t)
Then we define the function f(t, y) = y - 0.5*e^(t/2)*sin(5t) + 5*e^(t/2)*cos(5t) as:
def f(t, y):
    return y - 0.5 * np.exp(t * 0.5) * np.sin(5 * t) + 5 * np.exp(t * 0.5) * np.cos(5 * t)
The initial point of iteration is f0 = (t(0), y(0)):
f0 = (0, 0)
Then from Euler's equations:
def explicit_euler(df, x0, h, N):
    """Solves an ODE IVP using the Explicit Euler method.

    Keyword arguments:
    df - The derivative of the system you wish to solve.
    x0 - The initial value of the system you wish to solve.
    h  - The step size.
    N  - The number of steps.
    """
    x = np.zeros(N)
    t, x[0] = x0
    for i in range(0, N-1):
        x[i+1] = x[i] + h * df(t, x[i])
        t += h
    return x
Complete Code:
import numpy as np
import matplotlib.pyplot as plt

def explicit_euler(df, x0, h, N):
    """Solves an ODE IVP using the Explicit Euler method.

    Keyword arguments:
    df - The derivative of the system you wish to solve.
    x0 - The initial value of the system you wish to solve.
    h  - The step size.
    N  - The number of steps.
    """
    x = np.zeros(N)
    t, x[0] = x0
    for i in range(0, N-1):
        x[i+1] = x[i] + h * df(t, x[i])
        t += h
    return x

def df(t, y):
    return -0.5 * np.exp(t * 0.5) * np.sin(5 * t) + 5 * np.exp(t * 0.5) * np.cos(5 * t) + y

h = 0.001
f0 = (0, 0)
tn = 4
N = int(tn / h)

x = explicit_euler(df, f0, h, N)
t = np.arange(0, tn, h)

fig = plt.figure()
plt.plot(t, x, label="Explicit Euler")
plt.plot(t, (np.exp(0.5 * t) * np.sin(5 * t)), label="Analytical solution")
# plt.plot(t, np.exp(0.5 * t), label="Analytical solution")
plt.xlabel('Timesteps t')
plt.ylabel('x(t)=e^(0.5*t) * sin(5*t)')
plt.legend()
plt.grid()
plt.show()
Isolate y', and whatever is on the right-hand side is what you should place in the df function.
We will rename things to keep the same convention throughout: y is the dependent variable and t the independent variable.
Equation 2: in this case the equation f'(x) = cos(x) is rewritten as:
y' = cos(t)
Then:
def df(t, y):
    return np.cos(t)
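A quick sketch of this case with the explicit_euler function above (h = 0.1 and tn = 10 are taken from the snippet in the question); the exact solution of y' = cos(t) with y(0) = 0 is sin(t):
h = 0.1
f0 = (0, 0)    # (t0, y0)
tn = 10
N = int(tn / h)

y = explicit_euler(df, f0, h, N)   # df(t, y) = np.cos(t) as defined above
t = np.arange(0, tn, h)

plt.plot(t, y, label="Explicit Euler")
plt.plot(t, np.sin(t), label="Analytical solution sin(t)")
plt.legend()
plt.show()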
In conclusion, if we have an equation of the following form:
y' = f(t, y)
then:
def df(t, y):
    return f(t, y)
