scipy.optimize.minimize is too slow. How can I speed it up? - python
I am converting an IDL code (written by Oleg Kochukhov) to Python. The code reconstructs a stellar surface map from spectral line profiles using Tikhonov or Maximum Entropy regularization.
I use scipy.optimize.minimize to fit the map to the line profiles, but the process is very slow and the results are not consistent. I have searched for a solution online but have not found anything useful.
I added a runnable code below:
import numpy as np
from scipy.optimize import minimize
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import matplotlib.gridspec as gridspec
#syc = 0

def DI_GridInit(ntot):
    # generate stellar surface grid
    nlat = int(round(0.5 * (1.0 + np.sqrt(1.0 + np.pi * ntot))) - 1)
    nlon = np.zeros(nlat, dtype=int)
    xlat = np.pi * (np.arange(nlat, dtype=float) + 0.5) / nlat - np.pi / 2.0
    xcirc = 2.0 * np.cos(xlat[1:])
    nlon[1:] = np.around(xcirc * nlat) + 1
    nlon[0] = ntot - sum(nlon[1:])
    if abs(nlon[0] - nlon[nlat - 1]) > nlat:
        nlon[1:] = nlon[1:] + (nlon[0] - nlon[nlat - 1]) / nlat
        nlon[0] = ntot - sum(nlon[1:])
    if nlon[0] < nlon[nlat - 1]:
        nlon[1:] = nlon[1:] - 1
        nlon[0] = ntot - sum(nlon[1:])
    # generate Cartesian coordinates for the surface grid in
    # stellar coordinates, areas of surface elements and
    # regularization indices: (lower, upper, right, left)
    x0, j = np.zeros((ntot, 3), dtype=float), 0
    latitude, longitude = np.zeros(ntot, dtype=float), np.zeros(ntot, dtype=float)
    sa, ireg = np.zeros(ntot, dtype=float), np.zeros((ntot, 4), dtype=int)
    slt = np.hstack((0., (xlat[1:nlat] + xlat[0:nlat - 1]) / 2. + np.pi / 2., np.pi))
    for i in range(nlat):
        coslat = np.cos(xlat[i])
        sinlat = np.sin(xlat[i])
        xlon = 2 * np.pi * (np.arange(nlon[i]) + 0.5) / nlon[i]
        sinlon = np.sin(xlon)
        coslon = np.cos(xlon)
        x0[:, 0][j:j + nlon[i]] = coslat * sinlon
        x0[:, 1][j:j + nlon[i]] = -coslat * coslon
        x0[:, 2][j:j + nlon[i]] = sinlat
        latitude[j:j + nlon[i]] = xlat[i]
        longitude[j:j + nlon[i]] = xlon
        sa[j:j + nlon[i]] = 2. * np.pi * (np.cos(slt[i]) - np.cos(slt[i + 1])) / nlon[i]
        ireg[:, 2][j:j + nlon[i]] = np.roll(j + np.arange(nlon[i], dtype=int), -1)
        ireg[:, 3][j:j + nlon[i]] = np.roll(j + np.arange(nlon[i], dtype=int), 1)
        if i > 0:
            il_lo = j - nlon[i - 1] + np.arange(nlon[i - 1], dtype=int)
        else:
            il_lo = j + nlon[i] + np.arange(nlon[i + 1], dtype=int)
        if i < nlat - 1:
            il_up = j + nlon[i] + np.arange(nlon[i + 1], dtype=int)
        else:
            il_up = il_lo
        for k in range(j, j + nlon[i]):
            dlat_lo = longitude[k] - longitude[il_lo]
            ll = np.argmin(abs(dlat_lo))
            ireg[k][0] = il_lo[ll]
            dlat_up = longitude[k] - longitude[il_up]
            ll = np.argmin(abs(dlat_up))
            ireg[k][1] = il_up[ll]
        j += nlon[i]
    theta = np.arccos(x0[:, 2])
    phi = np.arctan2(x0[:, 0], -x0[:, 1])
    ii = np.argwhere(phi < 0).T[0]
    nii = len(ii)
    if nii:
        phi[ii] = 2.0 * np.pi - abs(phi[ii])
    grid = {'ntot': ntot, 'nlat': nlat, 'nlon': nlon, 'xyz': x0, 'lat': latitude,
            'lon': longitude, 'area': sa, 'ireg': ireg, 'phi': phi, 'theta': theta}
    return grid

def DI_Map(grid, spots):
    map = np.ones(grid['ntot'], dtype=float)
    for i in range(spots['n']):
        dlon = grid['lon'] - np.deg2rad(spots['tbl'][i, 0])
        dlat = grid['lat'] - np.deg2rad(spots['tbl'][i, 1])
        da = (2.0 * np.arcsin(np.sqrt(np.sin(0.5 * dlat) ** 2 +
                                      np.cos(np.deg2rad(spots['tbl'][i, 1])) *
                                      np.cos(grid['lat']) * np.sin(0.5 * dlon) ** 2)))
        # indices of the surface elements that fall inside the spot radius
        ii = np.argwhere(da <= np.deg2rad(spots['tbl'][i, 2])).T[0]
        if len(ii) > 0:
            map[ii] = spots['tbl'][i, 3]
    return map

def DI_Prf(grid, star, map, phase=None, vv=None, vr=None, nonoise=None):
    # velocity array
    if vv is not None:
        nv = len(vv)
    else:
        nv = int(np.ceil(2.0 * star['vrange'] / star['vstep']))
        vv = -star['vrange'] + np.arange(nv, dtype=float) * star['vstep']
    # phase array
    if phase is None:
        phase = np.arange(star['nphases'], dtype=float) / star['nphases']
    # velocity correction for each phase
    if vr is None:
        vr = np.zeros(star['nphases'], dtype=float)
    # fixed trigonometric quantities
    cosi = np.cos(np.deg2rad(star['incl'])); sini = np.sin(np.deg2rad(star['incl']))
    coslat = np.cos(grid['lat']); sinlat = np.sin(grid['lat'])
    # FWHM to Gaussian sigma
    sigm = star['fwhm'] / np.sqrt(8.0 * np.log(2.0))
    isig = (-0.5 / sigm ** 2)
    # initialize line profile and gradient (d prf / d map) arrays
    prf = np.zeros((nv, len(phase)), dtype=float)
    grad = np.zeros((nv, len(phase), grid['ntot']), dtype=float)
    # phase loop
    for i in range(len(phase)):
        coslon = np.cos(grid['lon'] + 2.0 * np.pi * phase[i])
        sinlon = np.sin(grid['lon'] + 2.0 * np.pi * phase[i])
        mu = sinlat * cosi + coslat * sini * coslon
        ivis = np.argwhere(mu > 0.).T[0]
        dv = -sinlon[ivis] * coslat[ivis] * star['vsini']
        avis = grid['area'][ivis] * mu[ivis] * (1.0 - star['limbd'] + star['limbd'] * mu[ivis])
        if star['type'] == 0:
            wgt = avis * map[ivis]
            wgtn = sum(wgt)
            for j in range(nv):
                plc = 1.0 - star['d'] * np.exp(isig * (vv[j] + dv - vr[i]) ** 2)
                prf[j][i] = sum(wgt * plc) / wgtn
                grad[j][i][ivis] = avis * plc / wgtn - avis * prf[j][i] / wgtn
        elif star['type'] == 1:
            wgt = avis
            wgtn = sum(wgt)
            for j in range(nv):
                plc = 1.0 - map[ivis] * star['d'] * np.exp(isig * (vv[j] + dv - vr[i]) ** 2)
                prf[j][i] = sum(wgt * plc) / wgtn
                grad[j][i][ivis] = -wgt / wgtn * star['d'] * np.exp(isig * (vv[j] + dv - vr[i]) ** 2)
    # output structure
    syn = {'v': vv, 'phase': phase, 'prf': prf}
    # add noise
    if star['snr'] != -1 and nonoise is not None:
        obs = syn['prf'] * 0.0
        for i in range(star['nphases']):
            obs[:, i] = syn['prf'][:, i] + np.random.standard_normal((len(syn['v']),)) / star['snr']
        syn['obs'] = obs
    return syn, grad

def DI_func(cmap, functargs):
    # global syc
    star = functargs['star']
    grid = functargs['grid']
    obs = functargs['obs']
    invp = functargs['invp']
    nv = len(obs['v'])
    er = 1.0 / abs(star['snr'])
    if 'vr' in obs.keys():
        syn, grad = DI_Prf(grid, star, cmap, phase=obs['phase'], vv=obs['v'], vr=obs['vr'])
    else:
        syn, grad = DI_Prf(grid, star, cmap, phase=obs['phase'], vv=obs['v'])
    # shf = 0
    # for i in range(len(obs['phase'])):
    #     plt.plot(obs['v'], obs['obs'][:, i] + shf, 'bo')
    #     plt.plot(obs['v'], syn['prf'][:, i] + shf, 'r')
    #     plt.plot(obs['v'], obs['obs'][:, i] - syn['prf'][:, i] + shf, 'k')
    #     shf += 0.1
    # plt.show()
    # chi-square term
    fchi = 0.0
    sign = (-1) ** invp['regtype']
    for i in range(star['nphases']):
        fchi = fchi + sign * sum((syn['prf'][:, i] - obs['obs'][:, i]) ** 2 / er ** 2) / nv
    # regularization term (Tikhonov or Maximum Entropy)
    freg = 0
    if invp['lambda'] > 0:
        if invp['regtype'] == 0:
            ir = grid['ireg']
            for k in range(len(ir[0, :])):
                freg = freg + invp['lambda'] / grid['ntot'] * sum((cmap - cmap[ir[:, k]]) ** 2)
        elif invp['regtype'] == 1:
            mmap = sum(cmap) / grid['ntot']
            nmap = cmap / mmap
            freg = freg - invp['lambda'] / grid['ntot'] * sum(nmap * np.log(nmap))
    ftot = fchi + freg
    syn['obs'] = obs['obs']
    # syc += 1
    # if syc % 1000 == 0:
    #     plotting(grid, cmap, syn, star['incl'], typ=star['type'])
    #     print(syc, ftot, sum(cmap))
    return ftot

def plotting(grid, map, syn, incl, typ):
    nlon = grid['nlon']
    nln = max(nlon)
    nlt = len(nlon)
    ll = np.zeros(nlt + 1, dtype=int)
    ll[0] = 0
    for i in range(nlt):
        ll[i + 1] = ll[i] + nlon[i]
    map1 = np.zeros((nlt, nln), dtype=float)
    x = np.arange(nln, dtype=float) + 0.5
    for i in range(nlt):
        lll = ((np.arange(nlon[i] + 2, dtype=float) - 0.5) * nln) / nlon[i]
        y = np.hstack((map[ll[i + 1] - 1], map[ll[i]:ll[i + 1] - 1], map[ll[i]]))
        for j in range(nln):
            imin = np.argmin(abs(x[j] - lll))
            map1[i, j] = y[imin]
    light = (190 * (map1 - np.min(map1)) / (np.max(map1) - np.min(map1))) + 50
    light_rect = np.flipud(light)
    if typ == 0:
        cmap = 'gray'
    else:
        cmap = 'gray_r'
    fig = plt.figure()
    fig.clear()
    spec = gridspec.GridSpec(ncols=3, nrows=3, left=0.10, right=0.98,
                             top=0.97, bottom=0.07, hspace=0.2, wspace=0.36)
    # naive IDW-like interpolation on regular grid
    shape = light.shape
    nrows, ncols = (shape[0], shape[1])
    lon, lat = np.meshgrid(np.linspace(0, 360, ncols), np.linspace(-90, 90, nrows))
    for i, item in enumerate([[(0, 0), -0], [(0, 1), -90], [(1, 0), -180], [(1, 1), -270]]):
        ax = fig.add_subplot(spec[item[0]])
        # set up map projection
        m = Basemap(projection='ortho', lat_0=90 - incl, lon_0=item[1], ax=ax)
        # draw lat/lon grid lines every 30 degrees
        m.drawmeridians(np.arange(0, 360, 30))
        m.drawparallels(np.arange(-90, 90, 30))
        # compute native map projection coordinates of the lat/lon grid
        x, y = m(lon, lat)
        # contour data over the map
        m.contourf(x, y, light, 15, vmin=0., vmax=255., cmap=cmap)
        if i in [0, 2]:
            x2, y2 = m(180 - item[1], incl)
        else:
            x2, y2 = m(180 + item[1], incl)
        x1, y1 = (-10, 5)
        ax.annotate(str('%0.2f' % (abs(item[1]) / 360.)), xy=(x2, y2), xycoords='data',
                    xytext=(x1, y1), textcoords='offset points', color='r')
    ax5 = fig.add_subplot(spec[-1, :2])
    ax5.imshow(light_rect, vmin=0., vmax=255., cmap=cmap, interpolation='none',
               extent=[0, 360, -90, 90])
    ax5.set_xticks(np.arange(0, 420, 60))
    ax5.set_yticks(np.arange(-90, 120, 30))
    ax5.set_xlabel('Longitude ($^\circ$)', fontsize=7)
    ax5.set_ylabel('Latitude ($^\circ$)', fontsize=7)
    ax5.tick_params(labelsize=7)
    ax6 = fig.add_subplot(spec[0:, 2])
    shf = 0.0
    for i in range(len(syn['phase'])):
        ax6.plot(syn['v'], syn['obs'][:, -i - 1] + shf, 'bo', ms=2)
        ax6.plot(syn['v'], syn['prf'][:, -i - 1] + shf, 'r', linewidth=1)
        ax6.text(min(syn['v']), max(syn['obs'][:, -i - 1] + shf),
                 str('%0.2f' % syn['phase'][-i - 1]), fontsize=7)
        shf += 0.1
    p1 = ax6.lines[0]
    p2 = ax6.lines[-1]
    p1datay = p1.get_ydata()
    p1datax = p1.get_xdata()
    p2datay = p2.get_ydata()
    y1, y2 = min(p1datay) - min(p1datay) / 20., max(p2datay) + min(p1datay) / 10.
    ax6.set_ylim([y1, y2])
    ax6.set_xlabel('V ($km s^{-1}$)', fontsize=7)
    ax6.set_ylabel('I / Ic', fontsize=7)
    ax6.tick_params(labelsize=7)
    max_ = int(max(p1datax))
    ax6.set_xticks([-max_, np.floor(-max_ / 2.), 0.0, np.ceil(max_ / 2.), max_])
    plt.show()

if __name__ == "__main__":
    # Star parameters
    star = {'ntot': 1876, 'type': 0, 'incl': 70, 'vsini': 50, 'fwhm': 7.0, 'd': 0.6,
            'limbd': 0.5, 'nphases': 5, 'vrange': np.sqrt(50 ** 2 + 7.0 ** 2) * 1.4,
            'vstep': 1.0, 'snr': 500}
    # Spot parameters
    lon_spot = [40, 130, 220, 310]
    lat_spot = [-30, 0, 60, 30]
    r_spot = [20, 20, 20, 20]
    c_spot = [0.1, 0.2, 0.25, 0.3]
    tbl = np.array([lon_spot, lat_spot, r_spot, c_spot]).T
    spots = {'n': len(lon_spot), 'type': star['type'], 'tbl': tbl}
    # Generate grid
    grid = DI_GridInit(star['ntot'])
    # Generate map
    cmap = DI_Map(grid, spots)
    # Generate spectral line profiles
    csyn, grad = DI_Prf(grid, star, cmap, nonoise=True)
    # Plotting map and line profiles
    plotting(grid, cmap, csyn, star['incl'], star['type'])
    # Generate map over the line profiles using scipy.optimize.minimize
    invp = {'lambda': 20, 'regtype': 0, 'maxiter': 10}
    grid_inv = DI_GridInit(star['ntot'])
    functargs = {'star': star, 'grid': grid_inv, 'obs': csyn, 'invp': invp}
    cmap = np.ones(star['ntot'])
    cmap[0] = 0.99
    bnd = list(zip(np.zeros(len(cmap), dtype=float), np.ones(len(cmap), dtype=float)))
    minimize(DI_func, cmap, args=functargs, method='TNC', bounds=bnd,
             callback=None, options={'eps': 0.1, 'maxiter': 5, 'disp': True})
The code includes the following parts:
'DI_GridInit': generates the surface grid for the map
'DI_Map': generates the stellar surface map from the starspot parameters (longitude, latitude, radius and contrast)
'DI_Prf': generates the spectral line profiles from the map
Now I want to recover the surface map from the generated, noise-added line profiles. I use scipy.optimize.minimize (TNC method) to obtain the surface map, with 'DI_func' as the objective function, but minimize is very slow. What is the problem, and how can I speed this up?
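To see where the time goes inside one evaluation of 'DI_func', it can be profiled with the standard library cProfile (a minimal sketch, meant to be run after cmap and functargs have been set up as in the script above; the output file name 'di_func.prof' is arbitrary):
import cProfile
import pstats

# profile a single objective evaluation and list the most expensive calls
cProfile.run('DI_func(cmap, functargs)', 'di_func.prof')
pstats.Stats('di_func.prof').sort_stats('cumulative').print_stats(10)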
Here is a modified version of DI_Prf, which is where most of the computation time goes during the execution of DI_func:
def DI_Prf(grid, star, map, phase=None, vv=None, vr=None, nonoise=None):
    # velocity array
    if vv is not None:
        nv = len(vv)
    else:
        nv = int(np.ceil(2.0 * star['vrange'] / star['vstep']))
        vv = -star['vrange'] + np.arange(nv, dtype=float) * star['vstep']
    # phase array
    if phase is None:
        phase = np.arange(star['nphases'], dtype=float) / star['nphases']
    # velocity correction for each phase
    if vr is None:
        vr = np.zeros(star['nphases'], dtype=float)
    # fixed trigonometric quantities
    cosi = np.cos(np.deg2rad(star['incl'])); sini = np.sin(np.deg2rad(star['incl']))
    coslat = np.cos(grid['lat']); sinlat = np.sin(grid['lat'])
    # FWHM to Gaussian sigma
    sigm = star['fwhm'] / np.sqrt(8.0 * np.log(2.0))
    isig = (-0.5 / sigm ** 2)
    # initialize line profile and gradient (d prf / d map) arrays
    prf = np.zeros((nv, len(phase)), dtype=float)
    grad = np.zeros((nv, len(phase), grid['ntot']), dtype=float)
    # phase loop
    for i in range(len(phase)):
        coslon = np.cos(grid['lon'] + 2.0 * np.pi * phase[i])
        sinlon = np.sin(grid['lon'] + 2.0 * np.pi * phase[i])
        mu = sinlat * cosi + coslat * sini * coslon
        ivis = np.argwhere(mu > 0.).T[0]
        dv = -sinlon[ivis] * coslat[ivis] * star['vsini']
        avis = grid['area'][ivis] * mu[ivis] * (1.0 - star['limbd'] + star['limbd'] * mu[ivis])
        if star['type'] == 0:
            wgt = avis * map[ivis]
            wgtn = sum(wgt)
            # for j in range(nv):
            #     plc = 1.0 - star['d'] * np.exp(isig * (vv[j] + dv - vr[i]) ** 2)
            #     prf[j][i] = sum(wgt * plc) / wgtn
            #     grad[j][i][ivis] = avis * plc / wgtn - avis * prf[j][i] / wgtn
            # vectorized over the velocity axis: plc has shape (nv, len(ivis))
            plc = 1.0 - star['d'] * np.exp(isig * (vv[:, np.newaxis] + dv[np.newaxis, :] - vr[i]) ** 2)
            prf[:, i] = np.sum(wgt * plc, axis=1) / wgtn
            grad[:, i, ivis] = avis * plc / wgtn - (avis[:, np.newaxis] * prf[:, i]).T / wgtn
        elif star['type'] == 1:
            wgt = avis
            wgtn = sum(wgt)
            for j in range(nv):  # to be modified too
                plc = 1.0 - map[ivis] * star['d'] * np.exp(isig * (vv[j] + dv - vr[i]) ** 2)
                prf[j][i] = sum(wgt * plc) / wgtn
                grad[j][i][ivis] = -wgt / wgtn * star['d'] * np.exp(isig * (vv[j] + dv - vr[i]) ** 2)
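            # the same broadcasting trick could replace this loop as well
            # (an untested sketch, mirroring the star['type'] == 0 branch above):
            # gauss = np.exp(isig * (vv[:, np.newaxis] + dv[np.newaxis, :] - vr[i]) ** 2)
            # plc = 1.0 - map[ivis] * star['d'] * gauss
            # prf[:, i] = np.sum(wgt * plc, axis=1) / wgtn
            # grad[:, i, ivis] = -wgt / wgtn * star['d'] * gauss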
    # output structure
    syn = {'v': vv, 'phase': phase, 'prf': prf}
    # add noise
    if star['snr'] != -1 and nonoise is not None:
        # for i in range(star['nphases']):
        obs = syn['prf'] + np.random.standard_normal(size=syn['prf'].shape) / star['snr']
        syn['obs'] = obs
    return syn, grad
It reduces the run time by a factor of about 3:
%%timeit
syn, grad = DI_Prf(grid, star, cmap, phase=obs['phase'], vv=obs['v'])
# original version:   127 ms ± 2.61 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
# vectorized version: 40.7 ms ± 683 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
The main idea with NumPy is to avoid explicit loops and instead work with multidimensional arrays, relying on broadcasting. For instance:
fchi = 0.0
for i in range(star['nphases']):
    fchi = fchi + sign * sum((syn['prf'][:, i] - obs['obs'][:, i]) ** 2 / er ** 2) / nv
could be replaced with:
fchi = sign / nv / er ** 2 * np.sum(np.sum((syn['prf'] - obs['obs']) ** 2, axis=1))
The same applies to np.random.standard_normal(size=syn['prf'].shape).
It is not a big improvement here because star['nphases'] is small, but it matters more along the other axis. You could go further and also remove the for loop over the phases in DI_Prf, but that requires some more thought.
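Another lever, independent of the vectorization: DI_func currently discards the grad array that DI_Prf returns, so TNC has to approximate the gradient by finite differences (options={'eps': 0.1}), which costs roughly one extra DI_Prf evaluation per surface element per iteration. If grad really is d prf / d cmap (which is what both branches of DI_Prf compute), you can return an analytic gradient from the objective and pass jac=True to minimize. A rough, untested sketch for the chi-square term plus the Tikhonov (regtype == 0) regularization, reusing cmap, functargs and bnd from the question:
def DI_func_grad(cmap, functargs):
    # same objective as DI_func, but also returns its gradient with respect to cmap
    star, grid = functargs['star'], functargs['grid']
    obs, invp = functargs['obs'], functargs['invp']
    nv = len(obs['v'])
    er = 1.0 / abs(star['snr'])
    syn, grad = DI_Prf(grid, star, cmap, phase=obs['phase'], vv=obs['v'])
    sign = (-1) ** invp['regtype']
    resid = syn['prf'] - obs['obs']                     # shape (nv, nphases)
    fchi = sign * np.sum(resid ** 2) / (nv * er ** 2)
    # chain rule, using grad[j, i, k] = d prf[j, i] / d cmap[k]
    dchi = 2.0 * sign / (nv * er ** 2) * np.einsum('vp,vpk->k', resid, grad)
    freg, dreg = 0.0, np.zeros_like(cmap)
    if invp['lambda'] > 0 and invp['regtype'] == 0:
        lam = invp['lambda'] / grid['ntot']
        for k in range(grid['ireg'].shape[1]):
            diff = cmap - cmap[grid['ireg'][:, k]]
            freg += lam * np.sum(diff ** 2)
            # d/d cmap of sum(diff ** 2): +2*diff at each element, -2*diff at its neighbour
            dreg += 2.0 * lam * diff
            np.subtract.at(dreg, grid['ireg'][:, k], 2.0 * lam * diff)
    return fchi + freg, dchi + dreg

res = minimize(DI_func_grad, cmap, args=functargs, method='TNC', jac=True,
               bounds=bnd, options={'maxiter': 50, 'disp': True})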