I get this error:
ValueError: operands could not be broadcast together with shapes (365,) (2,)
I'm surprised by the (2,). How can I find out which variable has this (2,) dimension? None of my variables should have it.
Thank you for your help!
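For reference, this is what the error means in isolation (a minimal reproduction, not taken from my script): NumPy refuses to combine, elementwise, two 1-D arrays whose lengths differ and are not broadcast-compatible.
import numpy as np
a = np.zeros(365)
b = np.zeros(2)
a - b   # ValueError: operands could not be broadcast together with shapes (365,) (2,)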
Below is the first script, where I define my function. It includes a loop and also another function inside it, so I'm not sure how to narrow the problem down.
Most of my variables have shape (365,) because they depend on time, with one value per day over 365 days.
I also have some fixed variables, like the soil parameters, whose shape is (1,).
But I don't know which variable ends up with shape (2,).
import pandas as pd
import numpy as np
def SA(MO = 0,
ETPr = 0,
SWSa = 0,
pb = 1.70 ):
DB = pd.read_excel("~/Documents/Spider/Data/data_base.xlsx", sheet_name = "DB")
DB1 = pd.read_excel("~/Documents/Spider/Bilan_Courgette.xlsx", sheet_name = "sol")
DB2 = pd.read_excel("~/Documents/Spider/Bilan_Courgette.xlsx", sheet_name = "culture")
# Intermediate calculations to determine ET0 per day
# Array that maps "date" onto a series 1 -> 365
JourDeLAnnee = pd.Series(range(1,366))
# Purple ones
dist_TS = 1+(0.033*np.cos(0.0172 * JourDeLAnnee))
decli_So = 0.409*np.sin((0.0172 * JourDeLAnnee)-1.39)
lat = 0.87266463
ang_Hor_So =np.arccos(-np.tan(lat)*np.tan(decli_So))
gamma = 0.067
# Yellow ones
delta = 2504*np.exp((17.27*DB.tsa_by_day)/(DB.tsa_by_day +237.3))/(DB.tsa_by_day +237.3)**2
rg = DB.ens_by_day / 1000000 * 86400
ra = 37.6 * dist_TS * ((ang_Hor_So * np.sin(lat) * np.sin(decli_So)) + \
(np.cos(lat) * np.cos(decli_So) * np.sin(ang_Hor_So)))
rso = (0.75 + (2*0.00001*120)) * ra
tw =(DB.tsa_by_day * np.arctan(0.151977 * ((DB.hra_by_day + 8.313659)**0.5))) + \
np.arctan(DB.tsa_by_day + DB.hra_by_day) - np.arctan(DB.hra_by_day - 1.676331) + \
(0.00391838 * ((DB.hra_by_day)**1.5) * np.arctan(0.023101 * DB.hra_by_day)) - 4.686035
ed = (0.611 * np.exp((17.27 * tw) / (tw + 237.3))) - (0.0008 *(DB.tsa_by_day-tw) * 101.325)
ea =((0.611 * np.exp((17.27*DB.tsa_max) / (DB.tsa_max + 237.3))) + \
(0.611 * np.exp((17.27 * DB.tsa_min) / (DB.tsa_min +237.3)))) / 2.0
rn = (0.77 * rg) - (((1.35 * (rg / rso)) - 0.35) \
* (0.34 - (0.14 * (ed**0.5))) * (4.9E-9) * ((((273+DB.tsa_max)**4)+((273+DB.tsa_min)**4))/2))
# Calculation of G
from typing import List
def get_g_constant(tsa_by_day: List[float], day: int):
assert day >= 1
return 0.38 * (tsa_by_day[day] - tsa_by_day[day-1])
def get_g_for_year(tsa_by_day: List[int]) -> List[float]:
g_list = []
for i in range(1, len(tsa_by_day)):
g_value = get_g_constant(tsa_by_day, i)
g_list.append(g_value)
return g_list
G = get_g_for_year(DB.tsa_by_day)
G = [DB.tsa_by_day[0]] + G
# The famous ET0
ET0 = ((0.408 * delta * (rn - G)) + (gamma * (900 /(DB.tsa_by_day + 273)) * DB.vtt_by_day * (ea - ed))) / \
(delta + (0.067*(1+(0.34 * DB.vtt_by_day))))
# Calculation of the soil parameters
Profil = 500
pb = 100 / ((MO / 224000) + ((100-MO) / (1.64)))
Os = 0.6355+0.0013* DB1.A -0.1631* pb
Or = 0
lnα = (-4.3003) - (0.0097*DB1.A) + (0.0138* DB1.S ) - (0.0992*MO)
lnn = -1.0846-0.0236 * DB1.A -0.0085 * DB1.S +0.0001 * (DB1.S)**2
nn = np.exp(lnn) + 1
m = 1 - (1/nn)
lnK0 = 1.9582 + 0.0308*DB1.S - 0.6142* pb - 0.1566*MO
λ = -1.8642 - 0.1317*DB1.A + 0.0067*DB1.S
α = np.exp(lnα)
K0 = np.exp(lnK0)
θPf2 =(((1 + ((α*(10**2.5))**nn))**(-m))*( Os - Or)) + Or
θPf4 =(((1 + ((α*(10**4.2))**nn))**(-m))*( Os - Or)) + Or
SWS = θPf2 - θPf4
diff = SWS*SWSa
aj = diff / 2
θPf2New = θPf2 + aj
θPf4New = θPf4 - aj
# Calculation of the storage volume p to reach
p = 0.04 *(5 - ET0) + DB2.ptab[0]
θp =(1 - p) * ( θPf2New - θPf4New )+ θPf4New
Vp = θp * Profil
# The famous ETP
import datetime
DateS = datetime.datetime.strptime('30/03/2019','%d/%m/%Y').timetuple().tm_yday
DateR = datetime.datetime.strptime('15/09/2019','%d/%m/%Y').timetuple().tm_yday
ETP=ET0.copy()
for n in range(364):
if n >= (DateS - 1) and n <= (DateR - 1) :
ETP[n] = ET0[n] * DB2.Kc[0]
else:
ETP[n] = ET0[n] * DB2.SolNu[0]
ETP[0] = 0
ETPNew = ET0.copy()
ETPNew = ETP - ETP * ETPr
# The water balance
Stock = ET0.copy()
θ = ET0.copy()
Drainage = ET0.copy()
Irrigation = ET0.copy()
Se = ET0.copy()
SeC = ET0.copy()
θ[0] = θPf2New
Stock[0] = θ[0]*Profil
for i in range(364) :
Se[i] = (θ[i] - Or)/( Os - Or)
if Se[i] > 1 :
SeC[i] = 1
else:
SeC[i] = Se[i]
Drainage[i] = K0 *(((SeC[i])**λ )*(1-(1- SeC[i]**(nn/(nn-1)))**m)**2)*10
if Vp[i] - Stock[i] > 0 : # Stock is not defined here
Irrigation[i] = Vp[i] - Stock[i]
else:
Irrigation[i] = 0
Stock[i+1] = Stock[i] + DB.plu_by_day[i] - ETPNew[i] - Drainage[i] + Irrigation[i]
θ[i+1] = Stock[i+1] / Profil
return (Irrigation.sum())
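One way to track down where the (2,) comes from is to print the shape of the intermediate arrays near the end of SA(), just before the water-balance loop (a debugging sketch only, not part of the original function; the names are the ones defined above):
print('DB:', DB.shape, 'DB1:', DB1.shape, 'DB2:', DB2.shape)
for name, val in [('ET0', ET0), ('Vp', Vp), ('θPf2New', θPf2New),
                  ('θPf4New', θPf4New), ('ETPNew', ETPNew), ('K0', K0)]:
    print(name, np.shape(val))
Any quantity reported with shape (2,) points at the variable (or the Excel sheet it was read from) that triggers the broadcasting error.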
Afterwards, I use a second script to do a sensitivity analysis, and it is when I run this script that I get the error 'ValueError: operands could not be broadcast together with shapes (365,) (2,)':
import numpy as np
from SALib.analyze import sobol
from SALib.sample import saltelli
from test import*
import matplotlib.pyplot as plt
# Set up dictionary with system parameters
problem = {
'num_vars': 4,
'names': ['MO', 'ETPr', 'SWSa', 'K0'],
'bounds': [[0, 10],
[0, 0.04135],
[0, 0.2615],
[1.40, 1.70],
]}
# Array with n's to use
nsamples = np.arange(50, 400, 50)
# Arrays to store the index estimates
S1_estimates = np.zeros([problem['num_vars'],len(nsamples)])
ST_estimates = np.zeros([problem['num_vars'],len(nsamples)])
# Loop through all n values, create sample, evaluate model and estimate S1 & ST
for i in range(len(nsamples)):
print('n= '+ str(nsamples[i]))
# Generate samples
sampleset = saltelli.sample(problem, nsamples[i],calc_second_order=False)
# Run model for all samples
output = [SA(*sampleset[j,:]) for j in range(len(sampleset))]
# Perform analysis
results = sobol.analyze(problem, np.asarray(output), calc_second_order=False,print_to_console=False)
# Store estimates
ST_estimates[:,i]=results['ST']
S1_estimates[:,i]=results['S1']
np.save('ST_estimates.npy', ST_estimates)
np.save('S1_estimates.npy', S1_estimates)
S1_estimates = np.load('S1_estimates.npy')
ST_estimates = np.load('ST_estimates.npy')
# Generate figure showing evolution of indices
fig = plt.figure(figsize=(18,9))
ax1 = fig.add_subplot(1,2,1)
handles = []
for j in range(problem['num_vars']):
handles += ax1.plot(nsamples, S1_estimates[j,:], linewidth=5)
ax1.set_title('Evolution of S1 index estimates', fontsize=20)
ax1.set_ylabel('S1', fontsize=18)
ax1.set_xlabel('Number of samples (n)', fontsize=18)
ax1.tick_params(axis='both', which='major', labelsize=14)
ax2 = fig.add_subplot(1,2,2)
for j in range(problem['num_vars']):
ax2.plot(nsamples, ST_estimates[j,:], linewidth=5)
ax2.set_title('Evolution of ST index estimates', fontsize=20)
ax2.set_ylabel('ST', fontsize=18)
ax2.tick_params(axis='both', which='major', labelsize=14)
ax2.set_xlabel('Number of samples (n)', fontsize=18)
fig.legend(handles, problem['names'], loc = 'right', fontsize=11)
plt.savefig('indexevolution.png')
# Calculate parameter rankings
S1_ranks = np.zeros_like(S1_estimates)
ST_ranks = np.zeros_like(ST_estimates)
for i in range(len(nsamples)):
orderS1 = np.argsort(S1_estimates[:,i])
orderST = np.argsort(ST_estimates[:,i])
S1_ranks[:,i] = orderS1.argsort()
ST_ranks[:,i] = orderST.argsort()
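To see which sampled parameter set actually triggers the error, the model-evaluation line can be unrolled with a try/except (a debugging sketch that would replace the list comprehension above):
output = []
for j in range(len(sampleset)):
    try:
        output.append(SA(*sampleset[j, :]))
    except ValueError as err:
        print('failing sample', j, ':', sampleset[j, :], err)
        raise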
Thank you for your help!
How do I make a line that follows the points, like a shadow of the movement?
Something like this: https://www.youtube.com/watch?v=pEjZd-AvPco
Pastebin with code: https://pastebin.com/AkHaEM4i
import numpy as np
from numpy import sin, cos
from scipy import integrate
import matplotlib.pyplot as plt
import matplotlib.animation as animation
class DoublePendulum:
def __init__(self,
init_state = [120,0,-20,0],
L1 = .5,
L2 = .5,
M1 = 1.0,
M2 = 2.0,
G = 9.8,
origin=(0,0)):
self.init_state = np.asarray(init_state,dtype='float')
self.params = (L1,L2,M1,M2,G)
self.origin = origin
self.time_elapsed = 0
self.state = self.init_state * np.pi/180
def position(self):
(L1, L2, M1, M2, G) = self.params
x = np.cumsum([self.origin[0],
L1 * sin(self.state[0]),
L2 * sin(self.state[2])])
y = np.cumsum([self.origin[1],
-L1 * cos(self.state[0]),
-L2 * cos(self.state[2])])
return (-x,-y)
def dstate_dt(self,state,t):
(M1,M2,L1,L2,G)=self.params
dydx = np.zeros_like(state)
dydx[0] = state[1]
dydx[2] = state[3]
cos_delta = cos(state[2] - state[0])
sin_delta = sin(state[2] - state[0])
den1 = (M1 + M2) * L1 - M2 * L1 * cos_delta * cos_delta
dydx[1] = (M2 * L1 * state[1] * state[1] * sin_delta * cos_delta
+ M2 * G * sin(state[2]) * cos_delta
+ M2 * L2 * state[3] * state[3] * sin_delta
- (M1+M2) * G * sin(state[0])) / den1
den2 = (L2 / L1) * den1
dydx[3] = (-M2 * L2 * state[3] * state[3] * sin_delta * cos_delta
+ (M1 + M2) * G * sin(state[0]) * cos_delta
- (M1 + M2) * L1 * state[1] * state[1] * sin_delta
- (M1 + M2) * G * sin(state[2])) / den2
return dydx
def step(self,dt):
self.state = integrate.odeint(self.dstate_dt, self.state, [0,dt])[1]
self.time_elapsed += dt
pendulum = DoublePendulum([120.,0.0,180.,0.0],.5,.5,10,10,10)
dt = 1./30 #fps
fig = plt.figure(1)
lim1,lim2 = 2,-2
ax = fig.add_subplot(111,aspect='equal', autoscale_on=False,
xlim=(lim1,lim2),ylim=(lim1,lim2),alpha=0.5)
ax.grid()
line, = ax.plot([],[],'o-',lw=2)
time_text = ax.text(0.02,0.95,'', transform=ax.transAxes)
def init():
line.set_data([],[])
time_text.set_text('')
return line, time_text
def animate(i):
global pendulum, dt
pendulum.step(dt)
line.set_data(*pendulum.position())
time_text.set_text('time = %.1f' % pendulum.time_elapsed)
return line, time_text
from time import time
t0 = time()
animate(0)
t1 = time()
interval = 100 * dt - (t1-t0)
ani = animation.FuncAnimation(fig,animate,frames=150,
interval=interval, blit=True, init_func=init)
fig.set_size_inches(6.5, 6.5)
plt.show()
I think the YouTube video I referred to uses code very similar to the code I published here:
https://github.com/jonas37/double_pendulum/
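For completeness, one possible way to get a trailing "shadow" line (just a sketch reusing the ax, line, time_text, pendulum and dt objects defined above, replacing the animate() defined before FuncAnimation is created; not necessarily the approach used in the video): keep the last N positions of the outer mass in a deque and draw them with a second, fainter Line2D.
from collections import deque

TRAIL_LEN = 150                                  # how many past frames the shadow keeps
trail_x = deque(maxlen=TRAIL_LEN)
trail_y = deque(maxlen=TRAIL_LEN)
trail, = ax.plot([], [], '-', lw=1, alpha=0.4)   # the fading "shadow" line

def animate(i):
    global pendulum, dt
    pendulum.step(dt)
    x, y = pendulum.position()
    trail_x.append(x[-1])                        # x[-1], y[-1]: position of the outer mass
    trail_y.append(y[-1])
    line.set_data(x, y)
    trail.set_data(list(trail_x), list(trail_y))
    time_text.set_text('time = %.1f' % pendulum.time_elapsed)
    return line, trail, time_text
If blitting leaves artifacts, the trail can also be cleared and returned from init(), or blit can be set to False.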
I am converting IDL code (written by Oleg Kochukhov) to Python. The code generates a stellar surface map from spectral line profiles using Tikhonov or Maximum Entropy regularization.
I use scipy.optimize.minimize to reconstruct the map from the line profiles, but the process is too slow and the results are not consistent. I searched for a solution on the internet but did not find anything useful.
I added runnable code below:
import numpy as np
from scipy.optimize import minimize
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import matplotlib.gridspec as gridspec
#syc = 0
def DI_GridInit(ntot):
# generate stellar surface grid
nlat = int(round(0.5 * (1.0 + np.sqrt(1.0 + np.pi * ntot))) - 1)
nlon = np.zeros(nlat, dtype=int)
xlat = np.pi * (np.arange(nlat, dtype=float) + 0.5) / nlat - np.pi / 2.0
xcirc = 2.0 * np.cos(xlat[1:])
nlon[1:] = np.around(xcirc * nlat) + 1
nlon[0] = ntot - sum(nlon[1:])
if abs(nlon[0] - nlon[nlat - 1]) > nlat:
nlon[1:] = nlon[1:] + (nlon[0] - nlon[nlat - 1]) / nlat
nlon[0] = ntot - sum(nlon[1:])
if nlon[0] < nlon[nlat - 1]:
nlon[1:] = nlon[1:] - 1
nlon[0] = ntot - sum(nlon[1:])
# generate Descartes coordinates for the surface grid in
# stellar coordinates, areas of surface elements and
# regularization indices: (lower, upper, right, left)
x0, j = np.zeros((ntot, 3), dtype=float), 0
latitude, longitude = np.zeros(ntot, dtype=float), np.zeros(ntot, dtype=float)
sa, ireg = np.zeros(ntot, dtype=float), np.zeros((ntot, 4), dtype=int)
slt = np.hstack((0., (xlat[1:nlat] + xlat[0:nlat - 1]) / 2. + np.pi / 2., np.pi))
for i in range(nlat):
coslat = np.cos(xlat[i])
sinlat = np.sin(xlat[i])
xlon = 2 * np.pi * (np.arange(nlon[i]) + 0.5) / nlon[i]
sinlon = np.sin(xlon)
coslon = np.cos(xlon)
x0[:, 0][j:j + nlon[i]] = coslat * sinlon
x0[:, 1][j:j + nlon[i]] = -coslat * coslon
x0[:, 2][j:j + nlon[i]] = sinlat
latitude[j:j + nlon[i]] = xlat[i]
longitude[j:j + nlon[i]] = xlon
sa[j:j + nlon[i]] = 2. * np.pi * (np.cos(slt[i]) - np.cos(slt[i + 1])) / nlon[i]
ireg[:, 2][j:j + nlon[i]] = np.roll(j + np.arange(nlon[i], dtype=int), -1)
ireg[:, 3][j:j + nlon[i]] = np.roll(j + np.arange(nlon[i], dtype=int), 1)
if (i > 0):
il_lo = j - nlon[i - 1] + np.arange(nlon[i - 1], dtype=int)
else:
il_lo = j + nlon[i] + np.arange(nlon[i + 1], dtype=int)
if (i < nlat - 1):
il_up = j + nlon[i] + np.arange(nlon[i + 1], dtype=int)
else:
il_up = il_lo
for k in range(j, j + nlon[i]):
dlat_lo = longitude[k] - longitude[il_lo]
ll = np.argmin(abs(dlat_lo))
ireg[k][0] = il_lo[ll]
dlat_up = longitude[k] - longitude[il_up]
ll = np.argmin(abs(dlat_up))
ireg[k][1] = il_up[ll]
j += nlon[i]
theta = np.arccos(x0[:, 2])
phi = np.arctan2(x0[:, 0], -x0[:, 1])
ii = np.argwhere(phi < 0).T[0]
nii = len(ii)
phi[ii] = 2.0 * np.pi - abs(phi[ii]) if nii else None
grid = {'ntot': ntot, 'nlat': nlat, 'nlon': nlon, 'xyz': x0, 'lat': latitude,
'lon': longitude, 'area': sa, 'ireg': ireg, 'phi': phi, 'theta': theta}
return grid
def DI_Map(grid, spots):
map = np.ones(grid['ntot'], dtype=float)
for i in range(spots['n']):
dlon = grid['lon'] - np.deg2rad(spots['tbl'][i, 0])
dlat = grid['lat'] - np.deg2rad(spots['tbl'][i, 1])
da = (2.0 * np.arcsin(np.sqrt(np.sin(0.5 * dlat) ** 2 +
np.cos(np.deg2rad(spots['tbl'][i, 1])) *
np.cos(grid['lat']) * np.sin(0.5 * dlon) ** 2)))
ii = np.argwhere(da <= np.deg2rad(spots['tbl'][i, 2])).T[0]
ni = len(ii)
map[ii] = spots['tbl'][i, 3] if ni > 0 else None
return map
def DI_Prf(grid, star, map, phase=None, vv=None, vr=None, nonoise=None):
# velocity array
if vv is not None:
nv = len(vv)
else:
nv = int(np.ceil(2.0 * star['vrange'] / star['vstep']))
vv = -star['vrange'] + np.arange(nv, dtype=float) * star['vstep']
# phase array
if phase is None:
phase = np.arange(star['nphases'], dtype=float) / star['nphases']
# velocity correction for each phase
vr = np.zeros(star['nphases'], dtype=float) if vr is None else vr
# fixed trigonometric quantities
cosi = np.cos(np.deg2rad(star['incl'])); sini = np.sin(np.deg2rad(star['incl']))
coslat = np.cos(grid['lat']); sinlat = np.sin(grid['lat'])
# FWHM to Gaussian sigma
sigm = star['fwhm'] / np.sqrt(8.0 * np.log(2.0))
isig = (-0.5 / sigm ** 2)
# initialize line profile and integrated field arrays
prf = np.zeros((nv, len(phase)), dtype=float)
# gradient if called with 5 - variable input
grad = np.zeros((nv, len(phase), grid['ntot']), dtype=float)
# phase loop
for i in range(len(phase)):
coslon = np.cos(grid['lon'] + 2.0 * np.pi * phase[i])
sinlon = np.sin(grid['lon'] + 2.0 * np.pi * phase[i])
mu = sinlat * cosi + coslat * sini * coslon
ivis = np.argwhere(mu > 0.).T[0]
dv = -sinlon[ivis] * coslat[ivis] * star['vsini']
avis = grid['area'][ivis] * mu[ivis] * (1.0 - star['limbd'] + star['limbd'] * mu[ivis])
if star['type'] == 0:
wgt = avis * map[ivis]
wgtn = sum(wgt)
for j in range(nv):
plc = 1.0 - star['d'] * np.exp(isig * (vv[j] + dv - vr[i]) ** 2)
prf[j][i] = sum(wgt * plc) / wgtn
grad[j][i][ivis] = avis * plc / wgtn - avis * prf[j][i] / wgtn
elif star['type'] == 1:
wgt = avis
wgtn = sum(wgt)
for j in range(nv):
plc = 1.0 - map[ivis] * star['d'] * np.exp(isig * (vv[j] + dv - vr[i]) ** 2)
prf[j][i] = sum(wgt * plc) / wgtn
grad[j][i][ivis] = -wgt / wgtn * star['d'] * np.exp(isig * (vv[j] + dv - vr[i]) ** 2)
# output structure
syn = {'v': vv, 'phase': phase, 'prf': prf}
# add noise
if star['snr'] != -1 and nonoise != None:
obs = syn['prf'] * 0.0
for i in range(star['nphases']):
obs[:, i] = syn['prf'][:, i] + np.random.standard_normal((len(syn['v']),)) / star['snr']
syn['obs'] = obs
return syn, grad
def DI_func(cmap, functargs):
# global syc
star = functargs['star']
grid = functargs['grid']
obs = functargs['obs']
invp = functargs['invp']
nv = len(obs['v'])
er = 1.0 / abs(star['snr'])
if 'vr' in obs.keys():
syn, grad = DI_Prf(grid, star, cmap, phase=obs['phase'], vv=obs['v'], vr=obs['vr'])
else:
syn, grad = DI_Prf(grid, star, cmap, phase=obs['phase'], vv=obs['v'])
# shf = 0
# for i in range(len(obs['phase'])):
# plt.plot(obs['v'], obs['obs'][:, i] + shf, 'bo')
# plt.plot(obs['v'], syn['prf'][:, i] + shf, 'r')
# plt.plot(obs['v'], obs['obs'][:, i] - syn['prf'][:, i] + shf, 'k')
# shf += 0.1
# plt.show()
fchi = 0.0
sign = (-1) ** invp['regtype']
for i in range(star['nphases']):
fchi = fchi + sign * sum((syn['prf'][:, i] - obs['obs'][:, i]) ** 2 / er ** 2) / nv
freg = 0
if invp['lambda'] > 0:
if invp['regtype'] == 0:
ir = grid['ireg']
for k in range(len(ir[0, :])):
freg = freg + invp['lambda'] / grid['ntot'] * sum((cmap - cmap[ir[:, k]]) ** 2)
elif invp['regtype'] == 1:
mmap = sum(cmap) / grid['ntot']
nmap = cmap / mmap
freg = freg - invp['lambda'] / grid['ntot'] * sum(nmap * np.log(nmap))
ftot = fchi + freg
syn['obs'] = obs['obs']
# syc += 1
# if syc % 1000 == 0:
# plotting(grid, cmap, syn, star['incl'], typ=star['type'])
#
# print(syc, ftot, sum(cmap))
return ftot
def plotting(grid, map, syn, incl, typ):
nlon = grid['nlon']
nln = max(nlon)
nlt = len(nlon)
ll = np.zeros(nlt + 1, dtype=int)
ll[0] = 0
for i in range(nlt):
ll[i + 1] = ll[i] + nlon[i]
map1 = np.zeros((nlt, nln), dtype=float)
x = np.arange(nln, dtype=float) + 0.5
for i in range(nlt):
lll = ((np.arange(nlon[i] + 2, dtype=float) - 0.5) * nln) / nlon[i]
y = np.hstack((map[ll[i + 1] - 1], map[ll[i]:ll[i+1]-1], map[ll[i]]))
for j in range(nln):
imin = np.argmin(abs(x[j] - lll))
map1[i, j] = y[imin]
light = (190 * (map1 - np.min(map1)) / (np.max(map1) - np.min(map1))) + 50
light_rect = np.flipud(light)
if typ == 0:
cmap = 'gray'
else:
cmap = 'gray_r'
fig = plt.figure()
fig.clear()
spec = gridspec.GridSpec(ncols=3, nrows=3, left=0.10, right=0.98,
top=0.97, bottom=0.07, hspace=0.2, wspace=0.36)
# naive IDW-like interpolation on regular grid
shape = light.shape
nrows, ncols = (shape[0], shape[1])
lon, lat = np.meshgrid(np.linspace(0, 360, ncols), np.linspace(-90, 90, nrows))
for i, item in enumerate([[(0, 0), -0], [(0, 1), -90], [(1, 0,), -180], [(1, 1), -270]]):
ax = fig.add_subplot(spec[item[0]])
# set up map projection
m = Basemap(projection='ortho', lat_0=90 - incl, lon_0=item[1], ax=ax)
# draw lat/lon grid lines every 30 degrees.
m.drawmeridians(np.arange(0, 360, 30))
m.drawparallels(np.arange(-90, 90, 30))
# compute native map projection coordinates of lat/lon grid.
x, y = m(lon, lat)
# contour data over the map.
m.contourf(x, y, light, 15, vmin=0., vmax=255., cmap=cmap)
if i in [0, 2]:
x2, y2 = m(180 - item[1], incl)
else:
x2, y2 = m(180 + item[1], incl)
x1, y1 = (-10, 5)
ax.annotate(str('%0.2f' % (abs(item[1]) / 360.)), xy=(x2, y2), xycoords='data',
xytext=(x1, y1), textcoords='offset points',
color='r')
ax5 = fig.add_subplot(spec[-1, :2])
ax5.imshow(light_rect, vmin=0., vmax=255., cmap=cmap, interpolation='none', extent=[0, 360, -90, 90])
ax5.set_xticks(np.arange(0, 420, 60))
ax5.set_yticks(np.arange(-90, 120, 30))
ax5.set_xlabel('Longitude ($^\circ$)', fontsize=7)
ax5.set_ylabel('Latitude ($^\circ$)', fontsize=7)
ax5.tick_params(labelsize=7)
ax6 = fig.add_subplot(spec[0:, 2])
shf = 0.0
for i in range(len(syn['phase'])):
ax6.plot(syn['v'], syn['obs'][:, -i - 1] + shf, 'bo', ms=2)
ax6.plot(syn['v'], syn['prf'][:, -i - 1] + shf, 'r', linewidth=1)
ax6.text(min(syn['v']), max(syn['obs'][:, -i - 1] + shf), str('%0.2f' % syn['phase'][-i - 1]),
fontsize=7)
shf += 0.1
p1 = ax6.lines[0]
p2 = ax6.lines[-1]
p1datay = p1.get_ydata()
p1datax = p1.get_xdata()
p2datay = p2.get_ydata()
y1, y2 = min(p1datay) - min(p1datay) / 20.,max(p2datay) + min(p1datay) / 10.
ax6.set_ylim([y1, y2])
ax6.set_xlabel('V ($km s^{-1}$)', fontsize=7)
ax6.set_ylabel('I / Ic', fontsize=7)
ax6.tick_params(labelsize=7)
max_ = int(max(p1datax))
ax6.set_xticks([-max_, np.floor(-max_ / 2.), 0.0, np.ceil(max_ / 2.), max_])
plt.show()
if __name__ == "__main__":
# Star parameters
star = {'ntot': 1876, 'type': 0, 'incl': 70, 'vsini': 50, 'fwhm': 7.0, 'd': 0.6,
'limbd': 0.5, 'nphases': 5, 'vrange': np.sqrt(50 ** 2 + 7.0 ** 2) * 1.4,
'vstep': 1.0, 'snr': 500}
# Spot parameters
lon_spot = [40, 130, 220, 310]
lat_spot = [-30, 0, 60, 30]
r_spot = [20, 20, 20, 20]
c_spot = [0.1, 0.2, 0.25, 0.3]
tbl = np.array([lon_spot, lat_spot, r_spot, c_spot]).T
spots = {'n': len(lon_spot), 'type': star['type'], 'tbl': tbl}
# Generate grid
grid = DI_GridInit(star['ntot'])
# Generate map
cmap = DI_Map(grid, spots)
# Generate spectral line profiles
csyn, grad = DI_Prf(grid, star, cmap, nonoise=True)
# Plotting map and line profiles
plotting(grid, cmap, csyn, star['incl'], star['type'])
# Generate map over the line profiles using scipy.optimize.minimize
invp = {'lambda': 20, 'regtype': 0, 'maxiter': 10}
grid_inv = DI_GridInit(star['ntot'])
functargs = {'star': star, 'grid': grid_inv, 'obs': csyn, 'invp': invp}
cmap = np.ones(star['ntot'])
cmap[0] = 0.99
bnd = list(zip(np.zeros(len(cmap), dtype=float), np.ones(len(cmap), dtype=float)))
minimize(DI_func, cmap, args=functargs, method='TNC', bounds=bnd,
callback=None, options={'eps': 0.1, 'maxiter': 5, 'disp': True})
The code includes the following parts:
'DI_GridInit': generates the grid for the map
'DI_Map': generates the stellar surface map from the starspot parameters (longitude, latitude, radius and contrast)
'DI_Prf': generates the spectral line profiles from the map
Now I want to recover the surface map from the generated, noise-added line profiles. I use scipy.optimize.minimize (TNC method) with 'DI_func' as the objective function, but 'minimize' is very slow. What is the problem, and how can I speed it up?
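To quantify how slow one objective evaluation is (and therefore how long a single TNC iteration will take, since without a jac argument minimize approximates the gradient by finite differences, which costs many DI_func calls per iteration), a single call can be timed like this (a small sketch reusing the objects built in the __main__ block above):
import time

t0 = time.time()
ftot = DI_func(cmap, functargs)
print('one DI_func call: %.3f s, ftot = %g' % (time.time() - t0, ftot))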
Here is a modified version of DI_Prf, which is where most of the computation time of DI_func is spent:
def DI_Prf(grid, star, map, phase=None, vv=None, vr=None, nonoise=None):
# velocity array
if vv is not None:
nv = len(vv)
else:
nv = int(np.ceil(2.0 * star['vrange'] / star['vstep']))
vv = -star['vrange'] + np.arange(nv, dtype=float) * star['vstep']
# phase array
if phase is None:
phase = np.arange(star['nphases'], dtype=float) / star['nphases']
# velocity correction for each phase
vr = np.zeros(star['nphases'], dtype=float) if vr is None else vr
# fixed trigonometric quantities
cosi = np.cos(np.deg2rad(star['incl'])); sini = np.sin(np.deg2rad(star['incl']))
coslat = np.cos(grid['lat']); sinlat = np.sin(grid['lat'])
# FWHM to Gaussian sigma
sigm = star['fwhm'] / np.sqrt(8.0 * np.log(2.0))
isig = (-0.5 / sigm ** 2)
# initialize line profile and integrated field arrays
prf = np.zeros((nv, len(phase)), dtype=float)
# gradient if called with 5 - variable input
grad = np.zeros((nv, len(phase), grid['ntot']), dtype=float)
# phase loop
for i in range(len(phase)):
coslon = np.cos(grid['lon'] + 2.0 * np.pi * phase[i])
sinlon = np.sin(grid['lon'] + 2.0 * np.pi * phase[i])
mu = sinlat * cosi + coslat * sini * coslon
ivis = np.argwhere(mu > 0.).T[0]
dv = -sinlon[ivis] * coslat[ivis] * star['vsini']
avis = grid['area'][ivis] * mu[ivis] * (1.0 - star['limbd'] + star['limbd'] * mu[ivis])
if star['type'] == 0:
wgt = avis * map[ivis]
wgtn = sum(wgt)
#for j in range(nv):
# plc = 1.0 - star['d'] * np.exp(isig * (vv[j] + dv - vr[i]) ** 2)
# prf[j][i] = sum(wgt * plc) / wgtn
# grad[j][i][ivis] = avis * plc / wgtn - avis * prf[j][i] / wgtn
plc = 1.0 - star['d'] * np.exp(isig * (vv[:, np.newaxis] + dv[np.newaxis, :] - vr[i]) ** 2)
prf[:, i] = np.sum(wgt * plc, axis=1) / wgtn
grad[:, i, ivis] = avis * plc / wgtn - (avis[:, np.newaxis]*prf[:, i]).T / wgtn
elif star['type'] == 1:
wgt = avis
wgtn = sum(wgt)
for j in range(nv): # to be modified too
plc = 1.0 - map[ivis] * star['d'] * np.exp(isig * (vv[j] + dv - vr[i]) ** 2)
prf[j][i] = sum(wgt * plc) / wgtn
grad[j][i][ivis] = -wgt / wgtn * star['d'] * np.exp(isig * (vv[j] + dv - vr[i]) ** 2)
# output structure
syn = {'v': vv, 'phase': phase, 'prf': prf}
# add noise
if star['snr'] != -1 and nonoise != None:
#for i in range(star['nphases']):
obs = syn['prf'] + np.random.standard_normal(size=syn['prf'].shape) / star['snr']
syn['obs'] = obs
return syn, grad
This reduces the runtime by a factor of about 3:
%%timeit
syn, grad = DI_Prf(grid, star, cmap, phase=obs['phase'], vv=obs['v'])
# 127 ms ± 2.61 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
# 40.7 ms ± 683 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
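Most of that gain comes from replacing the inner velocity loop with a single broadcasted 2-D array. The pattern, shown here with made-up shapes, is:
import numpy as np

vv = np.linspace(-70.0, 70.0, 141)           # velocity grid, shape (nv,)
dv = np.random.uniform(-50.0, 50.0, 900)     # Doppler shifts of the visible elements, shape (nvis,)
# vv[:, np.newaxis] has shape (nv, 1) and dv[np.newaxis, :] has shape (1, nvis),
# so broadcasting builds the full (nv, nvis) grid in one vectorized operation.
offsets = vv[:, np.newaxis] + dv[np.newaxis, :]
print(offsets.shape)                         # (141, 900)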
The main idea with NumPy is to avoid loops and instead work with multidimensional arrays, relying on broadcasting. For instance:
fchi = 0.0
for i in range(star['nphases']):
    fchi = fchi + sign * sum((syn['prf'][:, i] - obs['obs'][:, i]) ** 2 / er ** 2) / nv
could be replaced with:
fchi = sign / nv / er ** 2 * np.sum( np.sum((syn['prf'] - obs['obs']) ** 2, axis=1 ) )
The same applies to np.random.standard_normal(size=syn['prf'].shape).
It is not a big improvement here because star['nphases'] is small, but it matters more for the longer axes. You could go further and also remove the for loop over the phases in DI_Prf, but that requires some more thought.
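A quick sanity check (a sketch, assuming syn, obs, star, sign, er and nv are already defined as above) that the vectorized expression matches the original loop:
fchi_loop = 0.0
for i in range(star['nphases']):
    fchi_loop += sign * np.sum((syn['prf'][:, i] - obs['obs'][:, i]) ** 2 / er ** 2) / nv
fchi_vec = sign / nv / er ** 2 * np.sum((syn['prf'] - obs['obs']) ** 2)
assert np.isclose(fchi_loop, fchi_vec)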