Matrix vs scalar expression mismatch when using NumPy - python
Whilst running a simulation of a discrete-time system, I'm having trouble getting the equivalent scalar expressions of the original matrix equation to generate identical output (I need to convert all matrix expressions to scalars in order to implement the code in my embedded C project).
Please run the function with the following input parameters.
torque_sim_ctest(0.006,0,0.01,12)
The linked screenshots show the expected output computed using the matrix expression (correct) versus the output produced by the (supposedly) equivalent scalar expressions (incorrect).
Below is the code, which should give the correct output by default. To test the scalar computations, comment out line 98 (qo_next = np.dot(Ado,qo) + np.dot(Bo,np.vstack([hallf, v[:,k]]))), uncomment lines 100 through 102, run the script, and then call the function with the same parameters as shown above.
Lines to uncomment in place of the qo_next = ... matrix expression quoted above:
# qo_next[0] = Ado[0,0]*qo[0] + Ado[0,1]*qo[1] + Ado[0,2]*qo[2] + Bo[0,0]*hallf
# qo_next[1] = Ado[1,0]*qo[0] + Ado[1,1]*qo[1] + Ado[1,2]*qo[2] + Bo[1,0]*hallf + Bo[1,1]*v[:,k]
# qo_next[2] = Ado[2,0]*qo[0] + Ado[2,1]*qo[1] + Ado[2,2]*qo[2] + Bo[2,0]*hallf
I would really appreciate it if someone could point out the bug in the scalar version of the code and help me fix this (numerical?) issue. Thank you!
import numpy as np
import scipy.signal as sig
import matplotlib.pyplot as plt
def torque_sim_ctest(K,Kp,Kd,tau_ampl):
    # Example parameter values: torque_sim_ctest(0.006,0,0.01,12)
    # -----------------------------------------------------------------------------
    # Motor & System Parameters in continuous time:
    bm, Jm, K_e, K_t = 1.5e-6, 3.507e-6, 12.5e-3, 12.5e-3
    R, L = 0.476, 0.334e-3
    gr, Jl, bl = 16.348, 10, 20
    Jeq, beq = Jl + Jm*gr**2, bl + bm*gr**2
    A = np.array([ [ 0, 1 ],
                   [ 0, -beq/Jeq ] ])
    Ac = np.array( [[ 0, 1, 0 ],
                    [ 0, 0, 1 ],
                    [ 0, 0, 0 ]])
    B = np.array([ [ 0, 0 ],
                   [ gr/Jeq, gr**2*K_t/(Jeq*R) ]])
    Bc = np.array([ 0, 1, 0 ]).reshape(3,1)
    C = np.array([ 1, 0 ])
    Co = np.array([ 1, 0, 0 ]).reshape(1,3)
    # -----------------------------------------------------------------------------
    # Controller update interval Tc and simulation time step Td
    Tc = 0.01
    Td = 0.1*Tc
    # Now discretise the continuous-time system model
    Ad, Bd, Cd, Dd, dt = sig.cont2discrete((A,B,C,0),Td,method='bilinear')
    # Output smoothing: filter parameters
    f_ord = 2
    num, den = sig.cheby1(f_ord,5,2*np.pi*10,'low',analog=True)
    Af, Bf, Cf, Df = sig.tf2ss(num,den)
    Afd, Bfd, Cfd, Dfd, dt = sig.cont2discrete((Af,Bf,Cf,Df),Tc,
                                               method='bilinear')
    # -----------------------------------------------------------------------------
    # Observer Parameters
    eps = 0.01
    alph = Tc/eps
    a1, a2, a3 = 3, 3, 1
    Ho = np.array([a1, a2, a3]).reshape(3,1)
    Ao = Ac - np.dot(Ho,Co)
    D = np.diag([1, eps, eps**2])
    Do = np.zeros([3,1])
    Ado, Bdo, Cdo, Ddo, dt = sig.cont2discrete((Ao,Ho,np.linalg.inv(D),Do),
                                               alph,method='bilinear')
    Bo = np.hstack([Bdo.reshape(3,1), Bc.reshape(3,1)])
    # -----------------------------------------------------------------------------
    # Total simulation steps
    n = 5001
    # Define initial conditions prior to running simulation
    x, q = np.zeros([2,n]), np.zeros([2,1])
    q_next = np.zeros([2,1])
    y, v = 0, np.zeros([1,n])
    tau_ext = np.zeros([1,n])
    hallc = 0
    qf, qf_next, hallf = np.zeros([f_ord,1]), np.zeros([f_ord,1]), 0
    qo, qo_next, xhat = np.zeros([3,1]), np.zeros([3,1]), np.zeros([3,1])
    # The for loop below simulates the closed-loop system in discrete time steps Td
    print('Simulation running; please wait ...')
    for k in range(n-1):
        # External torque input with peaks at 0.5 s and 1.6 s
        tau_ext[:,k] = tau_ampl*(0.5*np.exp(-10*(k*Td-0.5)**2) \
                                 + np.exp(-5*(k*Td-1.6)**2))
        # y is the output angle in SI units (radians)
        y = gr*np.dot(C,x[:,k])
        # Conversion of rotor angle to ideal hall-count
        hallc = int(150/np.pi*y+0.5*np.sign(y))
        # Now apply a smoothing filter
        # qf_next = Afd @ qf + Bfd @ np.matrix([hallc[:,k]/gr])
        # hallf = Cf @ (qf + qf_next)
        qf_next[0] = 0.661939208333454*qf[0] - 19.836230603818*qf[1] + 0.00830969604166727*hallc/gr
        qf_next[1] = 0.00830969604166727*qf[0] + 0.90081884698091*qf[1] + 4.15484802083364e-5*hallc/gr
        # Filtered hall-count
        hallf = 1342.37547903*(qf[1] + qf_next[1])
        # -----------------------------------------------------------------------------
        # State Observer & Control Law
        # Update the controller states only every Tc = 10*Td time step
        if np.mod(k,int(Tc/Td+0.5)) == 0:
            qo_next = np.dot(Ado,qo) + np.dot(Bo,np.vstack([hallf, v[:,k]]))
            # qo_next[0] = Ado[0,0]*qo[0] + Ado[0,1]*qo[1] + Ado[0,2]*qo[2] + Bo[0,0]*hallf
            # qo_next[1] = Ado[1,0]*qo[0] + Ado[1,1]*qo[1] + Ado[1,2]*qo[2] + Bo[1,0]*hallf + Bo[1,1]*v[:,k]
            # qo_next[2] = Ado[2,0]*qo[0] + Ado[2,1]*qo[1] + Ado[2,2]*qo[2] + Bo[2,0]*hallf
            # qo_next[0] = 1.40740740740741*hallf - 0.407407407407407*qo[0] + 0.296296296296296*qo[1] + 0.148148148148148*qo[2]
            # qo_next[1] = 1.03703703703704*hallf - 1.03703703703704*qo[0] + 0.481481481481481*qo[1] + 0.740740740740741*qo[2] + v[:,k]
            # qo_next[2] = 0.296296296296296*hallf - 0.296296296296296*qo[0] - 0.148148148148148*qo[1] + 0.925925925925926*qo[2]
            xhat[0] = 0.703703703703704*hallf + 0.296296296296296*qo[0] + 0.148148148148148*qo[1] + 0.0740740740740741*qo[2]
            xhat[1] = 51.8518518518519*hallf - 51.8518518518518*qo[0] + 74.0740740740741*qo[1] + 37.037037037037*qo[2]
            xhat[2] = 1481.48148148148*hallf - 1481.48148148148*qo[0] - 740.740740740741*qo[1] + 9629.62962962963*qo[2]
            # xhat = Cdo @ qo + Ddo @ np.matrix(hallf)
            # Update the control voltage v[:,k]
            v[:,k] = K*xhat[2] + Kp*hallf + Kd*xhat[1]
            if v[:,k] < 0:
                v[:,k] = 0
            elif v[:,k] >= 12:
                v[:,k] = 12
        else:
            # Wait and hold; don't update in this cycle
            qo_next = qo
            v[:,k] = v[:,k-1]
        # -----------------------------------------------------------------------------
        # Now calculate the system dynamics in discrete time steps Td
        q_next = np.dot(Ad,q) + np.dot(Bd,np.vstack([tau_ext[:,k], v[:,k]]))
        x[:,k+1:k+2] = q + q_next
        q = q_next
        qf[0] = qf_next[0]
        qf[1] = qf_next[1]
        qo[0] = qo_next[0]
        qo[1] = qo_next[1]
        qo[2] = qo_next[2]
    # ** End For Loop **
    # Plot the simulation output
    # NOTE: System velocity signal is x[1,:], and position is y = x[0,:]
    print('Done.')
    tau_spr = gr*K_t/R*v[0,0:n-1]
    tau_net = tau_spr + tau_ext[0,0:n-1]
    t = np.linspace(0.0,(n-1)*Td,num=n-1)
    fig1, ax1 = plt.subplots(2,2)
    ax1[0,0].plot(t,tau_ext[0,0:n-1],'r--',t,tau_spr,'b-')
    ax1[0,0].set_xlabel('Time (s)')
    ax1[0,0].set_ylabel('Torque (N m)')
    ax1[0,0].set_title('Torque Comparison')
    ax1[0,1].plot(t,tau_ext[0,0:n-1]/tau_net,'r--',t,tau_spr/tau_net,'b-')
    ax1[0,1].set_xlabel('Time (s)')
    ax1[0,1].set_ylabel('Torque (Normalised)')
    ax1[0,1].set_title('Torque Comparison')
    ax1[1,0].step(t,v[0,0:n-1],'g-')
    ax1[1,0].set_xlabel('Time (s)')
    ax1[1,0].set_ylabel('Drive Voltage (V)')
    ax1[1,0].set_title('Motor Voltage')
    ax1[1,1].step(t,30/np.pi*x[1,0:n-1],'m-')
    ax1[1,1].set_xlabel('Time (s)')
    ax1[1,1].set_ylabel('Motor Speed (rpm)')
    ax1[1,1].set_title('Motor Speed vs Time')
Phew, I may have finally got to the bottom of my issue, even though I don't fully understand the nature of the problem with array vs. scalar assignments.
When I use the matrix computation qo_next = np.dot(Ado,qo) + np.dot(Bo,np.vstack([hallf, v[:,k]])), my implementation of the zero-order hold further down (qo_next = qo) appears to work as one would expect.
When I convert the first assignment to a set of scalar expressions, however, it appears I must also implement the zero-order hold with element-by-element assignments, i.e.:
qo_next[0] = qo[0]
qo_next[1] = qo[1]
qo_next[2] = qo[2]
When I do that, the scalar conversion magically works and generates output equivalent to the vector version.
That was a real head-scratcher -- perhaps someone can explain to me how assignment works in Python so I can make sense of this. Thank you for reading.
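A minimal sketch of what appears to be going on, separate from the simulation code: assignment in Python binds a name to an object rather than copying values, so qo_next = qo makes both names refer to the same array, whereas np.dot(...) returns a brand-new array on every call. Once the two names are aliased, the scalar updates write into qo while its old elements are still needed on the right-hand side, which corrupts the observer state. An element-wise copy (or qo_next[:] = qo) keeps the two arrays separate:

import numpy as np

qo = np.zeros(3)
qo_next = qo              # rebinding: both names now refer to the SAME array
qo_next[0] = 5.0          # an in-place element write is visible through qo too
print(qo[0])              # prints 5.0

qo = np.zeros(3)
qo_next = np.zeros(3)
qo_next[:] = qo           # element-wise copy: qo_next remains a separate array
qo_next[0] = 5.0
print(qo[0])              # prints 0.0, qo is untouched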
Related
My while loop is not looping through the components in the array
I want the while loop at the end of my code to perform the calculation for each component of delta_omega, and print the result before moving on to the next number in the delta_omega array. When I run the code python outputs 'The rate of capture at a detuning of -5000000 is 3.0E+13' an infinite number of times before I manually stop it. I am unsure of why this is happening and how to fix it. I tried using break at the end of the loop, but this just performed the calculation once for the first component of the delta_omega array.

import numpy as np
from scipy.integrate import odeint
#import matplotlib.pyplot as plt

# Constants
m_Rb = 1.443*10**-25 #mass of rubidium 87
k_b = 1.38*10**-23
h = 6.63*10**-34
hbar = 1.05*10**-34
L = 38.116*10**6 #natural linewidth
epsilon_0 = 8.85418782*10**-12 #permittivity of free space

# Changable paramaters
lmbda = 780*10**-9 #wavelength of laser light
k = (2*np.pi)/lmbda #wavevector of laser light
B = 5*10**-4 #magnetic field strength

# D2 effective magetic moment
gj_gnd = 1 + (0.5*(0.5+1) + 0.5*(0.5+1) - 0*(0+1))/(2*0.5*(0.5+1))
mj_gnd = 0.5
gj_ex = 1 + (1.5*(1.5+1) + 0.5*(0.5+1) - 1*(1+1))/(2*1.5*(1.5+1))
mj_ex = 1.5
Bohr = 9.274*10**-24 #Bohr magneton value
mu_eff = Bohr*(gj_ex*mj_ex - gj_gnd*mj_gnd)

# -------- Before slower --------
T = 700 #temperature of oven
vp = ((2*k_b*T)/m_Rb)**0.5 #mean velocity of particles coming out of the oven
x_os = 0.1
a = (hbar*L*k)/(2*m_Rb) #max decelleration of atoms
vf_oven = (vp**2 + (2*a*x_os))**0.5
t_b = (2*x_os)/(vp + vf_oven) #time taken from oven to start of slower

# -------- During slower --------
length_slow = 0.5
vz_max = (vf_oven**2 + 2*a*length_slow)**0.5
Z = 0.7
#Z = np.linspace(0, length_slow, 100)
P = 10**(4.312-(4040/T)) #vapour pressure for liquid phase (use 4.857 for solid phase)
A = 5*10**-4 #area of the oven aperture
n = P/(k_b*T) #atomic number density
I = 1*10**5 #intensity
n0 = 1 #refraction constant for medium
E_0 = ((2*I)/(3*10**8*n0*epsilon_0))**0.5
Rabi = (E_0*3.5844*10**-29)/hbar
II_sat = (2*Rabi**2)/L**2

delta_omega = np.array([-5*10**6, -10*10**6, -30*10**6]) #range of frequencies

i = 0
while i<len(delta_omega):
    B_p = (h/mu_eff) * (delta_omega[i] + (1/lmbda)*(vf_oven**2 - (2*a*length_slow))**0.5)
    B_n = (h/mu_eff) * (delta_omega[i] - (1/lmbda)*(vf_oven**2 - (2*a*length_slow))**0.5)
    delta_n = delta_omega[i] + (k*vf_oven) - (mu_eff*B_n)/hbar
    delta_p = delta_omega[i] - (k*vf_oven) + (mu_eff*B_p)/hbar
    F = (hbar*k*L)/2 * ((II_sat/(1+II_sat+(2*delta_n/L)**2)) - (II_sat/(1+II_sat+(2*delta_p/L)**2)))
    accn = abs(F/m_Rb)
    vf_slower = (vf_oven**2 - (2*accn*length_slow))**0.5
    t_d = 1/accn * (vf_oven - vf_slower) #time taken during slower

    # -------- After slower --------
    da = 0.1 #distance from end of slower to the middle of the MOT
    vf_MOT = (vf_slower**2 - (2*accn*da))**0.5
    t_a = da/vf_MOT #time taken after slower
    r0 = 0.01 #MOT capture radius
    vr_max = r0/(t_b+t_d+t_a)

    # -------- Flux of atoms captured --------
    f_oven = ((n*A)/4) * (2/(np.pi)**0.5) * ((2*k_b*T)/m_Rb)**0.5
    f = f_oven * (1 - np.exp(-vr_max**2/vp**2))*(1 - np.exp(-vz_max**2/vp**2))
    print('The rate of capture at a detuning of', delta_omega[i], 'is', format(f, '.1E'))
It seems you forgot to increment i within the loop...
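A minimal, self-contained sketch of the two usual fixes (the per-detuning physics is omitted here; delta_omega is taken from the question): either advance i at the end of the while body, or drop the manual index and iterate over the array directly.

import numpy as np

delta_omega = np.array([-5*10**6, -10*10**6, -30*10**6])

i = 0
while i < len(delta_omega):
    # ... per-detuning calculation goes here ...
    print('processing detuning', delta_omega[i])
    i += 1                      # without this increment the first detuning repeats forever

# more idiomatic: let the for loop handle the indexing
for detuning in delta_omega:
    print('processing detuning', detuning)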
How to assign an infinite value to a variable in gekko?
I am trying to assign an infinite value to a gekko variable. I have tried NumPy's infinity and Python's own infinity, but neither works because gekko does not recognise them. The main objective is to force a variable to be strictly equal to 0, at least in the first iteration of the solver.

from gekko import GEKKO
from numpy import Inf

model=GEKKO()
R=model.FV(value=Inf)
T=model.Array(model.Var,2)
Q=model.FV()
model.Equation(Q==(T[1]-T[0])/R)
model.solve()

And the error I am getting:

Exception: #error: Model Expression
 *** Error in syntax of function string: Invalid element: inf

Moreover, sometimes other variables are also required to be infinite, again variables that are located in the denominator of a model equation. This is quite useful in order to try different scenarios of the simulation I am working with and check the system's behaviour. Hope you can help me, thank you.
The large-scale NLP and MINLP solvers don't know how to compute gradients with a np.nan value so initializing with NaN generally doesn't help. Please post example code that demonstrates the issue that you are observing with improved performance from NaN initialization. Below are four unconstrained optimization methods compared on the same sample problem. The algorithms do not benefit from NaN for initialization. Some solvers substitute NaN with 0 or a high or low number. I suggest that you try giving np.nan as an initial condition to these solution methods to see how it affects the search for the minimum.

import matplotlib
import numpy as np
import matplotlib.pyplot as plt

# define objective function
def f(x):
    x1 = x[0]
    x2 = x[1]
    obj = x1**2 - 2.0 * x1 * x2 + 4 * x2**2
    return obj

# define objective gradient
def dfdx(x):
    x1 = x[0]
    x2 = x[1]
    grad = []
    grad.append(2.0 * x1 - 2.0 * x2)
    grad.append(-2.0 * x1 + 8.0 * x2)
    return grad

# Exact 2nd derivatives (hessian)
H = [[2.0, -2.0],[-2.0, 8.0]]

# Start location
x_start = [-3.0, 2.0]

# Design variables at mesh points
i1 = np.arange(-4.0, 4.0, 0.1)
i2 = np.arange(-4.0, 4.0, 0.1)
x1_mesh, x2_mesh = np.meshgrid(i1, i2)
f_mesh = x1_mesh**2 - 2.0 * x1_mesh * x2_mesh + 4 * x2_mesh**2

# Create a contour plot
plt.figure()
# Specify contour lines
lines = range(2,52,2)
# Plot contours
CS = plt.contour(x1_mesh, x2_mesh, f_mesh,lines)
# Label contours
plt.clabel(CS, inline=1, fontsize=10)
# Add some text to the plot
plt.title(r'$f(x)=x_1^2 - 2x_1x_2 + 4x_2^2$')
plt.xlabel(r'$x_1$')
plt.ylabel(r'$x_2$')

##################################################
# Newton's method
##################################################
xn = np.zeros((2,2))
xn[0] = x_start
# Get gradient at start location (df/dx or grad(f))
gn = dfdx(xn[0])
# Compute search direction and magnitude (dx)
#  with dx = -inv(H) * grad
delta_xn = np.empty((1,2))
delta_xn = -np.linalg.solve(H,gn)
xn[1] = xn[0]+delta_xn
plt.plot(xn[:,0],xn[:,1],'k-o')

##################################################
# Steepest descent method
##################################################
# Number of iterations
n = 8
# Use this alpha for every line search
alpha = 0.15
# Initialize xs
xs = np.zeros((n+1,2))
xs[0] = x_start
# Get gradient at start location (df/dx or grad(f))
for i in range(n):
    gs = dfdx(xs[i])
    # Compute search direction and magnitude (dx)
    #  with dx = - grad but no line searching
    xs[i+1] = xs[i] - np.dot(alpha,dfdx(xs[i]))
plt.plot(xs[:,0],xs[:,1],'g-o')

##################################################
# Conjugate gradient method
##################################################
# Number of iterations
n = 8
# Use this alpha for the first line search
alpha = 0.15
neg = [[-1.0,0.0],[0.0,-1.0]]
# Initialize xc
xc = np.zeros((n+1,2))
xc[0] = x_start
# Initialize delta_gc
delta_cg = np.zeros((n+1,2))
# Initialize gc
gc = np.zeros((n+1,2))
# Get gradient at start location (df/dx or grad(f))
for i in range(n):
    gc[i] = dfdx(xc[i])
    # Compute search direction and magnitude (dx)
    #  with dx = - grad but no line searching
    if i==0:
        beta = 0
        delta_cg[i] = - np.dot(alpha,dfdx(xc[i]))
    else:
        beta = np.dot(gc[i],gc[i]) / np.dot(gc[i-1],gc[i-1])
        delta_cg[i] = alpha * np.dot(neg,dfdx(xc[i])) + beta * delta_cg[i-1]
    xc[i+1] = xc[i] + delta_cg[i]
plt.plot(xc[:,0],xc[:,1],'y-o')

##################################################
# Quasi-Newton method
##################################################
# Number of iterations
n = 8
# Use this alpha for every line search
alpha = np.linspace(0.1,1.0,n)
# Initialize delta_xq and gamma
delta_xq = np.zeros((2,1))
gamma = np.zeros((2,1))
part1 = np.zeros((2,2))
part2 = np.zeros((2,2))
part3 = np.zeros((2,2))
part4 = np.zeros((2,2))
part5 = np.zeros((2,2))
part6 = np.zeros((2,1))
part7 = np.zeros((1,1))
part8 = np.zeros((2,2))
part9 = np.zeros((2,2))
# Initialize xq
xq = np.zeros((n+1,2))
xq[0] = x_start
# Initialize gradient storage
g = np.zeros((n+1,2))
g[0] = dfdx(xq[0])
# Initialize hessian storage
h = np.zeros((n+1,2,2))
h[0] = [[1, 0.0],[0.0, 1]]
for i in range(n):
    # Compute search direction and magnitude (dx)
    #  with dx = -alpha * inv(h) * grad
    delta_xq = -np.dot(alpha[i],np.linalg.solve(h[i],g[i]))
    xq[i+1] = xq[i] + delta_xq
    # Get gradient update for next step
    g[i+1] = dfdx(xq[i+1])
    # Get hessian update for next step
    gamma = g[i+1]-g[i]
    part1 = np.outer(gamma,gamma)
    part2 = np.outer(gamma,delta_xq)
    part3 = np.dot(np.linalg.pinv(part2),part1)
    part4 = np.outer(delta_xq,delta_xq)
    part5 = np.dot(h[i],part4)
    part6 = np.dot(part5,h[i])
    part7 = np.dot(delta_xq,h[i])
    part8 = np.dot(part7,delta_xq)
    part9 = np.dot(part6,1/part8)
    h[i+1] = h[i] + part3 - part9
plt.plot(xq[:,0],xq[:,1],'r-o')

plt.tight_layout()
plt.savefig('contour.png',dpi=600)
plt.show()

More information is available in the design optimization course.

Response to Edit

Thanks for clarifying the question and for including a source code example. While it isn't possible to include Inf as a guess, an equivalent form with an additional variable x may be able to accomplish the desired behavior. This sets the term (T[1]-T[0])/R initially equal to zero at the beginning iteration.

from gekko import GEKKO
from numpy import Inf
model=GEKKO()
R=model.FV(value=1e20)
T=model.Array(model.Var,2)
x=model.Var(value=0)
Q=model.FV()
model.Equations([x==(T[1]-T[0])/R, Q==x])
model.solve()
FTCS Solution of the Wave Equation - Issues with Vpython
I am attempting to make an animation of the motion of a piano string using the facilities provided by the vpython package. There are various ways you could do this, but my goal is to do it using the curve object within the vpython package. Below is my code for the initial problem of solving the complete set of simultaneous first-order equations. Thanks in advance; I am really uncertain as to where to start with the vpython animation.

# Key Module and Function Import(s):
import numpy as np
import math as m
import pylab as py
import matplotlib
from time import time
import scipy

# Variable(s) and Constant(s):
L = 1.0 # Length on string in m
C = 1.0 # velocity of the hammer strike in ms^-1
d = 0.1 # Hammer distance from 0 to point of impact with string
N = 100 # Number of divisions in grid
sigma = 0.3 # sigma value in meters
a = L/N # Grid spacing
v = 100.0 # Initial velocity of wave on the string
h = 1e-6 # Time-step
epsilon = h/1000

# Computation(s):
def initialpsi(x):
    return (C*x*(L-x)/(L**2))*m.exp((-(x-d)**2)/(2*sigma**2)) # Definition of the function

phibeg = 0.0 # Beginning - fixed point
phimiddle = 0.0 # Initial x
phiend = 0.0 # End fixed point
psibeg = 0.0 # Initial v at beg
psiend = 0.0 # Initial v at end
t2 = 2e-3 # string at 2ms
t50 = 50e-3 # string at 50ms
t100 = 100e-3 # string at 100ms
tend = t100 + epsilon

# Creation of empty array(s)
phi = np.empty(N+1,float)
phi[0] = phibeg
phi[N] = phiend
phi[1:N] = phimiddle
phip = np.empty(N+1,float)
phip[0] = phibeg
phip[N] = phiend

psi = np.empty(N+1,float)
psi[0] = psibeg
psi[N] = psiend
for i in range(1,N):
    psi[i] = initialpsi(i*a)
psip = np.empty(N+1,float)
psip[0] = psibeg
psip[N] = psiend

# Main loop
t = 0.0
D = h*v**2 / (a*a)
timestart = time()
while t<tend:
    # Calculation the new values of T
    for i in range(1,N):
        phip[i] = phi[i] + h*psi[i]
        psip[i] = psi[i] + D*(phi[i+1]+phi[i-1]-2*phi[i])
    phip[1:N] = phi[1:N] + h*psi[1:N]
    psip[1:N] = psi[1:N] + D*(phi[0:N-1] + phi[2:N+1] -2*phi[1:N])
    phi= np.copy(phip)
    psi= np.copy(psip)
    #phi,phip = phip,phi
    #psi,psip = psip,psi
    t += h
    # Plot creation in step(s)
    if abs(t-t2)<epsilon:
        t2array = np.copy(phi)
        py.plot(phi, label = "2 ms")
    if abs(t-t50)<epsilon:
        t50array = np.copy(phi)
        py.plot(phi, label = "50 ms")
    if abs(t-t100)<epsilon:
        t100array = np.copy(phi)
        py.plot(phi, label = "100 ms")
See the curve documentation at https://www.glowscript.org/docs/VPythonDocs/curve.html
Use the "modify" method to change the individual points along the curve object, inside a loop that contains a rate statement: https://www.glowscript.org/docs/VPythonDocs/rate.html
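A minimal sketch of the kind of animation loop being suggested, assuming phi holds the string displacement produced by the FTCS update above; the scale factor, frame rate, and number of frames are arbitrary illustration choices:

from vpython import canvas, curve, vector, rate
import numpy as np

N = 100
scene = canvas(title='Piano string')
string = curve(pos=[vector(i, 0, 0) for i in range(N+1)])   # one curve point per grid node

phi = np.zeros(N+1)              # stand-in for the FTCS solution array
for frame in range(1000):
    rate(100)                    # cap the animation at roughly 100 frames per second
    # ... perform one (or a few) FTCS time steps to update phi here ...
    for i in range(N+1):
        string.modify(i, y=1000*phi[i])   # rescale the displacement so it is visible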
how to create a proper sigmoid curve?
I'm trying to use logistic regression on the popularity of hit songs on Spotify from 2010-2019, based on their duration and durability, with the data collected from a .csv file. Since the popularity value of each song is numerical, I have converted each of them to a binary 0 or 1: if the popularity value of a song is 70 or less, I replace it with 0, and with 1 if it is above 70. The current sigmoid curve is being "log" right now, hence it is showing a straight line. However, in the context of this code, I am still not sure how to add a proper sigmoid curve instead of just the straight line. Is there anything I need to add to my code in order to show both a solid sigmoid curve and the log of the curve in the same graph? It would be deeply appreciated if someone could help me with the final step.

%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('top10s [SubtitleTools.com] (2).csv')

BPM = df.bpm
BPM = np.array(BPM)
Energy = df.nrgy
Energy = np.array(Energy)
Dance = df.dnce
Dance = np.array(Dance)
dB = df.dB
dB = np.array(dB)
Live = df.live
Live = np.array(Live)
Valence = df.val
Valence = np.array(Valence)
Acous = df.acous
Acous = np.array(Acous)
Speech = df.spch
Speech = np.array(Speech)

df.loc[df['popu'] <= 70, 'popu'] = 0
df.loc[df['popu'] > 70, 'popu'] = 1

def Logistic_Regression(X, y, iterations, alpha):
    ones = np.ones((X.shape[0], ))
    X = np.vstack((ones, X))
    X = X.T
    b = np.zeros(X.shape[1])
    for i in range(iterations):
        z = np.dot(X, b)
        p_hat = sigmoid(z)
        gradient = np.dot(X.T, (y - p_hat))/y.size
        b = b + alpha * gradient
        if (i % 1000 == 0):
            print('LL, i ', log_likelihood(X, y, b), i)
    return b

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def log_likelihood(X, y, b):
    z = np.dot(X, b)
    LL = np.sum(y*z - np.log(1 + np.exp(z)))
    return LL

def LR1():
    Dur = df.dur
    Dur = np.array(Dur)
    Pop = df.popu
    Pop = [int(i) for i in Pop]; Pop = np.array(Pop)
    plt.figure(figsize=(10,8))
    colormap = np.array(['r', 'b'])
    plt.scatter(Dur, Pop, c = colormap[Pop], alpha = .4)
    b = Logistic_Regression(Dur, Pop, iterations = 8000, alpha = 0.00005)
    print('Done')
    p_hat = sigmoid(np.dot(Dur, b[1]) + b[0])
    idxDur = np.argsort(Dur)
    plt.plot(Dur[idxDur], p_hat[idxDur])
    plt.show()

LR1()

My dataset: CSV File
My Current Graph
What I want to have: Shape of sigmoid I want
At first glance, your Logistic_Regression initialization seems very wrong. I think you packed X as [X, 1] and then try to learn W = [weight, bias], which should start as [1, 0]. Note that the 1 here is the vector [1, 1, 1, ...] whose length equals the feature vector length.
Try something like this:

x_range = np.linspace(Dur.min(), Dur.max(), 100)
p_hat = sigmoid(np.dot(x_range, b[1]) + b[0])
plt.plot(x_range, p_hat)
plt.show()
Python: TypeError: 'float' object has no attribute '__getitem__'
I am trying to implement particle filter algorithm in python. I am getting this error:

x_P_update[i] = 0.5*x_P[i] + 25*x_P[i]/(1 + x_P[i]**2) + 8*math.cos(1.2*(t-1)) + math.sqrt(x_N)*np.random.randn()
TypeError: 'float' object has no attribute '__getitem__'

My code:

import math
import numpy as np
import matplotlib.pyplot as plt

x = 0.1 #initial value
x_N = 1 #process noise covariance in state update
x_R = 1 #noise covariance in measurement
T = 75 #number of iterations
N = 10 #number of particles
V = 2

x_P = [None]*(N)
for i in xrange(0, N):
    x_P[i] = x + math.sqrt(V)*np.random.randn()

z_out = np.array([x**2 / 20 + math.sqrt(x_R) * np.random.randn()]) #the actual output vector for measurement values.
x_out = np.array([x]) #the actual output vector for measurement values.
x_est = np.array([x]); # time by time output of the particle filters estimate
x_est_out = np.array([x_est]) # the vector of particle filter estimates.

x_P_update = [None]*N
z_update = [None]*N
P_w = [None]*N

for t in xrange(1, T+1):
    x = 0.5*x + 25*x/(1 + x**2) + 8*math.cos(1.2*(t-1)) + math.sqrt(x_N)*np.random.randn()
    z = x**2/20 + math.sqrt(x_R)*np.random.randn()
    for i in xrange(0, N):
        #each particle is updated with process eq
        x_P_update[i] = 0.5*x_P[i] + 25*x_P[i]/(1 + x_P[i]**2) + 8*math.cos(1.2*(t-1)) + math.sqrt(x_N)*np.random.randn()
        #observations are updated for each particle
        z_update[i] = x_P_update[i]**2/20
        #generate weights
        P_w[i] = (1/math.sqrt(2*math.pi*x_R)) * math.exp(-(z - z_update[i])**2/(2*x_R))
    P_w[:] = [ k / sum(P_w) for k in P_w]
    # print(np.where(np.cumsum(P_w, axis=0) >= np.random.rand()))
    # print(index_tuple[0][1])
    # P_w_array = np.array(list(P_w))
    # indices = [i for i in range(len(P_w)) if np.cumsum(P_w_array) >= np.random.rand()]
    for i in xrange(0, N):
        index_tuple = np.where(np.random.rand() <= np.cumsum(P_w, axis=0))
        m = index_tuple[0][1]
        x_P = x_P_update[m]
    x_est = np.array([np.mean(x_P)])
    x_out = np.array([x_out, x])
    z_out = np.array([z_out, z])
    x_est_out = np.array([x_est_out, x_est])

I am using matlab code from here to learn how to implement this algorithm in python using scipy: http://studentdavestutorials.weebly.com/particle-filter-with-matlab-code.html
I just started learning python and can't get out of this problem, kindly help.
I'm not going to go through the video tutorial and fix your algorithm, but I can show you why you're getting this error. In this line:

x_P = x_P_update[m]

you are replacing the list x_P with a single float value, which you then attempt to index as an array in the outer loop. Updating an element instead will get rid of your error:

x_P[m] = x_P_update[m]
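A stripped-down illustration of the failure mode, independent of the particle filter itself: rebinding a list name to a float and then indexing it reproduces the error, whereas an element-wise update keeps the list intact.

x_P = [1.0, 2.0, 3.0]
x_P = x_P[1] * 2.0     # rebinds x_P to a single float
# x_P[0]               # would now raise the TypeError (Python 2 mentions __getitem__,
                       # Python 3 says "'float' object is not subscriptable")

x_P = [1.0, 2.0, 3.0]
x_P[1] = x_P[1] * 2.0  # element-wise update: x_P is still a list and can be indexed
print(x_P[0])          # prints 1.0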