Python function calling with variable vs raw numbers

I am trying to implement the PSO algorithm from Wikipedia: https://en.wikipedia.org/wiki/Particle_swarm_optimization.
My problem is that when I call the cost function with a variable (gbest), and then call the cost function manually with the same gbest data, I get a different output (cost), as in the screenshot below:
[screenshot: the two calls return different cost values]
I am new to Python, so thank you for any suggestions.
Here is the complete code:
import matplotlib.pyplot as plt
import numpy as np
from control.matlab import *

A = np.array([[0, 0, 1], [0, 1, 0], [1, 2, -2]])
B = np.array([[0], [1], [0]])
C = np.array([[0, 1, 0]])
D = np.zeros([C.shape[0], B.shape[1]])

sys = ss(A, B, C, D)
sys_tf = tf(sys)
s = tf('s')

def cost(kp, ki):
    global sys_tf, G, y, t, r
    G = kp + ki/s
    C = feedback(sys_tf*G, 1)
    y, t = step(C, linspace(0, 100))
    r = np.ones(len(t))
    return np.sum(y - r)**2

part = 100
ite = 10000
dim = 2
w = 0.001
wdamp = 0.99
phip = 0.9
phig = 0.1
blo, bup = -10, 10

x = np.zeros([dim, part])
v = np.zeros([dim, part])
pbest = np.zeros([dim, part])
gbest = np.array([1000000, 1000000])

for i in range(part):
    for k in range(dim):
        x[k][i] = pbest[k][i] = np.random.uniform(blo, bup)
        v[k][i] = np.random.uniform(-np.abs(bup - blo), np.abs(bup - blo))
    if cost(pbest[0][i], pbest[1][i]) < cost(gbest[0], gbest[1]):
        gbest = np.array([pbest[0][i], pbest[1][i]])

for it in range(ite):
    for i in range(part):
        for k in range(dim):
            rp = np.random.uniform(0, 1)
            rg = np.random.uniform(0, 1)
            v[k,:] = w*v[k,:] + phip*rp*(pbest[k,:] - x[k,:]) + phig*rg*(gbest[k] - x[k,:])
            x[k,:] = x[k,:] + v[k,:]
        w = w*wdamp
        if cost(x[0][i], x[1][i]) < cost(pbest[0][i], pbest[1][i]):
            pbest[:,i] = x[:,i]
            if cost(pbest[0][i], pbest[1][i]) < cost(gbest[0], gbest[1]):
                gbest = np.array([pbest[0][i], pbest[1][i]])
                plt.plot(t, y, 'ro')
                plt.plot(t, r, 'x')
                plt.pause(0.005)

plt.title(gbest)
print([gbest, cost(gbest[0], gbest[1])])
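One way to narrow this down (a minimal diagnostic sketch, not part of the original post; kp_manual and ki_manual are hypothetical digits retyped from the console printout) is to check whether both calls really receive identical arguments, since a printed gbest is rounded while the stored array is not:

kp_manual, ki_manual = -2.08, 0.73            # hypothetical values copied from the printout
print(repr(gbest[0]), repr(gbest[1]))         # full-precision values actually stored in gbest
print(cost(gbest[0], gbest[1]))               # cost called with the variable
print(cost(kp_manual, ki_manual))             # cost called with the manually entered numbers
print(np.isclose(gbest[0], kp_manual), np.isclose(gbest[1], ki_manual))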

Related

Why I got ModuleNotFoundError when I try to run the import statement?

I'm trying to run this in Jupyter, but when I run the code I get an error that says ModuleNotFoundError: No module named 'anna_phog'. I have another Python file named 'anna_phog'. How do I fix this?
Below is 'anna_phog_demo.py', where I get the error:
from anna_phog import anna_phog
import imageio
import matplotlib.pyplot as plt
image_path = "image_0058.jpg"
S = 8
angle = 360
Level = 3
roi = [1,225,1,300]
save=True
Image = imageio.imread(image_path)
p = anna_phog(Image, bin, angle, Level, roi)
print("P: \n{}".format(p))
print(len(p), type(p))
And below is the 'anna_phog.py' code
import numpy as np
import imageio
import cv2
import matplotlib.pyplot as plt
def anna_phog(Img, bin, angle, L, roi):
    if Img.shape[2] == 3:
        G = cv2.cvtColor(Img, cv2.COLOR_BGR2GRAY)
    else:
        G = Img
    if np.sum(G) > 100:
        # apply automatic Canny edge detection using the computed median
        sigma = 0.33
        v = np.median(G)
        lower = int(max(0, (1.0 - sigma) * v))
        upper = int(min(255, (1.0 + sigma) * v))
        E = cv2.Canny(G, lower, upper)  # high and low threshold
        GradientX, GradientY = np.gradient(G)
        GradientYY = np.gradient(GradientY, axis=1)
        Gr = np.sqrt(np.square(GradientX) + np.square(GradientY))
        index = GradientX == 0
        GradientX[index] = 1e-5  # maybe another value
        YX = GradientY*GradientX
        if angle == 180: A = ((np.arctan(YX) + (np.pi/2))*180)/np.pi
        if angle == 360: A = ((np.arctan2(GradientY, GradientX) + np.pi)*180)/np.pi
        bh, bv = anna_BinMatrix(A, E, Gr, angle, bin)
    else:
        bh = np.zeros(Img.shape)
        bv = np.zeros(Img.shape)
    bh_roi = bh[roi[0]:roi[1], roi[2]:roi[3]]
    bv_roi = bv[roi[0]:roi[1], roi[2]:roi[3]]
    p = anna_PhogDescriptor(bh_roi, bv_roi, L, bin)
    return p

def anna_BinMatrix(A, E, G, angle, bin):
    n, contorns = cv2.connectedComponents(E, connectivity=8)
    X = E.shape[1]
    Y = E.shape[0]
    bm = np.zeros(shape=(Y, X))
    bv = np.zeros(shape=(Y, X))
    nAngle = angle/bin
    for i in range(n):
        posY, posX = np.where(contorns == i)
        for j in range(posY.shape[0]):
            pos_x = posX[j]
            pos_y = posY[j]
            b = np.ceil(A[pos_y, pos_x]/nAngle)
            if b == 0: bin = 1
            if G[pos_y, pos_x] > 0:
                bm[pos_y, pos_x] = b
                bv[pos_y, pos_x] = G[pos_y, pos_x]
    return (bm, bv)

def anna_PhogDescriptor(bh, bv, L, bin):
    p = np.array([])
    # level 0
    for b in range(bin):
        ind = bh == b
        p = np.append(p, np.sum(bv[ind]))
    # higher levels
    for l in range(1, L+1):
        x = int(np.trunc(bh.shape[1]/(2**l)))
        y = int(np.trunc(bh.shape[0]/(2**l)))
        for xx in range(0, bh.shape[1]-x+1, x):
            for yy in range(0, bh.shape[0]-y+1, y):
                print(l)
                bh_cella = bh[yy:yy+y, xx:xx+x]
                bv_cella = bv[yy:yy+y, xx:xx+x]
                for b in range(bin):
                    ind = bh_cella == b
                    p = np.append(p, np.sum(bv_cella[ind], axis=0))
    if np.sum(p) != 0:
        p = p/np.sum(p)
    return p
Below is a screenshot of the folder where I put these files:
[screenshot: folder path]
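If anna_phog.py does not live in the directory Jupyter is running from, one common workaround (a sketch under that assumption; the folder path below is hypothetical) is to put its folder on sys.path before importing:

import os, sys
print(os.getcwd())                                    # directory the notebook is actually running from
sys.path.append(r"C:/path/to/folder_with_anna_phog")  # hypothetical folder containing anna_phog.py
from anna_phog import anna_phog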

MPC with python and Error ValueError: `f0` passed has more than 1 dimension

I wrote an MPC with Python and it worked before. After a long time I wanted to use it again, but I get this error:
f0 passed has more than 1 dimension.
I didn't change anything in my code, which is rather strange.
Here is my code:
import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
from scipy.optimize import minimize

def mpcAugment(Am, Bm, Cm):
    "Function for Augmented Model"
    nx, nu = Bm.shape
    ny = Cm.shape[0]
    A = np.zeros((nx+ny, nx+ny))
    A[0:nx, 0:nx] = Am
    A[nx:nx+ny, 0:nx] = Cm@Am
    A[nx:nx+ny, nx:nx+ny] = np.eye(ny)
    B = np.zeros((nx+ny, nu))
    B[0:nx, :nu] = Bm
    B[nx:nx+ny, :nu] = Cm@Bm
    C = np.zeros((ny, nx+ny))
    C[:ny, nx:nx+ny] = np.eye(ny)
    return A, B, C

'Define Parameters'
k = 0.4
AICB = 153.8
mcp = 8.8e4
vamb1 = 30
vamb2 = 45
a = -k*AICB/mcp
b = -1/mcp
Ts = 20
VICBref = -5.0

Am = np.array([[1+Ts*a]])
Bm = np.array([[Ts*b]])
Gm = np.array([[-Ts*a]])
Cm = np.array([[1]])

A, B, C = mpcAugment(Am, Bm, Cm)
A, G, C = mpcAugment(Am, Gm, Cm)

nx, nu = B.shape
ny = C.shape[0]
nd = G.shape[1]
Np = 20
Nu = 5

F = np.zeros((Np*ny, nx))
PHI = np.zeros((Np*ny, Nu*nu))
PHIw = np.zeros((Np*ny, Np*nd))
for i in range(0, Np):
    Ai = npl.matrix_power(A, i+1)
    F[i*ny:(i+1)*ny, :] = C@Ai
    for j in range(0, Nu):
        if j <= i:
            Aij = np.linalg.matrix_power(A, i-j)
            PHI[i*ny:(i+1)*ny, j*nu:(j+1)*nu] = C@Aij@B
    for j in range(0, Np):
        if j <= i:
            Aij = np.linalg.matrix_power(A, i-j)
            PHIw[i*ny:(i+1)*ny, j*nd:(j+1)*nd] = C@Aij@G

umax = 3100
umin = 0
Q = np.eye(Np*ny)
R = 1e-2*np.eye(Nu*nu)
Rs = VICBref*np.ones((Np*ny, 1))

Ainq = np.zeros((2*Nu*nu, Nu*nu))
binq = np.zeros((2*Nu*nu, 1))
cinq = np.zeros((2*Nu*nu, 1))
for i in range(0, Nu):
    binq[i*nu:(i+1)*nu] = umax
    binq[(i+Nu)*nu:(Nu+i+1)*nu] = 1
    cinq[i*nu:(i+1)*nu] = 1
    cinq[(i+Nu)*nu:(Nu+i+1)*nu] = -1
    for j in range(0, i+1):
        Ainq[i*nu:(i+1)*nu, j*nu:(j+1)*nu] = np.eye(nu)
        Ainq[(i+Nu)*nu:(Nu+i+1)*nu, j*nu:(j+1)*nu] = np.eye(nu)

u0 = 0

def objective(du):
    dU = np.array(du).reshape((len(du), 1))
    Y = F@x + PHI@dU + PHIw@w
    return np.transpose((Rs-Y))@(Rs-Y) + np.transpose(dU)@R@(dU)

def constraint1(du):
    dU = np.array(du).reshape((len(du), 1))
    return (binq - Ainq@dU - cinq*u0)[0]

#print(objective([1,1,1]))
ulim = (umin, umax)
bnds = np.kron(np.ones((Nu, 1)), ulim)
#print(bnds)
Um = np.ones((nu*Nu, 1))

Tsim = 5e4
time = np.arange(0, Tsim, Ts)
Nt = len(time)
xm = np.zeros((Nt, 1))
um = np.zeros((Nt, nu))
ym = np.zeros((Nt, ny))
xm[0] = 0
ym[0] = Cm.dot(xm[0])
w = np.zeros((Np*nd, 1))
print('Am = ', Am)
print('Bm = ', Bm)
print('Cm = ', Cm)
x = np.zeros((nx, 1))
x[1] = xm[0]
vamb = vamb1
Vamb = np.zeros((Nt, 1))
Ns = int(np.floor(Nt/2))
Vamb[0:Ns] = vamb1*np.ones((Ns, 1))
Vamb[Ns:Nt] = vamb2*np.ones((Nt-Ns, 1))
Vref = VICBref*np.ones((Nt, 1))
con = {'type': 'ineq', 'fun': constraint1}

for i in range(0, Nt-1):
    sol = minimize(objective, Um, method='SLSQP', constraints=con)
    if sol.success == False:
        print('Error Cant solve problem')
        exit()
    Um = sol.x
    um[i+1] = um[i] + Um[0]
    u0 = um[i+1]
    xm[i+1] = Am.dot(xm[i]) + Bm.dot(um[i+1]) + Gm.dot(Vamb[i])
    ym[i+1] = Cm.dot(xm[i+1])
    for j in range(0, Np):
        if i+j < Nt:
            Rs[j] = Vref[i+j]
            w[j] = Vamb[i+j] - Vamb[i+j-1]
        else:
            Rs[j] = Vref[Nt-1]
            w[j] = 0
    x[0] = xm[i+1] - xm[i]
    x[1] = xm[i+1]
    print('Q = ', um[i+1], ' , VICB = ', xm[i+1], ' vamb = ', Vamb[i])

hour = 60*60
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(time/hour, ym)
plt.plot(time/hour, Vref, '--')
plt.xlabel('time(hours)')
plt.xlim([0, Tsim/hour])
plt.subplot(2, 1, 2)
plt.plot(time/hour, um)
plt.xlim([0, Tsim/hour])
plt.show()
It is about a controller which controls the temperature of a cooling box.
Is it possible that something changed even though my plain code stayed the same?
I think the problem is in the minimization part.
Update: I reinstalled all of my libraries and it worked.
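For reference, recent SciPy releases require the objective passed to minimize to return a scalar rather than an array; a minimal sketch of that change (an assumption about the cause, not confirmed in the original post) keeps the same cost but converts the 1x1 result:

def objective(du):
    dU = np.array(du).reshape((len(du), 1))
    Y = F@x + PHI@dU + PHIw@w
    J = np.transpose(Rs - Y)@(Rs - Y) + np.transpose(dU)@R@dU   # 1x1 matrix
    return float(J)                                             # minimize expects a scalar value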

Numerical issue in scipy.ode.integrate solver

I am using the ode solver to solve a stiff problem (since the odeint function was not able to solve it). But this way I also get some warnings, and my plot saturates at some point, as shown here: [plot: solution saturating]. What should I do? Here is the list of warnings:
DVODE-- Warning..internal T (=R1) and H (=R2) are
such that in the machine, T + H = T on the next step
(H = step size). solver will continue anyway
In above, R1 = 0.3667661010318D+00 R2 = 0.1426374862242D-16
DVODE-- Warning..internal T (=R1) and H (=R2) are
such that in the machine, T + H = T on the next step
(H = step size). solver will continue anyway
In above, R1 = 0.3667661010318D+00 R2 = 0.1426374862242D-16
DVODE-- Above warning has been issued I1 times.
it will not be issued again for this problem
In above message, I1 = 2
DVODE-- At current T (=R1), MXSTEP (=I1) steps
taken on this call before reaching TOUT
In above message, I1 = 500
In above message, R1 = 0.3667661010318D+00
My code:
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as si

def func():
    # arguments:::
    w = 1./3.
    xi = 2.86
    phi1 = 1.645
    phi2 = 2.*1.202
    gt = 10.**(-60)
    Lt = (1.202*gt)/np.pi
    Lin = 10.**-5
    Lf = 0.49
    dt = 0.0001
    gin = gt*Lt/Lin
    xin = (-np.log((3. - (xi**2)*Lin)/(3. - (xi**2)*Lt)) + np.log(Lin/Lt))/4.0
    uin = -(np.log(Lin/Lt))/2.
    state0 = [gin, xin, uin]
    print(state0)

    def eq(L, state):
        g = state[0]
        x = state[1]
        u = state[2]
        N = (-2.*g/(6.*np.pi + 5.*g))*(18./(1. - 2.*L) + 5.*np.log(1. - 2.*L) - phi1 + 6.)
        B = (-(2. - N)*L) - ((g/np.pi)*(5.*np.log(1. - 2.*L) - phi2 + (5.*N/40.)))
        Eqs = np.zeros((3))
        gdl = Eqs[0] = ((2. + N)*g)/B
        xdl = Eqs[1] = -(2./(3.*(1. + w)))*(1./(1. - (xi**2)*L/3.))*(1./B)
        udl = Eqs[2] = 1./B
        return Eqs

    ode = si.ode(eq)
    # BDF method suited to stiff systems of ODEs
    ode.set_integrator('vode', nsteps=500, method='bdf')
    ode.set_initial_value(state0, Lin)
    L = []
    G = []
    while ode.successful() and ode.t < Lf:
        ode.integrate(ode.t + dt)
        L.append(ode.t)
        G.append(ode.y)
    lam = np.vstack(L)
    g, x, u = np.vstack(G).T
    return g, x, u, lam

r = func()
L = r[3]
g = r[0]
lng = np.log10(g)
x = r[1]
u = r[2]
w = 1./3.
xi = 2.86
O_A = np.zeros(len(L))
q = np.zeros(len(L))
for i in np.arange(len(L)):
    O_A[i] = xi**2*L[i]/3.
    alpha = 2./((3. + 3.*w)*(1. - (L[i]*xi**2)/3.))
    q[i] = 1./alpha - 1.
n = np.zeros(len(L))  # eta(n)
b = np.zeros(len(L))
for j in np.arange(len(L)):
    n[j] = (-2.*g[j]/(6.*np.pi + 5.*g[j]))*(18./(1. - 2.*L[j]) + 5.*np.log(1. - 2.*L[j]) - 1.645 + 6.)
    b[j] = (-(2. - n[j])*L[j]) - ((g[j]/np.pi)*(5.*np.log(1. - 2.*L[j]) - 2.*1.202 + ((5.*n[j])/4.)))
P = np.zeros(len(x))
for k in np.arange(len(x)):
    C = (((3. - (xi**2)*L[k])/g[k])**(3./4.))*(((2.*L[k] + (u[k]*b[k]))*xi**2) + (n[k]*(3. - L[k]*xi**2)))
    P[k] = (np.exp(3.*x[k]))*(np.exp(4.*u[k]))*C
plt.figure()
plt.plot(L, P)
plt.xlabel('Lambda ---->')
plt.ylabel('P ----->')
plt.title('lambda Vs P')
plt.show()
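The MXSTEP warning means the 'vode' integrator hit its per-call step limit; one hedged tweak (the tolerance values below are guesses, and it may not remove the underlying stiffness issue) is to raise nsteps and set explicit tolerances when configuring the same BDF integrator:

ode = si.ode(eq)
ode.set_integrator('vode', method='bdf', nsteps=50000, rtol=1e-8, atol=1e-12)
ode.set_initial_value(state0, Lin)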

Unable to run Numba on finished code

What am I doing wrong below? I have installed Anaconda on my Mac and Numba along with it. I am trying to run Numba on my Python code, following the instructions given in the section Python with Numba here: https://www.continuum.io/blog/developer/accelerating-python-libraries-numba-part-1
I need to run this code overnight by changing zzzz from 5 to 40. I am just using zzzz = 5 now as a test case. Here is my code:
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from numba import double
from numba.decorators import jit, autojit

@jit
def Kcrit():
    # The data of the plot will be added in these lists
    x_data_plot = []
    y_data_plot = []
    zzzz = 5
    for N in range(2, zzzz, 2):
        # Constants and parameters
        epsilon = 0.01
        if N in range(2, 11, 2):
            K00 = np.logspace(0, 3, 100, 10)
        elif N in range(12, 21, 2):
            K00 = np.logspace(3, 3.75, 100, 10)
        elif N in range(22, 31, 2):
            K00 = np.logspace(3.75, 4.15, 100, 10)
        else:
            K00 = np.logspace(4.15, 4.5, 100, 10)
        len1 = len(K00)
        y0 = [0]*(3*N/2+3)
        Kplot = np.zeros((len1, 1))
        Pplot = np.zeros((len1, 1))
        S = [np.zeros((len1, 1)) for kkkk in range(N/2+1)]
        KS = [np.zeros((len1, 1)) for kkkk in range(N/2)]
        PS = [np.zeros((len1, 1)) for kkkk in range(N/2)]
        Splot = [np.zeros((len1, 1)) for kkkk in range(N/2+1)]
        KSplot = [np.zeros((len1, 1)) for kkkk in range(N/2)]
        PSplot = [np.zeros((len1, 1)) for kkkk in range(N/2)]
        for series in range(0, len1):
            K0 = K00[series]
            Q = 10
            r1 = 0.0001
            r2 = 0.001
            a = 0.001
            d = 0.001
            k = 0.999
            S10 = 1e5
            P0 = 1
            tf = 1e10
            time = np.linspace(0, tf, len1)

            # Defining dy/dt's
            def f(y, t):
                for alpha in range(0, (N/2+1)):
                    S[alpha] = y[alpha]
                for beta in range((N/2)+1, N+1):
                    KS[beta-N/2-1] = y[beta]
                for gamma in range(N+1, 3*N/2+1):
                    PS[gamma-N-1] = y[gamma]
                K = y[3*N/2+1]
                P = y[3*N/2+2]
                # The model equations
                ydot = np.zeros((3*N/2+3, 1))
                B = range((N/2)+1, N+1)
                G = range(N+1, 3*N/2+1)
                runsumPS = 0
                runsum1 = 0
                runsumKS = 0
                runsum2 = 0
                for m in range(0, N/2):
                    runsumPS = runsumPS + PS[m]
                    runsum1 = runsum1 + S[m+1]
                    runsumKS = runsumKS + KS[m]
                    runsum2 = runsum2 + S[m]
                    ydot[B[m]] = a*K*S[m]-(d+k+r1)*KS[m]
                for i in range(0, N/2-1):
                    ydot[G[i]] = a*P*S[i+1]-(d+k+r1)*PS[i]
                for p in range(1, N/2):
                    ydot[p] = -S[p]*(r1+a*K+a*P)+k*KS[p-1]+d*(PS[p-1]+KS[p])
                ydot[0] = Q-(r1+a*K)*S[0]+d*KS[0]+k*runsumPS
                ydot[N/2] = k*KS[N/2-1]-(r2+a*P)*S[N/2]+d*PS[N/2-1]
                ydot[G[N/2-1]] = a*P*S[N/2]-(d+k+r2)*PS[N/2-1]
                ydot[3*N/2+1] = (d+k+r1)*runsumKS-a*K*runsum2
                ydot[3*N/2+2] = (d+k+r1)*(runsumPS-PS[N/2-1])- \
                    a*P*runsum1+(d+k+r2)*PS[N/2-1]
                ydot_new = []
                for j in range(0, 3*N/2+3):
                    ydot_new.extend(ydot[j])
                return ydot_new

            # Initial conditions
            y0[0] = S10
            for i in range(1, 3*N/2+1):
                y0[i] = 0
            y0[3*N/2+1] = K0
            y0[3*N/2+2] = P0

            # Solve the DEs
            soln = odeint(f, y0, time, mxstep=5000)
            for alpha in range(0, (N/2+1)):
                S[alpha] = soln[:, alpha]
            for beta in range((N/2)+1, N+1):
                KS[beta-N/2-1] = soln[:, beta]
            for gamma in range(N+1, 3*N/2+1):
                PS[gamma-N-1] = soln[:, gamma]
            for alpha in range(0, (N/2+1)):
                Splot[alpha][series] = soln[len1-1, alpha]
            for beta in range((N/2)+1, N+1):
                KSplot[beta-N/2-1][series] = soln[len1-1, beta]
            for gamma in range(N+1, 3*N/2+1):
                PSplot[gamma-N-1][series] = soln[len1-1, gamma]
            u1 = 0
            u2 = 0
            u3 = 0
            for alpha in range(0, (N/2+1)):
                u1 = u1 + Splot[alpha]
            for beta in range((N/2)+1, N+1):
                u2 = u2 + KSplot[beta-N/2-1]
            for gamma in range(N+1, 3*N/2+1):
                u3 = u3 + PSplot[gamma-N-1]
            K = soln[:, 3*N/2+1]
            P = soln[:, 3*N/2+2]
            Kplot[series] = soln[len1-1, 3*N/2+1]
            Pplot[series] = soln[len1-1, 3*N/2+2]
            utot = u1+u2+u3
        # Plot
        Kcrit = abs((Q/r2)*(1+epsilon)-utot)
        v, i = Kcrit.min(0), Kcrit.argmin(0)
        # Save the new points for x and y
        x_data_plot.append(N)
        y_data_plot.append(K00[i])
    # Make the plot of all the points together
    plt.plot(x_data_plot, y_data_plot)
    plt.xlabel('N', fontsize=20)
    plt.ylabel('$K_{crit}$', fontsize=20)
    plt.show()
This is what I typed in the terminal:
from numba import autojit
numba_k = autojit()(Kcrit)
This is my error message:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'Kcrit' is not defined
Note that I am not asking whether Numba will actually speed up my code (that is an entirely different issue). I am just asking how to run it!
Help appreciated.
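The NameError suggests the interpreter session never executed the definition of Kcrit; a minimal sketch (assuming the code above is saved as kcrit_script.py, a hypothetical file name) is to import the function before wrapping it:

from kcrit_script import Kcrit      # hypothetical module holding the code above
from numba import autojit

numba_k = autojit()(Kcrit)          # now Kcrit is defined, so it can be wrapped
numba_k()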

FloatingPointError: overflow encountered in double_scalars

I've set up numpy.seterr as follows:
np.seterr(invalid='raise', over ='raise', under='raise')
And I'm getting the following error:
c = beta[j,i] + oneminusbeta[j,i]
FloatingPointError: overflow encountered in double_scalars
I've checked what beta[j,i] and oneminusbeta[j,i] are at the point of crash, and these are their values:
beta: -131.340389182
oneminusbeta: 0.0
Please note, this line of addition (beta[j,i] + oneminusbeta[j,i]) has run for thousands of iterations in a loop (that performs image classification) before crashing at this point.
How can I deal with this? Is it necessary to change the type of the numpy arrays?
This is how I've initialized them:
beta = np.empty([m,n])
oneminusbeta = np.empty([m,n])
Is it possible to cast the individual values before adding them up, rather than changing the entire array declarations? Or is this even a serious issue? Would it be safe to simply turn off the numpy.seterr configuration and let the calculations go ahead without raising the error?
Edit
Someone suggested below, and I suspected as well, that the values being added shouldn't cause an overflow. Then how can I find out where the overflow is really happening?
This is my code:
epthreshold = 709
enthreshold = -708
f.write("weights["+str(i)+", " + str(j)+"] = math.exp(beta: " +str(beta[j,i])+ " + oneminusbeta: " + str(oneminusbeta[j,i])+")\n" )
c = beta[j,i] + oneminusbeta[j,i]
weights[i,j] = math.exp(np.clip(c, enthreshold, epthreshold))
And when I check my log file, this is the line I get:
weights[5550, 13] = math.exp(beta: -131.340389182 + oneminusbeta: 0.0)
Edit 2
Here's the rest of my code, where variables n,m and H have already been initialized to integer values:
import math   # added: math.exp is used below
import sys    # added: sys.float_info is used in mylogsumexp
import numba
import numpy as np
import statsmodels.api as sm

weights = np.empty([n, m])
for curr_n in range(n):
    for curr_m in range(m):
        weights[curr_n, curr_m] = 1.0/(n)

beta = np.empty([m, n])
oneminusbeta = np.empty([m, n])
for curr_class in range(m):
    for curr_sample in range(n):
        beta[curr_class, curr_sample] = 1./m

epthreshold = 709   # positive exponential threshold
enthreshold = -708

for h in range(H):
    print "Boosting round %d ... " % h
    z = np.empty([n, m])
    for j in range(m):  # computing working responses and weights, Step 2(a)(i)
        for i in range(no_samples):
            i_class = y[i]  # get the correct class for the current sample
            if h == 0:
                z[i, j] = (int(j == i_class) - beta[j, i])/((beta[j, i])*(1. - beta[j, i]))
                weights[i, j] = beta[j, i]*(1. - beta[j, i])
            else:
                if j == i_class:
                    z[i, j] = math.exp(np.clip(-beta[j, i], enthreshold, epthreshold))
                else:
                    z[i, j] = -math.exp(np.clip(oneminusbeta[j, i], enthreshold, epthreshold))
                f.write("weights["+str(i)+", " + str(j)+"] = math.exp(beta: " + str(beta[j, i]) + " + oneminusbeta: " + str(oneminusbeta[j, i])+")\n")
                c = beta[j, i] + oneminusbeta[j, i]
                weights[i, j] = math.exp(np.clip(c, enthreshold, epthreshold))

    g_h = np.zeros([1, 1])
    j = 0
    # Calculating regression coefficients per class
    # building the parameters per j class
    for y1_w in zip(z.T, weights.T):
        y1, w = y1_w
        temp_g = sm.WLS(y1, X, w).fit()  # Step 2(a)(ii)
        if np.allclose(g_h, 0):
            g_h = temp_g.params
        else:
            g_h = np.c_[g_h, temp_g.params]
        j = j + 1

    if np.allclose(g, 0):
        g = g_h
    else:
        g = g + g_h  # Step(2)(a)(iii)

    # now set g(x), function coefficients according to new formula, step (2)(b)
    sum_g = g.sum(axis=1)
    for j in range(m):
        diff = (g[:, j] - ((1./m) * sum_g))
        g[:, j] = ((m-1.)/m) * diff
        g_per_round[h, :, j] = g[:, j]

    # Now computing beta, Step 2(c)....
    Q = 0.
    e = 0.
    for j in range(m):
        # Calculating beta and oneminusbeta for class j
        aj = 0.0
        for i in range(no_samples):
            i_class = y[i]
            X1 = X[i].reshape(1, no_features)
            g1 = g[:, j].reshape(no_features, 1)
            gc = g[:, i_class].reshape(no_features, 1)
            dot = 1. + float(np.dot(X1, g1)) - float(np.dot(X1, gc))
            aj = dot
            sum_e = 0.
            a_q = []
            a_q.append(0.)
            for j2 in range(m):  # calculating sum of e's for all j except where j=i_class
                if j2 != i_class:  # g based on j2, not necessarily g1?
                    g2 = g[:, j2].reshape(no_features, 1)
                    dot1 = 1. + float(np.dot(X1, g2)) - float(np.dot(X1, gc))
                    e2 = math.exp(np.clip(dot1, enthreshold, epthreshold))
                    sum_e = sum_e + e2
                    a_q.append(dot1)
            if (int(j == i_class) == 1):
                a_q_arr = np.array(a_q)
                alpha = np.array(a_q_arr[1:])
                Q = mylogsumexp(f, a_q_arr, 1, 0)
                sumalpha = mylogsumexp(f, alpha, 1, 0)
                beta[j, i] = -Q
                oneminusbeta[j, i] = sumalpha - Q
            else:
                alpha = a_q
                alpha = np.array(alpha[1:])
                a_q_arr = np.array(a_q)
                Q = mylogsumexp(f, a_q_arr, 0, aj)
                sumalpha = log(math.exp(np.clip(Q, enthreshold, epthreshold)) - math.exp(np.clip(aj, enthreshold, epthreshold)))
                beta[j, i] = aj - Q
                oneminusbeta[j, i] = sumalpha - Q
and the function mylogsumexp is:
def mylogsumexp(f, a, is_class, maxaj, axis=None, b=None):
    np.seterr(over="raise", under="raise", invalid="raise")
    threshold = -sys.float_info.max
    maxthreshold = sys.float_info.max
    epthreshold = 709   # positive exponential threshold
    enthreshold = -708
    a = asarray(a)
    if axis is None:
        a = a.ravel()
    else:
        a = rollaxis(a, axis)
    if is_class == 1:
        a_max = a.max(axis=0)
    else:
        a_max = maxaj
    #bnone = " none "
    if b is not None:
        a_max = maxaj
        b = asarray(b)
        if axis is None:
            b = b.ravel()
        else:
            b = rollaxis(b, axis)
        a = np.clip(a - a_max, enthreshold, epthreshold)
        midout = np.sum(np.exp(a), axis=0)
        midout = 1.0 + np.clip(midout - math.exp(a_max), threshold, maxthreshold)
        out = np.log(midout)
    else:
        a = np.clip(a - a_max, enthreshold, epthreshold)
        out = np.log(np.sum(np.exp(a)))
    out += a_max
    if out == float("inf"):
        out = maxthreshold
    if out == float("-inf"):
        out = threshold
    return out
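To locate where the overflow really happens, one hedged approach (not from the original post; it reuses beta, oneminusbeta, weights, and the thresholds defined in the code above) is to keep the global error state non-fatal and raise only around the suspect block, dumping the exact operands when it trips:

import math
import numpy as np

np.seterr(all="warn")                                 # global setting stays non-fatal
with np.errstate(over="raise", invalid="raise"):      # raise only inside this block
    try:
        c = beta[j, i] + oneminusbeta[j, i]           # suspect line from the traceback
        weights[i, j] = math.exp(np.clip(c, enthreshold, epthreshold))
    except (FloatingPointError, OverflowError):
        # print the exact operands that triggered the error, then re-raise
        print("overflow at i=%d, j=%d: beta=%r, oneminusbeta=%r" % (i, j, beta[j, i], oneminusbeta[j, i]))
        raise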
