I am trying to implement a PSO (particle swarm optimization) algorithm from Wikipedia: https://en.wikipedia.org/wiki/Particle_swarm_optimization.
My problem is that when I call the cost function with a variable (Gbest) and then call the cost function manually with the same Gbest data, I get a different output (cost), as in the image below:
[image: code fault]
I am new to Python, so thank you for any suggestions.
Here is the complete code:
import matplotlib.pyplot as plt
import numpy as np
from control.matlab import *
A = np.array([[0,0,1],[0,1,0],[1,2,-2]])
B = np.array( [[0],[1],[0]])
C = np.array([[0, 1,0]])
D = np.zeros([C.shape[0],B.shape[1]])
sys = ss(A,B,C,D)
sys_tf = tf(sys)
s = tf('s')
def cost(kp,ki):
    global sys_tf, G, y, t, r
    G = kp + ki/s
    C = feedback(sys_tf*G, 1)
    y, t = step(C, linspace(0,100))
    r = np.ones(len(t))
    return np.sum(y-r)**2
part = 100
ite = 10000
dim = 2
w = 0.001
wdamp = 0.99
phip = 0.9
phig = 0.1
blo, bup = -10,10
x = np.zeros([dim, part])
v = np.zeros([dim, part])
pbest = np.zeros([dim, part])
gbest = np.array([1000000,1000000])
for i in range(part):
    for k in range(dim):
        x[k][i] = pbest[k][i] = np.random.uniform(blo, bup)
        v[k][i] = np.random.uniform(-np.abs(bup - blo), np.abs(bup - blo))
    if cost(pbest[0][i], pbest[1][i]) < cost(gbest[0], gbest[1]):
        gbest = np.array([pbest[0][i], pbest[1][i]])
for it in range(ite):
    for i in range(part):
        for k in range(dim):
            rp = np.random.uniform(0,1)
            rg = np.random.uniform(0,1)
            v[k,:] = w*v[k,:] + phip*rp*(pbest[k,:] - x[k,:]) + phig*rg*(gbest[k] - x[k,:])
            x[k,:] = x[k,:] + v[k,:]
        w = w*wdamp
        if cost(x[0][i], x[1][i]) < cost(pbest[0][i], pbest[1][i]):
            pbest[:,i] = x[:,i]
            if cost(pbest[0][i], pbest[1][i]) < cost(gbest[0], gbest[1]):
                gbest = np.array([pbest[0][i], pbest[1][i]])
                plt.plot(t, y, 'ro')
                plt.plot(t, r, 'x')
                plt.pause(0.005)
                plt.title(gbest)
print([gbest, cost(gbest[0], gbest[1])])
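Not an answer, but two quick checks that might help narrow this down (a guess on my part, assuming the script above has already run): whether cost() is deterministic for identical inputs, and whether the mismatch simply comes from re-typing the rounded values that numpy prints rather than the full-precision contents of gbest:

# 1) cost() should return the same value for the same arguments
print(cost(gbest[0], gbest[1]) == cost(gbest[0], gbest[1]))
# 2) show gbest at full precision; re-typing the rounded display values
#    into cost() by hand can give a slightly different number
print(repr(gbest[0]), repr(gbest[1]))
print(cost(gbest[0], gbest[1]))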
I am implementing a genetic algorithm, but I am facing an error after the first generation with the message: ValueError: negative dimensions are not allowed.
I had changed the nfilters parameter from nfilters=[74,27,23] to nfilters=[64,128,256]; I don't know whether the error is due to these parameters.
I declared my Sequential subclass as follows:
class CNN(Sequential):
    def __init__(self,nfilters,sfilters):
        super().__init__()
        tf.random.set_seed(0)
        self.add(Conv2D(nfilters[0],kernel_size=(sfilters[0],sfilters[0]),padding='same',activation='relu',input_shape=(50,50,3)))
        self.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))
        self.add(Conv2D(nfilters[1],kernel_size=(sfilters[1],sfilters[1]),padding='same',activation='relu'))
        self.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))
        self.add(Conv2D(nfilters[2],kernel_size=(sfilters[2],sfilters[2]),padding='same',activation='relu'))
        self.add(Conv2D(nfilters[2], kernel_size=(sfilters[2], sfilters[2]), padding='same', activation='relu'))
        self.add(Flatten())
        self.add(Dropout(0.5))
        self.add(Dense(128,activation='relu'))
        self.add(Dropout(0.5))
        self.add(Dense(128, activation='relu'))
        self.add(Dense(num_classes, activation='sigmoid'))
        self.compile(loss=keras.losses.binary_crossentropy,
                     optimizer=tf.optimizers.Adam(learning_rate=0.001),
                     metrics=['accuracy'])
nfilters = [64,128,256] #nfilters = [74,27,23]
sfilters = [9,3,2] #sfilters = [9,3,2]
Then my Genetic class is declared as follows:
class Genetic:
    def __init__(self,pop_size,nlayers,max_nfilters,max_sfilters):
        self.pop_size = pop_size
        self.nlayers = nlayers
        self.max_nfilters = max_nfilters
        self.max_sfilters = max_sfilters
        self.max_acc = 0
        self.best_arch = np.zeros((1,6))
        self.gen_acc = []
    def generate_population(self):
        np.random.seed(0)
        pop_nlayers = np.random.randint(1,self.max_nfilters,(self.pop_size,self.nlayers))
        pop_sfilters = np.random.randint(1,self.max_sfilters,(self.pop_size,self.nlayers))
        pop_total = np.concatenate((pop_nlayers,pop_sfilters),axis=1)
        return pop_total
    def select_parents(self,pop,nparents,fitness):
        parents = np.zeros((nparents,pop.shape[1]))
        for i in range(nparents):
            best = np.argmax(fitness)
            parents[i] = pop[best]
            fitness[best] = -99999
        return parents
    def crossover(self,parents):
        nchild = self.pop_size - parents.shape[0]
        nparents = parents.shape[0]
        child = np.zeros((nchild,parents.shape[1]))
        for i in range(nchild):
            first = i % nparents
            second = (i+1) % nparents
            child[i,:2] = parents[first][:2]
            child[i,2] = parents[second][2]
            child[i,3:5] = parents[first][3:5]
            child[i,5] = parents[second][5]
        return child
    def mutation(self,child):
        for i in range(child.shape[0]):
            val = np.random.randint(1,6)
            ind = np.random.randint(1,4) - 1
            if child[i][ind] + val > 100:
                child[i][ind] -= val
            else:
                child[i][ind] += val
            val = np.random.randint(1,4)
            ind = np.random.randint(4,7) - 1
            if child[i][ind] + val > 20:
                child[i][ind] -= val
            else:
                child[i][ind] += val
        return child
    def fitness(self,pop,X,Y,epochs):
        pop_acc = []
        for i in range(pop.shape[0]):
            nfilters = pop[i][0:3]
            sfilters = pop[i][3:]
            model = CNN(nfilters,sfilters)
            #H = model.fit_generator(datagen.flow(X,Y,batch_size=256),epochs=epochs,callbacks=[early_stopping_monitor])
            H = model.fit_generator(datagen.flow(X,Y,batch_size=256),steps_per_epoch=len(X_trainRusReshaped) / batch_size,epochs=epochs,validation_data=(X_testRusReshaped, Y_testRusHot),callbacks=[early_stopping_monitor])
            acc = H.history['accuracy']
            pop_acc.append(max(acc)*100)
        if max(pop_acc) > self.max_acc:
            self.max_acc = max(pop_acc)
            self.best_arch = pop[np.argmax(pop_acc)]
        self.gen_acc.append(max(pop_acc))
        return pop_acc
    def smooth_curve(self,factor,gen):
        smoothed_points = []
        for point in self.gen_acc:
            if smoothed_points:
                prev = smoothed_points[-1]
                smoothed_points.append(prev*factor + point * (1-factor))
            else:
                smoothed_points.append(point)
        plt.plot(range(gen+1),smoothed_points,'g',label='Smoothed training acc')
        plt.xticks(np.arange(gen+1))
        plt.legend()
        plt.title('Fitness Accuracy vs Generations')
        plt.xlabel('Generations')
        plt.ylabel('Fitness (%)')
        plt.show()
        plt.savefig('smoothCurve.png')
When I run the following lines, I get the error after 20 epochs of the first generation:
#Starting Genetic Algorithm
pop_size = 2 #10
nlayers = 3 #3
max_nfilters = 500 #100
max_sfilters = 20
epochs = 20
num_generations = 2 #10
genCNN = Genetic(pop_size,nlayers,max_nfilters,max_sfilters)
pop = genCNN.generate_population()
for i in range(num_generations+1):
    pop_acc = genCNN.fitness(pop,X_trainRusReshaped,Y_trainRusHot,epochs)
    print('Best Accuracy at the generation {}: {}'.format(i,genCNN.max_acc))
    parents = genCNN.select_parents(pop,5,pop_acc.copy())
    child = genCNN.crossover(parents)
    child = genCNN.mutation(child)
    pop = np.concatenate((parents,child),axis=0).astype('int')
Any idea where this error is coming from? I tried increasing max_nfilters from 100 to 500, but that did not solve anything.
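One observation that may be related (an aside, not necessarily the whole story): with pop_size = 2 but select_parents(pop, 5, ...), crossover computes nchild = self.pop_size - parents.shape[0] = 2 - 5 = -3, and NumPy raises exactly this ValueError when asked to allocate an array with a negative dimension. A minimal sketch reproducing that failure mode (the variable names mirror the code above):

import numpy as np

pop_size = 2       # population size used above
nparents = 5       # number of parents requested via select_parents(pop, 5, ...)
nchild = pop_size - nparents   # = -3 when more parents are selected than pop_size

try:
    child = np.zeros((nchild, 6))   # same call shape as in crossover()
except ValueError as err:
    print(err)   # -> negative dimensions are not allowed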
I wrote an MPC (model predictive controller) in Python and it worked before. After a long time I wanted to use it again, but now I get this error:
f0 passed has more than 1 dimension.
I didn't change anything in my code, which is rather strange.
Here is my code:
import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
from scipy.optimize import minimize
def mpcAugment(Am, Bm, Cm ):
    "Function for Augmented Model"
    nx, nu = Bm.shape
    ny = Cm.shape[0]
    A = np.zeros((nx+ny,nx+ny))
    A[0:nx,0:nx] = Am
    A[nx:nx+ny,0:nx] = Cm@Am
    A[nx:nx+ny,nx:nx+ny] = np.eye(ny)
    B = np.zeros((nx+ny,nu))
    B[0:nx,:nu] = Bm
    B[nx:nx+ny,:nu] = Cm@Bm
    C = np.zeros((ny,nx+ny))
    C[:ny,nx:nx+ny] = np.eye(ny)
    return A, B, C
'Define Parameters'
k = 0.4
AICB = 153.8
mcp = 8.8e4
vamb1 = 30
vamb2 = 45
a = -k*AICB/mcp
b = -1/mcp
Ts = 20
VICBref = -5.0
Am = np.array([[1+Ts*a]])
Bm = np.array([[Ts*b]])
Gm = np.array([[-Ts*a]])
Cm = np.array([[1]])
A, B, C = mpcAugment(Am,Bm,Cm)
A, G, C = mpcAugment(Am,Gm,Cm)
nx, nu = B.shape
ny = C.shape[0]
nd = G.shape[1]
Np = 20
Nu = 5
F = np.zeros((Np*ny,nx))
PHI = np.zeros((Np*ny,Nu*nu))
PHIw = np.zeros((Np*ny,Np*nd))
for i in range(0,Np):
    Ai = npl.matrix_power(A, i+1)
    F[i*ny:(i+1)*ny,:] = C@Ai
    for j in range(0, Nu):
        if j <= i:
            Aij = np.linalg.matrix_power(A, i-j)
            PHI[i*ny:(i+1)*ny, j*nu:(j+1)*nu] = C@Aij@B
    for j in range(0, Np):
        if j <= i:
            Aij = np.linalg.matrix_power(A, i-j)
            PHIw[i*ny:(i+1)*ny, j*nd:(j+1)*nd] = C@Aij@G
umax = 3100
umin = 0
Q = np.eye(Np*ny)
R = 1e-2*np.eye(Nu*nu)
Rs = VICBref*np.ones((Np*ny,1))
Ainq = np.zeros((2*Nu*nu,Nu*nu))
binq = np.zeros((2*Nu*nu,1))
cinq = np.zeros((2*Nu*nu,1))
for i in range(0,Nu):
    binq[i*nu:(i+1)*nu] = umax
    binq[(i+Nu)*nu:(Nu+i+1)*nu] = 1
    cinq[i*nu:(i+1)*nu] = 1
    cinq[(i+Nu)*nu:(Nu+i+1)*nu] = -1
    for j in range(0,i+1):
        Ainq[i*nu:(i+1)*nu,j*nu:(j+1)*nu] = np.eye(nu)
        Ainq[(i+Nu)*nu:(Nu+i+1)*nu,j*nu:(j+1)*nu] = np.eye(nu)
u0 = 0
def objective(du):
    dU = np.array(du).reshape((len(du),1))
    Y = F@x + PHI@dU + PHIw@w
    return np.transpose((Rs-Y))@(Rs-Y)+np.transpose(dU)@R@(dU)
def constraint1(du):
    dU = np.array(du).reshape((len(du),1))
    return (binq - Ainq@dU - cinq*u0)[0]
#print(objective([1,1,1]))
ulim = (umin, umax)
bnds = np.kron(np.ones((Nu,1)),ulim)
#print(bnds)
Um = np.ones((nu*Nu,1))
Tsim = 5e4
time = np.arange(0,Tsim,Ts)
Nt = len(time)
xm = np.zeros((Nt,1))
um = np.zeros((Nt,nu))
ym = np.zeros((Nt,ny))
xm[0] = 0
ym[0] = Cm.dot(xm[0])
w = np.zeros((Np*nd,1))
print('Am = ',Am)
print('Bm = ',Bm)
print('Cm = ',Cm)
x = np.zeros((nx,1))
x[1] = xm[0]
vamb = vamb1
Vamb = np.zeros((Nt,1))
Ns = int(np.floor(Nt/2))
Vamb[0:Ns] = vamb1*np.ones((Ns,1))
Vamb[Ns:Nt] = vamb2*np.ones((Nt-Ns,1))
Vref = VICBref*np.ones((Nt,1))
con = {'type':'ineq','fun':constraint1}
for i in range(0,Nt-1):
    sol = minimize(objective, Um, method = 'SLSQP',constraints = con)
    if sol.success == False:
        print('Error Cant solve problem')
        exit()
    Um = sol.x
    um[i+1] = um[i] + Um[0]
    u0 = um[i+1]
    xm[i+1] = Am.dot(xm[i])+Bm.dot(um[i+1])+Gm.dot(Vamb[i])
    ym[i+1] = Cm.dot(xm[i+1])
    for j in range(0,Np):
        if i+j < Nt:
            Rs[j] = Vref[i+j]
            w[j] = Vamb[i+j]-Vamb[i+j-1]
        else:
            Rs[j] = Vref[Nt-1]
            w[j] = 0
    x[0] = xm[i+1] - xm[i]
    x[1] = xm[i+1]
    print('Q = ',um[i+1],' , VICB = ',xm[i+1], ' vamb = ', Vamb[i])
hour = 60*60
plt.figure()
plt.subplot(2,1,1)
plt.plot(time/hour,ym)
plt.plot(time/hour,Vref,'--')
plt.xlabel('time(hours)')
plt.xlim([0, Tsim/hour])
plt.subplot(2,1,2)
plt.plot(time/hour,um)
plt.xlim([0, Tsim/hour])
plt.show()
It is about a controller that regulates the temperature of a cool box.
Is it possible that something outside my code has changed?
I think the problem is now in the minimization part.
I reinstalled all of my libraries and it worked.
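For what it is worth, this particular message is typically raised by recent SciPy versions when the objective handed to scipy.optimize.minimize returns an array with more than one dimension instead of a scalar; the objective above returns a (1, 1) matrix from the quadratic form. A minimal sketch of a scalar-returning variant, assuming F, x, PHI, PHIw, w, Rs and R are defined as in the script:

import numpy as np

def objective_scalar(du):
    # Same cost as objective() above, but reduced to a plain float,
    # which is what scipy.optimize.minimize expects from an objective.
    dU = np.asarray(du).reshape((len(du), 1))
    Y = F @ x + PHI @ dU + PHIw @ w
    e = Rs - Y
    return (e.T @ e + dU.T @ R @ dU).item()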
I use the nearest neighbors method to predict the price of a stock. I have the raw data in the file example.txt and use the close column (the price at the end of each one-minute period). Linear regression predicts well (shown in green), but the nearest neighbors method only works at the beginning and then turns into a straight line. Please tell me how to fix this. Here is the code I wrote:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
class Reader:
    def __init__(self, filename='example.txt'):
        self.filename = filename
    def read(self):
        try:
            file = open(self.filename)
            return file.read()
        except IOError:
            return "File not found"
def main():
    x = Reader('example.txt')
    print(x.read())
class Regression:
    def __init__(self, window, P0, Ptest, i):
        self.window = window
        self.P0 = P0
        self.Ptest = Ptest
        self.i = i
        self.data_train = self.get_data_train()
        self.x_train = self.get_x_train()
        self.y_train = self.get_y_train()
        self.data_test = self.get_data_test()
        self.x_test = self.get_x_test()
        self.y_test = self.get_y_test()
    def get_data_train(self):
        """ Method of obtaining data train on prices for the entire period."""
        x = Reader('example.txt')
        data = x.read().splitlines()
        close_column = [x.split(',')[7] for x in data][1:]
        result = [float(item) for item in close_column]
        relative_price = result[:int(len(result)*P0)]
        return relative_price
    def get_data_test(self):
        """ Method of obtaining data test on prices for the entire period."""
        x = Reader('example.txt')
        data = x.read().splitlines()
        close_column = [x.split(',')[7] for x in data][1:]
        result = [float(item) for item in close_column]
        len_x_test = int(len(result) * Ptest)
        len_x_train = int(len(result) * P0)
        relative_price = result[(len_x_train + (len_x_test * self.i)): len_x_train + len_x_test * (self.i + 1)]
        return relative_price
    def get_x_train(self):
        x = []
        for i in range(len(self.data_train)):
            if i + self.window < len(self.data_train):
                x.append(self.data_train[i: i + self.window])
        return x
    def get_y_train(self):
        y = []
        for i in self.data_train[self.window:]:
            y += [i]
        return y
    def get_x_test(self):
        x = []
        for i in range(len(self.data_test)):
            if i + self.window < len(self.data_test):
                x.append(self.data_test[i: i + self.window])
        return x
    def get_y_test(self):
        y = []
        for i in self.data_test[self.window:]:
            y += [i]
        return y
class Linear_regression(Regression):
    def callculate(self):
        reg_linear = LinearRegression().fit(self.x_train, self.y_train)
        y_pred = reg_linear.predict(self.x_test)
        return y_pred
class Nearest_neighbor(Regression):
    def callculate(self):
        reg_neighbor = KNeighborsRegressor(n_neighbors=window, weights='distance')
        reg_neighbor.fit(self.x_train, self.y_train)
        y_pred = reg_neighbor.predict(self.x_test)
        return y_pred
window = 10
Pk = 1
P0 = 0.1
Ptest = 0.01
k = (Pk - P0)/Ptest
i = 0
y_real = []
y_neigh = []
y_lin = []
while i < k:
    lin_price = list(Linear_regression(window, P0, Ptest, i).callculate())
    neighbor = list(Nearest_neighbor(window, P0, Ptest, i).callculate())
    y_neigh.extend(neighbor)
    y_lin.extend(lin_price)
    y_real.extend(list(Linear_regression(window, P0, Ptest, i).y_test))
    i += 1
""" Output to graphs of the received data """
fig, ax = plt.subplots()
ax.plot(y_real, label='Initial data')
ax.plot(y_neigh, label='Nearest Neighbor Data')
ax.plot(y_lin, label='Linear Regression Data')
ax.set_xlabel('Time (min)')
ax.set_ylabel('Price, ($)')
ax.legend()
plt.show()
"Linear regression predicts well"
No, it never predicted well. You just looked at the graph and thought it looked kind of similar. But if you look more closely, your 'model' simply takes the price from a little while ago as its prediction of the price now. That means it isn't predicting anything: it's a history device, not a prediction device.
That's why, if you feed this sort of 'model' back into itself, you get a straight line: it always predicts that the next price will be equal to the last one.
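To make the point concrete, here is a small illustrative sketch (synthetic data, hypothetical names) showing that a 'model' which merely repeats the previous price already traces the real series almost perfectly on a chart, even though it knows nothing about the future:

import numpy as np
import matplotlib.pyplot as plt

# Synthetic one-minute close prices (a random walk) standing in for example.txt.
rng = np.random.default_rng(0)
prices = 100 + np.cumsum(rng.normal(size=500))

actual = prices[1:]      # price at minute t
lagged = prices[:-1]     # "prediction": just repeat the price at minute t-1

print("mean absolute error of the lag-1 'model':", np.mean(np.abs(actual - lagged)))

plt.plot(actual, label='actual')
plt.plot(lagged, label="lag-1 'prediction'")
plt.legend()
plt.show()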
I am trying to implement the censored-data example from Lee & Wagenmakers' book (Chapter 5.5, page 70). In PyMC2, I have the following model:
nattempts = 950
nfails = 949
n = 50 # Number of questions
y = np.zeros(nattempts)
y[nattempts-1] = 1
z = 30
unobsmin = 15
unobsmax = 25
unobsrange = np.arange(unobsmin,unobsmax+1)
theta = pymc.Uniform("theta",lower = .25, upper = 1)
@pymc.observed
def Ylike(value=z, theta = theta, n=n, censorn=nfails, unobs=unobsrange):
    ylikeobs = pymc.binomial_like(x=value, n=n, p=theta)
    ylikeunobs = np.array([])
    for i in unobs:
        ylikeunobs = np.append(pymc.binomial_like(x=i, n=n, p=theta),ylikeunobs)
    return ylikeobs+sum(ylikeunobs)*censorn
testmodel = pymc.Model([theta,Ylike])
mcmc = pymc.MCMC(testmodel)
mcmc.sample(iter = 20000, burn = 50, thin = 2)
which uses the @pymc.observed decorator.
I think I need to express the likelihood using pm.DensityDist, but I could not figure out how to do it.
OK, I found out how to do it:
with pm.Model():
    theta = pm.Uniform("theta",lower = .25, upper = 1)
    def logp(value,n,p):
        return pm.dist_math.bound(
            pm.dist_math.binomln(n, value)
            + pm.dist_math.logpow(p, value)
            + pm.dist_math.logpow(1 - p, n - value),
            0 <= value, value <= n,
            0 <= p, p <= 1)
    def Censorlike(value=z, n=n, censorn=nfails, unobs=unobsrange):
        ylikeobs = logp(value=value, n=n, p=theta)
        ylikeunobs = 0
        for i in unobs:
            ylikeunobs += logp(value=i, n=n, p=theta)
        return ylikeobs+ylikeunobs*censorn
    ylike = pm.DensityDist('ylike', Censorlike, observed={'value':z,'n':n,'censorn':nfails,'unobs':unobsrange})
    trace = pm.sample(3e3)