I have implemented a simple randomized, population-based optimization method, the Grey Wolf Optimizer (GWO). I am having some trouble properly capturing the Matplotlib plots at each iteration using the celluloid Camera.
I am running GWO on the objective function f(x, y) = x^2 + y^2. I can see the candidate solutions converging to the minimum, but the contour plot does not show up in the background.
Do you have any suggestions on how to display the contour plot behind the search agents?
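For reference, the basic celluloid pattern I am trying to follow is: create the figure once, draw each frame, call camera.snap() after drawing it, and build the animation at the end (a minimal sketch, not my actual code):

import numpy as np
import matplotlib.pyplot as plt
from celluloid import Camera

fig = plt.figure()
camera = Camera(fig)
for frame in range(10):
    # draw one frame, then photograph it
    plt.scatter(np.random.rand(5), np.random.rand(5), c='gray')
    camera.snap()
anim = camera.animate(interval=500)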
GWO Algorithm implementation
%matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
from celluloid import Camera
import ffmpeg
import PIL  # the Pillow package is imported as PIL
# X : Position vector of the initial population
# n : Initial population size
def gwo(f,max_iterations,LB,UB):

    fig = plt.figure()
    camera = Camera(fig)

    def random_population_uniform(m,a,b):
        dims = len(a)
        x = [list(a + np.multiply(np.random.rand(dims),b - a)) for i in range(m)]
        return np.array(x)

    def search_agent_fitness(fitness):
        alpha = 0
        if fitness[1] < fitness[alpha]:
            alpha, beta = 1, alpha
        else:
            beta = 1

        if fitness[2] > fitness[alpha] and fitness[2] < fitness[beta]:
            beta, delta = 2, beta
        elif fitness[2] < fitness[alpha]:
            alpha,beta,delta = 2,alpha,beta
        else:
            delta = 2

        for i in range(3,len(fitness)):
            if fitness[i] <= fitness[alpha]:
                alpha, beta, delta = i, alpha, beta
            elif fitness[i] > fitness[alpha] and fitness[i] <= fitness[beta]:
                beta, delta = i, beta
            elif fitness[i] > fitness[beta] and fitness[i] <= fitness[delta]:
                delta = i

        return alpha, beta, delta

    def plot_search_agent_positions(f,X,alpha,beta,delta,a,b):
        # Plot the positions of search agents
        x = X[:,0]
        y = X[:,1]
        s = plt.scatter(x,y,c='gray',zorder=1)
        s = plt.scatter(x[alpha],y[alpha],c='red',zorder=1)
        s = plt.scatter(x[beta],y[beta],c='blue',zorder=1)
        s = plt.scatter(x[delta],y[delta],c='green',zorder=1)
        camera.snap()

    # Initialize the position of the search agents
    X = random_population_uniform(50,np.array(LB),np.array(UB))
    n = len(X)
    l = 1

    # Plot the first image on screen
    x = np.linspace(LB[0],LB[1],1000)
    y = np.linspace(LB[0],UB[1],1000)
    X1,X2 = np.meshgrid(x,y)
    Z = f(X1,X2)
    cont = plt.contour(X1,X2,Z,20,linewidths=0.75)

    while (l < max_iterations):
        # Take the x,y coordinates of the initial population
        x = X[:,0]
        y = X[:,1]

        # Calculate the objective function for each search agent
        fitness = list(map(f,x,y))

        # Update alpha, beta and delta
        alpha,beta,delta = search_agent_fitness(fitness)

        # Plot search agent positions
        plot_search_agent_positions(f,X,alpha,beta,delta,LB,UB)

        # a decreases linearly from 2 to 0
        a = 2 - l * (2 / max_iterations)

        # Update the position of search agents including the Omegas
        for i in range(n):
            x_prey = X[alpha]
            r1 = np.random.rand(2)  # r1 is a random vector in [0,1] x [0,1]
            r2 = np.random.rand(2)  # r2 is a random vector in [0,1] x [0,1]
            A1 = 2*a*r1 - a
            C1 = 2*r2
            D_alpha = np.abs(C1 * x_prey - X[i])
            X_1 = x_prey - A1*D_alpha

            x_prey = X[beta]
            r1 = np.random.rand(2)
            r2 = np.random.rand(2)
            A2 = 2*a*r1 - a
            C2 = 2*r2
            D_beta = np.abs(C2 * x_prey - X[i])
            X_2 = x_prey - A2*D_beta

            x_prey = X[delta]
            r1 = np.random.rand(2)
            r2 = np.random.rand(2)
            A3 = 2*a*r1 - a
            C3 = 2*r2
            D_delta = np.abs(C3 * x_prey - X[i])
            X_3 = x_prey - A3*D_delta

            X[i] = (X_1 + X_2 + X_3)/3

        l = l + 1

    return X[alpha],camera
Function call
# define the objective function
def f(x,y):
    return x**2 + y**2

minimizer,camera = gwo(f,7,[-10,-10],[10,10])
animation = camera.animate(interval = 1000, repeat = True,
                           repeat_delay = 500)
Is it possible that the line x = np.linspace(LB[0],LB[1],1000) should be x = np.linspace(LB[0],UB[1],1000) instead? With your current definition, x is an array filled only with the value -10, which means you are unlikely to get a visible contour.
Another thing you might want to do is move the cont = plt.contour(X1,X2,Z,20,linewidths=0.75) line inside your plot_search_agent_positions function, so that the contour is drawn at each iteration of the animation.
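In sketch form, the two edits would be:

# build the grid over the full search domain
x = np.linspace(LB[0],UB[1],1000)
y = np.linspace(LB[0],UB[1],1000)
X1,X2 = np.meshgrid(x,y)
Z = f(X1,X2)

def plot_search_agent_positions(f,X,alpha,beta,delta,a,b,X1,X2,Z):
    # ... scatter the search agents as before ...
    plt.contour(X1,X2,Z,20,linewidths=0.75)  # contour is now drawn on every frame
    camera.snap()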
Once you make those changes, the code looks like this:
import matplotlib.pyplot as plt
import numpy as np
from celluloid import Camera
import ffmpeg
import PIL
from matplotlib import animation, rc
from IPython.display import HTML, Image # For GIF
from scipy.interpolate import griddata
rc('animation', html='html5')
# X : Position vector of the initial population
# n : Initial population size
def gwo(f,max_iterations,LB,UB):

    fig = plt.figure()
    fig.add_subplot(aspect='equal')  # equal aspect, so the contour rings are not distorted
    camera = Camera(fig)

    def random_population_uniform(m,a,b):
        dims = len(a)
        x = [list(a + np.multiply(np.random.rand(dims),b - a)) for i in range(m)]
        return np.array(x)

    def search_agent_fitness(fitness):
        alpha = 0
        if fitness[1] < fitness[alpha]:
            alpha, beta = 1, alpha
        else:
            beta = 1

        if fitness[2] > fitness[alpha] and fitness[2] < fitness[beta]:
            beta, delta = 2, beta
        elif fitness[2] < fitness[alpha]:
            alpha,beta,delta = 2,alpha,beta
        else:
            delta = 2

        for i in range(3,len(fitness)):
            if fitness[i] <= fitness[alpha]:
                alpha, beta, delta = i, alpha, beta
            elif fitness[i] > fitness[alpha] and fitness[i] <= fitness[beta]:
                beta, delta = i, beta
            elif fitness[i] > fitness[beta] and fitness[i] <= fitness[delta]:
                delta = i

        return alpha, beta, delta

    def plot_search_agent_positions(f,X,alpha,beta,delta,a,b,X1,X2,Z):
        # Plot the positions of search agents
        x = X[:,0]
        y = X[:,1]
        s = plt.scatter(x,y,c='gray',zorder=1)
        s = plt.scatter(x[alpha],y[alpha],c='red',zorder=1)
        s = plt.scatter(x[beta],y[beta],c='blue',zorder=1)
        s = plt.scatter(x[delta],y[delta],c='green',zorder=1)
        # Redraw the contour on every frame so that camera.snap() captures it
        Z = f(X1,X2)
        cont = plt.contour(X1,X2,Z,levels=20,colors='k')
        plt.clabel(cont, cont.levels, inline=True, fontsize=10)
        camera.snap()

    # Initialize the position of the search agents
    X = random_population_uniform(50,np.array(LB),np.array(UB))
    n = len(X)
    l = 1

    # Build the grid for the contour plot over the full search domain
    x = np.linspace(LB[0],UB[1],1000)
    y = np.linspace(LB[0],UB[1],1000)
    X1,X2 = np.meshgrid(x,y)
    Z = f(X1,X2)

    while (l < max_iterations):
        # Take the x,y coordinates of the initial population
        x = X[:,0]
        y = X[:,1]

        # Calculate the objective function for each search agent
        fitness = list(map(f,x,y))

        # Update alpha, beta and delta
        alpha,beta,delta = search_agent_fitness(fitness)

        # Plot search agent positions
        plot_search_agent_positions(f,X,alpha,beta,delta,LB,UB,X1,X2,Z)

        # a decreases linearly from 2 to 0
        a = 2 - l * (2 / max_iterations)

        # Update the position of search agents including the Omegas
        for i in range(n):
            x_prey = X[alpha]
            r1 = np.random.rand(2)  # r1 is a random vector in [0,1] x [0,1]
            r2 = np.random.rand(2)  # r2 is a random vector in [0,1] x [0,1]
            A1 = 2*a*r1 - a
            C1 = 2*r2
            D_alpha = np.abs(C1 * x_prey - X[i])
            X_1 = x_prey - A1*D_alpha

            x_prey = X[beta]
            r1 = np.random.rand(2)
            r2 = np.random.rand(2)
            A2 = 2*a*r1 - a
            C2 = 2*r2
            D_beta = np.abs(C2 * x_prey - X[i])
            X_2 = x_prey - A2*D_beta

            x_prey = X[delta]
            r1 = np.random.rand(2)
            r2 = np.random.rand(2)
            A3 = 2*a*r1 - a
            C3 = 2*r2
            D_delta = np.abs(C3 * x_prey - X[i])
            X_3 = x_prey - A3*D_delta

            X[i] = (X_1 + X_2 + X_3)/3

        l = l + 1

    return X[alpha],camera
# define the objective function
def f(x,y):
    return x**2 + y**2

minimizer,camera = gwo(f,7,[-10,-10],[10,10])
animation = camera.animate(interval = 1000, repeat = True, repeat_delay = 500)
And the output gives:
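If the animation does not show up inline in your notebook, it can also be saved to disk or converted to HTML5 video (both are standard Matplotlib animation methods; the GIF writer assumes Pillow is installed, the video conversion assumes ffmpeg):

animation.save('gwo.gif', writer='pillow')   # write the animation to a GIF file
# or, inside a Jupyter notebook:
HTML(animation.to_html5_video())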
I changed some things but I still have a similar problem. I am working on a Mandelbrot zoom: I try to zoom in deeper at the branches. I count the consecutive points in the set and return the branch once it reaches the defined length, then I zoom into that area and repeat. But the change gets smaller each step, and the returned branch is nearly the same as the last one.
Variables:
X0, Y0, X1 = vertices of the current area
branch = current branch
n = number of iterations
p = how many times I want to zoom
b = minimum length of the branch I am looking for
c = current complex number
z = current value
k = pixel size
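The snippets below use a few globals that are defined elsewhere in my script. For completeness, a minimal setup might look like this (the concrete values here are illustrative placeholders, not my actual settings):

from PIL import Image

x_axis, y_axis = 400, 300        # image size in pixels (placeholder values)
X0, Y0, X1 = -2.0, 1.2, 1.0      # vertices of the initial area (placeholders)
n = 100                          # Mandelbrot iterations per pixel
p = 10                           # number of zoom steps
areaImage_path = "frames"        # output directory for the saved images

class BreakOutOfALoop(Exception):
    # custom exception used to break out of the nested pixel loops
    pass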
the function that returns the branch
def fractal(n, X0, Y0, X1):  # searching for a new branch
    branch = []
    k = (X1 - X0) / x_axis  # pixel size
    b = 5  # the number of black points in the set I am looking for
    for y in range(y_axis):
        try:
            for x in range(x_axis):
                c = X0 + k * x + Y0 - k * y * 1.j  # new c
                z = c
                for m in range(n):
                    if abs(z) <= 2:
                        z = z * z + c  # new z
                    else:
                        break
                if abs(z) <= 2:
                    branch.append((x, y))  # the coordinates of those points
                else:
                    if b < len(branch) < 50:
                        raise BreakOutOfALoop  # break if a branch was found
                    branch = []
        except BreakOutOfALoop:
            break
    return branch, k
the function that calculates the vertices of the new area
def new_area(branch, X0, Y0, X1, k):  # calculating the new area
    print(branch)
    X0 = X0 + k * branch[0][0]
    Y0 = Y0 - k * branch[0][1]
    X1 = X1 + k * branch[-1][0]  # new area vertices
    return X0, Y0, X1
the loop that calls the functions
for i in range(p):
    areaImage = Image.new('RGB', (x_axis, y_axis), "white")
    area_pixels = areaImage.load()  # image load
    branch, k = fractal(n, X0, Y0, X1)
    X0, Y0, X1 = new_area(branch, X0, Y0, X1, k)
    file_name = "/" + str(i) + "_" + str(n) + "_" + str(x_axis) + "_" + str(y_axis) + ".png"  # image save
    areaImage = areaImage.save(f"{areaImage_path}{file_name}")
What am I overlooking?
Here are some pictures of the result for different values of p (p = 3 and p = 10).
I am trying to create a program that solves the mass-spring-damper system using backward differentiation. The only problem is that I am running into an IndexError that I am not sure how to solve:
import numpy as np
import matplotlib.pyplot as plt
def MSD_Solver(m,b,K):
    #input: m = mass, b = damping ratio, K = spring constant
    #output: (t,x) time vs position
    tinitial = 0
    tfinal = 15
    step = .005
    t = np.linspace(tinitial,tfinal,step)
    x = np.zeros_like(t)
    x[0] = 0
    x[1] = 0
    for k in range(len(t)-1):  # extra element so subtract by 1
        x[k] = (t**2)/((m+b)*t+(t**2)*k) + (x[k-2](-m))/((m+b)*t+(t**2)*k) + (x[k-1]((2*m)+(b*t)))/((m+b)*t+(t**2)*k)
    return plt.plot(t,x)

print(MSD_Solver(1,.5,5)),MSD_Solver(1,1,5),MSD_Solver(1,2,5)
plt.show()
The linspace doc shows that the third argument is the number of items, not the step. Your step value got truncated to 0, so the returned array for t was empty. As a result, x has no elements, and x[0] is out of range.
Try this:
tinitial = 0
tfinal = 15
step = .005
num = round((tfinal - tinitial) / step) + 1  # linspace expects an integer count
t = np.linspace(tinitial,tfinal,num)
This will get you to the semantic errors in your complex computation.
You probably want to use first- and second-order difference quotients to discretize

m*x''(t) + b*x'(t) + K*x(t) = 1

as

m*(x[j+1] - 2*x[j] + x[j-1]) + 0.5*dt*b*(x[j+1] - x[j-1]) + dt**2*K*x[j] = dt**2

so that

x[j+1] = ( dt**2 + (2*m - K*dt**2)*x[j] - (m - 0.5*dt*b)*x[j-1] ) / (m + 0.5*dt*b)
In code
def MSD_Solver(m,b,K):
    #input: m = mass, b = damping ratio, K = spring constant
    #output: (t,x) time vs position
    tinitial = 0
    tfinal = 15
    step = .005
    t = np.arange(tinitial,tfinal,step)
    x = np.zeros_like(t)
    dt = t[1]-t[0]  # use the actual time step
    x[0:2] = [0, 0]
    for j in range(1,len(t)-1):
        x[j+1] = ( dt**2 + (2*m-K*dt**2)*x[j] - (m-0.5*dt*b)*x[j-1] ) / (m+0.5*dt*b)
    return t,x
t,x = MSD_Solver(1,.5,5)
plt.plot(t,x); plt.show();
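To reproduce the three parameter sets called in the question on one figure, something along these lines should work (the loop and labels are my addition, not part of the solver above):

import matplotlib.pyplot as plt

for m_, b_, K_ in [(1, .5, 5), (1, 1, 5), (1, 2, 5)]:
    t, x = MSD_Solver(m_, b_, K_)
    plt.plot(t, x, label=f"m={m_}, b={b_}, K={K_}")
plt.legend()
plt.show()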
I have an array of 13,876 values between 0 and 1. I would like to apply sklearn.cluster.KMeans to just this vector to find the different clusters into which the values are grouped. However, KMeans seems to work with multidimensional arrays and not with one-dimensional ones. I guess there is a trick to make it work, but I don't know how. I saw that KMeans.fit() accepts "X : array-like or sparse matrix, shape=(n_samples, n_features)", but it wants the n_samples to be bigger than one.
I tried putting my array into an np.zeros() matrix and running KMeans, but then it puts all the non-null values in class 1 and the rest in class 0.
Can anyone help me run this algorithm on a one-dimensional array?
You have many samples of one feature, so you can reshape the array to shape (13876, 1) using numpy's reshape:
from sklearn.cluster import KMeans
import numpy as np
x = np.random.random(13876)
km = KMeans()
km.fit(x.reshape(-1,1)) # -1 will be calculated to be 13876 here
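Once the model is fitted, the assignments and the one-dimensional cluster centres can be read back like this (note that KMeans defaults to n_clusters=8, so pass the number of groups you actually expect):

labels = km.labels_                     # cluster index for each of the 13876 values
centers = km.cluster_centers_.ravel()   # one centre value per cluster
print(sorted(centers))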
Read about Jenks Natural Breaks. Here is a Python implementation of it, found via the link in that article:
def get_jenks_breaks(data_list, number_class):
    data_list.sort()
    mat1 = []
    for i in range(len(data_list) + 1):
        temp = []
        for j in range(number_class + 1):
            temp.append(0)
        mat1.append(temp)
    mat2 = []
    for i in range(len(data_list) + 1):
        temp = []
        for j in range(number_class + 1):
            temp.append(0)
        mat2.append(temp)
    for i in range(1, number_class + 1):
        mat1[1][i] = 1
        mat2[1][i] = 0
        for j in range(2, len(data_list) + 1):
            mat2[j][i] = float('inf')
    v = 0.0
    for l in range(2, len(data_list) + 1):
        s1 = 0.0
        s2 = 0.0
        w = 0.0
        for m in range(1, l + 1):
            i3 = l - m + 1
            val = float(data_list[i3 - 1])
            s2 += val * val
            s1 += val
            w += 1
            v = s2 - (s1 * s1) / w
            i4 = i3 - 1
            if i4 != 0:
                for j in range(2, number_class + 1):
                    if mat2[l][j] >= (v + mat2[i4][j - 1]):
                        mat1[l][j] = i3
                        mat2[l][j] = v + mat2[i4][j - 1]
        mat1[l][1] = 1
        mat2[l][1] = v
    k = len(data_list)
    kclass = []
    for i in range(number_class + 1):
        kclass.append(min(data_list))
    kclass[number_class] = float(data_list[len(data_list) - 1])
    count_num = number_class
    while count_num >= 2:  # print "rank = " + str(mat1[k][count_num])
        idx = int((mat1[k][count_num]) - 2)
        # print "val = " + str(data_list[idx])
        kclass[count_num - 1] = data_list[idx]
        k = int((mat1[k][count_num] - 1))
        count_num -= 1
    return kclass
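If you also want a cluster label for each value (this part is my addition, not something the function above returns), one simple option is numpy's digitize against the interior breaks:

import numpy as np

values = np.random.random(100)
breaks = get_jenks_breaks(list(values), 5)
# breaks[0] is the minimum and breaks[-1] the maximum, so classify against the interior breaks
labels = np.digitize(values, breaks[1:-1])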
Use and visualization:
import numpy as np
import matplotlib.pyplot as plt
def get_jenks_breaks(...):...
x = np.random.random(30)
breaks = get_jenks_breaks(x, 5)
for line in breaks:
    plt.plot([line for _ in range(len(x))], 'k--')
plt.plot(x)
plt.grid(True)
plt.show()
Result: