I want to formulate the following objective function (a minimization problem):

sum[sum[R_i * (P_i**2 + (Q_i - Qc_j * X_ij)**2) for j in range(Nc)] for i in range(N)]

where P and Q are constants, Qc is a list of proposed values, and X is our decision variable (a binary matrix). R = [0.2, 0.4, 0.5], P = [2, 4, 5], Q = [1, 3, 4], Qc = [0, 1, 3, 4, 5], N = len(P) = 3, Nc = 5.
I'm trying to get the vector X which minimizes the objective function.
You can find my attempt here:
import numpy as np
from pymoo.core.problem import ElementwiseProblem

class Problem(ElementwiseProblem):
    def __init__(self, L, n_max, Q, P, T, R):
        super().__init__(n_var=len(L), n_obj=1, n_ieq_constr=1)
        self.L = L
        self.n_max = n_max
        self.Q = Q
        self.P = P
        self.T = T
        self.R = R

    def _evaluate(self, x, out, *args, **kwargs):
        out["F"] = ((np.sum(self.P))**2 + (np.sum(self.Q - self.L[x]))**2) * np.sum(self.R)
        out["G"] = np.sum(self.Q - self.L[x])
# create the actual problem to be solved
np.random.seed(1)
P = [2, 3, 4, 5, 6]
Q = [6, 11, 13, 14, 15]
R = [0.2, 0.3, 0.4, 0.5, 0.6]
L = np.array([12, 13, 14, 15, 16, 17, 18, 19, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
n_max = 5
problem = Problem(L, n_max, Q, P, T, R)  # NOTE: T is never defined in this snippet
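A minimal sketch of one way to encode the stated double-sum objective with pymoo (the class name, the GA settings, and the 0.5 binarization threshold are my own assumptions, not part of the original attempt):

import numpy as np
from pymoo.core.problem import ElementwiseProblem
from pymoo.algorithms.soo.nonconvex.ga import GA
from pymoo.optimize import minimize

R = np.array([0.2, 0.4, 0.5])
P = np.array([2, 4, 5])
Q = np.array([1, 3, 4])
Qc = np.array([0, 1, 3, 4, 5])
N, Nc = len(P), len(Qc)

class BinaryAssignment(ElementwiseProblem):
    def __init__(self):
        # one decision variable per (i, j) pair, flattened into a vector
        super().__init__(n_var=N * Nc, n_obj=1, xl=0, xu=1)

    def _evaluate(self, x, out, *args, **kwargs):
        X = x.reshape(N, Nc) > 0.5  # binarize the continuous GA variables
        # F = sum_i sum_j R[i] * (P[i]**2 + (Q[i] - Qc[j] * X[i, j])**2)
        out["F"] = np.sum(R[:, None] * (P[:, None]**2 + (Q[:, None] - Qc[None, :] * X)**2))

res = minimize(BinaryAssignment(), GA(pop_size=50), ("n_gen", 100), seed=1)
best_X = res.X.reshape(N, Nc) > 0.5

A dedicated binary sampling/crossover/mutation setup would be cleaner, but the threshold trick keeps the sketch short.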
Currently my code returns no errors, but it generates a weird-looking set of points that is definitely not a Mandelbulb. I've looked over the formulas multiple times and everything seems right, but I could definitely be overlooking something. Any ideas? Just to note, I'm quite inexperienced with Python (I work in Java a lot, though, so I get the main ideas). Here's my code:
import bpy
import numpy as np
import math

def mandelbulb(x, y, z, iterations):
    c = x + y*1j + z*1j
    z = c
    r = 0
    for i in range(iterations):
        r2 = x*x + y*y + z*z
        if r2 > 2:
            return math.sqrt(r2)
        theta = math.atan2(math.sqrt(x*x + y*y), z)
        phi = math.atan2(y, x)
        r = math.sqrt(abs(x*x + y*y + z*z))
        x = r*r*r*r * math.cos(4*theta) * math.cos(4*phi) + c.real
        y = r*r*r*r * math.cos(4*theta) * math.sin(4*phi) + c.imag
        z = r*r*r*r * math.sin(4*theta)
    return 0

def generate_mesh(size, iterations):
    vertices = []
    for x in np.linspace(-2, 2, size):
        for y in np.linspace(-2, 2, size):
            for z in np.linspace(-2, 2, size):
                value = mandelbulb(x, y, z, iterations)
                if value >= 2:
                    vertices.append((x, y, z))
    return vertices, []

def create_mesh_object(vertices, faces, name):
    mesh = bpy.data.meshes.new(name)
    mesh.from_pydata(vertices, [], faces)
    mesh.update()
    object = bpy.data.objects.new(name, mesh)
    bpy.context.collection.objects.link(object)

def execute(size, iterations):
    vertices, faces = generate_mesh(size, iterations)
    create_mesh_object(vertices, faces, "Mandelbulb")

class MandelbulbOperator(bpy.types.Operator):
    bl_idname = "object.mandelbulb_operator"
    bl_label = "Mandelbulb Operator"
    bl_options = {'REGISTER', 'UNDO'}

    size: bpy.props.IntProperty(
        name="Size",
        default=32,
        min=1,
        max=256,
        step=1
    )
    iterations: bpy.props.IntProperty(
        name="Iterations",
        default=64,
        min=1,
        max=512,
        step=1
    )

    def execute(self, context):
        execute(self.size, self.iterations)
        return {'FINISHED'}

    def draw(self, context):
        layout = self.layout
        layout.label(text="Create a 3D Mandelbulb")
        layout.prop(self, "size")
        layout.prop(self, "iterations")

def register():
    bpy.utils.register_class(MandelbulbOperator)

def unregister():
    bpy.utils.unregister_class(MandelbulbOperator)

if __name__ == "__main__":
    register()
I tried messing with values such as size and iterations, but nothing seemed to change the look of the result, and changing the iterations straight up did nothing. I've also tried using variations on the main Mandelbulb formula, but to no avail. Any suggestions are welcome.
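For reference, the standard degree-8 formulation I'm trying to reproduce iterates v <- v^n + c in spherical coordinates, starting from v = 0. A minimal sketch, using one common convention for theta and phi (for comparison only, not the Blender integration):

import math

def mandelbulb_escape(cx, cy, cz, iterations, power=8, bailout=2.0):
    # iterate v <- v^power + c, with c = (cx, cy, cz) and v starting at 0
    x, y, z = 0.0, 0.0, 0.0
    for _ in range(iterations):
        r = math.sqrt(x*x + y*y + z*z)
        if r > bailout:
            return r  # point escaped: outside the set
        theta = math.atan2(math.sqrt(x*x + y*y), z)  # polar angle
        phi = math.atan2(y, x)                       # azimuthal angle
        rn = r ** power
        x = rn * math.sin(power*theta) * math.cos(power*phi) + cx
        y = rn * math.sin(power*theta) * math.sin(power*phi) + cy
        z = rn * math.cos(power*theta) + cz
    return 0.0  # never escaped: treat as inside the set

Note that with this sketch a point counts as inside the set exactly when the function returns 0.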
I'm trying to run this neural network script (for a regression model).
There are two classes defined below. One is the Standardizer class and the other is the NeuralNet class. The Standardizer class normalizes all the values, and the NeuralNet class builds the neural network that learns the data through feed-forward and back-propagation.
The NeuralNet constructor takes the number of inputs, hidden units, and outputs as its three parameters.
The set_hunit function is used to either update or initialize the weights. It takes the weights as its parameter.
The pack function packs the multiple weight matrices of each layer into one vector; the unpack function does the reverse.
The forward pass in the neural network propagates as shown below; the hidden layers apply tanh, while the output layer is linear:

Y = h(Z . W) = Z . W
An activation function is used to make the network non-linear. We may use tanh, RBF, etc.
In the backward pass the function takes the Z values, the target values, and the error as input. Based on the delta value, the weights and the bias are updated accordingly. This method returns the weight vector of that particular layer packed together. Below are the update rules executed during the backward pass.
V <- V + alpha * (1/N) * (1/K) * X~' (((T - Y) W') o (1 - Z^2))
W <- W + alpha * (1/N) * (1/K) * Z~' (T - Y)

(X~ and Z~ denote the inputs with a bias column of ones prepended, ' is the transpose, o is the element-wise product, N is the number of samples, and K the number of outputs.)
The train function takes the features and the target as input. gradientf unpacks the weights and proceeds with the forward pass by calling the forward function. The error is then calculated from the results of the forward pass, and back-propagation proceeds by calling the backward function with the error, Z, T (target), and _lambda as parameters.
The optimtargetf function tries to reduce the error by using the objective function and updates the weights accordingly.
The use method is applied to the test data after training the model. The testing data is passed as a parameter and standardized; then forward is applied to the data, which returns the predictions.
This raises a ModuleNotFoundError, even though I have installed the grad module with pip.
# Importing required libraries
import pandas as pd
import numpy as np
import seaborn as sns
import grad
import matplotlib.pyplot as plt

# Reading data using the pandas library
vehicle_data = pd.read_csv('processed_Data.csv')

# Overall idea about the distribution of the data
vehicle_data.hist(bins=40, figsize=(20, 15))
plt.show()

# Count plot of Electric Range
sns.countplot(x='Electric Range', data=vehicle_data)

# Joint plot with Base MSRP on the x axis and Legislative District on the y axis
sns.jointplot(x=vehicle_data.BaseMSRP.values, y=vehicle_data.LegislativeDistrict.values, height=10)
plt.xlabel("Base MSRP", fontsize=10)
plt.ylabel("Legislative District", fontsize=10)

# Drop the rows that have null or missing values
vehicle_data = vehicle_data.dropna()
# Data is already clean and has no missing values
vehicle_data.shape

# Dropping unwanted columns
vehicle_data = vehicle_data.drop(['VIN (1-10)', 'County', 'City', 'State', 'ZIP Code', 'DOL Vehicle ID'], axis=1)
vehicle_data.shape

# Separating the target variable
t = pd.DataFrame(vehicle_data.iloc[:, 8])
vehicle_data = vehicle_data.drop(['Electric Range'], axis=1)
t
vehicle_data.head()
# NeuralNet class for regression
# standardization class
class Standardizer:
    """ class version of standardization """
    def __init__(self, X, explore=False):
        # column-wise mean and standard deviation
        self._mu = np.mean(X, 0)
        self._sigma = np.std(X, 0)
        if explore:
            print("mean: ", self._mu)
            print("sigma: ", self._sigma)
            print("min: ", np.min(X, 0))
            print("max: ", np.max(X, 0))

    def set_sigma(self, s):
        self._sigma[:] = s

    def standardize(self, X):
        return (X - self._mu) / self._sigma

    def unstandardize(self, X):
        return (X * self._sigma) + self._mu

def add_ones(w):
    # prepend a bias column of ones
    return np.hstack((np.ones((w.shape[0], 1)), w))
from grad import scg, steepest
from copy import copy

class NeuralNet:
    def __init__(self, nunits):
        self._nLayers = len(nunits) - 1
        self.rho = [1] * self._nLayers
        self._W = []
        wdims = []
        lenweights = 0
        for i in range(self._nLayers):
            nwr = nunits[i] + 1  # +1 for the bias row
            nwc = nunits[i+1]
            wdims.append((nwr, nwc))
            lenweights = lenweights + nwr * nwc
        self._weights = np.random.uniform(-0.1, 0.1, lenweights)
        start = 0  # fixed index error 20110107
        for i in range(self._nLayers):
            end = start + wdims[i][0] * wdims[i][1]
            # reshape keeps a view into self._weights, so unpack() updates _W in place
            self._W.append(self._weights[start:end].reshape(wdims[i]))
            start = end
        self.stdX = None
        self.stdT = None
        self.stdTarget = True

    def add_ones(self, w):
        return np.hstack((np.ones((w.shape[0], 1)), w))

    def get_nlayers(self):
        return self._nLayers

    def set_hunit(self, w):
        for i in range(self._nLayers - 1):
            if w[i].shape != self._W[i].shape:
                print("set_hunit: shapes do not match!")
                break
            else:
                self._W[i][:] = w[i][:]

    def pack(self, w):
        return np.hstack(list(map(np.ravel, w)))

    def unpack(self, weights):
        self._weights[:] = weights[:]  # unpack

    def cp_weight(self):
        return copy(self._weights)

    def RBF(self, X, m=None, s=None):
        if m is None: m = np.mean(X)
        if s is None: s = 2  # np.std(X)
        r = 1. / (np.sqrt(2*np.pi) * s)
        return r * np.exp(-(X - m) ** 2 / (2 * s ** 2))

    def forward(self, X):
        t = X
        Z = []
        for i in range(self._nLayers):
            Z.append(t)
            if i == self._nLayers - 1:
                # linear output layer
                t = np.dot(self.add_ones(t), self._W[i])
            else:
                t = np.tanh(np.dot(self.add_ones(t), self._W[i]))
                # t = self.RBF(np.dot(np.hstack((np.ones((t.shape[0],1)),t)),self._W[i]))
        return (t, Z)

    def backward(self, error, Z, T, lmb=0):
        delta = error
        N = T.size
        dws = []
        for i in range(self._nLayers - 1, -1, -1):
            rh = float(self.rho[i]) / N
            if i == 0:
                lmbterm = 0
            else:
                lmbterm = lmb * np.vstack((np.zeros((1, self._W[i].shape[1])),
                                           self._W[i][1:,]))
            dws.insert(0, (-rh * np.dot(self.add_ones(Z[i]).T, delta) + lmbterm))
            if i != 0:
                delta = np.dot(delta, self._W[i][1:, :].T) * (1 - Z[i]**2)
        return self.pack(dws)

    def _errorf(self, T, Y):
        return T - Y

    def _objectf(self, T, Y, wpenalty):
        return 0.5 * np.mean(np.square(T - Y)) + wpenalty

    def train(self, X, T, **params):
        verbose = params.pop('verbose', False)
        # training parameters
        _lambda = params.pop('Lambda', 0.)
        # parameters for scg
        niter = params.pop('niter', 1000)
        wprecision = params.pop('wprecision', 1e-10)
        fprecision = params.pop('fprecision', 1e-10)
        wtracep = params.pop('wtracep', False)
        ftracep = params.pop('ftracep', False)
        # optimization
        optim = params.pop('optim', 'scg')
        if self.stdX is None:
            explore = params.pop('explore', False)
            self.stdX = Standardizer(X, explore)
        Xs = self.stdX.standardize(X)
        if self.stdT is None and self.stdTarget:
            self.stdT = Standardizer(T)
            T = self.stdT.standardize(T)

        def gradientf(weights):
            self.unpack(weights)
            Y, Z = self.forward(Xs)
            error = self._errorf(T, Y)
            return self.backward(error, Z, T, _lambda)

        def optimtargetf(weights):
            """ optimization target function : MSE """
            self.unpack(weights)
            Y, _ = self.forward(Xs)
            Wnb = np.array([])
            for i in range(self._nLayers):
                if len(Wnb) == 0:
                    Wnb = self._W[i][1:,].reshape(self._W[i].size - self._W[i][0,].size, 1)
                else:
                    Wnb = np.vstack((Wnb, self._W[i][1:,].reshape(self._W[i].size - self._W[i][0,].size, 1)))
            wpenalty = _lambda * np.dot(Wnb.flat, Wnb.flat)
            return self._objectf(T, Y, wpenalty)

        if optim == 'scg':
            result = scg(self.cp_weight(), gradientf, optimtargetf,
                         wPrecision=wprecision, fPrecision=fprecision,
                         nIterations=niter,
                         wtracep=wtracep, ftracep=ftracep,
                         verbose=False)
            self.unpack(result['w'][:])
            self.f = result['f']
        elif optim == 'steepest':
            result = steepest(self.cp_weight(), gradientf, optimtargetf,
                              nIterations=niter,
                              xPrecision=wprecision, fPrecision=fprecision,
                              xtracep=wtracep, ftracep=ftracep)
            self.unpack(result['w'][:])
        if ftracep:
            self.ftrace = result['ftrace']
        if 'reason' in result.keys() and verbose:
            print(result['reason'])
        return result

    def use(self, X, retZ=False):
        if self.stdX:
            Xs = self.stdX.standardize(X)
        else:
            Xs = X
        Y, Z = self.forward(Xs)
        if self.stdT is not None:
            Y = self.stdT.unstandardize(Y)
        if retZ:
            return Y, Z
        return Y
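For context, a minimal sketch of how the train/use flow described above would be invoked (the layer sizes are hypothetical, and this assumes the remaining feature columns are numeric):

X = vehicle_data.to_numpy(dtype=float)  # features (assumes all remaining columns are numeric)
T = t.to_numpy(dtype=float)             # target (Electric Range)

net = NeuralNet([X.shape[1], 10, 1])    # inputs, one hidden layer of 10 units, one output
net.train(X, T, niter=500, optim='scg')
predictions = net.use(X)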
Try opening a command prompt and typing pip install grad, or if you are using a Jupyter notebook, make a new code cell and run !pip install grad before importing it.
Hope that solves your problem.
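If pip install grad doesn't resolve it, one thing worth checking (my assumption, since the script does from grad import scg, steepest) is whether grad refers to a local grad.py that provides those two optimizers; in that case the file just has to be on Python's import path:

import sys
sys.path.append('/path/to/folder/containing/grad_py')  # hypothetical location of grad.py
from grad import scg, steepest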
I have this code consisting of a class and a subclass. The parent class implements the forward Euler method, while the subclass implements Euler's midpoint method. These are for solving an ODE (x' = x(1/2 - x)). It doesn't seem to work: when I call the solver by typing

Euler = H.solve(6)

where 6 is the number of steps, I get an AttributeError:

AttributeError: 'int' object has no attribute 'size'

Could anyone help me make my code more robust and working so I can plot the values later on? I really don't see what's wrong. My code is below:
import numpy as np

class H:
    def __init__(self, f):
        self._f = f

    def initial(self, u0):
        self._u0 = u0

    def solve(self, time_points):
        n = time_points.size
        self._t = time_points
        self._u = np.zeros(n)
        self._u[0] = self._u0
        for k in range(n-1):
            self._k = k
            self._u[k+1] = self.advance()
        return self._u, self._t

class F(H):
    def ad(self):
        u = self._u; t = self._t; f = self._f; k = self._k
        dt = t[k+1] - t[k]
        u_k12 = u[k] + dt/2 * f(u[k], t[k])
        return u[k] + dt * f(u_k12, (t[k] + dt/2))
I think what's wrong is the way you use the class. The initial value is set with the initial method (u0); then you give the solve method the array of time points. You can use np.linspace to generate the time points.

np.linspace(0, 3, 31)  # 31 points evenly spaced between 0 and 3
So it's like this:

def func(x, y):
    return x * y

time_points = np.linspace(0, 3, 31)
F_ = F(func)
F_.initial(6)
F_.solve(time_points)
Code:

class H:
    def __init__(self, f):
        self._f = f

    def initial(self, u0):
        self._u0 = u0

    def solve(self, time_points):
        n = time_points.size
        self._t = time_points
        self._u = np.zeros(n)
        self._u[0] = self._u0
        for k in range(n-1):
            self._u[k+1] = self.advance(k)
        return self._u, self._t

    def advance(self, k):
        raise NotImplementedError  # implemented by each subclass

class F(H):
    def advance(self, k):
        dt = self._t[k+1] - self._t[k]  # step size between consecutive time points
        u_k12 = self._u[k] + dt/2 * self._f(self._u[k], self._t[k])
        return self._u[k] + dt * self._f(u_k12, (self._t[k] + dt/2))
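For instance, to solve the ODE from the question, x' = x(1/2 - x), with these corrected classes (the initial value and time grid here are arbitrary choices):

import numpy as np

def f(u, t):
    # right-hand side of x' = x * (1/2 - x)
    return u * (0.5 - u)

solver = F(f)
solver.initial(0.1)  # initial condition u(0) = 0.1
u, t = solver.solve(np.linspace(0, 10, 101))
# u[k] now holds the midpoint approximation at time t[k], ready for plotting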
I'm trying to implement a genetic algorithm for solving the Travelling Salesman Problem (TSP).
I have 2 classes, which are City and Fitness.
I have done the code for initialization.
import random
import numpy as np

class City:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def distance(self, city):
        xDis = abs(self.x - city.x)
        yDis = abs(self.y - city.y)
        distance = np.sqrt((xDis ** 2) + (yDis ** 2))
        return distance

    def __repr__(self):
        return "(" + str(self.x) + "," + str(self.y) + ")"

class Fitness:
    def __init__(self, route):
        self.route = route
        self.distance = None
        self.fitness = None

    def routeDistance(self):
        if self.distance is None:
            pathDistance = 0.0
            for i in range(0, len(self.route)):
                fromCity = self.route[i]
                if i + 1 < len(self.route):
                    toCity = self.route[i+1]
                else:
                    toCity = self.route[0]  # wrap around to the start
                pathDistance += fromCity.distance(toCity)
            self.distance = pathDistance
        return self.distance

    def routeFitness(self):
        if self.fitness is None:
            self.fitness = 1 / float(self.routeDistance())
        return self.fitness

def selection(population, size=None):
    if size is None:
        size = len(population)
    matingPool = []
    fitnessResults = {}
    for i in range(0, size):
        fitnessResults[i] = Fitness(population[i]).routeFitness()
        matingPool.append(random.choice(population))
    return matingPool
The code above just randomly selects a parent in the selection method.
My question is: how do I select a parent using roulette wheel selection?
You could try this:
from numpy.random import choice

def selection(population, size=None):
    if size is None:
        size = len(population)
    fitnessResults = []
    for i in range(0, size):
        fitnessResults.append(Fitness(population[i]).routeFitness())
    sum_fitness = sum(fitnessResults)
    # selection probability is proportional to fitness
    probability_lst = [f / sum_fitness for f in fitnessResults]
    matingPool = choice(population, size=size, p=probability_lst)
    return matingPool
So basically, the higher a fitness value, the higher its chances of being chosen. But that only works when a high fitness value means a better solution. In TSP a lower route distance is better, so we need to make the selection probability proportional to the inverse of the distance.
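For example (numbers of my own choosing): with route distances d = [10, 20, 40], the inverse fitnesses are [0.1, 0.05, 0.025]; dividing by their sum 0.175 gives selection probabilities of roughly [0.571, 0.286, 0.143], so the shortest route is the most likely to be chosen.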
Here is something I had implemented in Python, with some changes:
def choose_parent_using_RWS(genes, S):
    P = random.uniform(0, S)
    for x in genes:
        P += get_fitness_value(x)
        if P > S:
            return x
    return genes[-1]
where S is the sum of the inverses of the route distances of the current population (i.e. 1/d1 + 1/d2 + 1/d3 + ...)
and
get_fitness_value(x) returns the inverse of the distance, just like your routeFitness() function.
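A sketch of how this could be wired into the classes from the question (the glue code and helper wiring are mine, not from the original answer):

import random

def get_fitness_value(route):
    # inverse of the route distance, exactly what routeFitness() returns
    return Fitness(route).routeFitness()

def selection(population, size=None):
    if size is None:
        size = len(population)
    S = sum(get_fitness_value(route) for route in population)
    # draw size parents, each with probability proportional to its fitness
    return [choose_parent_using_RWS(population, S) for _ in range(size)]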
In Mathematica I can convert multivariate moments into cumulants and back using MomentConvert:
MomentConvert[Cumulant[{2, 2,1}], "Moment"] // TraditionalForm
as one can try in the Wolfram Cloud.
I would like to do exactly the same in Python. Is there any library in Python capable of this?
At least one direction I have now programmed myself:
# from http://code.activestate.com/recipes/577211-generate-the-partitions-of-a-set-by-index/
from collections import defaultdict

class Partition:
    def __init__(self, S):
        self.data = list(S)
        self.m = len(S)
        self.table = self.rgf_table()

    def __getitem__(self, i):
        # generates set partitions by index
        if i > len(self) - 1:
            raise IndexError
        L = self.unrank_rgf(i)
        result = self.as_set_partition(L)
        return result

    def __len__(self):
        return self.table[self.m, 0]

    def as_set_partition(self, L):
        # Transform a restricted growth function into a partition
        n = max(L[1:] + [1])
        m = self.m
        data = self.data
        P = [[] for _ in range(n)]
        for i in range(m):
            P[L[i+1] - 1].append(data[i])
        return P

    def rgf_table(self):
        # Compute the table values
        m = self.m
        D = defaultdict(lambda: 1)
        for i in range(1, m+1):
            for j in range(0, m-i+1):
                D[i, j] = j * D[i-1, j] + D[i-1, j+1]
        return D

    def unrank_rgf(self, r):
        # Unrank a restricted growth function
        m = self.m
        L = [1 for _ in range(m+1)]
        j = 1
        D = self.table
        for i in range(2, m+1):
            v = D[m-i, j]
            cr = j * v
            if cr <= r:
                L[i] = j + 1
                r -= cr
                j += 1
            else:
                L[i] = r // v + 1
                r %= v
        return L

# S = set(range(4))
# P = Partition(S)
# for x in P:
#     print(x)
# using https://en.wikipedia.org/wiki/Cumulant#Joint_cumulants
import math
import numpy as np
import qutip as qu

def Cum2Mom(arr, state):
    # expresses the joint cumulant of the operators/symbols in arr as a
    # signed sum over set partitions of products of moments (expectations)
    def E(op):
        return qu.expect(op, state)

    def Arr2str(arr, sep):
        r = ''
        for i, x in enumerate(arr):
            r += str(x)
            if i < len(arr) - 1:
                r += sep
        return r

    if isinstance(arr[0], str):
        # symbolic mode: build strings instead of evaluating expectations
        myprod = lambda x: Arr2str(x, '*')
        mysum = lambda x: Arr2str(x, '+')
        E = lambda x: 'E(' + str(x) + ')'
        myfloat = str
    else:
        myfloat = lambda x: x
        myprod = np.prod
        mysum = sum

    S = set(range(len(arr)))
    P = Partition(S)
    return mysum([
        myprod([myfloat(math.factorial(len(pi) - 1) * (-1)**(len(pi) - 1)),
                myprod([
                    E(myprod([
                        arr[i]
                        for i in B
                    ]))
                    for B in pi])])
        for pi in P])

print(Cum2Mom(['a', 'b', 'c', 'd'], 1))
print(Cum2Mom([qu.qeye(3) for i in range(3)], qu.qeye(3)))
It's designed to work with QuTiP objects, and it also works with strings to verify the correct separation and prefactors.
Exponents of the variables can be represented by repeating the variable.
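As a quick sanity check (my own example, not from the original post), running the symbolic mode on two variables reproduces the familiar identity k(a,b) = E(ab) - E(a)E(b):

print(Cum2Mom(['a', 'b'], None))  # state is unused in symbolic mode
# prints: 1*E(a*b)+-1*E(a)*E(b)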