Multidimensional array out of bounds - Python

I am doing something wrong and cannot figure out what. I have a multidimensional matrix, total, that stores the simulated paths. I am messing up the indexing somewhere and just cannot understand how or why. The error is: IndexError: index 253 is out of bounds for axis 0 with size 253
import numpy as np
import matplotlib.pyplot as plt

class pricing_floatinglookback:
    def __init__(self, spot, rate, sigma, time, sims, steps):
        self.spot = spot
        self.rate = rate
        self.sigma = sigma
        self.time = time
        self.sims = sims
        self.steps = steps + 1
        self.dt = self.time / self.steps

    def call_floatingstrike(self):
        SimPriceMin = np.array([])
        SimPriceAtMaturity = np.array([])
        call2 = np.array([])
        pathwiseS = np.zeros((self.steps,), float)
        total = np.zeros((self.sims, self.steps), float)
        for j in range(self.sims):
            pathwiseS[0] = self.spot  ## This will be one dimensional array from 0 to 253
            total[j, 0] = self.spot   ## This will be multidimensional array with columns 0 to 800 and rows 0 to 253
            for i in range(self.steps):
                phi = np.random.normal()
                pathwiseS[i+1] = pathwiseS[i]*(1 + self.rate*self.dt + self.sigma*phi*np.sqrt(self.dt))  ## --> This is where I am going wrong.
                total[j, i+1] = pathwiseS[i+1]  ## --> This is where I am going wrong.
            SimPriceAtMaturity = np.append(SimPriceAtMaturity, pathwiseS[self.steps - 1])
            call2 = np.append(call2, max(pathwiseS[self.steps - 1] - self.spot, 0))
            SimPriceMin = np.append(SimPriceMin, min(pathwiseS))
        callbsm = np.average(call2)
        call = max(np.average(SimPriceAtMaturity) - np.average(SimPriceMin), 0)
        return call, total.reshape(self.sims, self.steps), np.average(SimPriceMin), callbsm*np.exp(-self.rate*self.time)

pricelookback = pricing_floatinglookback(100, 0.05, 0.2, 1, 800, 252)
clookback, callmatrix, calmin, callbsm = pricelookback.call_floatingstrike()
print(callbsm)
plt.plot(callmatrix.T)

So I fixed it. I don't know why or how, but it's fixed:
class pricing_floatinglookback:
    def __init__(self, spot, rate, sigma, time, sims, steps):
        self.spot = spot
        self.rate = rate
        self.sigma = sigma
        self.time = time
        self.sims = sims
        self.steps = steps + 1
        self.dt = self.time / self.steps

    def call_floatingstrike(self):
        SimPriceMin = np.array([])
        SimPriceAtMaturity = np.array([])
        call2 = np.array([])
        pathwiseS = np.zeros((self.steps,), float)
        total = np.zeros((self.sims, self.steps), float)
        for j in range(self.sims):
            pathwiseS[0] = self.spot
            total[j, 0] = self.spot
            for i in range(self.steps - 1):  ## ---> This was the main reason, don't know why but it was!
                phi = np.random.normal()
                pathwiseS[i+1] = pathwiseS[i]*(1 + self.rate*self.dt + self.sigma*phi*np.sqrt(self.dt))
                total[j, i] = pathwiseS[i+1]  ## ---> This as suggested in the comments
            SimPriceAtMaturity = np.append(SimPriceAtMaturity, pathwiseS[self.steps - 1])
            call2 = np.append(call2, max(pathwiseS[self.steps - 1] - self.spot, 0))
            SimPriceMin = np.append(SimPriceMin, min(pathwiseS))
        callbsm = np.average(call2)
        call = max(np.average(SimPriceAtMaturity) - np.average(SimPriceMin), 0)
        return call, total.reshape(self.sims, self.steps), np.average(SimPriceMin), callbsm*np.exp(-self.rate*self.time)
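For what it's worth, the error itself is a plain off-by-one: pathwiseS has self.steps entries, so valid indices run from 0 to self.steps - 1, but the original inner loop for i in range(self.steps) lets i reach self.steps - 1 and then writes pathwiseS[i + 1], i.e. index self.steps. A minimal sketch of the pattern, with a made-up array and nothing from the option-pricing model itself:

import numpy as np

steps = 253
path = np.zeros(steps)           # valid indices: 0 .. 252

# for i in range(steps):         # i reaches 252, so path[i + 1] asks for path[253] -> IndexError
#     path[i + 1] = path[i] + 1

for i in range(steps - 1):       # i stops at 251, so path[i + 1] stays in bounds
    path[i + 1] = path[i] + 1

Looping over range(self.steps - 1), as in the fixed version, keeps i + 1 in bounds; whether total[j, i] or total[j, i + 1] is the right column to fill is a separate bookkeeping choice.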

Related

Code slowing down when using prioritised replay buffer with Ray

I have code that trains a DQN in a distributed setting. When using a standard replay buffer that all the workers push their experiences to, the code runs perfectly fine.
However, when I switch to a prioritised replay buffer, things start to slow down massively. The code for the two buffers is given below:
import random
import time

import numpy as np
import ray

@ray.remote
class PrioritizedReplayBuffer:
    def __init__(self, capacity, alpha=0.6, beta=0.4, beta_increment_per_sampling=0.001, batch_size=128):
        self.capacity = capacity
        self.alpha = alpha
        self.beta = beta
        self.beta_increment_per_sampling = beta_increment_per_sampling
        self.buffer = []
        self.pos = 0
        self.priorities = []
        self.batch_size = batch_size

    def push(self, data):
        for experience in data:
            max_priority = max(self.priorities) if self.buffer else 1.0
            if len(self.buffer) < self.capacity:
                self.buffer.append(experience)
                self.priorities.append(max_priority)
            else:
                self.buffer[self.pos] = experience
                self.priorities[self.pos] = max_priority
            self.pos = (self.pos + 1) % self.capacity

    def sample(self):
        start = time.time()
        N = len(self.buffer)
        if N == self.capacity:
            priorities = np.array(self.priorities)
        else:
            priorities = np.array(self.priorities[:self.pos])
        self.beta = min(1.0, self.beta + self.beta_increment_per_sampling)
        sampling_probabilities = priorities ** self.alpha
        sampling_probabilities = sampling_probabilities / sampling_probabilities.sum()
        indices = random.choices(range(N), k=self.batch_size, weights=sampling_probabilities)
        experiences = [self.buffer[idx] for idx in indices]
        weights = np.array([(self.capacity * priorities[i]) ** -self.beta for i in indices])
        weights = weights / weights.max()
        end = time.time()
        print(f"sampling took {(end - start) / 60} minutes")
        return experiences, np.array(indices), weights

    def update_priorities(self, indices, priorities):
        for idx, priority in zip(indices, priorities):
            self.priorities[idx] = priority

    def __len__(self):
        return len(self.buffer)

@ray.remote
class ReplayBuffer:
    def __init__(self, capacity, batch_size=128):
        self.capacity = capacity
        self.buffer = []
        self.batch_size = batch_size

    def push(self, data):
        for experience in data:
            self.buffer.append(experience)

    def sample(self):
        return random.sample(self.buffer, self.batch_size)

    def __len__(self):
        return len(self.buffer)
The code for my workers looks like this:
@ray.remote
class Actor(object):
    def __init__(self, state_dim, action_dim, exploration_decay, exploration_min, worker_id=None, replay_buffer=None, param_server=None, push_size=20, num_grad_steps=1e6):
        self.worker_id = worker_id
        self.env = gym.make('LunarLander-v2')
        self.net = Net(state_dim, action_dim)
        # get ray_remote objects; centralized buffer and parameter server
        self.replay_buffer = replay_buffer
        self.param_server = param_server
        self.push_size = push_size  # this is how much data we need until we push to the centralised buffer
        self.num_grad_steps = num_grad_steps
        self.epsilon = 1
        self.exploration_decay = exploration_decay
        self.exploration_min = exploration_min
        self.action_dim = action_dim

    def act(self, state):
        if np.random.uniform() < self.epsilon:
            self.epsilon = max(self.epsilon * self.exploration_decay, self.exploration_min)
            return np.random.randint(0, self.action_dim)
        else:
            state = torch.FloatTensor(state)
            with torch.no_grad():
                values = self.net(state)
            action = torch.argmax(values)
            return int(action)

    def sync_with_param_server(self):
        new_actor_params = ray.get(self.param_server.return_params.remote())
        for param in new_actor_params:
            new_actor_params[param] = torch.from_numpy(new_actor_params[param]).float()
        self.net.load_state_dict(new_actor_params)

    def run(self):
        state = self.env.reset()
        episode_reward = 0
        episode = 0
        ep_length = 0
        grad_steps = 0
        intermediate_memory = []  # this is what we will push to the buffer at once
        while grad_steps < self.num_grad_steps:
            ep_length += 1
            action = self.act(state)
            next_state, reward, done, _ = self.env.step(action)
            intermediate_memory.append((state, action, reward, next_state, done))
            if len(intermediate_memory) >= self.push_size:
                self.replay_buffer.push.remote(intermediate_memory)
                intermediate_memory = []
                self.sync_with_param_server()
                grad_steps = ray.get(self.param_server.return_grad_steps.remote())
                # time.sleep(60 * 5)
            episode_reward += reward
            if done:
                # print results locally
                # print(f"Episode {episode}: {episode_reward}")
                # print_status(self.env, time_step)
                # prepare new rollout
                episode += 1
                episode_reward = 0
                ep_length = 0
                next_state = self.env.reset()
            state = next_state
I've narrowed the problem down somewhat: when I uncomment the sleep command in the actor, the speed of the code goes back to normal once the sleep kicks in, i.e. when the actors aren't pushing any data to the buffer. The thing is, I am not sure why actors pushing to the replay buffer would slow down the learning steps when it makes no difference for the vanilla replay buffer.
Any help pinpointing what is causing the problem and how to fix it would be greatly appreciated.
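No accepted answer is recorded here, but one way to make the comparison above concrete is to measure the buffer-side bookkeeping in isolation from Ray. The snippet below is a standalone, hypothetical micro-benchmark (the sizes are made up); it only times the pure-Python operations that push() and sample() perform, so any gap between these numbers and the observed slowdown would point at Ray-side overhead (serialization, actor contention) rather than the buffer logic itself:

import random
import time

import numpy as np

capacity = 100_000
batch_size = 128
priorities = [random.random() for _ in range(capacity)]

start = time.time()
for _ in range(1000):                      # push() does one max() scan per experience
    _ = max(priorities)
print(f"1000 max() scans over the priority list: {time.time() - start:.3f} s")

start = time.time()
probs = np.array(priorities) ** 0.6        # same shaping as sample()
probs = probs / probs.sum()
indices = random.choices(range(capacity), k=batch_size, weights=probs)
print(f"one sample()-style weighted draw: {time.time() - start:.3f} s")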

Update the class attribute value

I have a problem with passing a value from one class into a new iterative loop outside of that class. The code is shown below (data and newdata are vectors):
import numpy as np
import pyaudio

class A:
    def __init__(self, k, tol=0.0001, max_iter=300):
        self.k = k
        self.tol = tol
        self.max_iter = max_iter

    def fit(self, data):
        self.centroids = {}
        for i in range(self.k):
            self.centroids[i] = data[i+50]
        for i in range(self.max_iter):
            self.classifications = {}
            for i in range(self.k):
                self.classifications[i] = []
            for featureset in data:
                distances = [np.linalg.norm(featureset - self.centroids[centroid]) for centroid in self.centroids]
                classification = distances.index(min(distances))
                self.classifications[classification].append(featureset)
            prev_centroids = dict(self.centroids)
            for classification in self.classifications:
                self.centroids[classification] = np.average(self.classifications[classification], axis=0)
            optimized = True
            for c in self.centroids:
                original_centroid = prev_centroids[c]
                current_centroid = self.centroids[c]
                if np.sum((current_centroid - original_centroid) / original_centroid * 100.0) > self.tol:
                    #print(np.sum((current_centroid - original_centroid) / original_centroid * 100.0))
                    optimized = False
            if optimized:
                break

    def cluster_labels(self, data):
        cluster_labels = []
        for featureset in data:
            distances = [np.linalg.norm(featureset - self.centroids[centroid]) for centroid in self.centroids]
            cluster_labels.append(distances.index(min(distances)))
        return cluster_labels

    def predict(self, data):
        distances = [np.linalg.norm(data - self.centroids[centroid]) for centroid in self.centroids]
        classification = distances.index(min(distances))
        return classification

    def update(self, new_data, delta):
        for featureset in new_data:
            distances = [np.linalg.norm(featureset - self.centroids[centroid]) for centroid in self.centroids]
            if min(distances) < delta:
                classification = distances.index(min(distances))
                self.classifications[classification].append(featureset)
                self.centroids[classification] = np.average(self.classifications[classification], axis=0)
            else:
                self.centroids[self.k] = featureset
                self.classifications[self.k] = []
                self.classifications[self.k].append(featureset)
                self.k = self.k + 1
                k = self.k
        print(k)
        return k

class Recorder:
    def __init__(rec):
        rec.p = pyaudio.PyAudio()
        rec.stream = rec.p.open(format=pyaudio.paInt16, channels=1, rate=44100, input=True, input_device_index=2, frames_per_buffer=chunk)

    def write():
        a = A(k=3)
        a.fit(data)
        k = a.update(newdata, 20)

for num in range(1, 100):
    rec.Recorder()
    rec.write()
Initially, I want to set k = 3. After that, the value of k should be updated with k = a.update(newdata, 20). However, on every run the value of k stays at 3. And if I set k = 3 outside of the classes, it always shows the error:
UnboundLocalError: local variable 'k' referenced before assignment
How could I solve this problem?
The issue is in this function:
def update(self, new_data, delta):
    for featureset in new_data:
        distances = [np.linalg.norm(featureset - self.centroids[centroid]) for centroid in self.centroids]
        if min(distances) < delta:
            classification = distances.index(min(distances))
            self.classifications[classification].append(featureset)
            self.centroids[classification] = np.average(self.classifications[classification], axis=0)
        else:
            self.centroids[self.k] = featureset
            self.classifications[self.k] = []
            self.classifications[self.k].append(featureset)
            self.k = self.k + 1
            k = self.k
You are only setting the "k" value inside the "else" block. Leaving out everything unrelated, it looks like this:
def update(self, new_data, delta):
    for featureset in new_data:
        ...
        if min(distances) < delta:
            ...
        else:
            ...
            k = self.k
    print(k)   # <-- error here
    return k   # <-- error here
In the case where min(distances) >= delta, k will not be set and you will get the error you report.
You have two options:
Add a k = ... line into the if-block where min(distances) < delta
Add a k = ... line just above the if-block (still inside the for-block) to set a "default" value for k
On review it is also possible that you just need to return self.k instead of just k.
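To spell that out, here is a minimal sketch of the second option combined with returning self.k; it is untested against the asker's data, but it keeps the return value defined on every path:

def update(self, new_data, delta):
    for featureset in new_data:
        distances = [np.linalg.norm(featureset - self.centroids[centroid]) for centroid in self.centroids]
        if min(distances) < delta:
            classification = distances.index(min(distances))
            self.classifications[classification].append(featureset)
            self.centroids[classification] = np.average(self.classifications[classification], axis=0)
        else:
            self.centroids[self.k] = featureset
            self.classifications[self.k] = [featureset]
            self.k = self.k + 1
    return self.k   # always defined, no matter which branch ran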

Blinn-Phong shading with numpy

I am trying to implement Blinn-Phong shading in numpy for educational purposes. However, I have been stuck for several days debugging what the parameters are doing.
My general idea was the following: since the equation is given per channel, I apply the model to each color channel to get the relative pixel intensities in that channel, then regroup the channels to reassemble the image.
My Lambertian term does not seem to take changes in the light position into account; it does change the pixel intensity, but the other parameters have almost no effect on the output.
Any help would be appreciated.
Here are the relevant bits of the code (the full code is here for anyone interested):
import pdb

import numpy as np

def normalize_1d_array(arr):
    "Normalize 1d array"
    assert arr.ndim == 1
    result = None
    if np.linalg.norm(arr) == 0:
        result = arr
    else:
        result = arr / np.linalg.norm(arr)
    return result

def normalize_3col_array(arr):
    "Normalize 3 column array"
    assert arr.shape[1] == 3
    assert arr.ndim == 2
    normal = np.copy(arr)
    normal[:, 0] = normalize_1d_array(normal[:, 0])
    normal[:, 1] = normalize_1d_array(normal[:, 1])
    normal[:, 2] = normalize_1d_array(normal[:, 2])
    return normal

def get_vector_dot(arr1, arr2):
    "Get vector dot product for 2 matrices"
    assert arr1.shape == arr2.shape
    newarr = np.sum(arr1 * arr2, axis=1, dtype=np.float32)
    return newarr

class LightSource:
    "Simple implementation of a light source"

    def __init__(self,
                 x=10.0,  # x
                 y=5.0,  # y
                 z=0.0,  # light source at infinity
                 intensity=1.0,  # I_p
                 ambient_intensity=1.0,  # I_a
                 ambient_coefficient=0.1,  # k_a
                 light_power=80.0):
        "light source"
        self.x = x
        self.y = y
        if z is not None:
            assert isinstance(z, float)
        self.z = z
        self.intensity = intensity
        self.power = light_power
        self.ambient_intensity = ambient_intensity  # I_a
        self.ambient_coefficient = ambient_coefficient  # k_a
        # k_a can be tuned if the material is known

    def copy(self):
        "copy self"
        return LightSource(x=self.x,
                           y=self.y,
                           z=self.z,
                           intensity=self.intensity,
                           light_power=self.power)

class ChannelShader:
    "Shades channels"

    def __init__(self,
                 coordarr: np.ndarray,
                 light_source: LightSource,  # has I_a, I_p, k_a
                 surface_normal: np.ndarray,
                 imagesize: (int, int),
                 color: np.ndarray,  # they are assumed to be O_d and O_s
                 spec_coeff=0.5,  # k_s
                 screen_gamma=2.2,
                 diffuse_coeff=0.9,  # k_d
                 attenuation_c1=2.0,  # f_attr c1
                 attenuation_c2=0.0,  # f_attr c2 d_L coefficient
                 attenuation_c3=0.0,  # f_attr c3 d_L^2 coefficient
                 shininess=270.0  # n
                 ):
        self.light_source = light_source
        self.light_intensity = self.light_source.intensity  # I_p
        self.ambient_coefficient = self.light_source.ambient_coefficient  # k_a
        self.ambient_intensity = self.light_source.ambient_intensity  # I_a
        self.coordarr = coordarr
        self.surface_normal = np.copy(surface_normal)
        self.screen_gamma = screen_gamma
        self.shininess = shininess
        self.diffuse_coeff = diffuse_coeff  # k_d
        self.diffuse_color = normalize_1d_array(color)  # O_d: object diffuse color
        self.spec_color = normalize_1d_array(color)  # O_s: object specular color
        self.spec_coeff = spec_coeff  # k_s: specular coefficient
        self.imsize = imagesize
        self.att_c1 = attenuation_c1
        self.att_c2 = attenuation_c2
        self.att_c3 = attenuation_c3

    def copy(self):
        return ChannelShader(coordarr=np.copy(self.coordarr),
                             light_source=self.light_source.copy(),
                             surface_normal=np.copy(self.surface_normal),
                             color=np.copy(self.diffuse_coeff) * 255.0)

    @property
    def distance(self):
        yarr = self.coordarr[:, 0]  # row nb
        xarr = self.coordarr[:, 1]  # col nb
        xdist = (self.light_source.x - xarr)**2
        ydist = (self.light_source.y - yarr)**2
        return xdist + ydist

    @property
    def distance_factor(self):
        resx = self.imsize[1]
        factor = self.distance / self.light_source.z * resx
        return 1.0 - factor

    @property
    def light_direction(self):
        "get light direction matrix (-1, 3)"
        yarr = self.coordarr[:, 0]
        xarr = self.coordarr[:, 1]
        xdiff = self.light_source.x - xarr
        ydiff = self.light_source.y - yarr
        light_matrix = np.zeros((self.coordarr.shape[0], 3))
        light_matrix[:, 0] = ydiff
        light_matrix[:, 1] = xdiff
        light_matrix[:, 2] = self.light_source.z
        # light_matrix[:, 2] = 0.0
        return light_matrix

    @property
    def light_attenuation(self):
        """
        Implementing from Foley JD 1996, p. 726
        f_att : light source attenuation function:
        f_att = min(\frac{1}{c_1 + c_2{\times}d_L + c_3{\times}d^2_{L}} , 1)
        """
        second = self.att_c2 * self.distance
        third = self.att_c3 * self.distance * self.distance
        result = self.att_c1 + second + third
        result = 1 / result
        return np.where(result < 1, result, 1)

    @property
    def normalized_light_direction(self):
        "Light Direction matrix normalized"
        return normalize_3col_array(self.light_direction)

    @property
    def normalized_surface_normal(self):
        return normalize_3col_array(self.surface_normal)

    @property
    def costheta(self):
        "set costheta"
        # pdb.set_trace()
        costheta = get_vector_dot(
            arr1=self.normalized_light_direction,
            arr2=self.normalized_surface_normal)
        # products of vectors
        costheta = np.abs(costheta)  # as per (Foley J.D, et.al. 1996, p. 724)
        return costheta

    @property
    def ambient_term(self):
        "Get the ambient term I_a * k_a * O_d"
        term = self.ambient_coefficient * self.ambient_intensity
        return term * self.diffuse_color

    @property
    def view_direction(self):
        "Get view direction"
        # pdb.set_trace()
        cshape = self.coordarr.shape
        coord = np.zeros((cshape[0], 3))  # x, y, z
        coord[:, :2] = -self.coordarr
        coord[:, 2] = 0.0  # viewer at infinity
        coord = normalize_3col_array(coord)
        return coord

    @property
    def half_direction(self):
        "get half direction"
        # pdb.set_trace()
        arr = self.view_direction + self.normalized_light_direction
        return normalize_3col_array(arr)

    @property
    def spec_angle(self):
        "get spec angle"
        specAngle = get_vector_dot(
            arr1=self.half_direction,
            arr2=self.normalized_surface_normal)
        return np.where(specAngle > 0.0, specAngle, 0.0)

    @property
    def specular(self):
        return self.spec_angle ** self.shininess

    @property
    def channel_color_blinn_phong(self):
        """compute new channel color intensities
        Implements: Foley J.D. 1996 p. 730 - 731, variation on equation 16.15
        """
        second = 1.0  # added for structuring code in this fashion, makes
        # debugging easier
        # lambertian terms
        second *= self.diffuse_coeff  # k_d
        second *= self.costheta  # (N \cdot L)
        second *= self.light_intensity  # I_p
        # adding phong terms
        second *= self.light_attenuation  # f_attr
        second *= self.diffuse_color  # O_d
        third = 1.0
        third *= self.spec_color  # O_s
        third *= self.specular  # (N \cdot H)^n
        third *= self.spec_coeff  # k_s
        result = 0.0
        result += self.ambient_term  # I_a × k_a × O_d
        result += second
        result += third
        pdb.set_trace()
        return result
Thanks
Well, after all, the implementation did not have a lot of problems; however, the images I was working with required very unusual parameter values due to the specific conditions under which they were produced.
Most of the images I used contained rough surfaces with unglazed clay as the material, and they were taken in a controlled environment with a single light source, unlike real-world scenes where objects are illuminated from multiple light spots.
So most of the parameters concerning ambient illumination and specular reflection did not make much sense in practice.
I am putting the relevant parts of my implementation here as a reference for future users; be sure NOT to use the default values.
Some details about the implementation:
It largely follows the equation specified in Foley J.D. et al., 1996, pp. 730-731, no. 16.15, with the variation of adding a halfway vector, as required for Blinn-Phong.
ChannelShader expects the following:
Coordinates of channel pixels of shape: (-1, 2)
Surface normals of shape: (-1, 3)
Channel colors of shape: (-1,)
A light source of the type LightSource
As stated above, please do change the default values before proceeding with experimentation.
You can use the same surface normals for each channel if you are shading multiple channels.
A final word of caution: it is noticeably slow, even with numpy.
The proper way to render shading is with GPU-based libraries like pyopengl, etc. I have not tested it with GPU ports of numpy such as cupy, nor with other libraries like numba, though:
import numpy as np

def normalize_1d_array(arr):
    "Normalize 1d array"
    assert arr.ndim == 1
    result = None
    if np.linalg.norm(arr) == 0:
        result = arr
    else:
        result = arr / np.linalg.norm(arr)
    return result

def normalize_3col_array(arr):
    "Normalize 3 column array"
    assert arr.shape[1] == 3
    assert arr.ndim == 2
    normal = np.copy(arr)
    normal[:, 0] = normalize_1d_array(normal[:, 0])
    normal[:, 1] = normalize_1d_array(normal[:, 1])
    normal[:, 2] = normalize_1d_array(normal[:, 2])
    return normal

def get_vector_dot(arr1, arr2):
    "Get vector dot product for 2 matrices"
    assert arr1.shape == arr2.shape
    newarr = np.sum(arr1 * arr2, axis=1, dtype=np.float32)
    return newarr

class ImageArray:
    "Image array have some additional properties besides np.ndarray"

    def __init__(self, image: np.ndarray):
        assert isinstance(image, np.ndarray)
        self.image = image

    @property
    def norm_coordinates(self):
        "Get normalized coordinates of the image pixels"
        # pdb.set_trace()
        rownb, colnb = self.image.shape[0], self.image.shape[1]
        norm = np.empty_like(self.coordinates, dtype=np.float32)
        norm[:, 0] = self.coordinates[:, 0] / rownb
        norm[:, 1] = self.coordinates[:, 1] / colnb
        return norm

    @property
    def norm_image(self):
        "Get normalized image with pixel values divided by 255"
        return self.image / 255.0

    @property
    def coordinates(self):
        "Coordinates of the image pixels"
        rownb, colnb = self.image.shape[:2]
        coords = [[(row, col) for col in range(colnb)] for row in range(rownb)]
        coordarray = np.array(coords)
        return coordarray.reshape((-1, 2))

    @property
    def arrshape(self):
        "get array shape"
        return self.image.shape

    @property
    def flatarr(self):
        "get flattened array"
        return self.image.flatten()

def interpolateImage(imarr: ImageArray):
    "Interpolate image array"
    imshape = imarr.image.shape
    newimage = imarr.image.flatten()
    newimage = np.uint8(np.interp(newimage,
                                  (newimage.min(),
                                   newimage.max()),
                                  (0, 255))
                        )
    newimage = newimage.reshape(imshape)
    return ImageArray(newimage)

class LightSource:
    "Simple implementation of a light source"

    def __init__(self,
                 x=1.0,  # x
                 y=1.0,  # y
                 z=20.0,  # light source distance: 0 to make it at infinity
                 intensity=1.0,  # I_p
                 ambient_intensity=1.0,  # I_a
                 ambient_coefficient=0.000000002,  # k_a
                 ):
        "light source"
        self.x = x
        self.y = y
        if z is not None:
            assert isinstance(z, float)
        self.z = z
        self.intensity = intensity
        self.ambient_intensity = ambient_intensity  # I_a
        self.ambient_coefficient = ambient_coefficient  # k_a
        # k_a can be tuned if the material is known

    def copy(self):
        "copy self"
        return LightSource(x=self.x,
                           y=self.y,
                           z=self.z,
                           intensity=self.intensity,
                           light_power=self.power)

class ChannelShader:
    "Shades channels"

    def __init__(self,
                 coordarr: np.ndarray,
                 light_source: LightSource,  # has I_a, I_p, k_a
                 surface_normal: np.ndarray,
                 color: np.ndarray,  # they are assumed to be O_d and O_s
                 spec_coeff=0.1,  # k_s
                 spec_color=1.0,  # O_s: obj specular color. It can be
                 # optimized with respect to surface material
                 screen_gamma=2.2,
                 diffuse_coeff=0.008,  # k_d
                 # a good value is between 0.007 and 0.1
                 attenuation_c1=1.0,  # f_attr c1
                 attenuation_c2=0.0,  # f_attr c2 d_L coefficient
                 attenuation_c3=0.0,  # f_attr c3 d_L^2 coefficient
                 shininess=20.0  # n
                 ):
        self.light_source = light_source
        self.light_intensity = self.light_source.intensity  # I_p
        self.ambient_coefficient = self.light_source.ambient_coefficient  # k_a
        self.ambient_intensity = self.light_source.ambient_intensity  # I_a
        self.coordarr = coordarr
        self.surface_normal = np.copy(surface_normal)
        self.screen_gamma = screen_gamma
        self.shininess = shininess
        self.diffuse_coeff = diffuse_coeff  # k_d
        # self.diffuse_color = normalize_1d_array(color)  # O_d: obj diffuse color
        self.diffuse_color = color  # O_d: obj diffuse color
        self.spec_color = spec_color  # O_s
        self.spec_coeff = spec_coeff  # k_s: specular coefficient
        self.att_c1 = attenuation_c1
        self.att_c2 = attenuation_c2
        self.att_c3 = attenuation_c3

    def copy(self):
        return ChannelShader(coordarr=np.copy(self.coordarr),
                             light_source=self.light_source.copy(),
                             surface_normal=np.copy(self.surface_normal),
                             color=np.copy(self.diffuse_color))

    @property
    def distance(self):
        yarr = self.coordarr[:, 0]  # row nb
        xarr = self.coordarr[:, 1]  # col nb
        xdist = (self.light_source.x - xarr)**2
        ydist = (self.light_source.y - yarr)**2
        return xdist + ydist

    @property
    def light_direction(self):
        "get light direction matrix (-1, 3)"
        yarr = self.coordarr[:, 0]
        xarr = self.coordarr[:, 1]
        xdiff = self.light_source.x - xarr
        ydiff = self.light_source.y - yarr
        light_matrix = np.zeros((self.coordarr.shape[0], 3))
        light_matrix[:, 0] = ydiff
        light_matrix[:, 1] = xdiff
        light_matrix[:, 2] = self.light_source.z
        # light_matrix[:, 2] = 0.0
        # pdb.set_trace()
        return light_matrix

    @property
    def light_attenuation(self):
        """
        Implementing from Foley JD 1996, p. 726
        f_att : light source attenuation function:
        f_att = min(\frac{1}{c_1 + c_2{\times}d_L + c_3{\times}d^2_{L}} , 1)
        """
        second = self.att_c2 * self.distance
        third = self.att_c3 * self.distance * self.distance
        result = self.att_c1 + second + third
        result = 1 / result
        return np.where(result < 1, result, 1)

    @property
    def normalized_light_direction(self):
        "Light Direction matrix normalized"
        return normalize_3col_array(self.light_direction)

    @property
    def normalized_surface_normal(self):
        return normalize_3col_array(self.surface_normal)

    @property
    def costheta(self):
        "set costheta"
        # pdb.set_trace()
        costheta = get_vector_dot(
            arr1=self.normalized_light_direction,
            arr2=self.normalized_surface_normal)
        # products of vectors
        # costheta = np.abs(costheta)  # as per (Foley J.D, et.al. 1996, p. 724)
        costheta = np.where(costheta > 0, costheta, 0)
        return costheta

    @property
    def ambient_term(self):
        "Get the ambient term I_a * k_a * O_d"
        term = self.ambient_coefficient * self.ambient_intensity
        term *= self.diffuse_color
        # pdb.set_trace()
        return term

    @property
    def view_direction(self):
        "Get view direction"
        # pdb.set_trace()
        cshape = self.coordarr.shape
        coord = np.zeros((cshape[0], 3))  # x, y, z
        coord[:, :2] = -self.coordarr
        coord[:, 2] = 0.0  # viewer at infinity
        coord = normalize_3col_array(coord)
        return coord

    @property
    def half_direction(self):
        "get half direction"
        # pdb.set_trace()
        arr = self.view_direction + self.normalized_light_direction
        return normalize_3col_array(arr)

    @property
    def spec_angle(self):
        "get spec angle"
        specAngle = get_vector_dot(
            arr1=self.half_direction,
            arr2=self.normalized_surface_normal)
        return np.where(specAngle > 0.0, specAngle, 0.0)

    @property
    def specular(self):
        return self.spec_angle ** self.shininess

    @property
    def channel_color_blinn_phong(self):
        """compute new channel color intensities
        Implements: Foley J.D. 1996 p. 730 - 731, variation on equation 16.15
        """
        second = 1.0  # added for structuring code in this fashion, makes
        # debugging easier
        # lambertian terms
        second *= self.diffuse_coeff  # k_d
        second *= self.costheta  # (N \cdot L)
        second *= self.light_intensity  # I_p
        # adding phong terms
        second *= self.light_attenuation  # f_attr
        second *= self.diffuse_color  # O_d
        third = 1.0
        third *= self.spec_color  # O_s
        third *= self.specular  # (N \cdot H)^n
        third *= self.spec_coeff  # k_s
        result = 0.0
        #
        result += self.ambient_term  # I_a × k_a × O_d
        result += second
        result += third
        # pdb.set_trace()
        return result
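For orientation, here is a hypothetical driver for a single channel, matching the shapes listed above (coordinates of shape (-1, 2), normals of shape (-1, 3), channel colors of shape (-1,)). The image size, the flat normals, and the parameter values are made up purely for illustration; only LightSource and ChannelShader come from the code above:

import numpy as np

# Hypothetical single-channel example: a 64x64 channel with flat surface normals
# and made-up shader parameters; tune the coefficients as advised above.
height, width = 64, 64
coords = np.indices((height, width)).reshape(2, -1).T                           # (-1, 2) pixel coordinates
normals = np.tile([0.0, 0.0, 1.0], (coords.shape[0], 1))                        # (-1, 3) normals facing the viewer
channel = np.random.randint(0, 256, size=coords.shape[0]).astype(np.float32)    # (-1,) channel colors

light = LightSource(x=10.0, y=10.0, z=20.0)
shader = ChannelShader(coordarr=coords,
                       light_source=light,
                       surface_normal=normals,
                       color=channel,
                       diffuse_coeff=0.01,
                       spec_coeff=0.1,
                       shininess=20.0)
shaded = shader.channel_color_blinn_phong      # property access, returns (-1,) intensities
image_channel = shaded.reshape(height, width)  # regroup back into the image plane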

memory overflow when processing eval() in Tensorflow

I'm using Tensorflow to implement a simple matrix factorization algorithm. Every step went correctly, but at the last step, where I want to eval() a Tensor to store it, the program did not finish and just occupied more and more memory. Is there something wrong in my code? I'm a beginner in Tensorflow and I don't know where the problem is. Below is my code.
import numpy as np
import tensorflow as tf

class model(object):
    def __init__(self, D, Q, stepsize=6e-7, max_iter=200, inner_maxiter=50, dim=200, verbose=5):
        self.D = tf.constant(D, dtype=tf.float32)
        self.Q = tf.constant(Q, dtype=tf.float32)
        self.rank = dim
        self.stepsize = stepsize
        self.max_iter = max_iter
        self.inner_maxiter = inner_maxiter
        self.verbose = verbose
        self.W = tf.Variable((np.random.rand(self.rank, sample_num)), dtype=tf.float32, name='W')
        self.C = tf.Variable((np.random.rand(context_num, self.rank)), dtype=tf.float32, name='C')

    def _run(self, sess):
        Q = self.Q
        D = self.D
        W = self.W
        C = self.C
        for i in xrange(self.max_iter):
            if (i + 1) % 2 == 1:
                for j in xrange(self.inner_maxiter):
                    ED = tf.transpose(Q) * (1.0 / (1.0 + tf.exp(- tf.matmul(C, W))))
                    recons = D - ED
                    W_grad = tf.matmul(tf.transpose(C), recons)
                    W = W + self.stepsize * W_grad
            else:
                for j in xrange(self.inner_maxiter):
                    ED = tf.transpose(Q) * (1.0 / (1.0 + tf.exp(- tf.matmul(C, W))))
                    recons = D - ED
                    C_grad = tf.matmul(recons, tf.transpose(W))
                    C = C + self.stepsize * C_grad
            print 'epoch: %d' % i
            print W.eval()
            print C.eval()

train_epoch = model(D, Q, args.step_size, \
                    args.max_iter, args.inner_maxiter, args.dim, args.verbose)

with tf.Session(config=config) as sess:
    tf.initialize_all_variables().run()
    train_epoch._run(sess)
The program stops and keeps occupying memory at the last two lines of _run(), which contain W.eval() and C.eval(). So what should I do to fix it? Can someone help?
Solved. You cannot keep adding symbolic operations inside a Python loop in Tensorflow: each iteration appends new nodes to the graph, so evaluating W and C forces an ever larger graph to be built and executed. Instead, you should build the data flow graph first, which means defining your operations once in the initialization step and only running them inside the loop.
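A minimal sketch of that pattern in graph-mode TensorFlow 1.x; the shapes and step size below are made up, and this is not the original matrix-factorization code, it only shows the operations being defined once and then run repeatedly:

import numpy as np
import tensorflow as tf  # TF 1.x graph mode

# Build the graph ONCE: one update op per variable.
W = tf.Variable(np.random.rand(200, 1000), dtype=tf.float32, name='W')
C = tf.Variable(np.random.rand(500, 200), dtype=tf.float32, name='C')
target = tf.constant(np.random.rand(500, 1000), dtype=tf.float32)

recons = target - tf.sigmoid(tf.matmul(C, W))
W_update = tf.assign(W, W + 1e-6 * tf.matmul(tf.transpose(C), recons))
C_update = tf.assign(C, C + 1e-6 * tf.matmul(recons, tf.transpose(W)))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(200):
        # Run the same pre-built ops; no new graph nodes are created here.
        sess.run(W_update)
        sess.run(C_update)
    W_final, C_final = sess.run([W, C])  # fetch values without growing the graph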

Particle instance has no attribute '__getitem__'

I am coding basic PSO (particle swarm optimization) and have been getting an error saying that a particle instance has no attribute __getitem__. I think everything is fine, but the particle class seems to have some error. Take a look at the particle class.
from numpy import array
from random import random
from math import sin, sqrt, cos, pi
import matplotlib.pyplot as plt
import pylab

## Settings
c1 = 2
c2 = 2
size = 100
bad_size = 10
dim = 10
max_iterations = 20
Error_limit = 0.00001

def functR(k):
    val = 10*dim
    for i in range(dim):
        val = val + (k[i])**2 - 10*cos(2*pi*k[i])
    return val

#print functR([0]*20)

class particle():
    def __init__(self, pos, fitness, vel, pbestpos, pbestfit):
        self.pos = pos
        self.fitness = fitness
        self.vel = vel
        self.pbestpos = pbestpos
        self.pbestfitness = pbestfit

class swarm():
    def __init__(self, size, bad_size, dim):
        #self.gbest = gbest
        self.size = size
        self.bad_size = bad_size
        self.dim = dim

    def create(self):
        particles = []
        for i in range(size + bad_size):
            p = particle()
            p.pos = array([random() for i in range(dim)])
            p.vel = 0.0
            p.fitness = 0.0
            p.pbestpos = p.pos
            p.pbestfit = p.fitness
            #p = particle(pos, fitness,vel, pbestpos, pbestfit)
            particles.append(p)
        return particles

def optimizer():
    s = swarm(size, bad_size, dim)
    new_swarm = s.create()
    gbest = new_swarm[0]
    gbestfit = functR(gbest)
    i = 0
    ## The iterative loop
    while i < max_iterations:
        for p in s:
            fitness = functR(p.pos)
            if fitness > p.fitness:
                p.fitness = fitness
                p.pbestpos = p.pos
            if fitness > gbestfit:
                gbest = p
            ## Plotting
            pylab.xlim([0,10])
            pylab.ylim([0,1.5])
            plt.plot(i, gbest.fitness, "bo")
            ## Velocity and Position update
            vel = p.vel + c1 * random() * (p.pbestpos - p.pos) \
                + c2 * random() * (gbest.pos - p.pos)
            p.pos = p.pos + vel
        plt.show()
        i += 1
    print "gbest fitness :", gbestfit
    print "Best particle :", gbest.pos

print optimizer()
You are treating a single particle() instance as a list here:
val = val + (k[i])**2 - 10*cos(2*pi*k[i])
k is an instance of particle(); the [i] syntax translates to a __getitem__ call on that instance.
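(As a side note, a minimal reproduction of that exact message under Python 2, with a made-up class name, is just any old-style instance being indexed:

class thing:    # old-style class, like particle() above under Python 2
    pass

t = thing()
t[0]            # AttributeError: thing instance has no attribute '__getitem__'

Under Python 3 the same mistake surfaces as a TypeError instead.)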
You are passing in that instance here:
gbest = new_swarm[0]
gbestfit = functR(gbest)
while elsewhere you pass in the .pos attribute instead:
for p in s:
    fitness = functR(p.pos)
so perhaps you meant to do the same for the gbestfit line:
gbest = new_swarm[0]
gbestfit = functR(gbest.pos)
