NN implementation: updating after backpropagation changes the dimensions - Python

As part of an assignment, we need to implement a neural network.
I calculate the forward result, then run backpropagation, and then update the weights (all for the same instance).
When I then try to calculate the forward value of the same instance again, I get an error that the dimensions are wrong.
import numpy as np

class MyNN:
    def __init__(self, learning_rate, layer_sizes):
        self.learning_rate = learning_rate
        self.layer_sizes = layer_sizes
        self.model_params = {}
        self.memory = {}
        self.grads = {}

        # Initializing weights
        for layer_index in range(len(layer_sizes) - 1):
            W_input = layer_sizes[layer_index + 1]
            W_output = layer_sizes[layer_index]
            self.model_params['W_' + str(layer_index + 1)] = np.random.randn(W_input, W_output) * 0.1
            self.model_params['b_' + str(layer_index + 1)] = np.random.randn(W_input) * 0.1

    def forward_single_instance(self, x):
        a_i_1 = x
        self.memory['a_0'] = x
        for layer_index in range(len(self.layer_sizes) - 1):
            W_i = self.model_params['W_' + str(layer_index + 1)]
            b_i = self.model_params['b_' + str(layer_index + 1)]
            z_i = np.dot(W_i, a_i_1) + b_i
            a_i = 1/(1+np.exp(-z_i))
            self.memory['a_' + str(layer_index + 1)] = a_i
            a_i_1 = a_i
        return a_i_1

    def log_loss(self, y_hat, y):
        '''
        Logistic loss, assuming a single value in y_hat and y.
        '''
        m = y_hat[0]
        cost = -y[0]*np.log(y_hat[0]) - (1 - y[0])*np.log(1 - y_hat[0])
        return cost

    def backward_single_instance(self, y):
        a_output = self.memory['a_' + str(len(self.layer_sizes) - 1)]
        dz = a_output - y
        for layer_index in range(len(self.layer_sizes) - 1, 0, -1):
            a_l_1 = self.memory['a_' + str(layer_index - 1)]
            dW = np.dot(dz.reshape(-1, 1), a_l_1.reshape(1, -1))
            db = dz.transpose()
            self.grads['dW_' + str(layer_index)] = dW
            self.grads['db_' + str(layer_index)] = db
            W_l = self.model_params['W_' + str(layer_index)]
            dz = (a_l_1 * (1 - a_l_1)).reshape(-1, 1) * np.dot(W_l.T, dz.reshape(-1, 1))

    def update(self):
        for layer_index in range(len(self.layer_sizes) - 1):
            Wi = 'W_' + str(layer_index + 1)
            bi = 'b_' + str(layer_index + 1)
            dWi = 'dW_' + str(layer_index + 1)
            dbi = 'db_' + str(layer_index + 1)
            W_i = self.model_params[Wi]
            b_i = self.model_params[bi]
            dW_i = self.grads[dWi]
            db_i = self.grads[dbi]
            self.model_params[Wi] = W_i - self.learning_rate * dW_i
            self.model_params[bi] = b_i - self.learning_rate * db_i
Then, for testing, I wrote this code:
nn = MyNN(0.01, [3, 2, 1])
x = np.random.randn(3)
y = np.random.randn(1)
y_hat = nn.forward_single_instance(x)
print(y_hat)
nn.backward_single_instance(y)
nn.update()
y_hat = nn.forward_single_instance(x)
This is the error that is printed:
x
[ 0.57072262 1.8578982 -1.48560691]
x
[[0.53932246 0.57051188]]
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-99-d8d9152fef18> in <module>()
----> 1 y_hat = nn.forward_single_instance(x)
2 print(y_hat)
3
4
5 l = nn.log_loss(y_hat, y)
<ipython-input-89-f354993c95f9> in forward_single_instance(self, x)
24 W_i = self.model_params['W_' + str(layer_index + 1)]
25 b_i = self.model_params['b_' + str(layer_index + 1)]
---> 26 z_i = np.dot(W_i, a_i_1) + b_i
27 a_i = 1/(1+np.exp(-z_i))
28 self.memory['a_' + str(layer_index + 1)] = a_i
ValueError: shapes (1,2) and (1,2) not aligned: 2 (dim 1) != 1 (dim 0)
The problem is in the dimensions of b_i, and I can't figure out why.
I tried variations of b_i's shape (row vector, column vector), and all of them throw the same exception.
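For what it's worth, the shape change seems to come from backward_single_instance rather than from forward: after the output layer, dz becomes a column vector, so for the hidden layers db = dz.transpose() is a (1, n) matrix, and b_i - learning_rate * db_i then broadcasts b_1 into a 2-D array, which breaks the next forward pass. A minimal sketch of one way to keep the gradients 1-D (only the two marked lines differ from the code above):

def backward_single_instance(self, y):
    a_output = self.memory['a_' + str(len(self.layer_sizes) - 1)]
    dz = a_output - y
    for layer_index in range(len(self.layer_sizes) - 1, 0, -1):
        a_l_1 = self.memory['a_' + str(layer_index - 1)]
        dW = np.dot(dz.reshape(-1, 1), a_l_1.reshape(1, -1))
        db = dz.reshape(-1)  # changed: keep the bias gradient 1-D, matching b_i
        self.grads['dW_' + str(layer_index)] = dW
        self.grads['db_' + str(layer_index)] = db
        W_l = self.model_params['W_' + str(layer_index)]
        # changed: reshape back to 1-D so dz stays a vector for the next layer
        dz = (a_l_1 * (1 - a_l_1)) * np.dot(W_l.T, dz.reshape(-1, 1)).reshape(-1)

With both gradients kept 1-D, b_1 remains a (2,) vector after update() and the second call to forward_single_instance runs with the original shapes.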

Related

Invalid index to scalar variable error when trying to use scipy.optimize.curve_fit

I have a function with several parameters that I want to optimize to fit some existing data.
The function runs fine on its own, but when I try to pass it through scipy.optimize.curve_fit, I get this error:
IndexError: invalid index to scalar variable.
I don't understand why the function works on its own without errors but fails inside curve_fit. What can I do?
The original function used dictionaries, and I thought that might be the problem, but I modified it and it still doesn't work.
This is the function I'm using:
def function_test(xy, X1, X2, X3, X4):
    precip = xy[0]
    potential_evap = xy[1]
    nUH1 = int(math.ceil(X4))
    nUH2 = int(math.ceil(2.0*X4))
    uh1_ordinates = [0] * nUH1
    uh2_ordinates = [0] * nUH2
    UH1 = [0] * nUH1
    UH2 = [0] * nUH2
    for t in range(1, nUH1 + 1):
        uh1_ordinates[t - 1] = s_curves1(t, X4) - s_curves1(t-1, X4)
    for t in range(1, nUH2 + 1):
        uh2_ordinates[t - 1] = s_curves2(t, X4) - s_curves2(t-1, X4)
    production_store = X1*0.60  # S
    routing_store = X3*0.70  # R
    qsim = []
    for j in range(2191):
        if precip[j] > potential_evap[j]:
            net_evap = 0
            scaled_net_precip = (precip[j] - potential_evap[j])/X1
            if scaled_net_precip > 13:
                scaled_net_precip = 13.
            tanh_scaled_net_precip = tanh(scaled_net_precip)
            reservoir_production = (X1 * (1 - (production_store/X1)**2) * tanh_scaled_net_precip) / (1 + production_store/X1 * tanh_scaled_net_precip)
            routing_pattern = precip[j] - potential_evap[j] - reservoir_production
        else:
            scaled_net_evap = (potential_evap[j] - precip[j])/X1
            if scaled_net_evap > 13:
                scaled_net_evap = 13.
            tanh_scaled_net_evap = tanh(scaled_net_evap)
            ps_div_x1 = (2 - production_store/X1) * tanh_scaled_net_evap
            net_evap = production_store * (ps_div_x1) / \
                (1 + (1 - production_store/X1) * tanh_scaled_net_evap)
            reservoir_production = 0
            routing_pattern = 0
        production_store = production_store - net_evap + reservoir_production
        percolation = production_store / (1 + (production_store/2.25/X1)**4)**0.25
        routing_pattern = routing_pattern + (production_store - percolation)
        production_store = percolation
        for i in range(0, len(UH1) - 1):
            UH1[i] = UH1[i+1] + uh1_ordinates[i]*routing_pattern
        UH1[-1] = uh1_ordinates[-1] * routing_pattern
        for j in range(0, len(UH2) - 1):
            UH2[j] = UH2[j+1] + uh2_ordinates[j]*routing_pattern
        UH2[-1] = uh2_ordinates[-1] * routing_pattern
        groundwater_exchange = X2 * (routing_store / X3)**3.5
        routing_store = max(0, routing_store + UH1[0] * 0.9 + groundwater_exchange)
        R2 = routing_store / (1 + (routing_store / X3)**4)**0.25
        QR = routing_store - R2
        routing_store = R2
        QD = max(0, UH2[0]*0.1 + groundwater_exchange)
        Q = QR + QD
        qsim.append(Q)
    return qsim
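One common cause of this IndexError is the shape of xdata passed to curve_fit: if xy[0] ends up being a single number instead of the precipitation series, precip[j] is no longer a valid index. A minimal sketch (with hypothetical example arrays, and assuming s_curves1/s_curves2 are defined elsewhere) of packing the two input series into one 2-row array so that xy[0] and xy[1] are the full series:

import numpy as np
from scipy.optimize import curve_fit

# Hypothetical example data; in practice these come from the existing series.
n_days = 2191
precip_obs = np.random.rand(n_days) * 10.0          # precipitation
potential_evap_obs = np.random.rand(n_days) * 5.0   # potential evapotranspiration
q_obs = np.random.rand(n_days)                       # observed discharge to fit against

# Stack the two drivers so that xy[0] is precip and xy[1] is potential_evap
xdata = np.vstack([precip_obs, potential_evap_obs])

# Hypothetical initial guess and bounds for X1..X4 (bounds keep X4 >= 1 so nUH1 > 0)
popt, pcov = curve_fit(function_test, xdata, q_obs,
                       p0=[350.0, 0.0, 90.0, 1.7],
                       bounds=([1.0, -5.0, 1.0, 1.0], [1500.0, 5.0, 500.0, 10.0]))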

Runge Kutta4 in python for complex system

I'm trying to solve a complex system that you can visualize as a set of points/nodes with spring-damper systems connected between them; each point carries the forces from all connected springs and dampers, in addition to the gravitational force on it, since each spring and damper has a specific mass.
I'm using classes to build the grid and the initial conditions, but I'm not sure how exactly to calculate the new positions and accelerations using Runge-Kutta 4.
This is the part where the Runge-Kutta step is defined:
rows = 5
columns = 6

class Runga_kutta4():
    def __init__(self, node, u0, v0, t):
        self.u0 = u0
        self.v0 = v0
        self.t = t
        self.u = u = 0, 0
        self.ux = u[0]
        self.uy = u[1]
        self.v = v = 0, 0
        self.vx = v[0]
        self.vy = v[1]
        f = Forces(u0, u, v0, v)
        self.Node_Forces = f.nodeforces(node)
        self.dt = t[1] - t[0]
        results = self.calculation()
        return results

    # Returns the acceleration a
    def acceleration(self, Node_Forces):
        """
        F = m * a
        a = F/m
        F_sys = F_externe - (F_damping + Springs) - F_g
        """
        a_list = []
        for (f, m) in zip(Node_Forces, Masses.Lattice_Mass()):
            ax = f[0]/m[0]
            ay = f[1]/m[1]
            a_list.append((ax, ay))
        return a_list.reshape(5, 6)

    def calculation(self):
        for i in range(self.t.size - 1):
            # F at time step t / 2
            f_t_05_x = (self.Node_Forces[0][i + 1] - self.Node_Forces[0][i]) / 2 + self.Node_Forces[0][i]
            f_t_05_y = (self.Node_Forces[1][i + 1] - self.Node_Forces[1][i]) / 2 + self.Node_Forces[1][i]
            u1x = self.ux[i]
            v1x = self.vx[i]
            u1y = self.uy[i]
            v1y = self.vy[i]
            a1x = self.acceleration(self.Node_Forces[0][i])
            a1y = self.acceleration(self.Node_Forces[1][i])
            u2x = self.ux[i] + v1x * self.dt / 2
            v2x = self.vx[i] + a1x * self.dt / 2
            u2y = self.uy[i] + v1y * self.dt / 2
            v2y = self.vy[i] + a1y * self.dt / 2
            a2x = self.acceleration(f_t_05_x)
            a2y = self.acceleration(f_t_05_y)
            u3x = self.ux[i] + v2x * self.dt / 2
            v3x = self.vx[i] + a2x * self.dt / 2
            u3y = self.uy[i] + v2y * self.dt / 2
            v3y = self.vy[i] + a2y * self.dt / 2
            a3x = self.acceleration(f_t_05_x)
            a3y = self.acceleration(f_t_05_y)
            u4x = self.ux[i] + v3x * self.dt
            v4x = self.vx[i] + a3x * self.dt
            u4y = self.uy[i] + v3y * self.dt
            v4y = self.vy[i] + a3y * self.dt
            a4x = self.acceleration(self.Node_Forces[0][i + 1])
            a4y = self.acceleration(self.Node_Forces[1][i + 1])
            self.ux[i + 1] = self.ux[i] + self.dt / 6 * (v1x + 2 * v2x + 2 * v3x + v4x)
            self.vx[i + 1] = self.vx[i] + self.dt / 6 * (a1x + 2 * a2x + 2 * a3x + a4x)
            self.uy[i + 1] = self.uy[i] + self.dt / 6 * (v1y + 2 * v2y + 2 * v3y + v4y)
            self.vy[i + 1] = self.vy[i] + self.dt / 6 * (a1y + 2 * a2y + 2 * a3y + a4y)
        self.u = (self.ux, self.uy)
        self.v = (self.vx, self.vy)
        return self.u, self.v
l = Lattice(3)
t0, te, dt = 0, 3, 0.001  # s
t = np.linspace(t0, te, round((te - t0)/dt + 1))
for node in l.latticeNodes():
    position0 = 0, 0
    velocity0 = 0, 0
    state0 = np.append(position0, velocity0)
    new_state = Runga_kutta4(node, position0, velocity0, t)
visualise(l)
[photo of the system]
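For reference, here is a minimal sketch of how the classical RK4 step is usually written for a second-order system u'' = a(u, v, t): the state (u, v) is advanced together, and the acceleration is re-evaluated at each intermediate state rather than only at the half-step forces. The `acceleration` callback below is hypothetical and stands in for the spring/damper/gravity force calculation:

import numpy as np

def rk4_step(u, v, t, dt, acceleration):
    """One classical RK4 step for u' = v, v' = acceleration(u, v, t).

    u and v are arrays of node positions/velocities; `acceleration` is a
    hypothetical callback returning an array of the same shape as u and v.
    """
    k1u = v
    k1v = acceleration(u, v, t)
    k2u = v + 0.5 * dt * k1v
    k2v = acceleration(u + 0.5 * dt * k1u, v + 0.5 * dt * k1v, t + 0.5 * dt)
    k3u = v + 0.5 * dt * k2v
    k3v = acceleration(u + 0.5 * dt * k2u, v + 0.5 * dt * k2v, t + 0.5 * dt)
    k4u = v + dt * k3v
    k4v = acceleration(u + dt * k3u, v + dt * k3v, t + dt)
    u_next = u + dt / 6.0 * (k1u + 2 * k2u + 2 * k3u + k4u)
    v_next = v + dt / 6.0 * (k1v + 2 * k2v + 2 * k3v + k4v)
    return u_next, v_next

If positions and velocities are stored as NumPy arrays over the whole lattice, one call advances every node at once.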

solving the wave 1-d equation with python and animate

I'm trying to solve the 1-D wave equation, and I'm writing a program that computes the solution numerically, animates it, and saves the data to a file. I don't know how to fix the error and finally get working code.
u_tt = a**2 * u_xx + f(x,t)
The program has to solve the equation both with an additional source function and with non-zero initial and boundary conditions, with graphical visualization and saving of the data to a file.
So I attach my code (Python 3.9) and the error message:
import numpy as np
import math
import matplotlib.pyplot as plt
import os
import time
import glob

def sol(I, V, f, a, L, C, T, U_0, U_L, dt, user_func=None):
    """
    solver for wave equation
    u_tt = a**2*u_xx + f(x,t) on (0,L) where u=0 for
    x=0,L, for t in (0,T].
    :param I:
    :param V:
    :param f:
    :param a:
    :param L:
    :param C:
    :param T:
    :param U_0:
    :param U_L:
    :param dt:
    :param user_func:
    :return:
    """
    nt = int(round(T / dt))
    t = np.linspace(0, nt * dt, nt + 1)  # array for time points
    dx = dt * a / float(C)
    nx = int(round(L / dx))
    x = np.linspace(0, L, nx + 1)  # array for coord points
    q = a ** 2
    C2 = (dt / dx) ** 2
    dt2 = dt * dt

    # --- checking f(x,t) ---
    if f is None or f == 0:
        f = lambda x, t: 0

    # --- check the initial conds dU(x,0)/dt ---
    if V is None or V == 0:
        V = lambda x: 0

    # boundary conds
    if U_0 is not None:
        if isinstance(U_0, (float, int)) and U_0 == 0:
            U_0 = lambda t: 0
    if U_L is not None:
        if isinstance(U_L, (float, int)) and U_L == 0:
            U_L = lambda t: 0

    # --- allocate memory ---
    u = np.zeros(nx + 1)
    u_n = np.zeros(nx + 1)
    u_nm = np.zeros(nx + 1)

    # --- valid indexing check ---
    Ix = range(0, nx + 1)
    It = range(0, nt + 1)

    # --- set the boundary conds ---
    for i in range(0, nx + 1):
        u_n[i] = I(x[i])

    if user_func is not None:
        user_func(u_n, x, t, 0)

    # --- finite difference step ---
    for i in Ix[1:-1]:
        u[i] = u_n[i] + dt * V(x[i]) + 0.5 * C2 * (0.5 * (q[i] + q[i + 1]) * (u_n[i + 1] - u_n[i]) -
                                                   0.5 * (q[i] + q[i - 1]) * (u_n[i] - u_n[i - 1])) + 0.5 * dt2 * f(x[i], t[0])

    i = Ix[0]
    if U_0 is None:
        # set the boundary conds (x=0: i-1 -> i+1 u[i-1]=u[i+1]
        # where du/dn = 0, on x=L: i+1 -> i-1 u[i+1]=u[i-1])
        ip1 = i + 1
        im1 = ip1  # i-1 -> i+1
        u[i] = u_n[i] + dt * V(x[i]) + \
            0.5 * C2 * (0.5 * (q[i] + q[ip1]) * (u_n[ip1] - u_n[i]) - 0.5 * (q[i] + q[im1])
                        * (u_n[i] - u_n[im1])) + 0.5 * dt2 * f(x[i], t[0])
    else:
        u[i] = U_0(dt)

    i = Ix[-1]
    if U_L is None:
        im1 = i - 1
        ip1 = im1  # i+1 -> i-1
        u[i] = u_n[i] + dt * V(x[i]) + \
            0.5 * C2 * (0.5 * (q[i] + q[ip1]) * (u_n[ip1] - u_n[i]) - 0.5 * (q[i] + q[im1]) * (u_n[i] - u_n[im1])) + \
            0.5 * dt2 * f(x[i], t[0])
    else:
        u[i] = U_L(dt)

    if user_func is not None:
        user_func(u, x, t, 1)

    # update data
    u_nm, u_n, u = u_n, u, u_nm

    # --- time looping ---
    for n in It[1:-1]:
        # update all inner points
        for i in Ix[1:-1]:
            u[i] = - u_nm[i] + 2 * u_n[i] + \
                C2 * (0.5 * (q[i] + q[i + 1]) * (u_n[i + 1] - u_n[i]) -
                      0.5 * (q[i] + q[i - 1]) * (u_n[i] - u_n[i - 1])) + dt2 * f(x[i], t[n])

        # --- set boundary conds ---
        i = Ix[0]
        if U_0 is None:
            # set the boundary conds
            # x=0: i-1 -> i+1 u[i-1]=u[i+1] where du/dn=0
            # x=L: i+1 -> i-1 u[i+1]=u[i-1] where du/dn=0
            ip1 = i + 1
            im1 = ip1
            u[i] = - u_nm[i] + 2 * u_n[i] + \
                C2 * (0.5 * (q[i] + q[ip1]) * (u_n[ip1] - u_n[i]) - 0.5 * (q[i] + q[im1]) * (u_n[i] - u_n[im1])) + \
                dt2 * f(x[i], t[n])
        else:
            u[i] = U_0(t[n + 1])

        i = Ix[-1]
        if U_L is None:
            im1 = i - 1
            ip1 = im1
            u[i] = - u_nm[i] + 2 * u_n[i] + \
                C2 * (0.5 * (q[i] + q[ip1]) * (u_n[ip1] - u_n[i]) - 0.5 * (q[i] + q[im1]) * (u_n[i] - u_n[im1])) + \
                dt2 * f(x[i], t[n])
        else:
            u[i] = U_L(t[n + 1])

        if user_func is not None:
            if user_func(u, x, t, n + 1):
                break

        u_nm, u_n, u = u_n, u, u_nm

    return u, x, t

# --- here function for return functions ---
# return func(x)
def func(x):
    """
    :param x:
    :return:
    """
    return  # expression

# start simulate and animate or visualisation and saving the data to a file
def simulate(
        I, V, f, a, L, C, T, U_0, U_L, dt,  # params
        umin, umax,  # amplitude
        animate=True,  # animate or not?
        solver_func=sol,  # call the solver
        mode='plotter',  # mode: plotting the graphic or saving to file
):
    # code for visualization and simulate
    ...........
    # start simulate
    solver_func(I, V, f, a, L, C, T, U_0, U_L, dt, user_func)
    return 0

def task():
    '''
    test tasking for solver and my problem
    :return:
    '''
    I
    L = 1
    a = 1
    C = 0.85
    T = 1
    dt = 0.05
    U_0, U_L, V, f
    umax = 2
    umin = -umax
    simulate(I, V, f, a, L, C, T, U_0, U_L, dt, umax, umin, animate=True, solver_func=sol, mode='plotter',)

if __name__ == '__main__':
    task()
And I get this error:
File "C:\\LR2-rep\wave_eq_1d.py", line 102, in sol
u[i] = u_n[i] + dt * V(x[i]) + 0.5 * C2 * (0.5 * (q[i] + q[i + 1]) * (u_n[i + 1] - u_n[i]) -
TypeError: 'int' object is not subscriptable
I understand the meaning of the error, but I do not understand how to fix it, and for almost two weeks I have not been able to get the program working... I would appreciate help with solving this problem. Thank you very much in advance!
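If it helps: the 'int' object is not subscriptable message points at q = a ** 2, which is a plain number when a is a scalar (a = 1 in task()), while the update formulas index q[i] as if the wave speed varied over the grid. A minimal sketch of one way to make q an array inside sol(), assuming a is either a constant or a callable a(x):

# Sketch: replace `q = a ** 2` inside sol(), after x has been created.
if callable(a):
    q = np.array([a(xi) ** 2 for xi in x])   # spatially varying wave speed a(x)
else:
    q = np.full(nx + 1, float(a) ** 2)       # constant wave speed

Separately, task() passes I, V, f, U_0 and U_L without ever assigning them, so those still need concrete definitions before simulate() is called.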

offset a parallel line to a given line python

I want to draw a line parallel to the shape given by the X, Y coordinates below. This code draws it:
import numpy as np
import matplotlib.pyplot as plt

x = [187, 879, 722, 322]
y = [341, 344, 112, 112]
newX = []
newY = []

def findIntesection(p1x, p1y, p2x, p2y, p3x, p3y, p4x, p4y):
    dx12 = p2x - p1x
    dy12 = p2y - p1y
    dx34 = p4x - p3x
    dy34 = p4y - p3y
    denominator = (dy12*dx34 - dx12*dy34)
    t1 = ((p1x - p3x) * dy34 + (p3y - p1y) * dx34) / denominator
    t2 = ((p3x - p1x) * dy12 + (p1y - p3y) * dx12) / -denominator
    intersectX = p1x + dx12 * t1
    intersectY = p1y + dy12 * t1
    if (t1 < 0): t1 = 0
    elif (t1 > 1): t1 = 1
    if (t2 < 0): t2 = 0
    elif (t2 > 1): t2 = 1
    return intersectX, intersectY

def normalizeVec(x, y):
    distance = np.sqrt(x*x + y*y)
    return x/distance, y/distance

def getEnlarged(oldX, oldY, offset):
    num_points = len(oldX)
    for j in range(num_points):
        i = j - 1
        if i < 0:
            i += num_points
        k = (j + 1) % num_points
        vec1X = oldX[j] - oldX[i]
        vec1Y = oldY[j] - oldY[i]
        v1normX, v1normY = normalizeVec(vec1X, vec1Y)
        v1normX *= offset
        v1normY *= offset
        n1X = -v1normY
        n1Y = v1normX
        pij1X = oldX[i] + n1X
        pij1Y = oldY[i] + n1Y
        pij2X = oldX[j] + n1X
        pij2Y = oldY[j] + n1Y
        vec2X = oldX[k] - oldX[j]
        vec2Y = oldY[k] - oldY[j]
        v2normX, v2normY = normalizeVec(vec2X, vec2Y)
        v2normX *= offset
        v2normY *= offset
        n2X = -v2normY
        n2Y = v2normX
        pjk1X = oldX[j] + n2X
        pjk1Y = oldY[j] + n2Y
        pjk2X = oldX[k] + n2X
        pjk2Y = oldY[k] + n2Y
        intersectX, intersetY = findIntesection(pij1X, pij1Y, pij2X, pij2Y, pjk1X, pjk1Y, pjk2X, pjk2Y)
        #print(intersectX, intersetY)
        newX.append(intersectX)
        newY.append(intersetY)

getEnlarged(x, y, 20)
plt.plot(x, y)
plt.plot(newX, newY)
plt.show()
This gives the result shown below.
It gives a good result, drawing a line parallel to each edge of our trapezoidal shape, but I want it to be a closed shape instead of an open one.
I want to join the first and last coordinates so that it forms a closed shape. Any help will be appreciated.
Using the approach from here:
The outer_ccw parameter combines the vertex order and the desired offset direction. For CCW order and an outer polygon it is 1; for an inner polygon it should be -1.
def makeOffsetPoly(oldX, oldY, offset, outer_ccw=1):
    num_points = len(oldX)
    for curr in range(num_points):
        prev = (curr + num_points - 1) % num_points
        next = (curr + 1) % num_points
        vnX = oldX[next] - oldX[curr]
        vnY = oldY[next] - oldY[curr]
        vnnX, vnnY = normalizeVec(vnX, vnY)
        nnnX = vnnY
        nnnY = -vnnX
        vpX = oldX[curr] - oldX[prev]
        vpY = oldY[curr] - oldY[prev]
        vpnX, vpnY = normalizeVec(vpX, vpY)
        npnX = vpnY * outer_ccw
        npnY = -vpnX * outer_ccw
        bisX = (nnnX + npnX) * outer_ccw
        bisY = (nnnY + npnY) * outer_ccw
        bisnX, bisnY = normalizeVec(bisX, bisY)
        bislen = offset / np.sqrt((1 + nnnX*npnX + nnnY*npnY)/2)
        newX.append(oldX[curr] + bislen * bisnX)
        newY.append(oldY[curr] + bislen * bisnY)

x = [0, 100, 60, 40]
y = [0, 0, 50, 50]
makeOffsetPoly(x, y, 20)
print(newX, newY)
>>> [-29.424478775259594, 129.4244787752596, 66.79706177729007, 33.202938222709925]
    [-14.14213562373095, -14.14213562373095, 64.14213562373095, 64.14213562373095]
Just append the first coordinates to the end of your lists.
x.append(x[0])
y.append(y[0])
newX.append(newX[0])
newY.append(newY[0])
Place this right before you plot.

Understanding the implementation of ConvLSTM in tensorflow

In the TensorFlow implementation of the ConvLSTM cell, the following lines of code appear:
x_i = self.input_conv(inputs_i, kernel_i, bias_i, padding=self.padding)
x_f = self.input_conv(inputs_f, kernel_f, bias_f, padding=self.padding)
x_c = self.input_conv(inputs_c, kernel_c, bias_c, padding=self.padding)
x_o = self.input_conv(inputs_o, kernel_o, bias_o, padding=self.padding)
h_i = self.recurrent_conv(h_tm1_i, recurrent_kernel_i)
h_f = self.recurrent_conv(h_tm1_f, recurrent_kernel_f)
h_c = self.recurrent_conv(h_tm1_c, recurrent_kernel_c)
h_o = self.recurrent_conv(h_tm1_o, recurrent_kernel_o)
i = self.recurrent_activation(x_i + h_i)
f = self.recurrent_activation(x_f + h_f)
c = f * c_tm1 + i * self.activation(x_c + h_c)
o = self.recurrent_activation(x_o + h_o)
h = o * self.activation(c)
The corresponding equations, as described in the paper, are:
i_t = σ(W_xi * X_t + W_hi * H_{t-1} + W_ci ∘ C_{t-1} + b_i)
f_t = σ(W_xf * X_t + W_hf * H_{t-1} + W_cf ∘ C_{t-1} + b_f)
C_t = f_t ∘ C_{t-1} + i_t ∘ tanh(W_xc * X_t + W_hc * H_{t-1} + b_c)
o_t = σ(W_xo * X_t + W_ho * H_{t-1} + W_co ∘ C_t + b_o)
H_t = o_t ∘ tanh(C_t)
I am not able to see how W_ci, W_cf, W_co and C_{t-1}, C_t are used in the input, forget and output gates. Where are they used in computing the four gates?
Of course you cannot find those in that implementation of the ConvLSTM cell, because it does not use peephole connections:
Peephole connections allow the gates to utilize the previous internal state as well as the previous hidden state (which is what LSTMCell is limited to).
The tf.keras.experimental.PeepholeLSTMCell follows the equations you posted above, as you can see in its source code:
x_i, x_f, x_c, x_o = x
h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
i = self.recurrent_activation(
    x_i + K.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]) +
    self.input_gate_peephole_weights * c_tm1)
f = self.recurrent_activation(x_f + K.dot(
    h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]) +
    self.forget_gate_peephole_weights * c_tm1)
c = f * c_tm1 + i * self.activation(x_c + K.dot(
    h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))
o = self.recurrent_activation(
    x_o + K.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]) +
    self.output_gate_peephole_weights * c)
Or, more clearly, if you look at the source code of tf.compat.v1.nn.rnn_cell.LSTMCell:
if self._use_peepholes:
    c = (
        sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * c_prev +
        sigmoid(i + self._w_i_diag * c_prev) * self._activation(j))
else:
    c = (
        sigmoid(f + self._forget_bias) * c_prev +
        sigmoid(i) * self._activation(j))
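If you specifically want the W_ci, W_cf, W_co terms from the paper inside a ConvLSTM, one option is a custom cell that adds element-wise peephole terms to the gate pre-activations. A minimal sketch of just the gate math, mirroring the ConvLSTM snippet above (w_ci, w_cf, w_co are hypothetical extra trainable weights, broadcastable to the cell state):

# Hypothetical peephole variant of the ConvLSTM gate equations from the paper.
# w_ci, w_cf, w_co would be additional trainable tensors shaped like c_tm1.
i = self.recurrent_activation(x_i + h_i + w_ci * c_tm1)
f = self.recurrent_activation(x_f + h_f + w_cf * c_tm1)
c = f * c_tm1 + i * self.activation(x_c + h_c)
o = self.recurrent_activation(x_o + h_o + w_co * c)  # note: peeks at the new state c
h = o * self.activation(c)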
