I am trying to use the vanilla transformer from PyTorch (nn.Transformer) together with PyTorch Lightning. To test the model I used a number-reversal task: given [1, 3, 5, 4, 13, 19] it should return [1, 13, 4, 5, 3, 19], with 1 and 19 being the start and end tokens respectively. The full code is below. It runs without error, but there seems to be a problem with the backpropagation: the training loss goes down at first, but it never drops below 2.8 and the accuracy never exceeds 11%.
It seems that only part of the model is able to optimize. My guess is that the weights in Embeddings and Generator receive gradients, but the weights inside nn.Transformer do not? I am really not sure.
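One way to test that guess (a hypothetical diagnostic, separate from the full code below; model stands for the Model instance right after one loss.backward() call) is to check which parameters actually received gradients:

for name, p in model.named_parameters():
    g = None if p.grad is None else p.grad.abs().sum().item()
    print(name, g)  # None or 0.0 means no gradient reached this weight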
import math
import torch.nn.functional as F
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping
class Embeddings(pl.LightningModule):
def __init__(self, d_model, vocab):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
def forward(self, x):
a = self.lut(x) * math.sqrt(self.d_model)
return a
class PositionalEncoding(pl.LightningModule):
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:, :x.size(1)]
return self.dropout(x)
class Generator(pl.LightningModule):
def __init__(self, size):
super(Generator, self).__init__()
self.proj = nn.Linear(512, size)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
class Model(pl.LightningModule):
def __init__(self, src_embed, tgt_embed, transformer, generator):
super(Model, self).__init__()
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.transformer = transformer
self.generator = generator
self.valLoss = 0
self.valAcc = 0
self.automatic_optimization = False
self.optimizer = None
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, x, y, tgt_mask=None):
x = self.src_embed(x)
y = self.tgt_embed(y)
return self.generator(self.transformer(x, y, tgt_mask=tgt_mask))
def training_step(self, batch, batch_idx):
if self.optimizer is None:
self.optimizer = self.optimizers()
batch = Batch(batch[0], batch[1])
tgt_mask = batch.trg_mask.squeeze(0)
tgt_mask = (tgt_mask != True)
output = self(batch.src, batch.trg, tgt_mask)
criterion = LossCompute(V)
loss = criterion.forward(output.contiguous().view(-1, output.size(-1)), batch.trg_y.contiguous().view(-1)) / batch.ntokens
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
self.log('train_loss', loss)
print(loss)
def validation_step(self, batch, batch_idx):
batch = Batch(batch[0], batch[1])
tgt_mask = batch.trg_mask.squeeze(0)
tgt_mask = (tgt_mask != True)
output = self(batch.src, batch.trg, tgt_mask)
criterion = LossCompute(V)
loss = criterion.forward(output.view(-1, output.size(-1)), batch.trg_y.contiguous().view(-1)) / batch.ntokens
self.log('val_loss', loss)
self.valLoss += loss
if batch_idx % 10 == 0:
print(loss)
if batch_idx == 99:
print(self.valLoss/100)
self.valLoss = 0
return {"x": output, "trg": batch.trg_y, "index": batch_idx}
def validation_step_end(self, batch):
output, trg, idx = batch["x"], batch["trg"], batch["index"]
accuracy = getAccuracy(output, trg)
self.log("accuracy", accuracy)
self.valAcc += accuracy
if idx == 99:
print(self.valAcc/100)
self.valAcc = 0
def train_dataloader(self):
data = data_gen(V, 0, 3000)
return DataLoader(data, batch_size=30, shuffle=False, num_workers=2, pin_memory=True)
def val_dataloader(self):
data = data_gen(V, 1, 1000)
return DataLoader(data, batch_size=10, shuffle=False, num_workers=2, pin_memory=True)
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=1e-3, betas=(0.9, 0.98), eps=1e-9)
class LossCompute(pl.LightningModule):
def __init__(self, size):
super(LossCompute, self).__init__()
self.criterion = nn.KLDivLoss(reduction='sum')
self.size = size
self.true_dist = None
def forward(self, x, target):
# x has size (batch_size x length, vocab_size)
assert x.size(1) == self.size
true_dist = x.data.clone()
true_dist.fill_(0)
true_dist.scatter_(1, target.data.unsqueeze(1).long(), 1)
self.true_dist = true_dist
return self.criterion(x, true_dist)
# prepare data
class Batch:
"Object for holding a batch of data with mask during training."
def __init__(self, src, trg=None):
self.src = src
if trg is not None:
self.trg = trg[:, :-1]
self.trg_y = trg[:, 1:]
self.trg_mask = \
self.make_std_mask(self.trg)
self.ntokens = self.trg_y.size(0) * self.trg_y.size(1)
print("")
    @staticmethod
def make_std_mask(tgt):
"Create a mask to hide padding and future words."
tgt_mask = subsequent_mask(tgt.size(-1)).type_as(tgt.data)
return tgt_mask
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
def data_gen(V, randomSeed, totalTrainingSample):
np.random.seed(randomSeed)
x = torch.from_numpy(np.random.randint(2, V - 2, size=(totalTrainingSample, 10)))
y = torch.flip(torch.flip(x, [0, 1]), [0])
x[:, 0] = 1
y[:, 0] = 1
x[:, -1] = V - 1
y[:, -1] = V - 1
return list(zip(x, y))
def getAccuracy(x, trg):
totalValAcc = 0
totalValAccToken = 0
trg = trg.contiguous().view(-1)
out = x.view(-1, x.size(-1)) # (batch_size * tgt_length, src_vocab)
_, index = torch.max(out, dim=-1) # index (batch_size * tgt_length)
correct = list((trg == index)).count(True)
totalValAcc += correct
totalValAccToken += index.size(0)
return totalValAcc / totalValAccToken
V = 20
transformer = nn.Transformer(num_encoder_layers=2, num_decoder_layers=2, batch_first=True)
PositionEnc = PositionalEncoding(512, 0.1)
src_emb = Embeddings(512, V)
tgt_emb = Embeddings(512, V)
gen = Generator(V)
if __name__ == '__main__':
model = Model(nn.Sequential(src_emb, PositionEnc), nn.Sequential(tgt_emb, PositionEnc), transformer, gen)
earlyStopping = EarlyStopping(monitor='val_loss', patience=3)
trainer = pl.Trainer(max_epochs=10, callbacks=[earlyStopping])
trainer.fit(model)
This is a fragment of my code:
def train(self, features, targets):
for X, y in zip(features, targets):
X = X.reshape(1, X.shape[0])
outputs = self.feed_forward(X)
When I try to call the method with my data:
train(np.array([gameDataList[n].ball_position, gameDataList[n].wall_position]), np.array(gameDataList[n].upOrDown))
where gameDataList[n].upOrDown is an array (e.g. [0.1, 0.9]) and gameDataList[n].ball_position and gameDataList[n].wall_position are floats, I get this error.
Full code:
#### Imports ####
import numpy as np
#### Neural Network Class ####
class MLP:
##### Constructor ####
def __init__(self, n_input_nodes, hidden_nodes, n_output_nodes, lr):
## Network ##
self.n_input_nodes = n_input_nodes
self.n_output_nodes = n_output_nodes
self.nodes = hidden_nodes
self.nodes.insert(0, n_input_nodes)
self.nodes.append(n_output_nodes)
## Weights and Biases##
self.weights = []
self.biases = []
for i in range(1, len(self.nodes)):
self.weights.append(np.random.uniform(-1.0, 1.0, (self.nodes[i - 1], self.nodes[i])))
self.biases.append(np.random.uniform(-1.0, 1.0, (1, self.nodes[i])))
## Learning Rate ##
self.lr = lr
## Activation Functions ##
# Linear Activation
self.linear = lambda x: x
self.d_linear = lambda x: np.ones(x.shape)
# Relu Activation
def relu(x):
x[x < 0] = 0
return x
def d_relu(out):
            out[out > 0] = 1
return out
self.relu = relu
self.d_relu = d_relu
# Sigmoid Activation
self.sigmoid = lambda x: 1 / (1 + np.exp(-x))
        self.d_sigmoid = lambda out: out * (1 - out)  # assumes out is sigmoid(x)
# Hyperbolic Tangent Activation
self.tanh = lambda x: np.tanh(x)
self.d_tanh = lambda out: 1 - out ** 2 # assumes out is tanh(x)
def getWeights(self):
return self.weights.copy()
def getBiases(self):
return self.biases.copy()
def setWeights(self, weights):
self.weights = weights.copy()
def setBiases(self, biases):
self.biases = biases.copy()
#### Feed Forward ####
def feed_forward(self, X):
outputs = [X]
logits = np.dot(X, self.weights[0]) + self.biases[0]
for i in range(1, len(self.nodes) - 1):
out = self.sigmoid(logits)
outputs.append(out)
logits = np.dot(out, self.weights[i]) + self.biases[i]
out = self.sigmoid(logits)
outputs.append(out)
return outputs
#### Backpropagation ####
def backpropagation(self, X, y, outputs):
weights_gradients = []
biases_gradients = []
d1 = y - outputs[-1]
d2 = self.d_sigmoid(outputs[-1])
error = d1 * d2
grad = outputs[-2].T * error
weights_gradients.append(grad)
biases_gradients.append(error)
for i in range(len(self.weights) - 2, 1, -1):
d = self.d_sigmoid(outputs[i])
error = np.dot(error, self.weights[i + 1].T) * d
grad = outputs[i - 1].T * error
weights_gradients.append(grad)
biases_gradients.append(error)
return weights_gradients, biases_gradients
#### Training ####
def train(self, features, targets):
# Batch Size for weight update step
batch_size = features.shape[0]
# Delta Weights Variables
delta_weights = [np.zeros(weight.shape) for weight in self.weights]
delta_biases = [np.zeros(bias.shape) for bias in self.biases]
        # For every data point: forward pass, backpropagation, store weight changes
for X, y in zip(features, targets):
# Forward pass
X = X.reshape(1, X.shape[0])
outputs = self.feed_forward(X)
            # Backpropagation
weights_gradients, biases_gradients = self.backpropagation(X, y, outputs)
for i in range(len(weights_gradients)):
delta_weights[-(i + 1)] += weights_gradients[i]
delta_biases[-(i + 1)] += biases_gradients[i]
for i in range(len(delta_weights)):
self.weights[i] += (self.lr * delta_weights[i]) / batch_size
self.biases[i] += (self.lr * delta_biases[i]) / batch_size
#### Testing Methods ####
def predict(self, X):
# Gives prediction
return self.feed_forward(X)[-1]
def test(self, features, targets):
predictions = self.predict(features)
n_correct = 0
for i in range(len(predictions)):
prediction = np.argmax(predictions[i])
correct = np.argmax(targets[i])
if prediction == correct:
n_correct += 1
return n_correct / len(targets)
class GameData:
def __init__(self, ball_position, wall_position, upOrDown):
self.wall_position = wall_position
self.ball_position = ball_position
self.upOrDown = upOrDown
I collect data and train my network in this way:
gameDataList.append(GameData(ball.trt.ycor(), b.trt.ycor(), [0.1, 0.9]))
mlp = MLP(2, [32, 32], 2, 0.0001)
n = random.randint(0, 999)
mlp.train(np.array([gameDataList[n].ball_position, gameDataList[n].wall_position]), np.array(gameDataList[n].upOrDown))
Problem solved: I needed to write two pairs of square brackets instead of one, so that the input is 2-D.
wrong example:
np.array([gameDataList[n].ball_position, gameDataList[n].wall_position])
correct example:
np.array([[gameDataList[n].ball_position, gameDataList[n].wall_position]])
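The underlying reason is shape: train zips over the first axis of features and reshapes each sample with X.reshape(1, X.shape[0]), so it expects a 2-D array with one row per sample. A quick comparison (values illustrative):

import numpy as np

print(np.array([0.5, 0.7]).shape)    # (2,)   -> zip yields bare scalars, so X.shape[0] fails
print(np.array([[0.5, 0.7]]).shape)  # (1, 2) -> one sample with two features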
I am trying to implement multiclass logistic regression from scratch, but my implementation returns bad results. I believe the definitions of the gradient function and the cost function are fine; maybe there is a problem with how these functions interact with the minimize function. I have tried to debug it but could not find out what is wrong. Could you please shed some light?
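For reference, the standard softmax-regression objective (which the code below appears to target), with T the one-hot targets, O = softmax(XW), and a squared Frobenius penalty so that cost and gradient form a consistent pair, is:

J(W) = -(1/n) * sum_{i,k} T[i,k] * log(O[i,k]) + (alpha/2) * ||W||_F^2
dJ/dW = -(1/n) * X.T @ (T - O) + alpha * W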
You can add the estimator 'myLR': myLR(**par_dict), with parameters
par_dict= {'alpha': 0.1, 'maxit': 2000, 'opt_method': 'bfgs', 'positive': False, 'penalty': None, 'verbose': True, 'seed': 3}
in this example or in any of these examples to test it.
import numpy as np
from scipy.optimize import minimize
from sklearn import preprocessing
class myLR():
def __init__(self, alpha=0.1, reltol=1e-8, maxit=1000, opt_method=None, verbose=True, seed=0):
self.alpha = alpha
self.maxit = maxit
self.reltol = reltol
self.seed = seed
self.verbose = verbose
self.opt_method = opt_method
self.lbin = preprocessing.LabelBinarizer()
def w_2d(self, w, n_classes):
return np.reshape(w, (-1, n_classes), order='F')
def softmax(self, W, X):
        a = np.exp(X @ W)
o = a / np.sum(a, axis=1, keepdims=True)
return o
def cost_wraper(self, W):
return self.cost(W, self.X, self.T, self.n_samples, self.n_classes)
def cost(self, W, X, T, n_samples, n_classes):
W = self.w_2d(W, n_classes)
log_O = np.log(self.softmax(W, X))
        reg = self.alpha * np.linalg.norm(W, ord='fro')
c = -np.sum([np.vdot(T[[i]], log_O[[i]]) for i in range(n_samples)]) / n_samples + reg
return c
def gradient_wraper(self, W):
return self.gradient(W, self.X, self.T, self.n_samples, self.n_classes)
def gradient(self, W, X, T, n_samples, n_classes):
W = self.w_2d(W, n_classes)
O = self.softmax(W, X)
reg = self.alpha * W
grad = -X.T.dot(T - O) / n_samples + reg
return grad.flatten()
def fit(self, X, y=None):
self.n_classes = len(np.unique(y))
self.n_samples, n_features = X.shape
if self.n_classes == 2:
self.T = np.zeros((self.n_samples, self.n_classes), dtype=np.float64)
for i, cls in enumerate(range(self.n_classes)):
self.T[y == cls, i] = 1
else:
self.T = self.lbin.fit_transform(y)
self.X = X
np.random.seed(self.seed)
W_0 = np.random.random(n_features * self.n_classes)
options = {'disp': self.verbose, 'maxiter': self.maxit}
f_min = minimize(fun=self.cost_wraper, x0=W_0,
method=self.opt_method,
jac=self.gradient_wraper,
options=options)
self.coef_ = self.w_2d(f_min.x, self.n_classes)
self.W_ = self.coef_
return self
def predict_proba(self, X):
O = self.softmax(self.coef_, X)
return O
def predict(self, X):
sigma = self.predict_proba(X)
y_pred = np.argmax(sigma, axis=1)
return y_pred
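Independently of the fix below, a cost/gradient pair can be sanity-checked with scipy.optimize.check_grad; a sketch with random made-up data (shapes illustrative):

import numpy as np
from scipy.optimize import check_grad

rng = np.random.RandomState(0)
X = rng.randn(50, 5)                  # 50 samples, 5 features
T = np.eye(3)[rng.randint(0, 3, 50)]  # one-hot targets, 3 classes
m = myLR(alpha=0.1)
w0 = rng.randn(5 * 3)
err = check_grad(lambda w: m.cost(w, X, T, 50, 3),
                 lambda w: m.gradient(w, X, T, 50, 3), w0)
print(err)  # small (~1e-6) if cost and gradient agree; large if they do not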
Edit: Regularization term is included.
I think it is now working with the following code.
import numpy as np
from scipy.optimize import minimize
from sklearn import preprocessing
class myLR():
def __init__(self, reltol=1e-8, maxit=1000, opt_method=None, verbose=True, seed=0):
self.maxit = maxit
self.reltol = reltol
self.seed = seed
self.verbose = verbose
self.opt_method = opt_method
self.lbin = preprocessing.LabelBinarizer()
def w_2d(self, w, n_classes):
return np.reshape(w, (n_classes, -1))
def softmax(self, W, X):
        a = np.exp(X @ W.T)
o = a / np.sum(a, axis=1, keepdims=True)
return o
def squared_norm(self, x):
x = np.ravel(x, order='K')
return np.dot(x, x)
def cost(self, W, X, T, n_samples, n_classes):
W = self.w_2d(W, n_classes)
log_O = np.log(self.softmax(W, X))
c = -(T * log_O).sum()
return c / n_samples
def gradient(self, W, X, T, n_samples, n_classes):
W = self.w_2d(W, n_classes)
O = self.softmax(W, X)
grad = -(T - O).T.dot(X)
return grad.ravel() / n_samples
def fit(self, X, y=None):
n_classes = len(np.unique(y))
n_samples, n_features = X.shape
if n_classes == 2:
T = np.zeros((n_samples, n_classes), dtype=np.float64)
for i, cls in enumerate(np.unique(y)):
T[y == cls, i] = 1
else:
T = self.lbin.fit_transform(y)
np.random.seed(self.seed)
        W_0 = np.random.random(n_classes * n_features)
options = {'disp': self.verbose, 'maxiter': self.maxit}
f_min = minimize(fun=self.cost, x0=W_0,
args=(X, T, n_samples, n_classes),
method=self.opt_method,
jac=self.gradient,
options=options)
self.coef_ = self.w_2d(f_min.x, n_classes)
self.W_ = self.coef_
return self
def predict_proba(self, X):
O = self.softmax(self.W_, X)
return O
def predict(self, X):
sigma = self.predict_proba(X)
y_pred = np.argmax(sigma, axis=1)
return y_pred
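As a quick smoke test of the revised class (a sketch on sklearn's iris data; the optimizer choice is illustrative):

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

X, y = load_iris(return_X_y=True)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
clf = myLR(maxit=500, opt_method='L-BFGS-B', verbose=False).fit(X_tr, y_tr)
print((clf.predict(X_te) == y_te).mean())  # should be well above chance on iris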
I am implementing a neural network to make predictions; it currently works with the numpy library, and I am adapting the code to my data.
Below is the current state of the neural network. There is an error at the end of the code that I do not understand.
Can anyone help me, please?
import numpy as np
from sklearn.model_selection import train_test_split
class LinearLayer:
def __init__(self, n_input, n_output):
self.n = n_input
self.m = n_output
self.W = (1/np.sqrt(n_input))*np.random.rand(n_input+1, n_output)
def forward(self, X):
self.input = np.zeros((X.shape[0],self.n+1))
# if only one feature, the input should always be a batch, at least
if len(X.shape) == 1: # of one element
self.input[:-1,:] = X.reshape(-1,self.n)
else:
self.input[:,:-1] = X
self.input[:,-1] = 1
self.output = self.input.dot(self.W) # xW + b
return self.output
def backward(self, d_out):
self.gradients = self.W.dot(d_out)[:-1]
self.dW = np.einsum("ij,ki", self.input, d_out)
return self.gradients
def updateWeights(self, lr=0.1):
self.W = self.W - lr*self.dW
class Sigmoid:
def __init__(self, n_input):
self.output = np.zeros(n_input)
self.gradients = np.zeros(n_input)
def forward(self, X):
self.output = 1/(np.exp(-X)+1)
return self.output
def backward(self, d_out):
ds = self.output.T*(1 - self.output).T
self.gradients = ds*d_out
return self.gradients
print("Training a multilayer perceptron\n")
import pandas as pd
data = pd.read_csv('Data_Balanceada.csv') #Data (74,11)
X = data.iloc[:,0:11]
y = data.iloc[:,-1]
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.25, random_state=1)
h1 = LinearLayer(11,1) #stack some layers
s1 = Sigmoid(7)
h2 = LinearLayer(7,1)
s2 = Sigmoid(1)
def loss(pred, target):
return np.mean(np.power(pred-target,2))
predict = lambda x: s2.forward(h2.forward(s1.forward(h1.forward(x))))
backpropagate = lambda d: h1.backward(s1.backward(h2.backward(s2.backward(d))))
lr = 0.005
n = 0 # patience
max_epochs = 1500
valid = loss(predict(X_test), y_test)
for i in range(max_epochs):
l = 0
p = predict(X_train)
backpropagate(p.T-y_train.T)
h1.updateWeights(lr)
h2.updateWeights(lr)
l = loss(p,y_train)
new_valid = loss(predict(X_test), y_test)
if new_valid < valid:
valid = new_valid
n = 0
else:
n += 1
if n > 50: break
if i%50 == 0:
print("Loss: {0}\t\tValidation: {1}".format(l/100, valid))
lr = lr*0.97
# Validation
print("\nFinal validation loss: {0}. {1} epochs\n".format(loss(predict(X_test), y_test),i+1))
#print(np.argmax(predict(X_test), axis=1))
#print(np.argmax(y_test, axis=1))
Dataset link:
https://mega.nz/#!jM8AQAbB!61NOeJadGXtiKJQsn_tdJ955p5lRD6kQjBlCQTHtt6I
I have this error:
Data must be 1-dimensional
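That ValueError typically comes from pandas being asked to build a Series from a 2-D array. A minimal reproduction (illustrative, independent of the code above):

import numpy as np
import pandas as pd

pd.Series(np.zeros((3, 1)))  # ValueError: Data must be 1-dimensional

In the training loop above, expressions like p.T - y_train.T mix a 2-D numpy array with a pandas Series, which can trigger exactly this.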
I have an estimator that should be compatible with the sklearn API. I am trying to tune one parameter of this estimator with GridSearchCV, but I do not understand how to do it.
This is my code:
import numpy as np
import sklearn as sk
from sklearn.linear_model import LinearRegression, LassoLarsCV, RidgeCV
from sklearn.linear_model.base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
class ELM(BaseEstimator):
def __init__(self, n_nodes, link='rbf', output_function='lasso', n_jobs=1, c=1):
self.n_jobs = n_jobs
self.n_nodes = n_nodes
self.c = c
if link == 'rbf':
self.link = lambda z: np.exp(-z*z)
elif link == 'sig':
self.link = lambda z: 1./(1 + np.exp(-z))
elif link == 'id':
self.link = lambda z: z
else:
self.link = link
if output_function == 'lasso':
self.output_function = LassoLarsCV(cv=10, n_jobs=self.n_jobs)
elif output_function == 'lr':
self.output_function = LinearRegression(n_jobs=self.n_jobs)
elif output_function == 'ridge':
self.output_function = RidgeCV(cv=10)
else:
self.output_function = output_function
return
def H(self, x):
n, p = x.shape
xw = np.dot(x, self.w.T)
xw = xw + np.ones((n, 1)).dot(self.b.T)
return self.link(xw)
def fit(self, x, y, w=None):
n, p = x.shape
self.mean_y = y.mean()
if w == None:
self.w = np.random.uniform(-self.c, self.c, (self.n_nodes, p))
else:
self.w = w
self.b = np.random.uniform(-self.c, self.c, (self.n_nodes, 1))
self.h_train = self.H(x)
self.output_function.fit(self.h_train, y)
return self
def predict(self, x):
self.h_predict = self.H(x)
return self.output_function.predict(self.h_predict)
def get_params(self, deep=True):
return {"n_nodes": self.n_nodes,
"link": self.link,
"output_function": self.output_function,
"n_jobs": self.n_jobs,
"c": self.c}
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
### Fit the c parameter ###
X = np.random.normal(0, 1, (100,5))
y = X[:,1] * X[:,2] + np.random.normal(0, .1, 100)
gs = sk.grid_search.GridSearchCV(ELM(n_nodes=20, output_function='lr'),
cv=5,
param_grid={"c":np.linspace(0.0001,1,10)},
fit_params={})
#gs.fit(X, y) # Error
There are two problems in your code:
You didn't specify the scoring argument to GridSearchCV. Since you seem to be doing regression, mean_squared_error is an option.
Your set_params doesn't return a reference to the object itself. You should add return self after the for loop, as sketched below.
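That is, the method should end with return self:

def set_params(self, **parameters):
    for parameter, value in parameters.items():
        setattr(self, parameter, value)
    return self  # GridSearchCV relies on getting the estimator back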
As Andreas already mentioned, you rarely need to redefine set_params and get_params in scikit-learn. Just inheriting from BaseEstimator should be enough.
You inherit from BaseEstimator, so it should just work. See https://scikit-learn.org/dev/developers/develop.html
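A minimal sketch of what inheriting buys you (TinyEstimator is a hypothetical example; this works as long as __init__ only stores its arguments under the same names):

from sklearn.base import BaseEstimator

class TinyEstimator(BaseEstimator):
    def __init__(self, c=1.0):
        self.c = c  # only store constructor arguments

print(TinyEstimator(c=0.5).get_params())               # {'c': 0.5}
print(TinyEstimator().set_params(c=2.0).get_params())  # {'c': 2.0}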
FYI this might be interesting to you: https://github.com/scikit-learn/scikit-learn/pull/3306