AttributeError: 'KMeans' object has no attribute 'labels_' (PyTorch / Python)
First of all, thank you. I am trying to train a model in PyTorch that extracts features from point clouds using deep learning, but training fails with the following error: AttributeError: 'KMeans' object has no attribute 'labels_'. Could anyone help with this? Thanks!
# Training loop
def training_loop(gpu, training_dataloader, model, loss_fn, optimizer):
    losses = []
    correct = 0
    batch_results = dict()
    conf_mat = np.zeros((10, 10))
    for batch_n, batch in enumerate(training_dataloader):  # batch[batch, pos, ptr, y]
        batch_size = int(batch.batch.size()[0] / sample_points)
        if dimensionality == 3:
            # Input dim [:,3] for your geometry x,y,z
            X = batch.pos.cuda(non_blocking=True).view(batch_size, sample_points, -1) + torch.normal(
                torch.zeros(batch_size, sample_points, dimensionality),
                torch.full((batch_size, sample_points, dimensionality), fill_value=0.1)).cuda(gpu)
        else:
            # Input dim [:,6] for your geometry x,y,z and normals nx,ny,nz
            X = torch.cat((batch.pos.cuda(non_blocking=True), batch.normal.cuda(non_blocking=True)), 1).view(batch_size, sample_points, -1) + torch.normal(
                torch.zeros(batch_size, sample_points, dimensionality),
                torch.full((batch_size, sample_points, dimensionality), fill_value=0.1)).cuda(gpu)
        y = batch.y.cuda(non_blocking=True).flatten()  # size (batch_size) --> torch.Size([8])
        # Compute predictions
        pred = model(None, X)  # size (batch_size, classes) --> torch.Size([8, 10])
        if overall_classes_loss:
            # weighted CE Loss over all classes
            loss = loss_fn(pred, y)
        else:
            # weighted batchwise Loss
            sample_count = np.array([[x, batch.y.tolist().count(x)] for x in batch.y])[:, 1]
            batch_weights = 1. / sample_count
            batch_weights = torch.from_numpy(batch_weights)
            batch_weights = batch_weights.double()
            loss = element_weighted_loss(pred, batch.y, batch_weights, gpu)
        correct += (pred.argmax(1) == y).type(torch.float).sum().item()
        print(f"Loss: {loss}")
        tensor_list_y = [torch.ones_like(y) for _ in range(dist.get_world_size())]
        tensor_list_pred = [torch.ones_like(y) for _ in range(dist.get_world_size())]
        torch.distributed.all_gather(tensor_list_y, y, group=None, async_op=False)
        torch.distributed.all_gather(tensor_list_pred, pred.argmax(1), group=None, async_op=False)
        tensor_list_y = torch.cat(tensor_list_y)
        tensor_list_pred = torch.cat(tensor_list_pred)
        # Confusion Matrix
        conf_mat += confusion_matrix(tensor_list_y.cpu().detach().numpy(), tensor_list_pred.cpu().detach().numpy(), labels=np.arange(0, 10))
        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        # Save batch predictions
        batch_results[batch_n] = {'true': tensor_list_y, 'pred': tensor_list_pred}
        if verbosity == True:
            print(f"\n\nTRAIN on GPU:{gpu}: True Label {y} - Prediction {pred.argmax(1)} - Loss {loss}")
            truevalue = '\t\t'.join(classes[items] for items in y.tolist())
            predvalues = '\t\t'.join(classes[items] for items in pred.argmax(1).tolist())
            print(f"INFO on GPU:{gpu}: TRAIN - True Value\t {truevalue}")
            print(f"INFO on GPU:{gpu}: TRAIN - Predictions\t {predvalues}")
        if batch_n % 25 == 0:
            torch.distributed.reduce(loss, 0)
    return torch.tensor(losses, device=f"cuda:{gpu}"), torch.tensor(correct, device=f"cuda:{gpu}"), batch_results, conf_mat
# Test loop
def test_loop(gpu, test_dataloader, model, loss_fn):
    test_losses = []
    correct = 0
    batch_results = dict()
    conf_mat = np.zeros((10, 10))
    with torch.no_grad():
        for batch_n, batch in enumerate(test_dataloader):
            batch_size = int(batch.batch.size()[0] / sample_points)
            if dimensionality == 3:
                # Input dim [:,3] for your geometry x,y,z
                X = batch.pos.cuda(non_blocking=True).view(batch_size, sample_points, -1)
            else:
                # Input dim [:,6] for your geometry x,y,z and normals nx,ny,nz
                X = torch.cat((batch.pos.cuda(non_blocking=True), batch.normal.cuda(non_blocking=True)), 1).view(batch_size, sample_points, -1)
            y = batch.y.cuda(non_blocking=True).flatten()
            pred = model(None, X)  # size (batch, classes) per batch_n
            if overall_classes_loss:
                # weighted CE Loss over all classes
                loss = loss_fn(pred, y)
            else:
                # weighted batchwise Loss
                sample_count = np.array([[x, batch.y.tolist().count(x)] for x in batch.y])[:, 1]
                batch_weights = 1. / sample_count
                batch_weights = torch.from_numpy(batch_weights)
                batch_weights = batch_weights.double()
                loss = element_weighted_loss(pred, batch.y, batch_weights, gpu)
            test_losses.append(loss.item())
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
            print(f"Loss: {loss}")
            tensor_list_y = [torch.ones_like(y) for _ in range(dist.get_world_size())]
            tensor_list_pred = [torch.ones_like(y) for _ in range(dist.get_world_size())]
            torch.distributed.all_gather(tensor_list_y, y, group=None, async_op=False)
            torch.distributed.all_gather(tensor_list_pred, pred.argmax(1), group=None, async_op=False)
            tensor_list_y = torch.cat(tensor_list_y)
            tensor_list_pred = torch.cat(tensor_list_pred)
            # Confusion Matrix
            conf_mat += confusion_matrix(tensor_list_y.cpu().detach().numpy(), tensor_list_pred.cpu().detach().numpy(), labels=np.arange(0, 10))
            # Save batch predictions
            batch_results[batch_n] = {'true': tensor_list_y, 'pred': tensor_list_pred}
            if verbosity == True:
                print(f"\n\nTEST on GPU:{gpu}: True Label {y} - Prediction {pred.argmax(1)} - Loss {loss}")
                truevalue = '\t\t'.join(classes[items] for items in y.tolist())
                predvalues = '\t\t'.join(classes[items] for items in pred.argmax(1).tolist())
                print(f"INFO on GPU:{gpu}: TEST - True Value\t {truevalue}")
                print(f"INFO on GPU:{gpu}: TEST - Predictions\t {predvalues}")
    test_loss = statistics.mean(test_losses)
    return torch.tensor(correct, device=f"cuda:{gpu}"), torch.tensor(test_loss, device=f"cuda:{gpu}"), batch_results, conf_mat
def train_optimisation(gpu, gpus, training_dataloader, test_dataloader, model, loss_fn, optimizer, scheduler, dir_path, initial_epoch):
    epoch_losses = []
    training_accuracies = []
    test_losses = []
    test_accuracies = []
    learning_rates = []
    counter = 0  # early stopping counter
    batchwise_results = dict()
    # Learning Rate Scheduler
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=20)
    for i in range(initial_epoch, initial_epoch + epochs):
        if gpu == 0:
            if initial_epoch > 0:
                print(f"\n\nEpoch {i}\n-------------------------------")
            else:
                print(f"\n\nEpoch {i + 1}\n-------------------------------")
        # TRAIN
        losses, training_accuracy, train_batch_result, train_conf_mat = training_loop(gpu, training_dataloader, model, loss_fn, optimizer)
        average_loss = torch.mean(losses)
        torch.distributed.reduce(average_loss, 0, torch.distributed.ReduceOp.SUM)
        torch.distributed.reduce(training_accuracy, 0, torch.distributed.ReduceOp.SUM)
        # TEST
        test_accuracy, test_loss, test_batch_result, test_conf_mat = test_loop(gpu, test_dataloader, model, loss_fn)
        torch.distributed.reduce(test_accuracy, 0, torch.distributed.ReduceOp.SUM)
        torch.distributed.reduce(test_loss, 0, torch.distributed.ReduceOp.SUM)
        # save results
        batchwise_results[i] = {'train': train_batch_result, 'test': test_batch_result}
        if gpu == 0:  # the following operations are performed only by the process running on the first GPU
            average_loss = average_loss / torch.tensor(gpus, dtype=torch.float)  # average loss among all GPUs
            test_accuracy = test_accuracy / torch.tensor(len(test_dataloader.dataset), dtype=torch.float) * torch.tensor(100.0)
            training_accuracy = training_accuracy / torch.tensor(len(training_dataloader.dataset), dtype=torch.float) * torch.tensor(100.0)
            test_loss = test_loss / torch.tensor(gpus, dtype=torch.float)
            epoch_losses.append(average_loss.item())
            training_accuracies.append(training_accuracy.item())
            test_losses.append(test_loss.item())
            test_accuracies.append(test_accuracy.item())
            learning_rates.append((optimizer.param_groups[0])["lr"])
            print(f"\nBatch size: {batch_size * int(gpus)}")
            print(f"average Training Loss: {average_loss.item():.6f}")
            print(f"average Test Loss: {test_loss.item():.6f}")
            print(f"\naverage Training Acc: {training_accuracy.item():.6f}")
            print(f"average Test Acc: {test_accuracy.item():.6f}")
            printLearningRate(optimizer)
            scheduler.step(test_loss)
            # saving model checkpoint
            save_checkpoint(model, optimizer, scheduler, i, epoch_losses, training_accuracies, test_losses, test_accuracies, learning_rates,
                            os.path.join(dir_path, f"epoch{i}.pth"),
                            {key: value for key, value in batchwise_results[i].items() if key == 'train'},
                            {key: value for key, value in batchwise_results[i].items() if key == 'test'},
                            train_conf_mat, test_conf_mat)
            # TODO: implement ONNX export
        # early stopping scheduler
        if early_stopping(test_losses) == True:
            counter += 1
            print(f"Early Stopping counter: {counter} of {patience}")
        if counter >= patience:
            print("\n\nEarly Stopping activated")
            print(f"Training stopped at Epoch {i + 1}")
            dist.destroy_process_group()
            exit()
def train(gpu, gpus, world_size):
    torch.manual_seed(0)
    torch.cuda.set_device(gpu)
    try:
        dist.init_process_group(backend='nccl', world_size=world_size, rank=gpu)  # for distributed GPU training
    except RuntimeError:
        print("\n\nINFO: RuntimeError was raised >> using gloo backend instead of nccl!\n")
        dist.init_process_group(backend='gloo', world_size=world_size, rank=gpu)  # as a fallback option
    dir_path = None
    if gpu == 0:
        dir_path = "stackgraphConvPool3DPnet"
        createdir(dir_path)
        training_number = next_training_number(dir_path)
        dir_path = os.path.join(dir_path, f"train{training_number}")
        createdir(dir_path)
        # save hyper-parameters in a txt protocol file
        save_hyperparameters(dir_path, 'hyperparameters.txt')
        print("\nINFO: Protocol file saved successfully . . .")
    model = Classifier(shrinkingLayers, mlpClassifier)
    torch.cuda.set_device(gpu)
    model.cuda(gpu)
    # setting up the optimizer
    if optimizer_str == "SGD":
        optimizer = torch.optim.SGD(model.parameters(), learning_rate, momentum=momentum, weight_decay=weight_decay)
    elif optimizer_str == "RMSprop":
        optimizer = torch.optim.RMSprop(model.parameters(), learning_rate, weight_decay=weight_decay)
    else:
        optimizer = torch.optim.Adam(model.parameters(), learning_rate, weight_decay=weight_decay)
    # single-program multiple-data training paradigm (Distributed Data-Parallel Training)
    model = DDP(model, device_ids=[gpu])
    if dimensionality == 3:
        training_data = ModelNet("ModelNet10_train_data", transform=lambda x: NormalizeScale()(SamplePoints(num=sample_points)(x)))
    else:
        training_data = ModelNet("ModelNet10_train_data", transform=lambda x: NormalizeScale()(NormalizeRotation()(SamplePoints(num=sample_points, remove_faces=True, include_normals=True)(x))))
    training_sampler = DistributedWeightedSampler(training_data, num_replicas=world_size)  # weight unbalanced classes by 1/cls_count
    training_dataloader = DataLoader(dataset=training_data, batch_size=batch_size, shuffle=data_shuffle, num_workers=0,
                                     pin_memory=True, sampler=training_sampler)
    if dimensionality == 3:
        test_data = ModelNet("ModelNet10_test_data", train=False, transform=lambda x: NormalizeScale()(SamplePoints(num=sample_points)(x)))
    else:
        test_data = ModelNet("ModelNet10_test_data", train=False, transform=lambda x: NormalizeScale()(NormalizeRotation()(SamplePoints(num=sample_points, remove_faces=True, include_normals=True)(x))))
    test_sampler = DistributedWeightedSampler(test_data, num_replicas=world_size)  # weight unbalanced classes by 1/cls_count
    test_dataloader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=data_shuffle, num_workers=0,
                                 pin_memory=True, sampler=test_sampler)
    # weighted CE Loss over all classes C
    class_sample_count = np.array([len(np.where(training_data.data.y == t)[0]) for t in np.unique(training_data.data.y)])
    weight = 1. / class_sample_count
    weight = torch.from_numpy(weight)
    weight = weight.float()
    loss_fn = nn.CrossEntropyLoss(weight=weight).cuda(gpu)
    # continue training from a certain checkpoint
    continue_from_scratch = True if args.resume is None else False
    if continue_from_scratch:
        if gpu == 0:
            print("\nINFO: Training from scratch has started . . .")
        train_optimisation(gpu, gpus, training_dataloader, test_dataloader, model, loss_fn, optimizer, None, dir_path, 0)
    else:
        checkpoint_path = "stackgraphConvPool3DPnet/" + args.resume
        if gpu == 0:
            print(f"\nINFO: Training has started from checkpoint {checkpoint_path.split('/')[2].split('.')[0]} in {checkpoint_path.split('/')[1]} . . .")
        model.load_state_dict(torch.load(checkpoint_path)['model_state_dict'], strict=False)
        optimizer.load_state_dict(torch.load(checkpoint_path)['optimizer_state_dict'])
        final_epoch = (torch.load("stackgraphConvPool3DPnet/" + args.resume)['epoch']) + 1
        train_optimisation(gpu, gpus, training_dataloader, test_dataloader, model, loss_fn, optimizer, None, dir_path, final_epoch)
Code tools:
class KMeansInitMostDistantFromMean:
    def __call__(self, *args, **kwargs):
        X, k = args
        mean = np.mean(X, axis=0)
        arg_sorted = np.argsort(np.apply_along_axis(lambda y: euclidean(mean, y), 1, X))
        output = X[np.flip(arg_sorted)[:k]]
        return output

class KMeansInit:
    def __call__(self, *args, **kwargs):
        X, k = args
        current_centroids = np.expand_dims(np.mean(X, axis=0), 0)
        for i in range(k - 1):
            X, current_centroids = self.next_centroid(X, current_centroids)
        return current_centroids
    def next_centroid(self, X, curr_centroids):
        highest_dist = 0.0
        next_centroid = None
        next_centroid_index = None
        for i, x in enumerate(X):
            max_dist = np.amax(np.apply_along_axis(lambda y: euclidean(x, y), 1, curr_centroids))
            if max_dist > highest_dist:
                next_centroid = x
                highest_dist = max_dist
                next_centroid_index = i
        return np.delete(X, next_centroid_index, 0), np.append(curr_centroids, np.expand_dims(next_centroid, 0), 0)
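For context, both initializers are plain callables that take a point matrix X and a cluster count k and return k seed centroids. A minimal sketch of how they can be exercised in isolation and handed to scikit-learn as an explicit init array (the toy data and the KMeans hand-off below are illustrative assumptions, not part of the original code):

import numpy as np
from scipy.spatial.distance import euclidean  # required by the initializers above
from sklearn.cluster import KMeans

X = np.random.rand(100, 3)  # toy point cloud for illustration: 100 points in 3D
seeds = KMeansInitMostDistantFromMean()(X, 4)  # the 4 points farthest from the mean, shape (4, 3)
km = KMeans(n_clusters=4, init=seeds, n_init=1).fit(X)  # .fit() is what creates km.labels_
print(km.labels_[:10])

Note that labels_ only appears after the fit() call, which is exactly the attribute the reported error complains about.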
class Conv(gnn.MessagePassing):
    def __init__(self, sigma: nn.Module, F: nn.Module, W: nn.Module, M: nn.Module, C: int, P: int):
        super().__init__(aggr="mean")
        self.sigma = sigma
        self.F = F
        self.W = W
        self.M = M
        self.C = C
        self.P = P
        self.B = torch.randn(C + P, requires_grad=True)
    def forward(self, feature_matrix, edge_index):
        return self.propagate(edge_index, feature_matrix=feature_matrix)
    def message(self, feature_matrix_i, feature_matrix_j):
        message = self.F(feature_matrix_j - feature_matrix_i)
        message = message.view(-1, self.C + self.P, self.C)
        feature_matrix_i_ = feature_matrix_i.unsqueeze(2)
        output = torch.bmm(message, feature_matrix_i_).squeeze()
        return output
    def update(self, aggr_out, feature_matrix):
        Weight = self.M(aggr_out)
        aggr_out = aggr_out * Weight
        transform = self.W(feature_matrix)
        transform = transform.view(-1, self.C + self.P, self.C)
        feature_matrix = feature_matrix.unsqueeze(2)
        transformation = torch.bmm(transform, feature_matrix).squeeze()
        aggr_out = aggr_out + transformation
        output = aggr_out + self.B
        output = self.sigma(output)
        return output
class Aggregation(nn.Module):
    def __init__(self, mlp1: nn.Module, mlp2: nn.Module):
        super().__init__()
        self.mlp1 = mlp1
        self.mlp2 = mlp2
        self.softmax = nn.Softmax(0)
    def forward(self, feature_matrix_batch: torch.Tensor, conv_feature_matrix_batch: torch.Tensor):
        N, I, D = feature_matrix_batch.size()
        N_, I_, D_ = conv_feature_matrix_batch.size()
        augmentation = D_ - D
        if augmentation > 0:
            feature_matrix_batch = F.pad(feature_matrix_batch, (0, augmentation))
        S1 = torch.mean(feature_matrix_batch, 1)
        S2 = torch.mean(conv_feature_matrix_batch, 1)
        Z1 = self.mlp1(S1)
        Z2 = self.mlp2(S2)
        M = self.softmax(torch.stack((Z1, Z2), 0))
        M1 = M[0]
        M2 = M[1]
        M1 = M1.unsqueeze(1).expand(-1, I, -1)
        M2 = M2.unsqueeze(1).expand(-1, I, -1)
        output = (M1 * feature_matrix_batch) + (M2 * conv_feature_matrix_batch)
        return output
class MaxPool(nn.Module):
    def __init__(self, k: int):
        super().__init__()
        self.k = k
    def forward(self, feature_matrix_batch: torch.Tensor, cluster_index: torch.Tensor):
        N, I, D = feature_matrix_batch.size()
        feature_matrix_batch = feature_matrix_batch.view(-1, D)
        output = scatter_max(feature_matrix_batch, cluster_index, dim=0)[0]
        output = output.view(N, self.k, -1)
        return output

class GraphConvPool3DPnet(nn.Module):
    def __init__(self, shrinkingLayers: [ShrinkingUnit], mlp: nn.Module):
        super().__init__()
        self.neuralNet = nn.Sequential(*shrinkingLayers, mlp)
    def forward(self, x: torch.Tensor, pos: torch.Tensor):
        feature_matrix_batch = torch.cat((pos, x), 2) if x is not None else pos
        return self.neuralNet(feature_matrix_batch)
class ShrinkingUnitStack(nn.Module):
    def __init__(self, input_stack: int, stack_fork: int, mlp: nn.Module, learning_rate: int, k: int, kmeansInit, n_init,
                 sigma: nn.Module, F: nn.Module, W: nn.Module, M: nn.Module, C, P, mlp1: nn.Module, mlp2: nn.Module):
        super().__init__()
        self.stack_fork = stack_fork
        stack_size = input_stack * stack_fork
        self.selfCorrStack = SelfCorrelationStack(stack_size, mlp, learning_rate)
        self.kmeansConvStack = KMeansConvStack(stack_size, k, kmeansInit, n_init, sigma, F, W, M, C, P)
        self.localAdaptFeaAggreStack = AggregationStack(stack_size, mlp1, mlp2)
        self.graphMaxPoolStack = MaxPoolStack(stack_size, k)
    def forward(self, feature_matrix_batch):
        feature_matrix_batch = torch.repeat_interleave(feature_matrix_batch, self.stack_fork, dim=0)
        feature_matrix_batch = self.selfCorrStack(feature_matrix_batch)
        feature_matrix_batch_, conv_feature_matrix_batch, cluster_index = self.kmeansConvStack(feature_matrix_batch)
        feature_matrix_batch = self.localAdaptFeaAggreStack(feature_matrix_batch, conv_feature_matrix_batch)
        output = self.graphMaxPoolStack(feature_matrix_batch, cluster_index)
        return output

class SelfCorrelationStack(nn.Module):
    def __init__(self, stack_size: int, mlp: nn.Module, learning_rate: int = 1.0):
        super().__init__()
        self.selfCorrelationStack = nn.ModuleList([SelfCorrelation(copy.deepcopy(mlp), learning_rate) for i in range(stack_size)])
        self.apply(init_weights)
    def forward(self, feature_matrix_batch: torch.Tensor):
        # feature_matrix_batch size = (S, N, I, D) where S=stack_size, N=batch number, I=members, D=member dimensionality
        output = selfCorrThreader(self.selfCorrelationStack, feature_matrix_batch)
        # output size = (S, N, I, D)
        return output

class KMeansConvStack(nn.Module):
    def __init__(self, stack_size: int, k: int, kmeansInit, n_init: int, sigma: nn.Module, F: nn.Module, W: nn.Module,
                 M: nn.Module, C: int, P: int):
        super().__init__()
        self.kmeansConvStack = nn.ModuleList([
            KMeansConv(k, kmeansInit, n_init, copy.deepcopy(sigma), copy.deepcopy(F), copy.deepcopy(W),
                       copy.deepcopy(M), C, P) for i in range(stack_size)])
        self.apply(init_weights)
    def forward(self, feature_matrix_batch: torch.Tensor):
        # feature_matrix_batch size = (S, N, I, D) where S=stack size, N=batch number, I=members, D=member dimensionality
        feature_matrix_batch, conv_feature_matrix_batch, cluster_index = kmeansConvThreader(self.kmeansConvStack, feature_matrix_batch)
        return feature_matrix_batch, conv_feature_matrix_batch, cluster_index

class AggregationStack(nn.Module):
    def __init__(self, stack_size: int, mlp1: nn.Module, mlp2: nn.Module):
        super().__init__()
        self.localAdaptFeatAggreStack = nn.ModuleList([Aggregation(copy.deepcopy(mlp1), copy.deepcopy(mlp2)) for i in range(stack_size)])
        self.apply(init_weights)
    def forward(self, feature_matrix_batch: torch.Tensor, conv_feature_matrix_batch: torch.Tensor):
        output = threader(self.localAdaptFeatAggreStack, feature_matrix_batch, conv_feature_matrix_batch)
        return output

class MaxPoolStack(nn.Module):
    def __init__(self, stack_size: int, k: int):
        super().__init__()
        self.graphMaxPoolStack = nn.ModuleList([MaxPool(k) for i in range(stack_size)])
        self.apply(init_weights)
    def forward(self, feature_matrix_batch: torch.Tensor, cluster_index: torch.Tensor):
        output = threader(self.graphMaxPoolStack, feature_matrix_batch, cluster_index)
        return output
def selfCorrThreader(modules, input_tensor):
    list_append = []
    threads = []
    for i, t in enumerate(input_tensor):
        threads.append(Thread(target=selfCorrAppender, args=(modules[i], t, list_append, i)))
    [t.start() for t in threads]
    [t.join() for t in threads]
    list_append.sort()
    list_append = list(map(lambda x: x[1], list_append))
    return torch.stack(list_append)

def selfCorrAppender(module, tensor, list_append, index):
    list_append.append((index, module(tensor)))

def kmeansConvThreader(modules, input_tensor):
    list1_append = []
    list2_append = []
    list3_append = []
    threads = []
    for i, t in enumerate(input_tensor):
        threads.append(
            Thread(target=kmeansAppender, args=(modules[i], t, list1_append, list2_append, list3_append, i)))
    [t.start() for t in threads]
    [t.join() for t in threads]
    list1_append.sort()
    list2_append.sort()
    list3_append.sort()
    list1_append = list(map(lambda x: x[1], list1_append))
    list2_append = list(map(lambda x: x[1], list2_append))
    list3_append = list(map(lambda x: x[1], list3_append))
    return torch.stack(list1_append), torch.stack(list2_append), torch.stack(list3_append)

def kmeansAppender(module, input, list1_append, list2_append, list3_append, index):
    x, y, z = module(input)
    list1_append.append((index, x))
    list2_append.append((index, y))
    list3_append.append((index, z))

def threader(modules, input_tensor1, input_tensor2):
    list_append = []
    threads = []
    for i, t in enumerate(input_tensor1):
        threads.append(Thread(target=threaderAppender, args=(modules[i], t, input_tensor2[i], list_append, i)))
    [t.start() for t in threads]
    [t.join() for t in threads]
    list_append.sort()
    list_append = list(map(lambda x: x[1], list_append))
    return torch.stack(list_append)

def threaderAppender(module, t1, t2, list_append, index):
    list_append.append((index, module(t1, t2)))
class Classifier(nn.Module):
    def __init__(self, shrinkingLayersStack: [ShrinkingUnitStack], mlp: nn.Module):
        super().__init__()
        self.neuralNet = nn.Sequential(*shrinkingLayersStack)
        self.mlp = mlp
    def forward(self, x: torch.Tensor, pos: torch.Tensor):
        feature_matrix_batch = pos.unsqueeze(0)
        output = self.neuralNet(feature_matrix_batch)
        output = torch.mean(output, dim=0)
        return self.mlp(output)
Error:

AttributeError: 'KMeans' object has no attribute 'labels_'

Thank you for your help!

Answer:
The attribute labels_ of a KMeans object is created once you actually compute the clusters by running .fit() (or .fit_predict(), or .fit_transform()).
Simple example:
>>> from sklearn.cluster import KMeans
>>> from numpy.random import random
>>> X = random((10,2))
>>> X
array([[0.2096706 , 0.69704806],
[0.31732618, 0.29607599],
[0.10372159, 0.56911046],
[0.30922255, 0.07952464],
[0.21190404, 0.46823665],
[0.67134948, 0.95702692],
[0.14781526, 0.24619197],
[0.89931979, 0.96301003],
[0.88256126, 0.07569739],
[0.70776912, 0.92997521]])
>>> clustering = KMeans(n_clusters=3)
>>> clustering.labels_
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'KMeans' object has no attribute 'labels_'
>>> clustering.fit(X)
KMeans(n_clusters=3)
>>> clustering.labels_
array([0, 0, 0, 0, 0, 1, 0, 1, 2, 1], dtype=int32)
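As a rule, scikit-learn estimator attributes that end with a trailing underscore (labels_, cluster_centers_, inertia_) only exist after fitting. If you only need the cluster assignments, fit_predict() does both steps at once; a minimal sketch continuing the example above:

>>> clustering = KMeans(n_clusters=3)
>>> labels = clustering.fit_predict(X)  # fits and returns the same array as clustering.labels_
>>> (labels == clustering.labels_).all()
True

So in your training code, make sure whichever KMeans instance your KMeansConv layer creates is actually fitted (via fit(), fit_predict(), or fit_transform()) before labels_ is read.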
Related
How to use torch.nn.Transformer with PyTorch Lightning?
I am trying to use the vanilla transformer from PyTorch with PyTorch Lightning. I tried to test the model on a reverse-number task: given [1, 3, 5, 4, 13, 19] it should return [1, 13, 4, 5, 3, 19], with 1 and 19 being the start and end tokens respectively. The full code is below. The code runs without error, but there seems to be a problem with the backpropagation. The training loss does go down at first, but it doesn't go below 2.8 and the accuracy doesn't go beyond 11%. It seems that part of the model is able to optimize; I am guessing it is because the weights located in Embeddings and Generator can backpropagate, but the weights located in nn.Transformer cannot? I am really not sure.

import math
import torch.nn.functional as F
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping

class Embeddings(pl.LightningModule):
    def __init__(self, d_model, vocab):
        super(Embeddings, self).__init__()
        self.lut = nn.Embedding(vocab, d_model)
        self.d_model = d_model
    def forward(self, x):
        a = self.lut(x) * math.sqrt(self.d_model)
        return a

class PositionalEncoding(pl.LightningModule):
    def __init__(self, d_model, dropout, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Compute the positional encodings once in log space.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)
    def forward(self, x):
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)

class Generator(pl.LightningModule):
    def __init__(self, size):
        super(Generator, self).__init__()
        self.proj = nn.Linear(512, size)
    def forward(self, x):
        return F.log_softmax(self.proj(x), dim=-1)

class Model(pl.LightningModule):
    def __init__(self, src_embed, tgt_embed, transformer, generator):
        super(Model, self).__init__()
        self.src_embed = src_embed
        self.tgt_embed = tgt_embed
        self.transformer = transformer
        self.generator = generator
        self.valLoss = 0
        self.valAcc = 0
        self.automatic_optimization = False
        self.optimizer = None
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
    def forward(self, x, y, tgt_mask=None):
        x = self.src_embed(x)
        y = self.tgt_embed(y)
        return self.generator(self.transformer(x, y, tgt_mask=tgt_mask))
    def training_step(self, batch, batch_idx):
        if self.optimizer is None:
            self.optimizer = self.optimizers()
        batch = Batch(batch[0], batch[1])
        tgt_mask = batch.trg_mask.squeeze(0)
        tgt_mask = (tgt_mask != True)
        output = self(batch.src, batch.trg, tgt_mask)
        criterion = LossCompute(V)
        loss = criterion.forward(output.contiguous().view(-1, output.size(-1)), batch.trg_y.contiguous().view(-1)) / batch.ntokens
        loss.backward()
        self.optimizer.step()
        self.optimizer.zero_grad()
        self.log('train_loss', loss)
        print(loss)
    def validation_step(self, batch, batch_idx):
        batch = Batch(batch[0], batch[1])
        tgt_mask = batch.trg_mask.squeeze(0)
        tgt_mask = (tgt_mask != True)
        output = self(batch.src, batch.trg, tgt_mask)
        criterion = LossCompute(V)
        loss = criterion.forward(output.view(-1, output.size(-1)), batch.trg_y.contiguous().view(-1)) / batch.ntokens
        self.log('val_loss', loss)
        self.valLoss += loss
        if batch_idx % 10 == 0:
            print(loss)
        if batch_idx == 99:
            print(self.valLoss / 100)
            self.valLoss = 0
        return {"x": output, "trg": batch.trg_y, "index": batch_idx}
    def validation_step_end(self, batch):
        output, trg, idx = batch["x"], batch["trg"], batch["index"]
        accuracy = getAccuracy(output, trg)
        self.log("accuracy", accuracy)
        self.valAcc += accuracy
        if idx == 99:
            print(self.valAcc / 100)
            self.valAcc = 0
    def train_dataloader(self):
        data = data_gen(V, 0, 3000)
        return DataLoader(data, batch_size=30, shuffle=False, num_workers=2, pin_memory=True)
    def val_dataloader(self):
        data = data_gen(V, 1, 1000)
        return DataLoader(data, batch_size=10, shuffle=False, num_workers=2, pin_memory=True)
    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3, betas=(0.9, 0.98), eps=1e-9)

class LossCompute(pl.LightningModule):
    def __init__(self, size):
        super(LossCompute, self).__init__()
        self.criterion = nn.KLDivLoss(reduction='sum')
        self.size = size
        self.true_dist = None
    def forward(self, x, target):
        # x has size (batch_size x length, vocab_size)
        assert x.size(1) == self.size
        true_dist = x.data.clone()
        true_dist.fill_(0)
        true_dist.scatter_(1, target.data.unsqueeze(1).long(), 1)
        self.true_dist = true_dist
        return self.criterion(x, true_dist)

# prepare data
class Batch:
    "Object for holding a batch of data with mask during training."
    def __init__(self, src, trg=None):
        self.src = src
        if trg is not None:
            self.trg = trg[:, :-1]
            self.trg_y = trg[:, 1:]
            self.trg_mask = self.make_std_mask(self.trg)
            self.ntokens = self.trg_y.size(0) * self.trg_y.size(1)
        print("")
    @staticmethod
    def make_std_mask(tgt):
        "Create a mask to hide padding and future words."
        tgt_mask = subsequent_mask(tgt.size(-1)).type_as(tgt.data)
        return tgt_mask

def subsequent_mask(size):
    "Mask out subsequent positions."
    attn_shape = (1, size, size)
    subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
    return torch.from_numpy(subsequent_mask) == 0

def data_gen(V, randomSeed, totalTrainingSample):
    np.random.seed(randomSeed)
    x = torch.from_numpy(np.random.randint(2, V - 2, size=(totalTrainingSample, 10)))
    y = torch.flip(torch.flip(x, [0, 1]), [0])
    x[:, 0] = 1
    y[:, 0] = 1
    x[:, -1] = V - 1
    y[:, -1] = V - 1
    return list(zip(x, y))

def getAccuracy(x, trg):
    totalValAcc = 0
    totalValAccToken = 0
    trg = trg.contiguous().view(-1)
    out = x.view(-1, x.size(-1))  # (batch_size * tgt_length, src_vocab)
    _, index = torch.max(out, dim=-1)  # index (batch_size * tgt_length)
    correct = list((trg == index)).count(True)
    totalValAcc += correct
    totalValAccToken += index.size(0)
    return totalValAcc / totalValAccToken

V = 20
transformer = nn.Transformer(num_encoder_layers=2, num_decoder_layers=2, batch_first=True)
PositionEnc = PositionalEncoding(512, 0.1)
src_emb = Embeddings(512, V)
tgt_emb = Embeddings(512, V)
gen = Generator(V)

if __name__ == '__main__':
    model = Model(nn.Sequential(src_emb, PositionEnc), nn.Sequential(tgt_emb, PositionEnc), transformer, gen)
    earlyStopping = EarlyStopping(monitor='val_loss', patience=3)
    trainer = pl.Trainer(max_epochs=10, callbacks=[earlyStopping])
    trainer.fit(model)
PyTorch AssertionError: assert embed_dim == embed_dim_to_check
I got an AssertionError from PyTorch's MultiheadAttention class.

class Attention(nn.Module):
    def __init__(self, d_model: int, max_pos, num_head, dropout: float = 0.1):
        super(Attention, self).__init__()
        self.num_head = num_head
        self.dropout = nn.Dropout(p=dropout).to(device)
        self.embed = torch.nn.Embedding(d_model, max_pos).to(device)
        self.pos = torch.from_numpy(t.positional_encoding(max_pos, d_model)).to(device)
        self.MHA = nn.MultiheadAttention(d_model, num_head, self.dropout).to(device)
    def forward(self, x):
        seq = x.size(dim=1)
        x = self.embed(x)
        x += self.pos[:, :seq]
        x = self.dropout(x)
        attn, _ = self.MHA(x, x, x)
        return attn, _
    def create_encode_mask(self, seq):
        out = t.generate_square_subsequent_mask(seq)
        return out

att = Attention(d_model, max_pos, num_heads, dropout_rate)

for epoch in range(EPOCHS):
    # Training
    print("EPOCH = ", epoch)
    for (batch, (src, trg)) in enumerate(train_data):
        print("BATCH = ", batch)
        src, trg = src.to(device), trg.to(device)
        out = att(src)

I am just trying to see the result of MultiheadAttention, but the error seems to be unavoidable. I tried to pass the embed_dim_to_check value to the MHA, including num_head:

attn, _ = self.MHA(x, x, x, self.create_encode_mask(seq), self.num_head)

But the error remains the same. Thank you
IndexError: tuple index out of range at X = X.reshape(1, X.shape[0]). How do I fix that?
This is a fragment of my code:

def train(self, features, targets):
    for X, y in zip(features, targets):
        X = X.reshape(1, X.shape[0])
        outputs = self.feed_forward(X)

When I try to use the method with the data:

train(np.array([gameDataList[n].ball_position, gameDataList[n].wall_position]), np.array(gameDataList[n].upOrDown))

where gameDataList[n].upOrDown is an array, e.g. [0.1, 0.9], and gameDataList[n].ball_position and gameDataList[n].wall_position are floats, I get this error. Full code:

#### Imports ####
import numpy as np

#### Neural Network Class ####
class MLP:
    ##### Constructor ####
    def __init__(self, n_input_nodes, hidden_nodes, n_output_nodes, lr):
        ## Network ##
        self.n_input_nodes = n_input_nodes
        self.n_output_nodes = n_output_nodes
        self.nodes = hidden_nodes
        self.nodes.insert(0, n_input_nodes)
        self.nodes.append(n_output_nodes)
        ## Weights and Biases ##
        self.weights = []
        self.biases = []
        for i in range(1, len(self.nodes)):
            self.weights.append(np.random.uniform(-1.0, 1.0, (self.nodes[i - 1], self.nodes[i])))
            self.biases.append(np.random.uniform(-1.0, 1.0, (1, self.nodes[i])))
        ## Learning Rate ##
        self.lr = lr
        ## Activation Functions ##
        # Linear Activation
        self.linear = lambda x: x
        self.d_linear = lambda x: np.ones(x.shape)
        # Relu Activation
        def relu(x):
            x[x < 0] = 0
            return x
        def d_relu(out):
            out: x[x > 0] = 1
            return out
        self.relu = relu
        self.d_relu = d_relu
        # Sigmoid Activation
        self.sigmoid = lambda x: 1 / (1 + np.exp(-x))
        self.d_sigmoid = lambda out: out * (1 - out)  # assumes out is tanh(x)
        # Hyperbolic Tangent Activation
        self.tanh = lambda x: np.tanh(x)
        self.d_tanh = lambda out: 1 - out ** 2  # assumes out is tanh(x)
    def getWeights(self):
        return self.weights.copy()
    def getBiases(self):
        return self.biases.copy()
    def setWeights(self, weights):
        self.weights = weights.copy()
    def setBiases(self, biases):
        self.biases = biases.copy()
    #### Feed Forward ####
    def feed_forward(self, X):
        outputs = [X]
        logits = np.dot(X, self.weights[0]) + self.biases[0]
        for i in range(1, len(self.nodes) - 1):
            out = self.sigmoid(logits)
            outputs.append(out)
            logits = np.dot(out, self.weights[i]) + self.biases[i]
        out = self.sigmoid(logits)
        outputs.append(out)
        return outputs
    #### Backpropagation ####
    def backpropagation(self, X, y, outputs):
        weights_gradients = []
        biases_gradients = []
        d1 = y - outputs[-1]
        d2 = self.d_sigmoid(outputs[-1])
        error = d1 * d2
        grad = outputs[-2].T * error
        weights_gradients.append(grad)
        biases_gradients.append(error)
        for i in range(len(self.weights) - 2, 1, -1):
            d = self.d_sigmoid(outputs[i])
            error = np.dot(error, self.weights[i + 1].T) * d
            grad = outputs[i - 1].T * error
            weights_gradients.append(grad)
            biases_gradients.append(error)
        return weights_gradients, biases_gradients
    #### Training ####
    def train(self, features, targets):
        # Batch Size for weight update step
        batch_size = features.shape[0]
        # Delta Weights Variables
        delta_weights = [np.zeros(weight.shape) for weight in self.weights]
        delta_biases = [np.zeros(bias.shape) for bias in self.biases]
        # For every data point: forward pass, backpropagation, store weight changes
        for X, y in zip(features, targets):
            # Forward pass
            X = X.reshape(1, X.shape[0])
            outputs = self.feed_forward(X)
            # Backpropagation
            weights_gradients, biases_gradients = self.backpropagation(X, y, outputs)
            for i in range(len(weights_gradients)):
                delta_weights[-(i + 1)] += weights_gradients[i]
                delta_biases[-(i + 1)] += biases_gradients[i]
        for i in range(len(delta_weights)):
            self.weights[i] += (self.lr * delta_weights[i]) / batch_size
            self.biases[i] += (self.lr * delta_biases[i]) / batch_size
    #### Testing Methods ####
    def predict(self, X):
        # Gives prediction
        return self.feed_forward(X)[-1]
    def test(self, features, targets):
        predictions = self.predict(features)
        n_correct = 0
        for i in range(len(predictions)):
            prediction = np.argmax(predictions[i])
            correct = np.argmax(targets[i])
            if prediction == correct:
                n_correct += 1
        return n_correct / len(targets)

class GameData:
    def __init__(self, ball_position, wall_position, upOrDown):
        self.wall_position = wall_position
        self.ball_position = ball_position
        self.upOrDown = upOrDown

I collect data and train my network in this way:

gameDataList.append(GameData(ball.trt.ycor(), b.trt.ycor(), [0.1, 0.9]))
mlp = MLP(2, [32, 32], 2, 0.0001)
n = random.randint(0, 999)
mlp.train(np.array([gameDataList[n].ball_position, gameDataList[n].wall_position]), np.array(gameDataList[n].upOrDown))
Problem solved. It was necessary to write two pairs of square brackets instead of one.

Wrong:

np.array([gameDataList[n].ball_position, gameDataList[n].wall_position])

Correct:

np.array([[gameDataList[n].ball_position, gameDataList[n].wall_position]])
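The double brackets change the array's dimensionality, which a quick shape check makes visible; a minimal sketch with placeholder float values (0.5 and 0.7 stand in for the real positions):

>>> import numpy as np
>>> np.array([0.5, 0.7]).shape    # one pair of brackets: a 1-D array of two scalars
(2,)
>>> np.array([[0.5, 0.7]]).shape  # two pairs: a 2-D batch with one sample of two features
(1, 2)

With the 1-D version, zip(features, targets) iterates over 0-d scalars whose .shape is the empty tuple (), so X.shape[0] raises the tuple index out of range error.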
Why does the gradient check give a high difference (almost 1)?
I'm trying to implement a neural net in Python without the use of libraries like Keras or TensorFlow. I still have to test the net; right now I just tried to train it on the Iris dataset and check afterwards the correctness of the backpropagation algorithm. To do so, I wrote the gradient checking procedure, calculating the analytical gradients and comparing them with the gradients from backpropagation. The point is that, even if the backpropagation algorithm seems correct to me, the difference between the gradients is always high (around 0.8, instead of the classic 1e-7).

Layer class:

class Dense(Layer):
    def __init__(self, input_shape, name=None, activation='relu', regularization='l2'):
        self.name = name
        self.is_output = False
        self.weights = np.random.uniform(low=0.01, high=0.10, size=input_shape)
        self.biases = np.ones((1, input_shape[1]))
        if activation == 'sigmoid':
            self.activation = Activation_Sigmoid()
        else:  # activation == 'relu':
            self.activation = Activation_ReLU()
        self.cost = Categorical_CrossEntropyLoss()
    def set_as_output(self, is_output=True):
        self.is_output = is_output
    def forward(self, inputs, debug=False, epsilon=None):
        self.net_input = inputs
        if debug:
            augmented_parameters = np.zeros(epsilon.shape)
            weights_column_vector = np.reshape(self.weights, (-1, 1))
            biases_column_vector = np.reshape(self.biases, (-1, 1))
            concatenated_parameters = np.concatenate((weights_column_vector, biases_column_vector))
            for i in range(concatenated_parameters.shape[0]):
                augmented_parameters[i] = concatenated_parameters[i]
            # make the augmented parameter as long as theta in order to sum them,
            # because epsilon is a standard basis vector
            augmented_parameters += epsilon
            # rebuild the weights matrix and biases vector to apply forward propagation
            weights_end = self.weights.shape[0] * self.weights.shape[1]
            biases_end = self.biases.shape[0] * self.biases.shape[1] + weights_end
            weights = np.reshape(augmented_parameters[0:weights_end], self.weights.shape)
            biases = np.reshape(augmented_parameters[weights_end:biases_end], self.biases.shape)
            output = np.dot(inputs, weights) + biases
            activated_output = self.activation.forward(output)
            return activated_output
        self.output = np.dot(inputs, self.weights) + self.biases
        self.activated_output = self.activation.forward(self.output)
        return self.activated_output
    def backward(self, X, y, output, step, l2=0.5):  # backpropagation
        m = X.shape[0]  # number of examples
        if self.is_output:
            error = self.cost.backward(output, y)  # (a_k - y_hat_k)
            delta_k = self.activation.backward(self.output) * error
            # net input for neuron k is a_j^(l-1)
            grad = np.dot(self.net_input.T, delta_k)
            # update weights with l2 regularization
            self.grad_w = grad + (l2 / m) * self.weights
            self.grad_b = np.sum(delta_k * 1, axis=0)
            self.weights -= step * self.grad_w
            self.biases -= step * self.grad_b
            return np.dot(delta_k, self.weights.T)
        else:
            delta_j = self.activation.backward(self.output) * output
            grad = np.dot(self.net_input.T, delta_j)
            self.grad_w = grad + (l2 / m) * self.weights
            self.grad_b = np.sum(delta_j * 1, axis=0)
            self.weights -= step * self.grad_w
            self.biases -= step * self.grad_b
            return np.dot(delta_j, self.weights.T)
    def get_parameters(self):
        return self.weights, self.biases
    def get_gradients(self):
        return self.grad_w, self.grad_b

Neural net class:

class NeuralNet():
    def __init__(self):
        self.layers = []
        self.layers_output = []
        self.cost = None
        self.regularization = L2_Regularization()
    def add(self, layer):
        self.layers.append(layer)
    def forward(self, inputs, debug=False, epsilon=None):
        input = np.copy(inputs)
        for layer in self.layers:
            output = layer.forward(input, debug=debug, epsilon=epsilon)
            input = output
        return input
    def backward(self, X, y, output, step):
        prev_delta = None
        out = output
        for layer in self.layers[::-1]:
            prev_delta = layer.backward(X, y, out, step)
            out = prev_delta
    def fit(self, X, y, batch_size=1, epochs=10, step=0.05, shuffle=True):
        self.layers[-1].set_as_output()
        self.error = []
        i = 0.005 * epochs
        for epoch in range(epochs):
            if shuffle:
                X = np.random.permutation(X)
            batches = int(np.ceil(X.shape[0] / batch_size))
            batches_error = []
            for t in range(batches):
                batch_X = X[t * batch_size:np.min([X.shape[0], (t + 1) * batch_size]), :]
                batch_y = y[t * batch_size:np.min([y.shape[0], (t + 1) * batch_size]), :]
                output = self.forward(batch_X)
                cost = self.cost.forward(output, batch_y)
                cost += self.regularization.forward(X, self.layers)
                batches_error.append(cost)
                self.backward(batch_X, batch_y, output, step)
            self.error.append(np.mean(batches_error))
            if epoch % i == 0:
                print('epoch:', epoch, 'error:', np.mean(self.error))
        return self
    def parameters_to_theta(self):
        theta = []
        for layer in self.layers:
            w, b = layer.get_parameters()
            # flatten parameter w
            new_vector = np.reshape(w, (-1, 1))
            theta.append(new_vector)
            # flatten parameter b
            new_vector = np.reshape(b, (-1, 1))
            theta.append(new_vector)
        return np.vstack(theta)
    def gradients_to_theta(self):
        theta = []
        for layer in self.layers:
            grad_w, grad_b = layer.get_gradients()
            new_vector = np.reshape(grad_w, (-1, 1))
            theta.append(new_vector)
            new_vector = np.reshape(grad_b, (-1, 1))
            theta.append(new_vector)
        return np.vstack(theta)
    def gradient_check(self, X, y, epsilon=1e-7):
        theta = self.parameters_to_theta()
        dtheta = self.gradients_to_theta()
        num_parameters = theta.shape[0]
        J_plus = np.zeros((num_parameters, 1))
        J_minus = np.zeros((num_parameters, 1))
        dtheta_approx = np.zeros((num_parameters, 1))
        for i in range(num_parameters):
            theta_plus = np.zeros((num_parameters, 1))
            theta_plus[i] = epsilon
            J_plus[i] = self.cost.forward(self.forward(X, debug=True, epsilon=theta_plus), y)
            theta_minus = np.zeros((num_parameters, 1))
            theta_minus[i] = -epsilon
            J_minus[i] = self.cost.forward(self.forward(X, debug=True, epsilon=theta_minus), y)
            dtheta_approx[i] = (J_plus[i] - J_minus[i]) / (2 * epsilon)
        numerator = np.linalg.norm(dtheta - dtheta_approx)
        denominator = np.linalg.norm(dtheta_approx) + np.linalg.norm(dtheta)
        difference = numerator / denominator
        return difference

I'm using ReLU and Sigmoid as activation functions, and categorical cross-entropy for the cost:

import numpy as np
from scipy.special import expit as sigmoid

class Activation_ReLU:
    def forward(self, inputs):
        return np.maximum(0, inputs)
    def backward(self, inputs):
        return np.greater(inputs, 0).astype(int)

class Activation_Sigmoid:
    def forward(self, inputs):
        return sigmoid(inputs)
    def backward(self, inputs):
        return sigmoid(inputs) * (1 - sigmoid(inputs))

class Categorical_CrossEntropyLoss():
    def forward(self, y_pred, y_real):
        predictions = np.copy(y_pred)
        predictions = np.clip(predictions, 1e-12, 1 - 1e-12)  # avoid zero values for log
        n = y_real.shape[0]
        return -(1 / n) * np.sum(y_real * np.log(y_pred))
    def backward(self, y_pred, y_real):
        return y_real - y_pred

These are the main classes that define the net. The model that I create to train on the Iris dataset is a NN with 1 hidden layer:

# random seed is 1
X, y = load_iris(return_X_y=True)
X = (X - np.mean(X)) / np.std(X)  # standardize data to improve network convergence
y = y.reshape((-1, 1))
encoder = OneHotEncoder(sparse=False)
y = encoder.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
model = NeuralNet()
model.add(Dense((4, 10), name='input_layer', activation='relu'))
model.add(Dense((10, 10), name='hidden_layer', activation='relu'))
model.add(Dense((10, 3), name='output_layer', activation='sigmoid'))
model.fit(X_train, y_train, batch_size=5, epochs=200, step=1e-3)
difference = model.gradient_check(X_train, y_train)

And then the result of print(difference) is

0.7992920544491866

So there is something wrong with my implementation. What should I check to determine the causes of this high difference between the gradients?
ValueError: Data must be 1-dimensional (NeuralNetwork)?
I am implementing a neural network to make a prediction, currently working with the numpy library, and I am adapting the code to the data that I have. Below is the current progress of the neural network; I get an error at the end of the code and I do not understand it well. Can anyone help me, please?

import numpy as np
from sklearn.cross_validation import train_test_split

class LinearLayer:
    def __init__(self, n_input, n_output):
        self.n = n_input
        self.m = n_output
        self.W = (1 / np.sqrt(n_input)) * np.random.rand(n_input + 1, n_output)
    def forward(self, X):
        self.input = np.zeros((X.shape[0], self.n + 1))
        # if only one feature, the input should always be a batch, at least of one element
        if len(X.shape) == 1:
            self.input[:-1, :] = X.reshape(-1, self.n)
        else:
            self.input[:, :-1] = X
        self.input[:, -1] = 1
        self.output = self.input.dot(self.W)  # xW + b
        return self.output
    def backward(self, d_out):
        self.gradients = self.W.dot(d_out)[:-1]
        self.dW = np.einsum("ij,ki", self.input, d_out)
        return self.gradients
    def updateWeights(self, lr=0.1):
        self.W = self.W - lr * self.dW

class Sigmoid:
    def __init__(self, n_input):
        self.output = np.zeros(n_input)
        self.gradients = np.zeros(n_input)
    def forward(self, X):
        self.output = 1 / (np.exp(-X) + 1)
        return self.output
    def backward(self, d_out):
        ds = self.output.T * (1 - self.output).T
        self.gradients = ds * d_out
        return self.gradients

print("Training a multilayer perceptron\n")

import pandas as pd
data = pd.read_csv('Data_Balanceada.csv')  # Data (74,11)
X = data.iloc[:, 0:11]
y = data.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1)

h1 = LinearLayer(11, 1)  # stack some layers
s1 = Sigmoid(7)
h2 = LinearLayer(7, 1)
s2 = Sigmoid(1)

def loss(pred, target):
    return np.mean(np.power(pred - target, 2))

predict = lambda x: s2.forward(h2.forward(s1.forward(h1.forward(x))))
backpropagate = lambda d: h1.backward(s1.backward(h2.backward(s2.backward(d))))

lr = 0.005
n = 0  # patience
max_epochs = 1500
valid = loss(predict(X_test), y_test)
for i in range(max_epochs):
    l = 0
    p = predict(X_train)
    backpropagate(p.T - y_train.T)
    h1.updateWeights(lr)
    h2.updateWeights(lr)
    l = loss(p, y_train)
    new_valid = loss(predict(X_test), y_test)
    if new_valid < valid:
        valid = new_valid
        n = 0
    else:
        n += 1
        if n > 50:
            break
    if i % 50 == 0:
        print("Loss: {0}\t\tValidation: {1}".format(l / 100, valid))
    lr = lr * 0.97

# Validation
print("\nFinal validation loss: {0}. {1} epochs\n".format(loss(predict(X_test), y_test), i + 1))
#print(np.argmax(predict(X_test), axis=1))
#print(np.argmax(y_test, axis=1))

Link to the dataset: https://mega.nz/#!jM8AQAbB!61NOeJadGXtiKJQsn_tdJ955p5lRD6kQjBlCQTHtt6I

I have this error:

ValueError: Data must be 1-dimensional