My goal is to compute a confusion matrix from a huge dataset with 10 classes. So far I have the following code and results:
Note: as far as I know, the model makes correct predictions over all the classes. I computed the loss in a pre-training phase and the accuracy during this transfer-classification phase, and both behave as expected. My problem is obtaining the predicted labels from the outputs.
train_dataset = Subset(eurosat_dataset, train_indices, train_transforms)
val_dataset = Subset(eurosat_dataset, val_indices, val_transforms)
train_loader = DataLoader(train_dataset, batch_size=batchsize, shuffle=False, num_workers=2, pin_memory=False,
                          drop_last=True)
val_loader = DataLoader(val_dataset, batch_size=batchsize, shuffle=False, num_workers=2, pin_memory=False,
                        drop_last=True)
print('train_len: %d val_len: %d' % (len(train_dataset), len(val_dataset)))
#for i, data in enumerate(val_loader):  # inputs = data[0], labels = data[1]
#    inputs, labels = data  # inputs [1,13,224,224], labels [0-9] --> classes
#    if i > 10:
#        break
#    print(inputs.shape, labels, inputs[0].max())
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#inputs = inputs.to(device)
# Get the model, definition of the model to be loaded
import models.models_mae_mod as models_mae_mod
from models.util.pos_embed import interpolate_pos_embed # import pos_embed.py ----> Run OK
def prepare_model(chkpt_dir, arch='mae_vit_small_patch16'):
    # build model
    model = getattr(models_mae_mod, arch)(in_chans=13)
    # load the checkpoint; the weights live under the 'model' key,
    # so check/strip the head there as well (checking the top-level
    # dict would never find 'head.weight'/'head.bias')
    checkpoint = torch.load(chkpt_dir, map_location='cpu')
    checkpoint_model = checkpoint['model']
    state_dict = model.state_dict()
    for k in ['head.weight', 'head.bias']:
        if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
            print(f"Removing key {k} from pretrained checkpoint")
            del checkpoint_model[k]
    # interpolate position embedding
    interpolate_pos_embed(model, checkpoint_model)
    msg = model.load_state_dict(checkpoint_model, strict=False)
    print(msg)
    return model
# loading the model
chkpt_dir = 'C:/Users/hugo_/PycharmProjects/transfermodel_Eurosat/datasets/B_raw_norm.pth'
model_mae = prepare_model(chkpt_dir, 'mae_vit_small_patch16')
model_mae = model_mae.to(device)
model_mae.eval()
print('Model loaded.')
with torch.no_grad():
    for i, (inputs, labels) in enumerate(val_loader):
        inputs = inputs.to(device)
        labels = labels.to(device)
        # the MAE forward returns a tuple:
        # outputs[0] is the LOSS, outputs[1] is the PRED [1, 196, 3328],
        # outputs[2] is the MASK [1, 196], outputs[3] is the TARGET [1, 13, 224, 224]
        outputs = model_mae(inputs)
        #_, preds = torch.max(outputs, 1)
        #outputs = outputs[-1:]
        print("set")
I'm not computing the confusion matrix this time, since the outputs are not in the right format to get it.
nb_classes = 10
confusion_matrix = torch.zeros(nb_classes, nb_classes)
with torch.no_grad():
    for i, (inputs, classes) in enumerate(val_loader):
        inputs = inputs.to(device)
        classes = classes.to(device)
        outputs = model_mae(inputs)
        outputs = outputs[3]  # the TARGET tensor [1, 13, 224, 224], not class scores
        _, preds = torch.max(outputs, 1)
        for t, p in zip(classes.view(-1), preds.view(-1)):
            confusion_matrix[t.long(), p.long()] += 1
print(confusion_matrix)
I identified the problem as the way I'm getting the outputs: the forward pass itself is correct, but its return values don't contain the information I want. How can I get the predicted labels and use them to compute the confusion matrix?
I attach an image of my debugging process for a better understanding:
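What I think is needed is per-image class logits rather than the reconstruction tuple. Below is a minimal sketch of what I'm after, assuming a hypothetical classifier model_cls (for example the MAE encoder plus a linear head, as in the original MAE repo's fine-tuning setup) that maps [batch, 13, 224, 224] inputs to [batch, 10] logits:
# Sketch only: model_cls is a hypothetical classification model, not the
# model_mae above -- the MAE reconstruction outputs cannot be argmax-ed
# into class labels.
nb_classes = 10
confusion_matrix = torch.zeros(nb_classes, nb_classes)
model_cls.eval()
with torch.no_grad():
    for inputs, classes in val_loader:
        inputs = inputs.to(device)
        classes = classes.to(device)
        logits = model_cls(inputs)           # [batch, 10] class scores
        preds = torch.argmax(logits, dim=1)  # predicted label per image
        for t, p in zip(classes.view(-1), preds.view(-1)):
            confusion_matrix[t.long(), p.long()] += 1
print(confusion_matrix)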
I have a data set like this:
edge_origins = np.array([[0,1,2,3,4],[6,7,8]])
edge_destinations = np.array([[1,2,3,4,5],[7,8,9]])
target = np.array([0,1])
x = [[np.array([0.1,0.5,0.2]),np.array([0.5,0.6,0.23]),
np.array([0.1,0.5,0.5]),np.array([0.1,0.6,0.23]),
np.array([0.1,0.4,0.4]),np.array([0.52,0.6,0.23])],
[np.array([0.1,0.3,0.3]),np.array([0.3,0.6,0.23]),
np.array([0.1,0.1,0.2]),np.array([0.4,0.6,0.23])]]
This is a list of two networks. The first network has 6 nodes with 5 edges and class 0; the second has 4 nodes with 3 edges and class 1.
I want to develop a model in PyTorch that classifies each network into its class, and then I'll give it a new set of networks to classify.
So ultimately, I want to be able to shuffle these lists (simultaneously, i.e. maintaining the correspondence between the data and the classes), split them into train and test sets, read the train and test data into two data loaders, and feed these into a PyTorch network.
I wrote this:
edge_origins = np.array([[0,1,2,3,4],[6,7,8]])
edge_destinations = np.array([[1,2,3,4,5],[7,8,9]])
target = np.array([0,1])
x = [[np.array([0.1,0.5,0.2]),np.array([0.5,0.6,0.23]),
np.array([0.1,0.5,0.5]),np.array([0.1,0.6,0.23]),
np.array([0.1,0.4,0.4]),np.array([0.52,0.6,0.23])],
[np.array([0.1,0.3,0.3]),np.array([0.3,0.6,0.23]),
np.array([0.1,0.1,0.2]),np.array([0.4,0.6,0.23])]]
edge_index = torch.tensor([edge_origins, edge_destinations], dtype=torch.long)
dataset = Data(x=x, edge_index=edge_index, y=y, num_classes = len(set(target)))
print(dataset)
And the error is:
edge_index = torch.tensor([edge_origins, edge_destinations], dtype=torch.long)
ValueError: expected sequence of length 5 at dim 2 (got 3)
But then once that is fixed I think the next step is:
torch.manual_seed(12345)
dataset = dataset.shuffle()
train_dataset = dataset[:1] #for toy example
test_dataset = dataset[1:]
print(f'Number of training graphs: {len(train_dataset)}')
print(f'Number of test graphs: {len(test_dataset)}')
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
class GCN(torch.nn.Module):
    def __init__(self, hidden_channels):
        super(GCN, self).__init__()
        torch.manual_seed(12345)
        self.conv1 = GCNConv(dataset.num_node_features, hidden_channels)
        self.conv2 = GCNConv(hidden_channels, hidden_channels)
        self.conv3 = GCNConv(hidden_channels, hidden_channels)
        self.lin = Linear(hidden_channels, dataset.num_classes)

    def forward(self, x, edge_index, batch):
        # 1. Obtain node embeddings
        x = self.conv1(x, edge_index)
        x = x.relu()
        x = self.conv2(x, edge_index)
        x = x.relu()
        x = self.conv3(x, edge_index)
        # 2. Readout layer
        x = global_mean_pool(x, batch)  # [batch_size, hidden_channels]
        # 3. Apply a final classifier
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin(x)
        return x
model = GCN(hidden_channels=64)
print(model)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
criterion = torch.nn.CrossEntropyLoss()
def train():
    model.train()
    for data in train_loader:  # Iterate in batches over the training dataset.
        out = model(data.x, data.edge_index, data.batch)  # Perform a single forward pass.
        loss = criterion(out, data.y)  # Compute the loss.
        loss.backward()  # Derive gradients.
        optimizer.step()  # Update parameters based on gradients.
        optimizer.zero_grad()  # Clear gradients.
def test(loader):
    model.eval()
    correct = 0
    for data in loader:  # Iterate in batches over the training/test dataset.
        out = model(data.x, data.edge_index, data.batch)
        pred = out.argmax(dim=1)  # Use the class with highest probability.
        correct += int((pred == data.y).sum())  # Check against ground-truth labels.
    return correct / len(loader.dataset)  # Derive ratio of correct predictions.
for epoch in range(1, 171):
    train()
    train_acc = test(train_loader)
    test_acc = test(test_loader)
    print(f'Epoch: {epoch:03d}, Train Acc: {train_acc:.4f}, Test Acc: {test_acc:.4f}')
Could someone demonstrate to me how to get my data running into the Pytorch network above?
In PyTorch Geometric, the Data object holds a single graph, so you can iterate through your arrays like this:
data_list = []
for i in range(2):
    edge_index_curr = torch.tensor([edge_origins[i],
                                    edge_destinations[i]],
                                   dtype=torch.long)
    data = Data(x=torch.tensor(x[i]), edge_index=edge_index_curr, y=torch.tensor(target[i]))
    data_list.append(data)
You can then use this list of Data objects to create your own DataLoader:
loader = DataLoader(data_list, batch_size=32)
If you need to split into train/val/test (I would advise having more than 2 samples for this) you can do it manually or using sklearn.model_selection; a sketch follows below.
For data augmentation, if you really do have very little data, pytorch-geometric comes with transforms.
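Here is a minimal sketch of such a split, assuming the data_list built above (with only the two toy graphs, the split is of course not meaningful). Note that recent PyG versions import DataLoader from torch_geometric.loader; older ones use torch_geometric.data:
from sklearn.model_selection import train_test_split
from torch_geometric.loader import DataLoader

# split the list of Data objects, keeping graphs and labels together
train_data, test_data = train_test_split(data_list, test_size=0.2, random_state=12345)
train_loader = DataLoader(train_data, batch_size=64, shuffle=True)
test_loader = DataLoader(test_data, batch_size=64, shuffle=False)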
My data are divided into two parts, training and validation. I used the load_dataset and DataLoader functions, and I convert the data in the dataset to torch format using traindataset.set_format.
When starting training I get this error:
new(): invalid data type 'numpy.str_'
in this line:
for step, batch in enumerate(train_dataloader):
So how can I fix this error?
model= MixModel()
#model.load_state_dict(torch.load(r"/media/sh/saved_weightscnnbert.pt"))
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
traindataset = load_dataset('csv', data_files='/content/drive//My Drive/Colab Notebooks/newdataset/newdata_train2',split='train')
testdataset = load_dataset('csv', data_files='/content/drive//My Drive/Colab Notebooks/newdataset/newdata_valid2',split='train')
traindataset = traindataset.map(encode)
testdataset1 = testdataset.map(encode)
traindataset = traindataset.map(lambda examples: {'labels': examples['symptoms']}, batched=True)
testdataset = testdataset1.map(lambda examples: {'labels': examples['symptoms']}, batched=True)
traindataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])
testdataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])
train_dataloader = torch.utils.data.DataLoader(traindataset, batch_size= 64)
test_dataloader = torch.utils.data.DataLoader(testdataset, batch_size= 64)
# function to train the model
def train():
    model.train()
    total_loss, total_accuracy = 0, 0
    # empty lists to save model predictions and labels
    total_preds = []
    Labels = []
    # iterate over batches
    for step, batch in enumerate(train_dataloader):
        # progress update after every 100 batches
        if step % 100 == 0 and not step == 0:
            print('  Batch {:>5,}  of  {:>5,}.'.format(step, len(train_dataloader)))
        sent_id, mask, labels = batch['input_ids'], batch['attention_mask'], batch['labels']
        # clear previously calculated gradients
        model.zero_grad()
        # get model predictions for the current batch
        preds = model(sent_id, mask, labels)
        # compute the loss between actual and predicted values
        alpha = 0.25
        gamma = 2
        ce_loss = loss_fn(preds, labels)
        #pt = torch.exp(-ce_loss)
        #focal_loss = (alpha * (1-pt)**gamma * ce_loss).mean()  # mean over the batch
        # add on to the total loss
        total_loss = total_loss + ce_loss.item()
        # backward pass to calculate the gradients
        ce_loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        # update parameters
        optimizer.step()
        preds = torch.argmax(preds, dim=1)
        total_preds.append(preds)
        total_accuracy += (preds == labels).float().sum()
    # compute the training loss of the epoch
    avg_loss = total_loss / len(train_dataloader)
    avg_accuracy = total_accuracy / len(traindataset)
    # predictions were collected per batch; flatten them into one
    # array with one predicted label per sample
    total_preds = np.concatenate(total_preds, axis=0)
    # return the loss, predictions, and accuracy
    return avg_loss, total_preds, avg_accuracy
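A minimal sketch of one likely fix, under the assumption that the 'symptoms' column holds string class names: set_format(type='torch') cannot build a tensor from numpy.str_ values, so the labels need to be mapped to integer ids first. The label2id mapping below is illustrative, not part of the original code:
# assumed: 'symptoms' contains strings such as class names
label_names = sorted(set(traindataset['symptoms']))
label2id = {name: i for i, name in enumerate(label_names)}
# replace the string labels with integer ids before set_format
traindataset = traindataset.map(lambda ex: {'labels': label2id[ex['symptoms']]})
testdataset = testdataset1.map(lambda ex: {'labels': label2id[ex['symptoms']]})
traindataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])
testdataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])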
I'm trying to shift the load from the CPU to the GPU, as my trusty RTX 2070 can handle it better than the CPU, but I keep running into this problem, and I'm quite new to AI. If you are kind enough to share some insights regarding any potential solution, it would be highly appreciated, thank you.
I'm using PyTorch.
Here's the code that I'm using :
# to measure run-time
# for csv dataset
import os
# to shuffle data
import random
# to get the alphabet
import string
# import statements for iterating over csv file
import cv2
# for plotting
import matplotlib.pyplot as plt
import numpy as np
# pytorch stuff
import torch
import torch.nn as nn
from PIL import Image
# generate the targets
# the targets are one hot encoding vectors
# print(torch.cuda.is_available())
nvcc_args = [
'-gencode', 'arch=compute_30,code=sm_30',
'-gencode', 'arch=compute_35,code=sm_35',
'-gencode', 'arch=compute_37,code=sm_37',
'-gencode', 'arch=compute_50,code=sm_50',
'-gencode', 'arch=compute_52,code=sm_52',
'-gencode', 'arch=compute_60,code=sm_60',
'-gencode', 'arch=compute_61,code=sm_61',
'-gencode', 'arch=compute_70,code=sm_70',
'-gencode', 'arch=compute_75,code=sm_75'
]
alphabet = list(string.ascii_lowercase)
target = {}
# Initialize a target dict that has letters as its keys and empty one-hot encoding vectors of size 37 as its values
for letter in alphabet:
    target[letter] = [0] * 37
# Do the one-hot encoding for each letter now
curr_pos = 0
for curr_letter in target.keys():
    target[curr_letter][curr_pos] = 1
    curr_pos += 1
# extra symbols
symbols = ["space", "number", "period", "comma", "colon", "apostrophe", "hyphen", "semicolon", "question",
           "exclamation", "capitalize"]
# create vectors
for curr_symbol in symbols:
    target[curr_symbol] = [0] * 37
# create one-hot encoding vectors
for curr_symbol in symbols:
    target[curr_symbol][curr_pos] = 1
    curr_pos += 1
# collect all data from the csv file
data = []
for tgt in os.listdir("dataset"):
    if not tgt == ".DS_Store":
        for folder in os.listdir("dataset/" + tgt + "/Uploaded"):
            if not folder == ".DS_Store":
                for filename in os.listdir("dataset/" + tgt + "/Uploaded/" + folder):
                    if not filename == ".DS_Store":
                        # store the image and label
                        picture = []
                        curr_target = target[tgt]
                        image = Image.open("dataset/" + tgt + "/Uploaded/" + folder + "/" + filename)
                        image = image.convert('RGB')
                        image = np.array(image)
                        # resize image to 28x28x3
                        image = cv2.resize(image, (28, 28))
                        # normalize to 0-1
                        image = image.astype(np.float32) / 255.0
                        image = torch.from_numpy(image)
                        picture.append(image)
                        # convert the one-hot target to a tensor
                        curr_target = torch.Tensor([curr_target])
                        picture.append(curr_target)
                        # append the current image & target
                        data.append(picture)
# create a dictionary of all the characters
characters = alphabet + symbols
index2char = {}
number = 0
for char in characters:
    index2char[number] = char
    number += 1
# find the number of each character in a dataset
def num_chars(dataset, index2char):
    chars = {}
    for _, label in dataset:
        char = index2char[int(torch.argmax(label))]
        # update
        if char in chars:
            chars[char] += 1
        # initialize
        else:
            chars[char] = 1
    return chars
# Create dataloader objects
# shuffle all the data
random.shuffle(data)
# batch sizes for train, test, and validation
batch_size_train = 30
batch_size_test = 30
batch_size_validation = 30
# splitting data to get training, test, and validation sets
# (update these slice points as the dataset grows)
# 22000 samples for train
train_dataset = data[:22000]
# 2400 samples for test
test_dataset = data[22000:24400]
# the rest for validation
validation_dataset = data[24400:]
# create the dataloader objects
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size_test, shuffle=False)
validation_loader = torch.utils.data.DataLoader(dataset=validation_dataset, batch_size=batch_size_validation,
shuffle=True)
# to check if a dataset is missing a char
test_chars = num_chars(test_dataset, index2char)
num = 0
for char in characters:
    if char in test_chars:
        num += 1
    else:
        break
print(num)
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.block1 = nn.Sequential(
            # 3x28x28
            nn.Conv2d(in_channels=3,
                      out_channels=16,
                      kernel_size=5,
                      stride=1,
                      padding=2),
            # batch normalization
            # nn.BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True),
            # 16x28x28
            nn.MaxPool2d(kernel_size=2),
            # 16x14x14
            nn.LeakyReLU()
        )
        # 16x14x14
        self.block2 = nn.Sequential(
            nn.Conv2d(in_channels=16,
                      out_channels=32,
                      kernel_size=5,
                      stride=1,
                      padding=2),
            # batch normalization
            # nn.BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True),
            # 32x14x14
            nn.MaxPool2d(kernel_size=2),
            # 32x7x7
            nn.LeakyReLU()
        )
        # fully connected classifier head
        self.block3 = nn.Sequential(
            nn.Linear(32 * 7 * 7, 100),
            # batch normalization
            # nn.BatchNorm1d(100),
            nn.LeakyReLU(),
            nn.Linear(100, 37)
        )
        # output: 1x37

    def forward(self, x):
        out = self.block1(x)
        out = self.block2(out)
        # flatten the feature maps
        out = out.view(-1, 32 * 7 * 7)
        out = self.block3(out)
        return out
# convolutional neural network model
model = CNN()
model.cuda()
# print summary of the neural network model to check if everything is fine.
print(model)
print("# parameter: ", sum([param.nelement() for param in model.parameters()]))
# setting the learning rate
learning_rate = 1e-4
# Using a variable to store the cross entropy method
criterion = nn.CrossEntropyLoss()
# Using a variable to store the optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# list of all train_losses
train_losses = []
# list of all validation losses
validation_losses = []
# for loop that iterates over all the epochs
num_epochs = 20
for epoch in range(num_epochs):
    # variables to keep track of the loss and number of iterations
    train_loss = 0
    num_iter_train = 0
    # train the model
    model.train()
    # Iterate over train_loader
    for i, (images, labels) in enumerate(train_loader):
        # permute so that the images are of size 3x28x28,
        # essential to be able to feed images into the model
        images = images.permute(0, 3, 1, 2)
        # Zero the gradient buffer so that gradients don't add up
        optimizer.zero_grad()
        # Forward, get output
        outputs = model(images)
        # convert the labels from one-hot encoding vectors into integer values
        labels = labels.view(-1, 37)
        y_true = torch.argmax(labels, 1)
        # calculate training loss
        loss = criterion(outputs, y_true)
        # Backward (computes all the gradients)
        loss.backward()
        # Optimize: update the weights using the gradients
        optimizer.step()
        # update the training loss and number of iterations
        train_loss += loss.data
        num_iter_train += 1

    print('Epoch: {}'.format(epoch + 1))
    print('Training Loss: {:.4f}'.format(train_loss / num_iter_train))
    # append training loss over all the epochs
    train_losses.append(train_loss / num_iter_train)

    # evaluate the model
    model.eval()
    # variables to keep track of the loss and number of iterations
    validation_loss = 0
    num_iter_validation = 0
    # Iterate over validation_loader
    for i, (images, labels) in enumerate(validation_loader):
        # permute so that the images are of size 3x28x28
        images = images.permute(0, 3, 1, 2)
        # Forward, get output
        outputs = model(images)
        # convert the labels from one-hot encoding vectors to integer values
        labels = labels.view(-1, 37)
        y_true = torch.argmax(labels, 1)
        # calculate the validation loss
        loss = criterion(outputs, y_true)
        # update the validation loss and number of iterations
        validation_loss += loss.data
        num_iter_validation += 1

    print('Validation Loss: {:.4f}'.format(validation_loss / num_iter_validation))
    # append all validation losses over all the epochs
    validation_losses.append(validation_loss / num_iter_validation)

    num_iter_test = 0
    correct = 0
    # Iterate over test_loader
    for images, labels in test_loader:
        # permute so that the images are of size 3x28x28
        images = images.permute(0, 3, 1, 2)
        # Forward
        outputs = model(images)
        # convert the labels from one-hot encoding vectors into integer values
        labels = labels.view(-1, 37)
        y_true = torch.argmax(labels, 1)
        # find the index of the prediction
        y_pred = torch.argmax(outputs, 1).type('torch.FloatTensor')
        # convert to FloatTensor
        y_true = y_true.type('torch.FloatTensor')
        # count the correct predictions
        correct += torch.sum(torch.eq(y_true, y_pred).type('torch.FloatTensor'))
    print('Accuracy on the test set: {:.4f}%'.format(correct / len(test_dataset) * 100))
    print()
# learning curve function
def plot_learning_curve(train_losses, validation_losses):
    # plot the training and validation losses
    plt.ylabel('Loss')
    plt.xlabel('Number of Epochs')
    plt.plot(train_losses, label="training")
    plt.plot(validation_losses, label="validation")
    plt.legend(loc=1)
    plt.title("Learning Curve (Loss vs Number of Epochs)")

# plot the learning curve
plot_learning_curve(train_losses, validation_losses)
torch.save(model.state_dict(), "model1.pth")
I'm also using a trusty RTX 2070 and this is how I do GPU acceleration (for 1 GPU):
cuda_ = "cuda:0"
device = torch.device(cuda_ if torch.cuda.is_available() else "cpu")
model = CNN()
model.to(device)
This is the most up-to-date and recommended way to do GPU acceleration, as it gives more flexibility (you don't need to amend the code even when no GPU is available). You do the same to move your batches into GPU memory, via images = images.to(device), and likewise for the labels: every tensor the model touches must live on the same device.
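Applied to your training loop, here is a minimal sketch (it assumes the CNN, train_loader, optimizer, and criterion defined in your code, and moves both the images and the labels):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = CNN().to(device)

for images, labels in train_loader:
    # move the whole batch to the same device as the model
    images = images.permute(0, 3, 1, 2).to(device)
    labels = labels.view(-1, 37).to(device)
    y_true = torch.argmax(labels, 1)
    optimizer.zero_grad()
    outputs = model(images)
    loss = criterion(outputs, y_true)
    loss.backward()
    optimizer.step()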
I'm using this PyTorch implementation of SegNet with pretrained weights I found for object segmentation, and it works fine.
Now I want to resume the training from the values I have, using a new dataset with similar images.
How can I do that?
I guess I have to use the "train.py" file found in the repository, but I don't know what to write to replace the "fill the batch" comment.
Here is that portion of the code:
def train(epoch):
    model.train()
    # update learning rate
    lr = args.lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    # define a weighted loss (0 weight for 0 label)
    weights_list = [0] + [1 for i in range(17)]
    weights = np.asarray(weights_list)
    weigthtorch = torch.Tensor(weights_list)
    if (USE_CUDA):
        loss = nn.CrossEntropyLoss(weight=weigthtorch).cuda()
    else:
        loss = nn.CrossEntropyLoss(weight=weigthtorch)
    total_loss = 0
    # iteration over the batches
    batches = []
    for batch_idx, batch_files in enumerate(tqdm(batches)):
        # containers
        batch = np.zeros((args.batch_size, input_nbr, imsize, imsize), dtype=float)
        batch_labels = np.zeros((args.batch_size, imsize, imsize), dtype=int)
        # fill the batch
        # ...
        # What should I write here?
        batch_th = Variable(torch.Tensor(batch))
        target_th = Variable(torch.LongTensor(batch_labels))
        if USE_CUDA:
            batch_th = batch_th.cuda()
            target_th = target_th.cuda()
        # initialize gradients
        optimizer.zero_grad()
        # predictions
        output = model(batch_th)
        # Loss
        output = output.view(output.size(0), output.size(1), -1)
        output = torch.transpose(output, 1, 2).contiguous()
        output = output.view(-1, output.size(2))
        target = target_th.view(-1)  # flatten the labels to match the output
        l_ = loss(output.cuda(), target)
        total_loss += l_.cpu().data.numpy()
        l_.backward()
        optimizer.step()
    return total_loss / len(files)
If I had to guess, he probably wrote some Dataloader feeder that extends the PyTorch Dataset class and fed it to a DataLoader. See
https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
Near the bottom of the page you can see an example in which they loop over their data loader
for i_batch, sample_batched in enumerate(dataloader):
What this would look like for images, for example, is:
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=False, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batchSize, shuffle=True, num_workers=2)
for batch_idx, (inputs, targets) in enumerate(trainloader):
    # using the PyTorch data loader, the inputs and targets are given automatically
    inputs, targets = inputs.cuda(), targets.cuda()
    optimizer.zero_grad()
    inputs, targets = Variable(inputs), Variable(targets)
How exactly the author loads his files I don't know. You could follow the procedure from https://pytorch.org/tutorials/beginner/data_loading_tutorial.html to make your own Dataset, though; a sketch follows below.
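Here is a minimal sketch of such a Dataset for image/label-map pairs. The directory layout, file naming, and preprocessing are assumptions for illustration, not the repository's actual code:
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from PIL import Image

class SegmentationDataset(Dataset):
    # Hypothetical layout: RGB images in img_dir, integer label maps with
    # the same file names in label_dir. Adjust to your dataset's storage.
    def __init__(self, img_dir, label_dir, imsize):
        self.img_dir = img_dir
        self.label_dir = label_dir
        self.imsize = imsize
        self.files = sorted(os.listdir(img_dir))

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        name = self.files[idx]
        image = Image.open(os.path.join(self.img_dir, name)).convert('RGB').resize((self.imsize, self.imsize))
        # nearest-neighbour resize keeps label values intact
        label = Image.open(os.path.join(self.label_dir, name)).resize((self.imsize, self.imsize), Image.NEAREST)
        image = torch.from_numpy(np.array(image, dtype=np.float32) / 255.0).permute(2, 0, 1)
        label = torch.from_numpy(np.array(label, dtype=np.int64))
        return image, label

loader = DataLoader(SegmentationDataset("images/", "labels/", imsize=224),
                    batch_size=4, shuffle=True)
for batch_th, target_th in loader:  # this replaces the "fill the batch" step
    pass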
Relevant parts of the code are below. The call to the Scattering object returns a 3D tensor of coefficients, based on fixed filter maps. The program enters and returns from the Scattering call only once, which indicates that the code hangs forever somewhere in the first training step, but not in the Scattering call. Where could this be happening?
def my_model_fn(features, labels, mode, params):
    M, N = features.get_shape().as_list()[-2:]
    scattering_coefficients = Scattering(M=M, N=N, J=1, L=2)(features)
    batch_size = scattering_coefficients.get_shape().as_list()[0]
    # throw all coefficients into a single vector for each image
    scattering_coefficients = tf.reshape(scattering_coefficients, [batch_size, -1])
    # returns tensor of correct shape
    print(scattering_coefficients)
    n_classes = 10
    n_coefficients = scattering_coefficients.get_shape().as_list()[1]
    # use a linear classifier
    W = tf.Variable(tf.zeros([n_coefficients, n_classes]))
    b = tf.Variable(tf.zeros([n_classes]))
    y_predict = tf.nn.softmax(tf.matmul(scattering_coefficients, W) + b)
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions={"predictions": y_predict})
    # loss function and training step
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=y_predict))
    train_op = tf.train.GradientDescentOptimizer(params["learning_rate"]).minimize(cross_entropy)
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=cross_entropy,
        train_op=train_op)
def sample_batch(X, y, batch_size):
    idx = np.random.choice(X.shape[0], batch_size, replace=False)
    return tf.convert_to_tensor(X[idx]), tf.convert_to_tensor(y[idx])
LEARNING_RATE = 0.01
BATCH_SIZE = 2
n_training_steps = 2
image_dimension = 28
model_params = {"learning_rate": LEARNING_RATE}
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
X_train = mnist.train.images.astype(np.float32)
X_train = normalize(X_train)
# number of channels is 1, -1 infers number of samples
X_train = X_train.reshape(-1, 1, image_dimension, image_dimension)
y_train = mnist.train.labels.astype(np.int64)
X_validation = mnist.validation.images.astype(np.float32)
X_validation = normalize(X_validation)
X_validation = X_validation.reshape(-1, 1, image_dimension, image_dimension)
y_validation = mnist.validation.labels.astype(np.int64)
train_input_fn = lambda: sample_batch(X_train, y_train, BATCH_SIZE)
validation_input_fn = lambda: sample_batch(X_validation, y_validation, BATCH_SIZE)
# Train
scattering_classifier = tf.estimator.Estimator(model_fn=my_model_fn, params=model_params)
# Hangs forever...
scattering_classifier.train(input_fn=train_input_fn, max_steps=n_training_steps)
# If I comment out training step, this finishes immediately.
print("start scoring accuracy")
predictions = scattering_classifier.predict(input_fn=validation_input_fn)
Changing
train_op = tf.train.GradientDescentOptimizer(params["learning_rate"]).minimize(cross_entropy)
to
train_op = tf.train.GradientDescentOptimizer(params["learning_rate"]).minimize(
cross_entropy, global_step=tf.train.get_global_step())
solves the problem. Explanations are very welcome.