I am trying to create a neural net that takes an input of shape (249561, 80, 1); the y labels have shape (249561, 2).
class Net1(nn.Module):
    def __init__(self):
        super(Net1, self).__init__()
        self.conv1 = nn.Conv1d(80, 16, kernel_size=1)
        self.conv2_drop = nn.Dropout()
        self.fc1 = nn.Linear(1, 256)
        self.fc2 = nn.Linear(256, 64)
        self.fc3 = nn.Linear(64, 32)
        self.fc4 = nn.Linear(32, 2)

    def forward(self, x):
        print(type(x))
        x = F.relu(F.max_pool1d(self.conv1(x), 1))
        print(x.shape)
        x.reshape(-1)
        e1 = F.relu(self.fc1(x))
        x = F.dropout(e1, training=self.training)
        x = F.relu(self.fc2(x))
        x = F.dropout(x, training=self.training)
        x = F.relu(self.fc3(x))
        x = F.dropout(x, training=self.training)
        x = self.fc4(x)
        return x
My training loop looks something like this
losses = []
batch_size = 16

for epoch in range(10):
    permutation = torch.randperm(x2.size()[0])
    for i in range(0, len(x2), batch_size):
        indices = permutation[i:i+batch_size]
        batch_x, batch_y = x2[indices], onehot_encoded[indices]
        # images = Variable(images.float())
        # labels = Variable(labels)

        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = model(batch_x)
        loss = criterion(outputs, batch_y)
        loss.backward()
        optimizer.step()
I use a batch size of 16, so I feed in a tensor of shape [16, 80, 1]. I get the following error: RuntimeError: Expected object of scalar type Long but got scalar type Float for argument #2 'target' in call to _thnn_nll_loss2d_forward. I suspect the problem is with the output layer, but it returns a tensor of shape 2, which matches my labels. The output x has size torch.Size([16, 16, 2]).
Instead of changing the input, why don't you use nn.Conv1d (replace both nn.Conv2d layers; you'll need to change the dropout as well)?
If you really want to change the input, you can add:
batch_x = batch_x[..., None]
after the line
batch_x, batch_y = x2[indices], onehot_encoded[indices]
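As a sketch, that would make the relevant part of the training loop look like this (only worthwhile if batch_x comes out as [batch, 80] rather than [batch, 80, 1]; batch_x[..., None] is the same as batch_x.unsqueeze(-1)):
for i in range(0, len(x2), batch_size):
    indices = permutation[i:i+batch_size]
    batch_x, batch_y = x2[indices], onehot_encoded[indices]
    batch_x = batch_x[..., None]   # add a trailing dimension: [batch, 80] -> [batch, 80, 1]

    optimizer.zero_grad()
    outputs = model(batch_x)
    loss = criterion(outputs, batch_y)
    loss.backward()
    optimizer.step()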
File "C:\Users\J2\Desktop\Pytorchseries\thenn.py", line 50, in
net = Net()
TypeError: new(): argument 'size' must be tuple of ints, but found element of type NoneType at pos 2
If it helps, I was following the sentdex PyTorch tutorial. I am new to machine learning and I was hoping this would work. Any help would be appreciated!
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from tqdm import tqdm

training_data = np.load('training_data.npy', allow_pickle=True)
print(len(training_data))

X = torch.Tensor([i[0] for i in training_data]).view(-1, 50, 50)
X = X/255.0
y = torch.Tensor([i[1] for i in training_data])

plt.imshow(X[0], cmap='gray')
print(y[0])
class Net(nn.Module):
    def __init__(self):
        super().__init__()  # just run the init of parent class (nn.Module)
        self.conv1 = nn.Conv2d(1, 32, 5)   # input is 1 image, 32 output channels, 5x5 kernel / window
        self.conv2 = nn.Conv2d(32, 64, 5)  # input is 32 because the first layer outputs 32; output will be 64 channels, 5x5 kernel / window
        self.conv3 = nn.Conv2d(64, 128, 5)

        x = torch.randn(50, 50).view(-1, 1, 50, 50)
        self._to_linear = None
        self.convs(x)

        self.fc1 = nn.Linear(self._to_linear, 512)  # flattening
        self.fc2 = nn.Linear(512, 2)  # 512 in, 2 out because we're doing 2 classes (dog vs cat)

    def convs(self, x):
        # max pooling over 2x2
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))

    def forward(self, x):
        x = self.convs(x)
        x = x.view(-1, self._to_linear)  # .view is reshape; this flattens x before the fully connected layers
        x = F.relu(self.fc1(x))
        x = self.fc2(x)  # this is our output layer, no activation here
        return F.softmax(x, dim=1)
        if self._to_linear is None:
            self._to_linear = x[0].shape[0]*x[0].shape[1]*x[0].shape[2]
        return x
net = Net()
print(net)

import torch.optim as optim

optimizer = optim.Adam(net.parameters(), lr=0.001)
loss_function = nn.MSELoss()

X = torch.Tensor([i[0] for i in training_data]).view(-1, 50, 50)
X = X/255.0
y = torch.Tensor([i[1] for i in training_data])

VAL_PCT = 0.1  # let's reserve 10% of our data for validation
val_size = int(len(X)*VAL_PCT)
print(val_size)

train_X = X[:-val_size]
train_y = y[:-val_size]
test_X = X[-val_size:]
test_y = y[-val_size:]
print(len(train_X), len(test_X))

BATCH_SIZE = 100
EPOCHS = 1
for epoch in range(EPOCHS):
    for i in tqdm(range(0, len(train_X), BATCH_SIZE)):  # from 0 to the len of X, stepping BATCH_SIZE at a time. [:50] for now just to dev
        # print(f"{i}:{i+BATCH_SIZE}")
        batch_X = train_X[i:i+BATCH_SIZE].view(-1, 1, 50, 50)
        batch_y = train_y[i:i+BATCH_SIZE]

        net.zero_grad()
        outputs = net(batch_X)
        loss = loss_function(outputs, batch_y)
        loss.backward()
        optimizer.step()  # does the update

    print(f"Epoch: {epoch}. Loss: {loss}")

correct = 0
total = 0
with torch.no_grad():
    for i in tqdm(range(len(test_X))):
        real_class = torch.argmax(test_y[i])
        net_out = net(test_X[i].view(-1, 1, 50, 50))[0]  # returns a list
        predicted_class = torch.argmax(net_out)
        if predicted_class == real_class:
            correct += 1
        total += 1

print("Accuracy: ", round(correct/total, 3))
The issue is with self._to_linear. You use it in __init__ as:
self._to_linear = None
self.convs(x)
self.fc1 = nn.Linear(self._to_linear, 512) #flattening.
The call to nn.Linear has it as a parameter. This parameter should equal the number of input features in the linear layer, and cannot be None, since the value will determine the shape of the layer (number of weights and biases). How to fix this depends on what you're trying to achieve.
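One common fix (and the pattern the stray if self._to_linear is None block in the question seems to be aiming for) is to record the flattened size inside convs and return x, so the value is already an int by the time fc1 is built. A minimal sketch using the layers above:
def convs(self, x):
    x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
    x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
    x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
    # The first call (from __init__) records the flattened feature count,
    # so nn.Linear(self._to_linear, 512) receives a real integer.
    if self._to_linear is None:
        self._to_linear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
    return x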
I am building a neural network to solve a classification problem, but for some reason I'm getting the error RuntimeError: size mismatch, m1: [10 x 33], m2: [34 x 10]. I don't understand what I'm doing wrong.
Here is my code.
X_train = torch.tensor(X_train, dtype=torch.float)
X_test = torch.tensor(X_test, dtype=torch.float)
Y_train = torch.tensor(Y_train, dtype=torch.float).view(-1, 1)
Y_test = torch.tensor(Y_test, dtype=torch.float).view(-1, 1)

dataset = torch.utils.data.TensorDataset(X_train, Y_train)
train_iter = torch.utils.data.DataLoader(dataset, batch_size=10, shuffle=True)
import torch.nn.functional as F

class Network(nn.Module):
    def __init__(self):
        super().__init__()
        # Inputs to hidden layer linear transformation
        self.hidden = nn.Linear(34, 10)
        # Output layer, 1 unit (binary classification)
        self.output = nn.Linear(10, 1)
        # Define sigmoid and ReLU activations
        self.sigmoid_act = nn.Sigmoid()
        self.ReLU_act = nn.ReLU()

    def forward(self, x):
        # Pass the input tensor through each of our operations
        x = self.hidden(x)
        x = self.ReLU_act(x)
        x = self.output(x)
        x = self.sigmoid_act(x)
        return x
model = Network()
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.003)

epochs = 30
steps = 0

train_losses, test_losses = [], []
for e in range(epochs):
    running_loss = 0
    for images, labels in train_iter:
        optimizer.zero_grad()
        log_ps = model(images)   # <-- this is the line that raises the error
        loss = criterion(log_ps, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    else:
        test_loss = 0
        accuracy = 0
I'm getting the error on the marked line (log_ps = model(images)). Please help me resolve this; I've been stuck on it for almost a day now. I'm a beginner at neural networks.
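For context, in this RuntimeError m1 is the incoming batch and m2 is the layer's weight matrix, so [10 x 33] against [34 x 10] means each input row carries 33 features while self.hidden = nn.Linear(34, 10) expects 34. A quick, purely illustrative check:
print(X_train.shape)              # reveals how many feature columns the data really has
print(model.hidden.in_features)   # 34 in the code above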
I'm new to deep learning and PyTorch, but I hope someone can help me out with this. My dataset contains images of different sizes. I'm trying to create a simple neural network that can classify images, but I'm getting mismatch errors.
Neural network
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3)
        self.conv2 = nn.Conv2d(32, 32, 3)
        self.fc1 = nn.Linear(32 * 3 * 3, 200)
        self.fc2 = nn.Linear(200, 120)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

net = Net()
My first convolution layer has 1 input channel because I convert the images to grayscale. The 32 output channels were an arbitrary choice. The final fully connected layer has 120 outputs because there are 120 different classes.
Determine transformations and assign training set and validation set
transform = transforms.Compose(
    [transforms.Grayscale(1),
     transforms.RandomCrop((32, 32)),
     transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

data_dir = 'dataset'
full_dataset = datasets.ImageFolder(os.path.join(data_dir, 'train'), transform=transform)

train_size = int(0.8 * len(full_dataset))
val_size = len(full_dataset) - train_size
trainset, valset = torch.utils.data.random_split(full_dataset, [train_size, val_size])

trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=2)
valloader = torch.utils.data.DataLoader(valset, batch_size=4,
                                        shuffle=False, num_workers=2)

classes = full_dataset.classes
I convert the images to grayscale because they are gray anyway. I crop them to 32x32 because the images come in different sizes and I figured they all have to be the same size when passed through the network. Everything works fine so far.
Train neural network
for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs
        inputs, labels = data

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

print('Finished Training')
When running this last piece of code, I get the following error as soon as outputs = net(inputs) is executed: size mismatch, m1: [3584 x 28], m2: [288 x 200] at /Users/soumith/miniconda2/conda-bld/pytorch_1532623076075/work/aten/src/TH/generic/THTensorMath.cpp:2070
My code is a variation of the code provided in this PyTorch tutorial. Can someone tell me what I'm doing wrong?
UPDATE
I updated the neural network class to this:
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # 1 input image channel, 6 output channels, 5x5 square convolution kernel
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # an affine operation: y = Wx + b
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Max pooling over a (2, 2) window
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # If the size is a square you can only specify a single number
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features

net = Net()
But now I get an error at loss = criterion(outputs, labels):
Assertion `cur_target >= 0 && cur_target < n_classes' failed. at /Users/soumith/miniconda2/conda-bld/pytorch_1532623076075/work/aten/src/THNN/generic/ClassNLLCriterion.c:93
In your first configuration, you have configured self.fc1 incorrectly. Its input needs to be of dimension 32 * 28 * 28 instead of 32 * 3 * 3, since your images are 32 x 32 and the kernel size and stride are 3 and 1 respectively. See this video for a simpler explanation. Try adjusting your second configuration yourself now; if you can't, comment below.
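To make that concrete, here is a minimal sketch of the first configuration with the corrected size, assuming 32x32 grayscale input and no pooling, as in the original class; the x.view line is an extra flattening step added so the conv output can actually be fed to the linear layer:
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3)   # 32x32 -> 30x30
        self.conv2 = nn.Conv2d(32, 32, 3)  # 30x30 -> 28x28
        self.fc1 = nn.Linear(32 * 28 * 28, 200)
        self.fc2 = nn.Linear(200, 120)     # 120 classes

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = x.view(x.size(0), -1)  # flatten to [batch, 32*28*28] before the linear layer
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x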
I'm running into a very weird issue and was hoping to find someone who might be a bit more familiar with it. I'm attempting a basic LSTM to do some binary classification with the following code:
class FakeData(object):
    def __init__(self, n):
        self.x = np.random.randint(4, size=(n, 90, 4))
        blah = np.random.randint(2, size=(n))
        self.y = np.zeros((n, 2))
        self.y[:, 0] = blah
        self.y[:, 1] = 1 - blah
        self.mask = np.arange(n)
        self.cnt = 0
        self.n = n

    def getdata(self, n):
        if self.cnt + n > self.n:
            np.random.shuffle(self.mask)
            self.cnt = 0
        mask = self.mask[self.cnt : self.cnt + n]
        return self.x[mask], self.y[mask]

n_data = 10000
batch_size = 10
fd = FakeData(n_data)

n_units = 200
n_classes = 2

x = tf.placeholder(tf.float32, shape=[None, 90, 4])
y_ = tf.placeholder(tf.float32, shape=[None, n_classes])
dropout = tf.placeholder(tf.float32)

w_out = tf.Variable(tf.truncated_normal([n_units, n_classes]))
b_out = tf.Variable(tf.truncated_normal([n_classes]))

lstm = tf.contrib.rnn.LSTMCell(n_units)
cell = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=1.0 - dropout)

new_x = tf.unstack(x, 90, 1)
new_x = tf.Print(new_x, [tf.shape(new_x)], message='newx is: ')

output, state = tf.nn.dynamic_rnn(cell, new_x, dtype=tf.float32)
output = tf.Print(output, [tf.shape(output)], message='output is: ')

logits = tf.matmul(output[-1], w_out) + b_out
logits = tf.Print(logits, [tf.shape(logits)], message='logits is: ')
preds = tf.nn.softmax(logits)

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                              labels=y_))
training = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

correct = tf.equal(tf.argmax(preds, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(10):
        batch_x, batch_y = fd.getdata(batch_size)
        sess.run([training], feed_dict={x: batch_x, y_: batch_y, dropout: 0})
        if i % 100 == 0:
            print "Accuracy {}".format(accuracy.eval(feed_dict={x: batch_x,
                                                                y_: batch_y, dropout: 0}))
The specific question I have is: for some reason, when I run the code without the tf.Print lines, I get a weird shape transformation error
ValueError: Dimension must be 2 but is 3 for 'transpose' (op: 'Transpose') with shapes: [?,4], [3].
on line
output, state = tf.nn.dynamic_rnn(cell, new_x, dtype=tf.float32)
However, when I include the tf.Print lines, it correctly logs the shapes and is able to run the whole session. Am I missing something?
For clarity, the shapes should be:
input: n x 90 x 4
new_x: 90 x n x 4
output: 90 x n x 200
logits: n x 2
Adding the answer here in case anyone else runs across this problem in the future.
Turns out, a lot of old RNN examples floating around use unstack. However, that turns the input into a list of tensors, which dynamic_rnn cannot take as input. The tf.Print call was converting it from a list of 2D tensors back into a single 3D tensor, which is why the graph built and ran. The solution is to transform the data dimensionally in some other way, for example:
new_x = tf.transpose(x, perm=(1, 0, 2)) (thanks rvinas)
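A small sketch of how that fix could slot into the graph above; note that time_major=True is my addition here, since I believe dynamic_rnn otherwise assumes batch-major [batch, time, features] input and output[-1] would no longer be the last time step:
new_x = tf.transpose(x, perm=(1, 0, 2))              # [batch, 90, 4] -> [90, batch, 4]
output, state = tf.nn.dynamic_rnn(cell, new_x, dtype=tf.float32,
                                  time_major=True)   # output: [90, batch, 200]
logits = tf.matmul(output[-1], w_out) + b_out        # last time step -> [batch, 2]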
I am currently trying to train my model to categorize the CIFAR-10 dataset. I read the data like this:
def convert_images(raw):
    raw_float = np.array(raw, dtype=float)
    images = raw_float.reshape([-1, 3, 32, 32])
    images = images.transpose([0, 2, 3, 1])
    return images

def load_data(filename):
    data = unpickle(filename)
    raw_images = data[b'data']
    labels = np.array(data[b'labels'])
    images = convert_images(raw_images)
    return images, labels

def load_training_data():
    images = np.zeros(shape=[50000, 32, 32, 3], dtype=float)
    labels = np.zeros(shape=[50000], dtype=int)
    begin = 0
    for i in range(5):
        filename = "data_batch_" + str(i+1)
        images_batch, labels_batch = load_data(filename)
        num_images = len(images_batch)
        end = begin + num_images
        images[begin:end, :] = images_batch
        labels[begin:end] = labels_batch
        begin = end
    return images, labels, OneHotEncoder(categorical_features=labels, n_values=10)
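As a quick, purely illustrative check of what convert_images produces (random values standing in for a real CIFAR batch):
fake_raw = np.random.randint(0, 256, size=(2, 3072))  # two fake rows of 3072 raw pixel values
print(convert_images(fake_raw).shape)                 # (2, 32, 32, 3)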
convert_images reshapes the data into a 4D array of 32x32x3 pixel values for the RGB channels. I define my model like this (I first reshape X into a row vector because the 4D array creates errors):
X = tf.placeholder(tf.float32, [None, 32, 32, 3])
Y_labeled = tf.placeholder(tf.int32, [None])

data = load_training_data()

with tf.name_scope('dnn'):
    XX = tf.reshape(X, [-1, 3072])
    hidden1 = tf.layers.dense(XX, 300, name='hidden1', activation=tf.nn.relu)
    hidden2 = tf.layers.dense(hidden1, 200, name='hidden2', activation=tf.nn.relu)
    hidden3 = tf.layers.dense(hidden2, 200, name='hidden3', activation=tf.nn.relu)
    hidden4 = tf.layers.dense(hidden3, 100, name='hidden4', activation=tf.nn.relu)
    logits = tf.layers.dense(hidden4, 10, name='outputs')

with tf.name_scope('loss'):
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y_labeled, logits=logits)
    loss = tf.reduce_mean(cross_entropy, name='loss')

learning_rate = 0.01

with tf.name_scope('train'):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)

with tf.name_scope('eval'):
    correct = tf.nn.in_top_k(logits, Y_labeled, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()
batch_size = 100
n_epochs = 50

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for iteration in range(50000 // batch_size):
            X_batch = data[0][iteration*batch_size:(iteration+1)*batch_size]
            y_batch = data[1][iteration*batch_size:(iteration+1)*batch_size]
            # X_batch, y_batch = data.train.next_batch(batch_size)
            sess.run(training_op, feed_dict={X: X_batch, Y_labeled: y_batch})
        acc_train = accuracy.eval(feed_dict={X: X_batch, Y_labeled: y_batch})
        print(epoch, "train accuracy:", acc_train, "loss", loss)
I want to define a simple model with 4 hidden layers. When I run this, it compiles with no errors and starts "training", but the accuracy stays at 0.0 and it does not print any losses. I am not sure whether the error is in my calculation of accuracy and loss or in my definition of the model.
There seems to be a problem with the way you feed your labels. When you create the placeholder Y_labeled = tf.placeholder(tf.int32, [None, 10]) it expects a vector of dimension 10 per example, but later when you create the label numpy tensor labels = np.zeros(shape=[50000], dtype=int) each label is a scalar.
This is why you have this error: the placeholder needs to be fed with a tensor of dimension (batch_size, 10), but you feed it with one of dimension (batch_size,).
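For what it's worth, if the labels stay as the 0-9 class indices that load_training_data builds, a one-line NumPy lookup produces the (batch_size, 10) one-hot rows described above (a sketch under that assumption):
labels_onehot = np.eye(10)[labels]   # (50000,) integer labels -> (50000, 10) one-hot rows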