I'm new to PyTorch and I'm using the classic MNIST dataset for image classification. While fitting the model, I'm getting the error:
NotImplementedError: uint8
I'm using the fastai library's ImageClassifierData class as a wrapper for all the training and validation data, and a very basic single-layer neural network. The code I'm using is as follows:
from keras.datasets import mnist
import matplotlib.pyplot as plt
from fastai.metrics import *
from fastai.model import *
from fastai.dataset import *
import torch.nn as nn
(x_train, y_train), (x_valid, y_valid) = mnist.load_data()
net = nn.Sequential(
    nn.Linear(784, 10),
    nn.Softmax()).cuda()
md = ImageClassifierData.from_arrays('/data/mnist',
                                     (x_train, y_train),
                                     (x_valid, y_valid))
loss = nn.NLLLoss()
metrics = [accuracy]
opt = optim.SGD(net.parameters(), 1e-1, momentum=0.9, weight_decay=1e-3)
fit(net, md, n_epochs=3, crit=loss, opt=opt, metrics=metrics)
Can someone tell me what this error is about and how to solve it?
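A hedged sketch of a likely fix (an assumption, not a confirmed solution): mnist.load_data() returns uint8 arrays, and the fastai/PyTorch pipeline expects float inputs, so casting and scaling the arrays before building the data object should clear the NotImplementedError:
import numpy as np  # assumed available alongside the imports above
# Cast the uint8 images to float32 and scale into [0, 1] before wrapping them.
x_train = (x_train / 255.0).astype(np.float32)
x_valid = (x_valid / 255.0).astype(np.float32)
md = ImageClassifierData.from_arrays('/data/mnist',
                                     (x_train, y_train),
                                     (x_valid, y_valid))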
I am trying to create a stacking ensemble using scikit-learn that contains a Keras model wrapped using KerasClassifier.
Here's an example of how my code looks using the iris dataset:
# import libraries
import pandas
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
from keras.layers import Dropout, Flatten, Dense
from keras.utils import np_utils
from keras import optimizers
from sklearn.model_selection import cross_val_score, RepeatedStratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import StackingClassifier
from numpy import mean
from numpy import std
# import data
dataframe = pandas.read_csv("iris.csv", header=None)
dataset = dataframe.values
X = dataset[:,0:4].astype(float)
Y = dataset[:,4]
# create and wrap neural network
def create_model():
    model = Sequential()
    model.add(Flatten(input_shape=X.shape[1:]))
    model.add(Dense(150, activation=tf.keras.layers.LeakyReLU(alpha=0.3)))
    model.add(Dropout(0.9))
    model.add(Dense(50, activation=tf.keras.layers.LeakyReLU(alpha=0.3)))
    model.add(Dropout(0.9))
    model.add(Dense(3, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adam(lr=2e-3),
                  metrics=['acc'])
    return model
model_nn = KerasClassifier(build_fn=create_model, epochs=50, batch_size=5, verbose=0)
model_nn._estimator_type = "classifier"
# create stack
def stacking():
    level0 = list()
    level0.append(('lr', LogisticRegression(max_iter=500000, C=0.00041, solver='newton-cg', multi_class='ovr')))
    level0.append(('nn', model_nn))
    level0.append(('svm', SVC(C=1.0, gamma='scale', tol=0.001, probability=True)))
    level1 = LogisticRegression()
    model = StackingClassifier(estimators=level0, final_estimator=level1, cv=5)
    return model
# evaluate model score
def evaluate_model(model, X, y):
    cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
    scores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1, error_score='raise')
    return scores
scores = evaluate_model(stacking(), X, Y)
print('%.3f (%.3f)' % (mean(scores), std(scores)))
and I get this error:
ValueError: The estimator KerasClassifier should be a classifier.
I found some posts where other users had this issue, but they were able to fix it using the model_nn._estimator_type = "classifier" line. Unfortunately, that isn't solving the issue for me. I'm really new to all of this, so any advice is appreciated. :)
KerasClassifier has been migrated from keras.wrappers.scikit_learn to scikeras.wrappers.
You need to use the code below to access KerasClassifier:
!pip install scikeras
from scikeras.wrappers import KerasClassifier
Please check this link for more details.
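As a hedged sketch of how the wrapper would then be built (the model= keyword is scikeras's argument name; the rest mirrors the question's code):
from scikeras.wrappers import KerasClassifier
# scikeras takes the model-building function via `model` and already reports
# itself as a classifier, so the _estimator_type workaround is unnecessary.
model_nn = KerasClassifier(model=create_model, epochs=50, batch_size=5, verbose=0)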
I have been trying to train a simple neural network (784, 512, 128, 10) on the MNIST dataset using cross-entropy loss. I am using Keras to fetch the MNIST dataset, but I'm getting this error:
RuntimeError: 1D target tensor expected, multi-target not supported
This happens when my main training loop is:
for epoch in range(num_epochs):
    for x, y in train_data:
        x = Variable(x)
        y = Variable(y)
        print(x.shape)
        y_pred = model(x)
        optimizer.zero_grad()
        loss = criterion(y_pred, y)
        loss.backward()
        optimizer.step()
So, to get rid of that error, I changed it to:
y = y[0][0:]
y_pred = y_pred[0][0:]
loss = criterion(y_pred, y)
But after that I'm getting this error:
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
I read many articles about how to solve this error, but none helped.
Is this error caused by the Keras dataset, or is something wrong in my code? Can someone please help me find the error?
My Code:
import torch
import torch.nn as nn
import numpy as np
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader,Dataset
import keras
import torch.nn.functional as F
from torch.autograd import Variable
class Netz(nn.Module):
    def __init__(self, n_input_features):
        super(Netz, self).__init__()
        self.linear = nn.Linear(784, 512, bias=True)
        self.l1 = nn.Linear(512, 128, bias=True)
        self.l2 = nn.Linear(128, 10, bias=True)
        self.relu = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        # x = x.view(-1, 784)
        x = self.relu(self.linear(x))
        x = self.relu2(self.l1(x))
        x = self.softmax(self.l2(x))
        return x

model = Netz(784)
class Data(Dataset):
    def __init__(self):
        self.x = x_train
        self.y = y_train
        self.len = self.x.shape[0]

    def __getitem__(self, index):
        return self.x[index], self.y[index]

    def __len__(self):  # DataLoader needs this; it was missing from the snippet
        return self.len
mnist = keras.datasets.mnist
#Copying data
(x_train, y_train),(x_test, y_test) = mnist.load_data()
#One-hot encoding the labels
y_train = keras.utils.to_categorical(y_train,10)
y_test = keras.utils.to_categorical(y_test,10)
#Flattening the images
x_train_reshaped = x_train.reshape((60000,784))
x_test_reshaped = x_test.reshape((10000,784))
#Normalizing the inputs
x_train = x_train_reshaped/255.0
x_test = x_test_reshaped/255.0
x_train = torch.from_numpy(x_train.astype(np.float32))
x_test = torch.from_numpy(x_test.astype(np.float32))
y_train = torch.from_numpy(y_train.astype(np.float32))
y_test = torch.from_numpy(y_test.astype(np.float32))
criterion = nn.CrossEntropyLoss()
print(criterion)
optimizer = torch.optim.SGD(model.parameters(), lr=0.05)
dataset = Data()
train_data = DataLoader(dataset=dataset, batch_size=1, shuffle=False)
num_epochs = 5
for epoch in range(num_epochs):
    for x, y in train_data:
        x = Variable(x)
        y = Variable(y)
        y_pred = model(x)
        optimizer.zero_grad()
        y = y[0][0:]
        y_pred = y_pred[0][0:]
        loss = criterion(y_pred, y)
        loss.backward()
        optimizer.step()
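A hedged sketch of the usual fix for this error (an assumption, not a confirmed solution): nn.CrossEntropyLoss expects raw logits of shape (batch, num_classes) together with integer class labels of shape (batch,), so the to_categorical one-hot encoding and the final Softmax layer should both be dropped. A self-contained illustration:
import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
logits = torch.randn(4, 10)           # raw model outputs, no softmax applied
targets = torch.tensor([3, 0, 9, 1])  # 1D class indices, dtype torch.long
loss = criterion(logits, targets)     # no RuntimeError: the target is 1D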
I'm getting a numpy shape error when I use the predict function of a Keras estimator. I build, evaluate, and then retrain the model using the following code:
import pandas as pd
import sqlalchemy as sqla
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
# Connect to to the DB and retrieve the iris table
con = sqla.create_engine('postgresql://tristan:sebens#db:5432/tristan')
con.connect()
table_name = "iris"
schema = "public"
iris = pd.read_sql_table(table_name, con, schema=schema)
iris.head()
iris_ds = iris.values # Convert the table to a numpy array
X = iris_ds[:, 0:4].astype(float) # Slice the descriptive features into a numpy array
Y = iris_ds[:, 4] # Slice the labels away as their own numpy array
# The labels are encoded as strings, so we need to encode them
# as numbers that can be output by an ANN
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = to_categorical(encoded_Y)
# define baseline model
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(8, input_dim=4, activation='relu'))
    model.add(Dense(3, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
seed = 7
# Train the model:
# First we define the model as a classifier. This will affect the process used to train it
estimator = KerasClassifier(build_fn=baseline_model, epochs=200, batch_size=5, verbose=0)
# KFold splits the data into 10 train/evaluation folds, giving a more
# realistic estimate of the model's accuracy via cross-validation
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
# Now that we have our classifier and our data pipeline defined, we can begin the training process
results = cross_val_score(estimator, X, dummy_y, cv=kfold)
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
# If we like our accuracy, then we can train the model for real
# Evaluating the model actually evaluates a clone of the model, so now we need to train the model again
estimator.fit(X, dummy_y)
And this is where the trouble is. I try to make a test prediction:
# Let's make a test prediction with our model
x = X[0]
estimator.predict(x)
And I get an input shape error:
ValueError: Error when checking input: expected dense_21_input to have shape (4,) but got array with shape (1,)
I'm at a loss. How can the input have the wrong shape if it's literally a member of the training dataset?
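A hedged sketch of the likely fix (an assumption, not a confirmed solution): predict() expects a batch of samples of shape (n_samples, 4), so a single row needs a leading batch axis:
import numpy as np

# X[0] has shape (4,); reshape it to (1, 4) so it looks like a batch of one.
x = np.asarray(X[0]).reshape(1, -1)
print(estimator.predict(x))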
I'm currently working with sklearn.neural_network.MLPRegressor, but looking for more flexibility in the definition of the neural network, I tried Keras.
I create the following dataset:
import numpy as np
x = np.linspace(-0.5, 5, 2000)
y = np.tanh(x)
y = y.reshape(-1,)
x = x.reshape(-1, 1)
and implement the trivial neural network with sklearn
from sklearn.neural_network import MLPRegressor
model = MLPRegressor(hidden_layer_sizes=(1,), activation='tanh', solver='lbfgs', verbose=True, validation_fraction=0.1)
model.fit(x,y)
which is solved with a high degree of precision: MSE 7.252229995056021e-10.
I replicate the same NN with Keras:
from keras.layers import Input, Dense
from keras.models import Model
import keras
from keras.wrappers.scikit_learn import KerasRegressor
keras.initializers.RandomUniform()
visible = Input(shape=(1,))
hidden1 = Dense(1, activation='tanh')(visible)
output = Dense(1,activation="linear")(hidden1)
model = Model(inputs=visible, outputs=output)
model.compile(optimizer='adam', metrics=['mean_squared_error'], loss=['mean_squared_error'])
model.fit(x, y,validation_split=0.1)
but I obtain MSE 0.02130996414874087 (the result doesn't change a lot using different optimizers).
Since the NN can be solved analytically, I would expect a higher degree of precision.
Does anyone know the reason behind such a difference, if any (apart from the different optimizer)?
Thanks!
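One hedged observation (an assumption, not a confirmed answer): MLPRegressor's 'lbfgs' solver is a full-batch method that iterates to convergence, while Keras' fit() defaults to a single epoch of minibatch Adam, so training far longer should narrow the gap:
# Train for many epochs instead of the default epochs=1.
model.fit(x, y, validation_split=0.1, epochs=2000, verbose=0)
print(model.evaluate(x, y, verbose=0))  # [loss, mean_squared_error]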
I am a beginner in Keras and I am writing a simple program for MNIST, but when I try to load the model I get this error:
ValueError: You are trying to load a weight file containing 2 layers into a model with 0 layers.
This is my code:
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential, load_model
from keras.layers import Dense
from keras.utils import np_utils
#fixing random number seed
np.random.seed(7)
(X_train, Y_train),(X_test, Y_test) = mnist.load_data("D:\MY CODE PROJECT\CNN\datasets\mnist.npz")
num_pixel = X_train.shape[1] * X_train.shape[2]
#converting image to vector
X_train = X_train.reshape(X_train.shape[0],num_pixel).astype('float32')
X_test = X_test.reshape(X_test.shape[0],num_pixel).astype('float32')
# Normalizing Input from 0-255 to 0-1
X_train = X_train/255
X_test = X_test/255
#As output is multiclass so change output labels to 'ONE-HOT' ecodings Form
Y_train = np_utils.to_categorical(Y_train)
Y_test = np_utils.to_categorical(Y_test)
#defining simple Neural Network with one hidden layer
num_classes = Y_test.shape[1]
#creating model
model = Sequential()
model.add(Dense(num_pixel, activation='relu', kernel_initializer='normal'))
model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fitting the model
model.fit(X_train, Y_train, batch_size=200, epochs=10, verbose=2, validation_data=(X_test, Y_test))
scores = model.evaluate(X_test,Y_test,verbose=0)
#Printing Error
print("baseline Error: %f" %(100-scores[1]*100))
model.save('mnist_nn_keras.h5')
del model
model = load_model('mnist_nn_keras.h5')
Can anyone explain what's wrong in the code? I am using Keras version 2.2.0.
You need to add input_shape to your model when adding the first layer instance; the documentation for the add function clearly describes this error.
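A minimal sketch of that fix applied to the question's model (an illustration, not the answerer's exact code; num_pixel and num_classes come from the question):
model = Sequential()
# Declaring input_shape on the first layer builds the model immediately,
# so its layers and weights exist when load_model restores them.
model.add(Dense(num_pixel, activation='relu', kernel_initializer='normal',
                input_shape=(num_pixel,)))
model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))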