Getting a dimension error in a Neural Network - Python

Hi, I am trying to make a neural network and I am getting this error:
ValueError: Error when checking input: expected conv2d_27_input to have 4 dimensions, but got array with shape (60000, 28, 28)
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.constraints import maxnorm
from keras.optimizers import SGD
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
# load data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# normalize inputs from 0-255 to 0.0-1.0
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train = x_train / 255.0
x_test = x_test / 255.0
# Encode the outputs
y_train = np_utils.to_categorical(y_train) #Converts a class vector (integers) to binary class matrix.
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
# Build the model
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(28, 28, 2), activation='relu'))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
# Compile model
epochs = 5
lrate = 0.002
decay = lrate/epochs
Can someone help me understand it?

Reshape your data to add the channel dimension, before or after the normalisation:
# ...
# reshape to be [samples][width][height][channels]
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1).astype('float32')
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1).astype('float32')
x_train = x_train / 255.0
x_test = x_test / 255.0
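Note that the first Conv2D layer's input_shape also has to match the reshaped data: MNIST images have a single grayscale channel, so the (28, 28, 2) in the question's model should be (28, 28, 1) to go with the reshape above. A small sketch of the corrected layer:
# single grayscale channel, matching the (samples, 28, 28, 1) reshape above
model.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'))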

Related

How to extract the image names and labels in the training set after completing the active learning loop and write them to a CSV file?

I am using the Keras script at https://modal-python.readthedocs.io/en/latest/content/examples/Keras_integration.html to perform active learning for a binary classification task. After completing the active learning loop, how do we extract the image names and labels in the training set that gives the optimal test performance and write them to a CSV file?
from tensorflow import keras
import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from modAL.models import ActiveLearner
from modAL.uncertainty import entropy_sampling
# build function for the Keras' scikit-learn API
def create_keras_model():
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
    return model
# create the classifier
classifier = KerasClassifier(create_keras_model)
# read training data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 28, 28, 1).astype('float32') / 255
X_test = X_test.reshape(10000, 28, 28, 1).astype('float32') / 255
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
# assemble initial data
n_initial = 1000
initial_idx = np.random.choice(range(len(X_train)), size=n_initial, replace=False)
X_initial = X_train[initial_idx]
y_initial = y_train[initial_idx]
# generate the pool
# remove the initial data from the training dataset
X_pool = np.delete(X_train, initial_idx, axis=0)
y_pool = np.delete(y_train, initial_idx, axis=0)
"""
Training the ActiveLearner
"""
# initialize ActiveLearner
learner = ActiveLearner(
    estimator=classifier,
    X_training=X_initial, y_training=y_initial,
    verbose=1
)
# the active learning loop
n_queries = 100
for idx in range(n_queries):
    query_idx, query_instance = learner.query(X_pool, n_instances=100, verbose=0)
    print(query_idx)
    learner.teach(
        X=X_pool[query_idx], y=y_pool[query_idx], only_new=True,
        verbose=1
    )
    # remove queried instance from pool
    X_pool = np.delete(X_pool, query_idx, axis=0)
    y_pool = np.delete(y_pool, query_idx, axis=0)
# the final accuracy score
print(learner.score(X_test, y_test, verbose=1))
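One way to get the requested CSV (not from the original thread, just a sketch reworking the query loop above): MNIST has no file names, so the index into X_train serves as the image identifier. Keep a parallel array of original indices while deleting from the pool, record every index the learner is taught on, and write it out at the end:
import csv

# original training-set indices currently in the learner's training data / still in the pool
selected_idx = list(initial_idx)
pool_idx = np.delete(np.arange(len(X_train)), initial_idx)

for idx in range(n_queries):
    query_idx, query_instance = learner.query(X_pool, n_instances=100, verbose=0)
    learner.teach(X=X_pool[query_idx], y=y_pool[query_idx], only_new=True, verbose=1)
    selected_idx.extend(pool_idx[query_idx])          # remember the original indices
    X_pool = np.delete(X_pool, query_idx, axis=0)
    y_pool = np.delete(y_pool, query_idx, axis=0)
    pool_idx = np.delete(pool_idx, query_idx, axis=0)

# write the index and label of every image the learner was trained on
with open('training_set.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['mnist_train_index', 'label'])
    for i in selected_idx:
        writer.writerow([int(i), int(np.argmax(y_train[i]))])
To keep only the training set at the best-scoring iteration, snapshot selected_idx whenever learner.score(X_test, y_test) improves during the loop.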

Input 0 of layer sequential is incompatible with the layer: expected axis -1 of input shape to have value 1 but received input with shape

Traceback:
ValueError: Input 0 of layer sequential is incompatible with the layer: expected axis -1 of input shape to have value 1 but received input with shape (None, 1, 300, 300)
import tensorflow as tf
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers import Conv2D, MaxPool2D
from keras.optimizers import SGD,RMSprop,Adam
from keras.utils import np_utils
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import os
import theano
from PIL import Image
from numpy import *
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
img_rows, img_cols = 300, 300
img_channels = 1
path1 = "./images"
path2 = "./resize"
listing = os.listdir(path1)
listing2 = list(listing)
num_samples=size(listing)
for i in range(len(listing)):
    if "jpg" in listing[i]:
        im = Image.open(path1 + '/' + listing[i])
        img = im.resize((img_rows, img_cols))
        gray = img.convert('L')
        gray.save(path2 + '/' + listing[i], "JPEG")
    elif "txt" in listing[i]:
        listing2.remove(listing[i])
imlist = os.listdir(path2)
im1 = array(Image.open(path2 + '/'+ imlist[0]))
m,n = im1.shape[0:2]
imnbr = len(imlist)
immatrix = array([array(Image.open(path2 + '/' + im2)).flatten()
                  for im2 in imlist], 'f')
label=np.ones((num_samples,),dtype = int)
label[0:1260]=0
label[1260:2716]=1
label[2716:]=2
data,Label = shuffle(immatrix,label, random_state=2)
train_data = [data,Label]
img=immatrix[167].reshape(img_rows,img_cols)
plt.imshow(img)
plt.imshow(img,cmap='gray')
print (train_data[0].shape)
print (train_data[1].shape)
batch_size = 32
classes = 3
epochs = 20
filters = 32
pool = 2
conv = 3
(X, y) = (train_data[0],train_data[1])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=4)
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
Y_train = np_utils.to_categorical(y_train, classes)
Y_test = np_utils.to_categorical(y_test, classes)
i = 256
plt.imshow(X_train[i, 0], interpolation='nearest')
print("label : ", Y_train[i,:])
model = Sequential()
model.add(Convolution2D(filters, conv, conv, padding = 'valid',
                        input_shape = (img_rows, img_cols, 1),
                        data_format = "channels_last"))
model.add(Activation('relu'))
model.add(Convolution2D(filters, conv, conv, padding = 'valid'))
model.add(Activation('relu'))
model.add(Convolution2D(filters, conv, conv, padding = 'valid'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = (pool, pool), strides = pool, padding = "valid"))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
print(model.summary())
hist = model.fit(X_train, Y_train, batch_size = batch_size, epochs = epochs,
                 verbose=1, validation_data=(X_test, Y_test))
I've tried the approaches from similar questions such as "ValueError: Input 0 of layer sequential is incompatible with the layer: expected axis -1 of input shape to have value 1", but I still face the same error. Does anyone have a solution to this?
I know that the problem lies with the line
model.add(Convolution2D(filters, conv, conv, padding = 'valid',
                        input_shape = (img_rows, img_cols, 1),
                        data_format = "channels_last"))
But I'm not too sure how to fix it.
Thanks!
This is actually a fairly descriptive/accurate error message. The input shape you are declaring is (batch, img_rows, img_cols, 1), yet the model is clearly receiving images of shape (batch, 1, 300, 300). So it seems like you just have one dimension set up wrong. Move the channel axis to the end before fitting, something like
X_train = tf.transpose(X_train, [0, 2, 3, 1])
X_test = tf.transpose(X_test, [0, 2, 3, 1])
and you should be set.
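Equivalently, you can build the arrays in channels-last order in the first place; a small sketch reworking the reshape lines from the question:
# reshape directly to (samples, height, width, channels) to match input_shape=(300, 300, 1)
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)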

ValueError: Input 0 of layer sequential_2 is incompatible with the layer

I have the following code:
import tensorflow as tf
import keras
from keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
import numpy as np
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], x_train.shape[2], 3))
print(x_train.shape)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], x_test.shape[2], 3))
print(x_test.shape)
x_train = x_train.astype('float32')/255.0
x_test = x_test.astype('float32')/255.0
from keras.utils import to_categorical
y_train = to_categorical(y_train, num_classes = 10)
y_test = to_categorical(y_test, num_classes = 10)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
model = Sequential()
#Defining layers of the model
model.add(Dense(2056, activation='relu', input_shape = (3072,)))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
history = model.fit(x_train, y_train, batch_size=1000, epochs=50)
And I am facing the following error:
ValueError: Input 0 of layer sequential_2 is incompatible with the layer: expected axis -1 of input shape to have value 3072 but received input with shape (1000, 32, 32, 3)
I want to keep the input_shape as 3072 only. How can I reshape my y_test to solve this?
You should Flatten your input data before passing it to the Dense layer.
model = Sequential()
#Defining layers of the model
model.add(Flatten(input_shape=(32, 32, 3)))  # 32*32*3 = 3072
model.add(Dense(2056, activation='relu'))
model.add(Dense(10, activation='softmax'))
This should fix the problem.
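If you would rather keep input_shape=(3072,) as in the original model, you can instead flatten the image arrays themselves before fitting; a minimal sketch using the question's variable names:
# collapse each 32x32x3 image into a 3072-long vector to match input_shape=(3072,)
x_train = x_train.reshape(x_train.shape[0], -1)  # (50000, 3072)
x_test = x_test.reshape(x_test.shape[0], -1)     # (10000, 3072)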

the prediction function gives the same output every time

from keras.datasets import mnist
from keras.models import Sequential, load_model
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
from keras.preprocessing import image
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import cv2
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape((X_train.shape[0], 28, 28, 1)).astype('float32')
X_test = X_test.reshape((X_test.shape[0], 28, 28, 1)).astype('float32')
X_train = X_train / 255
X_test = X_test / 255
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
def larger_model():
    model = Sequential()
    model.add(Conv2D(30, (5, 5), input_shape=(28, 28, 1), activation='relu'))
    model.add(MaxPooling2D())
    model.add(Conv2D(15, (3, 3), activation='relu'))
    model.add(MaxPooling2D())
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(50, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
model = larger_model()
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200)
scores = model.evaluate(X_test, y_test, verbose=0)
print("Large CNN Error: %.2f%%" % (100-scores[1]*100))
model.save('good_model.h5')
print("Model saved")
After running this code, we get a '.h5' model. Then, to predict this image, I added this code:
import cv2
model = load_model('good_model.h5')
file = cv2.imread('screenshot.png')
file = cv2.resize(file, (28, 28))
file = cv2.cvtColor(file, cv2.COLOR_BGR2GRAY)
file = file.reshape((-1, 28, 28,1))
result = model.predict(file)
print(result[0])
t = (np.argmax(result[0]))
print("I predict this number is a:", t)
But I always get the same answer, which is 4. Above, I load the image with cv2, convert it to grayscale, and then reshape it to the size of the input. It takes the input correctly, but the answer is always the same no matter what image I give it as input.
You need to invert the image before prediction. Once you invert the image, it will predict correctly. The given example is predicted as "2", and I checked with other images such as "7", which were also predicted correctly.
file = cv2.bitwise_not(file)
Other than the above, I made one change: I imported the modules from TensorFlow 2.x. Please check the full code here.
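For reference, a sketch of the prediction snippet with the inversion applied and the same 0-1 scaling used during training (the training arrays were divided by 255, so the prediction input should be scaled the same way):
import cv2
import numpy as np
from keras.models import load_model

model = load_model('good_model.h5')
file = cv2.imread('screenshot.png')
file = cv2.cvtColor(file, cv2.COLOR_BGR2GRAY)
file = cv2.resize(file, (28, 28))
file = cv2.bitwise_not(file)                                   # MNIST digits are white on black, so invert
file = file.reshape((1, 28, 28, 1)).astype('float32') / 255.0  # match the training preprocessing
result = model.predict(file)
print("I predict this number is a:", np.argmax(result[0]))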

Error in getting confusion matrix [duplicate]

This question already has answers here:
Multilabel-indicator is not supported for confusion matrix
(4 answers)
Closed 4 years ago.
I want to get a confusion matrix with the following code (MNIST classification):
from __future__ import print_function
from sklearn.metrics import confusion_matrix
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from keras.callbacks import TensorBoard
import numpy as np
batch_size = 128
num_classes = 10
epochs = 1
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.get_weights()
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
y_pred=model.predict(x_test)
confusion_matrix(y_test, y_pred)
But I get the following error:
ValueError: Can't handle mix of multilabel-indicator and continuous-multioutput
I think I have misinterpreted the meaning of y_pred or calculated it wrong.
How can I solve this?
confusion_matrix expects the true and predicted class labels, not one-hot/probability distribution representations. Replace the last line with the following:
confusion_matrix(y_test.argmax(axis=1), y_pred.argmax(axis=1))
This converts each 10000x10 array into a vector of 10000 class labels, one per sample, which is what confusion_matrix expects.
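Spelled out, the conversion looks like this (a small sketch using the arrays from the question):
y_true_labels = y_test.argmax(axis=1)   # one-hot (10000, 10) -> integer labels (10000,)
y_pred_labels = y_pred.argmax(axis=1)   # probabilities (10000, 10) -> predicted labels (10000,)
cm = confusion_matrix(y_true_labels, y_pred_labels)
print(cm)  # 10x10 matrix: rows are true digits, columns are predicted digits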
