InvalidArgumentError while using Tensorboard - python

I was trying to use TensorBoard to look at my model, but I get an InvalidArgumentError.
model = Sequential()
model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.summary()

model.fit(x=X_train,
          y=y_train,
          epochs=10,
          validation_split=0.1,
          callbacks=[tensorboard_callback])
When I remove callbacks=[tensorboard_callback] the model trains fine, but when I include it, it spits out the following error.
InvalidArgumentError: You must feed a value for placeholder tensor 'conv2d_input' with dtype float and shape [?,50,50,1]
[[{{node conv2d_input}}]]
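For context, the snippet above never shows how tensorboard_callback is defined; a minimal sketch of a definition matching the fit call, assuming tf.keras from TF 2.x (the log directory name is hypothetical):

import tensorflow as tf

# Hypothetical log path; point TensorBoard at the same directory.
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs/fit")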

Related

ValueError: logits and labels must have the same shape ((None, 6, 8, 1) vs (None, 1))

I am trying to get my hands dirty with neural networks in practice; as a first task I am trying to classify some images into two classes. So I took a CNN example using Keras and TensorFlow from a tutorial on YouTube.
I tried changing my output layer activation to sigmoid, and when I did, I started getting the error:
ValueError: logits and labels must have the same shape ((None, 6, 8, 1) vs (None, 1))
Given specifically at the following line:
validation_steps = nb_validation_Samples // batch_size)
My neural network code:
Libraries
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
import numpy as np
from keras.preprocessing import image
Setup
img_width, img_height = 128, 160
train_data_dir = '/content/drive/My Drive/First-Group/Eyes/'
validation_data_dir = '/content/drive/My Drive/First-Validation-Group/'
nb_train_samples = 1300
nb_validation_Samples = 1300
epochs = 100
batch_size = 16
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

train_datagen = ImageDataGenerator(
    zoom_range=0.2,
)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Dense(64))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.summary()
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_Samples // batch_size)  # <-- error reported here

model.save_weights('weights.npy')
The input of your network is 4D (batch_dim, height, width, channels), while your target is 2D (batch_dim, 1). You need something in your network to go from 4D to 2D, such as Flatten or global pooling. For example, you can add one of them after your last max-pooling layer.
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) #<========================
model.add(Dense(64))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
The use of binary_crossentropy as the loss with a sigmoid output and class_mode='binary' in the generators is fine if you are dealing with a binary classification problem.
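As mentioned above, global pooling is an alternative to Flatten; a minimal sketch of swapping in GlobalAveragePooling2D after the last max-pooling layer (the extra import from keras.layers is assumed):

from keras.layers import GlobalAveragePooling2D

# ... same Conv2D / MaxPooling2D stack as above ...
model.add(GlobalAveragePooling2D())  # (batch, h, w, 64) -> (batch, 64)
model.add(Dense(64))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))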

expected conv2d_28_input to have 4 dimensions, but got array with shape consisting of 3 dimensions only

I have built an image classification model, but I get an error stating ValueError: Error when checking input: expected conv2d_28_input to have 4 dimensions, but got array with shape (341, 720, 3)
I also tried changing the input_shape argument passed to model.add, without success.
Here is the model I have built:
model = Sequential()
#3 Convolutional layers
input_shape = X.shape[1:]
model.add(Conv2D(64, (3,3), input_shape = X.shape[1:]))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(64, (3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(64, (3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
#2 hidden layers
model.add(Flatten())
model.add(Dense(128))
model.add(Activation("relu"))
model.add(Dense(128))
model.add(Activation("relu"))
model.add(Dense(2))
model.add(Activation('softmax'))
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=['accuracy'])
history=model.fit(X,y, batch_size=6, epochs=5, validation_split=0.1)
Saving the model
model.save_weights("model.h5")
model.save('CNN.model')
Predicting the output class
import tensorflow as tf
from keras.preprocessing.image import img_to_array, load_img

model = tf.keras.models.load_model("CNN.model")
image = load_img("/content/drive/My Drive/Images/Blackened/blackened-1.jpg")  # image is stored here
final = img_to_array(image)
prediction = model.predict(final)
prediction = list(prediction[0])
print(CATEGORIES[prediction.index(max(prediction))])  # CATEGORIES holds the class names defined earlier
When I run the final block of code, I get the error mentioned above.
You need to add the batch dimension, which is missing from your data, even if it's a single image:
import numpy as np

final = img_to_array(image)
final = np.expand_dims(final, axis=0)  # (h, w, c) -> (1, h, w, c)
prediction = model.predict(final)
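Note that the loaded image must also match the spatial size the model was trained on; load_img can resize at load time via its target_size argument. A sketch, assuming hypothetical training dimensions img_height and img_width:

image = load_img("/content/drive/My Drive/Images/Blackened/blackened-1.jpg",
                 target_size=(img_height, img_width))  # resized to the training size
final = np.expand_dims(img_to_array(image), axis=0)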

I have 10000 images in vector form; how do I convert them for my convolutional neural network?

I am new to convolutional neural networks. Instead of getting my data in image format, I have been given a matrix of flattened images with shape [10000 x 784],
i.e. 10000 images of size 28x28.
Given that one image is 28x28, how should I feed the data matrix to the input of my CNN?
My model is:
model = models.Sequential()
model.add(layers.Conv2D(64, (3, 3), activation='relu', input_shape=(28,28,1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
#model.add(layers.Flatten())
model.add(layers.Dense(2500, activation='relu'))
model.add(layers.Dense(2500, activation='relu'))
model.add(layers.Dense(1, activation='relu'))
model.compile(optimizer='adam',
              loss='mean_squared_error',
              metrics=['mae', 'mse'])

callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=15)

# Fit the model
history = model.fit(x_trained, y_train, epochs=7000, validation_split=0.2,
                    shuffle=True, verbose=1, callbacks=[callback])
I get an error at model.fit.
P.S.: I am doing regression, and for every image I have one value as the output.
Begin with a Reshape layer:
model = models.Sequential()
model.add(layers.Reshape((28, 28, 1), input_shape=(784,)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
# ...
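Alternatively, you can reshape the NumPy array itself before calling fit and keep input_shape=(28, 28, 1) on the first Conv2D; a minimal sketch, assuming x_trained is the (10000, 784) matrix from the question:

import numpy as np

x_trained = np.asarray(x_trained).reshape(-1, 28, 28, 1)  # (10000, 784) -> (10000, 28, 28, 1)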

Trying to create a convolutional autoencoder network in Keras, but it keeps crashing

I'm trying to create an autoencoder with Keras, and my data's shape is:
(62328, 1, 40, 40)
The error:
ValueError: Negative dimension size caused by subtracting 3 from 1 for 'conv2d/Conv2D' (op: 'Conv2D') with input shapes: [?,1,40,40], [3,3,40,4]
I don't know how to fix it. I've tried changing the data_format to channels_last or channels_first, but it still doesn't work.
Please help
K.set_image_data_format('channels_last')
dense_layer = 0
layer_size = 4
conv_layer = 1
IMG_SIZE = 40
NAME = "AutoEncoder-{}-conv-{}-nodes-{}-dense-{}".format(conv_layer, layer_size, dense_layer, int(time.time()))
Loading the data
pickle_in = open("X5.pickle","rb")
X = pickle.load(pickle_in)
pickle_in.close()
X=np.array(X)
print( X.shape)
X= X/255
pickle_in = open("y5.pickle","rb")
y = pickle.load(pickle_in)
pickle_in.close()
y=np.array(y)
Starting with the model in Keras
model = Sequential()
# encoding: this is where my problem happens
shape = [1, IMG_SIZE, IMG_SIZE]
print(shape)
model.add(Conv2D(4, (3, 3), input_shape=shape))
Encoding / decoding the data
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(2, (3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(2, (3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2))) #encoded
#decoding
model.add(UpSampling2D((2,2)))
model.add(Conv2D(2, (3,3)))
model.add(Activation('relu'))
model.add(UpSampling2D((2,2)))
model.add(Conv2D(2, (3,3)))
model.add(Activation('relu'))
model.add(UpSampling2D((2,2)))
model.add(Conv2D(4, (3,3)))
model.add(Activation('relu'))
model.add(Conv2D(1,(3,3)))
model.add(Activation('sigmoid'))
tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.summary()

model.fit(X, X,
          batch_size=32,
          epochs=10,
          validation_split=0.3,
          callbacks=[tensorboard])
model.save("64x3-CND.model")
This is because your input shape is incorrect: with data_format set to channels_last, the images are expected to have shape (height, width, channels), but your data has its single channel first, i.e. (1, 40, 40).
Setting the data_format to channels_first should fix your problem:
K.set_image_data_format('channels_first')
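Alternatively, you can keep channels_last by moving the channel axis of the data to the end and adjusting the input shape; a sketch, assuming X is the (62328, 1, 40, 40) array loaded above:

X = np.transpose(X, (0, 2, 3, 1))  # (N, 1, 40, 40) -> (N, 40, 40, 1)
shape = [IMG_SIZE, IMG_SIZE, 1]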

Dimension mismatch in keras

I was trying to run 10-fold cross validation on my dataset. I reshaped my data before training as follows:
data = data.reshape(500,1,1028,1)
data_y = np_utils.to_categorical(data_y, 3)
After this I defined my model:
for train, test in kf.split(data):
    fold += 1
    print("Fold #{}".format(fold))

    x_train = data[train]
    y_train = data_y[train]
    x_test = data[test]
    y_test = data_y[test]
    print(x_train.shape)

    model.add(Conv2D(32, (1, 3), input_shape=(1, 1028, 1)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))
    #model.add(MaxPooling2D(pool_size=(1,2)))
    model.add(Conv2D(34, (1, 4)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(1, 2)))
    model.add(Conv2D(64, (1, 3)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))
    #model.add(MaxPooling2D(pool_size=(1,2)))
    model.add(Conv2D(64, (1, 4)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(1, 2)))
    model.add(Flatten())

    # fully connected for new model
    model.add(Dense(550))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(Dense(250))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(Dense(100))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(Dense(25))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(Dense(3))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])
    model.fit(x_train.reshape(450, 1, 1028, 1), y_train,
              batch_size=5,
              epochs=1,
              verbose=1,
              validation_data=(x_test, y_test))

    pred = model.predict(x_test)
    oos_y.append(y_test)
    pred = np.argmax(pred, axis=1)  # raw probabilities to chosen class (highest probability)
    oos_pred.append(pred)

    # Measure this fold's accuracy
    y_compare = np.argmax(y_test, axis=1)  # for accuracy calculation
    score = metrics.accuracy_score(y_compare, pred)
    print("Fold score (accuracy): {}".format(score))
The problem is that when I run my code, fold 1 runs properly, but fold 2 gives me the following error:
ValueError: Input 0 is incompatible with layer conv2d_5: expected ndim=4, found ndim=2
When I checked, the shape of x_train was (450, 1, 1028, 1).
I am not sure what is causing the error.
You are adding model layers inside the loop over and over. The error is produced when, on the second iteration of the loop, you try to add a convolutional layer after the softmax activation layer (the last layer from the first iteration). After careful inspection, I've come to the following solution to your question.
First, split the dataset into train and test:
for train_index, test_index in kf.split(data):
    X_train, X_test = data[train_index], data[test_index]
    y_train, y_test = data_y[train_index], data_y[test_index]
Then add the layers to the model outside of the loop:
model.add(Conv2D(32, (1, 3),input_shape=(1,1028,1)))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
#model.add(MaxPooling2D(pool_size=(1,2)))
model.add(Conv2D(34, (1, 4)))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(1,2)))
# ... the rest of the code
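Note that if the model is built and compiled only once outside the loop, its weights carry over between folds, so later folds no longer start from scratch. A common pattern (a sketch, not the answer's exact code) is to rebuild a fresh model at the top of each fold via a small factory function:

def build_model():
    model = Sequential()
    model.add(Conv2D(32, (1, 3), input_shape=(1, 1028, 1)))
    model.add(Activation('relu'))
    # ... the remaining layers from above ...
    model.add(Dense(3))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])
    return model

for train_index, test_index in kf.split(data):
    model = build_model()  # fresh weights for every fold
    # ... fit and evaluate as before ...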
