Using TimeDistributed in Python results in a dimensions error

I'm working with the Keras API - specifically the SimpleRNN and LSTM layers.
When trying to use the TimeDistributed layer, I'm getting an error about dimensions that I don't understand:
Error when checking target: expected activation_5 to have 3 dimensions, but got array with shape (3252, 2).
Here's my code:
batch_size=32
nb_epoch=100
nb_classes=2
label=np.ones((total_length,), dtype='float32')
samples_per_class=2602 # number of normal
s=0
r=samples_per_class
for classIndex in range(nb_classes):
    label[s:r] = classIndex
    s = r
    r = s + samples_per_class
data,label=shuffle(PPG,label,random_state=2)
train_data=[data,label]
(X,y)=(train_data[0],train_data[1])
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.375)
X_train = X_train.reshape(X_train.shape[0], 1000, 1)
X_test = X_test.reshape(X_test.shape[0] , 1000, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
y_train= np.array(y_train)
y_test = np.array(y_test)
model = Sequential()
model.add(SimpleRNN(16, input_shape=(1000,1), return_sequences=True,
activation='softsign', dropout_W=0.2, dropout_U=0.2))
model.add(SimpleRNN(16, return_sequences=True,activation='softsign',
dropout_W=0.2, dropout_U=0.2))
model.add(TimeDistributed(Dense(2)))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='binary_crossentropy', optimizer=Nadam(),
metrics=['accuracy', 'binary_accuracy'])
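For context on the error itself: with return_sequences=True on the second SimpleRNN plus TimeDistributed(Dense(2)), the model emits a 2-class prediction at every one of the 1000 timesteps, so Keras expects targets of shape (samples, 1000, 2), while y_train has shape (3252, 2), i.e. one label per sequence. Below is a minimal sketch of the sequence-level variant (an assumption, since the post doesn't say whether per-timestep labels exist; the dropout arguments use the Keras 2 names dropout/recurrent_dropout):
from keras.models import Sequential
from keras.layers import Dense, SimpleRNN
from keras.optimizers import Nadam

model = Sequential()
model.add(SimpleRNN(16, input_shape=(1000, 1), return_sequences=True,
                    activation='softsign', dropout=0.2, recurrent_dropout=0.2))
# last recurrent layer returns only the final state, so the model output is 2-D
model.add(SimpleRNN(16, return_sequences=False, activation='softsign',
                    dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(2, activation='softmax'))   # plain Dense, no TimeDistributed
model.compile(loss='categorical_crossentropy',   # usual pairing with a 2-unit softmax
              optimizer=Nadam(), metrics=['accuracy'])
If per-timestep labels do exist, the other option is to keep TimeDistributed and supply y with shape (samples, 1000, 2).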

Related

Why am I getting an input shape error after I specified the input shape?

I am trying to train a CNN in Anaconda (Jupyter notebook). The TensorFlow version is 1.14, and I am experimenting with mobilenet_v2. Here is my code:
from tensorflow.keras.models import Model
base_model=tf.keras.applications.mobilenet_v2.MobileNetV2(include_top=False,weights=None,input_shape=(150,150,3))
x = base_model.output
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(1024, activation='relu')(x)
predictions = tf.keras.layers.Dense(9,activation='softmax')(x)
model = Model(inputs = base_model.input,outputs=predictions)
model.compile(loss='categorical_crossentropy',metrics=["accuracy"],optimizer = tf.keras.optimizers.Adam())
history = model.fit(train_data,
epochs=5,steps_per_epoch=len(train_data),
validation_steps=0.2*len(train_data))
The input shape of the images is 150x150x3, and I double-checked the input image size to ensure it is correct.
[screenshot: image size of a random image from the image data]
After I fit the model, I got an error saying: Error when checking input: expected input_8 to have shape (150, 150, 3) but got array with shape (256, 256, 3)
[screenshot: error message]
Here is a screenshot of the model summary; the input layer has the correct shape, so I am not sure where the 256 comes from.
[screenshot: first few layers of the model]
ps: I also tried to build a custom model with just a few layers, but the same error still occurred:
custom_model = tf.keras.Sequential([
tf.keras.layers.Conv2D(10,10,activation='relu',input_shape=(150,150,3)),
tf.keras.layers.MaxPool2D(),
tf.keras.layers.Conv2D(10,10,activation='relu'),
tf.keras.layers.MaxPool2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(9,activation='softmax')
])
custom_model.summary()
custom_model.compile(loss="categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"])
custom_model_history = custom_model.fit(train_data,
epochs=5,
steps_per_epoch=len(train_data),
validation_data=val_data,
validation_steps=len(val_data))
Here is the summary of the custom model: [screenshot: custom model summary]
And here is the error message: [screenshot: error message from custom model]
Please look into your train/test splits (see this example):
import numpy as np
from tensorflow import keras

num_classes = 10
input_shape = (28, 28, 1)
# Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# Convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
Also, while training you have to pass both x and y in your training data.
I think you did not pass x_train and y_train together; instead you passed only x_train (your train_data):
batch_size = 128
epochs = 15
custom_model_history = custom_model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
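One more thing worth checking, separate from the answer above: if train_data comes from ImageDataGenerator.flow_from_directory, its default target_size is (256, 256), which would explain the (256, 256, 3) arrays. A hypothetical sketch (the original post does not show how train_data was built, so the directory path and generator settings here are assumptions):
from tensorflow.keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1.0 / 255)
train_data = datagen.flow_from_directory(
    'path/to/train',             # placeholder directory
    target_size=(150, 150),      # default is (256, 256), the likely source of the mismatch
    class_mode='categorical',    # yields (x, y) batches, so fit() receives both inputs and labels
    batch_size=32)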

I need help finding the correct input shape for my 2 dimensional CNN model

I am trying to build a CNN model for 2D data. I have 1000 rows with 26 columns. This is the code I have; I have tried numerous combinations for my input shape, but I cannot figure out what I am doing wrong.
# CNN
# The known number of output classes.
num_classes = 10
# label encoding
encoder = LabelEncoder()
y_train = encoder.fit_transform(y_train)
y_test = encoder.fit_transform(y_test)
# one hot encoding
y_train = keras.utils.np_utils.to_categorical(y_train, num_classes)
y_test = keras.utils.np_utils.to_categorical(y_test, num_classes)
print(X_train.shape)
print(X_test.shape)
# reshape 2D to 3D
x_train = X_train.reshape(670, 26, 1)
x_test = X_test.reshape(330, 26, 1)
print(x_train.shape)
print(x_test.shape)
# build CNN model
model2 = models.Sequential()
model2.add(layers.Conv1D(64, kernel_size=2, input_shape=(26, 1), activation='relu')) # convolution
model2.add(layers.MaxPool1D(pool_size=2)) # pooling
model2.add(layers.Flatten()) # flatten
model2.add(layers.Dense(128, activation='relu')) # fc
model2.add(layers.Dense(num_classes, activation='softmax'))
# model compile
model2.compile(loss="categorical_crossentropy",
optimizer='adam',
metrics=['accuracy'])
# model.summary()
batch_size = 128
epochs = 5000
model = model2.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=2,
callbacks=early_stopping,
validation_split=0.1,
)
This gives:
ValueError: Input 0 of layer sequential is incompatible with the layer: expected axis -1 of input shape to have value 26 but received input with shape (None, 26, 1)
Your implementation would work perfectly if you load the data correctly and fit the label encoder on the train set only, then transform both the train and test sets:
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras import layers, models
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.utils import to_categorical

df = pd.read_csv('data.csv')
train, test = train_test_split(df, test_size=0.33)
y_train, X_train = train.iloc[:,-1].values, train.iloc[:,1:-1].values
y_test, X_test = test.iloc[:,-1].values, test.iloc[:,1:-1].values
print(X_test.shape, X_train.shape, y_test.shape, y_train.shape)
# CNN
# The known number of output classes.
num_classes = 10
# label encoding
encoder = LabelEncoder()
y_train = encoder.fit_transform(y_train)
y_test = encoder.transform(y_test)
# one hot encoding
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
print(X_train.shape)
print(X_test.shape)
# reshape 2D to 3D
x_train = X_train.reshape(670, 26, 1)
x_test = X_test.reshape(330, 26, 1)
print(x_train.shape)
print(x_test.shape)
# build CNN model
model2 = models.Sequential()
model2.add(layers.Conv1D(64, kernel_size=2, input_shape=(26, 1), activation='relu')) # convolution
model2.add(layers.MaxPool1D(pool_size=2)) # pooling
model2.add(layers.Flatten()) # flatten
model2.add(layers.Dense(128, activation='relu')) # fc
model2.add(layers.Dense(num_classes, activation='softmax'))
# model compile
model2.compile(loss="categorical_crossentropy",
optimizer=Adam(),
metrics=['accuracy'])
# model.summary()
batch_size = 128
epochs = 5000
model = model2.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=2,
callbacks=EarlyStopping(),
validation_split=0.1,
)
output:
(330, 26) (670, 26) (330,) (670,)
(670, 26)
(330, 26)
(670, 26, 1)
(330, 26, 1)
Epoch 1/5000
5/5 - 0s - loss: 244.5596 - accuracy: 0.0829 - val_loss: 151.6749 - val_accuracy: 0.1045
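As a side note on reading such shape errors: for a Conv1D layer, input_shape=(26, 1) means 26 steps with 1 channel, so every sample passed to fit() must have shape (26, 1). A minimal self-contained check (a sketch, assuming the same 10 classes and 26 features as above):
import numpy as np
from tensorflow.keras import layers, models

model2 = models.Sequential([
    layers.Conv1D(64, kernel_size=2, activation='relu', input_shape=(26, 1)),
    layers.MaxPool1D(pool_size=2),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax'),
])
x_dummy = np.zeros((4, 26, 1), dtype='float32')   # (batch, steps, channels)
print(model2(x_dummy).shape)                       # -> (4, 10)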

Convert numpy array shape to tensorflow

I'm constructing an image array with NumPy and then trying to convert it to a tensor to fit a TensorFlow model, but then I get an error.
Data prep
def prep_data(images):
    count = len(images)
    data = np.ndarray((count, CHANNELS, ROWS, COLS), dtype=np.uint8)
    for i, image_file in enumerate(tqdm(images)):
        image = read_image(image_file)
        data[i] = image.T
    return data
train = prep_data(train_images)
test = prep_data(test_images)
Build model
pretrained_base = hub.KerasLayer("https://tfhub.dev/google/imagenet/inception_v1/classification/5")
pretrained_base.trainable = False
model = keras.Sequential([
tf.keras.layers.InputLayer(input_shape=(64, 64, 3)),
pretrained_base,
Flatten(),
Dense(6, activation='relu'),
Dense(1, activation='sigmoid')
])
model.build((None, 64, 64, 3))
model.summary()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(train, labels, test_size=0.25, random_state=0)
train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((X_test, y_test))
def run_catdog():
    history = LossHistory()
    model.fit(train_dataset,
              batch_size=batch_size,
              epochs=nb_epoch,
              verbose=1,
              callbacks=[history, early_stopping])
    predictions = model.predict(test, verbose=0)
    return predictions, history
predictions, history = run_catdog()
WARNING:tensorflow:Model was constructed with shape (None, 64, 64, 3) for input KerasTensor(type_spec=TensorSpec(shape=(None, 64, 64, 3), dtype=tf.float32, name='input_63'), name='input_63', description="created by layer 'input_63'"), but it was called on an input with incompatible shape (None, 3, 64, 64).
I can't quite figure out how to change/convert the NumPy array for TensorFlow.
You don't need to convert the NumPy array to a tensor; just change the shape of your input. np.moveaxis can do the trick. It works like this:
np.moveaxis(your_array, source, destination).
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(train, labels, test_size=0.25, random_state=0)
# now move the channel axis of the train and test inputs to the end: (N, 3, 64, 64) -> (N, 64, 64, 3)
X_train = np.moveaxis(X_train, 1, -1)
X_test = np.moveaxis(X_test, 1, -1)
train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((X_test, y_test))
def run_catdog():
    history = LossHistory()
    model.fit(train_dataset,
              batch_size=batch_size,
              epochs=nb_epoch,
              verbose=1,
              callbacks=[history, early_stopping])
    predictions = model.predict(test, verbose=0)
    return predictions, history
predictions, history = run_catdog()
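As a quick sanity check that the axis move produces what the model needs, a small sketch with a dummy batch shaped like prep_data's output:
import numpy as np

x = np.zeros((10, 3, 64, 64), dtype=np.uint8)   # (N, channels, rows, cols), as built by prep_data
x_channels_last = np.moveaxis(x, 1, -1)         # move the channel axis to the end
print(x_channels_last.shape)                     # -> (10, 64, 64, 3), matching input_shape=(64, 64, 3)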

How to Reshape my data for CNN? ValueError: cannot reshape array of size 267 into shape (267,2)

#Input 13 features
#Output Binary
# 297 data points
x = x.iloc[:,[0,1,2,3,4,5,6,7,8,9,10,11,12]].values
y1 = y['Target']
# Stratified K fold cross Validation
kf = StratifiedKFold(n_splits=10,random_state=None)
num_features=13
num_predictions=2
#Splitting data
for train_index, test_index in kf.split(x,y1):
    x_train, x_test = x[train_index], x[test_index]
    y_train, y_test = y1[train_index], y1[test_index]
# Standardization of data
sc=StandardScaler(0,1)
X_train = sc.fit_transform(x_train)
X_test = sc.transform(x_test)
print(X_train.shape) # o/p: (267,13)
print(y_train.shape) # o/p: (267,)
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], -1))
X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], -1))
# Convert class vectors to binary class matrices.
y_train = np.reshape(y_train, (y_train.shape[0], num_predictions))
y_test = np.reshape(y_test, (y_test.shape[0], num_predictions))
verbose, epochs, batch_size = 1, 10, 32
n_timesteps, n_features, n_outputs = X_train.shape[1],X_train.shape[2],y_train.shape[1]
model = Sequential()
model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps, n_features)))
model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
model.add(Dropout(0.5))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(297, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# fit network
model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=verbose)
# evaluate model
accuracy = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=0)
print(accuracy)
How can I input data to feed into a CNN which requires 3 dimensions of data? How do I solve this issue:
ValueError: cannot reshape array of size 267 into shape (267,2).
Imagine you have a line of 100 squares, and you want to make it a rectangle. Could you turn it into a rectangle by making it 2x100? No, but you could make it 50x2.
In short, you can't make a rectangle that has more values than the original.
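Concretely, y_train here holds 267 scalar labels, so it cannot be reshaped into (267, 2); getting two columns means creating new values, which is exactly what one-hot encoding does (and what the "# Convert class vectors to binary class matrices" comment suggests was intended). A minimal sketch with stand-in labels:
import numpy as np
from tensorflow.keras.utils import to_categorical

y_train = np.random.randint(0, 2, size=267)     # stand-in for the 267 binary labels
y_train = to_categorical(y_train, 2)            # one-hot encode instead of reshaping
print(y_train.shape)                            # -> (267, 2)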

Fix the reshaping target when combining Keras CNN with SVM classifier

I was trying to use a combination of an SVM with my CNN code, so I used this code. However, I ran into problems in the part that reshapes the target to fit the SVM.
The original X_train.shape, X_test.shape, y_train.shape, y_test.shape are, respectively:
(2480, 1, 513, 125)
(560, 1, 513, 125)
(2480, 2)
(560, 2)
After that when I tried this :
exTrain = getFeature([X_train[:50], 0])[0]
exTest = getFeature([X_test[:10], 0])[0]
y_train = y_train[:50].reshape(y_train[:50].shape[0],)
I got this error message :
ValueError: cannot reshape array of size 100 into shape (50,)
This is my code
import os
import numpy as np
from sklearn.metrics import confusion_matrix
from plot_metrics import plot_accuracy, plot_loss, plot_roc_curve
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
K.set_image_dim_ordering('th')
np.random.seed(15)
"""
CNN used to classify spectrograms of normal participants (0) or depressed
participants (1). Using Theano backend and Theano image_dim_ordering:
(# channels, # images, # rows, # cols)
(1, 3040, 513, 125)
"""
def preprocess(X_train, X_test):
"""
Convert from float64 to float32 and normalize to decibels
relative to full scale (dBFS) for the 4 sec clip.
"""
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train = np.array([(X - X.min()) / (X.max() - X.min()) for X in X_train])
X_test = np.array([(X - X.min()) / (X.max() - X.min()) for X in X_test])
return X_train, X_test
def prep_train_test(X_train, y_train, X_test, y_test, nb_classes):
"""
Prep samples and labels for Keras input by normalizing and converting
labels to a categorical representation.
"""
print('Train on {} samples, validate on {}'.format(X_train.shape[0],
X_test.shape[0]))
# normalize to dBfS
X_train, X_test = preprocess(X_train, X_test)
# Convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
return X_train, X_test, Y_train, Y_test
def keras_img_prep(X_train, X_test, img_dep, img_rows, img_cols):
"""
Reshape feature matrices for Keras' expected input dimensions.
For 'th' (Theano) dim_order, the model expects dimensions:
(# channels, # images, # rows, # cols).
"""
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
return X_train, X_test, input_shape
def cnn(X_train, y_train, X_test, y_test, batch_size,
nb_classes, epochs, input_shape):
"""
The Convolutional Neural Net architecture for classifying the audio clips
as normal (0) or depressed (1).
"""
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='valid', strides=1,
input_shape=input_shape, activation='relu'))
model.add(MaxPooling2D(pool_size=(4, 3), strides=(1, 3)))
model.add(Conv2D(32, (1, 3), padding='valid', strides=1,
input_shape=input_shape, activation='relu'))
model.add(MaxPooling2D(pool_size=(1, 3), strides=(1, 3)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,
verbose=1, validation_data=(X_test, y_test))
# Evaluate accuracy on test and train sets
score_train = model.evaluate(X_train, y_train, verbose=0)
print('Train accuracy:', score_train[1])
score_test = model.evaluate(X_test, y_test, verbose=0)
print('Test accuracy:', score_test[1])
return model, history
def model_performance(model, X_train, X_test, y_train, y_test):
"""
Evaluation metrics for network performance.
"""
y_test_pred = model.predict_classes(X_test)
y_train_pred = model.predict_classes(X_train)
y_test_pred_proba = model.predict_proba(X_test)
y_train_pred_proba = model.predict_proba(X_train)
# Converting y_test back to 1-D array for confusion matrix computation
y_test_1d = y_test[:, 1]
# Computing confusion matrix for test dataset
conf_matrix = standard_confusion_matrix(y_test_1d, y_test_pred)
print("Confusion Matrix:")
print(conf_matrix)
return y_train_pred, y_test_pred, y_train_pred_proba, \
y_test_pred_proba, conf_matrix
def standard_confusion_matrix(y_test, y_test_pred):
"""
Make confusion matrix with format:
-----------
| TP | FP |
-----------
| FN | TN |
-----------
Parameters
----------
y_true : ndarray - 1D
y_pred : ndarray - 1D
Returns
-------
ndarray - 2D
"""
[[tn, fp], [fn, tp]] = confusion_matrix(y_test, y_test_pred)
return np.array([[tp, fp], [fn, tn]])
if __name__ == '__main__':
print('Retrieving locally')
X_train = np.load('E:/depression detection/data/processed/train_samples.npz')
y_train = np.load('E:/depression detection/data/processed/train_labels.npz')
X_test = np.load('E:/depression detection/data/processed/test_samples.npz')
y_test = np.load('E:/depression detection/data/processed/test_labels.npz')
X_train, y_train, X_test, y_test = \
X_train['arr_0'], y_train['arr_0'], X_test['arr_0'], y_test['arr_0']
# CNN parameters
batch_size = 32
nb_classes = 2
epochs = 1
# normalize data and prep for Keras
print('Processing images for Keras...')
X_train, X_test, y_train, y_test = prep_train_test(X_train, y_train,
X_test, y_test,
nb_classes=nb_classes)
# 513x125x1 for spectrogram with crop size of 125 pixels
img_rows, img_cols, img_depth = X_train.shape[1], X_train.shape[2], 1
# reshape image input for Keras
# used Theano dim_ordering (th), (# chans, # images, # rows, # cols)
X_train, X_test, input_shape = keras_img_prep(X_train, X_test, img_depth,
img_rows, img_cols)
# run CNN
print('Fitting model...')
model, history = cnn(X_train, y_train, X_test, y_test, batch_size,
nb_classes, epochs, input_shape)
# evaluate model
print('Evaluating model...')
y_train_pred, y_test_pred, y_train_pred_proba, y_test_pred_proba, \
conf_matrix = model_performance(model, X_train, X_test, y_train, y_test)
for l in range(len(model.layers)):
print(l, model.layers[l])
# feature extraction layer
getFeature = K.function([model.layers[0].input, K.learning_phase()],
[model.layers[7].output])
# classification layer
getPrediction = K.function([model.layers[8].input, K.learning_phase()],
[model.layers[9].output])
exTrain = getFeature([X_train[:50], 0])[0]
exTest = getFeature([X_test[:10], 0])[0]
y_train = y_train[:50].reshape(y_train[:50].shape[0],)
y_test = y_test[:10]
print(exTrain.shape, exTest.shape, y_train.shape, y_test.shape)
from sklearn.svm import SVC
clf = SVC(gamma='auto')
clf.fit(exTrain, y_train)
score_train = model.evaluate(exTrain, y_train, verbose=0)
print('Train accuracy:', score_train[1])
score_test = model.evaluate(exTest, y_test, verbose=0)
print('Test accuracy:', score_test[1])
I don't know how to fix this problem.
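For what it's worth, the reshape fails because y_train is one-hot encoded: y_train[:50] has shape (50, 2), i.e. 100 values, which cannot become shape (50,). Since SVC expects 1-D class labels, a hedged sketch of the usual conversion is to take the argmax instead of reshaping, and to score the SVM with clf.score rather than model.evaluate:
import numpy as np
from sklearn.svm import SVC

# exTrain / exTest are the CNN features extracted above with getFeature
y_train_svm = np.argmax(y_train[:50], axis=1)   # (50, 2) one-hot -> (50,) labels
y_test_svm = np.argmax(y_test[:10], axis=1)     # (10, 2) one-hot -> (10,) labels
clf = SVC(gamma='auto')
clf.fit(exTrain, y_train_svm)
print('SVM train accuracy:', clf.score(exTrain, y_train_svm))
print('SVM test accuracy:', clf.score(exTest, y_test_svm))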
