Error when using the Keras API on convolutional layers - Python

I'm trying to build a model that looks like this:
input
  |
conv layers / flatten
   /      \
first_output  |
    \     /
second_output
but it fails at the first conv layer with this error:
ValueError: Layer conv2d_4 was called with an input that isn't a symbolic tensor.
Received type: <class 'keras.layers.convolutional.Conv2D'>.
Full input: [<keras.layers.convolutional.Conv2D object at 0x7f450d7b8630>].
All inputs to the layer should be tensors.
The error points to the layer after the first conv layer, the one with the input_shape call. Help would be appreciated.
Here is the code:
conv1 = Conv2D(8, 4, padding="same", strides=2)(inputs)
conv2 = Conv2D(16, 4, padding="same", strides=2)(conv1)
flat = Flatten()(conv2)
dense1 = Dense(32)(flat)
dense2 = Dense(32)(dense1)
first_output = Dense(64)(dense2)
merged = concatenate([flat, first_output])
second_output_dense1 = Dense(32)(merged)
second_output_dense2 = Dense(32)(second_output_dense1)
second_output = Dense(64)(second_output_dense2)
model = Model(inputs=conv1, outputs=[first_output, second_output])
model.compile(loss="mse", optimizer="adam")
Answer:
I was under the impression that you could build the model without an Input layer and just define the input in the first layer: conv1 = Conv2D(8, 4, padding="same", strides=2, input_shape=(6,8,8,)). That only works with the Sequential API, though; with the functional API you have to drop the input_shape argument and create an explicit Input layer. Here is the fixed code:
from keras.layers import Input, Conv2D, Flatten, Dense, concatenate
from keras.models import Model

inputs = Input(shape=(6, 8, 8))
conv1 = Conv2D(8, 4, padding="same", strides=2)(inputs)
conv2 = Conv2D(16, 4, padding="same", strides=2)(conv1)
flat = Flatten()(conv2)
dense1 = Dense(32)(flat)
dense2 = Dense(32)(dense1)
first_output = Dense(64)(dense2)
merged = concatenate([flat, first_output])
second_output_dense1 = Dense(32)(merged)
second_output_dense2 = Dense(32)(second_output_dense1)
second_output = Dense(64)(second_output_dense2)
model = Model(inputs=inputs, outputs=[first_output, second_output])
model.compile(loss="mse", optimizer="adam")
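To check that the graph is wired correctly, a quick smoke test with one random batch (a minimal sketch, assuming NumPy is available):

import numpy as np

# One random batch matching Input(shape=(6, 8, 8)).
dummy = np.random.rand(1, 6, 8, 8)
first, second = model.predict(dummy)
print(first.shape, second.shape)   # (1, 64) (1, 64)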

Related

How to solve "Input to reshape is a tensor with 737280 values, but the requested shape requires a multiple of 184832"

I want to solve the error below:
InvalidArgumentError: Input to reshape is a tensor with 737280 values, but the requested shape requires a multiple of 184832
I looked at this reference:
Python / Tensorflow - Input to reshape is a tensor with 92416 values, but the requested shape requires a multiple of 2304
However, looking at the answers to that question, I do not know where to fix my code. I would also like to know how to check the size of the input image.
Thank you for your time.
my model:
# For multi_model
activationFunction = 'elu'

def build_multi2(main_input_shape, output_dim):
    inputA = Input(shape=main_input_shape)
    ch1_model = create_convolution_layers(inputA)
    inputB = Input(shape=main_input_shape)
    ch2_model = create_convolution_layers(inputB)
    inputC = Input(shape=main_input_shape)
    ch3_model = create_convolution_layers(inputC)
    inputD = Input(shape=main_input_shape)
    ch4_model = create_convolution_layers(inputD)
    conv = concatenate([ch1_model, ch2_model, ch3_model, ch4_model])
    conv = Flatten()(conv)
    dense = Dense(512)(conv)
    dense = LeakyReLU(alpha=0.1)(dense)
    dense = Dropout(0.5)(dense)
    output = Dense(N_class, activation='softmax')(dense)
    return Model(inputs=[inputA, inputB, inputC, inputD], outputs=[output])

def create_convolution_layers(input_img):
    model = Conv2D(32, (3, 3), padding='same', input_shape=main_input_shape)(input_img)
    model = LeakyReLU(alpha=0.1)(model)
    model = MaxPooling2D((2, 2), padding='same')(model)
    model = Dropout(0.25)(model)
    model = Conv2D(64, (3, 3), padding='same')(model)
    model = LeakyReLU(alpha=0.1)(model)
    model = MaxPooling2D(pool_size=(2, 2), padding='same')(model)
    model = Dropout(0.25)(model)
    model = Conv2D(128, (3, 3), padding='same')(model)
    model = LeakyReLU(alpha=0.1)(model)
    model = MaxPooling2D(pool_size=(2, 2), padding='same')(model)
    model = Dropout(0.4)(model)
    return model
my model call
# For model declaration
N_class = 20
main_input_shape = (150,150, 3)
output_dim = N_class
# opt = tf.keras.optimizers.RMSprop(lr=0.001)
opt = tf.keras.optimizers.Adam()
clf = build_multi2(main_input_shape, output_dim)
clf.compile(optimizer=opt, loss=['categorical_crossentropy'], metrics=['accuracy'])
clf.summary()
my image size: 96×96 pixels
my ImageDataGenerator:
train_imgen = ImageDataGenerator(rescale=1./255,
                                 # shear_range=0.2,
                                 # zoom_range=0.2,
                                 # rotation_range=5.,
                                 horizontal_flip=False)
You have specified your input shape as (150, 150, 3) and your image shape is (96, 96, 3), these are incompatible.
You can either resize your images to (150, 150, 3) or change your input shape to be the same as your image shape.
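A minimal sketch of both options (assuming Pillow is installed and that the images are loaded with flow_from_directory; the directory and file paths here are hypothetical):

from PIL import Image

# Check the size of one input image on disk (hypothetical path).
print(Image.open('data/train/class_a/img_0.png').size)   # e.g. (96, 96)

# Option 1: have the generator resize every image to the model's input size.
train_generator = train_imgen.flow_from_directory(
    'data/train',              # hypothetical directory
    target_size=(150, 150),    # resize to match main_input_shape
    batch_size=32,
    class_mode='categorical')

# Option 2: keep the 96x96 images and change the model instead:
# main_input_shape = (96, 96, 3)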

ValueError: Input 0 of layer conv1d is incompatible with the layer: : expected min_ndim=3, found ndim=2. Full shape received: (None, 19)

I am building a prediction model for sequence data using the Conv1D layer provided by Keras. This is how I did it:
def autoencoder():
    # autoencoder = Model(inputs=input_layer, outputs=decoder)
    input_dim = x_train_scaled.shape[1]
    input_layer = Input(shape=(input_dim,))
    conv1 = Conv1D(filters=32, kernel_size=3, activation='relu')(input_layer)
    batch1 = BatchNormalization()(conv1)
    maxp1 = MaxPooling1D(pool_size=2)(batch1)
    dropout1 = Dropout(0.2)(maxp1)
    conv2 = Conv1D(filters=16, kernel_size=3, activation='relu')(dropout1)
    batch2 = BatchNormalization()(conv2)
    maxp2 = MaxPooling1D(2)(batch2)
    dropout2 = Dropout(0.2)(maxp2)
    conv3 = Conv1D(filters=8, kernel_size=3, activation='relu')(dropout2)
    batch3 = BatchNormalization()(conv3)
    maxp3 = MaxPooling1D(2)(batch3)
    dropout3 = Dropout(0.2)(maxp3)
    # decoder layers
    conv4 = Conv1D(filters=8, kernel_size=3, activation='relu')(dropout3)
    batch4 = BatchNormalization()(conv4)
    dropout4 = Dropout(0.2)(batch4)
    conv5 = Conv1D(filters=16, kernel_size=3, activation='relu')(dropout4)
    batch5 = BatchNormalization()(conv5)
    unsamp5 = UpSampling1D(2)(batch5)
    dropout5 = Dropout(0.2)(unsamp5)
    conv6 = Conv1D(filters=32, kernel_size=3, activation='relu')(dropout5)
    batch6 = BatchNormalization()(conv6)
    unsamp6 = UpSampling1D(2)(batch6)
    dropout6 = Dropout(0.2)(unsamp6)
    decoder = Conv1D(filters=1, kernel_size=3, activation='sigmoid')(dropout6)
    return Model(input_layer, decoder)
Then I train the model to reduce the data dimension with the autoencoder:
model = autoencoder()
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train_scaled, x_train_scaled, epochs=15, batch_size=32,
          verbose=verbose, shuffle=True)
However, it fails with:
ValueError: Input 0 of layer conv1d is incompatible with the layer: :
expected min_ndim=3, found ndim=2. Full shape received: (None, 19)
The training data and validation data shapes are as follows:
x_train_scaled shape (125973, 19)
The dataset used to train the model is NSL-KDD (https://www.unb.ca/cic/datasets/nsl.html).
Conv1D expects a 3+D tensor with shape batch_shape + (steps, input_dim). Add an extra dimension to your input.
Working sample code
# The inputs are 128-length vectors with 10 timesteps, and the batch size is 4.
input_shape = (4, 10, 128)
x = tf.random.normal(input_shape)
y = tf.keras.layers.Conv1D(
    32, 3, activation='relu', input_shape=input_shape[1:])(x)
print(y.shape)
Output
(4, 8, 32)
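For the (125973, 19) training matrix in the question, a minimal sketch of adding that missing dimension (assuming NumPy; the variable names follow the question):

import numpy as np

# Reshape (125973, 19) -> (125973, 19, 1): 19 steps with 1 feature each.
x_train_scaled = np.expand_dims(x_train_scaled, axis=-1)
print(x_train_scaled.shape)   # (125973, 19, 1)

# The model's Input layer must then match:
# input_layer = Input(shape=(19, 1))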

Softmax output returning only ones and zeros?

My convolutional neural network is returning only ones and zeros on the softmax output (out1). Does anyone know why?
def build(self):
    inp = Input(self.obs_shape)
    conv0 = Conv2D(32, 2, 1, padding="same", activation="relu")(inp)
    drop0 = MaxPool2D((2, 2))(conv0)
    conv1 = Conv2D(64, 3, 2, padding="same", activation="relu")(drop0)
    drop1 = MaxPool2D((2, 2))(conv1)
    flat = Flatten()(drop1)
    hid0 = Dense(128, activation='relu')(flat)
    hid1 = Dense(256, activation='relu')(hid0)
    hid = Dense(128, activation='relu')(hid1)
    out1 = Dense(self.action_count, activation='softmax')(hid)
    out2 = Dense(1, activation='linear')(hid)
    model = Model(inputs=[inp], outputs=[out1, out2])
    model.compile(optimizer=tf.keras.optimizers.Adam(lr=self.lr),
                  loss=[self.actor_loss, "mse"])
    return model

def actor_loss(self, y_actual, y_pred):
    actions = tf.cast(y_actual[:, 0], tf.int32)
    returns = y_actual[:, 1]
    mask = tf.one_hot(actions, self.action_count)
    logps = tf.math.log(tf.boolean_mask(y_pred, mask) + 1e-3)
    entropy = -tf.math.reduce_sum(y_pred * tf.math.log(y_pred))
    return -tf.math.reduce_sum(logps * returns) - 0.0001 * entropy
model = Model(inputs = [inp], outputs = [out1, out2])
Look at the line above: there are only two outputs. Your build function has fixed the number of outputs, so you only get 1 or 0. In one word: you need to change your model. (Sorry, my English is bad.)

Converting model declaration in Keras (removing Sequential) into a new one without Sequential returns different shape

I'm a newbie with Python, TensorFlow and Keras.
I have modified this code:
def build_base_network(input_shape):
    seq = Sequential()
    nb_filter = [6, 12]
    kernel_size = 3
    # convolutional layer 1
    seq.add(Convolution2D(nb_filter[0], kernel_size, kernel_size, input_shape=input_shape,
                          border_mode='valid', dim_ordering='th'))
    seq.add(Activation('relu'))
    seq.add(MaxPooling2D(pool_size=(2, 2)))
    seq.add(Dropout(.25))
    # convolutional layer 2
    seq.add(Convolution2D(nb_filter[1], kernel_size, kernel_size, border_mode='valid', dim_ordering='th'))
    seq.add(Activation('relu'))
    seq.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
    seq.add(Dropout(.25))
    # flatten
    seq.add(Flatten())
    seq.add(Dense(128, activation='relu'))
    seq.add(Dropout(0.1))
    seq.add(Dense(50, activation='relu'))
    return seq
Writing my own version:
def build_base_network(input_shape):
    inputs = Input(shape=input_shape)
    nb_filter = [6, 12]
    kernel_size = 3
    conv1 = Conv2D(nb_filter[0], (kernel_size, kernel_size), activation='relu', padding="valid", data_format='channels_first')(inputs)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    drop1 = Dropout(.25)(pool1)
    # convolutional layer 2
    conv2 = Conv2D(nb_filter[1], (kernel_size, kernel_size), activation='relu', padding="valid", data_format="channels_first")(drop1)
    pool2 = MaxPooling2D(pool_size=(2, 2), data_format="channels_first")(conv2)
    drop2 = Dropout(.25)(pool2)
    # flatten
    dense1 = Dense(128, activation='relu')(drop2)
    drop3 = Dropout(0.1)(dense1)
    dense2 = Dense(50, activation='relu')(drop3)
    model = Model(inputs=inputs, outputs=dense2)
    return model
I call it with this code:
input_dim = x_train.shape[2:]
img_a = Input(shape=input_dim)
img_b = Input(shape=input_dim)
base_network = build_base_network(input_dim)
feat_vecs_a = base_network(img_a)
feat_vecs_b = base_network(img_b)
The unmodified code (the first one) returns this shape: (None, 50)
The modified code (my own version) returns this shape: (None, 12, 12, 50)
I haven't modified any other piece of code; build_base_network is the only function I changed.
By the way, input_dim is (1, 56, 46).
What am I doing wrong?
You forgot a Flatten operation:
pool2 = MaxPooling2D(pool_size=(2, 2), data_format="channels_first")(conv2)
drop2 = Dropout(.25)(pool2)
#flatten
dense1 = Dense(128, activation='relu')(drop2)
should then be
pool2 = MaxPooling2D(pool_size=(2, 2), data_format="channels_first")(conv2)
drop2 = Dropout(.25)(pool2)
#flatten
flatten1 = Flatten()(drop2)
dense1 = Dense(128, activation='relu')(flatten1)
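The shape you saw also has a simple explanation: Dense applied to a tensor of rank greater than 2 only transforms the last axis. A minimal sketch illustrating this (the input shape here is hypothetical, just for the demonstration):

from keras.layers import Input, Dense
from keras.models import Model

# Dense on a rank-4 input only maps the last axis (12 -> 50),
# which is why the network returned (None, 12, 12, 50) without Flatten.
x = Input(shape=(12, 12, 12))
y = Dense(50)(x)
print(Model(x, y).output_shape)   # (None, 12, 12, 50)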

How to use part of a tensor in Keras?

I want to use part of the tensor in the output of an LSTM layer, but I don't know how to do it correctly.
My purpose is to tell the LSTM layer the "real" length of its input sequence.
Here is my attempt, but it fails.
Is there anyone who can help solve this problem and explain the details? Thanks a lot~
input_spectrogram = Input(shape=(64, 500, 1))
input_length = Input(shape=(1,))
cnn1 = Conv2D(filters=64, kernel_size=(1, 4), input_shape=(64, 500, 1), padding='same', strides=1, activation='relu', name='conv1')(input_spectrogram)
maxpooling1 = MaxPooling2D(pool_size=(1, 4), name='maxpooling1')(cnn1)
bn1 = BatchNormalization(name='BN1')(maxpooling1)
cnn2 = Conv2D(filters=128, kernel_size=(64, 1), strides=1, activation='relu', name='conv2')(bn1)
maxpooling2 = MaxPooling2D(pool_size=(1, 2), name='maxpooling2')(cnn2)
reshape = Reshape((62, 128))(maxpooling2)
lstm1 = LSTM(128, return_sequences=True, recurrent_dropout=0.3, name='lstm1')(reshape)  # output: (None, 62, 128)
softmax_in = Lambda(lambda x: x[0][x[1], :])([lstm1, input_length])
softmax_ = Dense(10, activation='softmax', name='softmax_')(softmax_in)
seq = Model(inputs=input_spectrogram, outputs=[softmax_])
seq.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
It seems that indexing with a tensor is not fully supported (see the discussion here: https://github.com/tensorflow/tensorflow/issues/206#issuecomment-158435464).
Does it work for you to index with a constant instead?
input_spectrogram = Input(shape=(64, 500, 1))
input_length = Input(shape=(1,))
cnn1 = Conv2D(filters=64, kernel_size=(1, 4), input_shape=(64, 500, 1), padding='same', strides=1, activation='relu', name='conv1')(input_spectrogram)
maxpooling1 = MaxPooling2D(pool_size=(1, 4), name='maxpooling1')(cnn1)
bn1 = BatchNormalization(name='BN1')(maxpooling1)
cnn2 = Conv2D(filters=128, kernel_size=(64, 1), strides=1, activation='relu', name='conv2')(bn1)
maxpooling2 = MaxPooling2D(pool_size=(1, 2), name='maxpooling2')(cnn2)
reshape = Reshape((62, 128))(maxpooling2)
lstm1 = LSTM(128, return_sequences=True, recurrent_dropout=0.3, name='lstm1')(reshape)  # output: (None, 62, 128)
softmax_in = Lambda(lambda x: x[:, 5])(lstm1)
softmax_ = Dense(10, activation='softmax', name='softmax_')(softmax_in)
seq = Model(inputs=input_spectrogram, outputs=[softmax_])
seq.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
Now it works with a constant, so how can I use the "real length" coming from an input layer?
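One possible approach (a sketch of my own, not from the thread): gather the timestep addressed by the length tensor with tf.gather_nd inside a Lambda, and pass input_length to the Model as a second input. This assumes the (None, 62, 128) LSTM output from above; select_real_step is a name I made up:

import tensorflow as tf
from keras.layers import Lambda, Dense
from keras.models import Model

def select_real_step(args):
    seq, lengths = args                                      # (batch, 62, 128), (batch, 1)
    idx = tf.cast(tf.reshape(lengths, [-1]), tf.int32) - 1   # last "real" timestep per sample
    batch_idx = tf.range(tf.shape(seq)[0])
    return tf.gather_nd(seq, tf.stack([batch_idx, idx], axis=1))  # (batch, 128)

softmax_in = Lambda(select_real_step)([lstm1, input_length])
softmax_ = Dense(10, activation='softmax', name='softmax_')(softmax_in)
seq = Model(inputs=[input_spectrogram, input_length], outputs=[softmax_])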
