A CGAN question that gives me "InvalidArgumentError" - python

I was training a CGAN on the MNIST dataset from Keras, copying the example from the Keras documentation. When I first hit the problem I tried to record the shape of every variable being concatenated, but I couldn't find any shape as strange as the one the error reports. Here is the code:
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow as tf
import numpy as np
batch_size = 64
num_channels = 1
num_classes = 10
image_size = 28
latent_dim = 128
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_labels = np.concatenate([y_train, y_test])
all_digits = all_digits.astype("float32") / 255.0
# print(all_digits.shape)
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))
# print(all_digits.shape)
all_labels = keras.utils.to_categorical(all_labels, 10)
dataset = tf.data.Dataset.from_tensor_slices((all_digits, all_labels))
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)
generator_in_channels = latent_dim + num_classes
discriminator_in_channels = num_channels + num_classes
discriminator = keras.Sequential(
    [
        keras.layers.InputLayer((28, 28, discriminator_in_channels)),
        layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same'),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'),
        layers.LeakyReLU(alpha=0.2),
        layers.GlobalMaxPool2D(),
        layers.Dense(1),
    ],
    name='discriminator'
)
generator = keras.Sequential(
    [
        keras.layers.InputLayer((generator_in_channels,)),
        layers.Dense(7 * 7 * generator_in_channels),
        layers.LeakyReLU(alpha=0.2),
        layers.Reshape((7, 7, generator_in_channels)),
        layers.Conv2DTranspose(8, (4, 4), strides=(2, 2), padding='same'),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2DTranspose(8, (4, 4), strides=(2, 2), padding='same'),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2D(1, (3, 3), padding='same', activation='sigmoid'),
    ],
    name='generator'
)
generator.summary()
discriminator.summary()
class ConditionalGan(keras.Model):
    def __init__(self, discriminator, generator, latent_dim):
        super(ConditionalGan, self).__init__()
        self.discriminator = discriminator
        self.generator = generator
        self.latent_dim = latent_dim
        self.gen_loss_tracker = keras.metrics.Mean(name='generator_loss')
        self.disc_loss_tracker = keras.metrics.Mean(name='discriminator_loss')

    @property
    def metrics(self):
        return [self.gen_loss_tracker, self.disc_loss_tracker]

    def compile(self, d_optimizer, g_optimizer, loss_fn):
        super(ConditionalGan, self).compile()
        self.d_optimizer = d_optimizer
        self.g_optimizer = g_optimizer
        self.loss_fn = loss_fn

    def train_step(self, data):
        real_image, one_hot_labels = data
        image_one_hot_labels = one_hot_labels[:, :, None, None]
        image_one_hot_labels = tf.repeat(image_one_hot_labels, repeats=[image_size * image_size])
        image_one_hot_labels = tf.reshape(image_one_hot_labels, shape=(-1, image_size, image_size, num_classes))

        # Discriminator
        random_latent_vector = tf.random.normal(shape=(batch_size, latent_dim))
        print(random_latent_vector.shape)
        print(one_hot_labels.shape)
        random_vector_labels = tf.concat((random_latent_vector, one_hot_labels), axis=1)
        generator_image = self.generator(random_vector_labels)
        fake_image_and_labels = tf.concat([generator_image, image_one_hot_labels], -1)
        real_image_and_labels = tf.concat([real_image, image_one_hot_labels], -1)
        print(generator_image.shape)
        print(real_image.shape)
        combine_images = tf.concat([real_image_and_labels, fake_image_and_labels], 0)
        labels = tf.concat([tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], 0)
        with tf.GradientTape() as tape:
            pred = self.discriminator(combine_images)
            d_loss = self.loss_fn(labels, pred)
        grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
        self.d_optimizer.apply_gradients(zip(grads, self.discriminator.trainable_weights))

        # Generator
        random_latent_vector = tf.random.normal(shape=(batch_size, latent_dim))
        random_vector_labels = tf.concat((random_latent_vector, one_hot_labels), -1)
        print(random_latent_vector.shape)
        print(one_hot_labels.shape)
        misleading_labels = tf.zeros((batch_size, 1))
        with tf.GradientTape() as tape:
            fake_image = self.generator(random_vector_labels)
            print(fake_image.shape)
            print(image_one_hot_labels.shape)
            fake_image_and_labels = tf.concat([fake_image, image_one_hot_labels], -1)
            pred = self.discriminator(fake_image_and_labels)
            g_loss = self.loss_fn(misleading_labels, pred)
        grads = tape.gradient(g_loss, self.generator.trainable_weights)
        self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))

        # Monitor loss.
        self.gen_loss_tracker.update_state(g_loss)
        self.disc_loss_tracker.update_state(d_loss)
        return {
            'g_loss': self.gen_loss_tracker.result(),
            'd_loss': self.disc_loss_tracker.result()
        }
cond_gan = ConditionalGan(
discriminator=discriminator, generator=generator, latent_dim=latent_dim
)
cond_gan.compile(
d_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
g_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
loss_fn=keras.losses.BinaryCrossentropy(from_logits=True),
)
cond_gan.fit(dataset, epochs=20)
The error is below. I have searched for answers; some people solve this kind of problem by changing the batch_size, but I tried that and it didn't work. The error also doesn't point to the right line; the code runs in a Kaggle notebook, so the reported location may be off.
InvalidArgumentError: ConcatOp : Dimensions of inputs should match: shape[0] = [64,128] vs. shape[1] = [48,10]
[[node concat (defined at tmp/ipykernel_34/2191800439.py:95) ]] [Op:__inference_train_function_38770]
Errors may have originated from an input operation.
Input Source operations connected to node concat:
IteratorGetNext (defined at tmp/ipykernel_34/2191800439.py:156)
random_normal (defined at tmp/ipykernel_34/2191800439.py:90)
Function call stack:
train_function
Thank you for answering!

The problem is here: random_vector_labels = tf.concat((random_latent_vector,one_hot_labels),-1)
It throws "InvalidArgumentError" because random_latent_vector has shape [64, 128] while one_hot_labels has shape [48, 10]: random_latent_vector has 64 rows of data but one_hot_labels only has 48. That happens because the 70,000 MNIST samples do not divide evenly into batches of 64, so the last batch contains only 48 examples, while the latent vectors are always generated with the hard-coded batch_size of 64. When you concatenate two tensors along the feature axis, make sure they have the same number of rows, i.e. make the batch dimension of random_latent_vector match that of one_hot_labels.
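A rough sketch of two common ways to line the shapes up, against the code above (a sketch, not a drop-in patch):

# Option 1: inside train_step, derive the batch size from the incoming batch
# instead of using the global batch_size constant everywhere.
batch_size = tf.shape(real_image)[0]
random_latent_vector = tf.random.normal(shape=(batch_size, latent_dim))

# Option 2: drop the incomplete final batch when building the dataset.
dataset = dataset.shuffle(buffer_size=1024).batch(64, drop_remainder=True)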

Related

Same input, same model, same weights but getting different results

I'm fine-tuning sentence-bert for a task like sentence cosine-similarity calculation in TensorFlow. I set up an encoder, let's say encoder1, using the code below:
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('sentence-transformers/all-MiniLM-L12-v2')
embeddings = model.encode(sentences)
This uses the sentence-transformers API. I also set up another encoder, call it encoder2, using the code below:
from transformers import AutoTokenizer, TFAutoModel
import tensorflow as tf
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/all-MiniLM-L12-v2', from_pt=True)
model_tf = TFAutoModel.from_pretrained('sentence-transformers/all-MiniLM-L12-v2', from_pt=True)
encoded_input = tokenizer(sentences, padding=True, truncation=True, max_length=128, return_tensors='tf')
outputs = model_tf(**encoded_input)
def mean_pooling(model_output, input_mask):
    # seq_output shape = [batch_size, max_seq_len, hidden_size]
    # input_mask shape = [batch_size, max_seq_len]
    # expand input_mask
    seq_output = model_output[0]
    input_mask_expanded = tf.cast(tf.broadcast_to(tf.expand_dims(input_mask, -1), seq_output.shape), tf.float32)
    # pooled = tf.reduce_sum(seq_output * input_mask_expanded, 1) / tf.clip_by_value(tf.reduce_sum(input_mask_expanded, 1), clip_value_min=-1, clip_value_max=)
    pooled = tf.reduce_sum(seq_output * input_mask_expanded, 1) / tf.reduce_sum(input_mask_expanded, 1)
    # shape = [batch_size, hidden_size]
    return pooled
sentence_embeddings = mean_pooling(outputs, encoded_input['attention_mask'])
pooled_output, _ = tf.linalg.normalize(sentence_embeddings, 2, axis=1)
This loads a pre-trained model from Hugging Face, which I have tested, and I'm sure it produces the same results (pooled_output and embeddings match) as encoder1.
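A minimal sketch of such a comparison (using the embeddings and pooled_output computed above; the tolerance is an assumption):

import numpy as np

# embeddings: from encoder1 (SentenceTransformer.encode), a NumPy array
# pooled_output: from encoder2 (TFAutoModel + mean pooling + L2 normalization)
print(np.allclose(embeddings, pooled_output.numpy(), atol=1e-5))  # expected: True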
However, the weird thing is that when I load encoder2 into my tf.Model and run a classifier that tells whether two sentences are close, it gives different values even with the same input, the same model and the same trainable weights. Does the network randomly initialize everything after I load the model?
Here's my encoder code:
class ApplicationCLS(tf.keras.layers.Layer):
    def __init__(self, bert_encoder_path, batch_size):
        super().__init__()
        self.bert_encoder = TFAutoModel.from_pretrained(bert_encoder_path, from_pt=True)
        self.classifier = CLSlayer(256, 1)
        self.loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True)
        self.metric_fn = tf.keras.metrics.BinaryAccuracy(name="accuracy")
        self.auc_fn = tf.keras.metrics.AUC()
        self.batch_size = batch_size

    def mean_pooling(self, model_output, input_mask):
        seq_output = model_output[0]
        shape = [self.batch_size, seq_output.shape[1], seq_output.shape[2]]
        input_mask_expanded = tf.cast(tf.broadcast_to(tf.expand_dims(input_mask, -1), shape), tf.float32)
        pooled = tf.reduce_sum(seq_output * input_mask_expanded, 1) / tf.reduce_sum(input_mask_expanded, 1)
        return pooled

    def call(self, inputs, labels, training=True):
        sent1_inputs = inputs["sent1_inputs"]
        sent2_inputs = inputs["sent2_inputs"]
        # inputs: {"input_ids": input_ids, "input_mask": input_mask, "type_ids": type_id}
        sent1_outputs = self.bert_encoder(**sent1_inputs)
        tf.print("output:", sent1_outputs[0][0])
        sent1_embeddings = self.mean_pooling(sent1_outputs, sent1_inputs['attention_mask'])
        sent1_pooled_output, _ = tf.linalg.normalize(sent1_embeddings, 2, axis=1)
        sent2_outputs = self.bert_encoder(**sent2_inputs)
        sent2_embeddings = self.mean_pooling(sent2_outputs, sent2_inputs['attention_mask'])
        sent2_pooled_output, _ = tf.linalg.normalize(sent2_embeddings, 2, axis=1)
        # concat
        interaction = tf.concat([sent1_pooled_output, sent2_pooled_output], 1)
        # classification
        logits = self.classifier(interaction)
        loss = self.loss_fn(labels, logits)
        self.add_loss(loss)
        acc = self.metric_fn(labels, logits)
        auc = self.auc_fn(labels, logits)
        self.add_metric(loss, name="loss")
        self.add_metric(acc, name="acc")
        self.add_metric(auc, name="auc")
        return tf.nn.softmax(logits, name="prediction")
Here sent1_inputs is the same, but the printed outputs are different. What happened?

Error: Shapes are incompatible when using TFDebertaForSequenceClassification for binary classification

I was trying to use the recall metric with the TFDebertaForSequenceClassification model for binary classification with labels 0 and 1, but I got this error:
ValueError: Shapes (32, 2) and (32, 1) are incompatible
Does anyone know how to solve it?
This is how I processed the data:
with tf.device('/cpu:0'):
    train_data = tf.data.Dataset.from_tensor_slices((train_df["body"].values, train_df["label"].values))
    valid_data = tf.data.Dataset.from_tensor_slices((valid_df.body.values, valid_df.label.values))

def map_example_to_dict(input_ids, attention_masks, token_type_ids, label):
    X = {
        "input_ids": input_ids,
        "token_type_ids": token_type_ids,
        "attention_mask": attention_masks,
    }
    Y = label
    return X, Y

def encode_examples(df, limit=-1):
    # prepare lists, so that we can build up the final TensorFlow dataset from slices
    input_ids_list = []
    token_type_ids_list = []
    attention_mask_list = []
    labels = []
    for data in df.to_numpy():
        bert_input = tokenizer(data[2], add_special_tokens=True,
                               max_length=MAX_SEQ_LEN,
                               padding='max_length',
                               return_token_type_ids=True,
                               truncation=True)
        input_ids_list.append(bert_input['input_ids'])
        token_type_ids_list.append(bert_input['token_type_ids'])
        attention_mask_list.append(bert_input['attention_mask'])
        labels.append([data[3]])
    return tf.data.Dataset.from_tensor_slices((input_ids_list, attention_mask_list, token_type_ids_list, labels)).map(map_example_to_dict)

encoded_train_input = encode_examples(train_df).shuffle(1000).batch(32, drop_remainder=True)
encoded_valid_input = encode_examples(valid_df).shuffle(1000).batch(32, drop_remainder=True)
And this is how I set up the model:
model = TFDebertaForSequenceClassification.from_pretrained('kamalkraj/deberta-base')
lr = 2e-6 #1e-6 #2e-5 #3e-5
epochs = 1
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=[tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.Recall()])
history = model.fit(encoded_train_input, validation_data=encoded_valid_input, epochs=epochs, verbose=1)
Try passing the number of classes to the model.
configure = DebertaConfig(model_name, gradient_checkpointing=True, num_labels=3)
model=TFDebertaForSequenceClassification.from_pretrained('kamalkraj/deberta-base', config=configure)
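For the binary case in the question, a hedged sketch of one way to make the shapes line up (the checkpoint name is taken from the question; num_labels=2 and the sparse loss are my assumptions): with integer labels of shape (32, 1), two logits pair with a sparse categorical loss, while a single logit (num_labels=1) would pair with BinaryCrossentropy(from_logits=True).

from transformers import DebertaConfig, TFDebertaForSequenceClassification
import tensorflow as tf

# Sketch: two logits with integer labels 0/1 of shape (batch, 1).
config = DebertaConfig.from_pretrained('kamalkraj/deberta-base', num_labels=2)
model = TFDebertaForSequenceClassification.from_pretrained('kamalkraj/deberta-base', config=config)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=2e-6),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

Note that tf.keras.metrics.Recall expects thresholded probabilities for a single output, so with a two-logit setup it would have to be computed from the predicted class instead.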

Evaluate U-Net by layer

I come from a medical background and am a newbie in machine learning. I am trying to train a U-Net model with Keras and TensorFlow for image segmentation. However, my loss value is all NaN and the prediction is all black.
I would like to check the U-Net layer by layer, but I don't know how to feed the data or where to start. What I mean by checking each layer is: feed my images to the first layer, look at its output, then move on to the second layer, and so on until the last layer, just to see how the output is produced at each stage and to find where the NaN values start. I would really appreciate your help.
This is my code:
import os
import matplotlib.pyplot as plt
import tensorflow as tf
from keras_preprocessing.image import ImageDataGenerator
from tensorflow import keras
from tensorflow import keras
#Constants
SEED = 42
BATCH_SIZE_TRAIN = 16
BATCH_SIZE_TEST = 16
IMAGE_HEIGHT = 512
IMAGE_WIDTH = 512
IMG_SIZE = (IMAGE_HEIGHT, IMAGE_WIDTH)
data_dir = 'data'
data_dir_train = os.path.join(data_dir, 'training')
data_dir_train_image = os.path.join(data_dir_train, 'img')
data_dir_train_mask = os.path.join(data_dir_train, 'mask')
data_dir_test = os.path.join(data_dir, 'test')
data_dir_test_image = os.path.join(data_dir_test, 'img')
data_dir_test_mask = os.path.join(data_dir_test, 'mask')
NUM_TRAIN = 1413
NUM_TEST = 210
NUM_OF_EPOCHS = 10
def create_segmentation_generator_train(img_path, mask_path, BATCH_SIZE):
    data_gen_args = dict(rescale=1./255)
    img_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(*data_gen_args)
    img_generator = img_datagen.flow_from_directory(img_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    mask_generator = mask_datagen.flow_from_directory(mask_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    return zip(img_generator, mask_generator)

def create_segmentation_generator_test(img_path, mask_path, BATCH_SIZE):
    data_gen_args = dict(rescale=1./255)
    img_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(*data_gen_args)
    img_generator = img_datagen.flow_from_directory(img_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    mask_generator = mask_datagen.flow_from_directory(mask_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    return zip(img_generator, mask_generator)
def display(display_list):
    plt.figure(figsize=(15, 15))
    title = ['Input Image', 'True Mask', 'Predicted Mask']
    for i in range(len(display_list)):
        plt.subplot(1, len(display_list), i+1)
        plt.title(title[i])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]), cmap='gray')
    plt.show()

def show_dataset(datagen, num=1):
    for i in range(0, num):
        image, mask = next(datagen)
        display([image[0], mask[0]])
def unet(n_levels, initial_features=32, n_blocks=2, kernel_size=3, pooling_size=2, in_channels=1, out_channels=1):
    # n_blocks = how many convs in each level
    inputs = keras.layers.Input(shape=(IMAGE_HEIGHT, IMAGE_WIDTH, in_channels))
    x = inputs
    convpars = dict(kernel_size=kernel_size, activation='relu', padding='same')
    # downstream
    skips = {}
    for level in range(n_levels):
        for _ in range(n_blocks):
            x = keras.layers.Conv2D(initial_features * 2 ** level, **convpars)(x)
        if level < n_levels - 1:
            skips[level] = x
            x = keras.layers.MaxPool2D(pooling_size)(x)
    # upstream
    for level in reversed(range(n_levels - 1)):
        x = keras.layers.Conv2DTranspose(initial_features * 2 ** level, strides=pooling_size, **convpars)(x)
        x = keras.layers.Concatenate()([x, skips[level]])
        for _ in range(n_blocks):
            x = keras.layers.Conv2D(initial_features * 2 ** level, **convpars)(x)
    # output
    activation = 'sigmoid' if out_channels == 1 else 'softmax'
    x = keras.layers.Conv2D(out_channels, kernel_size=1, activation='sigmoid', padding='same')(x)
    return keras.Model(inputs=[inputs], outputs=[x], name=f'UNET-L{n_levels}-F{initial_features}')
EPOCH_STEP_TRAIN = NUM_TRAIN // BATCH_SIZE_TRAIN
EPOCH_STEP_TEST = NUM_TEST // BATCH_SIZE_TRAIN
model = unet(4)
model.compile(optimizer="adam", loss='binary_crossentropy', metrics=['accuracy'])
model.fit_generator(generator=train_generator, steps_per_epoch=EPOCH_STEP_TRAIN, validation_data=test_generator, validation_steps=EPOCH_STEP_TEST, epochs=NUM_OF_EPOCHS)
def show_prediction(datagen, num=1):
    for i in range(0, num):
        image, mask = next(datagen)
        pred_mask = model.predict(image)[0] > 0.5
        display([image[0], mask[0], pred_mask])
show_prediction(test_generator, 2)
To investigate your model layer by layer, the example below shows how to print a summary of the model and how to save it:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# create the input
inputs = keras.Input(shape=(1,))
# create a layer
dense = layers.Dense(64, activation="relu")
x = dense(inputs)
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10)(x)
# assemble the model
model = keras.Model(inputs=inputs, outputs=outputs, name="Spesiaali")
# inspect it
model.summary()
# save it
model.save(".\model_to_be_investigated_by_someone_else_to_help_you")
...this makes it possible for you to see the whole model structure for "debugging your AI". If you do not find the solution yourself, add the last line of the example to your own code, put the resulting folder e.g. on GitHub, and ask someone else to look at the structure of your model to help you solve the problem.
(In the accompanying screenshot, the blue marking shows the output of model.summary() and the red line marks the output shape of the first dense layer.)
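Since the question specifically asks how to see the output of every layer, here is a minimal sketch of one way to do that (assuming the functional model returned by unet(4) above and one batch from your generator; the variable names are placeholders):

import numpy as np
from tensorflow import keras

# model = unet(4)                                   # the model from the question
# image_batch, mask_batch = next(train_generator)   # one batch of images/masks

# Build a second model that returns every intermediate activation (skip the Input layer).
inspection_model = keras.Model(
    inputs=model.input,
    outputs=[layer.output for layer in model.layers[1:]],
)

activations = inspection_model(image_batch)
for layer, activation in zip(model.layers[1:], activations):
    has_nan = bool(np.isnan(activation.numpy()).any())
    print(f"{layer.name:30s} shape={activation.shape} NaN: {has_nan}")

Running this before and after a few training steps should show in which layer the NaN values first appear.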

TensorFlow ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type list)

I'm trying to run this code in Colab. Interestingly, I was running the same code in Colab a few days ago, but now it won't work; the code still works in a Kaggle kernel. I tried changing the TensorFlow version, but each version gives a different error. Why do you think I can't run this code? This is the Colab notebook if you need more info.
Thanks in advance!
class DisasterDetector:
    def __init__(self, tokenizer, bert_layer, max_len=30, lr=0.0001,
                 epochs=15, batch_size=32, dtype=tf.int32,
                 activation='sigmoid', optimizer='SGD',
                 beta_1=0.9, beta_2=0.999, epsilon=1e-07,
                 metrics='accuracy', loss='binary_crossentropy'):
        self.lr = lr
        self.epochs = epochs
        self.max_len = max_len
        self.batch_size = batch_size
        self.tokenizer = tokenizer
        self.bert_layer = bert_layer
        self.models = []
        self.activation = activation
        self.optimizer = optimizer
        self.dtype = dtype
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon
        self.metrics = metrics
        self.loss = loss

    def encode(self, texts):
        all_tokens = []
        masks = []
        segments = []
        for text in texts:
            tokenized = self.tokenizer.convert_tokens_to_ids(['[CLS]'] + self.tokenizer.tokenize(text) + ['[SEP]'])
            len_zeros = self.max_len - len(tokenized)
            padded = tokenized + [0] * len_zeros
            mask = [1] * len(tokenized) + [0] * len_zeros
            segment = [0] * self.max_len
            all_tokens.append(padded)
            masks.append(mask)
            segments.append(segment)
        print(len(all_tokens[0]))
        return np.array(all_tokens), np.array(masks), np.array(segments)

    def make_model(self):
        input_word_ids = Input(shape=(self.max_len,), dtype=tf.int32,
                               name='input_word_ids')
        input_mask = Input(shape=(self.max_len,), dtype=tf.int32,
                           name='input_mask')
        segment_ids = Input(shape=(self.max_len,), dtype=tf.int32,
                            name='segment_ids')
        # pooled output is the output of dimension and
        pooled_output, sequence_output = self.bert_layer([input_word_ids,
                                                          input_mask,
                                                          segment_ids])
        clf_output = sequence_output[:, 0, :]
        out = tf.keras.layers.Dense(1, activation=self.activation)(clf_output)
        # out = tf.keras.layers.Dense(1, activation='sigmoid', input_shape=(clf_output,))(clf_output)
        model = Model(inputs=[input_word_ids, input_mask, segment_ids],
                      outputs=out)
        if self.optimizer is 'SGD':
            optimizer = SGD(learning_rate=self.lr)
        elif self.optimizer is 'Adam':
            optimizer = Adam(learning_rate=self.lr, beta_1=self.beta_1,
                             beta_2=self.beta_2, epsilon=self.epsilon)
        model.compile(loss=self.loss, optimizer=self.optimizer,
                      metrics=[self.metrics])
        return model

    def train(self, x, k=3):
        kfold = StratifiedKFold(n_splits=k, shuffle=True)
        for fold, (train_idx, val_idx) in enumerate(kfold.split(x['cleaned_text'], x['target'])):
            print('fold: ', fold)
            x_trn = self.encode(x.loc[train_idx, 'cleaned_text'])
            x_val = self.encode(x.loc[val_idx, 'cleaned_text'])
            y_trn = np.array(x.loc[train_idx, 'target'], dtype=np.uint8)
            y_val = np.array(x.loc[val_idx, 'target'], dtype=np.uint8)
            print('the data type of y train: ', type(y_trn))
            print('x_val shape', x_val[0].shape)
            print('x_trn shape', x_trn[0].shape)
            model = self.make_model()
            print('model made.')
            model.fit(x_trn, tf.convert_to_tensor(y_trn),
                      validation_data=(x_val, tf.convert_to_tensor(y_val)),
                      batch_size=self.batch_size, epochs=self.epochs)
            self.models.append(model)
After calling the train function of the class, I get the error below.
classifier = DisasterDetector(tokenizer = tokenizer, bert_layer = bert_layer, max_len = max_len, lr = 0.0001,
epochs = 10, activation = 'sigmoid',
batch_size = 32,optimizer = 'SGD',
beta_1=0.9, beta_2=0.999, epsilon=1e-07)
classifier.train(train_cleaned)
Here is the error:
ValueError                                Traceback (most recent call last)
<ipython-input-10-106c756f2e47> in <module>()
----> 1 classifier.train(train_cleaned)
8 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
96 dtype = dtypes.as_dtype(dtype).as_datatype_enum
97 ctx.ensure_initialized()
---> 98 return ops.EagerTensor(value, ctx.device_name, dtype)
99
100
ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type list).
Well, it turns out that TensorFlow throws this error when the maximum sequence length is too small: if max_len is shorter than the longest tokenized text, len_zeros becomes negative, the padded token lists end up with different lengths, and np.array() produces an array of Python lists that cannot be converted to a tensor. By changing the max_len variable to 54 I could run my program with no difficulty. So the problem was not about the type of the input or the NumPy arrays themselves.
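A small sketch of how max_len could be derived from the data instead of guessed (this reuses the tokenizer and the train_cleaned DataFrame from the question; the "+ 2" accounts for the [CLS] and [SEP] tokens):

# The longest tokenized text determines the smallest max_len that avoids negative padding.
required_max_len = max(len(tokenizer.tokenize(text)) for text in train_cleaned['cleaned_text']) + 2
print('required max_len:', required_max_len)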

How to join an encoder and a decoder

I have built the following encoder-decoder architecture, and the encoder and decoder both work fine separately:
from tensorflow.keras.layers import LSTM, Input, Reshape, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
WORD_TO_INDEX = {"foo": 0, "bar": 1}
MAX_QUERY_WORD_COUNT = 10
QUERY_ENCODING_SIZE = 15
# ENCODER
query_encoder_input = Input(shape=(None, len(WORD_TO_INDEX)), name="query_encoder_input")
query_encoder_output = LSTM(QUERY_ENCODING_SIZE, name="query_encoder_lstm")(query_encoder_input)
query_encoder = Model(inputs=query_encoder_input, outputs=query_encoder_output)
# DECODER
query_decoder_input = Input(shape=(QUERY_ENCODING_SIZE,), name="query_decoder_input")
query_decoder_reshape = Reshape((1, QUERY_ENCODING_SIZE), name="query_decoder_reshape")(query_decoder_input)
query_decoder_lstm = LSTM(QUERY_ENCODING_SIZE, name="query_decoder_lstm", return_sequences=True, return_state=True)
recurrent_input, state_h, state_c = query_decoder_lstm(query_decoder_reshape)
states = [state_h, state_c]
query_decoder_outputs = []
for _ in range(MAX_QUERY_WORD_COUNT):
recurrent_input, state_h, state_c = query_decoder_lstm(recurrent_input, initial_state=states)
query_decoder_outputs.append(recurrent_input)
states = [state_h, state_c]
query_decoder_output = Lambda(lambda x: K.concatenate(x, axis=1), name="query_decoder_concat")(query_decoder_outputs)
query_decoder = Model(inputs=query_decoder_input, outputs=query_decoder_output)
But when I try to join them together to create an autoencoder, I get an odd error and I don't know why.
# AUTOENCODER
# apply the reshape layer to the output of the encoder
query_autoencoder_output = query_decoder.layers[1](query_encoder_output)
# rebuild the autoencoder by applying each layer of the decoder to the output of the encoder
for decoder_layer in query_decoder.layers[2:]:
# this fails and I don't know why
query_autoencoder_output = decoder_layer(query_autoencoder_output)
# the code never gets here
query_autoencoder = Model(inputs=query_encoder_input, outputs=query_autoencoder_output)
This throws the error:
ValueError: Shape must be rank 3 but is rank 2 for '{{node
query_decoder_concat/concat_1}} = ConcatV2[N=3, T=DT_FLOAT,
Tidx=DT_INT32](query_decoder_lstm/PartitionedCall_11:1,
query_decoder_lstm/PartitionedCall_11:2,
query_decoder_lstm/PartitionedCall_11:3,
query_decoder_concat/concat_1/axis)' with input shapes: [?,1,15],
[?,15], [?,15], [].
This is the template I used for my decoder. (See the "What if I don't want to use teacher forcing for training?" section.)
I relied on these StackOverflow questions (especially the last one) to figure out how to combine the models together.
What does this error mean and how can I fix it?
You can treat a model as a layer, essentially. With an autoencoder, it'll be as straightforward as something like this:
autoencoder = Sequential([encoder, decoder])
If you want some extra flexibility you can subclass tf.keras.Model:
class AutoEncoder(tf.keras.Model):
    def __init__(self, encoder, decoder):
        super(AutoEncoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def call(self, inputs, training=None, **kwargs):
        x = self.encoder(inputs)
        x = self.decoder(x)
        return x

ae = AutoEncoder(encoder, decoder)
ae.fit(...
Full reproducible example:
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow import keras
import tensorflow as tf
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
import numpy as np
(xtrain, ytrain), (xtest, ytest) = keras.datasets.cifar10.load_data()
train_ix = np.where(ytrain.ravel() == 1)
test_ix = np.where(ytest.ravel() == 1)
cars_train = xtrain[train_ix]
cars_test = xtest[test_ix]
cars = np.vstack([cars_train, cars_test]).astype(np.float32)/255
X = tf.data.Dataset.from_tensor_slices(cars).batch(8)
class Encoder(keras.Model):
    def __init__(self):
        super(Encoder, self).__init__()
        self.flat = keras.layers.Flatten(input_shape=(32, 32, 3))
        self.dense1 = keras.layers.Dense(128)
        self.dense2 = keras.layers.Dense(32)

    def call(self, inputs, training=None, **kwargs):
        x = self.flat(inputs)
        x = keras.activations.selu(self.dense1(x))
        x = keras.activations.selu(self.dense2(x))
        return x

class Decoder(keras.Model):
    def __init__(self):
        super(Decoder, self).__init__()
        self.dense1 = keras.layers.Dense(128, input_shape=[32])
        self.dense2 = keras.layers.Dense(32 * 32 * 3)
        self.reshape = keras.layers.Reshape([32, 32, 3])

    def call(self, inputs, training=None, **kwargs):
        x = keras.activations.selu(self.dense1(inputs))
        x = keras.activations.sigmoid(self.dense2(x))
        x = self.reshape(x)
        return x

class AutoEncoder(keras.Model):
    def __init__(self, encoder, decoder):
        super(AutoEncoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def call(self, inputs, training=None, **kwargs):
        x = self.encoder(inputs)
        x = self.decoder(x)
        return x
ae = AutoEncoder(Encoder(), Decoder())
loss_object = keras.losses.BinaryCrossentropy()
reconstruction_loss = keras.metrics.Mean(name='reconstruction_loss')
optimizer = keras.optimizers.Adam()
@tf.function
def reconstruct(inputs):
    with tf.GradientTape() as tape:
        out = ae(inputs)
        loss = loss_object(inputs, out)
    gradients = tape.gradient(loss, ae.trainable_variables)
    optimizer.apply_gradients(zip(gradients, ae.trainable_variables))
    reconstruction_loss(loss)

if __name__ == '__main__':
    template = 'Epoch {:2} Reconstruction Loss {:.4f}'
    for epoch in range(50):
        reconstruction_loss.reset_states()
        for input_batches in X:
            reconstruct(input_batches)
        print(template.format(epoch + 1, reconstruction_loss.result()))
Output:
Epoch 35 Reconstruction Loss 0.5794
Epoch 36 Reconstruction Loss 0.5793
Epoch 37 Reconstruction Loss 0.5792
Epoch 38 Reconstruction Loss 0.5791
Epoch 39 Reconstruction Loss 0.5790
Epoch 40 Reconstruction Loss 0.5789
Based on M Z's answer, but without using Sequential, you can do this:
query_autoencoder = Model(inputs=query_encoder_input, outputs=query_decoder(query_encoder_output))
query_autoencoder.summary()
The summary also breaks down into more layers than M Z's answer did.
For anyone using JavaScript and TensorFlow.js, here is an example of how to make deeper networks.
I thought I would include this since most Javascript examples of autoencoders show only one encoding and decoding layer.
function buildModel (layers)
{
    const model = tf.sequential();
    layers.forEach(layer =>
    {
        model.add(layer);
    });
    return model;
}
Then you can make a deeper network with more encoding and decoding layers with:
// [ encoder ] [ decoder ]
// Model layers: 20 -> 10 -> 2 -> 10 -> 20
// Encoding Layers
const encoder = [
tf.layers.dense({ inputShape: [20], units: 10, activation: 'relu' }),
tf.layers.dense({ units: 2, activation: 'relu' })
];
// Decoding Layers
const decoder = [
tf.layers.dense({ units: 10, activation: 'relu' }),
tf.layers.dense({ units: 20, activation: 'relu' })
];
// Build model
const model = buildModel([...encoder, ...decoder]);
// ... Then compile and Train ...
After training, predictions can be made with:
const predictionModel = buildModel([...encoder]);
predictionModel.predict(data);
