TFLite: `ValueError: Model input is not quantized.`

Problem
I am trying to convert the crop_and_resize operation to TFLite using the script below, but the conversion fails and the error ValueError: Model input is not quantized. is raised.
Does anyone have any ideas? I did not find any relevant information in related previous questions.
Code snippet
import tensorflow as tf

IMG_SIZE = 128
NUM_BOXES = 100
CROP_SIZE = 28
NB_DATA_SAMPLES = 32
BATCH_SIZE = 1

def create_model():
    images_ = tf.keras.Input(shape=(IMG_SIZE, IMG_SIZE, 3), batch_size=BATCH_SIZE, dtype=tf.float32)
    boxes_ = tf.keras.Input(shape=(NUM_BOXES, 4), batch_size=BATCH_SIZE, dtype=tf.float32)

    box_indices = tf.reshape(
        tf.repeat(
            tf.expand_dims(tf.range(BATCH_SIZE, dtype=tf.int32), axis=-1),
            NUM_BOXES,
            axis=-1
        ),
        shape=(-1,)
    )
    cropped_images = tf.image.crop_and_resize(
        image=images_,
        boxes=tf.reshape(boxes_, (-1, 4)),
        box_indices=box_indices,
        crop_size=(CROP_SIZE, CROP_SIZE))

    model = tf.keras.models.Model(inputs=[images_, boxes_], outputs=cropped_images)
    model.summary(line_length=200)
    return model

model = create_model()

def representative_dataset_generator():
    for _ in range(NB_DATA_SAMPLES):
        image_ = tf.random.normal(shape=(1, IMG_SIZE, IMG_SIZE, 3))
        bboxes_ = tf.random.uniform((1, NUM_BOXES, 4), maxval=1)
        yield [image_, bboxes_]

# Converter
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS_INT8,
    tf.lite.OpsSet.SELECT_TF_OPS
]
converter.target_spec.supported_types = [tf.int8]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
converter.representative_dataset = representative_dataset_generator
quant_model = converter.convert()
Thanks

I could execute your code successfully. However, it only quantizes the Reshape layer, not the crop layer. So I guess TFLite can't quantize ops it does not support.
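For anyone hitting the same error: a plausible explanation (an editor's note, not from the original answer) is that crop_and_resize is kept as a float TensorFlow op via SELECT_TF_OPS, so the model's input never receives quantization parameters, and setting inference_input_type = tf.uint8 then fails with "Model input is not quantized." A sketch that lets the conversion finish by leaving the model's inputs and outputs in float, then lists which tensors actually got quantized:

converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS_INT8,
    tf.lite.OpsSet.SELECT_TF_OPS,
]
converter.representative_dataset = representative_dataset_generator
# No inference_input_type / inference_output_type: I/O stays float32.
quant_model = converter.convert()

# Inspect which tensors received quantization parameters.
interpreter = tf.lite.Interpreter(model_content=quant_model)
interpreter.allocate_tensors()
for detail in interpreter.get_tensor_details():
    scale, zero_point = detail['quantization']
    if scale != 0:  # (0.0, 0) means the tensor was left in float
        print(detail['name'], detail['dtype'], detail['quantization'])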

Related

Model cannot fit on TensorFlow data pipeline with unknown TensorShape

I have a data loader pipeline for video data. Although I specify the output of the pipeline, I still get the following error when calling model.fit: "ValueError: as_list() is not defined on an unknown TensorShape". I searched for the error, and most people say it is caused by tf.numpy_function, which returns a shape that is unknown to the TensorFlow pipeline. Specifying the shape after that function should solve the problem. However, it does not.
AUTOTUNE = tf.data.experimental.AUTOTUNE

# get list of numpy files in directory
train_ds = tf.data.Dataset.list_files("dir")

# load numpy files (video with shape 40,160,160,3), get corresponding label and output both
# video and label
def get_label(file_path):
    label = tf.strings.split(file_path, os.path.sep)
    return label[-2]

def process_image(file_path):
    label = get_label(file_path)
    video = np.load(file_path, allow_pickle=True)
    video = tf.convert_to_tensor(video / 255, dtype=tf.float32)
    return video, np.float32(label)

train_ds = train_ds.map(lambda item: tf.numpy_function(
    process_image, [item], (tf.float32, tf.float32)), num_parallel_calls=AUTOTUNE)

# Convert video to tf object
def set_shape(video, label):
    video = tf.reshape(video, (40, 160, 160, 3))
    # video = tf.ensure_shape(video, (40,160,160,3))  # also does not work
    # video = tf.convert_to_tensor(video, dtype=tf.float32)  # also does not work
    return video, label

train_ds = train_ds.map(set_shape)

# batching
train_ds = train_ds.batch(batch_size=5)

# optimization
train_ds = train_ds.prefetch(AUTOTUNE)
train_ds.take(1)
Although the rest of the code seems fine (it does work when I manually input data), I will paste it in case it is not.
def create_LRCN_model():
    '''
    This function will construct the required LRCN model.
    Returns:
        model: It is the required constructed LRCN model.
    '''
    # We will use a Sequential model for model construction.
    model = Sequential()

    # Define the Model Architecture.
    ########################################################################################
    model.add(TimeDistributed(Conv2D(128, (3, 3), padding='same', activation='relu'),
                              input_shape=(40, 160, 160, 3)))
    model.add(TimeDistributed(MaxPooling2D((4, 4))))
    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Conv2D(256, (3, 3), padding='same', activation='relu')))
    model.add(TimeDistributed(MaxPooling2D((4, 4))))
    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Conv2D(128, (3, 3), padding='same', activation='relu')))
    model.add(TimeDistributed(MaxPooling2D((2, 2))))
    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Conv2D(64, (3, 3), padding='same', activation='relu')))
    model.add(TimeDistributed(MaxPooling2D((2, 2))))
    # model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(32))
    model.add(Dense(1, activation='sigmoid'))
    ########################################################################################

    # Display the model's summary.
    model.summary()

    # Return the constructed LRCN model.
    return model

LRCN_model = create_LRCN_model()

early_stopping_callback = EarlyStopping(monitor='val_loss', patience=15, mode='min', restore_best_weights=True)
LRCN_model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=["accuracy"])
LRCN_model_training_history = LRCN_model.fit(train_ds, validation_data=val_ds, epochs=70, callbacks=[early_stopping_callback])
Okay, I found another solution. I do not know exactly why it works; just using the following function does the job.
def set_shape(video, label):
    video.set_shape((40, 160, 160, 3))
    label.set_shape([])
    return video, label
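For context (an editor's note, not from the thread): tf.numpy_function erases static shape information, so the tensors it returns have unknown rank. Tensor.set_shape attaches shape metadata without adding any op to the graph, and, importantly, this working version annotates the label as well as the video; the earlier attempt reshaped only the video and left the label's shape unknown. A minimal sketch of the resulting pipeline:

train_ds = train_ds.map(lambda item: tf.numpy_function(
    process_image, [item], (tf.float32, tf.float32)),
    num_parallel_calls=AUTOTUNE)
train_ds = train_ds.map(set_shape)  # restores static shapes for BOTH video and label
train_ds = train_ds.batch(5).prefetch(AUTOTUNE)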
Got it! You just need to change "accuracy" to "binary_accuracy" in model.compile. It worked for me with your code and some dummy video and label input data.

A question about a CGAN that raises "InvalidArgumentError"

I was training on the MNIST dataset from Keras, copying the example from Keras. When I first met the problem, I tried to log each variable involved in the concatenation, but I did not find any tensor with the strange shape the error reports. Here is the code:
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow as tf
import numpy as np

batch_size = 64
num_channels = 1
num_classes = 10
image_size = 28
latent_dim = 128

(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_labels = np.concatenate([y_train, y_test])
all_digits = all_digits.astype("float32") / 255.0
# print(all_digits.shape)
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))
# print(all_digits.shape)
all_labels = keras.utils.to_categorical(all_labels, 10)

dataset = tf.data.Dataset.from_tensor_slices((all_digits, all_labels))
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)

generator_in_channels = latent_dim + num_classes
discriminator_in_channels = num_channels + num_classes

discriminator = keras.Sequential(
    [
        keras.layers.InputLayer((28, 28, discriminator_in_channels)),
        layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same'),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.GlobalMaxPool2D(),
        layers.Dense(1)
    ],
    name='discriminator'
)

generator = keras.Sequential(
    [
        keras.layers.InputLayer((generator_in_channels,)),
        layers.Dense(7 * 7 * generator_in_channels),
        layers.LeakyReLU(alpha=0.2),
        layers.Reshape((7, 7, generator_in_channels)),
        layers.Conv2DTranspose(8, (4, 4), strides=(2, 2), padding='same'),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2DTranspose(8, (4, 4), strides=(2, 2), padding='same'),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2D(1, (3, 3), padding='same', activation='sigmoid')
    ],
    name='generator'
)

generator.summary()
discriminator.summary()

class ConditionalGan(keras.Model):
    def __init__(self, discriminator, generator, latent_dim):
        super(ConditionalGan, self).__init__()
        self.discriminator = discriminator
        self.generator = generator
        self.latent_dim = latent_dim
        self.gen_loss_tracker = keras.metrics.Mean(name='generator_loss')
        self.disc_loss_tracker = keras.metrics.Mean(name='discriminator_loss')

    @property
    def metrics(self):
        return [self.gen_loss_tracker, self.disc_loss_tracker]

    def compile(self, d_optimizer, g_optimizer, loss_fn):
        super(ConditionalGan, self).compile()
        self.d_optimizer = d_optimizer
        self.g_optimizer = g_optimizer
        self.loss_fn = loss_fn

    def train_step(self, data):
        real_image, one_hot_labels = data
        image_one_hot_labels = one_hot_labels[:, :, None, None]
        image_one_hot_labels = tf.repeat(image_one_hot_labels, repeats=[image_size * image_size])
        image_one_hot_labels = tf.reshape(image_one_hot_labels, shape=(-1, image_size, image_size, num_classes))

        # Discriminator
        random_latent_vector = tf.random.normal(shape=(batch_size, latent_dim))
        print(random_latent_vector.shape)
        print(one_hot_labels.shape)
        random_vector_labels = tf.concat((random_latent_vector, one_hot_labels), axis=1)
        generator_image = self.generator(random_vector_labels)
        fake_image_and_labels = tf.concat([generator_image, image_one_hot_labels], -1)
        real_image_and_labels = tf.concat([real_image, image_one_hot_labels], -1)
        print(generator_image.shape)
        print(real_image.shape)
        combine_images = tf.concat([real_image_and_labels, fake_image_and_labels], 0)
        labels = tf.concat([tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], 0)
        with tf.GradientTape() as tape:
            pred = self.discriminator(combine_images)
            d_loss = self.loss_fn(labels, pred)
        grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
        self.d_optimizer.apply_gradients(zip(grads, self.discriminator.trainable_weights))

        # Generator
        random_latent_vector = tf.random.normal(shape=(batch_size, latent_dim))
        random_vector_labels = tf.concat((random_latent_vector, one_hot_labels), -1)
        print(random_latent_vector.shape)
        print(one_hot_labels.shape)
        misleading_labels = tf.zeros((batch_size, 1))
        with tf.GradientTape() as tape:
            fake_image = self.generator(random_vector_labels)
            print(fake_image.shape)
            print(image_one_hot_labels.shape)
            fake_image_and_labels = tf.concat([fake_image, image_one_hot_labels], -1)
            pred = self.discriminator(fake_image_and_labels)
            g_loss = self.loss_fn(misleading_labels, pred)
        grads = tape.gradient(g_loss, self.generator.trainable_weights)
        self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))

        # Monitor loss.
        self.gen_loss_tracker.update_state(g_loss)
        self.disc_loss_tracker.update_state(d_loss)
        return {
            'g_loss': self.gen_loss_tracker.result(),
            'd_loss': self.disc_loss_tracker.result()
        }

cond_gan = ConditionalGan(
    discriminator=discriminator, generator=generator, latent_dim=latent_dim
)
cond_gan.compile(
    d_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
    g_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
    loss_fn=keras.losses.BinaryCrossentropy(from_logits=True),
)
cond_gan.fit(dataset, epochs=20)
The error is below. I searched for existing answers; some solve this kind of problem by editing the batch_size. I tried that, but it failed. The traceback does not point to the right line; this code runs in a Kaggle notebook, so the reported location may be off.
InvalidArgumentError: ConcatOp : Dimensions of inputs should match: shape[0] = [64,128] vs. shape[1] = [48,10]
[[node concat (defined at tmp/ipykernel_34/2191800439.py:95) ]] [Op:__inference_train_function_38770]
Errors may have originated from an input operation.
Input Source operations connected to node concat:
IteratorGetNext (defined at tmp/ipykernel_34/2191800439.py:156)
random_normal (defined at tmp/ipykernel_34/2191800439.py:90)
Function call stack:
train_function
Thank you for answering!
The problem is here: random_vector_labels = tf.concat((random_latent_vector, one_hot_labels), -1)
It throws InvalidArgumentError because random_latent_vector has shape [64, 128] while one_hot_labels has shape [48, 10]: the latent vectors have 64 rows of data, but the labels only have 48. This happens on the last batch of the epoch: 70,000 MNIST samples split into batches of 64 leave a final batch of 48, while batch_size is hardcoded to 64 inside train_step. When concatenating two tensors, make sure they have the same number of rows, i.e. that your random_latent_vector shape matches your one_hot_labels shape.

Shapes are incompatible when using TFDebertaForSequenceClassification for binary classification

I was trying to use a recall metric with the TFDebertaForSequenceClassification model for binary classification with labels 0 and 1, but I got this error:
ValueError: Shapes (32, 2) and (32, 1) are incompatible
Does anyone know how to solve it?
This is how I processed the data:
with tf.device('/cpu:0'):
    train_data = tf.data.Dataset.from_tensor_slices((train_df["body"].values, train_df["label"].values))
    valid_data = tf.data.Dataset.from_tensor_slices((valid_df.body.values, valid_df.label.values))

def map_example_to_dict(input_ids, attention_masks, token_type_ids, label):
    X = {
        "input_ids": input_ids,
        "token_type_ids": token_type_ids,
        "attention_mask": attention_masks,
    }
    Y = label
    return X, Y

def encode_examples(df, limit=-1):
    # prepare lists, so that we can build up the final TensorFlow dataset from slices
    input_ids_list = []
    token_type_ids_list = []
    attention_mask_list = []
    labels = []
    for data in df.to_numpy():
        bert_input = tokenizer(data[2], add_special_tokens=True,
                               max_length=MAX_SEQ_LEN,
                               padding='max_length',
                               return_token_type_ids=True,
                               truncation=True)
        input_ids_list.append(bert_input['input_ids'])
        token_type_ids_list.append(bert_input['token_type_ids'])
        attention_mask_list.append(bert_input['attention_mask'])
        labels.append([data[3]])
    return tf.data.Dataset.from_tensor_slices((input_ids_list, attention_mask_list, token_type_ids_list, labels)).map(map_example_to_dict)

encoded_train_input = encode_examples(train_df).shuffle(1000).batch(32, drop_remainder=True)
encoded_valid_input = encode_examples(valid_df).shuffle(1000).batch(32, drop_remainder=True)
And this is how I set up the model:
model = TFDebertaForSequenceClassification.from_pretrained('kamalkraj/deberta-base')

lr = 2e-6  # 1e-6 #2e-5 #3e-5
epochs = 1

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
              loss=tf.keras.losses.BinaryCrossentropy(),
              metrics=[tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.Recall()])
history = model.fit(encoded_train_input, validation_data=encoded_valid_input, epochs=epochs, verbose=1)
Try passing the number of labels to the model. The error arises because the model's default head outputs two logits per example (shape (32, 2)) while your labels have shape (32, 1); for binary classification with BinaryCrossentropy, a single-logit head matches:
configure = DebertaConfig.from_pretrained('kamalkraj/deberta-base', gradient_checkpointing=True, num_labels=1)  # one logit for binary labels of shape (32, 1)
model = TFDebertaForSequenceClassification.from_pretrained('kamalkraj/deberta-base', config=configure)
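Alternatively (an editor's sketch, not part of the original answer), you can keep the default two-logit head and switch to a sparse categorical loss, which accepts integer labels of shape (32,) or (32, 1):

# Assumption: labels are the integers 0/1.
model = TFDebertaForSequenceClassification.from_pretrained('kamalkraj/deberta-base')  # default num_labels=2
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=2e-6),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])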

Keras TFRecord: using a variation of inputs to a VGG19 model -- graph disconnected error

I am trying to split a big image into small patches, use VGG19 to extract features from each patch, and then combine the features for the model.
def create_model_tl(input_shape=(448, 448, 3)):
    print("start creating model - transfer learning ...")
    img_input = Input(shape=input_shape)

    ########## patches ############
    n_dim = 512
    patch_size = 32
    patches = Patches(patch_size)(img_input)  # a function to generate a tensor of shape [batch_size*num_patches, patch_size, patch_size, 3]
    num_patches = int((448 // patch_size) ** 2)

    base_model = vgg19.VGG19(include_top=False, input_shape=(patch_size, patch_size, 3), weights="imagenet")
    x_pat = base_model(patches)
    x_pat = base_model.layers[-1].output
    x_pat = tf.reshape(x_pat, [-1, num_patches, patch_size, patch_size, 512])  # convert shape to [batch_size, num_patches, patch_size, patch_size, depth]
    x_pat = tf.reshape(x_pat, [tf.shape(x_pat)[0], num_patches, patch_size * patch_size * 512])
    x_pat = Dense(units=n_dim)(x_pat)
    x = x_pat

    flat1 = Flatten()(x)
    class0 = Dense(5120, activation='relu')(flat1)
    dropout1 = Dropout(0.2)(class0)
    class1 = Dense(1024, activation='relu')(dropout1)
    dropout2 = Dropout(0.2)(class1)
    class2 = Dense(512, activation='relu')(dropout2)
    dropout3 = Dropout(0.2)(class2)
    output = Dense(num_classes, activation='softmax')(dropout3)
    print(output.shape)

    model = Model(inputs=img_input, outputs=output)
    print(model.summary())
    model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=1e-6), metrics=['accuracy'])
    return model
I got the error below and could not figure out what was wrong.
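No answer was recorded for this question, but a likely cause is visible in the snippet (an editor's reading, not from the original thread): the line x_pat = base_model.layers[-1].output immediately overwrites the result of base_model(patches) with a tensor from VGG19's own internal graph. That tensor is connected to VGG19's input placeholder, not to img_input, so Model(inputs=img_input, outputs=output) sees a disconnected graph. A minimal sketch of the fix:

base_model = vgg19.VGG19(include_top=False, input_shape=(patch_size, patch_size, 3), weights="imagenet")
x_pat = base_model(patches)  # keep this tensor: it is connected to img_input via Patches
# x_pat = base_model.layers[-1].output  # remove: this tensor belongs to VGG19's standalone graph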

TFLite Cannot set tensor: Dimension mismatch on model conversion

I have a Keras model constructed as follows:
module_url = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"
backbone = hub.KerasLayer(module_url)
backbone.build([None, 224, 224, 3])
model = tf.keras.Sequential([backbone, tf.keras.layers.Dense(len(classes), activation='softmax')])
model.build([None, 224, 224, 3])
model.compile('adam', loss='sparse_categorical_crossentropy')
Then I load the Caltech101 dataset from TensorFlow Datasets as follows:
samples, info = tfds.load("caltech101", with_info=True)
train_samples, test_samples = samples['train'], samples['test']

def normalize(row):
    image, label = row['image'], row['label']
    image = tf.dtypes.cast(image, tf.float32)
    image = tf.image.resize(image, (224, 224))
    image = image / 255.0
    return image, label

train_data = train_samples.repeat().shuffle(1024).map(normalize).batch(32).prefetch(1)
test_data = test_samples.map(normalize).batch(1)
Now I'm ready to train and save my model as follows:
model.fit_generator(train_data, epochs=1, steps_per_epoch=100)
saved_model_dir = './output'
tf.saved_model.save(model, saved_model_dir)
At this point the model is usable; I can evaluate an input of shape (224, 224, 3). I try to convert this model as follows:
def generator2():
    data = train_samples
    for _ in range(num_calibration_steps):
        images = []
        for image, _ in data.map(normalize).take(1):
            images.append(image)
        yield images
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = tf.lite.RepresentativeDataset(generator2)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
tflite_default_quant_model = converter.convert()
The conversion triggers the following error
/usr/local/lib/python3.6/dist-packages/tensorflow_core/lite/python/optimize/tensorflow_lite_wrap_calibration_wrapper.py in FeedTensor(self, input_value)
110
111 def FeedTensor(self, input_value):
--> 112 return _tensorflow_lite_wrap_calibration_wrapper.CalibrationWrapper_FeedTensor(self, input_value)
113
114 def QuantizeModel(self, input_py_type, output_py_type, allow_float):
ValueError: Cannot set tensor: Dimension mismatch
Now there is a similar question, but in their case they are loading an already converted model, unlike my case where the issue happens while I try to convert the model.
The converter object is an auto-generated class from C++ code using SWIG, which makes it difficult to inspect. How can I find the exact dimensions expected by the converter object?
I had the same problem when using
def representative_dataset_gen():
    for _ in range(num_calibration_steps):
        # Get sample input data as a numpy array in a method of your choosing.
        yield [input]
from https://www.tensorflow.org/lite/performance/post_training_quantization.
It seems that converter.representative_dataset expects a list containing one example with shape (1, input_shape). That is, using something along the lines of
def representative_dataset_gen():
    for i in range(num_calibration_steps):
        # Get sample input data as a numpy array in a method of your choosing.
        yield [input[i:i+1]]
where input has shape (num_samples, input_shape), solved the problem. In your case, when using tf.data Datasets, a working example would be:
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds

samples, info = tfds.load("caltech101", with_info=True)
train_samples, test_samples = samples['train'], samples['test']

def normalize(row):
    image, label = row['image'], row['label']
    image = tf.dtypes.cast(image, tf.float32)
    image = tf.image.resize(image, (224, 224))
    image = image / 255.0
    return image, label

train_data = train_samples.repeat().shuffle(1024).map(normalize).batch(32).prefetch(1)
test_data = test_samples.map(normalize).batch(1)

module_url = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"
backbone = hub.KerasLayer(module_url)
backbone.build([None, 224, 224, 3])
model = tf.keras.Sequential([backbone, tf.keras.layers.Dense(102, activation='softmax')])
model.build([None, 224, 224, 3])
model.compile('adam', loss='sparse_categorical_crossentropy')
model.fit_generator(train_data, epochs=1, steps_per_epoch=100)

saved_model_dir = 'output/'
tf.saved_model.save(model, saved_model_dir)

num_calibration_steps = 50

def generator():
    single_batches = train_samples.repeat(count=1).map(normalize).batch(1)
    i = 0
    while i < num_calibration_steps:
        for batch in single_batches:
            i += 1
            yield [batch[0]]

converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = tf.lite.RepresentativeDataset(generator)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
tflite_default_quant_model = converter.convert()
I had the same problem and used this solution. Setting inputs_test as your test input, it should also work for you:
import numpy as np

def representative_dataset():
    arrs = np.expand_dims(inputs_test, axis=1).astype(np.float32)
    for data in arrs:
        yield [data]
import tensorflow as tf
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8 # or tf.uint8
converter.inference_output_type = tf.int8 # or tf.uint8
tflite_quant_model = converter.convert()
I applied this on a Raspberry Pi and it worked; just be sure to install tflite_runtime outside of your venv:
import tflite_runtime.interpreter as tflite
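A short usage sketch for running the converted model with tflite_runtime on the Pi (the file name and dummy input are illustrative assumptions):

import numpy as np
import tflite_runtime.interpreter as tflite

interpreter = tflite.Interpreter(model_path="model_quant.tflite")  # hypothetical file name
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# int8/uint8 models expect quantized inputs; scale and zero point come from the input details.
scale, zero_point = input_details[0]['quantization']
x = np.random.rand(*input_details[0]['shape']).astype(np.float32)  # dummy float input
x_q = np.round(x / scale + zero_point).astype(input_details[0]['dtype'])

interpreter.set_tensor(input_details[0]['index'], x_q)
interpreter.invoke()
y_q = interpreter.get_tensor(output_details[0]['index'])

# Dequantize the output back to float.
out_scale, out_zero_point = output_details[0]['quantization']
y = (y_q.astype(np.float32) - out_zero_point) * out_scale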
