ValueError: Shapes (None, 1) and (None, 101) are incompatible - python

The output Dense layer has 101 units, but this error pops up. The error could be in how train_dataset and validation_dataset are built, or in how the model is compiled. Help me out with this!
Also, if I want to sample the dataset and then assign the sample to training and testing, how do I do it? I'm new to TensorFlow and Python, and the syntax is confusing.
BATCH_SIZE = 32
IMG_SIZE = (224, 224)
directory = "/Food/Food 101/images"
train_dataset = image_dataset_from_directory(directory,
                                             shuffle=True,
                                             batch_size=BATCH_SIZE,
                                             label_mode="int",
                                             image_size=IMG_SIZE,
                                             validation_split=0.2,
                                             subset='training',
                                             seed=42)
validation_dataset = image_dataset_from_directory(directory,
                                                  shuffle=True,
                                                  batch_size=BATCH_SIZE,
                                                  label_mode="int",
                                                  image_size=IMG_SIZE,
                                                  validation_split=0.2,
                                                  subset='validation',
                                                  seed=42)
class_names = train_dataset.class_names

plt.figure(figsize=(10, 10))
for images, labels in train_dataset.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")

AUTOTUNE = tf.data.experimental.AUTOTUNE
train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)

def data_augmenter():
    data_augmentation = tf.keras.Sequential()
    data_augmentation.add(tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal"))
    data_augmentation.add(tf.keras.layers.experimental.preprocessing.RandomRotation(0.2))
    return data_augmentation

data_augmentation = data_augmenter()

for image, _ in train_dataset.take(1):
    plt.figure(figsize=(10, 10))
    first_image = image[0]
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        augmented_image = data_augmentation(tf.expand_dims(first_image, 0))
        plt.imshow(augmented_image[0] / 255)
        plt.axis('off')
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
IMG_SHAPE = IMG_SIZE + (3,)
IMG_SHAPE
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
                                               include_top=True,
                                               weights='imagenet')
base_model.summary()
nb_layers = len(base_model.layers)
print(base_model.layers[nb_layers - 2].name)
print(base_model.layers[nb_layers - 1].name)
image_batch, label_batch = next(iter(train_dataset))
feature_batch = base_model(image_batch)
print(feature_batch.shape)
label_batch
base_model.trainable = False
image_var = tf.Variable(image_batch)
pred = base_model(image_var)
tf.keras.applications.mobilenet_v2.decode_predictions(pred.numpy(), top=2)
image_shape = IMG_SIZE

def FC_model(image_shape=IMG_SIZE, data_augmentation=data_augmenter()):
    input_shape = image_shape + (3,)
    base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
                                                   include_top=False,
                                                   weights='imagenet')
    base_model.trainable = False
    inputs = tf.keras.Input(shape=image_shape + (3,))
    x = data_augmentation(inputs)
    x = preprocess_input(x)
    x = base_model(x, training=False)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = tf.keras.layers.Dropout(0.2)(x)
    prediction_layer = tf.keras.layers.Dense(101)
    outputs = prediction_layer(x)
    model = tf.keras.Model(inputs, outputs)
    return model

model2 = FC_model(IMG_SIZE, data_augmentation)
model2.summary()
base_learning_rate = 0.001
model2.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
               loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
               metrics=['accuracy'])
initial_epochs = 10
history = model2.fit(train_dataset, validation_data=validation_dataset, epochs=initial_epochs)

The problem is in how you are loading the data:
train_dataset = image_dataset_from_directory(directory,
                                             shuffle=True,
                                             batch_size=BATCH_SIZE,
                                             label_mode="int",
                                             image_size=IMG_SIZE,
                                             validation_split=0.2,
                                             subset='training',
                                             seed=42)
In particular, label_mode="int" means that your target variable is encoded as an integer (e.g., 0 for cat, 1 for dog, 2 for tree), so each label batch has shape (batch_size,) or (batch_size, 1). You want to change it to label_mode="categorical", which one-hot encodes the labels into vectors of length 101, matching the model's (None, 101) output. Note that the label encoding and the loss have to agree: one-hot labels go with CategoricalCrossentropy, while integer labels go with SparseCategoricalCrossentropy.
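For reference, here is a minimal sketch (assuming the same 101-class setup as above) of the two consistent pairings of label encoding and loss; mixing them is what produces shape errors like this one:

# Option A: one-hot labels of shape (batch, 101), matching the (None, 101) output.
train_dataset = image_dataset_from_directory(directory,
                                             label_mode="categorical",
                                             image_size=IMG_SIZE,
                                             batch_size=BATCH_SIZE)
model2.compile(optimizer='adam',
               loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
               metrics=['accuracy'])

# Option B: integer labels of shape (batch,), paired with the sparse loss.
train_dataset = image_dataset_from_directory(directory,
                                             label_mode="int",
                                             image_size=IMG_SIZE,
                                             batch_size=BATCH_SIZE)
model2.compile(optimizer='adam',
               loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
               metrics=['accuracy'])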


Uncaught (in promise) Error: Unknown layer:

I'm developing a plant disease classification app using React and TensorFlow.
For this task, I developed an AI model with a CNN and preprocessing layers.
The model works fine on Google Colab, but when I converted it to the JSON format to deploy it in my app, my browser's console shows this error:
Uncaught (in promise) Error: Unknown layer: RandomFlip. This may be due to one of the following reasons:
1. The layer is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code.
2. The custom layer is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().
This is my Python code (a workaround sketch follows after it):
# -*- coding: utf-8 -*-
"""Copie de Untitled7.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1IFoiRxZ-FLnM-SIc6YfMtVooH2s-cMP4
"""
from google.colab import drive
drive.mount('/content/drive')
import zipfile
import os
zip_ref = zipfile.ZipFile('/content/drive/MyDrive/SoniaKarimDatasetPFA_Agriculture/archive.zip','r')
zip_ref.extractall('/content')
zip_ref.close()
len(os.listdir('/content'))
#importing necessary directories for analysis
import numpy as np
import tensorflow as tf
from tensorflow.keras import models, layers, callbacks
import matplotlib.pyplot as plt
!pip install split-folders
#Some additional parameters
#It is a good practice to declare these parameters
#outside of the functions also in a separate code block
directory = '/content/PlantVillage'
IMAGE_SIZE = 258
BATCH_SIZE = 32
CHANNELS = 3
EPOCHS = 3
import splitfolders
splitfolders.ratio(directory, output="output",
                   seed=1337, ratio=(.7, .3), group_prefix=None, move=False)
#Now, we gonna do the do! time to deal with
#our images!
#Let's call this tensorflow.keras function which will
#... you know!
dataset = tf.keras.preprocessing.image_dataset_from_directory(
    "/content/output/val",
    shuffle=True,
    image_size=(IMAGE_SIZE, IMAGE_SIZE),
    batch_size=BATCH_SIZE
)
#dataset = dataset.take(int(len(dataset)*0.35))
class_names = dataset.class_names
class_names
len(dataset)
plt.figure(figsize=(10, 10))
for image_batch, label_batch in dataset.take(1):
    for i in range(12):
        ax = plt.subplot(3, 4, i + 1)
        plt.imshow(image_batch[i].numpy().astype('uint8'))
        plt.title(class_names[label_batch[i]])
        plt.axis('off')
        #print(image_batch[0].shape)
        #print(label_batch.numpy())
train_size = int(len(dataset)*0.8)
train_size
train_ds = dataset.take(train_size)
test_ds = dataset.skip(train_size)
validation_size = int(len(dataset)*0.1)
validation_ds = test_ds.take(validation_size)
test_ds = test_ds.skip(validation_size)
def get_dataset_partitions_tf(ds, train_split=0.8,
                              val_split=0.2,
                              test_split=0.1,
                              shuffle=True,
                              shuffle_size=10000):
    ds_size = len(ds)
    if shuffle:
        ds = ds.shuffle(shuffle_size, seed=12)
    train_size = int(train_split * ds_size)
    val_size = int(val_split * ds_size)
    val_ds = ds.skip(train_size).take(val_size)
    train_ds = ds.take(train_size)
    test_ds = ds.skip(train_size).take(val_size)
    return train_ds, val_ds, test_ds
train_ds, val_ds, test_ds = get_dataset_partitions_tf(dataset)
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size = tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size = tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size = tf.data.AUTOTUNE)
data_augmentation = tf.keras.Sequential([
    layers.experimental.preprocessing.RandomFlip('horizontal_and_vertical'),
    layers.experimental.preprocessing.RandomRotation(0.2)
])
resize_and_rescale = tf.keras.Sequential([
    layers.experimental.preprocessing.Resizing(IMAGE_SIZE, IMAGE_SIZE),
    layers.experimental.preprocessing.Rescaling(1.0/255)
])
input_shape = (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, CHANNELS)
n_classes = 15
model = models.Sequential([
    resize_and_rescale,
    data_augmentation,
    layers.Conv2D(32, (3, 3), input_shape=input_shape, activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(n_classes, activation='softmax')
])
model.build(input_shape=input_shape)
model.summary()
from tensorflow.keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(
    min_delta=0.001,
    patience=10,
    restore_best_weights=True,
)
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=['accuracy']
)
from tensorflow.keras import callbacks
history = model.fit(
    train_ds,
    epochs=EPOCHS,
    batch_size=BATCH_SIZE,
    verbose=1,
    callbacks=[early_stopping],
    validation_data=val_ds
)
score = model.evaluate(test_ds)
score
history.params
history.history.keys()
len(history.history['accuracy'])
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize = (8,8))
plt.subplot(1,2,1)
plt.plot(range(EPOCHS), acc, label = 'Training Accuracy')
plt.plot(range(EPOCHS), val_acc, label = 'Validation Accuracy')
plt.legend(loc = 'lower right')
plt.title('Training and validation Accuracy')
plt.subplot(1,2,2)
plt.plot(range(EPOCHS), loss, label = 'Training_Loss')
plt.plot(range(EPOCHS), val_loss, label = 'Validation Loss')
plt.legend(loc = 'lower right')
plt.title('Training and validation Loss')
for images_batch, labels_batch in test_ds.take(1):
    first_image = image_batch[0].numpy().astype('uint8')
    first_label = labels_batch[0]
    print('first image to predict')
    plt.imshow(first_image)
    print('actual label: ', class_names[first_label])
    batch_prediction = model.predict(images_batch)
    print(class_names[np.argmax(batch_prediction[0])])
def predict(model, img):
    img_array = tf.keras.preprocessing.image.img_to_array(images[i].numpy())
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    predicted_class = class_names[np.argmax(predictions[0])]
    confidence = round(100 * (np.max(predictions[0])), 2)
    return predicted_class, confidence
plt.figure(figsize=(15, 15))
for images, labels in test_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype('uint8'))
        predicted_class, confidence = predict(model, images[i].numpy())
        actual_class = class_names[labels[i]]
        plt.title(f"actual: {actual_class}, \n Predicted: {predicted_class} \n Confidence: {confidence} ")
        plt.axis('off')
!pip install tensorflowjs
import tensorflowjs as tfjs
tfjs.converters.save_keras_model(model, "mode.h5")
#model.save("model1.h5")
#!pip install tensorflowjs
!tensorflowjs_converter --input_format keras '/content/mode.h5' '/content/sabetna-model'
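The error names RandomFlip because the preprocessing layers are serialized inside the model, and TensorFlow.js has no RandomFlip layer to deserialize. One possible workaround, sketched here under the assumption that the augmentation is only needed at training time, is to apply those layers in the tf.data pipeline instead of inside the Sequential model, so the converted model starts at the Conv2D stack:

# Sketch of a workaround (assumes the resize_and_rescale and
# data_augmentation objects defined above): apply preprocessing in the
# input pipeline so it is never saved into the exported model.
train_ds = train_ds.map(lambda x, y: (resize_and_rescale(x), y),
                        num_parallel_calls=tf.data.AUTOTUNE)
train_ds = train_ds.map(lambda x, y: (data_augmentation(x, training=True), y),
                        num_parallel_calls=tf.data.AUTOTUNE)
val_ds = val_ds.map(lambda x, y: (resize_and_rescale(x), y),
                    num_parallel_calls=tf.data.AUTOTUNE)

The browser side then has to resize and rescale each image itself before calling predict().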

Vision transformer binary classifier is only predicting one class

I wrote code for a vision transformer to classify mammograms as benign or malignant. After training for 30 epochs, however, the model predicts only one class (benign). All the final predictions for the test images are in the range 0.47-0.49.
The code:
datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255,
                                                          samplewise_center=True,
                                                          samplewise_std_normalization=True,
                                                          validation_split=0.1,
                                                          rotation_range=180,
                                                          shear_range=15,
                                                          zoom_range=0.2,
                                                          width_shift_range=0.2,
                                                          height_shift_range=0.2,
                                                          horizontal_flip=True,
                                                          vertical_flip=True,
                                                          fill_mode='reflect')
train_gen = datagen.flow_from_dataframe(dataframe=DF_TRAIN,
                                        directory=TRAIN_PATH,
                                        x_col='image_file_path',
                                        y_col='pathology',
                                        subset='training',
                                        batch_size=BATCH_SIZE,
                                        # seed=1,
                                        color_mode='rgb',
                                        shuffle=True,
                                        class_mode='binary',
                                        target_size=(IMAGE_SIZE, IMAGE_SIZE))
valid_gen = datagen.flow_from_dataframe(dataframe=DF_TRAIN,
                                        directory=TRAIN_PATH,
                                        x_col='image_file_path',
                                        y_col='pathology',
                                        subset='validation',
                                        batch_size=BATCH_SIZE,
                                        # seed=1,
                                        color_mode='rgb',
                                        shuffle=False,
                                        class_mode='binary',
                                        target_size=(IMAGE_SIZE, IMAGE_SIZE))
test_gen = datagen.flow_from_dataframe(dataframe=DF_TEST,
                                       directory=TEST_PATH,
                                       x_col='image_file_path',
                                       y_col='pathology',
                                       # subset='validation',
                                       batch_size=BATCH_SIZE,
                                       # seed=1,
                                       color_mode='rgb',
                                       shuffle=False,
                                       class_mode='binary',
                                       target_size=(IMAGE_SIZE, IMAGE_SIZE))
def mlp(x, hidden_units, dropout_rate):
    for units in hidden_units:
        x = layers.Dense(units, activation=tf.nn.gelu)(x)
        x = layers.Dropout(dropout_rate)(x)
    return x
class Patches(layers.Layer):
    def __init__(self, patch_size):
        super(Patches, self).__init__()
        self.patch_size = patch_size

    def call(self, images):
        batch_size = tf.shape(images)[0]
        patches = tf.image.extract_patches(
            images=images,
            sizes=[1, self.patch_size, self.patch_size, 1],
            strides=[1, self.patch_size, self.patch_size, 1],
            rates=[1, 1, 1, 1],
            padding="VALID",
        )
        patch_dims = patches.shape[-1]
        patches = tf.reshape(patches, [batch_size, -1, patch_dims])
        return patches

    def get_config(self):
        config = super().get_config().copy()
        config.update({
            'patch_size': self.patch_size,
        })
        return config
class PatchEncoder(layers.Layer):
    def __init__(self, num_patches, projection_dim):
        super(PatchEncoder, self).__init__()
        self.num_patches = num_patches
        self.projection = layers.Dense(units=projection_dim)
        self.position_embedding = layers.Embedding(
            input_dim=num_patches, output_dim=projection_dim
        )

    def call(self, patch):
        positions = tf.range(start=0, limit=self.num_patches, delta=1)
        encoded = self.projection(patch) + self.position_embedding(positions)
        return encoded

    def get_config(self):
        config = super().get_config().copy()
        config.update({
            'num_patches': self.num_patches,
            'projection': self.projection,
            'position_embedding': self.position_embedding,
        })
        return config
def create_vit_classifier():
    inputs = layers.Input(shape=input_shape)
    # Create patches.
    patches = Patches(patch_size)(inputs)
    # Encode patches.
    encoded_patches = PatchEncoder(num_patches, projection_dim)(patches)
    # Create multiple layers of the Transformer block.
    for _ in range(transformer_layers):
        # Layer normalization 1.
        x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
        # Create a multi-head attention layer.
        attention_output = layers.MultiHeadAttention(num_heads=num_heads, key_dim=projection_dim, dropout=0.1)(x1, x1)
        # Skip connection 1.
        x2 = layers.Add()([attention_output, encoded_patches])
        # Layer normalization 2.
        x3 = layers.LayerNormalization(epsilon=1e-6)(x2)
        # MLP.
        x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
        # Skip connection 2.
        encoded_patches = layers.Add()([x3, x2])
    # Create a [batch_size, projection_dim] tensor.
    representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
    representation = layers.Flatten()(representation)
    representation = layers.Dropout(0.5)(representation)
    # Add MLP.
    features = mlp(representation, hidden_units=mlp_head_units, dropout_rate=0.5)
    # Classify outputs.
    logits = layers.Dense(num_classes, activation="sigmoid")(features)
    # Create the Keras model.
    model = tf.keras.Model(inputs=inputs, outputs=logits)
    return model
def run_experiment(model):
    optimizer = tfa.optimizers.AdamW(learning_rate=learning_rate, weight_decay=weight_decay)
    model.compile(
        optimizer=optimizer,
        loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
        metrics=['accuracy'])
    STEP_SIZE_TRAIN = train_gen.n // train_gen.batch_size
    STEP_SIZE_VALID = valid_gen.n // valid_gen.batch_size
    print(STEP_SIZE_TRAIN, STEP_SIZE_VALID)
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy',
                                                     factor=0.2,
                                                     patience=2,
                                                     verbose=1,
                                                     min_delta=1e-4,
                                                     min_lr=1e-6,
                                                     mode='max')
    checkpointer = tf.keras.callbacks.ModelCheckpoint(filepath='./model_3.hdf5',
                                                      monitor='val_accuracy',
                                                      verbose=1,
                                                      save_best_only=True,
                                                      save_weights_only=True,
                                                      mode='max')
    callbacks = [reduce_lr, checkpointer]
    history = model.fit(x=train_gen,
                        steps_per_epoch=STEP_SIZE_TRAIN,
                        validation_data=valid_gen,
                        validation_steps=STEP_SIZE_VALID,
                        epochs=EPOCHS,
                        callbacks=callbacks,
                        verbose=1)
    model.save(f'{save_path}/model_3.h5')
    return history

vit_classifier = create_vit_classifier()
history = run_experiment(vit_classifier)
vit_classifier.load_weights(f'{save_path}/model_3.h5')
A = vit_classifier.predict(test_gen, steps = test_gen.n // test_gen.batch_size + 1)
predicted_classes = np.where(A > 0.5, 1, 0)
true_classes = test_gen.classes
class_labels = list(test_gen.class_indices.keys())
results = pd.DataFrame(list(zip(test_gen.filenames, true_classes, predicted_classes)),
                       columns=['Image name', 'True class', 'Predicted class'])
results = results.replace({"True class": classes_dict})
results = results.replace({"Predicted class": classes_dict})
prob = pd.DataFrame(A, columns=['Predicted probability'])
result_df = pd.concat([results, prob], axis=1)
result_df['Predicted probability'] = pd.Series(["{0:.1f}".format(val * 100) for val in result_df['Predicted probability']],
                                               index=result_df.index)
results_csv = f'{save_path}/results_3.csv'
with open(results_csv, mode='w') as f:
    result_df.to_csv(f)
Confusion matrix:
[[428   0]
 [276   0]]
Please help me figure out how to rectify this problem.

Evaluate U-Net by layer

I come from a medical background and am a newbie in the machine learning field. I am trying to train a U-Net model using Keras and TensorFlow for image segmentation. However, my loss value is all NaN and the prediction is all black.
I would like to check the U-Net layer by layer, but I don't know how to feed the data or where to start. What I mean by checking each layer is this: I want to feed my images to the first layer, see the output from the first layer, then move on to the second layer, and so on until the last layer. I just want to see how the output is produced at each layer and to check where the NaN values start. I really appreciate your help.
Here is my code:
import os
import matplotlib.pyplot as plt
import tensorflow as tf
from keras_preprocessing.image import ImageDataGenerator
from tensorflow import keras
#Constants
SEED = 42
BATCH_SIZE_TRAIN = 16
BATCH_SIZE_TEST = 16
IMAGE_HEIGHT = 512
IMAGE_WIDTH = 512
IMG_SIZE = (IMAGE_HEIGHT, IMAGE_WIDTH)
data_dir = 'data'
data_dir_train = os.path.join(data_dir, 'training')
data_dir_train_image = os.path.join(data_dir_train, 'img')
data_dir_train_mask = os.path.join(data_dir_train, 'mask')
data_dir_test = os.path.join(data_dir, 'test')
data_dir_test_image = os.path.join(data_dir_test, 'img')
data_dir_test_mask = os.path.join(data_dir_test, 'mask')
NUM_TRAIN = 1413
NUM_TEST = 210
NUM_OF_EPOCHS = 10
def create_segmentation_generator_train(img_path, mask_path, BATCH_SIZE):
    data_gen_args = dict(rescale=1./255)
    img_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(*data_gen_args)
    img_generator = img_datagen.flow_from_directory(img_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    mask_generator = mask_datagen.flow_from_directory(mask_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    return zip(img_generator, mask_generator)

def create_segmentation_generator_test(img_path, mask_path, BATCH_SIZE):
    data_gen_args = dict(rescale=1./255)
    img_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(*data_gen_args)
    img_generator = img_datagen.flow_from_directory(img_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    mask_generator = mask_datagen.flow_from_directory(mask_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    return zip(img_generator, mask_generator)
def display(display_list):
    plt.figure(figsize=(15, 15))
    title = ['Input Image', 'True Mask', 'Predicted Mask']
    for i in range(len(display_list)):
        plt.subplot(1, len(display_list), i + 1)
        plt.title(title[i])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]), cmap='gray')
    plt.show()

def show_dataset(datagen, num=1):
    for i in range(0, num):
        image, mask = next(datagen)
        display([image[0], mask[0]])
def unet(n_levels, initial_features=32, n_blocks=2, kernel_size=3, pooling_size=2, in_channels=1, out_channels=1):
    # n_blocks = how many convs in each level
    inputs = keras.layers.Input(shape=(IMAGE_HEIGHT, IMAGE_WIDTH, in_channels))
    x = inputs
    convpars = dict(kernel_size=kernel_size, activation='relu', padding='same')
    # downstream
    skips = {}
    for level in range(n_levels):
        for _ in range(n_blocks):
            x = keras.layers.Conv2D(initial_features * 2 ** level, **convpars)(x)
        if level < n_levels - 1:
            skips[level] = x
            x = keras.layers.MaxPool2D(pooling_size)(x)
    # upstream
    for level in reversed(range(n_levels - 1)):
        x = keras.layers.Conv2DTranspose(initial_features * 2 ** level, strides=pooling_size, **convpars)(x)
        x = keras.layers.Concatenate()([x, skips[level]])
        for _ in range(n_blocks):
            x = keras.layers.Conv2D(initial_features * 2 ** level, **convpars)(x)
    # output
    activation = 'sigmoid' if out_channels == 1 else 'softmax'
    x = keras.layers.Conv2D(out_channels, kernel_size=1, activation='sigmoid', padding='same')(x)
    return keras.Model(inputs=[inputs], outputs=[x], name=f'UNET-L{n_levels}-F{initial_features}')
EPOCH_STEP_TRAIN = NUM_TRAIN // BATCH_SIZE_TRAIN
EPOCH_STEP_TEST = NUM_TEST // BATCH_SIZE_TRAIN
model = unet(4)
model.compile(optimizer="adam", loss='binary_crossentropy', metrics=['accuracy'])
model.fit_generator(generator=train_generator, steps_per_epoch=EPOCH_STEP_TRAIN, validation_data=test_generator, validation_steps=EPOCH_STEP_TEST, epochs=NUM_OF_EPOCHS)
def show_prediction(datagen, num=1):
    for i in range(0, num):
        image, mask = next(datagen)
        pred_mask = model.predict(image)[0] > 0.5
        display([image[0], mask[0], pred_mask])

show_prediction(test_generator, 2)
To investigate your model layer by layer, see this example of how to show the model's summary and how to save the model:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# Create the input
inputs = keras.Input(shape=(1,))
# Create a layer
dense = layers.Dense(64, activation="relu")
x = dense(inputs)
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10)(x)
# Compose the model
model = keras.Model(inputs=inputs, outputs=outputs, name="Spesiaali")
# Inspect it
model.summary()
# Save it
model.save(".\model_to_be_investigated_by_someone_else_to_help_you")
...this makes it possible for you to see the whole model structure for "debugging your AI". If you do not find the solution yourself, then add the last line of the example to your own code, put the resulting folder e.g. on GitHub, and ask someone else to look at the structure of your model to help you solve the problem.
The blue drawing (in the figure referenced here) illustrated the output of the model.summary() command, and the red line highlighted the output shape of the first dense layer.
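If you want to literally feed an image through the network stage by stage and find where the NaNs begin, here is a minimal sketch (assuming model is the U-Net built above and image is one batch taken from your generator) that builds a probe model per layer and prints each layer's output statistics:

import numpy as np
from tensorflow import keras

# Probe every layer with one batch and report where NaNs first appear.
for layer in model.layers:
    probe = keras.Model(inputs=model.inputs, outputs=layer.output)
    out = probe.predict(image)
    print(f"{layer.name}: shape={out.shape}, NaNs={np.isnan(out).sum()}")

The first layer that reports a non-zero NaN count is where the problem starts.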

AttributeError: 'Tensor' object has no attribute 'assign' while attempting transfer learning

I based my attempt on the example from https://colab.research.google.com/github/google/eng-edu/blob/master/ml/pc/exercises/image_classification_part3.ipynb#scrollTo=BMXb913pbvFg
and attempted transfer learning for classifying cats and dogs, then did the same for an example of zebras and elephants, and encountered no issues.
The code for my example was almost the same as the tutorial and worked:
image_data_generator = ImageDataGenerator(rescale=1./255., validation_split=.2)
image_size = 128
IMAGE_WIDTH = image_size
IMAGE_HEIGHT = image_size
BATCH_SIZE = 128
image_data_dir = "images"
seed = 1
image_generator = image_data_generator.flow_from_directory(
    image_data_dir,
    target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
    batch_size=BATCH_SIZE,
    color_mode='rgb',
    subset='training',
    seed=seed,
    class_mode='binary')
test_generator = image_data_generator.flow_from_directory(
    image_data_dir,
    target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
    batch_size=BATCH_SIZE,
    color_mode='rgb',
    subset='validation',
    seed=seed,
    class_mode='binary')
from keras.applications.vgg16 import VGG16

inputs = Input((image_size, image_size, 3))
vgg16 = VGG16(include_top=False, weights='imagenet', pooling=None,
              input_shape=(image_size, image_size, 3), classes=2)
for layer in vgg16.layers:
    layer.trainable = False
for layer in vgg16.layers:
    print(layer.name, layer.trainable)
x = Flatten()(vgg16.get_layer('block5_pool').output)
x_ = BatchNormalization()(x)
x_ = Dense(128, activation='relu')(x_)
x_ = BatchNormalization()(x_)
x_ = Dense(64, activation='relu')(x_)
x_ = Dense(1, activation='sigmoid')(x_)
model = Model(vgg16.input, x_)
model.compile(loss='binary_crossentropy',
              optimizer='Adam',
              metrics=['binary_accuracy'])
model.summary()
model.fit_generator(image_generator,
                    steps_per_epoch=n/BATCH_SIZE,
                    epochs=50,
                    validation_data=test_generator)
This code ran without trouble.
The following one threw this error:
AttributeError Traceback (most recent call last)
<ipython-input-6-55354bac5e27> in <module>()
43 validation_data=test_generator,
44 validation_steps= 458 // BATCH_SIZE,
---> 45 epochs=90)
--> 228 return ref.assign(value, name=name)
229
230
AttributeError: 'Tensor' object has no attribute 'assign'
Here is the code:
image_data_generator = ImageDataGenerator(rescale=1./255., validation_split=.15)
image_data_dir = "processed/img/"
seed = 1
IMAGE_SIZE = SIZE
IMAGE_WIDTH, IMAGE_HEIGHT = IMAGE_SIZE, IMAGE_SIZE
image_generator = image_data_generator.flow_from_directory(
    image_data_dir,
    target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
    batch_size=BATCH_SIZE,
    class_mode="categorical",  # could be "binary" for binary problems
    color_mode='rgb',
    subset='training',
    seed=seed)
test_generator = image_data_generator.flow_from_directory(
    image_data_dir,
    target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
    batch_size=BATCH_SIZE,
    class_mode="categorical",  # could be "binary" for binary problems
    color_mode='rgb',
    subset='validation',
    seed=seed)
layer1 = Input((SIZE, SIZE, 3))
vgg16 = VGG16(include_top=False, weights='imagenet', pooling=None,
              input_shape=(SIZE, SIZE, 3), classes=num_classes)
for layer in vgg16.layers:
    layer.trainable = False
bnorm = BatchNormalization(name="14")(vgg16.get_layer('block4_pool').output)
conv = Conv2D(512, (3, 3), activation='relu', padding='same', name="13")()
pool = MaxPooling2D(pool_size=(2, 2), name="15")(conv)
pool = BatchNormalization(name="16")(pool)
x = Flatten()(pool)
x_ = BatchNormalization()(x)
x_ = Dense(128, activation='relu')(x_)
x_ = BatchNormalization()(x_)
x_ = Dense(64, activation='relu')(x_)
x_ = Dense(num_classes, activation='softmax')(x_)
model = Model(vgg16.input, x_)
model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['categorical_accuracy'])
model.fit_generator(image_generator,
                    steps_per_epoch=2606 // BATCH_SIZE,
                    validation_data=test_generator,
                    validation_steps=458 // BATCH_SIZE,
                    epochs=90)
To my knowledge, there are a few useless lines (such as the creation of an input layer that is not used in the architecture I end up with).
Generally, I think both cases are pretty similar; I only made a few changes because the new problem has 3 classes instead of 2.
Unfortunately, googling that error didn't help much. Does anybody see an error I made?

ValueError: Error when checking target: expected flatten_1 to have shape (2048,) but got array with shape (2,)

I'm trying to run this code, and I have this error:
ValueError: Error when checking target: expected flatten_4 to have shape (2048,) but got array with shape (2,)
NUM_CLASSES = 2
CHANNELS = 3
IMAGE_RESIZE = 224
RESNET50_POOLING_AVERAGE = 'avg'
DENSE_LAYER_ACTIVATION = 'softmax'
OBJECTIVE_FUNCTION = 'categorical_crossentropy'
NUM_EPOCHS = 10
EARLY_STOP_PATIENCE = 3
STEPS_PER_EPOCH_TRAINING = 10
STEPS_PER_EPOCH_VALIDATION = 10
BATCH_SIZE_TRAINING = 100
BATCH_SIZE_VALIDATION = 100
BATCH_SIZE_TESTING = 1
resnet_weights_path = '../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
model = Sequential()
train_data_dir = "C:\\Users\\Desktop\\RESNET"
model = ResNet50(include_top=True, weights='imagenet')
model.layers.pop()
model = Model(input=model.input,output=model.layers[-1].output)
model.summary()
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.01, momentum=0.9), metrics= ['binary_accuracy'])
data_dir = "C:\\Users\\Desktop\\RESNET"
batch_size = 32
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
image_size = IMAGE_RESIZE
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
def append_ext(fn):
    return fn + ".jpg"
from os import listdir
from os.path import isfile, join
dir_path = os.path.dirname(os.path.realpath(__file__))
train_dir_path = dir_path + '\data'
onlyfiles = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
data_labels = [0, 1]
t = []
maxi = 25145
LieOffset = 15799
i = 0
while i < maxi:  # t = tuple
    if i <= LieOffset:
        t.append(label['Lie'])
    else:
        t.append(label['Truth'])
    i = i + 1
train_datagenerator = ImageDataGenerator(rescale=1./255,
                                         shear_range=0.2,
                                         zoom_range=0.2,
                                         horizontal_flip=True,
                                         validation_split=0.2)
train_generator = train_datagenerator.flow_from_directory(
    train_data_dir,
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_TRAINING,
    class_mode='categorical', shuffle=False, subset='training')
validation_generator = train_datagenerator.flow_from_directory(
    train_data_dir,  # same directory as training data
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_TRAINING,
    class_mode='categorical', shuffle=False, subset='validation')
(BATCH_SIZE_TRAINING, len(train_generator), BATCH_SIZE_VALIDATION, len(validation_generator))
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint
cb_early_stopper = EarlyStopping(monitor = 'val_loss', patience = EARLY_STOP_PATIENCE)
cb_checkpointer = ModelCheckpoint(filepath = '../working/best.hdf5', monitor = 'val_loss', save_best_only = True, mode = 'auto')
from sklearn.grid_search import ParameterGrid
param_grid = {'epochs': [5, 10, 15], 'steps_per_epoch' : [10, 20, 50]}
grid = ParameterGrid(param_grid)
# val_loss as final model
for params in grid:
    print(params)
    fit_history = model.fit_generator(
        train_generator,
        steps_per_epoch=STEPS_PER_EPOCH_TRAINING,
        epochs=NUM_EPOCHS,
        validation_data=validation_generator,
        validation_steps=STEPS_PER_EPOCH_VALIDATION,
        callbacks=[cb_checkpointer, cb_early_stopper])
model.load_weights("../working/best.hdf5")
The error says that your model's output layer should have 2 nodes, whereas it has 2048, because you are using the output of the avg_pool layer of the ResNet50 model as your model's output. You can add a Dense layer with 2 nodes on top of the avg_pool layer to solve the problem:
model = ResNet50(include_top=True, weights='imagenet')
print(model.summary())
x = model.get_layer('avg_pool').output
predictions = Dense(2, activation='sigmoid')(x)
model = Model(inputs=model.input, outputs=predictions)
print(model.summary())
Since I'm not quite sure what type of problem you are solving, I assumed multi-label classification with 2 classes, as your data label shape is (2,).
However, if you are solving a binary classification problem, you need to change your labels so that each one is either 1 or 0: change class_mode='categorical' to class_mode='binary' in both train_generator and validation_generator. In that case the model's output layer should have 1 node:
predictions = Dense(1, activation='sigmoid')(x)
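For completeness, a minimal sketch of that binary variant (assuming the generators have been switched to class_mode='binary' and model is the ResNet50 loaded above):

x = model.get_layer('avg_pool').output
predictions = Dense(1, activation='sigmoid')(x)  # single node for binary output
model = Model(inputs=model.input, outputs=predictions)
model.compile(loss='binary_crossentropy',
              optimizer=SGD(lr=0.01, momentum=0.9),
              metrics=['binary_accuracy'])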
