I'm trying to implement a simple U-Net using Keras with a TensorFlow 2.0 backend.
I use a custom image generator without augmented data. My templates and masks are 1536x1536 RGB images (the masks are black and white).
import numpy as np
import os
import cv2
import random
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import Input, BatchNormalization, Activation, Dense, Dropout
from tensorflow.python.keras.layers.core import Lambda, RepeatVector, Reshape
from tensorflow.python.keras.layers.convolutional import Conv2D, Conv2DTranspose
from tensorflow.python.keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D
from tensorflow.python.keras.layers.merge import concatenate, add
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import tensorflow as tf
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
def data_gen(templates_folder, masks_folder, image_width, batch_size): # Custom image generator
    counter = 0
    images_list = os.listdir(templates_folder)
    random.shuffle(images_list)
    while True:
        templates_pack = np.zeros((batch_size, image_width, image_width, 3)).astype('float')
        masks_pack = np.zeros((batch_size, image_width, image_width, 1)).astype('float')
        for i in range(counter, counter + batch_size):
            template = cv2.imread(templates_folder + '/' + images_list[i]) / 255.
            templates_pack[i - counter] = template
            mask = cv2.imread(masks_folder + '/' + images_list[i], cv2.IMREAD_GRAYSCALE) / 255.
            mask = mask.reshape(image_width, image_width, 1) # Add extra dimension for parity with template size [1536 * 1536 * 3]
            masks_pack[i - counter] = mask
        counter += batch_size
        if counter + batch_size >= len(images_list):
            counter = 0
            random.shuffle(images_list)
        yield templates_pack, masks_pack
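A quick way to sanity-check what the generator yields (a minimal sketch; it assumes the training folders defined further down contain at least one template/mask pair):
# Pull one batch from the generator and check the array shapes
check_gen = data_gen("E:/train/templates", "E:/train/masks", 1536, batch_size=1)
x_batch, y_batch = next(check_gen)
print(x_batch.shape)  # expected: (1, 1536, 1536, 3)
print(y_batch.shape)  # expected: (1, 1536, 1536, 1)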
def conv2d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer="he_normal", padding="same")(input_tensor)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer="he_normal", padding="same")(x)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x
def get_unet(input_img, n_filters=16, dropout=0.5, batchnorm=True):
    c1 = conv2d_block(input_img, n_filters=n_filters * 1, kernel_size=3, batchnorm=batchnorm)
    p1 = MaxPooling2D((2, 2))(c1)
    p1 = Dropout(dropout * 0.5)(p1)
    c2 = conv2d_block(p1, n_filters=n_filters * 2, kernel_size=3, batchnorm=batchnorm)
    p2 = MaxPooling2D((2, 2))(c2)
    p2 = Dropout(dropout)(p2)
    c3 = conv2d_block(p2, n_filters=n_filters * 4, kernel_size=3, batchnorm=batchnorm)
    p3 = MaxPooling2D((2, 2))(c3)
    p3 = Dropout(dropout)(p3)
    c4 = conv2d_block(p3, n_filters=n_filters * 8, kernel_size=3, batchnorm=batchnorm)
    p4 = MaxPooling2D(pool_size=(2, 2))(c4)
    p4 = Dropout(dropout)(p4)
    c5 = conv2d_block(p4, n_filters=n_filters * 16, kernel_size=3, batchnorm=batchnorm)
    u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides=(2, 2), padding='same')(c5)
    u6 = concatenate([u6, c4])
    u6 = Dropout(dropout)(u6)
    c6 = conv2d_block(u6, n_filters=n_filters * 8, kernel_size=3, batchnorm=batchnorm)
    u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides=(2, 2), padding='same')(c6)
    u7 = concatenate([u7, c3])
    u7 = Dropout(dropout)(u7)
    c7 = conv2d_block(u7, n_filters=n_filters * 4, kernel_size=3, batchnorm=batchnorm)
    u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides=(2, 2), padding='same')(c7)
    u8 = concatenate([u8, c2])
    u8 = Dropout(dropout)(u8)
    c8 = conv2d_block(u8, n_filters=n_filters * 2, kernel_size=3, batchnorm=batchnorm)
    u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2), padding='same')(c8)
    u9 = concatenate([u9, c1], axis=3)
    u9 = Dropout(dropout)(u9)
    c9 = conv2d_block(u9, n_filters=n_filters * 1, kernel_size=3, batchnorm=batchnorm)
    outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)
    model = Model(inputs=[input_img], outputs=[outputs])
    return model
callbacks = [
EarlyStopping(patience=10, verbose=1),
ReduceLROnPlateau(factor=0.1, patience=3, min_lr=0.00001, verbose=1),
ModelCheckpoint("model-prototype.h5", verbose=1, save_best_only=True,
save_weights_only=True)
]
train_templates_path = "E:/train/templates"
train_masks_path = "E:/train/masks"
valid_templates_path = "E:/valid/templates"
valid_masks_path = "E:/valid/masks"
TRAIN_SET_SIZE = len(os.listdir(train_templates_path))
VALID_SET_SIZE = len(os.listdir(valid_templates_path))
BATCH_SIZE = 1
EPOCHS = 100
STEPS_PER_EPOCH = TRAIN_SET_SIZE // BATCH_SIZE
VALIDATION_STEPS = VALID_SET_SIZE // BATCH_SIZE
IMAGE_WIDTH = 1536
train_generator = data_gen(train_templates_path, train_masks_path, IMAGE_WIDTH, batch_size = BATCH_SIZE)
val_generator = data_gen(valid_templates_path, valid_masks_path, IMAGE_WIDTH, batch_size = BATCH_SIZE)
input_img = Input((IMAGE_WIDTH, IMAGE_WIDTH, 1), name='img')  # single-channel input layer, as implied by the error below
model = get_unet(input_img, n_filters=16, dropout=0.05, batchnorm=True)
model.compile(optimizer=Adam(lr=0.001), loss="binary_crossentropy", metrics=["accuracy"])
results = model.fit_generator(train_generator, epochs=EPOCHS, steps_per_epoch=STEPS_PER_EPOCH, validation_data=val_generator, validation_steps=VALIDATION_STEPS, callbacks=callbacks)
For some reason I get the following error:
Epoch 1/100
Traceback (most recent call last):
File "E:/Explorium/python/unet_trainer.py", line 83, in <module>
results = model.fit_generator(train_generator, epochs=EPOCHS, steps_per_epoch=STEPS_PER_EPOCH, validation_data=val_generator, validation_steps=VALIDATION_STEPS, callbacks=callbacks)
File "C:\Users\E-soft\Anaconda3\envs\Explorium\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 1297, in fit_generator
steps_name='steps_per_epoch')
File "C:\Users\E-soft\Anaconda3\envs\Explorium\lib\site-packages\tensorflow_core\python\keras\engine\training_generator.py", line 265, in model_iteration
batch_outs = batch_function(*batch_data)
File "C:\Users\E-soft\Anaconda3\envs\Explorium\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 973, in train_on_batch
class_weight=class_weight, reset_metrics=reset_metrics)
File "C:\Users\E-soft\Anaconda3\envs\Explorium\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py", line 253, in train_on_batch
extract_tensors_from_dataset=True)
File "C:\Users\E-soft\Anaconda3\envs\Explorium\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 2472, in _standardize_user_data
exception_prefix='input')
File "C:\Users\E-soft\Anaconda3\envs\Explorium\lib\site-packages\tensorflow_core\python\keras\engine\training_utils.py", line 574, in standardize_input_data
str(data_shape))
ValueError: Error when checking input: expected img to have shape (1536, 1536, 1) but got array with shape (1536, 1536, 3)
It seems that Keras fails the input check in standardize_input_data(), but I have no idea why this is happening.
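The error message says a layer named img expects (1536, 1536, 1), i.e. the Input layer was apparently declared with a single channel while the generator yields three-channel templates. A minimal sketch of the likely fix, under that assumption:
# Declare the input with 3 channels so it matches the RGB batches the generator yields
input_img = Input((IMAGE_WIDTH, IMAGE_WIDTH, 3), name='img')
model = get_unet(input_img, n_filters=16, dropout=0.05, batchnorm=True)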
I know that reshape problems are a basic thing and that there are a lot of solutions out there, but I can't find one that works for me.
I'm currently trying to use ResNet50 to train on the Iceberg challenge (https://www.kaggle.com/competitions/statoil-iceberg-classifier-challenge):
import numpy as np, pandas as pd
from tensorflow.keras.optimizers import Adam
from keras.models import Model, Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Input, concatenate, GlobalMaxPooling2D
from tensorflow.keras.applications.mobilenet import MobileNet
vgg16_fl = "imagenet"
from tensorflow.keras.applications import VGG16, VGG19, ResNet50, Xception
def get_simple(dropout=0.5):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=(75, 75, 3)))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(dropout))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(dropout))
    model.add(Conv2D(256, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(dropout))
    return model
factory = {
'vgg16': lambda: VGG16(include_top=False, input_shape=(75, 75, 3), weights=vgg16_fl),
'mobilenetv2': lambda: MobileNet(include_top=False, input_shape=(75, 75, 3)),
'resnet50': lambda: ResNet50(include_top=False, input_shape=(200, 200, 3)),
}
def get_model(name='simple', train_base=True, use_angle=False, dropout=0.5, layers=(512, 256)):
    base = factory[name]()
    inputs = [base.input]
    x = GlobalMaxPooling2D()(base.output)
    if use_angle:
        angle_in = Input(shape=(1,))
        angle_x = Dense(1, activation='relu')(angle_in)
        inputs.append(angle_in)
        x = concatenate([x, angle_x])
    for l_sz in layers:
        x = Dense(l_sz, activation='relu')(x)
        x = Dropout(dropout)(x)
    x = Dense(1, activation='sigmoid')(x)
    for l in base.layers:
        l.trainable = train_base
    return Model(inputs=inputs, outputs=x)
data = pd.read_json('/content/drive/MyDrive/iceberg/train.json')
b1 = np.array(data["band_1"].values.tolist()).reshape(-1, 75, 75, 1)
b2 = np.array(data["band_2"].values.tolist()).reshape(-1, 75, 75, 1)
b3 = b1 + b2
X = np.concatenate([b1, b2, b3], axis=3)
y = np.array(data['is_iceberg'])
angle = np.array(pd.to_numeric(data['inc_angle'], errors='coerce').fillna(0))
model = get_model('vgg16', train_base=False, use_angle=True)
model.compile(loss='binary_crossentropy', optimizer=Adam(lr=1e-3), metrics=['accuracy'])
history = model.fit([X, angle], y, shuffle=True, verbose=1, epochs=5)
model = get_model('mobilenetv2', train_base=False, use_angle=True)
model.compile(loss='binary_crossentropy', optimizer=Adam(lr=1e-3), metrics=['accuracy'])
history = model.fit([X, angle], y, shuffle=True, verbose=1, epochs=5)
model = get_model('resnet50', train_base=False, use_angle=True)
model.compile(loss='binary_crossentropy', optimizer=Adam(lr=1e-3), metrics=['accuracy'])
history = model.fit([X, angle], y, shuffle=True, verbose=1, epochs=5)
I can use VGG16 and MobileNet easily, but I can't do the same with ResNet; here's the error:
ValueError Traceback (most recent call last)
<ipython-input-58-cb998dc5f0be> in <module>()
1 model = get_model('resnet50', train_base=False, use_angle=True)
2 model.compile(loss='binary_crossentropy', optimizer=Adam(lr=1e-3), metrics=['accuracy'])
----> 3 history = model.fit([X, angle], y, shuffle=True, verbose=1, epochs=5)
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
1145 except Exception as e: # pylint:disable=broad-except
1146 if hasattr(e, "ag_error_metadata"):
-> 1147 raise e.ag_error_metadata.to_exception(e)
1148 else:
1149 raise
ValueError: in user code:
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 859, in train_step
y_pred = self(x, training=True)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "/usr/local/lib/python3.7/dist-packages/keras/engine/input_spec.py", line 264, in assert_input_compatibility
raise ValueError(f'Input {input_index} of layer "{layer_name}" is '
ValueError: Input 0 of layer "model_13" is incompatible with the layer: expected shape=(None, 200, 200, 3), found shape=(None, 75, 75, 3)
If I try to modify the reshape calls (b1 = np.array(data["band_1"].values.tolist()).reshape(-1, 200, 200, 1), ...) I get:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-4-14c39c176685> in <module>()
1 data = pd.read_json('/content/drive/MyDrive/iceberg/train.json')
----> 2 b1 = np.array(data["band_1"].values.tolist()).reshape(-1, 200, 200, 1)
3 b2 = np.array(data["band_2"].values.tolist()).reshape(-1, 200, 200, 1)
4 b3 = b1 + b2
5
ValueError: cannot reshape array of size 9022500 into shape (200,200,1)
Is there any way to fix this?
The problem is these two lines:
ResNet50(include_top=False, input_shape=(200, 200, 3)),
^^^^^^^^^^^^^^^^^^^^^^^^^^
np.array(data["band_2"].values.tolist()).reshape(-1, 75, 75, 1)
^^^^^^^^^^^^^
Since each sample in your dataset is 75x75, it obviously can't be reshaped to 200x200.
It's probably worth just using
ResNet50(include_top=False, input_shape=(75, 75, 3)),
instead of your current one, since X is stacked from three bands.
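For reference, a minimal sketch of the corrected factory entry together with a quick shape check on the stacked bands (this assumes you keep the 75x75, three-band arrays built from band_1/band_2 in the question):
# Corrected factory entry: the input size must match the 75x75, 3-channel arrays fed to fit()
factory['resnet50'] = lambda: ResNet50(include_top=False, input_shape=(75, 75, 3))
print(X.shape)  # expected: (num_samples, 75, 75, 3)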
I am trying super-resolution of RGB images. I have an input image and an output image; I run the model, and when I predict, the output image loses saturation compared to the original. The image size is 512x512 pixels. I don't know if this is happening because of the Conv2DTranspose layers.
Image data generator
import tensorflow as tf
tf.config.run_functions_eagerly(True)
import os
import numpy as np
import cv2
def generator(idir,odir,batch_size,shuffle ):
    i_list=os.listdir(idir)
    o_list=os.listdir(odir)
    batch_index=0
    batch_size = batch_size
    sample_count=len(i_list)
    while True:
        input_image_batch=[]
        output_image_batch=[]
        i_list=os.listdir(idir)
        o_list=os.listdir(odir)
        batch_size = batch_size
        sample_count=len(i_list)
        for i in range(batch_index * batch_size, (batch_index + 1) * batch_size ):
            # iterate for a batch
            j=i % sample_count # cycle j value over range of available images
            k=j % batch_size # cycle k value over batch size
            if shuffle == True: # if shuffle, select a random integer between 0 and sample_count-1 to pick as the image-label pair
                m=np.random.randint(low=0, high=sample_count-1, size=None, dtype=int)
            else:
                m=j
            path_to_in_img=os.path.join(idir,i_list[m])
            path_to_out_img=os.path.join(odir,i_list[m])
            input_image=cv2.imread(path_to_in_img)
            input_image=cv2.resize(input_image,(512,512))
            input_image = cv2.cvtColor(input_image,cv2.COLOR_BGR2RGB) # create the target image from the input image
            output_image=cv2.imread(path_to_out_img)
            output_image=cv2.resize(output_image,(512,512))
            output_image = cv2.cvtColor(output_image,cv2.COLOR_BGR2RGB)
            input_image_batch.append(input_image)
            output_image_batch.append(output_image)
        input_image_array=np.array(input_image_batch)
        input_image_array = input_image_array / 255.0
        output_image_array=np.array(output_image_batch)
        output_image_array = output_image_array / 255.0
        batch_index= batch_index + 1
        yield (input_image_array, output_image_array)
        if batch_index * batch_size > sample_count:
            batch_index=0
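For completeness, a minimal sketch of how this generator could be wired into training once the model below is built (the folder paths and batch size here are placeholders, not from the original code):
# Hypothetical training wiring for the generator above (paths and batch size are placeholders)
batch_size = 4
train_in_dir, train_out_dir = 'data/low_res', 'data/high_res'
train_gen = generator(train_in_dir, train_out_dir, batch_size, shuffle=True)
steps = len(os.listdir(train_in_dir)) // batch_size
# model = simple_unet_model(512, 512, 3)  # built in the "Model" section below
# model.fit(train_gen, steps_per_epoch=steps, epochs=20)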
Model
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda
from keras.metrics import MeanIoU
kernel_initializer = 'he_uniform' # also try 'he_normal' but model not converging...
################################################################
def simple_unet_model(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS):
    # Build the model
    inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
    # s = Lambda(lambda x: x / 255)(inputs)  # No need for this if we normalize our inputs beforehand
    s = inputs
    # Contraction path
    c1 = Conv2D(16, (3, 3), kernel_initializer=kernel_initializer, activation='relu', padding='same')(s)
    c1 = Dropout(0.5)(c1)
    p1 = MaxPooling2D((2, 2))(c1)
    c2 = Conv2D(32, (3, 3), kernel_initializer=kernel_initializer, activation='relu', padding='same')(p1)
    c2 = Dropout(0.5)(c2)
    p2 = MaxPooling2D((2, 2))(c2)
    c3 = Conv2D(64, (3, 3), kernel_initializer=kernel_initializer, activation='relu', padding='same')(p2)
    c3 = Dropout(0.5)(c3)
    p3 = MaxPooling2D((2, 2))(c3)
    c4 = Conv2D(128, (3, 3), kernel_initializer=kernel_initializer, activation='relu', padding='same')(p3)
    c4 = Dropout(0.5)(c4)
    p4 = MaxPooling2D(pool_size=(2, 2))(c4)
    c5 = Conv2D(256, (3, 3), kernel_initializer=kernel_initializer, activation='relu', padding='same')(p4)
    c5 = Dropout(0.5)(c5)
    # Expansive path
    u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)
    u6 = concatenate([u6, c4])
    c6 = Conv2D(128, (3, 3), kernel_initializer=kernel_initializer, activation='relu', padding='same')(u6)
    c6 = Dropout(0.5)(c6)
    u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
    u7 = concatenate([u7, c3])
    c7 = Conv2D(64, (3, 3), kernel_initializer=kernel_initializer, activation='relu', padding='same')(u7)
    c7 = Dropout(0.5)(c7)
    u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
    u8 = concatenate([u8, c2])
    c8 = Conv2D(32, (3, 3), kernel_initializer=kernel_initializer, activation='relu', padding='same')(u8)
    c8 = Dropout(0.5)(c8)
    u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)
    u9 = concatenate([u9, c1], axis=3)
    c9 = Conv2D(16, (3, 3), kernel_initializer=kernel_initializer, activation='relu', padding='same')(u9)
    c9 = Dropout(0.5)(c9)
    outputs = Conv2D(3, (1, 1), activation='sigmoid')(c9)
    model = Model(inputs, outputs)
    model.summary()
    return model
model.compile(optimizer='adam',loss='mean_squared_error',metrics=[psnr,ssim,tf.losses.mean_squared_error])
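The compile call above references psnr and ssim, which are not defined in the snippet; one way to define them with TensorFlow's built-in image ops (a sketch, assuming the images are scaled to [0, 1] as in the generator):
import tensorflow as tf

def psnr(y_true, y_pred):
    # peak signal-to-noise ratio; max_val=1.0 because the inputs are normalized to [0, 1]
    return tf.image.psnr(y_true, y_pred, max_val=1.0)

def ssim(y_true, y_pred):
    # structural similarity index on the same [0, 1] range
    return tf.image.ssim(y_true, y_pred, max_val=1.0)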
I have a problem with the prediction output. Below is the model code of my classifier (6 classes).
I'm using Tensorflow 2.2
The code is:
import tensorflow as tf
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow import keras
import cv2
import PIL
import os
import numpy as np
#Dir
train_dir ='/home/Tensorflow/Dataset_mix/train/'
validation_dir = '/home/Tensorflow/Dataset_mix/test/'
ACCURACY_THRESHOLD = 0.99
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if(logs.get('accuracy') > ACCURACY_THRESHOLD):
            print("\nReached %2.2f%% accuracy, so stopping training!!" %(ACCURACY_THRESHOLD*100))
            self.model.stop_training = True
train_0_dir = os.path.join(train_dir, '0')
train_1_dir = os.path.join(train_dir, '1')
train_2_dir = os.path.join(train_dir, '2')
train_3_dir = os.path.join(train_dir, '3')
train_4_dir = os.path.join(train_dir, '4')
train_5_dir = os.path.join(train_dir, '5')
validation_0_dir = os.path.join(validation_dir, '0')
validation_1_dir = os.path.join(validation_dir, '1')
validation_2_dir = os.path.join(validation_dir, '2')
validation_3_dir = os.path.join(validation_dir, '3')
validation_4_dir = os.path.join(validation_dir, '4')
validation_5_dir = os.path.join(validation_dir, '5')
num_0_tr = len(os.listdir(train_0_dir))
num_1_tr = len(os.listdir(train_1_dir))
num_2_tr = len(os.listdir(train_2_dir))
num_3_tr = len(os.listdir(train_3_dir))
num_4_tr = len(os.listdir(train_4_dir))
num_5_tr = len(os.listdir(train_5_dir))
num_0_val = len(os.listdir(validation_0_dir))
num_1_val = len(os.listdir(validation_1_dir))
num_2_val = len(os.listdir(validation_2_dir))
num_3_val = len(os.listdir(validation_3_dir))
num_4_val = len(os.listdir(validation_4_dir))
num_5_val = len(os.listdir(validation_5_dir))
total_train = num_0_tr + num_1_tr + num_2_tr + num_3_tr + num_4_tr + num_5_tr
total_val = num_0_val + num_1_val + num_2_val + num_3_val + num_4_val + num_5_val
batch_size = 32
epochs = 20
IMG_HEIGHT = 128
IMG_WIDTH = 128
callbacks = myCallback()
CLASS_NAMES = ['0','1','2','3','4','5']
train_image_generator = ImageDataGenerator(rescale=1./255, zoom_range=0.25, horizontal_flip=True, height_shift_range=.15, rotation_range=75, brightness_range=(0.25,0.9)) # Generator for our training data
validation_image_generator = ImageDataGenerator(rescale=1./255) # Generator for our validation data
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
directory=train_dir,
color_mode='grayscale',
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
classes = list(CLASS_NAMES))
val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size,
directory=validation_dir,
color_mode='grayscale',
target_size=(IMG_HEIGHT, IMG_WIDTH),
classes = list(CLASS_NAMES))
model = Sequential([
Conv2D(16, 3, activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,1)), #(W-F)/S +1 (128-3)/1 + 1 = 126 (32, 126, 126)
Conv2D(16, 3, activation='relu'), #(32, 124, 124)
MaxPooling2D(), #(32, 62. 62)
Conv2D(32, 3, activation='relu'), # (64, 60, 60)
Conv2D(32, 3, activation='relu'), # (64, 58, 58)
MaxPooling2D(), # (64, 26, 26)
Conv2D(64, 3, activation='relu'), # (128, 24, 24)
Conv2D(64, 3, activation='relu'), # (128, 22, 22)
MaxPooling2D(), # (128, 11, 11)
Conv2D(128, 3, activation='relu'), # (256, 9, 9)
Conv2D(128, 3, activation='relu'), # (256, 7, 7)
MaxPooling2D(), # (256, 3, 3)
Flatten(),
Dense(1152, activation='relu'),
Dropout(0.3),
Dense(1152, activation='relu'),
Dropout(0.3),
Dense(6, activation='softmax')
])
opt = keras.optimizers.Adam(learning_rate=0.0001)
model.compile(optimizer=opt,
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
history = model.fit(
train_data_gen,
steps_per_epoch=total_train // batch_size,
epochs=epochs,
validation_data=val_data_gen,
validation_steps=total_val // batch_size,
callbacks= callbacks,
verbose=1
)
model.save('saved_model/ModelMix')
model.save('Modelmix.h5')
And for prediction:
image_to_predict = cv2.imread('./image054.png')
image_to_predict = cv2.cvtColor(image_to_predict, cv2.COLOR_BGR2GRAY)
image_to_predict = image_to_predict.reshape(1, 128, 128, 1)
print(image_to_predict.shape)
prediction = model.predict(image_to_predict)
print(prediction)
Well, when I do model.predict(image_to_predict) and I print the result, the output that comes back to me is the following:
[[0. 0. 0. 1. 0. 0.]]
The output is correct, the detected class is the one expected, but it is not what I want. I need an output that gives me back the confidence with which each choice was made, something like:
[[0.02 0.04 0.10 0.72 0.08 0.04]]
But I can't find any function that gives me the certainty of the evaluation.
Does anyone have any idea how I can do that?
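model.predict on a softmax output already returns per-class probabilities, so [[0. 0. 0. 1. 0. 0.]] most likely just means the network is extremely confident on that image; note also that the prediction code does not apply the rescale=1./255 used by the training generators. A minimal sketch of a prediction that mirrors the training preprocessing and prints the probabilities without rounding (assuming the trained model from above is available as model):
import numpy as np
import cv2

image_to_predict = cv2.imread('./image054.png')
image_to_predict = cv2.cvtColor(image_to_predict, cv2.COLOR_BGR2GRAY)
image_to_predict = cv2.resize(image_to_predict, (128, 128))      # match IMG_HEIGHT / IMG_WIDTH
image_to_predict = image_to_predict.astype('float32') / 255.0    # match rescale=1./255 from training
image_to_predict = image_to_predict.reshape(1, 128, 128, 1)

prediction = model.predict(image_to_predict)
np.set_printoptions(suppress=True, precision=4)                  # print plain decimals instead of 0. / 1.
print(prediction)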
I want to test what happens when the VAE's posterior is a GMM. I wrote the code, but there is an error:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
# Dependency imports
import numpy as np
import matplotlib.pyplot as plt
from absl import flags
import numpy as np
from six.moves import urllib
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from tensorflow.compat.v1.keras.datasets import mnist
tf.compat.v1.disable_eager_execution()
tf.reset_default_graph()
tf.test.is_gpu_available()
tfd = tfp.distributions
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1).astype('float32')
# BACK TO [0,1]
train_images /= 255.
test_images /= 255.
# Binarized
train_images[train_images >= .5] = 1. # shape = (60000,28,28,1)
train_images[train_images < .5] = 0.
base_depth = 32
latent_size = 2
mixture_components = 10
IMAGE_SHAPE = [28, 28, 1]
# encoder
x = tf.keras.layers.Input(shape=(28, 28, 1))
h1 = tf.keras.layers.Conv2D(filters=base_depth, kernel_size=5, strides=1,
padding='same', activation='relu')
h2 = tf.keras.layers.Conv2D(filters=base_depth, kernel_size=5, strides=2,
padding='same', activation='relu')
h3 = tf.keras.layers.Conv2D(filters=base_depth * 2, kernel_size=5, strides=1,
padding='same', activation='relu')
h4 = tf.keras.layers.Conv2D(filters=base_depth * 2, kernel_size=5, strides=2,
padding='same', activation='relu')
h5 = tf.keras.layers.Conv2D(filters=base_depth * 6, kernel_size=3, strides=1,
padding='same', activation='relu')
h6 = tf.keras.layers.Conv2D(filters=latent_size * 4, kernel_size=7, padding='valid'
, activation='relu')
flatten1 = tf.keras.layers.Flatten()
encoder_end = tf.keras.layers.Dense(2 * mixture_components * latent_size + mixture_components, activation=None)
e1 = h1(x)
e2 = h2(e1)
e3 = h3(e2)
e4 = h4(e3)
e5 = h5(e4)
e6 = h6(e5)
e7 = flatten1(e6)
encoder_out = encoder_end(e7) # parameters of GMM
# split {mean,variance,pi}
loc, raw_scale_diag, mixture_logits = tf.split(encoder_out,
[latent_size * mixture_components, latent_size * mixture_components,
mixture_components], axis=-1)
loc = tf.reshape(loc, [-1, mixture_components, latent_size])
raw_scale_diag = tf.reshape(raw_scale_diag, [-1, mixture_components, latent_size]) # [B,k,E]
# posterior
approx_posterior = tfd.MixtureSameFamily(components_distribution=tfd.MultivariateNormalDiag(
loc=loc,
scale_diag=tf.nn.softplus(raw_scale_diag)),
mixture_distribution=tfd.Categorical(logits=mixture_logits),
)
# sample from posterior (16 latent variable z)
approx_posterior_samples = approx_posterior._sample_n(16, seed=None)
# decoder
decoder = tf.keras.Sequential([
tf.keras.layers.Conv2DTranspose(2 * base_depth, kernel_size=7, padding='valid', activation='relu'),
tf.keras.layers.Conv2DTranspose(2 * base_depth, kernel_size=5, padding='same', activation='relu'),
tf.keras.layers.Conv2DTranspose(2 * base_depth, kernel_size=5, strides=2, padding='valid', activation='relu'),
tf.keras.layers.Conv2DTranspose(base_depth, kernel_size=5, padding='same', activation='relu'),
tf.keras.layers.Conv2DTranspose(base_depth, kernel_size=5, strides=2, padding='same', activation='relu'),
tf.keras.layers.Conv2DTranspose(base_depth, kernel_size=5, padding='same', activation='relu'),
tf.keras.layers.Conv2D(IMAGE_SHAPE[-1], kernel_size=5, padding='same', activation=None)
])
original_shape = tf.shape(input=approx_posterior_samples)
approx_posterior_samples = tf.reshape(approx_posterior_samples, (-1, 1, 1, latent_size))
logits = decoder(approx_posterior_samples)
logits = tf.reshape(
logits, shape=tf.concat([original_shape[:-1], IMAGE_SHAPE], axis=0))
logits = tf.reduce_mean(logits, axis=0)
logits = tf.reshape(logits, shape=tf.shape(x))
# build model
model = tf.keras.Model(x, logits)
# prior of z (also GMM)
def make_mixture_prior(latent_size, mixture_components):
    loc = tf.compat.v1.get_variable(
        name="loc", shape=[mixture_components, latent_size])
    raw_scale_diag = tf.compat.v1.get_variable(
        name="raw_scale_diag", shape=[mixture_components, latent_size])
    mixture_logits = tf.compat.v1.get_variable(
        name="mixture_logits", shape=[mixture_components])
    return tfd.MixtureSameFamily(
        components_distribution=tfd.MultivariateNormalDiag(
            loc=loc,
            scale_diag=tf.nn.softplus(raw_scale_diag)),
        mixture_distribution=tfd.Categorical(logits=mixture_logits),
        name="prior")
# latent_size=2,mixture_components=10
latent_prior = make_mixture_prior(2, 10)
# reconstruction error
distortion = tf.nn.sigmoid_cross_entropy_with_logits(labels=x, logits=logits)
avg_distortion = tf.reduce_mean(distortion)
# kl divergence
rate = (approx_posterior.log_prob(approx_posterior_samples) - latent_prior.log_prob(approx_posterior_samples))
avg_rate = tf.reduce_mean(rate)
loss = avg_distortion + avg_rate
# custom loss
model.add_loss(loss)
Here is the error:
FailedPreconditionError: 2 root error(s) found.
(0) Failed precondition: Error while reading resource variable mixture_logits from Container: localhost. This could mean that the variable was uninitialized. Not found: Resource localhost/mixture_logits/class tensorflow::Var does not exist.
[[{{node prior_1/log_prob/Categorical_2/logits_parameter/Identity/ReadVariableOp}}]]
[[prior_1/log_prob/LogSoftmax/_269]]
(1) Failed precondition: Error while reading resource variable mixture_logits from Container: localhost. This could mean that the variable was uninitialized. Not found: Resource localhost/mixture_logits/class tensorflow::Var does not exist.
[[{{node prior_1/log_prob/Categorical_2/logits_parameter/Identity/ReadVariableOp}}]]
0 successful operations.
0 derived errors ignored.
I have two questions:
1. Is the way I model this type of VAE correct (using Model(input, output))?
2. Is the way I define my loss correct?
I think my errors may come from the above questions.
Finally, I have tried several things before:
1.
sess = tf.Session()
sess.run([tf.global_variables_initializer(),
tf.local_variables_initializer()])
2.
consider the "tensorflow.compat.v1.keras and tensorflow.keras " error
they really doesn't work for me!
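The FailedPreconditionError complains that the prior's variables (loc, raw_scale_diag, mixture_logits, created with tf.compat.v1.get_variable) were never initialized in the session Keras is using. One thing that may be worth trying, as a sketch under that assumption, is running the initializer in Keras's own session before training (this does not answer whether add_loss is the right way to build the ELBO):
# Run the initializer in the same session Keras uses, so the prior's variables exist there
sess = tf.keras.backend.get_session()  # tf is tensorflow.compat.v1 here
sess.run(tf.global_variables_initializer())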
I am trying to train a U-Net for image segmentation on satellite data and thereby extract a road network with nine different road types. Thus far I have tried many different U-Net implementations that are freely available on the web; however, I was not able to tailor them to my specific case. I'm sincerely hoping you are able to help me.
The satellite image and associated labels can be downloaded via the following link:
Satellite image and associated labels
Additionally, I've written the following code to prepare the data for the U-Net:
import skimage
from skimage.io import imread, imshow, imread_collection, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
from sklearn.metrics import jaccard_similarity_score
from shapely.geometry import MultiPolygon, Polygon
import shapely.wkt
import shapely.affinity
from collections import defaultdict
#Importing image and labels
labels = skimage.io.imread("ede_subset_293_wegen.tif")
images = skimage.io.imread("ede_subset_293_20180502_planetscope.tif")[...,:-1]
#Scaling image
img_scaled = images / images.max()
#Make non-roads 0
labels[labels == 15] = 0
#Resizing image and mask and labels
img_scaled_resized = img_scaled[:6400, :6400,:4 ]
print(img_scaled_resized.shape)
labels_resized = labels[:6400, :6400]
print(labels_resized.shape)
#splitting images
split_img = [
np.split(array, 25, axis=0)
for array in np.split(img_scaled_resized, 25, axis=1)
]
split_img[-1][-1].shape
#splitting labels
split_labels = [
np.split(array, 25, axis=0)
for array in np.split(labels_resized, 25, axis=1)
]
#Convert to np.array
split_labels = np.array(split_labels)
split_img = np.array(split_img)
train_images = np.reshape(split_img, (625, 256, 256, 4))
train_labels = np.reshape(split_labels, (625, 256, 256))
x_trn = train_images[:400,:,:,:]
x_val = train_images[400:500,:,:,:]
x_test = train_images[500:625,:,:,:]
y_trn = train_labels[:400,:,:]
y_val = train_labels[400:500,:,:]
y_test = train_labels[500:625,:,:]
plt.imshow(train_images[88,:,:,:])
skimage.io.imshow(train_labels[88,:,:])
Furthermore, I found the following U-Net on Kaggle, which I think should work for this particular case:
def get_unet():
    inputs = Input((8, ISZ, ISZ))
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)
    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)
    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)
    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
    conv10 = Convolution2D(N_Cls, 1, 1, activation='sigmoid')(conv9)
    model = Model(input=inputs, output=conv10)
    model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[jaccard_coef, jaccard_coef_int, 'accuracy'])
    return model
I know it is a big question, but I'm getting pretty desperate. Any help is greatly appreciated!
I found that Conv2DTranspose works better than UpSampling2D, and here is a quick implementation using it:
def conv_block(tensor, nfilters, size=3, padding='same', initializer="he_normal"):
    x = Conv2D(filters=nfilters, kernel_size=(size, size), padding=padding, kernel_initializer=initializer)(tensor)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=nfilters, kernel_size=(size, size), padding=padding, kernel_initializer=initializer)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x

def deconv_block(tensor, residual, nfilters, size=3, padding='same', strides=(2, 2)):
    y = Conv2DTranspose(nfilters, kernel_size=(size, size), strides=strides, padding=padding)(tensor)
    y = concatenate([y, residual], axis=3)
    y = conv_block(y, nfilters)
    return y
def Unet(img_height, img_width, nclasses=3, filters=64):
    # down
    input_layer = Input(shape=(img_height, img_width, 3), name='image_input')
    conv1 = conv_block(input_layer, nfilters=filters)
    conv1_out = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = conv_block(conv1_out, nfilters=filters*2)
    conv2_out = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = conv_block(conv2_out, nfilters=filters*4)
    conv3_out = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = conv_block(conv3_out, nfilters=filters*8)
    conv4_out = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv4_out = Dropout(0.5)(conv4_out)
    conv5 = conv_block(conv4_out, nfilters=filters*16)
    conv5 = Dropout(0.5)(conv5)
    # up
    deconv6 = deconv_block(conv5, residual=conv4, nfilters=filters*8)
    deconv6 = Dropout(0.5)(deconv6)
    deconv7 = deconv_block(deconv6, residual=conv3, nfilters=filters*4)
    deconv7 = Dropout(0.5)(deconv7)
    deconv8 = deconv_block(deconv7, residual=conv2, nfilters=filters*2)
    deconv9 = deconv_block(deconv8, residual=conv1, nfilters=filters)
    # output
    output_layer = Conv2D(filters=nclasses, kernel_size=(1, 1))(deconv9)
    output_layer = BatchNormalization()(output_layer)
    output_layer = Activation('softmax')(output_layer)
    model = Model(inputs=input_layer, outputs=output_layer, name='Unet')
    return model
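For the data in the question (4-band tiles and ten label values after relabelling: background plus nine road types), the class count would presumably be 10, and the hard-coded 3-channel Input line would need to change or one band would have to be dropped. A minimal sketch of instantiating the model above under those assumptions:
# Sketch: instantiate the model for 256x256 tiles and 10 classes (0 = background + 9 road types).
# Note: Unet() hard-codes 3 input channels, so either drop one of the 4 bands or edit the Input line.
road_unet = Unet(256, 256, nclasses=10, filters=64)
road_unet.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
print(road_unet.output_shape)  # expected: (None, 256, 256, 10)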
Now, for the data generators, you can use the built-in ImageDataGenerator class.
Here is the code from the Keras docs:
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
Another way to go is to implement your own generator by extending the Sequence class from Keras:
class seg_gen(Sequence):
    def __init__(self, x_set, y_set, batch_size, image_dir, mask_dir):
        self.x, self.y = x_set, y_set
        self.batch_size = batch_size
        self.samples = len(self.x)
        self.image_dir = image_dir
        self.mask_dir = mask_dir

    def __len__(self):
        return int(np.ceil(len(self.x) / float(self.batch_size)))

    def __getitem__(self, idx):
        # draw a random batch of indices (self.batch_size, not an undefined global)
        idx = np.random.randint(0, self.samples, self.batch_size)
        batch_x, batch_y = [], []
        drawn = 0
        for i in idx:
            _image = image.img_to_array(image.load_img(f'{self.image_dir}/{self.x[i]}', target_size=(img_height, img_width)))/255.
            mask = image.img_to_array(image.load_img(f'{self.mask_dir}/{self.y[i]}', grayscale=True, target_size=(img_height, img_width)))
            # mask = np.resize(mask,(img_height*img_width, classes))
            batch_y.append(mask)
            batch_x.append(_image)
        return np.array(batch_x), np.array(batch_y)
Here is sample code to train the model:
unet = Unet(256, 256, nclasses=66, filters=64)
print(unet.output_shape)
p_unet = multi_gpu_model(unet, 4)
p_unet.load_weights('models-dr/top_weights.h5')
p_unet.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
tb = TensorBoard(log_dir='logs', write_graph=True)
mc = ModelCheckpoint(mode='max', filepath='models-dr/top_weights.h5', monitor='acc', save_best_only='True', save_weights_only='True', verbose=1)
es = EarlyStopping(mode='max', monitor='acc', patience=6, verbose=1)
callbacks = [tb, mc, es]
train_gen = seg_gen(image_list, mask_list, batch_size)
p_unet.fit_generator(train_gen, steps_per_epoch=steps, epochs=13, callbacks=callbacks, workers=8)
I have tried using the dice loss when I had only two classes; here is the code for it:
def dice_coeff(y_true, y_pred):
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    score = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return score

def dice_loss(y_true, y_pred):
    loss = 1 - dice_coeff(y_true, y_pred)
    return loss
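A minimal sketch of plugging these into compilation for a two-class setup (a single sigmoid output channel), assuming K is keras.backend as imported in the question and model is such a binary segmentation model:
# Sketch: compile a binary (single sigmoid channel) segmentation model with the dice loss
model.compile(optimizer='adam', loss=dice_loss, metrics=[dice_coeff])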