I'm trying to implement a 3D variational autoencoder, following the instructions in the book Generative Deep Learning and also borrowing some pieces from this link; those examples are in 2D, so I have adapted them.
This is the code I'm using:
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import Input, BatchNormalization, Conv3D, Dense, Flatten, Lambda, Reshape, UpSampling3D
from tensorflow.keras.regularizers import l2
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
## Functional API
input_shape = (32, 32, 32, 1)
z_dim = 3000
########### Encoder ############
enc_in = Input(shape = input_shape, name="encoder_input")
enc_conv1 = tf.keras.layers.Conv3D(filters=8, kernel_size=5, strides=(1,1,1), padding='same', activation="relu", kernel_initializer="he_uniform", name="conv_1")(enc_in)
max1 = tf.keras.layers.MaxPool3D(pool_size=(2,2,2), strides=(2,2,2), padding='valid', name="max_pool_1")(enc_conv1)
enc_conv2 = tf.keras.layers.Conv3D(filters=16, kernel_size=3, padding='same', activation="relu", kernel_initializer="he_uniform", name="conv_2")(max1)
max2 = tf.keras.layers.MaxPool3D(pool_size=2, name="max_pool_2")(enc_conv2)
enc_fc1 = Dense(units = 512, kernel_initializer = 'he_uniform', activation = 'relu')(Flatten()(max2))
mu = Dense(units = z_dim, kernel_initializer = 'he_uniform', activation = None, name="mu")(enc_fc1)
log_variance = Dense(units = z_dim, kernel_initializer = 'he_uniform', activation = None, name="log_variance")(enc_fc1)
def sampling(args):
    mu, log_variance = args
    epsilon = K.random_normal(shape=K.shape(mu), mean=0., stddev=1.)
    return mu + K.exp(0.5 * log_variance) * epsilon
z = Lambda(sampling, output_shape = (z_dim, ), name="z")([mu, log_variance])
encoder = Model(enc_in, [mu, log_variance, z], name="encoder")
########## Decoder ############
dec_in = Input(shape = (z_dim, ), name="decoder_input")
dec_fc1 = Dense(units = 512, kernel_initializer = 'he_uniform', activation = 'relu')(dec_in)
dec_unflatten = Reshape(target_shape = (8,8,8,1))(dec_fc1)
dec_conv1 = tf.keras.layers.Conv3D(filters=32, kernel_size=3, padding='same', activation="relu", kernel_initializer="he_uniform", name="deconv_1")(dec_unflatten)
ups1 = tf.keras.layers.UpSampling3D(size=(2, 2, 2), name="ups_1")(dec_conv1)
dec_conv2 = tf.keras.layers.Conv3D(filters=16, kernel_size=3, padding='same', activation="relu", kernel_initializer="he_uniform", name="deconv_2")(ups1)
ups2 = tf.keras.layers.UpSampling3D(size=(2, 2, 2), name="ups_2")(dec_conv2)
dec_conv4 = Conv3D(filters = 1, kernel_size = (3, 3, 3), padding = 'same', activation="relu", kernel_initializer = 'he_uniform', name='decoder_output')(ups2)
decoder = Model(dec_in, dec_conv4, name="decoder")
model_input = enc_in
model_output = decoder(z)
v_autoencoder = Model(model_input, model_output)
######################
def vae_r_loss(T1_input, model_output):
    r_loss = K.mean(K.square(T1_input - model_output), axis=[1, 2, 3])
    return r_loss_factor * r_loss

def vae_kl_loss(T1_input, model_output):
    kl_loss = -0.5 * K.sum(1 + log_variance - K.square(mu) - K.exp(log_variance), axis=1)
    return kl_loss

def vae_loss(T1_input, model_output):
    r_loss = vae_r_loss(T1_input, model_output)
    kl_loss = vae_kl_loss(T1_input, model_output)
    return r_loss + kl_loss
learning_rate=0.001
r_loss_factor = 500
optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
v_autoencoder.compile(optimizer=optimizer, loss = vae_loss, metrics = [vae_r_loss, vae_kl_loss])
v_autoencoder.summary()
But when I try to train the model:
history = v_autoencoder.fit(T1_input, T1_input, epochs=10, validation_split=0.2, shuffle=True)
I get this error:
TypeError: You are passing KerasTensor(type_spec=TensorSpec(shape=(), dtype=tf.float32, name=None), name='Placeholder:0', description="created by layer 'tf.cast_4'"), an intermediate Keras symbolic input/output, to a TF API that does not allow registering custom dispatchers, such as `tf.cond`, `tf.function`, gradient tapes, or `tf.map_fn`. Keras Functional model construction only supports TF API calls that *do* support dispatching, such as `tf.math.add` or `tf.reshape`. Other APIs cannot be called directly on symbolic Keras inputs/outputs. You can work around this limitation by putting the operation in a custom Keras layer `call` and calling that layer on this symbolic input/output.
The encoder and the decoder models seem to be disconnected, so the KerasTensors cannot flow correctly. Also, the encoder returns three tensors [mu, variance, z], whereas the decoder only requires z as an input.
Replace these lines,
model_input = enc_in
model_output = decoder(z)
v_autoencoder = Model(model_input, model_output)
with,
vae_input = Input(shape = input_shape, name="encoder_input")
mu_ , variance_ , z_ = encoder( vae_input )
vae_output = decoder( z_ )
v_autoencoder = Model( vae_input , [vae_output , mu_ , variance_] )
So, basically, the model will now output mu_ and variance_, which are the outputs of the encoder and are required for vae_kl_loss. Earlier, you were using these KerasTensors directly in vae_kl_loss, and hence you got the error.
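As an aside, the workaround that the error message itself suggests also works: move the KL computation into a custom layer's call and register it with add_loss, so that no loss function ever closes over a symbolic tensor. A minimal sketch of that alternative (the layer name and the wiring are illustrative, not part of the original code):
import tensorflow as tf

class KLLossLayer(tf.keras.layers.Layer):
    # Computes the KL term from mu and log_variance, registers it via
    # add_loss, and passes z through unchanged.
    def call(self, inputs):
        mu, log_variance, z = inputs
        kl = -0.5 * tf.reduce_mean(
            tf.reduce_sum(1 + log_variance - tf.square(mu) - tf.exp(log_variance), axis=1))
        self.add_loss(kl)
        return z

# z = KLLossLayer(name="kl")([mu, log_variance, z])
# v_autoencoder.compile(optimizer=optimizer, loss=vae_r_loss)  # KL term is added automatically
The fix below sticks with the multi-output approach instead.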
Remove the axis argument from vae_kl_loss,
def vae_kl_loss(mu, log_variance):
    kl_loss = -0.5 * K.sum(1 + log_variance - K.square(mu) - K.exp(log_variance))
    return kl_loss
Pass mu and variance to vae_kl_loss,
def vae_loss(T1_input, model_output):
    mu = model_output[1]
    variance = model_output[2]
    vae_output = model_output[0]
    r_loss = vae_r_loss(T1_input, vae_output)
    kl_loss = vae_kl_loss(mu, variance)
    return r_loss + kl_loss
Modified code:
import tensorflow as tf
from tensorflow.keras.layers import Input, BatchNormalization, Conv3D, Dense, Flatten, Lambda, Reshape, UpSampling3D
from tensorflow.keras.regularizers import l2
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
## Functional API
input_shape = (32, 32, 32, 1)
z_dim = 3000
########### Encoder ############
enc_in = Input(shape = input_shape, name="encoder_input")
enc_conv1 = tf.keras.layers.Conv3D(filters=8, kernel_size=5, strides=(1,1,1), padding='same', activation="relu", kernel_initializer="he_uniform", name="conv_1")(enc_in)
max1 = tf.keras.layers.MaxPool3D(pool_size=(2,2,2), strides=(2,2,2), padding='valid', name="max_pool_1")(enc_conv1)
enc_conv2 = tf.keras.layers.Conv3D(filters=16, kernel_size=3, padding='same', activation="relu", kernel_initializer="he_uniform", name="conv_2")(max1)
max2 = tf.keras.layers.MaxPool3D(pool_size=2, name="max_pool_2")(enc_conv2)
enc_fc1 = Dense(units = 512, kernel_initializer = 'he_uniform', activation = 'relu')(Flatten()(max2))
mu = Dense(units = z_dim, kernel_initializer = 'he_uniform', activation = None, name="mu")(enc_fc1)
log_variance = Dense(units = z_dim, kernel_initializer = 'he_uniform', activation = None, name="log_variance")(enc_fc1)
def sampling(args):
    mu, log_variance = args
    epsilon = K.random_normal(shape=K.shape(mu), mean=0., stddev=1.)
    return mu + K.exp(0.5 * log_variance) * epsilon
z = Lambda(sampling, output_shape = (z_dim, ), name="z")([mu, log_variance])
encoder = Model(enc_in, [mu, log_variance, z], name="encoder")
########## Decoder ############
dec_in = Input(shape = (z_dim, ), name="decoder_input")
dec_fc1 = Dense(units = 512, kernel_initializer = 'he_uniform', activation = 'relu')(dec_in)
dec_unflatten = Reshape(target_shape = (8,8,8,1))(dec_fc1)
dec_conv1 = tf.keras.layers.Conv3D(filters=32, kernel_size=3, padding='same', activation="relu", kernel_initializer="he_uniform", name="deconv_1")(dec_unflatten)
ups1 = tf.keras.layers.UpSampling3D(size=(2, 2, 2), name="ups_1")(dec_conv1)
dec_conv2 = tf.keras.layers.Conv3D(filters=16, kernel_size=3, padding='same', activation="relu", kernel_initializer="he_uniform", name="deconv_2")(ups1)
ups2 = tf.keras.layers.UpSampling3D(size=(2, 2, 2), name="ups_2")(dec_conv2)
dec_conv4 = Conv3D(filters = 1, kernel_size = (3, 3, 3), padding = 'same', activation="relu", kernel_initializer = 'he_uniform', name='decoder_output')(ups2)
decoder = Model(dec_in, dec_conv4, name="decoder")
vae_input = Input(shape = input_shape, name="encoder_input")
mu_ , variance_ , z_ = encoder( vae_input )
vae_output = decoder( z_ )
v_autoencoder = Model( vae_input , [vae_output , mu_ , variance_] )
######################
def vae_r_loss(T1_input, model_output):
    r_loss = K.mean(K.square(T1_input - model_output), axis=[1, 2, 3])
    return r_loss_factor * r_loss

def vae_kl_loss(mu, log_variance):
    kl_loss = -0.5 * K.sum(1 + log_variance - K.square(mu) - K.exp(log_variance))
    return kl_loss

def vae_loss(T1_input, model_output):
    mu = model_output[1]
    variance = model_output[2]
    vae_output = model_output[0]
    r_loss = vae_r_loss(T1_input, vae_output)
    kl_loss = vae_kl_loss(mu, variance)
    return r_loss + kl_loss
learning_rate=0.001
r_loss_factor = 500
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
v_autoencoder.compile(optimizer=optimizer, loss = vae_loss, metrics = [vae_r_loss, vae_kl_loss])
v_autoencoder.summary()
Here is an example of TensorFlow GradientTape provided by Keras for a typical variational autoencoder:
VAE-keras-example
The train_step function is implemented inside the model, and the model is trained with model.fit(). The example performs great, with no problems at all.
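For reference, the pattern in that example looks roughly like this (a condensed sketch, assuming separate encoder and decoder sub-models as in the linked code):
import tensorflow as tf
from tensorflow import keras

class VAE(keras.Model):
    # train_step lives inside the model, so a plain model.fit() drives it
    def __init__(self, encoder, decoder, **kwargs):
        super().__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder

    def train_step(self, data):
        with tf.GradientTape() as tape:
            z_mean, z_log_var, z = self.encoder(data)
            reconstruction = self.decoder(z)
            reconstruction_loss = tf.reduce_mean(
                tf.reduce_sum(keras.losses.binary_crossentropy(data, reconstruction), axis=(1, 2)))
            kl_loss = tf.reduce_mean(
                tf.reduce_sum(-0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)), axis=1))
            total_loss = reconstruction_loss + kl_loss
        grads = tape.gradient(total_loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        return {"loss": total_loss}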
However, for another application, I need to implement the train_step function outside of the model definition. I started from the above-mentioned example, since the target application is also a kind of VAE. Accordingly, I applied some modifications and tried to train the same model structure; the whole code is below. However, I get very weird numbers for the loss values compared to the original code, and after a couple of iterations the losses become nan.
Could you please let me know what the mistake is and why this happens?
Thanks in advance
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import backend as K
from tensorflow import keras
import numpy as np
print(tf.test.is_gpu_available()) # prints True
print(tf.__version__) # prints '2.0.0-beta1'
class Sampling(layers.Layer):
    """Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""

    def call(self, inputs):
        z_mean, z_log_var = inputs
        batch = tf.shape(z_mean)[0]
        dim = tf.shape(z_mean)[1]
        epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon
latent_dim = 2
encoder_inputs = keras.Input(shape=(28, 28, 1))
x = layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(encoder_inputs)
x = layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(x)
x = layers.Flatten()(x)
x = layers.Dense(16, activation="relu")(x)
z_mean = layers.Dense(latent_dim, name="z_mean")(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
z = Sampling()([z_mean, z_log_var])
x = layers.Dense(7 * 7 * 64, activation="relu")(z)
x = layers.Reshape((7, 7, 64))(x)
x = layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(x)
decoder_outputs = layers.Conv2DTranspose(1, 3, activation="sigmoid", padding="same")(x)
model = keras.Model(encoder_inputs, [decoder_outputs, z_mean, z_log_var] , name="decoder")
model.summary()
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
objective = tf.keras.losses.SparseCategoricalCrossentropy()
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
num_samples = x_train.shape[0]
epochs=1
batch_size=128
@tf.function
def train_step(data):
    with tf.GradientTape() as tape:
        reconstruction, z_mean, z_log_var = model(data, training=True)
        data = tf.expand_dims(data, axis=-1)
        reconstruction_loss = tf.reduce_mean(
            tf.reduce_sum(keras.losses.binary_crossentropy(data, reconstruction), axis=(1, 2)))
        kl_loss = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))
        kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1))
        total_loss = (reconstruction_loss + kl_loss)
    grads = tape.gradient(total_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return total_loss, reconstruction_loss, kl_loss
with tf.device('gpu:0'):
    for epoch in range(epochs):
        for step in range(num_samples//batch_size):
            s = step * batch_size
            e = s + batch_size
            x_batch = x_train[s:e, :, :]
            total_loss, reconstruction_loss, kl_loss = train_step(x_batch)
            print("-----------------")
            print(f"epoch: {epoch} step: {step}")
            print(f"reconstruction_loss: {reconstruction_loss} ")
            print(f"kl_loss: {kl_loss} ")
            print(f"total_loss: {total_loss}")
I think you forgot to normalize your data as shown in the tutorial you are referring to:
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
x_train = x_train.astype("float32") / 255
Otherwise, your code seems to be running fine and the loss is not nan. Here is the code for reference:
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import backend as K
from tensorflow import keras
import numpy as np
print(tf.test.is_gpu_available()) # prints True
print(tf.__version__) # prints '2.0.0-beta1'
class Sampling(layers.Layer):
    """Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""

    def call(self, inputs):
        z_mean, z_log_var = inputs
        batch = tf.shape(z_mean)[0]
        dim = tf.shape(z_mean)[1]
        epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon
latent_dim = 2
encoder_inputs = keras.Input(shape=(28, 28, 1))
x = layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(encoder_inputs)
x = layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(x)
x = layers.Flatten()(x)
x = layers.Dense(16, activation="relu")(x)
z_mean = layers.Dense(latent_dim, name="z_mean")(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
z = Sampling()([z_mean, z_log_var])
x = layers.Dense(7 * 7 * 64, activation="relu")(z)
x = layers.Reshape((7, 7, 64))(x)
x = layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(x)
decoder_outputs = layers.Conv2DTranspose(1, 3, activation="sigmoid", padding="same")(x)
model = keras.Model(encoder_inputs, [decoder_outputs, z_mean, z_log_var] , name="decoder")
model.summary()
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
objective = tf.keras.losses.SparseCategoricalCrossentropy()
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
x_train = x_train.astype("float32") / 255
num_samples = x_train.shape[0]
epochs=4
batch_size=128
@tf.function
def train_step(data):
    with tf.GradientTape() as tape:
        reconstruction, z_mean, z_log_var = model(data, training=True)
        reconstruction_loss = tf.reduce_mean(
            tf.reduce_sum(keras.losses.binary_crossentropy(data, reconstruction), axis=(1, 2)))
        kl_loss = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)) / batch_size
        kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1)) / batch_size
        total_loss = (reconstruction_loss + kl_loss)
    grads = tape.gradient(total_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return total_loss, reconstruction_loss, kl_loss
with tf.device('gpu:0'):
    for epoch in range(epochs):
        for step in range(num_samples//batch_size):
            s = step * batch_size
            e = s + batch_size
            x_batch = x_train[s:e, :, :, tf.newaxis]
            print(x_batch.shape)
            total_loss, reconstruction_loss, kl_loss = train_step(x_batch)
            print("-----------------")
            print(f"epoch: {epoch} step: {step}")
            print(f"reconstruction_loss: {reconstruction_loss} ")
            print(f"kl_loss: {kl_loss} ")
            print(f"total_loss: {total_loss}")
I am trying to get an Adversarial Autoencoder going using the Keras fit method on a keras.Model subclass, but for some reason it is not working.
Keep in mind that I tried updating the encoder and decoder at the same time, and I tried giving the discriminator loss to the encoder both with and without the reconstruction loss. The reconstruction loss stayed the same, while the encoder's discriminator loss kept increasing as the discriminator's own loss kept dropping.
discriminator = keras.Sequential(
    [
        keras.Input(shape=(4, 4, 128)),
        layers.Flatten(),
        layers.Dense(128, activation="relu"),
        layers.Dense(128, activation="relu"),
        layers.Dense(128, activation="relu"),
        layers.Dense(1, activation="sigmoid"),
    ],
    name="discriminator",
)
discriminator.summary()
encoder = keras.Sequential(
    [
        keras.Input(shape=(28, 28, 1)),
        layers.Conv2D(24, 3, activation="relu", strides=2, padding="same"),
        layers.Conv2D(48, 3, activation="relu", strides=2, padding="same"),
        layers.Conv2D(96, 3, activation="relu", strides=2, padding="same"),
        layers.Flatten(),
        layers.Dense(4 * 4 * 128, activation="linear"),
        layers.Reshape((4, 4, 128)),
    ],
    name="encoder",
)
encoder.summary()
decoder = keras.Sequential(
    [
        keras.Input(shape=(4, 4, 128)),
        layers.Flatten(),
        layers.Dense(7 * 7 * 64, activation="relu"),
        layers.Reshape((7, 7, 64)),
        layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same"),
        layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same"),
        layers.Conv2DTranspose(1, 3, activation="sigmoid", strides=1, padding="same"),
    ],
    name="decoder",
)
I am not sure if the problem is in the model itself or not. I am using the MNIST dataset for this.
class AAE(keras.Model):
    def __init__(self, encoder, decoder, discriminator):
        super(AAE, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.discriminator = discriminator
        self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
        self.reconstruction_loss_tracker = keras.metrics.Mean(name="reconstruction_loss")
        self.disc_tracker = keras.metrics.Mean(name="disc_loss")
        self.discEnc_tracker = keras.metrics.Mean(name="discEnc_loss")

    @property
    def metrics(self):
        return [
            self.total_loss_tracker,
            self.reconstruction_loss_tracker,
            self.disc_tracker,
            self.discEnc_tracker,
        ]

    def compile(self, di_optimizer, e_optimizer, de_optimizer, loss_fn):
        super(AAE, self).compile()
        self.dis_optimizer = di_optimizer
        self.e_optimizer = e_optimizer
        self.de_optimizer = de_optimizer
        self.lossBCE = loss_fn[0]
        self.lossMAE = loss_fn[1]

    def train_step(self, data):
        latent = self.encoder(data)
        batch_size = 200
        dists = tf.random.normal((batch_size, 4, 4, 128))
        y_real = tf.ones((batch_size, 1))
        y_fake = tf.zeros((batch_size, 1))
        real_dist_mix = tf.concat((dists, latent), axis=0)
        y_real_fake_mix = tf.concat((y_real, y_fake), axis=0)
        with tf.GradientTape() as tape:
            predictions = self.discriminator(real_dist_mix)
            d_loss = self.lossBCE(y_real_fake_mix, predictions)
        grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
        self.dis_optimizer.apply_gradients(zip(grads, self.discriminator.trainable_weights))
        with tf.GradientTape() as Etape, tf.GradientTape() as Dtape:
            latent = self.encoder(data)
            reconstruction = self.decoder(latent)
            reconstruction_loss = self.lossMAE(data, reconstruction)
            total_loss = reconstruction_loss
        Egrads = Etape.gradient(total_loss, self.encoder.trainable_weights)
        self.e_optimizer.apply_gradients(zip(Egrads, self.encoder.trainable_weights))
        Dgrads = Dtape.gradient(total_loss, self.decoder.trainable_weights)
        self.de_optimizer.apply_gradients(zip(Dgrads, self.decoder.trainable_weights))
        with tf.GradientTape() as tape:
            latent = self.encoder(data)
            predictions = self.discriminator(latent)
            e_loss = self.lossBCE(y_fake, predictions)
        grads = tape.gradient(e_loss, self.encoder.trainable_weights)
        self.e_optimizer.apply_gradients(zip(grads, self.encoder.trainable_weights))
        self.total_loss_tracker.update_state(total_loss)
        self.reconstruction_loss_tracker.update_state(reconstruction_loss)
        self.disc_tracker.update_state(d_loss)
        self.discEnc_tracker.update_state(e_loss)
        return {
            "loss": self.total_loss_tracker.result(),
            "reconstruction_loss": self.reconstruction_loss_tracker.result(),
            "disc_loss": self.disc_tracker.result(),
            "discEnc_loss": self.discEnc_tracker.result(),
        }
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
mnist_digits = np.concatenate([x_train, x_test], axis=0)
mnist_digits = np.expand_dims(mnist_digits, -1).astype("float32") / 255
Aae = AAE(encoder, decoder, discriminator)
#vae.compile(optimizer=keras.optimizers.Adam())
Aae.compile(
    di_optimizer=keras.optimizers.Adam(learning_rate=0.00001),
    e_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
    de_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
    loss_fn=[tf.keras.losses.BinaryCrossentropy(), tf.keras.losses.MeanAbsoluteError()]
)
h=Aae.fit(mnist_digits, epochs=15, batch_size=200)
I think that the error is here:
with tf.GradientTape() as tape:
    latent = self.encoder(data)
    predictions = self.discriminator(latent)
    e_loss = self.lossBCE(y_fake, predictions)
grads = tape.gradient(e_loss, self.encoder.trainable_weights)
self.e_optimizer.apply_gradients(zip(grads, self.encoder.trainable_weights))
I would put e_loss = self.lossBCE(y_real, predictions), because the encoder tries to fool the discriminator.
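That is, the encoder update would look like this (identical to the block above, only the target labels are flipped):
with tf.GradientTape() as tape:
    latent = self.encoder(data)
    predictions = self.discriminator(latent)
    # reward the encoder when the discriminator classifies its codes as real
    e_loss = self.lossBCE(y_real, predictions)
grads = tape.gradient(e_loss, self.encoder.trainable_weights)
self.e_optimizer.apply_gradients(zip(grads, self.encoder.trainable_weights))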
I've been experimenting with VAEs for weeks now, trying to get results similar to those of a tutorial's code, without success. The only difference between my code and the tutorial's is that the tutorial compiles the model and runs fit on it (a demonstration is below), while I run each layer manually. To compare them, I set up the weight initializations, as well as the epsilons, as numpy arrays saved into .npy files, and ran the two models. The results were different (the tutorial's model gives much smoother results).
As far as I understand neural networks, given the same training data in the same order, the same layers, the same initial weights, the same optimizer, and the same hyperparameters, the results should be the same every time.
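(One caveat to that premise: every remaining source of randomness, e.g. shuffling or sampling ops, must also be pinned. A small sketch of fixing the global seeds in TF 2.x, which by itself still does not guarantee bit-identical runs across different hardware:)
import numpy as np
import tensorflow as tf

np.random.seed(0)      # NumPy-side randomness (e.g. the .npy weight generation below)
tf.random.set_seed(0)  # TensorFlow-side randomness (initializers, tf.random.normal)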
I've made 3 simple examples. MNIST data with two dense layers with sizes 16, and 10. Weight initialization:
import numpy as np
w1 = np.random.uniform(-1, 1, size=(784, 16))
w2 = np.random.uniform(-1, 1, size=(16, 10))
np.save('w1', w1)
np.save('w2', w2)
First version (manually executing layers):
import numpy as np
import tensorflow as tf
import math
w1 = np.load('w1.npy')
w2 = np.load('w2.npy')
class Model(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.flat = tf.keras.layers.Flatten()
        self.w1 = tf.keras.layers.Dense(
            units=16,
            activation='relu',
            use_bias=False,
            kernel_initializer=tf.keras.initializers.constant(w1)
        )
        self.w2 = tf.keras.layers.Dense(
            units=10,
            activation='sigmoid',
            use_bias=False,
            kernel_initializer=tf.keras.initializers.constant(w2)
        )
        self.optimizer = tf.keras.optimizers.Adam()

    @tf.function
    def call(self, x, y):
        with tf.GradientTape() as tape:
            x = self.flat(x)
            a = self.w1(x)
            y_hat = self.w2(a)
            loss = tf.keras.losses.sparse_categorical_crossentropy(y, y_hat)
        gradients = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        return loss
def train(x, y, epochs, batch_size):
    batch_quantity = math.floor(x.shape[0] / batch_size)
    for e in range(epochs):
        for b in range(batch_quantity):
            train_x = x[b:b + batch_size]
            train_y = y[b:b + batch_size]
            loss = model(train_x, train_y)
            print(
                'Epoch:', e,
                'Batch:', b,
                'Loss:', loss.numpy().sum(),
            )
model = Model()
(x, y), _ = tf.keras.datasets.mnist.load_data()
epochs = 30
batch_size = 100
train(x, y, epochs, batch_size)
This one gives the exact same results no matter how many times you run it. As expected.
Second version (layers saved into a Model; gradients and optimization handled manually in train_step; then compile and fit):
import numpy as np
import tensorflow as tf
w1 = np.load('w1.npy')
w2 = np.load('w2.npy')
input = tf.keras.Input(shape=(28, 28))
x = tf.keras.layers.Flatten()(input)
a = tf.keras.layers.Dense(
units=16,
activation='relu',
use_bias=False,
kernel_initializer=tf.keras.initializers.constant(w1)
)(x)
y_hat = tf.keras.layers.Dense(
units=10,
activation='sigmoid',
use_bias=False,
kernel_initializer=tf.keras.initializers.constant(w2)
)(a)
model = tf.keras.Model(input, y_hat, name='model')
model.summary()
class Model(tf.keras.Model):
    def __init__(self, model):
        super().__init__()
        self.model = model

    def train_step(self, data):
        x, y = data[0]
        with tf.GradientTape() as tape:
            y_hat = self.model(x)
            loss = tf.keras.losses.sparse_categorical_crossentropy(y, y_hat)
        gradients = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        return {
            'loss': tf.reduce_sum(loss)
        }
model = Model(model)
train_data, _ = tf.keras.datasets.mnist.load_data()
epochs = 30
batch_size = 100
model.compile(optimizer=tf.keras.optimizers.Adam())
model.fit(train_data, epochs=epochs, batch_size=batch_size, shuffle=False)
Third version (layers saved into a Model; plain compile and fit):
import numpy as np
import tensorflow as tf
w1 = np.load('w1.npy')
w2 = np.load('w2.npy')
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(
        units=16,
        activation='relu',
        use_bias=False,
        kernel_initializer=tf.keras.initializers.constant(w1)
    ),
    tf.keras.layers.Dense(
        units=10,
        activation='sigmoid',
        use_bias=False,
        kernel_initializer=tf.keras.initializers.constant(w2)
    )
])
(x, y), _ = tf.keras.datasets.mnist.load_data()
batch_size = 100
epochs = 30
model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.SUM),
    optimizer=tf.keras.optimizers.Adam()
)
model.fit(
    x, y, batch_size=batch_size, epochs=epochs, shuffle=False
)
The second and third versions give almost exactly the same results, but different from the first. That I can't understand, as the optimizer, the loss function, the loss reduction, the initial weights, and the training data are all exactly the same.
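One concrete difference worth checking: the manual loop in the first version slices train_x = x[b:b + batch_size], which advances by one sample per step instead of by a full batch, so the three versions do not actually consume identical batches. An index-aligned form of that loop would look like this (illustrative sketch):
for b in range(batch_quantity):
    start = b * batch_size           # advance by a whole batch, not by one sample
    train_x = x[start:start + batch_size]
    train_y = y[start:start + batch_size]
    loss = model(train_x, train_y)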
If you're curious, the VAE examples and their differences can be tested as well.
Initialize weights:
import numpy as np
epsilon = np.random.normal(size=(100, 2))
wc1 = np.random.uniform(-0.022, 0.022, size=(3,3,1,32))
wc2 = np.random.uniform(-0.022, 0.022, size=(3,3,32,64))
wd1 = np.random.uniform(-0.022, 0.022, size=(3136,16))
wm = np.random.uniform(-0.022, 0.022, size=(16,2))
ws = np.random.uniform(-0.022, 0.022, size=(16,2))
wd2 = np.random.uniform(-0.022, 0.022, size=(2,3136))
wct1 = np.random.uniform(-0.022, 0.022, size=(3,3,64,64))
wct2 = np.random.uniform(-0.022, 0.022, size=(3,3,32,64))
wct3 = np.random.uniform(-0.022, 0.022, size=(3,3,1,32))
np.save('epsilon', epsilon)
np.save('wc1', wc1)
np.save('wc2', wc2)
np.save('wd1', wd1)
np.save('wm', wm)
np.save('ws', ws)
np.save('wd2', wd2)
np.save('wct1', wct1)
np.save('wct2', wct2)
np.save('wct3', wct3)
Tutorial's VAE:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
wc1 = np.load('wc1.npy')
wc2 = np.load('wc2.npy')
wd1 = np.load('wd1.npy')
wm = np.load('wm.npy')
ws = np.load('ws.npy')
wd2 = np.load('wd2.npy')
wct1 = np.load('wct1.npy')
wct2 = np.load('wct2.npy')
wct3 = np.load('wct3.npy')
epsilon = np.load('epsilon.npy')
class Sampling(layers.Layer):
    """Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""

    def call(self, inputs):
        z_mean, z_log_var = inputs
        # batch = tf.shape(z_mean)[0]
        # dim = tf.shape(z_mean)[1]
        # epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon
latent_dim = 2
encoder_inputs = keras.Input(shape=(28, 28, 1))
x = layers.Conv2D(32, 3, activation="relu", strides=2, padding="same", use_bias=False, kernel_initializer=tf.keras.initializers.constant(wc1))(encoder_inputs)
x = layers.Conv2D(64, 3, activation="relu", strides=2, padding="same", use_bias=False, kernel_initializer=tf.keras.initializers.constant(wc2))(x)
x = layers.Flatten()(x)
x = layers.Dense(16, activation="relu", use_bias=False, kernel_initializer=tf.keras.initializers.constant(wd1))(x)
z_mean = layers.Dense(latent_dim, name="z_mean", use_bias=False, kernel_initializer=tf.keras.initializers.constant(wm))(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var", use_bias=False, kernel_initializer=tf.keras.initializers.constant(ws))(x)
z = Sampling()([z_mean, z_log_var])
encoder = keras.Model(encoder_inputs, [z_mean, z_log_var, z], name="encoder")
encoder.summary()
latent_inputs = keras.Input(shape=(latent_dim,))
x = layers.Dense(7 * 7 * 64, activation="relu", use_bias=False, kernel_initializer=tf.keras.initializers.constant(wd2))(latent_inputs)
x = layers.Reshape((7, 7, 64))(x)
x = layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same", use_bias=False, kernel_initializer=tf.keras.initializers.constant(wct1))(x)
x = layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same", use_bias=False, kernel_initializer=tf.keras.initializers.constant(wct2))(x)
decoder_outputs = layers.Conv2DTranspose(1, 3, activation="sigmoid", padding="same", use_bias=False, kernel_initializer=tf.keras.initializers.constant(wct3))(x)
decoder = keras.Model(latent_inputs, decoder_outputs, name="decoder")
decoder.summary()
class VAE(keras.Model):
    def __init__(self, encoder, decoder, **kwargs):
        super(VAE, self).__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder

    def train_step(self, data):
        if isinstance(data, tuple):
            data = data[0]
        with tf.GradientTape() as tape:
            z_mean, z_log_var, z = encoder(data)
            reconstruction = decoder(z)
            reconstruction_loss = tf.reduce_mean(
                keras.losses.binary_crossentropy(data, reconstruction)
            )
            reconstruction_loss *= 28 * 28
            kl_loss = 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)
            kl_loss = tf.reduce_mean(kl_loss)
            kl_loss *= -0.5
            total_loss = reconstruction_loss + kl_loss
        grads = tape.gradient(total_loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        return {
            'mean': z_mean,
            "loss": total_loss,
            "reconstruction_loss": reconstruction_loss,
            "kl_loss": kl_loss,
        }
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
mnist_digits = x_train
mnist_digits = np.expand_dims(mnist_digits, -1).astype("float32") / 255
epochs=30
batch_size=100
vae = VAE(encoder, decoder)
vae.compile(optimizer=keras.optimizers.Adam())
vae.fit(mnist_digits, epochs=epochs, batch_size=batch_size)
import matplotlib.pyplot as plt
def plot_latent(encoder, decoder):
    # display a n*n 2D manifold of digits
    n = 30
    digit_size = 28
    scale = 2.0
    figsize = 15
    figure = np.zeros((digit_size * n, digit_size * n))
    # linearly spaced coordinates corresponding to the 2D plot
    # of digit classes in the latent space
    grid_x = np.linspace(-scale, scale, n)
    grid_y = np.linspace(-scale, scale, n)[::-1]
    for i, yi in enumerate(grid_y):
        for j, xi in enumerate(grid_x):
            z_sample = np.array([[xi, yi]])
            x_decoded = decoder.predict(z_sample)
            digit = x_decoded[0].reshape(digit_size, digit_size)
            figure[
                i * digit_size : (i + 1) * digit_size,
                j * digit_size : (j + 1) * digit_size,
            ] = digit
    plt.figure(figsize=(figsize, figsize))
    start_range = digit_size // 2
    end_range = n * digit_size + start_range + 1
    pixel_range = np.arange(start_range, end_range, digit_size)
    sample_range_x = np.round(grid_x, 1)
    sample_range_y = np.round(grid_y, 1)
    plt.xticks(pixel_range, sample_range_x)
    plt.yticks(pixel_range, sample_range_y)
    plt.xlabel("z[0]")
    plt.ylabel("z[1]")
    plt.imshow(figure, cmap="Greys_r")
    plt.savefig('trg.png')
    plt.close()

plot_latent(encoder, decoder)
def plot_label_clusters(encoder, decoder, data, labels):
    # display a 2D plot of the digit classes in the latent space
    z_mean, _, _ = encoder.predict(data)
    plt.figure(figsize=(12, 10))
    plt.scatter(z_mean[:, 0], z_mean[:, 1], c=labels)
    plt.colorbar()
    plt.xlabel("z[0]")
    plt.ylabel("z[1]")
    plt.savefig('wer.png')
    plt.close()
(x_train, y_train), _ = keras.datasets.mnist.load_data()
x_train = np.expand_dims(x_train, -1).astype("float32") / 255
plot_label_clusters(encoder, decoder, x_train, y_train)
My VAE:
import math
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
batch_size = 100
epochs = 30
(x, y), _ = tf.keras.datasets.mnist.load_data()
x = tf.expand_dims(x.astype("float32") / 255, 3)
wc1 = np.load('wc1.npy')
wc2 = np.load('wc2.npy')
wd1 = np.load('wd1.npy')
wm = np.load('wm.npy')
ws = np.load('ws.npy')
wd2 = np.load('wd2.npy')
wct1 = np.load('wct1.npy')
wct2 = np.load('wct2.npy')
wct3 = np.load('wct3.npy')
epsilon = np.load('epsilon.npy')
class VAE(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.encoder = [
            tf.keras.layers.Conv2D(32, 3, activation="relu", strides=2, padding="same", use_bias=False, kernel_initializer=tf.keras.initializers.constant(wc1)),
            tf.keras.layers.Conv2D(64, 3, activation="relu", strides=2, padding="same", use_bias=False, kernel_initializer=tf.keras.initializers.constant(wc2)),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(16, activation="relu", use_bias=False, kernel_initializer=tf.keras.initializers.constant(wd1)),
        ]
        self.wm = tf.keras.layers.Dense(2, use_bias=False, kernel_initializer=tf.keras.initializers.constant(wm))
        self.wv = tf.keras.layers.Dense(2, use_bias=False, kernel_initializer=tf.keras.initializers.constant(ws))
        self.decoder = [
            tf.keras.layers.Dense(7 * 7 * 64, activation="relu", use_bias=False, kernel_initializer=tf.keras.initializers.constant(wd2)),
            tf.keras.layers.Reshape((7, 7, 64)),
            tf.keras.layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same", use_bias=False, kernel_initializer=tf.keras.initializers.constant(wct1)),
            tf.keras.layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same", use_bias=False, kernel_initializer=tf.keras.initializers.constant(wct2)),
            tf.keras.layers.Conv2DTranspose(1, 3, activation="sigmoid", padding="same", use_bias=False, kernel_initializer=tf.keras.initializers.constant(wct3)),
        ]
        self.optimizer = tf.keras.optimizers.Adam()

    @tf.function
    def call(self, x):
        with tf.GradientTape() as tape:
            z = x
            for layer in self.encoder:
                z = layer(z)
            mean = self.wm(z)
            stdev = self.wv(z)
            # epsilon = tf.random.normal(mean.shape)
            z = mean + tf.exp(0.5 * stdev) * epsilon
            y_pred = z
            for layer in self.decoder:
                y_pred = layer(y_pred)
            reconstruction_loss = tf.reduce_mean(
                tf.keras.losses.binary_crossentropy(x, y_pred)
            )
            reconstruction_loss *= 28 * 28
            kl_loss = 1 + stdev - tf.square(mean) - tf.exp(stdev)
            kl_loss = tf.reduce_mean(kl_loss)
            kl_loss *= -0.5
            loss = reconstruction_loss + kl_loss
        gradients = tape.gradient(loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_weights))
        return loss, reconstruction_loss, kl_loss

model = VAE()
def train(x):
    batch_quantity = math.floor(x.shape[0] / batch_size)
    for i in range(epochs):
        for bi in range(batch_quantity):
            train_x = x[bi:bi + batch_size]
            loss, reconstruction_loss, kl_loss = model(train_x)
            print(
                'Epoch:', i,
                'Batch:', bi,
                'Loss:', loss.numpy(),
                'Reconstruction:', reconstruction_loss.numpy(),
                'KL:', kl_loss.numpy()
            )
train(x)
# display a 2D plot of the digit classes in the latent space
z = x
for layer in model.encoder:
z = layer(z)
mean = model.wm(z)
plt.figure(figsize=(12, 10))
plt.scatter(mean[:, 0], mean[:, 1], c=y)
plt.colorbar()
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.savefig('def.png')
plt.close()
n = 30
d = 2.0
s = 28
grid_x = np.linspace(-d, d, n)
grid_y = np.linspace(d, -d, n)
image_width = s*n
image_height = image_width
image = np.zeros((image_height, image_width))
for row, x in enumerate(grid_x):
    for col, y in enumerate(grid_y):
        z = np.array([[x, y]])
        for layer in model.decoder:
            z = layer(z)
        digit = tf.squeeze(z)
        image[row * s: (row + 1) * s,
              col * s: (col + 1) * s] = digit.numpy()
plt.figure(figsize=(15, 15))
plt.imshow(image, cmap='Greys_r')
plt.axis('Off')
plt.savefig('asd.png')
plt.close()
I want to test what happens when a VAE's posterior is a GMM. I wrote the code, but there is an error:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
# Dependency imports
import numpy as np
import matplotlib.pyplot as plt
from absl import flags
import numpy as np
from six.moves import urllib
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from tensorflow.compat.v1.keras.datasets import mnist
tf.compat.v1.disable_eager_execution()
tf.reset_default_graph()
tf.test.is_gpu_available()
tfd = tfp.distributions
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1).astype('float32')
# BACK TO [0,1]
train_images /= 255.
test_images /= 255.
# Binarized
train_images[train_images >= .5] = 1. # shape = (60000,28,28,1)
train_images[train_images < .5] = 0.
base_depth = 32
latent_size = 2
mixture_components = 10
IMAGE_SHAPE = [28, 28, 1]
# encoder
x = tf.keras.layers.Input(shape=(28, 28, 1))
h1 = tf.keras.layers.Conv2D(filters=base_depth, kernel_size=5, strides=1,
                            padding='same', activation='relu')
h2 = tf.keras.layers.Conv2D(filters=base_depth, kernel_size=5, strides=2,
                            padding='same', activation='relu')
h3 = tf.keras.layers.Conv2D(filters=base_depth * 2, kernel_size=5, strides=1,
                            padding='same', activation='relu')
h4 = tf.keras.layers.Conv2D(filters=base_depth * 2, kernel_size=5, strides=2,
                            padding='same', activation='relu')
h5 = tf.keras.layers.Conv2D(filters=base_depth * 6, kernel_size=3, strides=1,
                            padding='same', activation='relu')
h6 = tf.keras.layers.Conv2D(filters=latent_size * 4, kernel_size=7, padding='valid',
                            activation='relu')
flatten1 = tf.keras.layers.Flatten()
encoder_end = tf.keras.layers.Dense(2 * mixture_components * latent_size + mixture_components, activation=None)
e1 = h1(x)
e2 = h2(e1)
e3 = h3(e2)
e4 = h4(e3)
e5 = h5(e4)
e6 = h6(e5)
e7 = flatten1(e6)
encoder_out = encoder_end(e7) # parameters of GMM
# split {mean,variance,pi}
loc, raw_scale_diag, mixture_logits = tf.split(
    encoder_out,
    [latent_size * mixture_components, latent_size * mixture_components, mixture_components],
    axis=-1)
loc = tf.reshape(loc, [-1, mixture_components, latent_size])
raw_scale_diag = tf.reshape(raw_scale_diag, [-1, mixture_components, latent_size]) # [B,k,E]
# posterior
approx_posterior = tfd.MixtureSameFamily(
    components_distribution=tfd.MultivariateNormalDiag(
        loc=loc,
        scale_diag=tf.nn.softplus(raw_scale_diag)),
    mixture_distribution=tfd.Categorical(logits=mixture_logits),
)
# sample from posterior (16 latent variable z)
approx_posterior_samples = approx_posterior._sample_n(16, seed=None)
# decoder
decoder = tf.keras.Sequential([
    tf.keras.layers.Conv2DTranspose(2 * base_depth, kernel_size=7, padding='valid', activation='relu'),
    tf.keras.layers.Conv2DTranspose(2 * base_depth, kernel_size=5, padding='same', activation='relu'),
    tf.keras.layers.Conv2DTranspose(2 * base_depth, kernel_size=5, strides=2, padding='valid', activation='relu'),
    tf.keras.layers.Conv2DTranspose(base_depth, kernel_size=5, padding='same', activation='relu'),
    tf.keras.layers.Conv2DTranspose(base_depth, kernel_size=5, strides=2, padding='same', activation='relu'),
    tf.keras.layers.Conv2DTranspose(base_depth, kernel_size=5, padding='same', activation='relu'),
    tf.keras.layers.Conv2D(IMAGE_SHAPE[-1], kernel_size=5, padding='same', activation=None)
])
original_shape = tf.shape(input=approx_posterior_samples)
approx_posterior_samples = tf.reshape(approx_posterior_samples, (-1, 1, 1, latent_size))
logits = decoder(approx_posterior_samples)
logits = tf.reshape(
    logits, shape=tf.concat([original_shape[:-1], IMAGE_SHAPE], axis=0))
logits = tf.reduce_mean(logits, axis=0)
logits = tf.reshape(logits, shape=tf.shape(x))
# build model
model = tf.keras.Model(x, logits)
# prior of z (also GMM)
def make_mixture_prior(latent_size, mixture_components):
    loc = tf.compat.v1.get_variable(
        name="loc", shape=[mixture_components, latent_size])
    raw_scale_diag = tf.compat.v1.get_variable(
        name="raw_scale_diag", shape=[mixture_components, latent_size])
    mixture_logits = tf.compat.v1.get_variable(
        name="mixture_logits", shape=[mixture_components])
    return tfd.MixtureSameFamily(
        components_distribution=tfd.MultivariateNormalDiag(
            loc=loc,
            scale_diag=tf.nn.softplus(raw_scale_diag)),
        mixture_distribution=tfd.Categorical(logits=mixture_logits),
        name="prior")
# latent_size=2,mixture_components=10
latent_prior = make_mixture_prior(2, 10)
# reconstruction error
distortion = tf.nn.sigmoid_cross_entropy_with_logits(labels=x, logits=logits)
avg_distortion = tf.reduce_mean(distortion)
# kl divergence
rate = (approx_posterior.log_prob(approx_posterior_samples) - latent_prior.log_prob(approx_posterior_samples))
avg_rate = tf.reduce_mean(rate)
loss = avg_distortion + avg_rate
# custom loss
model.add_loss(loss)
Here is the error:
FailedPreconditionError: 2 root error(s) found.
(0) Failed precondition: Error while reading resource variable mixture_logits from Container: localhost. This could mean that the variable was uninitialized. Not found: Resource localhost/mixture_logits/class tensorflow::Var does not exist.
[[{{node prior_1/log_prob/Categorical_2/logits_parameter/Identity/ReadVariableOp}}]]
[[prior_1/log_prob/LogSoftmax/_269]]
(1) Failed precondition: Error while reading resource variable mixture_logits from Container: localhost. This could mean that the variable was uninitialized. Not found: Resource localhost/mixture_logits/class tensorflow::Var does not exist.
[[{{node prior_1/log_prob/Categorical_2/logits_parameter/Identity/ReadVariableOp}}]]
0 successful operations.
0 derived errors ignored.
I have two questions:
1. Is the way I model this type of VAE correct (using Model(input, output))?
2. Is the way I define my loss correct?
I think my errors may come from the above questions.
Finally, I have already tried a couple of things:
1.
sess = tf.Session()
sess.run([tf.global_variables_initializer(),
          tf.local_variables_initializer()])
2. Checking for a mix-up between tensorflow.compat.v1.keras and tensorflow.keras as the source of the error.
They really don't work for me!
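A related variant along the same lines (a sketch, unverified here) would be to run the initializer in the session Keras itself uses, after make_mixture_prior has created its variables (recall that tf is tensorflow.compat.v1 in the code above):
sess = tf.keras.backend.get_session()  # the session Keras operates in
sess.run(tf.global_variables_initializer())
# only evaluate tensors that read the prior's variables after this point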
I am trying to train a U-Net for image segmentation on satellite data in order to extract a road network with nine different road types. So far I have tried many different U-Net codes that are freely available on the web; however, I was not able to tailor them to my specific case. I'm sincerely hoping you are able to help me.
The satellite image and associated labels can be downloaded via the following link:
Satellite image and associated labels
Additionally, I've written the following code to prep the data for the U-Net:
import skimage
from skimage.io import imread, imshow, imread_collection, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
from sklearn.metrics import jaccard_similarity_score
from shapely.geometry import MultiPolygon, Polygon
import shapely.wkt
import shapely.affinity
from collections import defaultdict
#Importing image and labels
labels = skimage.io.imread("ede_subset_293_wegen.tif")
images = skimage.io.imread("ede_subset_293_20180502_planetscope.tif")[...,:-1]
#Scaling image
img_scaled = images / images.max()
#Make non-roads 0
labels[labels == 15] = 0
#Resizing image and mask and labels
img_scaled_resized = img_scaled[:6400, :6400,:4 ]
print(img_scaled_resized.shape)
labels_resized = labels[:6400, :6400]
print(labels_resized.shape)
#splitting images
split_img = [
    np.split(array, 25, axis=0)
    for array in np.split(img_scaled_resized, 25, axis=1)
]
split_img[-1][-1].shape
#splitting labels
split_labels = [
    np.split(array, 25, axis=0)
    for array in np.split(labels_resized, 25, axis=1)
]
#Convert to np.array
split_labels = np.array(split_labels)
split_img = np.array(split_img)
train_images = np.reshape(split_img, (625, 256, 256, 4))
train_labels = np.reshape(split_labels, (625, 256, 256))
x_trn = train_images[:400,:,:,:]
x_val = train_images[400:500,:,:,:]
x_test = train_images[500:625,:,:,:]
y_trn = train_labels[:400,:,:]
y_val = train_labels[400:500,:,:]
y_test = train_labels[500:625,:,:]
plt.imshow(train_images[88,:,:,:])
skimage.io.imshow(train_labels[88,:,:])
Furthermore, I found the following U-Net on Kaggle, which I think should work for this particular case:
def get_unet():
    inputs = Input((8, ISZ, ISZ))
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)
    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)
    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)
    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
    conv10 = Convolution2D(N_Cls, 1, 1, activation='sigmoid')(conv9)
    model = Model(input=inputs, output=conv10)
    model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[jaccard_coef, jaccard_coef_int, 'accuracy'])
    return model
I know it is a big question, but I'm getting pretty desperate. Any help is greatly appreciated!
I found that Conv2DTranspose works better than UpSampling2D, and here is a quick implementation using it:
def conv_block(tensor, nfilters, size=3, padding='same', initializer="he_normal"):
    x = Conv2D(filters=nfilters, kernel_size=(size, size), padding=padding, kernel_initializer=initializer)(tensor)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=nfilters, kernel_size=(size, size), padding=padding, kernel_initializer=initializer)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x

def deconv_block(tensor, residual, nfilters, size=3, padding='same', strides=(2, 2)):
    y = Conv2DTranspose(nfilters, kernel_size=(size, size), strides=strides, padding=padding)(tensor)
    y = concatenate([y, residual], axis=3)
    y = conv_block(y, nfilters)
    return y
def Unet(img_height, img_width, nclasses=3, filters=64):
    # down
    input_layer = Input(shape=(img_height, img_width, 3), name='image_input')
    conv1 = conv_block(input_layer, nfilters=filters)
    conv1_out = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = conv_block(conv1_out, nfilters=filters*2)
    conv2_out = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = conv_block(conv2_out, nfilters=filters*4)
    conv3_out = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = conv_block(conv3_out, nfilters=filters*8)
    conv4_out = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv4_out = Dropout(0.5)(conv4_out)
    conv5 = conv_block(conv4_out, nfilters=filters*16)
    conv5 = Dropout(0.5)(conv5)
    # up
    deconv6 = deconv_block(conv5, residual=conv4, nfilters=filters*8)
    deconv6 = Dropout(0.5)(deconv6)
    deconv7 = deconv_block(deconv6, residual=conv3, nfilters=filters*4)
    deconv7 = Dropout(0.5)(deconv7)
    deconv8 = deconv_block(deconv7, residual=conv2, nfilters=filters*2)
    deconv9 = deconv_block(deconv8, residual=conv1, nfilters=filters)
    # output
    output_layer = Conv2D(filters=nclasses, kernel_size=(1, 1))(deconv9)
    output_layer = BatchNormalization()(output_layer)
    output_layer = Activation('softmax')(output_layer)
    model = Model(inputs=input_layer, outputs=output_layer, name='Unet')
    return model
Now for the data generators, you can use the built-in ImageDataGenerator class; here is the code from the Keras docs:
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
                     featurewise_std_normalization=True,
                     rotation_range=90,
                     width_shift_range=0.1,
                     height_shift_range=0.1,
                     zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
    'data/images',
    class_mode=None,
    seed=seed)
mask_generator = mask_datagen.flow_from_directory(
    'data/masks',
    class_mode=None,
    seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
    train_generator,
    steps_per_epoch=2000,
    epochs=50)
Another way to go is to implement your own generator by extending the Sequence class from Keras:
class seg_gen(Sequence):
    def __init__(self, x_set, y_set, batch_size, image_dir, mask_dir):
        self.x, self.y = x_set, y_set
        self.batch_size = batch_size
        self.samples = len(self.x)
        self.image_dir = image_dir
        self.mask_dir = mask_dir

    def __len__(self):
        return int(np.ceil(len(self.x) / float(self.batch_size)))

    def __getitem__(self, idx):
        # draw a random batch of indices rather than a sequential slice
        idx = np.random.randint(0, self.samples, self.batch_size)
        batch_x, batch_y = [], []
        for i in idx:
            _image = image.img_to_array(image.load_img(f'{self.image_dir}/{self.x[i]}', target_size=(img_height, img_width)))/255.
            mask = image.img_to_array(image.load_img(f'{self.mask_dir}/{self.y[i]}', grayscale=True, target_size=(img_height, img_width)))
            # mask = np.resize(mask, (img_height*img_width, classes))
            batch_y.append(mask)
            batch_x.append(_image)
        return np.array(batch_x), np.array(batch_y)
Here is some sample code to train the model:
unet = Unet(256, 256, nclasses=66, filters=64)
print(unet.output_shape)
p_unet = multi_gpu_model(unet, 4)
p_unet.load_weights('models-dr/top_weights.h5')
p_unet.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
tb = TensorBoard(log_dir='logs', write_graph=True)
mc = ModelCheckpoint(mode='max', filepath='models-dr/top_weights.h5', monitor='acc', save_best_only='True', save_weights_only='True', verbose=1)
es = EarlyStopping(mode='max', monitor='acc', patience=6, verbose=1)
callbacks = [tb, mc, es]
train_gen = seg_gen(image_list, mask_list, batch_size)
p_unet.fit_generator(train_gen, steps_per_epoch=steps, epochs=13, callbacks=callbacks, workers=8)
I have tried using the dice loss when I had only two classes; here is the code for it:
def dice_coeff(y_true, y_pred):
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    score = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return score

def dice_loss(y_true, y_pred):
    loss = 1 - dice_coeff(y_true, y_pred)
    return loss