FailedPreconditionError: 2 root error(s) found - python

I want to test what happens when a VAE's posterior is a GMM. I wrote the following code, but I get an error:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
# Dependency imports
import numpy as np
import matplotlib.pyplot as plt
from absl import flags
import numpy as np
from six.moves import urllib
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from tensorflow.compat.v1.keras.datasets import mnist
tf.compat.v1.disable_eager_execution()
tf.reset_default_graph()
tf.test.is_gpu_available()
tfd = tfp.distributions
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1).astype('float32')
# BACK TO [0,1]
train_images /= 255.
test_images /= 255.
# Binarized
train_images[train_images >= .5] = 1. # shape = (60000,28,28,1)
train_images[train_images < .5] = 0.
base_depth = 32
latent_size = 2
mixture_components = 10
IMAGE_SHAPE = [28, 28, 1]
# encoder
x = tf.keras.layers.Input(shape=(28, 28, 1))
h1 = tf.keras.layers.Conv2D(filters=base_depth, kernel_size=5, strides=1,
padding='same', activation='relu')
h2 = tf.keras.layers.Conv2D(filters=base_depth, kernel_size=5, strides=2,
padding='same', activation='relu')
h3 = tf.keras.layers.Conv2D(filters=base_depth * 2, kernel_size=5, strides=1,
padding='same', activation='relu')
h4 = tf.keras.layers.Conv2D(filters=base_depth * 2, kernel_size=5, strides=2,
padding='same', activation='relu')
h5 = tf.keras.layers.Conv2D(filters=base_depth * 6, kernel_size=3, strides=1,
padding='same', activation='relu')
h6 = tf.keras.layers.Conv2D(filters=latent_size * 4, kernel_size=7, padding='valid'
, activation='relu')
flatten1 = tf.keras.layers.Flatten()
encoder_end = tf.keras.layers.Dense(2 * mixture_components * latent_size + mixture_components, activation=None)
e1 = h1(x)
e2 = h2(e1)
e3 = h3(e2)
e4 = h4(e3)
e5 = h5(e4)
e6 = h6(e5)
e7 = flatten1(e6)
encoder_out = encoder_end(e7) # parameters of GMM
# split {mean,variance,pi}
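# With latent_size=2 and mixture_components=10, encoder_out has
# 2*10*2 + 10 = 50 units, split below into [20, 20, 10] pieces for the
# component means, raw scales, and mixture logits.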
loc, raw_scale_diag, mixture_logits = tf.split(encoder_out,
[latent_size * mixture_components, latent_size * mixture_components,
mixture_components], axis=-1)
loc = tf.reshape(loc, [-1, mixture_components, latent_size])
raw_scale_diag = tf.reshape(raw_scale_diag, [-1, mixture_components, latent_size]) # [B,k,E]
# posterior
approx_posterior = tfd.MixtureSameFamily(components_distribution=tfd.MultivariateNormalDiag(
loc=loc,
scale_diag=tf.nn.softplus(raw_scale_diag)),
mixture_distribution=tfd.Categorical(logits=mixture_logits),
)
# sample from posterior (16 latent variable z)
approx_posterior_samples = approx_posterior._sample_n(16, seed=None)
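# note: the public approx_posterior.sample(16, seed=None) does the same thing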
# decoder
decoder = tf.keras.Sequential([
tf.keras.layers.Conv2DTranspose(2 * base_depth, kernel_size=7, padding='valid', activation='relu'),
tf.keras.layers.Conv2DTranspose(2 * base_depth, kernel_size=5, padding='same', activation='relu'),
tf.keras.layers.Conv2DTranspose(2 * base_depth, kernel_size=5, strides=2, padding='valid', activation='relu'),
tf.keras.layers.Conv2DTranspose(base_depth, kernel_size=5, padding='same', activation='relu'),
tf.keras.layers.Conv2DTranspose(base_depth, kernel_size=5, strides=2, padding='same', activation='relu'),
tf.keras.layers.Conv2DTranspose(base_depth, kernel_size=5, padding='same', activation='relu'),
tf.keras.layers.Conv2D(IMAGE_SHAPE[-1], kernel_size=5, padding='same', activation=None)
])
original_shape = tf.shape(input=approx_posterior_samples)
approx_posterior_samples = tf.reshape(approx_posterior_samples, (-1, 1, 1, latent_size))
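# the samples have shape [16, batch, latent_size]; the reshape above flattens them
# to [16*batch, 1, 1, latent_size] so the conv decoder sees each sample as a 1x1
# spatial input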
logits = decoder(approx_posterior_samples)
logits = tf.reshape(
logits, shape=tf.concat([original_shape[:-1], IMAGE_SHAPE], axis=0))
logits = tf.reduce_mean(logits, axis=0)
logits = tf.reshape(logits, shape=tf.shape(x))
# bulid model
model = tf.keras.Model(x, logits)
# prior of z (also GMM)
def make_mixture_prior(latent_size, mixture_components):
    loc = tf.compat.v1.get_variable(
        name="loc", shape=[mixture_components, latent_size])
    raw_scale_diag = tf.compat.v1.get_variable(
        name="raw_scale_diag", shape=[mixture_components, latent_size])
    mixture_logits = tf.compat.v1.get_variable(
        name="mixture_logits", shape=[mixture_components])
    return tfd.MixtureSameFamily(
        components_distribution=tfd.MultivariateNormalDiag(
            loc=loc,
            scale_diag=tf.nn.softplus(raw_scale_diag)),
        mixture_distribution=tfd.Categorical(logits=mixture_logits),
        name="prior")
# latent_size=2,mixture_components=10
latent_prior = make_mixture_prior(2, 10)
# reconstruction error
distortion = tf.nn.sigmoid_cross_entropy_with_logits(labels=x, logits=logits)
avg_distortion = tf.reduce_mean(distortion)
# kl divergence
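# (estimated by Monte Carlo from the 16 posterior samples, since the KL between
# two mixtures has no closed form)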
rate = (approx_posterior.log_prob(approx_posterior_samples) - latent_prior.log_prob(approx_posterior_samples))
avg_rate = tf.reduce_mean(rate)
loss = avg_distortion + avg_rate
# custom loss
model.add_loss(loss)
Here is the error:
FailedPreconditionError: 2 root error(s) found.
(0) Failed precondition: Error while reading resource variable mixture_logits from Container: localhost. This could mean that the variable was uninitialized. Not found: Resource localhost/mixture_logits/class tensorflow::Var does not exist.
[[{{node prior_1/log_prob/Categorical_2/logits_parameter/Identity/ReadVariableOp}}]]
[[prior_1/log_prob/LogSoftmax/_269]]
(1) Failed precondition: Error while reading resource variable mixture_logits from Container: localhost. This could mean that the variable was uninitialized. Not found: Resource localhost/mixture_logits/class tensorflow::Var does not exist.
[[{{node prior_1/log_prob/Categorical_2/logits_parameter/Identity/ReadVariableOp}}]]
0 successful operations.
0 derived errors ignored.
I have two questions:
1. Is the way I build this type of VAE correct (using Model(input, output))?
2. Is the way I define my loss correct?
I think my error may come from one of these two points.
Finally, I have already tried the following, and neither works for me:
1. Initializing the variables in a session:
sess = tf.Session()
sess.run([tf.global_variables_initializer(),
          tf.local_variables_initializer()])
2. Checking for a mix-up between tensorflow.compat.v1.keras and tensorflow.keras.
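A minimal sketch of the initializer idea from attempt 1, run against the session that Keras itself uses in graph mode (this is just the pattern, not a confirmed fix):
# Run the initializer in the session Keras actually uses (TF1 graph mode assumed).
sess = tf.keras.backend.get_session()
sess.run(tf.global_variables_initializer())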

Related

3D - Variational Autoencoder implementation Error

I'm trying to implement a 3D variational autoencoder following the instructions in the book Generative Deep Learning, also taking some things from this link; those examples are in 2D, so I have adapted them.
This is the code I'm using:
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import Input, BatchNormalization, Conv3D, Dense, Flatten, Lambda, Reshape, UpSampling3D
from tensorflow.keras.regularizers import l2
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
## Functional API
input_shape = (32, 32, 32, 1)
z_dim = 3000
########### Encoder ############
enc_in = Input(shape = input_shape, name="encoder_input")
enc_conv1 = tf.keras.layers.Conv3D(filters=8, kernel_size=5, strides=(1,1,1), padding='same', activation="ReLU", kernel_initializer="he_uniform", name="conv_1")(enc_in)
max1 = tf.keras.layers.MaxPool3D(pool_size=(2,2,2), strides=(2,2,2), padding='valid', name="max_pool_1")(enc_conv1)
enc_conv2 = tf.keras.layers.Conv3D(filters=16, kernel_size=3, padding='same', activation="ReLU", kernel_initializer="he_uniform", name="conv_2")(max1)
max2 = tf.keras.layers.MaxPool3D(pool_size=2, name="max_pool_2")(enc_conv2)
enc_fc1 = Dense(units = 512, kernel_initializer = 'he_uniform', activation = 'ReLU')(Flatten()(max2))
mu = Dense(units = z_dim, kernel_initializer = 'he_uniform', activation = None, name="mu")(enc_fc1)
log_variance = Dense(units = z_dim, kernel_initializer = 'he_uniform', activation = None, name="log_variance")(enc_fc1)
def sampling(args):
    mu, log_variance = args
    epsilon = K.random_normal(shape=K.shape(mu), mean=0., stddev=1.)
    return mu + K.exp(0.5 * log_variance) * epsilon
z = Lambda(sampling, output_shape = (z_dim, ), name="z")([mu, log_variance])
encoder = Model(enc_in, [mu, log_variance, z], name="encoder")
########## Decoder ############
dec_in = Input(shape = (z_dim, ), name="decoder_input")
dec_fc1 = Dense(units = 512, kernel_initializer = 'he_uniform', activation = 'ReLU')(dec_in)
dec_unflatten = Reshape(target_shape = (8,8,8,1))(dec_fc1)
dec_conv1 = tf.keras.layers.Conv3D(filters=32, kernel_size=3, padding='same', activation="ReLU", kernel_initializer="he_uniform", name="deconv_1")(dec_unflatten)
ups1 = tf.keras.layers.UpSampling3D(size=(2, 2, 2), name="ups_1")(dec_conv1)
dec_conv2 = tf.keras.layers.Conv3D(filters=16, kernel_size=3, padding='same', activation="ReLU", kernel_initializer="he_uniform", name="deconv_2")(ups1)
ups2 = tf.keras.layers.UpSampling3D(size=(2, 2, 2), name="ups_2")(dec_conv2)
dec_conv4 = Conv3D(filters = 1, kernel_size = (3, 3, 3), padding = 'same', activation="ReLU", kernel_initializer = 'he_uniform', name='decorder_output')(ups2)
decoder = Model(dec_in, dec_conv4, name="decoder")
model_input = enc_in
model_output = decoder(z)
v_autoencoder = Model(model_input, model_output)
######################
def vae_r_loss(T1_input, model_output):
    r_loss = K.mean(K.square(T1_input - model_output), axis=[1, 2, 3])
    return r_loss_factor * r_loss
def vae_kl_loss(T1_input, model_output):
    kl_loss = -0.5 * K.sum(1 + log_variance - K.square(mu) - K.exp(log_variance), axis=1)
    return kl_loss
def vae_loss(T1_input, model_output):
    r_loss = vae_r_loss(T1_input, model_output)
    kl_loss = vae_kl_loss(T1_input, model_output)
    return r_loss + kl_loss
learning_rate=0.001
r_loss_factor = 500
optimizer = keras.optimizers.Adam(lr=learning_rate)
v_autoencoder.compile(optimizer=optimizer, loss = vae_loss, metrics = [vae_r_loss, vae_kl_loss])
v_autoencoder.summary()
But when I try to train the model:
history = v_autoencoder.fit(T1_input, T1_input, epochs=10, validation_split=0.2, shuffle=True)
I get this error:
TypeError: You are passing KerasTensor(type_spec=TensorSpec(shape=(), dtype=tf.float32, name=None), name='Placeholder:0', description="created by layer 'tf.cast_4'"), an intermediate Keras symbolic input/output, to a TF API that does not allow registering custom dispatchers, such as `tf.cond`, `tf.function`, gradient tapes, or `tf.map_fn`. Keras Functional model construction only supports TF API calls that *do* support dispatching, such as `tf.math.add` or `tf.reshape`. Other APIs cannot be called directly on symbolic Kerasinputs/outputs. You can work around this limitation by putting the operation in a custom Keras layer `call` and calling that layer on this symbolic input/output.
The encoder and the decoder models seem to be disconnected, so the KerasTensors are not able to flow correctly. Also, the encoder returns three tensors [mu, variance, z], whereas the decoder only requires z as an input.
Replace these lines,
model_input = enc_in
model_output = decoder(z)
v_autoencoder = Model(model_input, model_output)
with,
vae_input = Input(shape = input_shape, name="encoder_input")
mu_ , variance_ , z_ = encoder( vae_input )
vae_output = decoder( z_ )
v_autoencoder = Model( vae_input , [vae_output , mu_ , variance_] )
So, basically, the model will now output mu_ and variance_, which are the outputs of the encoder and are required for vae_kl_loss. Earlier, you were using these KerasTensors directly in vae_kl_loss, and hence you got the error.
Remove the axis argument from vae_kl_loss,
def vae_kl_loss(mu, log_variance):
    kl_loss = -0.5 * K.sum(1 + log_variance - K.square(mu) - K.exp(log_variance))
    return kl_loss
Pass mu and variance to vae_kl_loss,
def vae_loss(T1_input, model_output):
    mu = model_output[1]
    variance = model_output[2]
    vae_output = model_output[0]
    r_loss = vae_r_loss(T1_input, vae_output)
    kl_loss = vae_kl_loss(mu, variance)
    return r_loss + kl_loss
Modified code:
import tensorflow as tf
from tensorflow.keras.layers import Input, BatchNormalization, Conv3D, Dense, Flatten, Lambda, Reshape, UpSampling3D
from tensorflow.keras.regularizers import l2
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
## Functional API
input_shape = (32, 32, 32, 1)
z_dim = 3000
########### Encoder ############
enc_in = Input(shape = input_shape, name="encoder_input")
enc_conv1 = tf.keras.layers.Conv3D(filters=8, kernel_size=5, strides=(1,1,1), padding='same', activation="ReLU", kernel_initializer="he_uniform", name="conv_1")(enc_in)
max1 = tf.keras.layers.MaxPool3D(pool_size=(2,2,2), strides=(2,2,2), padding='valid', name="max_pool_1")(enc_conv1)
enc_conv2 = tf.keras.layers.Conv3D(filters=16, kernel_size=3, padding='same', activation="ReLU", kernel_initializer="he_uniform", name="conv_2")(max1)
max2 = tf.keras.layers.MaxPool3D(pool_size=2, name="max_pool_2")(enc_conv2)
enc_fc1 = Dense(units = 512, kernel_initializer = 'he_uniform', activation = 'ReLU')(Flatten()(max2))
mu = Dense(units = z_dim, kernel_initializer = 'he_uniform', activation = None, name="mu")(enc_fc1)
log_variance = Dense(units = z_dim, kernel_initializer = 'he_uniform', activation = None, name="log_variance")(enc_fc1)
def sampling(args):
    mu, log_variance = args
    epsilon = K.random_normal(shape=K.shape(mu), mean=0., stddev=1.)
    return mu + K.exp(0.5 * log_variance) * epsilon
z = Lambda(sampling, output_shape = (z_dim, ), name="z")([mu, log_variance])
encoder = Model(enc_in, [mu, log_variance, z], name="encoder")
########## Decoder ############
dec_in = Input(shape = (z_dim, ), name="decoder_input")
dec_fc1 = Dense(units = 512, kernel_initializer = 'he_uniform', activation = 'ReLU')(dec_in)
dec_unflatten = Reshape(target_shape = (8,8,8,1))(dec_fc1)
dec_conv1 = tf.keras.layers.Conv3D(filters=32, kernel_size=3, padding='same', activation="ReLU", kernel_initializer="he_uniform", name="deconv_1")(dec_unflatten)
ups1 = tf.keras.layers.UpSampling3D(size=(2, 2, 2), name="ups_1")(dec_conv1)
dec_conv2 = tf.keras.layers.Conv3D(filters=16, kernel_size=3, padding='same', activation="ReLU", kernel_initializer="he_uniform", name="deconv_2")(ups1)
ups2 = tf.keras.layers.UpSampling3D(size=(2, 2, 2), name="ups_2")(dec_conv2)
dec_conv4 = Conv3D(filters = 1, kernel_size = (3, 3, 3), padding = 'same', activation="ReLU", kernel_initializer = 'he_uniform', name='decorder_output')(ups2)
decoder = Model(dec_in, dec_conv4, name="decoder")
vae_input = Input(shape = input_shape, name="encoder_input")
mu_ , variance_ , z_ = encoder( vae_input )
vae_output = decoder( z_ )
v_autoencoder = Model( vae_input , [vae_output , mu_ , variance_] )
######################
def vae_r_loss(T1_input, model_output):
    r_loss = K.mean(K.square(T1_input - model_output), axis=[1, 2, 3])
    return r_loss_factor * r_loss
def vae_kl_loss(mu, log_variance):
    kl_loss = -0.5 * K.sum(1 + log_variance - K.square(mu) - K.exp(log_variance))
    return kl_loss
def vae_loss(T1_input, model_output):
    mu = model_output[1]
    variance = model_output[2]
    vae_output = model_output[0]
    r_loss = vae_r_loss(T1_input, vae_output)
    kl_loss = vae_kl_loss(mu, variance)
    return r_loss + kl_loss
learning_rate=0.001
r_loss_factor = 500
optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
v_autoencoder.compile(optimizer=optimizer, loss = vae_loss, metrics = [vae_r_loss, vae_kl_loss])
v_autoencoder.summary()

ValueError on standardize_input_data()

I'm trying to implement a simple U-Net using Keras on the TensorFlow 2.0 backend.
I use a custom image generator without augmented data. My templates and masks are 1536x1536 RGB images (the masks are black and white).
import numpy as np
import os
import cv2
import random
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import Input, BatchNormalization, Activation, Dense, Dropout
from tensorflow.python.keras.layers.core import Lambda, RepeatVector, Reshape
from tensorflow.python.keras.layers.convolutional import Conv2D, Conv2DTranspose
from tensorflow.python.keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D
from tensorflow.python.keras.layers.merge import concatenate, add
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import tensorflow as tf
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
def data_gen(templates_folder, masks_folder, image_width, batch_size):  # Custom image generator
    counter = 0
    images_list = os.listdir(templates_folder)
    random.shuffle(images_list)
    while True:
        templates_pack = np.zeros((batch_size, image_width, image_width, 3)).astype('float')
        masks_pack = np.zeros((batch_size, image_width, image_width, 1)).astype('float')
        for i in range(counter, counter + batch_size):
            template = cv2.imread(templates_folder + '/' + images_list[i]) / 255.
            templates_pack[i - counter] = template
            mask = cv2.imread(masks_folder + '/' + images_list[i], cv2.IMREAD_GRAYSCALE) / 255.
            mask = mask.reshape(image_width, image_width, 1)  # Add extra dimension for parity with template size [1536 * 1536 * 3]
            masks_pack[i - counter] = mask
        counter += batch_size
        if counter + batch_size >= len(images_list):
            counter = 0
            random.shuffle(images_list)
        yield templates_pack, masks_pack
def conv2d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer="he_normal", padding="same")(input_tensor)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer="he_normal", padding="same")(x)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x
def get_unet(input_img, n_filters=16, dropout=0.5, batchnorm=True):
    c1 = conv2d_block(input_img, n_filters=n_filters * 1, kernel_size=3, batchnorm=batchnorm)
    p1 = MaxPooling2D((2, 2))(c1)
    p1 = Dropout(dropout * 0.5)(p1)
    c2 = conv2d_block(p1, n_filters=n_filters * 2, kernel_size=3, batchnorm=batchnorm)
    p2 = MaxPooling2D((2, 2))(c2)
    p2 = Dropout(dropout)(p2)
    c3 = conv2d_block(p2, n_filters=n_filters * 4, kernel_size=3, batchnorm=batchnorm)
    p3 = MaxPooling2D((2, 2))(c3)
    p3 = Dropout(dropout)(p3)
    c4 = conv2d_block(p3, n_filters=n_filters * 8, kernel_size=3, batchnorm=batchnorm)
    p4 = MaxPooling2D(pool_size=(2, 2))(c4)
    p4 = Dropout(dropout)(p4)
    c5 = conv2d_block(p4, n_filters=n_filters * 16, kernel_size=3, batchnorm=batchnorm)
    u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides=(2, 2), padding='same')(c5)
    u6 = concatenate([u6, c4])
    u6 = Dropout(dropout)(u6)
    c6 = conv2d_block(u6, n_filters=n_filters * 8, kernel_size=3, batchnorm=batchnorm)
    u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides=(2, 2), padding='same')(c6)
    u7 = concatenate([u7, c3])
    u7 = Dropout(dropout)(u7)
    c7 = conv2d_block(u7, n_filters=n_filters * 4, kernel_size=3, batchnorm=batchnorm)
    u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides=(2, 2), padding='same')(c7)
    u8 = concatenate([u8, c2])
    u8 = Dropout(dropout)(u8)
    c8 = conv2d_block(u8, n_filters=n_filters * 2, kernel_size=3, batchnorm=batchnorm)
    u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2), padding='same')(c8)
    u9 = concatenate([u9, c1], axis=3)
    u9 = Dropout(dropout)(u9)
    c9 = conv2d_block(u9, n_filters=n_filters * 1, kernel_size=3, batchnorm=batchnorm)
    outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)
    model = Model(inputs=[input_img], outputs=[outputs])
    return model
callbacks = [
EarlyStopping(patience=10, verbose=1),
ReduceLROnPlateau(factor=0.1, patience=3, min_lr=0.00001, verbose=1),
ModelCheckpoint("model-prototype.h5", verbose=1, save_best_only=True,
save_weights_only=True)
]
train_templates_path = "E:/train/templates"
train_masks_path = "E:/train/masks"
valid_templates_path = "E:/valid/templates"
valid_masks_path = "E:/valid/masks"
TRAIN_SET_SIZE = len(os.listdir(train_templates_path))
VALID_SET_SIZE = len(os.listdir(valid_templates_path))
BATCH_SIZE = 1
EPOCHS = 100
STEPS_PER_EPOCH = TRAIN_SET_SIZE / BATCH_SIZE
VALIDATION_STEPS = VALID_SET_SIZE / BATCH_SIZE
IMAGE_WIDTH = 1536
train_generator = data_gen(train_templates_path, train_masks_path, IMAGE_WIDTH, batch_size = BATCH_SIZE)
val_generator = data_gen(valid_templates_path, valid_masks_path, IMAGE_WIDTH, batch_size = BATCH_SIZE)
model = get_unet(input_img, n_filters=16, dropout=0.05, batchnorm=True)
model.compile(optimizer=Adam(lr=0.001), loss="binary_crossentropy", metrics=["accuracy"])
results = model.fit_generator(train_generator, epochs=EPOCHS, steps_per_epoch=STEPS_PER_EPOCH, validation_data=val_generator, validation_steps=VALIDATION_STEPS, callbacks=callbacks)
For some reason I get the following error:
Epoch 1/100
Traceback (most recent call last):
File "E:/Explorium/python/unet_trainer.py", line 83, in <module>
results = model.fit_generator(train_generator, epochs=EPOCHS, steps_per_epoch=STEPS_PER_EPOCH, validation_data=val_generator, validation_steps=VALIDATION_STEPS, callbacks=callbacks)
File "C:\Users\E-soft\Anaconda3\envs\Explorium\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 1297, in fit_generator
steps_name='steps_per_epoch')
File "C:\Users\E-soft\Anaconda3\envs\Explorium\lib\site-packages\tensorflow_core\python\keras\engine\training_generator.py", line 265, in model_iteration
batch_outs = batch_function(*batch_data)
File "C:\Users\E-soft\Anaconda3\envs\Explorium\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 973, in train_on_batch
class_weight=class_weight, reset_metrics=reset_metrics)
File "C:\Users\E-soft\Anaconda3\envs\Explorium\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py", line 253, in train_on_batch
extract_tensors_from_dataset=True)
File "C:\Users\E-soft\Anaconda3\envs\Explorium\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 2472, in _standardize_user_data
exception_prefix='input')
File "C:\Users\E-soft\Anaconda3\envs\Explorium\lib\site-packages\tensorflow_core\python\keras\engine\training_utils.py", line 574, in standardize_input_data
str(data_shape))
ValueError: Error when checking input: expected img to have shape (1536, 1536, 1) but got array with shape (1536, 1536, 3)
It seems that Keras can't standardize the input data in standardize_input_data(), but I have no idea why this is happening.
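For what it's worth, input_img is not defined in the snippet above; if it was declared with a single channel, that alone would explain the mismatch. A sketch of an input declaration that would match the generator's RGB templates (hypothetical, since the original definition is not shown):
# Hypothetical input declaration; the error suggests the original 'img' input had one channel.
input_img = Input((IMAGE_WIDTH, IMAGE_WIDTH, 3), name='img')
model = get_unet(input_img, n_filters=16, dropout=0.05, batchnorm=True)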

Keras: Image segmentation using grayscale masks and ImageDataGenerator class

I am currently trying to implement a convolutional network using Keras 2.1.6 (with TensorFlow as backend) and its ImageDataGenerator to segment an image using a grayscale mask. I try to use an image as input and a mask as the label. Due to the low number of training images and memory constraints, I use the ImageDataGenerator class provided in Keras.
However, after changing the values provided in the Keras example to the ones described later, I get this error:
File "C:\Users\XXX\Anaconda3\lib\site-packages\keras\engine\training.py", line 2223, in fit_generator
batch_size = x.shape[0]
AttributeError: 'tuple' object has no attribute 'shape'
As far as I know, this happens because the generator generates a tuple and not an array. It first happened after I changed the following parameters from the standard values provided in the Keras example: color_mode='grayscale' for all mask generators, and class_mode='input', since this is recommended for autoencoders.
The Keras example can be found here.
The dataset I am using consists of 100 images (jpg) and 100 corresponding grayscale masks (png) and can be downloaded at this link.
The architecture I want to implement is an autoencoder/U-Net-based network; it is shown in the provided code:
from keras.preprocessing import image
from keras.models import Model
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D
from keras import initializers
image_path =
mask_path =
valid_image_path =
valid_mask_path =
img_size=160
batchsize=10
samplesize = 60
steps = samplesize / batchsize
train_datagen = image.ImageDataGenerator(shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
data_gen_args = dict(rotation_range=90,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
seed = 1
image_generator = image_datagen.flow_from_directory(
image_path,
target_size=(img_size, img_size),
class_mode='input',
batch_size = batchsize,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
mask_path,
target_size=(img_size, img_size),
class_mode='input',
color_mode = 'grayscale',
batch_size = batchsize,
seed=seed)
vimage_generator = image_datagen.flow_from_directory(
valid_image_path,
target_size=(img_size, img_size),
class_mode='input',
batch_size = batchsize,
seed=seed)
vmask_generator = mask_datagen.flow_from_directory(
valid_mask_path,
target_size=(img_size, img_size),
class_mode='input',
color_mode = 'grayscale',
batch_size = batchsize,
seed=seed)
#Model
input_img = Input(shape=(img_size,img_size,3))
c11 = Conv2D(16, (3, 3), activation='relu', padding='same', kernel_initializer=initializers.random_normal(stddev=0.01))(input_img)
mp1 = MaxPooling2D((2, 2), padding='same')(c11)
c21 = Conv2D(16, (3, 3), activation='relu', padding='same', kernel_initializer=initializers.random_normal(stddev=0.01))(mp1)
mp2 = MaxPooling2D((2, 2), padding='same')(c21)
c31 = Conv2D(32, (3, 3), activation='relu', padding='same', kernel_initializer=initializers.random_normal(stddev=0.01))(mp2)
encoded = MaxPooling2D((5, 5), padding='same')(c31)
c12 = Conv2D(32, (3, 3), activation='relu', padding='same', kernel_initializer=initializers.random_normal(stddev=0.01))(encoded)
us12 = UpSampling2D((5,5))(c12)
c22 = Conv2D(16, (3, 3), activation='relu', padding='same', kernel_initializer=initializers.random_normal(stddev=0.01))(us12)
us22 = UpSampling2D((2, 2))(c22)
c32 = Conv2D(16, (3, 3), activation='relu', padding='same', kernel_initializer=initializers.random_normal(stddev=0.01))(us22)
us32 = UpSampling2D((2, 2))(c32)
decoded = Conv2D(1, (3, 3), activation='softmax', padding='same')(us32)
model = Model(input_img, decoded)
model.compile(loss="mean_squared_error", optimizer=optimizers.Adam(),metrics=["accuracy"])
#model.summary()
#Generators, tr: training, v: validation
trgen = zip(image_generator,mask_generator)
vgen = zip(vimage_generator,vmask_generator)
model.fit_generator(
trgen,
steps_per_epoch= steps,
epochs=5,
validation_data = vgen,
validation_steps=10)
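For reference, pulling a single batch from the zipped generator shows the nested tuples the traceback complains about (a quick check, assuming the generators defined above):
# class_mode='input' makes flow_from_directory yield (x, x) pairs, so each element
# of zip(image_generator, mask_generator) is a tuple of tuples rather than an array.
imgs, masks = next(trgen)
print(type(imgs), type(masks))  # both print <class 'tuple'>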
Here is a better version of U-Net; you can use this code:
def conv_block(tensor, nfilters, size=3, padding='same', initializer="he_normal"):
    x = Conv2D(filters=nfilters, kernel_size=(size, size), padding=padding, kernel_initializer=initializer)(tensor)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=nfilters, kernel_size=(size, size), padding=padding, kernel_initializer=initializer)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x
def deconv_block(tensor, residual, nfilters, size=3, padding='same', strides=(2, 2)):
    y = Conv2DTranspose(nfilters, kernel_size=(size, size), strides=strides, padding=padding)(tensor)
    y = concatenate([y, residual], axis=3)
    y = conv_block(y, nfilters)
    return y
def Unet(img_height, img_width, nclasses=3, filters=64):
    # down
    input_layer = Input(shape=(img_height, img_width, 3), name='image_input')
    conv1 = conv_block(input_layer, nfilters=filters)
    conv1_out = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = conv_block(conv1_out, nfilters=filters*2)
    conv2_out = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = conv_block(conv2_out, nfilters=filters*4)
    conv3_out = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = conv_block(conv3_out, nfilters=filters*8)
    conv4_out = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv4_out = Dropout(0.5)(conv4_out)
    conv5 = conv_block(conv4_out, nfilters=filters*16)
    conv5 = Dropout(0.5)(conv5)
    # up
    deconv6 = deconv_block(conv5, residual=conv4, nfilters=filters*8)
    deconv6 = Dropout(0.5)(deconv6)
    deconv7 = deconv_block(deconv6, residual=conv3, nfilters=filters*4)
    deconv7 = Dropout(0.5)(deconv7)
    deconv8 = deconv_block(deconv7, residual=conv2, nfilters=filters*2)
    deconv9 = deconv_block(deconv8, residual=conv1, nfilters=filters)
    # output
    output_layer = Conv2D(filters=nclasses, kernel_size=(1, 1))(deconv9)
    output_layer = BatchNormalization()(output_layer)
    output_layer = Activation('softmax')(output_layer)
    model = Model(inputs=input_layer, outputs=output_layer, name='Unet')
    return model
Note that if you have only two classes, i.e. nclasses=2, you need to change
output_layer = Conv2D(filters=nclasses, kernel_size=(1, 1))(deconv9)
output_layer = BatchNormalization()(output_layer)
output_layer = Activation('softmax')(output_layer)
to
output_layer = Conv2D(filters=2, kernel_size=(1, 1))(deconv9)
output_layer = BatchNormalization()(output_layer)
output_layer = Activation('sigmoid')(output_layer)
Now, for the data generators, you can use the built-in ImageDataGenerator class. Here is the code from the Keras docs:
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
Another way to go is to implement your own generator by extending the Sequence class from Keras:
class seg_gen(Sequence):
    def __init__(self, x_set, y_set, batch_size, image_dir, mask_dir):
        self.x, self.y = x_set, y_set
        self.batch_size = batch_size
        self.samples = len(self.x)
        self.image_dir = image_dir
        self.mask_dir = mask_dir
    def __len__(self):
        return int(np.ceil(len(self.x) / float(self.batch_size)))
    def __getitem__(self, idx):
        # draw a random batch of indices on each call
        idx = np.random.randint(0, self.samples, self.batch_size)
        batch_x, batch_y = [], []
        drawn = 0
        for i in idx:
            _image = image.img_to_array(image.load_img(f'{self.image_dir}/{self.x[i]}', target_size=(img_height, img_width)))/255.
            mask = image.img_to_array(image.load_img(f'{self.mask_dir}/{self.y[i]}', grayscale=True, target_size=(img_height, img_width)))
            # mask = np.resize(mask,(img_height*img_width, classes))
            batch_y.append(mask)
            batch_x.append(_image)
        return np.array(batch_x), np.array(batch_y)
Here is sample code to train the model:
unet = Unet(256, 256, nclasses=66, filters=64)
print(unet.output_shape)
p_unet = multi_gpu_model(unet, 4)
p_unet.load_weights('models-dr/top_weights.h5')
p_unet.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
tb = TensorBoard(log_dir='logs', write_graph=True)
mc = ModelCheckpoint(mode='max', filepath='models-dr/top_weights.h5', monitor='acc', save_best_only='True', save_weights_only='True', verbose=1)
es = EarlyStopping(mode='max', monitor='acc', patience=6, verbose=1)
callbacks = [tb, mc, es]
train_gen = seg_gen(image_list, mask_list, batch_size)
p_unet.fit_generator(train_gen, steps_per_epoch=steps, epochs=13, callbacks=callbacks, workers=8)
I got good results when I had only 2 classes by using dice loss; here is the code for it:
def dice_coeff(y_true, y_pred):
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    score = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return score
def dice_loss(y_true, y_pred):
    loss = 1 - dice_coeff(y_true, y_pred)
    return loss
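A usage sketch for the dice loss (assuming the single-channel sigmoid output of the two-class variant above):
# Compile with the dice loss; dice_coeff doubles as a monitoring metric.
model.compile(optimizer='adam', loss=dice_loss, metrics=[dice_coeff])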
What you are trying to build is an image segmentation model, not an autoencoder. Therefore, since you have separate generators for the images and the labels (i.e. the masks), you need to set the class_mode argument to None to prevent the generators from producing any label arrays.
Further, you need to change the activation function of the last layer from softmax to sigmoid; otherwise, since softmax normalizes the sum of its input elements to 1, the output would be all ones. You can also use binary_crossentropy as the loss function.
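A minimal sketch of those two changes applied to the question's code (only the differing lines are shown; the same class_mode=None change also applies to the image and validation generators):
mask_generator = mask_datagen.flow_from_directory(
    mask_path,
    target_size=(img_size, img_size),
    class_mode=None,           # yield only the mask arrays, no label arrays
    color_mode='grayscale',
    batch_size=batchsize,
    seed=seed)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(us32)
model.compile(loss='binary_crossentropy', optimizer=optimizers.Adam(), metrics=['accuracy'])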

Implementing U-net for multi-class road segmentation

I am trying to train a U-Net for image segmentation on satellite data in order to extract a road network with nine different road types. Thus far I have tried many different U-Net codes that are freely available on the web; however, I was not able to tailor them to my specific case. I'm sincerely hoping you are able to help me.
The satellite image and associated labels can be downloaded via the following link:
Satellite image and associated labels
Additionally, I've written the following code to prep the data for the U-Net:
import skimage
from skimage.io import imread, imshow, imread_collection, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
from sklearn.metrics import jaccard_similarity_score
from shapely.geometry import MultiPolygon, Polygon
import shapely.wkt
import shapely.affinity
from collections import defaultdict
#Importing image and labels
labels = skimage.io.imread("ede_subset_293_wegen.tif")
images = skimage.io.imread("ede_subset_293_20180502_planetscope.tif")[...,:-1]
#Scaling image
img_scaled = images / images.max()
#Make non-roads 0
labels[labels == 15] = 0
#Resizing image and mask and labels
img_scaled_resized = img_scaled[:6400, :6400,:4 ]
print(img_scaled_resized.shape)
labels_resized = labels[:6400, :6400]
print(labels_resized.shape)
#splitting images
split_img = [
np.split(array, 25, axis=0)
for array in np.split(img_scaled_resized, 25, axis=1)
]
split_img[-1][-1].shape
#splitting labels
split_labels = [
np.split(array, 25, axis=0)
for array in np.split(labels_resized, 25, axis=1)
]
#Convert to np.array
split_labels = np.array(split_labels)
split_img = np.array(split_img)
train_images = np.reshape(split_img, (625, 256, 256, 4))
train_labels = np.reshape(split_labels, (625, 256, 256))
x_trn = train_images[:400,:,:,:]
x_val = train_images[400:500,:,:,:]
x_test = train_images[500:625,:,:,:]
y_trn = train_labels[:400,:,:]
y_val = train_labels[400:500,:,:]
y_test = train_labels[500:625,:,:]
plt.imshow(train_images[88,:,:,:])
skimage.io.imshow(train_labels[88,:,:])
Furthermore, I found the following U-Net on Kaggle, which I think should work for this particular case:
def get_unet():
    inputs = Input((8, ISZ, ISZ))
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)
    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)
    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)
    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
    conv10 = Convolution2D(N_Cls, 1, 1, activation='sigmoid')(conv9)
    model = Model(input=inputs, output=conv10)
    model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[jaccard_coef, jaccard_coef_int, 'accuracy'])
    return model
I know it is a big question, but I'm getting pretty desperate. Any help is greatly appreciated!
I found that Conv2DTranspose works better than UpSampling2D; here is a quick implementation using the same:
def conv_block(tensor, nfilters, size=3, padding='same', initializer="he_normal"):
    x = Conv2D(filters=nfilters, kernel_size=(size, size), padding=padding, kernel_initializer=initializer)(tensor)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=nfilters, kernel_size=(size, size), padding=padding, kernel_initializer=initializer)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x
def deconv_block(tensor, residual, nfilters, size=3, padding='same', strides=(2, 2)):
    y = Conv2DTranspose(nfilters, kernel_size=(size, size), strides=strides, padding=padding)(tensor)
    y = concatenate([y, residual], axis=3)
    y = conv_block(y, nfilters)
    return y
def Unet(img_height, img_width, nclasses=3, filters=64):
    # down
    input_layer = Input(shape=(img_height, img_width, 3), name='image_input')
    conv1 = conv_block(input_layer, nfilters=filters)
    conv1_out = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = conv_block(conv1_out, nfilters=filters*2)
    conv2_out = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = conv_block(conv2_out, nfilters=filters*4)
    conv3_out = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = conv_block(conv3_out, nfilters=filters*8)
    conv4_out = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv4_out = Dropout(0.5)(conv4_out)
    conv5 = conv_block(conv4_out, nfilters=filters*16)
    conv5 = Dropout(0.5)(conv5)
    # up
    deconv6 = deconv_block(conv5, residual=conv4, nfilters=filters*8)
    deconv6 = Dropout(0.5)(deconv6)
    deconv7 = deconv_block(deconv6, residual=conv3, nfilters=filters*4)
    deconv7 = Dropout(0.5)(deconv7)
    deconv8 = deconv_block(deconv7, residual=conv2, nfilters=filters*2)
    deconv9 = deconv_block(deconv8, residual=conv1, nfilters=filters)
    # output
    output_layer = Conv2D(filters=nclasses, kernel_size=(1, 1))(deconv9)
    output_layer = BatchNormalization()(output_layer)
    output_layer = Activation('softmax')(output_layer)
    model = Model(inputs=input_layer, outputs=output_layer, name='Unet')
    return model
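For this question's data, a usage sketch (assumptions: the nine road types plus a background class, 256x256 tiles, and integer label maps; note the Input layer above is hard-coded to 3 channels, so it would need to be changed to 4 for the 4-band imagery):
# Hypothetical instantiation: 10 classes (9 road types + background), softmax output,
# so integer label maps pair with sparse_categorical_crossentropy.
model = Unet(256, 256, nclasses=10, filters=64)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])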
Now, for the data generators, you can use the built-in ImageDataGenerator class. Here is the code from the Keras docs:
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
Another way to go is to implement your own generator by extending the Sequence class from Keras:
class seg_gen(Sequence):
    def __init__(self, x_set, y_set, batch_size, image_dir, mask_dir):
        self.x, self.y = x_set, y_set
        self.batch_size = batch_size
        self.samples = len(self.x)
        self.image_dir = image_dir
        self.mask_dir = mask_dir
    def __len__(self):
        return int(np.ceil(len(self.x) / float(self.batch_size)))
    def __getitem__(self, idx):
        # draw a random batch of indices on each call
        idx = np.random.randint(0, self.samples, self.batch_size)
        batch_x, batch_y = [], []
        drawn = 0
        for i in idx:
            _image = image.img_to_array(image.load_img(f'{self.image_dir}/{self.x[i]}', target_size=(img_height, img_width)))/255.
            mask = image.img_to_array(image.load_img(f'{self.mask_dir}/{self.y[i]}', grayscale=True, target_size=(img_height, img_width)))
            # mask = np.resize(mask,(img_height*img_width, classes))
            batch_y.append(mask)
            batch_x.append(_image)
        return np.array(batch_x), np.array(batch_y)
Here is sample code to train the model:
unet = Unet(256, 256, nclasses=66, filters=64)
print(unet.output_shape)
p_unet = multi_gpu_model(unet, 4)
p_unet.load_weights('models-dr/top_weights.h5')
p_unet.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
tb = TensorBoard(log_dir='logs', write_graph=True)
mc = ModelCheckpoint(mode='max', filepath='models-dr/top_weights.h5', monitor='acc', save_best_only='True', save_weights_only='True', verbose=1)
es = EarlyStopping(mode='max', monitor='acc', patience=6, verbose=1)
callbacks = [tb, mc, es]
train_gen = seg_gen(image_list, mask_list, batch_size)
p_unet.fit_generator(train_gen, steps_per_epoch=steps, epochs=13, callbacks=callbacks, workers=8)
I have tried using the dice loss when I had only two classes; here is the code for it:
def dice_coeff(y_true, y_pred):
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    score = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return score
def dice_loss(y_true, y_pred):
    loss = 1 - dice_coeff(y_true, y_pred)
    return loss

Python Keras Tensorflow Embedding Layer Indices[i,j] = k is not in [0,max_features]

I am trying to do author identification; my train_vecs_w2v.shape = (15663, 400).
y_train.shape = (15663, 3), which holds 3 labels, one-hot encoded.
Now the problem is that I am getting an error in the Embedding layer: Indices[0,X] = -1 is not in [0, 15663). How do I solve this? Is it my code, or Keras/TensorFlow?
print('Building Model')
n=19579
max_features = 15663
max_length = 400
EMBEDDING_DIM = 100
model7 = Sequential()
model7.add(Embedding(len(train_vecs_w2v), EMBEDDING_DIM, input_length=max_length, dtype='float32', trainable=True, weights=None, embeddings_initializer='uniform', embeddings_regularizer=None, activity_regularizer=None, embeddings_constraint=None))
print(model7.output_shape)
model7.add(Convolution1D(filters =128, kernel_size = 3, strides=1, activation='relu', use_bias=False, border_mode='same'))
print(model7.output_shape)
model7.add(MaxPooling1D(pool_size = 3))
print(model7.output_shape)
model7.add(Convolution1D(filters = 64, kernel_size = 5, strides=1, activation='relu', border_mode='same'))
print(model7.output_shape)
model7.add(MaxPooling1D(pool_size = 5))
print(model7.output_shape)
model7.add(Flatten()) # model.output_shape == (None, 64*input_shape of convolution layer)
print(model7.output_shape)
model7.add(Dense(output_dim = 64, activation='relu')) # input_shape = (batch_size, input_dim)
print(model7.output_shape)
model7.add(Dense(output_dim = 32, activation='relu'))
print(model7.output_shape)
model7.add(Dense(output_dim = 3, activation='softmax'))
model7.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['categorical_accuracy'])
model7.fit(train_vecs_w2v, y_train_vec, epochs=50, batch_size=32, verbose=2)
The error I am getting
InvalidArgumentError (see above for traceback): indices[0,1] = -1 is not in [0, 15663)
[[Node: embedding_1/Gather = Gather[Tindices=DT_INT32, Tparams=DT_FLOAT, validate_indices=true, _device="/job:localhost/replica:0/task:0/device:CPU:0"](embedding_1/embeddings/read, embedding_1/Cast)]]
I think the problem here is with the word vector count.
It should be
len(train_vecs_w2v) + 1
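A sketch of that change applied to the model definition (assuming all indices fed to the layer are non-negative):
# input_dim one larger than the number of word vectors, so an index equal to
# len(train_vecs_w2v) is still in range.
model7.add(Embedding(len(train_vecs_w2v) + 1, EMBEDDING_DIM,
                     input_length=max_length, trainable=True))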
