Save TensorFlow model with StringLookup layer with encoded vocabulary

I'm having some issues saving a trained TensorFlow model that contains a StringLookup layer, where I'm required to use TFRecords as input for training. A minimal example to reproduce the issue:
First I define the training data
import numpy as np
import tensorflow as tf

vocabulary = [str(i) for i in range(100, 200)]
X_train = np.random.choice(vocabulary, size=(100,))
y_train = np.random.choice([0,1], size=(100,))
I save it to a file as TFRecords
def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def _string_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value).encode('utf-8')]))

with tf.io.TFRecordWriter('train.tfrecords') as writer:
    for i in range(len(X_train)):
        example = tf.train.Example(features=tf.train.Features(feature={
            'user_id': _string_feature(X_train[i]),
            'label': _int64_feature(y_train[i])
        }))
        writer.write(example.SerializeToString())
Then I use the tf.data API to stream the data into training (the original data doesn't fit into memory)
data = tf.data.TFRecordDataset(['train.tfrecords'])
features = {
    'user_id': tf.io.FixedLenFeature([], tf.string),
    'label': tf.io.FixedLenFeature([], tf.int64)
}

def parse(record):
    parsed = tf.io.parse_single_example(record, features)
    return (parsed['user_id'], parsed['label'])
data = data.map(parse)
The data looks like this:
print(list(data.take(5).as_numpy_iterator()))
[(b'166', 1), (b'144', 0), (b'148', 1), (b'180', 0), (b'192', 0)]
The strings of the original dataset were converted to bytes in the process. I have to pass this new vocabulary to the StringLookup constructor, as passing strings while training on bytes will throw an error
new_vocab = [w.encode('utf-8') for w in vocabulary]
inp = tf.keras.Input(shape=(1,), dtype=tf.string)
x = tf.keras.layers.StringLookup(vocabulary=new_vocab)(inp)
x = tf.keras.layers.Embedding(len(new_vocab)+1, 32)(x)
out = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.Model(inputs=[inp], outputs=[out])
model.compile(optimizer='adam', loss='BinaryCrossentropy')
model.fit(data.batch(10), epochs=5)
But when I try to save the model, I get an error because the vocabulary passed to the StringLookup layer is encoded as bytes and can't be dumped to JSON
model.save('model/')
TypeError: ('Not JSON Serializable:', b'100')
I really don't know what to do. I read that TensorFlow recommends using encoded strings instead of normal strings, but that doesn't allow saving the model. I also tried to preprocess the data by decoding the strings before they are fed to the model, but I wasn't able to do that without loading all the data into memory (using just tf.data operations).

Using your data and original vocabulary:
import tensorflow as tf
import numpy as np
vocabulary = [str(i) for i in range(100, 200)]
X_train = np.random.choice(vocabulary, size=(100,))
y_train = np.random.choice([0,1], size=(100,))
...
...
data = data.map(parse)
I ran your code (with an additional Flatten layer) and was able to save your model:
inp = tf.keras.Input(shape=(1,), dtype=tf.string)
x = tf.keras.layers.StringLookup(vocabulary=vocabulary)(inp)
x = tf.keras.layers.Embedding(len(vocabulary)+1, 32)(x)
x = tf.keras.layers.Flatten()(x)
out = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.Model(inputs=[inp], outputs=[out])
model.compile(optimizer='adam', loss='BinaryCrossentropy')
model.fit(data.batch(10), epochs=5)
model.save('model/')
Epoch 1/5
10/10 [==============================] - 1s 8ms/step - loss: 0.6949
Epoch 2/5
10/10 [==============================] - 0s 4ms/step - loss: 0.6864
Epoch 3/5
10/10 [==============================] - 0s 5ms/step - loss: 0.6787
Epoch 4/5
10/10 [==============================] - 0s 5ms/step - loss: 0.6707
Epoch 5/5
10/10 [==============================] - 0s 5ms/step - loss: 0.6620
INFO:tensorflow:Assets written to: model/assets
I do not see why you need new_vocab = [w.encode('utf-8') for w in vocabulary].
If you really need to use new_vocab, you can try setting it during training and afterwards setting vocabulary for saving your model, since the only difference is the encoding:
new_vocab = [w.encode('utf-8') for w in vocabulary]
lookup_layer = tf.keras.layers.StringLookup()
lookup_layer.adapt(new_vocab)
inp = tf.keras.Input(shape=(1,), dtype=tf.string)
x = lookup_layer(inp)
x = tf.keras.layers.Embedding(len(new_vocab)+1, 32)(x)
x = tf.keras.layers.Flatten()(x)
out = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.Model(inputs=[inp], outputs=[out])
model.compile(optimizer='adam', loss='BinaryCrossentropy')
model.fit(data.batch(10), epochs=5)
model.layers[1].adapt(vocabulary)
model.save('model/')
Admittedly, this is quite hacky.
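A less hacky alternative (a sketch, assuming a TF version where StringLookup accepts a vocabulary file path) is to write the vocabulary to a text file, one token per line, and pass the filename to the constructor; the layer config then stores the path rather than the byte strings, so there is nothing non-JSON-serializable to save:
with open('vocab.txt', 'w') as f:
    f.write('\n'.join(vocabulary))

lookup_layer = tf.keras.layers.StringLookup(vocabulary='vocab.txt')
The rest of the model can be built exactly as before, and model.save('model/') should then avoid the serialization error.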

Related

Why does my Tensorflow Keras model output weird loss and accuracy values while training?

I have trained a custom text classifier in TensorFlow with Python for classifying sentences into questions/sentences containing information, using this code:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

text = ""
with open("/content/train_new.txt") as source:
    for line in source.readlines():
        text = text + line
print("text: " + text)

sentences = []
labels = []
for item in text.split("<n>"):
    parts = item.split("<t>")
    print(parts)
    sentences.append(parts[0])
    labels.append(parts[1])
print(sentences)
print(labels)
print("----")

train_test_split_percentage = 80
training_size = round((len(sentences)/100)*train_test_split_percentage)
print("training size: " + str(training_size) + " of " + str(len(labels)))

training_sentences = sentences[0:training_size]
testing_sentences = sentences[training_size:]
training_labels = labels[0:training_size]
testing_labels = labels[training_size:]

vocab_size = 100
max_length = 10

tokenizer = Tokenizer(num_words=vocab_size, oov_token="<OOV>")
tokenizer.fit_on_texts(sentences)
word_index = tokenizer.word_index

training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding="post", truncating="post")
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding="post", truncating="post")

# convert training & testing data into numpy arrays
# (needed to get this to work with TensorFlow 2.x)
import numpy as np
training_padded = np.array(training_padded)
training_labels = np.asarray(training_labels).astype('float32').reshape((-1,1))
testing_padded = np.array(testing_padded)
testing_labels = np.asarray(testing_labels).astype('float32').reshape((-1,1))

# defining the model
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, 24, input_length=max_length),
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(24, activation='relu'),
    tf.keras.layers.Dense(1, activation='softmax')
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# training the model
num_epochs = 1000
history = model.fit(training_padded, training_labels, epochs=num_epochs, validation_data=(testing_padded, testing_labels), verbose=2)
However, while training, it prints weird accuracy and loss values like this:
Epoch 972/1000
9/9 - 0s - loss: -8.2316e+03 - accuracy: 0.7345 - val_loss: -2.7299e+04 - val_accuracy: 0.0000e+00
Epoch 973/1000
9/9 - 0s - loss: -8.2452e+03 - accuracy: 0.7345 - val_loss: -2.7351e+04 - val_accuracy: 0.0000e+00
Epoch 974/1000
9/9 - 0s - loss: -8.2571e+03 - accuracy: 0.7345 - val_loss: -2.7363e+04 - val_accuracy: 0.0000e+00
Epoch 975/1000
9/9 - 0s - loss: -8.2703e+03 - accuracy: 0.7345 - val_loss: -2.7416e+04 - val_accuracy: 0.0000e+00
The train_new.txt file contains data in the form of text<t>class_num<n>
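For illustration, hypothetical contents of a file in that format:
Is this a question?<t>1<n>This is a statement.<t>0<n>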
When trying to predict using the model.predict() function, it always outputs [[1.]]
What's the issue with my code?
tf.keras.layers.Dense(1, activation='sigmoid')
You should use sigmoid as the activation if you are doing binary classification. Alternatively,
tf.keras.layers.Dense(2, activation='softmax')
would also be correct in terms of probabilities (two output units with a categorical loss).
Softmax outputs always sum to one, so a single softmax unit always outputs exactly 1. That's why you get 1 as the output every time.
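For example, a minimal corrected version of the model head (a sketch reusing the question's vocab_size and max_length):
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, 24, input_length=max_length),
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(24, activation='relu'),
    # one unit with sigmoid pairs correctly with binary_crossentropy
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])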

Validation Accuracy stuck at .5073

I am trying to create a regression model, but my validation accuracy stays at .5073. I am trying to train on images and have the network find the position of an object and the rough area it covers. When I increased the number of unfrozen layers, the accuracy plateau dropped to .4927. I would appreciate any help finding out what I am doing wrong.
base_model = MobileNet(weights='imagenet', include_top=False, input_shape=(200,200,3), dropout=.3)
location = base_model.output
location = GlobalAveragePooling2D()(location)
location = Dense(16, activation='relu', name="locdense1")(location)
location = Dense(32, activation='relu', name="locdense2")(location)
location = Dense(64, activation='relu', name="locdense3")(location)
finallocation = Dense(3, activation='sigmoid', name="finalLocation")(location)
model = Model(inputs=base_model.input, outputs=finallocation)  # [types, finallocation]
for layer in model.layers[:91]:  # freeze up to 87
    if ('loc' or 'Loc') in layer.name:
        layer.trainable = True
    else:
        layer.trainable = False
optimizer = Adam(learning_rate=.001)
model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=['accuracy'])
history = model.fit(get_batches(type='Train'), validation_data=get_batches(type='Validation'), validation_steps=500, steps_per_epoch=1000, epochs=10)
Data is generated from a tfrecord file which has image data and some labels. This is the last bit of that generator.
IMG_SIZE = 200
def format_position(image, positionx, positiony, width):
    image = tf.cast(image, tf.float32)
    image = (image/127.5) - 1
    image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
    labels = tf.stack([positionx, positiony, width])
    return image, labels
Get batches (the dataset is loaded from two directories with tfrecord files, one for training and the other for validation):
def get_batches(type):
    dataset = load_dataset(type=type)
    if type == 'Train':
        databatch = dataset.repeat()
    databatch = dataset.batch(32)
    databatch = databatch.prefetch(2)
    return databatch
positionx, positiony, and width are all normalized to 0-1 (relative position with respect to the image).
Here is an example output:
Epoch 1/10
1000/1000 [==============================] - 233s 233ms/step - loss: 0.0267 - accuracy: 0.5833 - val_loss: 0.0330 - val_accuracy: 0.5073
Epoch 2/10
1000/1000 [==============================] - 283s 283ms/step - loss: 0.0248 - accuracy: 0.6168 - val_loss: 0.0337 - val_accuracy: 0.5073
Epoch 3/10
1000/1000 [==============================] - 221s 221ms/step - loss: 0.0238 - accuracy: 0.6309 - val_loss: 0.0312 - val_accuracy: 0.5073
The final activation function in your model should not be sigmoid, since it will output numbers between 0 and 1, and I am assuming your labels (i.e., positionx, positiony, and width) are not in this range. You could replace it with either 'linear' or 'relu'.
You're doing regression, and your loss function is 'mean_squared_error', so you cannot use accuracy as the metric. You should use 'mae' (mean absolute error) or 'mse' to check the difference between your predictions and the actual target values.
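A sketch of both suggested changes, reusing the question's imports and its location tensor (the 'linear' activation and the regression metrics are the substitutions described above):
finallocation = Dense(3, activation='linear', name="finalLocation")(location)
model = Model(inputs=base_model.input, outputs=finallocation)
# 'accuracy' is meaningless for continuous targets; track MAE/MSE instead
model.compile(optimizer=Adam(learning_rate=.001),
              loss='mean_squared_error',
              metrics=['mae', 'mse'])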

Why does a TensorFlow fit call with a validation generator use the same samples?

I am trying to fit a NN on some data that I stream to my model with generators. I use generators both for the train and for the validation samples. I expected the fit method to iterate over both the train and the validation datasets, but I saw that the model uses the same validation samples over and over again; that is, the validation generator resets every epoch.
Here is a reproducible example:
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense

def gen(use):
    while True:
        for i in range(10):
            print(use + f' using mat of {i}')
            X = (np.ones(40)*i).reshape(10,4)
            y = np.ones(10).reshape(-1,1)
            yield (X,y)

dataset_train = tf.data.Dataset.from_generator(generator=lambda: gen('train'),
                                               output_types=(tf.float32, tf.float32),
                                               output_shapes=((10,4), (10,1)))
dataset_val = tf.data.Dataset.from_generator(generator=lambda: gen('validation'),
                                             output_types=(tf.float32, tf.float32),
                                             output_shapes=((10,4), (10,1)))

dataset_train.batch(2)
dataset_val.batch(2)

model = tf.keras.models.Sequential()
model.add(Dense(units=10))
model.compile(loss='mse')

history = model.fit(dataset_train,
                    steps_per_epoch=1,
                    epochs=5,
                    shuffle=False,
                    verbose=2,
                    validation_data=dataset_val,
                    validation_steps=1)
Note that I use tensorflow 2.2.0-rc3 (the Google Colab default version).
In this code, my generator produces a 10-by-4 matrix of some number n that changes on each iteration; this matrix represents my input features. The labels here are just a vector of ones (10 by 1). What I would have wanted in this toy example is that for each epoch i, the features matrix would be a matrix of i's. The output I receive:
Epoch 1/5
train using mat of 0
validation using mat of 0
1/1 - 0s - loss: 1.0000 - val_loss: 0.9937
Epoch 2/5
train using mat of 1
validation using mat of 0
1/1 - 0s - loss: 1.5841 - val_loss: 0.9909
Epoch 3/5
train using mat of 2
validation using mat of 0
1/1 - 0s - loss: 3.8616 - val_loss: 0.9902
Epoch 4/5
train using mat of 3
validation using mat of 0
1/1 - 0s - loss: 7.7457 - val_loss: 0.9906
Epoch 5/5
train using mat of 4
validation using mat of 0
1/1 - 0s - loss: 13.1401 - val_loss: 0.9915
So the training generator works as I expected, but the validation is stuck on 0.
Is there a way to also iterate over the validation dataset?
Internally, TensorFlow extracts the validation data once and uses it for all the epochs; that is the reason for your output.
I think even the documentation is in line with this, i.e. there is no mention of generators for validation_data.
From Documentation:
validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch. The model will not be trained on this data. validation_data will override validation_split. validation_data could be:
- tuple (x_val, y_val) of Numpy arrays or tensors
- tuple (x_val, y_val, val_sample_weights) of Numpy arrays
- dataset
For the first two cases, batch_size must be provided. For the last case, validation_steps could be provided.
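One workaround (a sketch, assuming TF 2.x behavior; gen_finite is a modified version of the question's generator) is to make the validation dataset finite and drop validation_steps, so Keras runs through all of its samples every epoch instead of re-reading only the first validation_steps batches:
def gen_finite(use):
    for i in range(10):
        print(use + f' using mat of {i}')
        X = (np.ones(40)*i).reshape(10,4)
        y = np.ones(10).reshape(-1,1)
        yield (X, y)

dataset_val = tf.data.Dataset.from_generator(generator=lambda: gen_finite('validation'),
                                             output_types=(tf.float32, tf.float32),
                                             output_shapes=((10,4), (10,1)))
history = model.fit(dataset_train, steps_per_epoch=1, epochs=5,
                    validation_data=dataset_val)
This evaluates on every matrix 0-9 each epoch rather than only on mat 0.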

Training accuracy while fitting the model not reflected in confusion matrix

I am training an image classification model to classify images containing mountain, waterfall and people classes. Currently, I am using VGG (transfer learning) to train on this data. While training, I am getting almost 93% accuracy on the training data and 90% accuracy on the validation data. However, when I check which classes are being wrongly classified during training by computing a confusion matrix on the training data, the accuracy seems to be much lower: only about 30% of the data is classified correctly.
I have tried checking the confusion matrix on other similar image classification problems but there it seems to be showing the accuracy that we see while training.
Code to create ImageDataGenerator objects for training and validation
from keras.applications.inception_v3 import preprocess_input, decode_predictions

# Train DataSet Generator with Augmentation
print("\nTraining Data Set")
train_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
train_flow = train_generator.flow(
    train_images, train_labels,
    batch_size=BATCH_SIZE
)

# Validation DataSet Generator with Augmentation
print("\nValidation Data Set")
val_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
val_flow = val_generator.flow(
    validation_images, validation_labels,
    batch_size=BATCH_SIZE
)
Code to build the model and compile it
# Initialize VGG16 with transfer learning
base_model = applications.vgg16.VGG16(weights='imagenet',
                                      include_top=False,
                                      input_shape=(WIDTH, HEIGHT, 3))

# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# and a dense layer
x = Dense(1024, activation='relu')(x)
predictions = Dense(len(categories), activation='softmax')(x)

# first: train only the top layers (which were randomly initialized),
# i.e. freeze all convolutional VGG16 layers
for layer in base_model.layers:
    layer.trainable = False

# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)

# compile the model (should be done *after* setting layers to non-trainable)
model.compile(optimizer=optimizers.RMSprop(lr=1e-4), metrics=['accuracy'], loss='categorical_crossentropy')
model.summary()
Code to fit the model to the training dataset and validate using validation dataset
import math

top_layers_file_path = r"/content/drive/My Drive/intel-image-classification/intel-image-classification/top_layers.iv3.hdf5"
checkpoint = ModelCheckpoint(top_layers_file_path, monitor='loss', verbose=1, save_best_only=True, mode='min')
tb = TensorBoard(log_dir=r'/content/drive/My Drive/intel-image-classification/intel-image-classification/logs', batch_size=BATCH_SIZE, write_graph=True, update_freq='batch')
early = EarlyStopping(monitor="loss", mode="min", patience=5)
csv_logger = CSVLogger(r'/content/drive/My Drive/intel-image-classification/intel-image-classification/logs/iv3-log.csv', append=True)

history = model.fit_generator(train_flow,
                              epochs=30,
                              verbose=1,
                              validation_data=val_flow,
                              validation_steps=math.ceil(validation_images.shape[0]/BATCH_SIZE),
                              steps_per_epoch=math.ceil(train_images.shape[0]/BATCH_SIZE),
                              callbacks=[checkpoint, early, tb, csv_logger])
training steps show the following accuracy:
Epoch 1/30
91/91 [==============================] - 44s 488ms/step - loss: 0.6757 - acc: 0.7709 - val_loss: 0.4982 - val_acc: 0.8513
Epoch 2/30
91/91 [==============================] - 32s 349ms/step - loss: 0.4454 - acc: 0.8395 - val_loss: 0.3980 - val_acc: 0.8557
.
.
Epoch 20/30
91/91 [==============================] - 32s 349ms/step - loss: 0.2026 - acc: 0.9238 - val_loss: 0.2684 - val_acc: 0.8940
.
.
Epoch 30/30
91/91 [==============================] - 32s 349ms/step - loss: 0.1739 - acc: 0.9364 - val_loss: 0.2616 - val_acc: 0.8984
Ran predictions on the training dataset itself:
import math
import numpy as np

predictions = model.predict_generator(
    train_flow,
    verbose=1,
    steps=math.ceil(train_images.shape[0]/BATCH_SIZE))
predicted_classes = [x[0] for x in enc.inverse_transform(predictions)]
true_classes = [x[0] for x in enc.inverse_transform(train_labels)]
(enc is a OneHotEncoder.)
However, the confusion matrix looks like the following:
import sklearn
sklearn.metrics.confusion_matrix(
    true_classes,
    predicted_classes,
    labels=['mountain','people','waterfall'])
Confusion matrix:
[[315, 314, 283],
 [334, 309, 273],
 [337, 280, 263]]
The complete code has been uploaded on https://nbviewer.jupyter.org/github/paridhichoudhary/scene-image-classification/blob/master/Classification_v7.ipynb
I believe it's because train_generator.flow has shuffle=True by default. This results in predicted_classes not matching train_labels.
Setting shuffle=False on train_generator.flow should help, or instead you can use something like the following, which might be easier to understand:
predicted_classes = []
true_classes = []
for i in range(len(train_flow)):  # you can just use len(train_flow) instead of math.ceil(train_images.shape[0]/BATCH_SIZE)
    x, y = train_flow[i]  # you can index train_flow like this
    z = model.predict(x)
    for j in range(x.shape[0]):
        predicted_classes.append(z[j])
        true_classes.append(y[j])
I haven't tried this yet, but it should work.
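For completeness, a sketch of the shuffle=False variant (reusing the question's generator and variables; the predictions still need enc.inverse_transform before comparing to the labels):
eval_flow = train_generator.flow(
    train_images, train_labels,
    batch_size=BATCH_SIZE,
    shuffle=False)  # keep sample order fixed so predictions line up with train_labels
predictions = model.predict_generator(
    eval_flow,
    steps=math.ceil(train_images.shape[0]/BATCH_SIZE))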

TensorFlow Keras with tf.data dataset input

I'm new to TensorFlow Keras and tf.data datasets. Can anyone help me understand why the following code doesn't work?
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.keras.utils import multi_gpu_model
from tensorflow.python.keras import backend as K

data = np.random.random((1000,32))
labels = np.random.random((1000,10))

dataset = tf.data.Dataset.from_tensor_slices((data,labels))
print(dataset)
print(dataset.output_types)
print(dataset.output_shapes)

dataset.batch(10)
dataset.repeat(100)

inputs = keras.Input(shape=(32,))  # Returns a placeholder tensor

# A layer instance is callable on a tensor, and returns a tensor.
x = keras.layers.Dense(64, activation='relu')(inputs)
x = keras.layers.Dense(64, activation='relu')(x)
predictions = keras.layers.Dense(10, activation='softmax')(x)

# Instantiate the model given inputs and outputs.
model = keras.Model(inputs=inputs, outputs=predictions)

# The compile step specifies the training configuration.
model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Trains for 5 epochs
model.fit(dataset, epochs=5, steps_per_epoch=100)
It failed with the following error:
model.fit(x=dataset, y=None, epochs=5, steps_per_epoch=100)
  File "/home/wuxinyu/pyEnv/lib/python3.5/site-packages/tensorflow/python/keras/engine/training.py", line 1510, in fit
    validation_split=validation_split)
  File "/home/wuxinyu/pyEnv/lib/python3.5/site-packages/tensorflow/python/keras/engine/training.py", line 994, in _standardize_user_data
    class_weight, batch_size)
  File "/home/wuxinyu/pyEnv/lib/python3.5/site-packages/tensorflow/python/keras/engine/training.py", line 1113, in _standardize_weights
    exception_prefix='input')
  File "/home/wuxinyu/pyEnv/lib/python3.5/site-packages/tensorflow/python/keras/engine/training_utils.py", line 325, in standardize_input_data
    'with shape ' + str(data_shape))
ValueError: Error when checking input: expected input_1 to have 2 dimensions, but got array with shape (32,)
According to tf.keras guide, I should be able to directly pass the dataset to model.fit, as this example shows:
Input tf.data datasets
Use the Datasets API to scale to large datasets or multi-device training. Pass a tf.data.Dataset instance to the fit method:
# Instantiates a toy dataset instance:
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
dataset = dataset.repeat()
Don't forget to specify steps_per_epoch when calling fit on a dataset.
model.fit(dataset, epochs=10, steps_per_epoch=30)
Here, the fit method uses the steps_per_epoch argument—this is the number of training steps the model runs before it moves to the next epoch. Since the Dataset yields batches of data, this snippet does not require a batch_size.
Datasets can also be used for validation:
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32).repeat()
val_dataset = tf.data.Dataset.from_tensor_slices((val_data, val_labels))
val_dataset = val_dataset.batch(32).repeat()
model.fit(dataset, epochs=10, steps_per_epoch=30,
          validation_data=val_dataset,
          validation_steps=3)
What's the problem with my code, and what's the correct way of doing it?
To your original question as to why you're getting the error:
Error when checking input: expected input_1 to have 2 dimensions, but got array with shape (32,)
The reason your code breaks is that you haven't assigned the result of .batch() back to the dataset variable, like so:
dataset = dataset.batch(10)
You simply called dataset.batch(10) and discarded the result; Dataset transformations are not in-place.
This breaks because without the batch() the output tensors are not batched, i.e. you get shape (32,) instead of (1,32).
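In other words, a minimal sketch of the fix applied to the question's code:
dataset = dataset.batch(10)    # re-assign: Dataset transformations return new datasets
dataset = dataset.repeat(100)  # same for repeat
model.fit(dataset, epochs=5, steps_per_epoch=100)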
You are missing the definition of an iterator, which is the reason for the error.
data = np.random.random((1000,32))
labels = np.random.random((1000,10))

dataset = tf.data.Dataset.from_tensor_slices((data,labels))
dataset = dataset.batch(10).repeat()

inputs = Input(shape=(32,))  # Returns a placeholder tensor

# A layer instance is callable on a tensor, and returns a tensor.
x = Dense(64, activation='relu')(inputs)
x = Dense(64, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)

# Instantiate the model given inputs and outputs.
model = keras.Model(inputs=inputs, outputs=predictions)

# The compile step specifies the training configuration.
model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Trains for 5 epochs
model.fit(dataset.make_one_shot_iterator(), epochs=5, steps_per_epoch=100)
Epoch 1/5
100/100 [==============================] - 1s 8ms/step - loss: 11.5787 - acc: 0.1010
Epoch 2/5
100/100 [==============================] - 0s 4ms/step - loss: 11.4846 - acc: 0.0990
Epoch 3/5
100/100 [==============================] - 0s 4ms/step - loss: 11.4690 - acc: 0.1270
Epoch 4/5
100/100 [==============================] - 0s 4ms/step - loss: 11.4611 - acc: 0.1300
Epoch 5/5
100/100 [==============================] - 0s 4ms/step - loss: 11.4546 - acc: 0.1360
This is the result on my system.
