Bad accuracy when predicting - python

After I trained my model for the toxic comment challenge in Keras, the prediction accuracy is bad. I'm not sure if I'm doing something wrong, but the accuracy during training was pretty good, ~0.98.
How I trained
import sys, os, re, csv, codecs, numpy as np, pandas as pd
import matplotlib.pyplot as plt
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers
train = pd.read_csv('train.csv')
list_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
y = train[list_classes].values
list_sentences_train = train["comment_text"]
max_features = 20000
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences_train))
list_tokenized_train = tokenizer.texts_to_sequences(list_sentences_train)
maxlen = 200
X_t = pad_sequences(list_tokenized_train, maxlen=maxlen)
inp = Input(shape=(maxlen, ))
embed_size = 128
x = Embedding(max_features, embed_size)(inp)
x = LSTM(60, return_sequences=True,name='lstm_layer')(x)
x = GlobalMaxPool1D()(x)
x = Dropout(0.1)(x)
x = Dense(50, activation="relu")(x)
x = Dropout(0.1)(x)
x = Dense(6, activation="sigmoid")(x)
model = Model(inputs=inp, outputs=x)
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
batch_size = 32
epochs = 2
print(X_t[0])
model.fit(X_t,y, batch_size=batch_size, epochs=epochs, validation_split=0.1)
model.save("m.hdf5")
This is how I predict
from keras.models import load_model

model = load_model('m.hdf5')
list_sentences_train = np.array(["I love you Stackoverflow"])
max_features = 20000
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences_train))
list_tokenized_train = tokenizer.texts_to_sequences(list_sentences_train)
maxlen = 200
X_t = pad_sequences(list_tokenized_train, maxlen=maxlen)
print(X_t)
print(model.predict(X_t))
Output
[[ 1.97086316e-02 9.36032447e-05 3.93966911e-03 5.16672269e-04
3.67353857e-03 1.28102733e-03]]

In the inference (i.e. prediction) phase you should apply the same pre-processing steps you used during training. Therefore you should not create a new Tokenizer instance and fit it on your test data. Rather, if you want to be able to make predictions with the same model later, besides the model you must also save everything derived from the training data, such as the vocabulary stored in the Tokenizer instance. So it would look like this:
import pickle
# building and training of the model as you have done ...
# store all the data we need later: model and tokenizer
model.save("m.hdf5")
with open('tokenizer.pkl', 'wb') as handler:
    pickle.dump(tokenizer, handler)
And now in prediction phase:
import pickle
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences

model = load_model('m.hdf5')
with open('tokenizer.pkl', 'rb') as handler:
    tokenizer = pickle.load(handler)
list_sentences_train = ["I love you Stackoverflow"]
# use the same tokenizer instance you used in the training phase
list_tokenized_train = tokenizer.texts_to_sequences(list_sentences_train)
maxlen = 200
X_t = pad_sequences(list_tokenized_train, maxlen=maxlen)
print(model.predict(X_t))
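As an alternative to pickling, you can also serialize the Tokenizer to JSON (a sketch; this assumes a Keras/keras-preprocessing version recent enough to provide tokenizer_from_json):
from keras.preprocessing.text import tokenizer_from_json

# at training time: write the tokenizer (vocabulary included) to a JSON file
with open('tokenizer.json', 'w') as f:
    f.write(tokenizer.to_json())

# at prediction time: rebuild the exact same tokenizer from the JSON file
with open('tokenizer.json') as f:
    tokenizer = tokenizer_from_json(f.read())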

Related

Convolutional LSTM Model Dimension Incompatibility when making predictions & prediction dimension issues

I built a Convolutional LSTM model to predict upcoming Bitcoin prices, using past data of the Bitcoin close price and other features.
Let me jump straight to the code:
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import tensorflow as tf
import tensorflow.keras as keras
import keras_tuner as kt
from keras_tuner import HyperParameters as hp
from keras.models import Sequential
from keras.layers import InputLayer, ConvLSTM1D, LSTM, Flatten, RepeatVector, Dense, TimeDistributed
from keras.callbacks import EarlyStopping
from tensorflow.keras.metrics import RootMeanSquaredError
from tensorflow.keras.optimizers import Adam
import keras.backend as K
from keras.losses import Huber
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
DIR = '../input/btc-features-targets'
SEG_DIR = '../input/segmented'
segmentized_features = os.listdir(SEG_DIR)
btc_train_features = []
for seg in segmentized_features:
    train_features = pd.read_csv(f'{SEG_DIR}/{seg}')
    train_features.set_index('date', inplace=True)
    btc_train_features.append(scaler.fit_transform(train_features.values))
btc_train_targets = pd.read_csv(f'{DIR}/btc_train_targets.csv')
btc_train_targets.set_index('date', inplace=True)
btc_test_features = pd.read_csv(f'{DIR}/btc_test_features.csv')
btc_tef1 = btc_test_features.iloc[:111]
btc_tef2 = btc_test_features.iloc[25:]
btc_tef1.set_index('date', inplace=True)
btc_tef2.set_index('date', inplace=True)
btc_test_targets = pd.read_csv(f'{DIR}/btc_test_targets.csv')
btc_test_targets.set_index('date', inplace=True)
btc_trt_log = np.log(btc_train_targets)
btc_tefs1 = scaler.fit_transform(btc_tef1.values)
btc_tefs2 = scaler.fit_transform(btc_tef2.values)
btc_tet_log = np.log(btc_test_targets)
scaled_train_features = []
for features in btc_train_features:
    shape = features.shape
    scaled_train_features.append(np.expand_dims(features, [0,3]))
shape_2 = btc_tefs1.shape
btc_tefs1 = np.expand_dims(btc_tefs1, [0,3])
shape_3 = btc_tefs2.shape
btc_tefs2 = np.expand_dims(btc_tefs2, [0,3])
btc_trt_log = btc_trt_log.values[0]
btc_tet_log = btc_tet_log.values[0]
def build(hp):
    model = keras.Sequential()
    # Input Layer
    model.add(InputLayer(input_shape=(111,32,1)))
    # ConvLSTM1D
    convLSTM_hp_filters = hp.Int(name='convLSTM_filters', min_value=32, max_value=512, step=32)
    convLSTM_hp_kernel_size = hp.Choice(name='convLSTM_kernel_size', values=[3,5,7])
    convLSTM_activation = hp.Choice(name='convLSTM_activation', values=['selu', 'relu'])
    model.add(ConvLSTM1D(filters=convLSTM_hp_filters,
                         kernel_size=convLSTM_hp_kernel_size,
                         padding='same',
                         activation=convLSTM_activation,
                         use_bias=True,
                         bias_initializer='zeros'))
    # Flatten
    model.add(Flatten())
    # RepeatVector
    model.add(RepeatVector(5))
    # LSTM
    LSTM_hp_units = hp.Int(name='LSTM_units', min_value=32, max_value=512, step=32)
    LSTM_activation = hp.Choice(name='LSTM_activation', values=['selu', 'relu'])
    model.add(LSTM(units=LSTM_hp_units, activation=LSTM_activation, return_sequences=True))
    # TimeDistributed Dense
    dense_units = hp.Int(name='dense_units', min_value=32, max_value=512, step=32)
    dense_activation = hp.Choice(name='dense_activation', values=['selu', 'relu'])
    model.add(TimeDistributed(Dense(units=dense_units, activation=dense_activation)))
    # TimeDistributed Dense_Output
    model.add(Dense(1))
    # Set Learning Rate
    hp_learning_rate = hp.Choice(name='learning_rate', values=[1e-2, 1e-3, 1e-4])
    # Compile Model
    model.compile(optimizer=Adam(learning_rate=hp_learning_rate),
                  loss=Huber(),
                  metrics=[RootMeanSquaredError()])
    return model
tuner = kt.Hyperband(build,
                     objective=kt.Objective('root_mean_squared_error', direction='min'),
                     max_epochs=10,
                     factor=3)
early_stop = EarlyStopping(monitor='root_mean_squared_error', patience=5)
opt_hps = []
for train_features in scaled_train_features:
    tuner.search(train_features, btc_trt_log, epochs=50, callbacks=[early_stop])
    opt_hps.append(tuner.get_best_hyperparameters(num_trials=1)[0])
models, epochs = ([] for _ in range(2))
for hps in opt_hps:
    model = tuner.hypermodel.build(hps)
    models.append(model)
    history = model.fit(train_features, btc_trt_log, epochs=70, verbose=0)
    rmse = history.history['root_mean_squared_error']
    best_epoch = rmse.index(min(rmse)) + 1
    epochs.append(best_epoch)
hypermodel = tuner.hypermodel.build(opt_hps[0])
for train_features, epoch in zip(scaled_train_features, epochs):
    hypermodel.fit(train_features, btc_trt_log, epochs=epoch)
tp1 = hypermodel.predict(btc_tefs1).flatten()
tp2 = hypermodel.predict(btc_tefs2).flatten()
test_predictions = np.concatenate((tp1, tp2[86:]), axis=None)
The hyperparameters of the model are configured with keras_tuner. Since the notebook raised ResourceExhaustedError when training on the full feature dataset, sequentially segmented datasets are used instead (and, according to a study that used a similar model architecture, training can be done efficiently with this approach).
The input dimension of each segmented dataset is (111,32,1).
There aren't any issues until the last code block; the models train fine. Yet when .predict() is executed, the notebook prints an error stating that the dimension of the input features used for prediction is incompatible with the dimension of the input features used during training. I don't understand why this happens, since, as far as I know, the input dimensions of the test dataset do not have to be identical to those of the training dataset.
Even though all the price data from 2018 to early 2021 are used as training datasets, predictions are only needed for the mid 2021 timeframe.
The dataset used for prediction has a dimension of (136,32,1).
I tried matching the dimension of this dataset to (111,32,1) through index slicing. That, however, caused issues in the output dimension: while predictions should be made for 136 data points, the result only returned 10.
Are there any issues with the model configuration? I cannot interpret the current situation.
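For clarity, here is a minimal sketch that reproduces the incompatibility (this is not my actual model, just made-up layer sizes; it assumes TF >= 2.6 for ConvLSTM1D). A model declared with a fixed time dimension only accepts inputs of that length, while declaring the time axis as None lets .predict() accept windows of any length:
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import InputLayer, ConvLSTM1D, Flatten, Dense

# Fixed time dimension: only inputs shaped (batch, 111, 32, 1) are accepted.
fixed = Sequential([InputLayer(input_shape=(111, 32, 1)),
                    ConvLSTM1D(filters=8, kernel_size=3, padding='same'),
                    Flatten(), Dense(1)])

# Time dimension declared as None: windows of any length are accepted.
flexible = Sequential([InputLayer(input_shape=(None, 32, 1)),
                       ConvLSTM1D(filters=8, kernel_size=3, padding='same'),
                       Flatten(), Dense(1)])

x136 = np.random.rand(1, 136, 32, 1).astype('float32')
print(flexible.predict(x136).shape)   # (1, 1)
# fixed.predict(x136)                 # raises a shape-incompatibility error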

Python keras sequential model predicts the same value (y_train average) for all inputs

I'm trying to build a sequential neural network with Keras. I generate a dataset by inserting random values into a known function and train my model on this dataset long enough to get a steady loss. Then I ask the model to predict the x_train values, but instead of predicting something close to y_train, it returns the same value regardless of the input x. This value also happens to be the average of the y_train values. I don't understand what I'm doing wrong and why this is happening.
I'm using the following function for training the model:
def train_model(x_train, y_train, batch_size, input_size, layer_sizes, activations, optimizer, epochs, loss='MeanSquaredError'):
    assert len(layer_sizes) == len(activations)
    n_layers = len(layer_sizes)
    model = Sequential()
    model.add(LayerNormalization(input_dim=input_size))
    model.add(Dense(layer_sizes[0], kernel_regularizer='l2', kernel_initializer='ones',
                    activation=activations[0], input_dim=input_size, name='layer1'))
    for i in range(1, n_layers):
        model.add(Dense(layer_sizes[i], kernel_initializer='ones', activation=activations[i], name=f'layer{i+1}'))
    model.compile(
        optimizer=optimizer,
        loss=loss,  # MeanSquaredLogarithmicError
    )
    print(model.summary())
    history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs)
    loss_history = history.history['loss']
    plt.scatter(x=np.arange(1, epochs+1), y=loss_history)
    plt.show()
    return model
I then created an arbitrary function (just for test purposes) as:
def func(x1, x2, x3, x4):
    y = (x1**3 + (x2*x3 + 2)) / (x4 + x2*x1)
    return y
and made a random dataset with this function:
def random_points_in_range(n, ranges):
    points = np.empty((n, len(ranges)))
    for i, element in enumerate(ranges):
        start = min(element[1], element[0])
        interval = abs(element[1] - element[0])
        rand_check = np.random.rand(n)
        randoms = (rand_check * interval) + start
        points[:, i] = randoms.T
    return points

def generate_random_dataset(n=200, ranges=[(0,10),(0,10),(0,10),(0,10)]):
    x_dataset = random_points_in_range(n, ranges)
    y_dataset = np.empty(n)
    for i in range(n):
        x1, x2, x3, x4 = x_dataset[i]
        y_dataset[i] = func(x1, x2, x3, x4)
    return x_dataset, y_dataset
I then train a model with these functions:
x_train,y_train = generate_random_dataset()
layer_sizes = [6,8,10,10,1]
activations = [LeakyReLU(),'relu','swish','relu','linear']
opt = Adam(learning_rate=0.001)
epochs = 3000
model=train_model(x_train,y_train,5,4,layer_sizes,activations,opt,epochs,loss='MeanSquaredError')
If you want to run the code, these are the imports you need:
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
import random
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LayerNormalization
from tensorflow.keras.layers import LeakyReLU  # used in the activations list above
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers

Custom Keras loss function with the output's gradient [duplicate]

I am using a TF2 (2.3.0) NN to approximate the function y which solves the ODE y' + 3y = 0.
I have defined a custom loss class and function in which I am trying to differentiate the single output with respect to the single input so that the equation holds, provided that y_true is zero:
from tensorflow.keras.losses import Loss
import tensorflow as tf
class CustomLossOde(Loss):
    def __init__(self, x, model, name='ode_loss'):
        super().__init__(name=name)
        self.x = x
        self.model = model

    def call(self, y_true, y_pred):
        with tf.GradientTape() as tape:
            tape.watch(self.x)
            y_p = self.model(self.x)
        dy_dx = tape.gradient(y_p, self.x)
        loss = tf.math.reduce_mean(tf.square(dy_dx + 3 * y_pred - y_true))
        return loss
but running the following NN:
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras import Input
from custom_loss_ode import CustomLossOde
num_samples = 1024
x_train = 4 * (tf.random.uniform((num_samples, )) - 0.5)
y_train = tf.zeros((num_samples, ))
inputs = Input(shape=(1,))
x = Dense(16, 'tanh')(inputs)
x = Dense(8, 'tanh')(x)
x = Dense(4)(x)
y = Dense(1)(x)
model = Model(inputs=inputs, outputs=y)
loss = CustomLossOde(model.input, model)
model.compile(optimizer=Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.99),loss=loss)
model.run_eagerly = True
model.fit(x_train, y_train, batch_size=16, epochs=30)
For now I am getting 0 loss from the first epoch, which doesn't make any sense.
I have printed both y_true and y_pred from within the function and they seem OK, so I suspect that the problem is in the gradient, which I didn't succeed in printing.
Appreciate any help.
Defining a custom loss with the high-level Keras API is a bit difficult in that case. I would instead write the training loop from scratch, as it allows finer-grained control over what you can do.
I took inspiration from those two guides :
Advanced Automatic Differentiation
Writing a training loop from scratch
Basically, I used the fact that multiple tapes can interact seamlessly. I use one to compute the loss function and the other to calculate the gradients to be propagated by the optimizer.
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras import Input
num_samples = 1024
x_train = 4 * (tf.random.uniform((num_samples, )) - 0.5)
y_train = tf.zeros((num_samples, ))
inputs = Input(shape=(1,))
x = Dense(16, 'tanh')(inputs)
x = Dense(8, 'tanh')(x)
x = Dense(4)(x)
y = Dense(1)(x)
model = Model(inputs=inputs, outputs=y)
# using the high level tf.data API for data handling
x_train = tf.reshape(x_train,(-1,1))
dataset = tf.data.Dataset.from_tensor_slices((x_train,y_train)).batch(1)
opt = Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.99)
for step, (x, y_true) in enumerate(dataset):
    # we need to convert x to a variable if we want the tape to be
    # able to compute the gradient according to x
    x_variable = tf.Variable(x)
    with tf.GradientTape() as model_tape:
        with tf.GradientTape() as loss_tape:
            loss_tape.watch(x_variable)
            y_pred = model(x_variable)
        dy_dx = loss_tape.gradient(y_pred, x_variable)
        loss = tf.math.reduce_mean(tf.square(dy_dx + 3 * y_pred - y_true))
    grad = model_tape.gradient(loss, model.trainable_variables)
    opt.apply_gradients(zip(grad, model.trainable_variables))
    if step % 20 == 0:
        print(f"Step {step}: loss={loss.numpy()}")
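As a quick sanity check (a sketch of my own, not part of the training loop above): the ODE y' + 3y = 0 has the analytic solution y(x) = y(0) * exp(-3x), so after training you can compare the network against it, pinning the constant with the model's own value at x = 0:
import numpy as np

x_test = np.linspace(-2.0, 2.0, 50).reshape(-1, 1).astype('float32')
y_model = model.predict(x_test).flatten()

# pin the integration constant with the network's own prediction at x = 0
y0 = model.predict(np.zeros((1, 1), dtype='float32')).flatten()[0]
y_analytic = y0 * np.exp(-3.0 * x_test.flatten())

print("max abs deviation from y(0)*exp(-3x):", np.max(np.abs(y_model - y_analytic)))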

LSTM model has constant accuracy and doesn't vary

I'm stuck, as you can see, with my LSTM model. I'm trying to predict the amount of tons to produce per month. When I train the model, the accuracy is almost constant, with a minimal variation like:
0.34406
0.34407
0.34408
I tried different combinations of activations, initializers and parameters, and the accuracy doesn't increase.
I don't know if the problem here is my data, my model, or whether this value is the maximum accuracy the model can reach.
Here is the code (if you notice some unused libraries, it's because I made some changes since the first version):
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
from sklearn import preprocessing
import keras
%tensorflow_version 2.x
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dropout
from keras.optimizers import Adam
import warnings
warnings.filterwarnings("ignore")
%matplotlib inline
from plotly.offline import iplot
import matplotlib.pyplot as plt
import chart_studio.plotly as py
import plotly.offline as pyoff
import plotly.graph_objs as go
df_ventas = pd.read_csv('/content/drive/My Drive/proyectoPanimex/DEOPE.csv', parse_dates=['Data Emissão'], index_col=0, squeeze=True)
#df_ventas = df_ventas.resample('M').sum().reset_index()
df_ventas = df_ventas.drop(columns= ['weekday', 'month'], axis=1)
df_ventas = df_ventas.reset_index()
df_ventas = df_ventas.rename(columns= {'Data Emissão':'Fecha','Un':'Cantidad'})
df_ventas['dia'] = [x.day for x in df_ventas.Fecha]
df_ventas['mes']=[x.month for x in df_ventas.Fecha]
df_ventas['anio']=[x.year for x in df_ventas.Fecha]
df_ventas = df_ventas[:-48]
df_ventas = df_ventas.drop(columns='Fecha')
df_diff = df_ventas.copy()
df_diff['cantidad_anterior'] = df_diff['Cantidad'].shift(1)
df_diff = df_diff.dropna()
df_diff['diferencia'] = (df_diff['Cantidad'] - df_diff['cantidad_anterior'])
df_supervised = df_diff.drop(['cantidad_anterior'],axis=1)
#adding lags
for inc in range(1,31):
    nombre_columna = 'retraso_' + str(inc)
    df_supervised[nombre_columna] = df_supervised['diferencia'].shift(inc)
df_supervised = df_supervised.dropna()
df_supervisedNumpy = df_supervised.to_numpy()
train = df_supervisedNumpy
scaler = MinMaxScaler(feature_range=(0, 1))
X_train = scaler.fit(train)
train = train.reshape(train.shape[0], train.shape[1])
train_scaled = scaler.transform(train)
X_train, y_train = train_scaled[:, 1:], train_scaled[:, 0:1]
X_train = X_train.reshape(X_train.shape[0], 1, X_train.shape[1])
#LSTM MODEL
model = Sequential()
act = 'tanh'
actF = 'relu'
model.add(LSTM(200, activation = act, input_dim=34, return_sequences=True ))
model.add(Dropout(0.15))
#model.add(Flatten())
model.add(LSTM(200, activation= act))
model.add(Dropout(0.2))
#model.add(Flatten())
model.add(Dense(200, activation= act))
model.add(Dropout(0.3))
model.add(Dense(1, activation= actF))
optimizer = keras.optimizers.Adam(lr=0.00001)
model.compile(optimizer=optimizer, loss=keras.losses.binary_crossentropy, metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=100,
                    epochs=50, verbose=1)
hist = pd.DataFrame(history.history)
hist['Epoch'] = history.epoch
hist
History output:
loss acc Epoch
0 0.847146 0.344070 0
1 0.769400 0.344070 1
2 0.703548 0.344070 2
3 0.698137 0.344070 3
4 0.653952 0.344070 4
As you can see, the only value that changes is the loss, but what is going on with the accuracy? I'm just starting with machine learning and I don't have enough knowledge to see my errors. Thanks!
A Dense(1, activation='softmax') will always freeze and not learn anything
A Dense(1, activation='relu') will very probably freeze and not learn anything
A Dense(1, activation='sigmoid') is ideal for classification (binary) problems and somewhat good for regression with values between 0 and 1.
A Dense(1, activation='tanh') is somewhat good for regression with values between -1 and 1
A Dense(1, activation='softplus') is somewhat good for regression with values between 0 and +infinite
A Dense(1, activation='linear') is good for regression in general with no limits (but it's highly recommended that the data be normalized beforehand).
For regression you can't use accuracy; the metrics 'mae' and 'mse' don't give a "relative" difference, they give an "absolute" mean difference, one linear, the other squared.
Your output activation should be linear for continuous prediction, or softmax for classification. Also multiply your learning rate by 100. Your loss should be mean_absolute_error. You could also easily divide the number of LSTM units by a factor of 10. The tanh should be replaced by relu or the like.
For your accuracy problem, it makes no sense to use accuracy, since you're not trying to classify. For metrics you can use mae: you're trying to know how far the prediction is from the actual target, on a continuous scale. Accuracy is for categories, not continuous data.
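A minimal sketch of those suggestions applied to the model above (layer sizes are illustrative, not tuned):
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dropout, Dense

model = Sequential([
    LSTM(20, activation='relu', return_sequences=True, input_shape=(1, 34)),
    Dropout(0.15),
    LSTM(20, activation='relu'),
    Dense(20, activation='relu'),
    Dense(1, activation='linear'),            # linear output for a continuous target
])

# ~100x larger learning rate, regression loss, regression metric
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001),
              loss='mean_absolute_error',
              metrics=['mae'])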

Tensorflow lstm incremental learning and multiple predictions

I am training a tensorflow model and later plan to use it for predictions.
import numpy as np
import pandas as pd
import sys
import tensorflow as tf
from tensorflow.contrib import learn
from sklearn.metrics import mean_squared_error, mean_absolute_error
from lstm_predictor import load_csvdata, lstm_model
import pymysql as mariadb
LOG_DIR = './ops_logs'
K = 1 # history used for lstm.
TIMESTEPS = 65*K
RNN_LAYERS = [{'steps': TIMESTEPS}]
DENSE_LAYERS = [10, 10]
TRAINING_STEPS = 1000
BATCH_SIZE = 1
PRINT_STEPS = TRAINING_STEPS / 10
def train_model(symbol=1, categ='M1', limit=1000, upgrade=False):
    MODEL_DIR = 'model/' + str(symbol) + categ
    regressor = learn.TensorFlowEstimator(model_fn=lstm_model(TIMESTEPS, RNN_LAYERS, DENSE_LAYERS),
                                          n_classes=0,
                                          verbose=1,
                                          steps=TRAINING_STEPS,
                                          optimizer='Adagrad',
                                          learning_rate=0.03,
                                          continue_training=True,
                                          batch_size=BATCH_SIZE)
    X, y = load_csvdata(df, K)
    regressor.fit(X['train'], y['train'], logdir=MODEL_DIR)  # logdir=LOG_DIR
    X['test'] = X['train'][-10:]
    y['test'] = y['train'][-10:]
    predicted = regressor.predict(X['test'])
    print('actual', 'predictions')
    for i, yi in enumerate(y['test']):
        print(yi[0], ' ', predicted[i])
    mse = mean_absolute_error(y['test'], predicted)
    print("mean_absolute_error : %f" % mse)
    ###############################
    regressor.save(LOG_DIR)
train_model()
Then I want to write a predict function which would read the model from model/** and make predictions.
def predict(symbol=1, categ='M1'):
    pass
    # how to load saved model data?
But I am unable to load the model using
regressor = learn.TensorFlowEstimator.restore( LOG_DIR )
since it's currently not implemented.
Can you suggest how I can do repeated predictions at multiple points in the future?
The model checkpoints are saved as:
checkpoint model.ckpt-8001.meta
events.out.tfevents.1476102309.hera.creatory.org model.ckpt-8301-00000-of-00001
events.out.tfevents.1476102926.hera.creatory.org model.ckpt-8301.meta
events.out.tfevents.1476105626.hera.creatory.org model.ckpt-8601-00000-of-00001
events.out.tfevents.1476106521.hera.creatory.org model.ckpt-8601.meta
events.out.tfevents.1476106839.hera.creatory.org model.ckpt-8901-00000-of-00001
events.out.tfevents.1476107001.hera.creatory.org model.ckpt-8901.meta
events.out.tfevents.1476107462.hera.creatory.org model.ckpt-9000-00000-of-00001
graph.pbtxt model.ckpt-9000.meta
model.ckpt-8001-00000-of-00001
