I have a large dataset with n_samples, n_features, n_classes = 346679, 10233, 86, and I am trying to build a classifier on it. For this I am using a Multi-Layer Perceptron built with the Keras Sequential model.
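(For scale: a dense float32 design matrix of this size is far too large for typical RAM, which is what motivates the batch generator below. A quick back-of-envelope check:)

n_samples, n_features = 346679, 10233
print(n_samples * n_features * 4 / 1e9)  # ~14.2 GB for a dense float32 matrix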
The DataGenerator class
class DataGeneratorKeras:
    def __init__(self, num_rows, n_classes, n_samples, n_features, batch_size=1, shuffle=True):
        self.num_rows = num_rows
        self.n_samples = n_samples
        self.n_features = n_features
        self.n_classes = n_classes
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.flag = False

    def __get_exploration_order(self, list_ids):
        """
        Generates the order of exploration
        :param list_ids:
        :return:
        """
        # Find exploration order
        indexes = np.arange(len(list_ids))
        if self.shuffle:
            np.random.shuffle(indexes)
        return indexes

    def __data_generation(self, list_ids_temp, n_classes):
        """
        Generates data of batch_size samples
        :param list_ids_temp:
        :param n_classes:
        :return:
        """
        index = list_ids_temp[0]
        fv = load_npz("data_file_" + str(index) + ".npz")
        labels_complete = load(...)  # Load labels
        partial_labels = labels_complete[index]
        del labels_complete
        y = self.sparsify(partial_labels, n_classes)
        return fv, y

    @staticmethod
    def sparsify(y, n_classes):
        """
        :return:
        """
        label_encoder = np_utils.to_categorical(y, n_classes)
        return label_encoder

    def generate(self, list_ids):
        """
        Generates batches of samples
        :param list_ids:
        :return:
        """
        # Infinite loop
        while 1:
            # Generate the order of exploration of the dataset
            indexes = self.__get_exploration_order(list_ids)
            # Generate batches
            imax = int(len(indexes) / self.batch_size)
            for i in range(imax):
                # Find the list of IDs for this batch
                list_ids_temp = [list_ids[k] for k in indexes[i * self.batch_size:(i + 1) * self.batch_size]]
                # Generate data
                x, y = self.__data_generation(list_ids_temp, self.n_classes)
                yield x.toarray(), y
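For reference, the sparsify method is a thin wrapper around to_categorical, which one-hot encodes integer class labels. A minimal sketch of what it produces (shown with the modern tensorflow.keras.utils import path; np_utils is the older location of the same function):

from tensorflow.keras.utils import to_categorical
import numpy as np

labels = np.array([0, 3, 85])               # integer class ids for three samples
one_hot = to_categorical(labels, 86)        # shape (3, 86), a single 1.0 per row
print(one_hot.shape, one_hot[1].argmax())   # (3, 86) 3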
The Script class
class Script:
    def __init__(self, num_rows, batch_size, test_size, n_classes, n_samples, n_features):
        self.batch_size = batch_size
        self.num_rows = num_rows
        self.test_size = test_size
        self.n_classes = n_classes
        self.n_samples = n_samples
        self.n_features = n_features

    def main(self):
        validation = int(self.test_size * self.num_rows)
        train = self.num_rows - validation
        params = {
            'num_rows': self.num_rows,
            'n_samples': self.n_samples,
            'n_features': self.n_features,
            'n_classes': self.n_classes,
            'batch_size': self.batch_size,
            'shuffle': True
        }
        partition = {'train': range(train), 'validation': range(train, self.num_rows)}
        # Generators
        training_generator = DataGeneratorKeras(**params).generate(partition['train'])
        validation_generator = DataGeneratorKeras(**params).generate(partition['validation'])
        return training_generator, validation_generator, partition


if __name__ == "__main__":
    script = Script(num_rows=347, test_size=0.25, n_classes=86, n_samples=346679, n_features=10233, batch_size=1)
    training_generator, validation_generator, partition = script.main()
Building the model
def classifier_base_data(dropout, learning_rate):
    model = Sequential()
    model.add(Dense(2**13, input_shape=(script.n_features,), activation='relu', name="l_input"))
    model.add(BatchNormalization())
    model.add(Dropout(dropout))
    model.add(Dense(2**12, input_dim=2**13, activation='relu', name="l_hidden_1"))
    model.add(BatchNormalization())
    model.add(Dropout(dropout))
    model.add(Dense(2**11, input_dim=2**12, activation='relu', name="l_hidden_2"))
    model.add(BatchNormalization())
    model.add(Dropout(dropout))
    model.add(Dense(2**10, input_dim=2**11, activation='relu', name="l_hidden_3"))
    model.add(BatchNormalization())
    model.add(Dropout(dropout))
    model.add(Dense(2**9, input_dim=2**10, activation='relu', name="l_hidden_4"))
    model.add(BatchNormalization())
    model.add(Dropout(dropout))
    model.add(Dense(2**8, input_dim=2**9, activation='relu', name="l_hidden_5"))
    model.add(BatchNormalization())
    model.add(Dropout(dropout))
    model.add(Dense(2**7, input_dim=2**8, activation='relu', name="l_hidden_6"))
    model.add(BatchNormalization())
    model.add(Dropout(dropout))
    model.add(Dense(script.n_classes, activation='softmax', name="l_output"))

    optimizer = adam(lr=learning_rate)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    print(model.summary())
    return model
When I train the model using the Keras fit function, I am able to achieve val_acc and acc above 25%.
history = model.fit(x_train.toarray(), y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_validation.toarray(), y_validation))
Since the data is large, I am using a data generator, following the well-written tutorial keras-datagen-tutorial. But when I run the model using fit_generator, I get 0% val_acc.
model.fit_generator(
    generator=training_generator,
    steps_per_epoch=len(partition['train']),
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=len(partition['validation']),
    verbose=1
)
Is there any issue in the DataGenerator as written?
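One way to rule out a bug in the hand-rolled infinite generator (a debugging suggestion, not a confirmed diagnosis) is to subclass keras.utils.Sequence instead, so Keras handles batch indexing and epoch boundaries itself. A minimal sketch, assuming the same one-file-per-batch layout as above (the labels array and file naming follow the question's code):

from tensorflow.keras.utils import Sequence, to_categorical
from scipy.sparse import load_npz

class NpzSequence(Sequence):
    def __init__(self, file_indices, labels, n_classes):
        self.file_indices = list(file_indices)
        self.labels = labels          # complete label array, one entry per file index
        self.n_classes = n_classes

    def __len__(self):
        # one .npz file per batch, as in the generator above
        return len(self.file_indices)

    def __getitem__(self, i):
        idx = self.file_indices[i]
        x = load_npz("data_file_" + str(idx) + ".npz").toarray()
        y = to_categorical(self.labels[idx], self.n_classes)
        return x, y

When a Sequence is passed, fit_generator infers the number of steps from len(sequence), which removes one common source of train/validation mismatch.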
Related
I have code with which I wanted to train a neural network and save the finished model to a file. But I am getting an error due to an incorrect split of the training and validation data. I can't understand why:
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F

class ChatBot(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden):
        out, hidden = self.lstm(x, hidden)
        out = self.fc(out[:, -1, :])
        return out, hidden

    def init_hidden(self, batch_size):
        weight = next(self.parameters()).data
        hidden = (weight.new(self.num_layers, batch_size, self.hidden_size).zero_(),
                  weight.new(self.num_layers, batch_size, self.hidden_size).zero_())
        return hidden

class ChatDataset(torch.utils.data.Dataset):
    def __init__(self, data):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]

def train(model, train_loader, loss_fn, optimizer, device):
    model.train()
    for inputs, targets in train_loader:
        inputs = inputs.to(device)
        targets = targets.to(device)
        hidden = model.init_hidden(inputs.size(0))
        hidden = tuple([each.data for each in hidden])
        optimizer.zero_grad()
        outputs, _ = model(inputs, hidden)
        loss = loss_fn(outputs.view(-1), targets.view(-1))
        loss.backward()
        optimizer.step()

def evaluate(model, val_loader, loss_fn, device):
    model.eval()
    total_loss = 0
    with torch.no_grad():
        for inputs, targets in val_loader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            hidden = model.init_hidden(inputs.size(0))
            hidden = tuple([each.data for each in hidden])
            outputs, _ = model(inputs, hidden)
            total_loss += loss_fn(outputs, targets).item()
    return total_loss / len(val_loader)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

input_size = 500
hidden_size = 128
num_layers = 2
output_size = 500

model = ChatBot(input_size, hidden_size, num_layers, output_size)
model = model.to(device)

data = [("Hi, how are you?", "I'm doing well, thank you for asking."),
        ("What's your name?", "I'm a chatbot, I don't have a name."),
        ("What's the weather like?", "I'm not sure, I don't have access to current weather information."),
        ("What's the time?", "I'm not sure, I don't have access to the current time.")]

dataset = ChatDataset(data)
train_dataset, val_dataset = torch.utils.data.random_split(dataset, [int(0.8 * len(dataset)), int(0.2 * len(dataset))])
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=32, shuffle=False)

loss_fn = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

num_epochs = 100
for epoch in range(num_epochs):
    train(model, train_loader, loss_fn, optimizer, device)
    val_loss = evaluate(model, val_loader, loss_fn, device)
    print("Epoch [{}/{}], Validation Loss: {:.4f}".format(epoch + 1, num_epochs, val_loss))

torch.save(model.state_dict(), 'chatbot_model.pt')
But when I run this code, I get an error:
ValueError                                Traceback (most recent call last)
<ipython-input-8-ae2a6dd1bc7c> in <module>
     78 dataset = ChatDataset(data)
     79
---> 80 train_dataset, val_dataset = torch.utils.data.random_split(dataset, [int(0.8 * len(dataset)), int(0.2 * len(dataset))])
     81
     82 train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)

/usr/local/lib/python3.8/dist-packages/torch/utils/data/dataset.py in random_split(dataset, lengths, generator)
    345     # Cannot verify that dataset is Sized
    346     if sum(lengths) != len(dataset):    # type: ignore[arg-type]
--> 347         raise ValueError("Sum of input lengths does not equal the length of the input dataset!")
    348
    349     indices = randperm(sum(lengths), generator=generator).tolist()  # type: ignore[call-overload]

ValueError: Sum of input lengths does not equal the length of the input dataset!
I don't know why this error occurs; everything seems correct.
The typecasting of the values to an integer causes a mismatch between the total number of samples in the dataset and the number assigned to the train and validation splits.
Not the most ideal code, but replacing the split with the following will work:
num_train_images = int(0.8 * len(dataset))
train_dataset, val_dataset = torch.utils.data.random_split(dataset, [num_train_images, len(dataset) - num_train_images])
I suspect there is a loss of precision in this calculation,
[int(0.8 * len(dataset)), int(0.2 * len(dataset))]
so the number of records in the dataset is not fully accounted for.
For example:
int(0.8 * 56) + int(0.2 * 56) = 44 + 11 = 55
With len(dataset) = 4 here, int(3.2) + int(0.8) = 3 + 0 = 3 ≠ 4, which is exactly what triggers the error.
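A sketch of that remainder-based fix; note also that newer PyTorch releases (1.13+) accept fractional lengths directly and distribute the remainder themselves:

from torch.utils.data import random_split

dataset = list(range(56))                        # stand-in for any sized dataset
n_train = int(0.8 * len(dataset))                # 44
train_ds, val_ds = random_split(dataset, [n_train, len(dataset) - n_train])
assert len(train_ds) + len(val_ds) == len(dataset)

# PyTorch 1.13+ equivalent: fractions, remainder handled internally
train_ds, val_ds = random_split(dataset, [0.8, 0.2])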
I was comparing the loss of two simple MLP models, with and without dropout, in both TF/Keras and PyTorch (on the Keras IMDB dataset). But with PyTorch I am not getting the results I hoped for, and I wonder what I am doing incorrectly.
# Keras - IMDB Dataset
model = Sequential()
model.add(Dense(16, activation="relu", input_shape=(10000,)))
model.add(Dropout(0.5))  # comment out this line for the no-dropout model
model.add(Dense(16, activation="relu"))
model.add(Dropout(0.5))  # comment out this line for the no-dropout model
model.add(Dense(1, activation="sigmoid"))

model.compile(
    optimizer="rmsprop",
    loss="binary_crossentropy",
    metrics=["accuracy"]
)

history = model.fit(
    X_train,
    y_train,
    epochs=20,
    batch_size=512,
    validation_data=(X_val, y_val)
)
The results I obtained in Keras (left figure without dropout, right figure with dropout):
# Pytorch - same IMDB dataset from keras
class MLP(nn.Module):
    def __init__(self, in_dims, l1, l2, out_dims):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(in_dims, l1)
        self.fc2 = nn.Linear(l1, l2)
        self.fc3 = nn.Linear(l2, out_dims)
        self.dropout = nn.Dropout(p=0.5)

    def forward(self, X):
        out = F.relu(self.fc1(X))
        out = self.dropout(out)  # comment out this line for the no-dropout model
        out = F.relu(self.fc2(out))
        out = self.dropout(out)  # comment out this line for the no-dropout model
        out = F.sigmoid(self.fc3(out))
        return out

model = MLP(10000, 16, 16, 1)
optimizer = optim.RMSprop(model.parameters(), lr=0.001)
criterion = nn.BCELoss()

min_val_loss = np.inf
losses = []
val_losses = []
accuracy = []
val_accuracy = []

for e in range(0, 20):
    running_loss = 0
    for i, (X_train, y_train) in enumerate(train_loader):
        yhat = model.forward(X_train)
        loss = criterion(yhat.flatten(), y_train)
        running_loss += loss.item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    losses.append(running_loss / (i + 1))  # note it's i+1 since i starts from 0

    model.eval()
    with torch.no_grad():
        running_val_loss = 0
        for i, (X_val, y_val) in enumerate(val_loader):
            yhat_val = model.forward(X_val)
            val_loss = criterion(yhat_val.flatten(), y_val)
            running_val_loss += val_loss.item()
        val_losses.append(running_val_loss / (i + 1))

    if val_loss < min_val_loss:
        best_params = model.state_dict()
        min_val_loss = val_loss

    print(f"epochs : {e}, train_loss : {loss}, val_loss : {val_loss}")
The figure on the left is the result from the no-dropout model, which is similar to the Keras result. However, the model with dropout does not show the same behaviour.
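One thing worth checking in the training loop above (an observation on the posted code, not a verified explanation of the figures): model.eval() is called for validation at the end of each epoch, but model.train() is never called again, so dropout is active only during the first epoch and stays disabled afterwards. PyTorch's nn.Dropout only drops units in training mode, so the mode has to be toggled every epoch, roughly like this (same names as in the snippet above):

for e in range(20):
    model.train()                      # re-enable dropout for this epoch's updates
    for X_batch, y_batch in train_loader:
        optimizer.zero_grad()
        loss = criterion(model(X_batch).flatten(), y_batch)
        loss.backward()
        optimizer.step()

    model.eval()                       # dropout off only while validating
    with torch.no_grad():
        running_val_loss = sum(criterion(model(Xv).flatten(), yv).item()
                               for Xv, yv in val_loader) / len(val_loader)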
I'm trying to predict joint torque from 8 input features with an LSTM model.
I tried using TimeseriesGenerator, but kept getting the error
Key: 10 when I ran fit_generator on the model.
I also sliced my dataset into sub time frames, but I am pretty confused by this whole concept.
The dataset is at this link.
Here's my code:
file = r'/content/drive/MyDrive/only_force.csv'
df = pd.read_csv(file)

X = df.iloc[:, :9]
y = df.iloc[:, 9]

first_slice = X[:1081]
second_slice = X[1081:2076]
third_slice = X[2076:3122]
fourth_slice = X[3122:4038]
fifth_slice = X[4038:5186]
sixth_slice = X[5186:6270]
seventh_slice = X[6270:7464]
eighth_slice = X[7464:]

from keras.preprocessing.sequence import TimeseriesGenerator

look_back = 10
train_generator = TimeseriesGenerator(X_train, X_train, length=look_back, batch_size=32)
test_generator = TimeseriesGenerator(X_test, X_test, length=look_back, batch_size=32)

[verbose, epochs, batch_size] = [1, 500, 32]
input_shape = (X_train.shape[1], 1)

model = Sequential()
# LSTM
model.add(LSTM(64, input_shape=input_shape, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu', kernel_regularizer=keras.regularizers.l2(0.001)))
# model.add(Dropout(0.2))
model.add(Dense(32, activation='relu', kernel_regularizer=keras.regularizers.l2(0.001)))
model.add(Dense(1, activation='relu'))

earlystopper = EarlyStopping(monitor='val_loss', min_delta=0, patience=30, verbose=1, mode='auto')
model.summary()
model.compile(loss='mse', optimizer=Adam(learning_rate=0.0005), metrics=[tf.keras.metrics.RootMeanSquaredError()])
history = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=verbose,
                    validation_data=(X_test, y_test), callbacks=[earlystopper])
# model.fit_generator(train_generator, epochs=epochs, verbose=1)
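As an aside on the Key: 10 error (an assumption, since the full traceback isn't shown): TimeseriesGenerator indexes its data and targets positionally, which can fail with KeyError-style messages when handed pandas objects like the df.iloc slices above. Converting to NumPy first, and passing the torque column as the target rather than reusing the inputs, would look roughly like this:

import numpy as np
from keras.preprocessing.sequence import TimeseriesGenerator

X_arr = X.to_numpy(dtype=np.float32)   # input features as a plain array
y_arr = y.to_numpy(dtype=np.float32)   # joint torque, the regression target

look_back = 10
train_generator = TimeseriesGenerator(X_arr, y_arr, length=look_back, batch_size=32)
# each batch yields inputs of shape (32, look_back, n_features) and targets of shape (32,)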
I am trying to implement a handwriting OCR based on the Keras OCR example: link.
However, I get the following error:
InvalidArgumentError: All labels must be nonnegative integers, batch: 0 labels: 1,0,11,9,45,0,25,17,27,41,39,9,37,0,23,1,39,9,35,0,11,35,29,25,0,1,0,27,9,1,35,3,49,0,43,17,23,23,1,13,9,0,69,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
[[{{node ctc_6/CTCLoss}}]]
[[{{node training_5/SGD/gradients/ctc_6/CTCLoss_grad/mul}}]]
Here are the generator, the CTC loss, and the train function:
def ctc_lambda_func(args):
    y_pred, labels, input_length, label_length = args
    # the 2 is critical here since the first couple outputs of the RNN
    # tend to be garbage:
    y_pred = y_pred[:, 2:, :]
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
# Generation of data: load the images, resize, gray, normalize them
class DataGenerator(keras.utils.Sequence):
    def __init__(self, list_Files, labels, downsample_factor, max_string_length=80, batch_size=32, dim=(512, 64), shuffle=True):
        self.dim = dim
        self.batch_size = batch_size
        self.labels = labels
        self.list_Files = list_Files
        self.shuffle = shuffle
        self.on_epoch_end()
        self.max_string_length = max_string_length
        self.downsample_factor = downsample_factor
        # TODO: Add weight save

    def on_epoch_end(self):
        self.indexes = np.arange(len(self.list_Files))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, list_Files_temp):
        # *[2,2] --> 2,2 (unpack values)
        X = np.ones([self.batch_size, *self.dim, 1])
        y = np.ones([self.batch_size, self.max_string_length]) * -1  # as in the keras_ocr example; why -1?
        X_length = np.zeros([self.batch_size, 1])
        y_length = np.zeros([self.batch_size, 1])
        # TODO: add mix with blank inputs as it is said to be important for translational invariance
        for i, file in enumerate(list_Files_temp):
            im = cv2.imread(file)  # load the file as a numpy array
            im = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)  # transform the file into a gray image
            im = cv2.resize(im, self.dim[::-1])  # resize it (cv2 takes width first)
            im = im / 255  # normalization
            X[i, 0:self.dim[0], :, 0] = im
            X_length[i] = self.dim[0] // self.downsample_factor - 2  # ?????
            seq = text_to_labels(self.labels[file])
            y[i, 0:len(seq)] = text_to_labels(self.labels[file])  # transform the text into a list of integers
            y_length[i] = len(y[i])
            print("LEN={0}".format(y_length[i]))
        inputs = {'the_input': X,
                  'the_labels': y,
                  'input_length': X_length,
                  'label_length': y_length
                  }
        outputs = {'ctc': np.zeros([self.batch_size])}
        print(y)
        return (inputs, outputs)

    def __len__(self):
        'Number of batches per epoch'
        return int(np.floor(len(self.list_Files) / self.batch_size))

    def __getitem__(self, index):
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        list_Files_temp = [self.list_Files[k] for k in indexes]
        # print(list_Files_temp[0])
        (inputs, outputs) = self.__data_generation(list_Files_temp)
        return (inputs, outputs)
def train(dim_images, partition, labels):
    # Misc parameters
    absolute_max_string_length = 80
    output_size = len(alphabet) + 1  # +1 for the CTC blank symbol

    # Network parameters
    img_h = dim_images[0]
    img_w = dim_images[1]
    conv_filters = 16
    kernel_size = (3, 3)
    pool_size = 2
    time_dense_size = 32
    rnn_size = 512
    act = 'relu'
    input_shape = (*DIM_IMAGES, 1)
    downsample_factor = pool_size ** 2

    # Convolutional layers
    input_data = Input(name='the_input', shape=input_shape)
    inner = Conv2D(conv_filters, kernel_size, padding='same',
                   activation=act, kernel_initializer='he_normal', name='conv1')(input_data)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)
    inner = Conv2D(conv_filters, kernel_size, padding='same',
                   activation=act, kernel_initializer='he_normal',
                   name='conv2')(inner)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)

    conv_to_rnn_dims = (img_w // (pool_size ** 2), (img_h // (pool_size ** 2)) * conv_filters)
    inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)

    # Recurrent layers
    gru_1 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru1')(inner)
    gru_1b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru1_b')(inner)
    gru1_merged = add([gru_1, gru_1b])
    gru_2 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru2')(gru1_merged)
    gru_2b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru2_b')(gru1_merged)

    # transforms RNN output to character activations:
    inner = Dense(output_size, kernel_initializer='he_normal',
                  name='dense2')(concatenate([gru_2, gru_2b]))
    # Prediction (needs to be decoded)
    y_pred = Activation('softmax', name='softmax')(inner)
    Model(inputs=input_data, outputs=y_pred).summary()

    labelsI = Input(name='the_labels',
                    shape=[absolute_max_string_length], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')

    # Keras doesn't currently support loss funcs with extra parameters,
    # so CTC loss is implemented in a lambda layer
    loss_out = Lambda(
        ctc_lambda_func, output_shape=(1,),
        name='ctc')([y_pred, labelsI, input_length, label_length])

    # Generators
    training_generator = DataGenerator(partition['train'], labels, downsample_factor, batch_size=BATCH_SIZE, dim=DIM_IMAGES, shuffle=True)
    valid_generator = DataGenerator(partition['valid'], labels, downsample_factor, batch_size=BATCH_SIZE, dim=DIM_IMAGES, shuffle=False)

    # clipnorm seems to speed up convergence
    sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)

    model = Model(inputs=[input_data, labelsI, input_length, label_length],
                  outputs=loss_out)
    # the loss calc occurs elsewhere, so use a dummy lambda func for the loss
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd)

    # captures output of softmax so we can decode the output during visualization
    test_func = K.function([input_data], [y_pred])

    model.fit_generator(
        generator=training_generator,
        steps_per_epoch=(len(partition['train']) - len(partition['valid'])) // BATCH_SIZE,
        epochs=20,
        validation_data=valid_generator,
        validation_steps=len(partition['valid']) // BATCH_SIZE)
I guess the '-1' labels come from this line:
y = np.ones([self.batch_size, self.max_string_length])*-1
In the original code there was a similar line (line 220), but it runs fine:
self.Y_data = np.ones([self.num_words, self.absolute_max_string_len]) * -1
I thought the '-1' values were a way of padding the sequence, but this value seems to be forbidden by the CTC function. Is there something I am missing here?
It seems I just mixed up my image length and image width. Also, label_length should be equal to the real length of the sentence (before padding with -1). Therefore the line:
y_length[i] = len(y[i])
Should be replaced by:
y_length[i] = len(seq)
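Putting both label fixes together, the relevant lines of __data_generation become (same variable names as in the generator above):

seq = text_to_labels(self.labels[file])
y[i, 0:len(seq)] = seq          # the rest of the row keeps its -1 padding
y_length[i] = len(seq)          # real sentence length, not the padded length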
The following is the code of a simple example of what I want to implement:
Error: raise TypeError("inputs must be a sequence"), TypeError: inputs must be a sequence
How can I solve this so the program works? Any help will be appreciated.
from keras.models import Sequential
from keras.layers import LSTM, Dense, Flatten
import numpy as np
from keras.engine.topology import Layer
import tensorflow as tf

class MyLayer(Layer):
    def __init__(self, **kwargs):
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        super(MyLayer, self).build(input_shape)

    def call(self, x):
        "Some other tf function will be put here"
        outputs, state = tf.contrib.rnn.static_rnn(tf.contrib.rnn.LSTMBlockCell(32), x, dtype=tf.float32)
        return outputs

    def compute_output_shape(self, input_shape):
        return input_shape

def get_model(timesteps, data_dim):
    model = Sequential()
    model.add(LSTM(32, return_sequences=True, input_shape=(timesteps, data_dim)))
    model.add(LSTM(32, return_sequences=True))
    model.add(MyLayer())  # this is my layer
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    return model

def run_demo():
    data_dim = 16
    timesteps = 8
    num_classes = 10
    model = get_model(timesteps, data_dim)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    """Generate the training and validation data"""
    x_train = np.random.random((1000, timesteps, data_dim))
    y_train = np.random.random((1000, num_classes))
    x_val = np.random.random((100, timesteps, data_dim))
    y_val = np.random.random((100, num_classes))
    model.fit(x_train, y_train, batch_size=64, epochs=5, validation_data=(x_val, y_val))

if __name__ == "__main__":
    run_demo()
Sorry, I am not too familiar with recurrent models, but I think the problem is the input format.
The input arriving at the custom layer has shape (?, 8, 32), but tf.nn.static_rnn requires a Python list of per-timestep tensors, so you need to unstack the input along the time axis; then the problem will be fixed.
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Flatten, Dense

class MyLayer(tf.keras.layers.Layer):
    def __init__(self, **kwargs):
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        self.cell = tf.nn.rnn_cell.BasicRNNCell(32)
        super(MyLayer, self).build(input_shape)

    def call(self, x):
        "Some other tf function will be put here"
        # static_rnn expects a Python list with one (batch, features) tensor per timestep
        rnn_inputs = tf.unstack(x, axis=1)
        outputs, state = tf.nn.static_rnn(self.cell, rnn_inputs, dtype=tf.float32)
        # re-assemble the per-timestep outputs into a single (batch, time, units) tensor
        for i in range(len(outputs)):
            outputs[i] = tf.expand_dims(outputs[i], axis=1)
        outputs = tf.concat(outputs, axis=1)
        return outputs

    def compute_output_shape(self, input_shape):
        return input_shape

def get_model(timesteps, data_dim):
    model = Sequential()
    model.add(LSTM(32, return_sequences=True, input_shape=(timesteps, data_dim)))
    model.add(LSTM(32, return_sequences=True))
    model.add(MyLayer())  # this is my layer
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    return model

def run_demo():
    data_dim = 16
    timesteps = 8
    num_classes = 10
    model = get_model(timesteps, data_dim)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    """Generate the training and validation data"""
    x_train = np.random.random((1000, timesteps, data_dim))
    y_train = np.random.random((1000, num_classes))
    x_val = np.random.random((100, timesteps, data_dim))
    y_val = np.random.random((100, num_classes))
    model.fit(x_train, y_train, batch_size=64, epochs=5, validation_data=(x_val, y_val))

if __name__ == "__main__":
    run_demo()