How to fix grid search issues in LSTM - python

I want to do a grid search for my model; my model is shown below.
def model_lstm(time_steps=24, n_features=40,
               optimizer=tf.keras.optimizers.Adam,
               learning_rate=0.001,
               dropout=0.5,
               n_units_LSTM=256,
               n_units_1=200):
    activation2 = 'relu'
    model = Sequential()
    model.add(LSTM(units=n_units_LSTM, input_shape=(time_steps, n_features)))
    model.add(Dropout(dropout))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(1, activation='sigmoid'))
    optimizer = optimizer(learning_rate=learning_rate)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    print(model.summary())
    return model
There are four parameters I want to grid-search: learning_rate, n_units_LSTM, n_units_1, and dropout. I want every Dense layer to use the same number of units, so I added a variable named n_units_1.
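As an aside, since all five hidden Dense layers share n_units_1, they can also be added in a loop; a small equivalent sketch:

for _ in range(5):
    model.add(Dense(units=n_units_1, activation=activation2))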
def grid_search(data, metode):
    keras.backend.clear_session()
    x_train, y_train = sequence_data(data)
    params = {
        'learning_rate': [0.01, 0.001, 0.0001],
        'n_units_LSTM': [64, 128, 256],
        'n_units_1': [50, 100, 150, 200, 250, 300],
        'dropout': [0.1, 0.2, 0.25, 0.5]
    }
    scorers = {
        'accuracy_score': make_scorer(accuracy_score)
    }
    model = KerasClassifier(build_fn=model_lstm, verbose=0)
    cv = cross_validate(5)
    start = time.time()
    grid = GridSearchCV(estimator=model,
                        param_grid=params,
                        n_jobs=-1,
                        verbose=1,
                        cv=cv,
                        scoring=scorers,
                        refit='accuracy_score')
    tf.random.set_seed(123)
    grid.fit(x_train, y_train)
    end = time.time()
    runtime = end - start
    result = grid.best_params_
    results = grid.cv_results_
    print('-----------------------------------------')
    print(f'Best Parameter : {result}')
    print(f'Runtime : {runtime}')
    print('-----------------------------------------')
    return grid
When I run the grid search, I get this error:
A task has failed to un-serialize. Please ensure that the arguments of the function are all picklable.
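The "failed to un-serialize" message comes from joblib: with n_jobs=-1, GridSearchCV ships the estimator to worker processes, and everything it ships (including the Keras-wrapped model) has to be picklable, which Keras models generally are not. A sketch of the common workaround, keeping the rest of the setup above and running the search in a single process; note also that cv expects an integer or a splitter object such as KFold, whereas cross_validate is a function that runs a cross-validation itself:

from sklearn.model_selection import GridSearchCV, KFold

model = KerasClassifier(build_fn=model_lstm, verbose=0)
cv = KFold(n_splits=5)  # a splitter object; plain cv=5 would also work
grid = GridSearchCV(estimator=model,
                    param_grid=params,
                    n_jobs=1,  # single process, so nothing has to be pickled
                    verbose=1,
                    cv=cv,
                    scoring=scorers,
                    refit='accuracy_score')
grid.fit(x_train, y_train)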

Step 1: Let's get the model to predict an age based on the 9 abalone features. I changed the loss to mean_squared_error for the age prediction. Step 2: add a grid search (pending; a sketch follows the code below).
import pandas as pd
from pandas import Series
from pandas import concat
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, LSTM, Embedding,Flatten,Dropout, Dense, Concatenate, TimeDistributed, Bidirectional,Attention
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
def create_dataset(dataset, look_back=8):
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back-1):
        a = dataset[i:(i+look_back)]
        dataX.append(a)
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)

def difference(dataset, interval=1):
    diff = list()
    for i in range(interval, len(dataset)):
        value = dataset[i] - dataset[i - interval]
        diff.append(value)
    return Series(diff)

def timeseries_to_supervised(data, lag=1):
    df = pd.DataFrame(data)
    columns = [df.shift(i) for i in range(1, lag+1)]
    columns.append(df)
    df = concat(columns, axis=1)
    return df

def scale(train, test):
    # fit scaler on the training data only
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler = scaler.fit(train)
    # transform train
    train = train.reshape(train.shape[0], train.shape[1])
    train_scaled = scaler.transform(train)
    # transform test
    test = test.reshape(test.shape[0], test.shape[1])
    test_scaled = scaler.transform(test)
    return scaler, train_scaled, test_scaled
df = pd.read_csv('abalone.data',
                 names=['Sex', 'Length', 'Diameter', 'Height', 'Whole weight',
                        'Shucked weight', 'Viscera weight', 'Shell weight', 'Rings'])
df.reset_index(inplace=True)
df['Age'] = df['Rings'].apply(lambda x: x + 1.5)  # abalone age = rings + 1.5
encoder=LabelEncoder()
df['Sex']=encoder.fit_transform(df['Sex'])
raw_values = df.values
#diff_values = difference(raw_values, 1)
features=10
#supervised = timeseries_to_supervised(diff_values, features)
supervised = timeseries_to_supervised(raw_values, features)
supervised_values = supervised.values[features:,:]
train_size = int(len(df) * 0.70)
test_size = len(df) - train_size
# split data into train and test sets (hold out the last test_size rows)
train, test = supervised_values[0:-test_size, :], supervised_values[-test_size:, :]
def model_lstm(X, time_steps=24, n_features=9,
               optimizer=tf.keras.optimizers.Adam,
               learning_rate=0.001,
               dropout=0.5,
               n_units_LSTM=256,
               n_units_1=200,
               batch_size=1):
    activation2 = 'relu'
    model = Sequential()
    model.add(LSTM(units=n_units_LSTM, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
    model.add(Dropout(dropout))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(1))
    optimizer = optimizer(learning_rate=learning_rate)
    model.compile(loss='mean_squared_error',
                  optimizer=optimizer)
    print(model.summary())
    return model
scaler, train_scaled, test_scaled = scale(train, test)
X_train, y_train = train_scaled[:, 0:-1], train_scaled[:, -1]
X_train = X_train.reshape(X_train.shape[0], 1, X_train.shape[1])
X_test, y_test = test_scaled[:, 0:-1], test_scaled[:, -1]
X = X_test.reshape(X_test.shape[0], 1, X_test.shape[1])
#print(y)
model = model_lstm(X_train)
history = model.fit(X_train, y_train, epochs=100)
model.summary()
plt.plot(history.history['loss'])
plt.title('training loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train'], loc='upper left')  # only the training loss is plotted
plt.show()
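For Step 2, a sketch of what the pending grid search could look like, assuming the model_lstm above. Two caveats: build_fn must be callable with hyperparameter keywords only, so the data-dependent X argument is fixed in a small wrapper (build_model below is a hypothetical helper, not from the original code), and the stateful/batch_size=1 setup makes cross-validation very slow, so dropping stateful=True is worth considering.

from sklearn.model_selection import GridSearchCV
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor

def build_model(learning_rate=0.001, dropout=0.5,
                n_units_LSTM=256, n_units_1=200):
    # fix the data-dependent argument; the rest are searched over
    return model_lstm(X_train, learning_rate=learning_rate, dropout=dropout,
                      n_units_LSTM=n_units_LSTM, n_units_1=n_units_1)

params = {
    'learning_rate': [0.01, 0.001, 0.0001],
    'n_units_LSTM': [64, 128, 256],
    'n_units_1': [50, 100, 200],
    'dropout': [0.25, 0.5],
}
reg = KerasRegressor(build_fn=build_model, epochs=20, batch_size=1, verbose=0)
grid = GridSearchCV(estimator=reg, param_grid=params,
                    n_jobs=1,  # Keras models are not picklable across processes
                    cv=3, scoring='neg_mean_squared_error')
grid.fit(X_train, y_train)
print(grid.best_params_)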

Related

I am trying to use a CNN for stock price prediction, but my code does not seem to work. What do I need to change or add?

import math
import numpy as np
import pandas as pd
import pandas_datareader as pdd
from sklearn.preprocessing import MinMaxScaler
from keras.layers import Dense, Dropout, Activation, LSTM, Convolution1D, MaxPooling1D, Flatten
from keras.models import Sequential
import matplotlib.pyplot as plt
df = pdd.DataReader('AAPL', data_source='yahoo', start='2012-01-01', end='2020-12-31')
data = df.filter(['Close'])
dataset = data.values
len(dataset)
# 2265
training_data_size = math.ceil(len(dataset)*0.7)
training_data_size
# 1586
scaler = MinMaxScaler(feature_range=(0,1))
scaled_data = scaler.fit_transform(dataset)
scaled_data
# array([[0.04288701],
# [0.03870297],
# [0.03786614],
# ...,
# [0.96610873],
# [0.98608785],
# [1. ]])
train_data = scaled_data[0:training_data_size,:]
x_train = []
y_train = []
for i in range(60, len(train_data)):
    x_train.append(train_data[i-60:i, 0])
    y_train.append(train_data[i, 0])
    if i <= 60:
        print(x_train)
        print(y_train)
'''
[array([0.04288701, 0.03870297, 0.03786614, 0.0319038 , 0.0329498 ,
0.03577404, 0.03504182, 0.03608791, 0.03640171, 0.03493728,
0.03661088, 0.03566949, 0.03650625, 0.03368202, 0.03368202,
0.03598329, 0.04100416, 0.03953973, 0.04110879, 0.04320089,
0.04089962, 0.03985353, 0.04037657, 0.03566949, 0.03640171,
0.03619246, 0.03253139, 0.0294979 , 0.03033474, 0.02960253,
0.03002095, 0.03284518, 0.03357739, 0.03410044, 0.03368202,
0.03472803, 0.02803347, 0.02792885, 0.03556487, 0.03451886,
0.0319038 , 0.03127613, 0.03274063, 0.02688284, 0.02635988,
0.03211297, 0.03096233, 0.03472803, 0.03713392, 0.03451886,
0.03441423, 0.03493728, 0.03587866, 0.0332636 , 0.03117158,
0.02803347, 0.02897494, 0.03546024, 0.03786614, 0.0401674 ])]
[0.03933056376752886]
'''
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_train.shape
# (1526, 60, 1)
model = Sequential()
model.add(Convolution1D(64, 3, input_shape= (100,4), padding='same'))
model.add(MaxPooling1D(pool_size=2))
model.add(Convolution1D(32, 3, padding='same'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(1))
model.add(Activation('linear'))
model.summary()
model.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=50, epochs=50, validation_data = (X_test, y_test), verbose=2)
test_data = scaled_data[training_data_size-60: , :]
x_test = []
y_test = dataset[training_data_size: , :]
for i in range(60, len(test_data)):
    x_test.append(test_data[i-60:i, 0])
x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
predictions = model.predict(x_test)
predictions = scaler.inverse_transform(predictions)
rsme = np.sqrt(np.mean((predictions - y_test)**2))
rsme
train = data[:training_data_size]
valid = data[training_data_size:]
valid['predictions'] = predictions
plt.figure(figsize=(16,8))
plt.title('PFE')
plt.xlabel('Date', fontsize=18)
plt.ylabel('Close Price in $', fontsize=18)
plt.plot(train['Close'])
plt.plot(valid[['Close', 'predictions']])
plt.legend(['Train', 'Val', 'predictions'], loc='lower right')
plt.show
import numpy as np
y_test, predictions = np.array(y_test), np.array(predictions)
mape = (np.mean(np.abs((predictions - y_test) / y_test))) * 100
accuracy = 100 - mape
print(accuracy)
The above is my code. I tried to edit it, but it does not seem to work. I suspect I did not format my dataset well, but I am new to this field, so I do not know what to change so that the data fits the model. I hope you can enlighten me on this. Thank you!
I encountered errors like: "IndexError: index 2264 is out of bounds for axis 0 with size 2264" and
"ValueError: Input 0 of layer dense is incompatible with the layer: expected axis -1 of input shape to have value 800 but received input with shape [None, 480]".
Your model doesn't tie to your data: x_train has shape (1526, 60, 1), but the first Convolution1D declares input_shape=(100, 4). Change that line to:
model.add(Convolution1D(64, 3, input_shape=(60, 1), padding='same'))
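Put together, a corrected version of the model block might look like this sketch. Note the fit call also needs the lowercase x_train/y_train defined earlier, since X_train and X_test don't exist in this script, and 'accuracy' is dropped from compile because it isn't meaningful for regression:

model = Sequential()
model.add(Convolution1D(64, 3, input_shape=(60, 1), padding='same'))  # (timesteps, features) of x_train
model.add(MaxPooling1D(pool_size=2))
model.add(Convolution1D(32, 3, padding='same'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(1))
model.add(Activation('linear'))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
model.fit(x_train, y_train, batch_size=50, epochs=50, verbose=2)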

Applying K-fold cross validation to ANN

I developed an ANN based on a Machine Learning course that goes as follows:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
dataset = pd.read_excel('CHURN DATA (2).xlsx')
dataset.replace([np.inf, -np.inf], np.nan, inplace=True)
dataset = dataset.fillna(0)
X = dataset.iloc[:, 2:45].values
y = dataset.iloc[:, 45].values
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
X[:, 1] = le.fit_transform(X[:,1])
X[:, 2] = le.fit_transform(X[:,2])
X[:, 3] = le.fit_transform(X[:,3])
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(),[0])], remainder = 'passthrough')
X = np.array(ct.fit_transform(X))
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
ann = tf.keras.models.Sequential()
ann.add(tf.keras.layers.Dense(units = 43, activation = 'relu'))
ann.add(tf.keras.layers.Dense(units = 43, activation = 'relu'))
ann.add(tf.keras.layers.Dense(units = 1, activation = 'sigmoid'))
ann.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
ann.fit(X_train, y_train, batch_size = 256, epochs = 100)
y_pred = ann.predict(X_test)
y_pred = (y_pred > 0.5)
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_pred)
print(cm)
accuracy_score(y_test, y_pred)
However, when I try to add k-fold cross-validation like so:
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(ann, X = X_train, y = y_train, cv = 10)
mean = accuracies.mean()
variance = accuracies.std()
I get the following error:
TypeError: If no scoring is specified, the estimator passed should have a 'score' method. The estimator <tensorflow.python.keras.engine.sequential.Sequential object at 0x000001A52F049F88> does not.
When I try using accuracy for scoring, like so:
accuracies = cross_val_score(estimator = ann,scoring = "accuracy", X = X_train, y = y_train, cv = 10)
I get the following error:
Cannot clone object '<tensorflow.python.keras.engine.sequential.Sequential object at 0x000001A52F049F88>' (type <class 'tensorflow.python.keras.engine.sequential.Sequential'>): it does not seem to be a scikit-learn estimator as it does not implement a 'get_params' methods.
The error message says it all: you can't pass a raw Keras model to scikit-learn. There is a Keras wrapper for scikit-learn, so the two can indeed be used together: tensorflow.keras.wrappers.scikit_learn.KerasClassifier.
Here's a reproducible example with MNIST:
import tensorflow as tf
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
(X_train, y_train), (_, _) = tf.keras.datasets.mnist.load_data()
X_train = X_train[..., None]
def build_model():
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
        tf.keras.layers.Dense(32, activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax')])
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam', metrics=['accuracy'])
    return model
model = build_model()
history = model.fit(X_train, y_train, epochs=1)
keras_clf = KerasClassifier(build_model)
accuracies = cross_val_score(estimator=keras_clf, scoring="accuracy",
                             X=X_train, y=y_train, cv=5)
print(accuracies)
array([0.74008333, 0.65 , 0.71075 , 0.561 , 0.66683333])
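One caveat: the tensorflow.keras.wrappers.scikit_learn module was removed in recent TensorFlow releases. If that import fails, the SciKeras package provides a near drop-in replacement; a sketch, assuming pip install scikeras:

from scikeras.wrappers import KerasClassifier

keras_clf = KerasClassifier(model=build_model, epochs=1, verbose=0)
accuracies = cross_val_score(estimator=keras_clf, scoring="accuracy",
                             X=X_train, y=y_train, cv=5)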

Regression problems in Keras

I am applying ML-based regression techniques to develop a prediction model for my experimental setup.
I applied various models: LR, Decision Tree, and Random Forest.
I am getting 84% accuracy with the RF model. I now want to improve this score with a Keras DL model.
Can anyone guide me on approaching regression with DL in Keras?
I used the following model, but accuracy could not go beyond 70%:
model = Sequential()
model.add(Dense(20,input_dim=5, activation='relu'))
#second hidden layer
model.add(Dense(20, activation='relu'))
#output layer
model.add(Dense(1, activation='linear'))
#compile ANN
model.compile(optimizer="Adam", loss='mean_squared_error', metrics=['accuracy'])
How can one apply DL to regression problems?
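One note on the model above before the example: 'accuracy' in the compile call is a classification metric and is meaningless for regression, so the 70% figure is not comparable to the Random Forest score. Track a regression metric instead, for example:

model.compile(optimizer="Adam", loss='mean_squared_error',
              metrics=['mae'])  # mean absolute error, a proper regression metric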
Here is both regression and classification, with Keras and TF. The data set is available from the link at the end of this post.
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set(style="darkgrid")
# Classification with TensorFlow 2.0
cols = ['price', 'maint', 'doors', 'persons', 'lug_capacity', 'safety','output']
cars = pd.read_csv(r'C:/your_path/car_evaluation.csv', names=cols, header=None)
cars.head()
plot_size = plt.rcParams["figure.figsize"]
plot_size [0] = 8
plot_size [1] = 6
plt.rcParams["figure.figsize"] = plot_size
cars.output.value_counts().plot(kind='pie', autopct='%0.05f%%', colors=['lightblue', 'lightgreen', 'orange', 'pink'], explode=(0.05, 0.05, 0.05,0.05))
price = pd.get_dummies(cars.price, prefix='price')
maint = pd.get_dummies(cars.maint, prefix='maint')
doors = pd.get_dummies(cars.doors, prefix='doors')
persons = pd.get_dummies(cars.persons, prefix='persons')
lug_capacity = pd.get_dummies(cars.lug_capacity, prefix='lug_capacity')
safety = pd.get_dummies(cars.safety, prefix='safety')
labels = pd.get_dummies(cars.output, prefix='condition')
X = pd.concat([price, maint, doors, persons, lug_capacity, safety] , axis=1)
labels.head()
y = labels.values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)
#Model Training
from tensorflow.keras.layers import Input, Dense, Activation,Dropout
from tensorflow.keras.models import Model
input_layer = Input(shape=(X.shape[1],))
dense_layer_1 = Dense(15, activation='relu')(input_layer)
dense_layer_2 = Dense(10, activation='relu')(dense_layer_1)
output = Dense(y.shape[1], activation='softmax')(dense_layer_2)
model = Model(inputs=input_layer, outputs=output)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
print(model.summary())
history = model.fit(X_train, y_train, batch_size=8, epochs=50, verbose=1, validation_split=0.2)
score = model.evaluate(X_test, y_test, verbose=1)
print("Test Score:", score[0])
print("Test Accuracy:", score[1])
# Regression with TensorFlow 2.0
petrol_cons = pd.read_csv(r'C:/your_path/petrol_consumption.csv')
petrol_cons.head()
X = petrol_cons.iloc[:, 0:4].values
y = petrol_cons.iloc[:, 4].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
input_layer = Input(shape=(X.shape[1],))
dense_layer_1 = Dense(100, activation='relu')(input_layer)
dense_layer_2 = Dense(50, activation='relu')(dense_layer_1)
dense_layer_3 = Dense(25, activation='relu')(dense_layer_2)
output = Dense(1)(dense_layer_3)
model = Model(inputs=input_layer, outputs=output)
model.compile(loss="mean_squared_error" , optimizer="adam", metrics=["mean_squared_error"])
history = model.fit(X_train, y_train, batch_size=2, epochs=100, verbose=1, validation_split=0.2)
from sklearn.metrics import mean_squared_error
from math import sqrt
pred_train = model.predict(X_train)
print(np.sqrt(mean_squared_error(y_train,pred_train)))
pred = model.predict(X_test)
print(np.sqrt(mean_squared_error(y_test,pred)))
# path to dataset
# https://www.kaggle.com/elikplim/car-evaluation-data-set

python RNN LSTM error

This is a recurrent neural network LSTM model meant to predict the future values of forex market movement.
The data set shape is (1713, 50); the first column is the date-time index and the others are numeric values.
Right after printing the training data and validation data shapes, the error starts.
When I tried to implement this code:
from sklearn.preprocessing import MinMaxScaler
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv(r"E:\Business\Stocks\StocksDF.csv", parse_dates=[0], index_col=[0], low_memory=False, dtype='object')
features = len(df.columns)
val_ratio = 0.2
epochs = 500
batch_size = df.__len__()
sequence_length = 822
data = df.as_matrix()
data_processed = []
for index in range(len(data) - sequence_length):
    data_processed.append(data[index: index + sequence_length])
data_processed = np.array(data_processed)
val_split = round((1 - val_ratio) * data_processed.shape[0])
train = data_processed[:, int(val_split), :]
val = data_processed[int(val_split):, :]
print('Training data: {}'.format(train.shape))
print('Validation data: {}'.format(val.shape))
train_samples, train_nx, train_ny = train.shape
val_samples, val_nx, val_ny = val.shape
train = train.reshape((train_samples, train_nx * train_ny))
val = val.reshape((val_samples, val_nx * val_ny))
preprocessor = MinMaxScaler().fit(train)
train = preprocessor.transform(train)
val = preprocessor.transform(val)
train = train.reshape((train_samples, train_nx, train_ny))
val = val.reshape((val_samples, val_nx, val_ny))
X_train = train[:, : -1]
y_train = train[:, -1][:, -1]
X_val = val[:, : -1]
y_val = val[:, -1][:, -1]
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], features))
X_val = np.reshape(X_val, (X_val.shape[0], X_val.shape[1], features))
model = Sequential()
model.add(LSTM(input_shape=(X_train.shape[1:]), units=100, return_sequences=True))
model.add(Dropout(0.5))
model.add(LSTM(2, return_sequences=False))
model.add(Dropout(0.25))
model.add(Dense(units=1))
model.add(Activation("relu"))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae', 'mse', 'accuracy'])
history = model.fit(
    X_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=2)
preds_val = model.predict(X_val)
diff = []
for i in range(len(y_val)):
    pred = preds_val[i][0]
    diff.append(y_val[i] - pred)
real_min = preprocessor.data_min_[104]
real_max = preprocessor.data_max_[104]
print(preprocessor.data_min_[:1])
print(preprocessor.data_max_[:1])
preds_real = preds_val * (real_max - real_min) + real_min
y_val_real = y_val * (real_max - real_min) + real_min
plt.plot(preds_real, label='Predictions')
plt.plot(y_val_real, label='Actual values')
plt.xlabel('test')
plt.legend(loc=0)
plt.show()
print(model.summary())
I got this error:
Using TensorFlow backend.
Training data: (891, 50)
Validation data: (178, 822, 50)
Traceback (most recent call last):
  File "E:/Tutorial/new.py", line 31, in <module>
    train_samples, train_nx, train_ny = train.shape
ValueError: not enough values to unpack (expected 3, got 2)
There's an error in this line:
train = data_processed[:, int(val_split), :]
It should be:
train = data_processed[:int(val_split), :, :]
val = data_processed[int(val_split):, :, :]
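With that fix, both splits keep their three dimensions, so the unpacking a few lines later works. A quick sanity check, with the expected shapes given the 1713-row, 50-column data and sequence_length=822:

train = data_processed[:int(val_split), :, :]
val = data_processed[int(val_split):, :, :]
print(train.shape)  # (713, 822, 50): three values, so the unpack succeeds
print(val.shape)    # (178, 822, 50)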

Lasagne performs much worse than Keras?

I'm doing a comparison between Keras (with Theano) and Lasagne on a toy regression problem in order to choose one of the two for my final application. As a result of this comparison, I see that Lasagne is performing so much worse than Keras that I'm starting to doubt my code. Since I'm quite new to both Keras and Lasagne, I would like to check this with someone more experienced than me.

The network should be trained to find the mean of a 16x16 matrix. I made different attempts: first, I tried a 2D conv layer plus a dense layer (since my final application will require a CNN). Then, since the Lasagne results were horrible, I tried a standard one-layer MLP. Again, awful Lasagne performance. I used the same specs in both cases: same batch size, same initialization, same optimizer (I tested both SGD with Nesterov momentum and ADAM), and of course the same number of epochs and network architecture.

Can someone tell me what is going on? Is there something wrong in my code? Why so much difference in performance? If everything is correct, why does Keras perform so much better than Lasagne?
Here the codes I am using:
Keras:
# -*- coding: utf-8 -*-
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.layers import Convolution2D
from keras import backend as K
from keras.optimizers import SGD
import matplotlib.pyplot as plt
batch_size = 500
nb_output = 1
nb_epoch = 10
# input image dimensions
img_rows, img_cols = 16, 16
# number of convolutional filters to use
nb_filters = 20
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
X_train = np.random.randn(10000, 16*16)
Y_train = np.mean(X_train, 1)
X_train = X_train.astype('float32')
X_test = np.random.randn(1000, 16*16)
Y_test = np.mean(X_test, 1)
if K._BACKEND == 'theano':
    X_train = np.reshape(X_train, (10000, 1, 16, 16))
    X_test = np.reshape(X_test, (1000, 1, 16, 16))
else:
    X_train = np.reshape(X_train, (10000, 16, 16, 1))
    X_test = np.reshape(X_test, (1000, 16, 16, 1))
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                        border_mode='same',
                        input_shape=X_train.shape[1:], init='glorot_uniform'))
model.add(Activation('relu'))
#model.add(Flatten(input_shape=X_train.shape[1:]))
model.add(Flatten())
model.add(Dense(10, init='glorot_uniform'))
model.add(Activation('sigmoid'))
model.add(Dense(nb_output, init='glorot_uniform'))
model.add(Activation('linear'))
sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)#decay=1e-6,
model.compile(loss='mse',
              optimizer=sgd)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
          verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=1)
predicts = model.predict(X_test, batch_size=1000, verbose=0)
print('Test score:', score[0])
plt.figure()
plt.scatter(Y_test, predicts)
Lasagne (adapted from mnist example):
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
import matplotlib.pyplot as plt

def load_dataset():
    np.random.seed(1337)
    X_train = np.random.randn(10000, 16*16)
    X_train = X_train.astype('float32')
    Y_train = np.mean(X_train, 1)
    X_test = np.random.randn(1000, 16*16)
    X_test = X_test.astype('float32')
    Y_test = np.mean(X_test, 1)
    X_train = np.reshape(X_train, (10000, 1, 16, 16))
    X_test = np.reshape(X_test, (1000, 1, 16, 16))
    return X_train, Y_train, X_test, Y_test

def build_cnn(input_var=None):
    network = lasagne.layers.InputLayer(shape=(None, 1, 16, 16),
                                        input_var=input_var)
    network = lasagne.layers.Conv2DLayer(
        network, num_filters=20, filter_size=(3, 3),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform())
    network = lasagne.layers.DenseLayer(
        network,
        num_units=10,
        nonlinearity=lasagne.nonlinearities.sigmoid)
    network = lasagne.layers.DenseLayer(
        network,
        num_units=1,
        nonlinearity=lasagne.nonlinearities.linear)
    return network

def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        yield inputs[excerpt], targets[excerpt]

def main(model='cnn', num_epochs=10):
    print("Loading data...")
    X_train, y_train, X_test, y_test = load_dataset()
    input_var = T.tensor4('inputs')
    target_var = T.vector('targets')
    print("Building model and compiling functions...")
    network = build_cnn(input_var)
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.squared_error(prediction, target_var)
    loss = loss.mean()
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=0.1, momentum=0.9)
    # updates = lasagne.updates.adam(loss, params)
    test_prediction = lasagne.layers.get_output(network)
    test_loss = lasagne.objectives.squared_error(test_prediction,
                                                 target_var)
    test_loss = test_loss.mean()
    train_fn = theano.function([input_var, target_var], loss, updates=updates)
    val_fn = theano.function([input_var, target_var], test_loss)
    preds = theano.function([input_var], test_prediction)
    print("Starting training...")
    for epoch in range(num_epochs):
        train_err = 0.0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(X_train, y_train, 500, shuffle=False):
            inputs, targets = batch
            train_err += train_fn(inputs, targets)
            train_batches += 1
        test_err = 0.0
        test_batches = 0
        for batch in iterate_minibatches(X_test, y_test, 500, shuffle=False):
            inputs, targets = batch
            err = val_fn(inputs, targets)
            test_err += err
            test_batches += 1
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{:.6f}".format(train_err / train_batches))
        print("  test loss:\t\t{:.6f}".format(test_err / test_batches))
    pds = preds(X_test)
    plt.scatter(y_test, pds)
    plt.show()

if __name__ == '__main__':
    main()
Both codes are easily adaptable to a one-layer MLP. If you run them, you get a scatter plot at the end for each framework (true values on the x axis, predicted values on the y axis), and the Lasagne predictions are visibly far worse than the Keras ones:

[scatter plot: Lasagne predictions vs. true values]
[scatter plot: Keras predictions vs. true values]
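One thing worth checking in the Lasagne code as a likely cause of the gap: prediction has shape (batch, 1) while target_var has shape (batch,), and lasagne.objectives.squared_error(prediction, target_var) broadcasts the difference to shape (batch, batch) before the mean, so the network trains on the wrong objective (Keras matches the target shape to the output shape for you). A sketch of the fix, flattening the predictions so the shapes agree:

prediction = lasagne.layers.get_output(network)
# flatten (batch, 1) -> (batch,) so no (batch, batch) broadcast happens
loss = lasagne.objectives.squared_error(prediction.flatten(), target_var)
loss = loss.mean()
test_prediction = lasagne.layers.get_output(network)
test_loss = lasagne.objectives.squared_error(test_prediction.flatten(),
                                             target_var).mean()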
