I am new to KerasRegressor. I am trying to run a neural net with the following code:
import tensorflow as tf
import numpy as np
from sklearn import datasets, linear_model
from sklearn.model_selection import cross_val_score, KFold
from keras.models import Sequential
from sklearn.metrics import accuracy_score
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
seed = 1
def baseline_model():
    model = Sequential()
    model.add(Dense(10, input_dim=10, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
estimator = KerasRegressor(build_fn=baseline_model, nb_epoch=100, batch_size=100, verbose=False)
kfold = KFold(n_splits=10, random_state=seed)
results = cross_val_score(estimator, X_train,y_train, cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))
estimator.fit(X_train, y_train)
prediction = estimator.predict(X_test)
Then I stumbled upon the following error:
ValueError: Input 0 of layer sequential_45 is incompatible with the layer: expected axis -1 of input shape to have value 10 but received input with shape [None, 39]
UPDATE: I fixed the error; it was in input_dim, which should be changed to X_train.shape[-1]. However, I still get a very large error, even after normalizing X_train:
Results: -1674844.52 (3109620.06) MSE
How should I solve this? Thanks!
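For reference, here is a minimal sketch of the corrected model builder (it assumes X_train is the same array used above and simply swaps the hard-coded input_dim for the actual feature count, which the error message suggests is 39):
def baseline_model():
    model = Sequential()
    # input_dim must match the number of features in X_train, not a fixed 10
    model.add(Dense(10, input_dim=X_train.shape[-1], activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
If I read the old keras.wrappers.scikit_learn wrapper correctly, its score is the negated loss, so the minus sign on the results is expected; it is the magnitude that is the real concern.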
Related
I just started with ML (autoencoders in particular) and I am having problems getting my code to run.
I have built an input vector "x" as "artificial data", and I am trying to reduce the dimensionality of this "artificial data" using an autoencoder.
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Lambda
import tensorflow.keras.backend as K
from keras.models import Input, Model, load_model
from keras.layers import Dense
from sklearn.model_selection import train_test_split
N=64
z1=tf.linspace(0,1,N)
z2=tf.linspace(0,2,N)
z3=tf.linspace(0,3,N)
z4=tf.linspace(0,4,N)
z5=tf.linspace(0,5,N)
y1=np.sin(z1)
y2=np.sin(z2)
y3=np.sin(z3)
y4=np.sin(z4)
y5=np.sin(z5)
x=tf.concat([y1,y2,y3,y4,y5,z1,z2,z3,z4,z5],0)
x=np.matrix(x).T
main_input = layers.Input(shape=(N,), name='main_input')
encoded = Dense(32, activation='tanh')(main_input)
decoded = Dense(N, activation='tanh')(encoded)
ae = Model(inputs=main_input, outputs=decoded)
print('Full autoencoder')
print(ae.summary())
print('\n Encoder portion of autoencoder') # print(encoder.summary())
ae.compile(optimizer='adam', loss='mse', metrics=['mse'])
batch_size = 2
epochs = 100
x_train, x_test, _, _ = train_test_split(x, x, test_size=0.33, random_state=42)
results = ae.fit(x_train, x_train,
                 batch_size=batch_size,
                 epochs=epochs,
                 validation_data=(x_train, x_train))
I am getting the following error:
ValueError: Exception encountered when calling layer "model" (type Functional).
Input 0 of layer "dense" is incompatible with the layer: expected axis -1 of input shape to have value 64, but received input with shape (2, 1)
Call arguments received:
• inputs=tf.Tensor(shape=(2, 1), dtype=float32)
• training=True
• mask=None
Thanks a lot in advance!
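For context on the error above: after np.matrix(x).T the data is a single column of shape (640, 1), while the Input layer expects rows of length N=64. A minimal sketch of one possible fix (an assumption about the intended layout, not the original author's code) is to make each of the ten concatenated segments its own 64-feature sample:
x = np.asarray(x).reshape(-1, N)   # shape (10, 64): one row per concatenated segment
so that each training example matches the shape=(N,) input.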
A dense network is very accurate at this kind of sine pattern matching.
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Lambda
import tensorflow.keras.backend as K
from keras.models import Input, Model, load_model
from keras.layers import Dense
from sklearn.model_selection import train_test_split
N=64
z1=np.linspace(0,1,N)
z2=np.linspace(0,2,N)
z3=np.linspace(0,3,N)
z4=np.linspace(0,4,N)
z5=np.linspace(0,5,N)
y1=np.sin(z1)**2
y2=np.sin(z2)**3
y3=np.sin(z3)
y4=np.sin(z4)
y5=np.sin(z5)
X=np.concatenate((z1,z2,z3,z4,z5))
y=np.concatenate((y1,y2,y3,y4,y5,))
#y=np.matrix(y).T
#plt.plot(X,y)
X_train, X_test, y_train, y_test= train_test_split(X,y,test_size=0.3)
model=Sequential()
model.add(layers.Input(shape=(1,), name='main_input'))
model.add(Dense(200, activation='tanh'))
model.add(Dense(100, activation='tanh'))
model.add(Dense(32, activation='tanh'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse', metrics=['mse'])
history=model.fit(X_train, y_train, epochs=1000, verbose=0)
predictionResults=model.predict(X_test)
index=0
results=predictionResults.flatten()
for value in X_test:
    plt.scatter(value, results[index])
    index += 1
plt.plot(X,y)
plt.show()
plt.plot(history.history['loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train'], loc='upper left')
plt.show()
So I'm trying to train a neural network, and at no point does it appear that pickle is even being used, so I'm somewhat confused. Here are the details:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
X.shape
(149, 8)
y.shape
(149,)
# define base model
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(149, input_dim=149, kernel_initializer='normal', activation='relu'))
    model.add(Dense(75))
    model.add(Dense(1, kernel_initializer='normal'))
    # Compile model
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
estimator = KerasRegressor(build_fn=baseline_model(), epochs=100, batch_size=5, verbose=2)
kfold = KFold(n_splits=3)
results = cross_val_score(estimator, X, y, cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))
Which is where the problem arises...
TypeError: can't pickle _thread._local objects
What am I doing wrong?
You need to pass a callable function as build_fn in KerasRegressor. Removing the round brackets should make it work.
Following your code, change:
estimator = KerasRegressor(build_fn=baseline_model(), epochs=100, batch_size=5, verbose=2)
into:
estimator = KerasRegressor(build_fn=baseline_model, epochs=100, batch_size=5, verbose=2)
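For completeness, a minimal usage sketch with the corrected wrapper (assuming X, y, and baseline_model are exactly as defined in the question):
estimator = KerasRegressor(build_fn=baseline_model, epochs=100, batch_size=5, verbose=2)
kfold = KFold(n_splits=3)
# scikit-learn clones the wrapper for each fold and the wrapper calls baseline_model()
# itself to build a fresh Keras model, which is why the function (not an already-built
# model) has to be passed.
results = cross_val_score(estimator, X, y, cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))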
Hey everyone, can you please help me find the solution to this problem? I want to apply cross-validation to a neural network, but it is showing NaN values in the output. Here is my code:
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
encoder = LabelEncoder()
encoder.fit(y)
encoded_Y = encoder.transform(y)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = np_utils.to_categorical(encoded_Y)
# define baseline model
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(8, input_dim=4, activation='relu'))
    model.add(Dense(3, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
estimator = KerasClassifier(build_fn=baseline_model, epochs=10, batch_size=5, verbose=1)
kfold = KFold(n_splits=10, shuffle=True)
results = cross_val_score(estimator, X, dummy_y, cv=kfold)
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
I am testing the code below.
#%matplotlib inline
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.model_selection import cross_validate
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegressionCV
iris = sns.load_dataset("iris")
iris.head()
sns.pairplot(iris, hue='species')
X = iris.values[:, 0:4]
y = iris.values[:, 4]
train_X, test_X, train_y, test_y = train_test_split(X, y, train_size=0.5, random_state=0)
lr = LogisticRegressionCV()
lr.fit(train_X, train_y)
pred_y = lr.predict(test_X)
print("Test fraction correct (Accuracy) = {:.2f}".format(lr.score(test_X, test_y)))
# Test fraction correct (Accuracy) = 0.93
import keras
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.utils import np_utils
train_y_ohe = pd.get_dummies(train_y)
test_y_ohe = pd.get_dummies(test_y)
model = Sequential()
model.add(Dense(16, input_shape=(4,)))
model.add(Activation('sigmoid'))
model.add(Dense(3))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
loss, accuracy = model.evaluate(test_X, test_y_ohe, show_accuracy=True, verbose=0)
print("Test fraction correct (Accuracy) = {:.2f}".format(accuracy))
Everything works fine until the next-to-last line of code.
When I try to run this:
loss, accuracy = model.evaluate(test_X, test_y_ohe, show_accuracy=True, verbose=0)
I get this error:
TypeError: evaluate() got an unexpected keyword argument 'show_accuracy'
I did a bit of research and found that 'show_accuracy=True' may have been deprecated a short time ago. Is there some other way of doing this now? How can I evaluate, and print, the accuracy of the model?
I found the code sample here:
https://blog.fastforwardlabs.com/2016/02/24/hello-world-in-keras-or-scikit-learn-versus.html
The show_accuracy argument is deprecated in new versions of Keras. Remove this argument from model.evaluate() and instead pass metrics=['accuracy'] to model.compile():
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# fit model
train_y_ohe = pd.get_dummies(train_y)
model.fit(train_X, train_y_ohe,epochs=1000,batch_size=20)
loss, accuracy = model.evaluate(test_X, test_y_ohe, verbose=0)
print("Test fraction correct (Accuracy) = {:.2f}".format(accuracy))
#Test fraction correct (Accuracy) = 0.97
I'm writing code for multiclass classification. I have custom datasets with 7 columns (6 features and 1 label); the training dataset has 2 types of labels (1 and 2), and the testing dataset has 3 types of labels (1, 2, and 3). The aim of the model is to see how well it predicts the label '3'.
As of now, I'm trying the MLP algorithm; the code is as follows:
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers.embeddings import Embedding
from keras import optimizers
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from keras.models import load_model
from sklearn.externals import joblib
from joblib import dump, load
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
#from keras.layers import Dense, Embedding, LSTM, GRU
#from keras.layers.embeddings import Embedding
#Load the test dataset
df1 = pd.read_csv("/home/user/Desktop/FinalTestSet.csv")
test = df1
le = LabelEncoder()
test['Average_packets_per_flow'] = le.fit_transform(test['Average_packets_per_flow'])
test['Average_PktSize_per_flow'] = le.fit_transform(test['Average_PktSize_per_flow'])
test['Avg_pkts_per_sec'] = le.fit_transform(test['Avg_pkts_per_sec'])
test['Avg_bytes_per_sec'] = le.fit_transform(test['Avg_bytes_per_sec'])
test['N_pkts_per_flow'] = le.fit_transform(test['N_pkts_per_flow'])
test['N_pkts_size_per_flow'] = le.fit_transform(test['N_pkts_size_per_flow'])
#Select the x and y columns from dataset
xtest_Val = test.iloc[:,0:6].values
Ytest = test.iloc[:,6].values
#print Ytest
#MinMax Scaler
scaler = MinMaxScaler(feature_range=(-1, 1))
Xtest = scaler.fit_transform(xtest_Val)
#print Xtest
#Load the train dataset
df2 = pd.read_csv("/home/user/Desktop/FinalTrainingSet.csv")
train = df2
le = LabelEncoder()
train['Average_packets_per_flow'] = le.fit_transform(train['Average_packets_per_flow'])
train['Average_PktSize_per_flow'] = le.fit_transform(train['Average_PktSize_per_flow'])
train['Avg_pkts_per_sec'] = le.fit_transform(train['Avg_pkts_per_sec'])
train['Avg_bytes_per_sec'] = le.fit_transform(train['Avg_bytes_per_sec'])
train['N_pkts_per_flow'] = le.fit_transform(train['N_pkts_per_flow'])
train['N_pkts_size_per_flow'] = le.fit_transform(train['N_pkts_size_per_flow'])
#Select the x and y columns from dataset
xtrain_Val = train.iloc[:,0:6].values
Ytrain = train.iloc[:,6].values
#print Ytrain
#MinMax Scaler
scaler = MinMaxScaler(feature_range=(-1, 1))
# Fit the model
Xtrain = scaler.fit_transform(xtrain_Val)
#Reshape data for CNN
Xtrain = Xtrain.reshape((Xtrain.shape[0], 1, 6, 1))
print(Xtrain)
#Xtest = Xtest.reshape((Xtest.shape[0], 1, 6, 1))
#print Xtrain.shape
max_length=70
EMBEDDING_DIM=100
vocab_size=100
num_labels=2
#Define model
def init_model():
    model = Sequential()
    model.add(Dense(64, activation='relu', input_dim=Xtrain.shape[0]))
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='softmax'))
    model.add(Flatten())
    # adam optimizer
    adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
print('Train...')
model = init_model()
#To avoid overfitting
callbacks = [EarlyStopping('val_loss', patience=3)]
hist = model.fit(Xtrain, Ytrain, epochs=50, batch_size=50, validation_split=0.20, callbacks=callbacks, verbose=1)
#Evaluate model and print results
score, acc = model.evaluate(Xtest, Ytest, batch_size=50)
print('Test score:', score)
print('Test accuracy:', acc)
However, I'm getting the following error:
ValueError: Input 0 is incompatible with layer flatten_1: expected min_ndim=3, found ndim=2
I tried to remove the Flatten layers, but I get a different error:
ValueError: Error when checking input: expected dense_1_input to have shape (424686,) but got array with shape (6,)
424686 is the number of rows in the dataset and 6 is the number of features.
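For reference, here is a minimal sketch of the model I think was intended (this is my assumption, keeping Xtrain in its original (n_samples, 6) shape instead of the 4-D reshape above, and dropping the Flatten layers):
def init_model():
    model = Sequential()
    # input_dim is the number of features per sample (6), not the number of rows
    model.add(Dense(64, activation='relu', input_dim=6))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    # one unit per class; no Flatten is needed when the input is already (batch, features)
    # note: categorical_crossentropy expects one-hot labels, e.g. via keras.utils.to_categorical
    model.add(Dense(num_labels, activation='softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model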
I appreciate any suggestion. Thank you.
Based on Omarfoq's suggestion, I now use three labels for both the training and testing datasets. The code and error remain unchanged.
Can anyone please suggest a solution? Thank you.
I would say that what you are trying is not logical: your model will never predict class "3" if it doesn't exist in the training set. What you are trying makes no sense. Try to reformulate your problem.
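As a tiny illustration of that point (hypothetical numbers, not from the original post): a softmax head sized for two classes can only ever emit indices for those two classes, so a label seen only at test time is unreachable.
import numpy as np
probs = np.array([[0.7, 0.3], [0.1, 0.9]])  # hypothetical 2-class softmax outputs
preds = probs.argmax(axis=1)                # only class indices 0 and 1 can ever appear
print(preds)  # [0 1]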