Plot training and validation accuracy and loss - python

I am new to Python and trying to plot the training and validation accuracy and loss for my MLPRegressor, but I am getting the following error. What am I doing wrong?
TypeError: fit() got an unexpected keyword argument 'validation_split'
mlp_new = MLPRegressor(hidden_layer_sizes=(18, 18, 18),
                       max_iter=10000000000, activation='relu',
                       solver='adam', learning_rate='constant',
                       alpha=0.05, validation_fraction=0.2, random_state=0, early_stopping=True)
mlp_new.fit(X_train, y_train)
mlp_new_y_predict = mlp_new.predict(X_test)
mlp_new_y_predict

import keras
from matplotlib import pyplot as plt

history = mlp_new.fit(X_train, y_train, validation_split=0.1, epochs=50, batch_size=4)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()

Yes, a validation_split argument does exist, but in the Keras Model.fit() method.
The model you are using here is not a Keras model: it is scikit-learn's MLPRegressor, and its .fit() method only takes two arguments, X and y.
See the Methods section of the documentation below:
https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html#sklearn.neural_network.MLPRegressor.fit
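If the goal is simply to visualise how training progressed for the MLPRegressor itself, a minimal sketch (assuming a reasonably recent scikit-learn version) is to plot the attributes the estimator records while fitting: loss_curve_ holds the training loss per iteration, and validation_scores_ holds the held-out R^2 score per iteration when early_stopping=True, as in your call above:
from matplotlib import pyplot as plt

# training loss recorded by MLPRegressor at each iteration
plt.plot(mlp_new.loss_curve_, label='training loss')
plt.title('MLPRegressor training loss')
plt.xlabel('iteration')
plt.ylabel('loss')
plt.legend(loc='upper right')
plt.show()

# held-out validation score (R^2), recorded because early_stopping=True
plt.plot(mlp_new.validation_scores_, label='validation score (R^2)')
plt.title('MLPRegressor validation score')
plt.xlabel('iteration')
plt.ylabel('R^2 score')
plt.legend(loc='lower right')
plt.show()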

Related

Python: How to make an accuracy and loss graph from my image dataset with my own h5 model

Just like the question above, I want to plot a training graph and a validation graph. All the examples I found import some other online dataset, which I don't want since I have my own; I tried to adapt them to my data but failed. I have my trained models saved as h5 files, but I don't know how to plot the graphs from them. Did I do something wrong, or make a mistake somewhere?
Here is the code I used and modified, which failed.
from tensorflow import keras
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt

# create a new generator
imagegen = ImageDataGenerator()
val_datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True,
)
# load val data
val_generator = val_datagen.flow_from_directory(r"dataset path",
                                                class_mode="categorical",
                                                shuffle=False,
                                                batch_size=3,
                                                target_size=(200, 200))
history = keras.models.load_model(r"dataset path.h5")
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()
and got this error.
File "C:\Users\User\PycharmProjects\Validation Graph\main.py", line 27, in <module>
plt.plot(history.history['accuracy'])
TypeError: 'NoneType' object is not subscriptable
This is one of many attempts that failed, and I'm tired of trying to fix it by myself. Please tell me what I did wrong and how to make it work while still using my own dataset.
Below is a function that will plot the training and validation loss and accuracy.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')

def tr_plot(tr_data, start_epoch):
    # Plot the training and validation data
    tacc = tr_data.history['accuracy']
    tloss = tr_data.history['loss']
    vacc = tr_data.history['val_accuracy']
    vloss = tr_data.history['val_loss']
    Epoch_count = len(tacc) + start_epoch
    Epochs = []
    for i in range(start_epoch, Epoch_count):
        Epochs.append(i + 1)
    index_loss = np.argmin(vloss)   # this is the epoch with the lowest validation loss
    val_lowest = vloss[index_loss]
    index_acc = np.argmax(vacc)
    acc_highest = vacc[index_acc]
    plt.style.use('fivethirtyeight')
    sc_label = 'best epoch= ' + str(index_loss + 1 + start_epoch)
    vc_label = 'best epoch= ' + str(index_acc + 1 + start_epoch)
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(25, 10))
    axes[0].plot(Epochs, tloss, 'r', label='Training loss')
    axes[0].plot(Epochs, vloss, 'g', label='Validation loss')
    axes[0].scatter(index_loss + 1 + start_epoch, val_lowest, s=150, c='blue', label=sc_label)
    axes[0].scatter(Epochs, tloss, s=100, c='red')
    axes[0].set_title('Training and Validation Loss')
    axes[0].set_xlabel('Epochs', fontsize=18)
    axes[0].set_ylabel('Loss', fontsize=18)
    axes[0].legend()
    axes[1].plot(Epochs, tacc, 'r', label='Training Accuracy')
    axes[1].scatter(Epochs, tacc, s=100, c='red')
    axes[1].plot(Epochs, vacc, 'g', label='Validation Accuracy')
    axes[1].scatter(index_acc + 1 + start_epoch, acc_highest, s=150, c='blue', label=vc_label)
    axes[1].set_title('Training and Validation Accuracy')
    axes[1].set_xlabel('Epochs', fontsize=18)
    axes[1].set_ylabel('Accuracy', fontsize=18)
    axes[1].legend()
    plt.tight_layout()  # note the parentheses: tight_layout is a function call
    plt.show()
    return index_loss
To use this, first create a training generator and a validation generator, then train your model with model.fit.
gen = ImageDataGenerator(rescale=1/255, rotation_range=20, width_shift_range=0.2,
                         height_shift_range=0.2, horizontal_flip=True,
                         validation_split=0.25)  # validation_split is required for subset= below; 0.25 is an assumed fraction
train_gen = gen.flow_from_directory(r"dataset path", target_size=(200, 200),
                                    batch_size=32, shuffle=True, seed=123,
                                    class_mode='categorical', subset='training')
valid_gen = gen.flow_from_directory(r"dataset path", target_size=(200, 200),
                                    batch_size=32, shuffle=True, seed=123,
                                    class_mode='categorical', subset='validation')
Next, train your model (I assume you have already created and compiled a model):
epochs = 10
history = model.fit(x=train_gen, epochs=epochs, verbose=1,
                    validation_data=valid_gen, shuffle=False, initial_epoch=0)
Then, to plot the results:
loss_index=tr_plot(history,0)
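As a side note on the original error: keras.models.load_model() returns a Model, not a training History, which is why history.history was None and subscripting it failed; a saved .h5 file does not carry the training curves. One workaround, sketched here under the assumption that you train the model once more and using a hypothetical file name history.json, is to dump history.history (a plain dict of lists) right after training and reload it later for plotting:
import json
import matplotlib.pyplot as plt

# right after the model.fit(...) call above
hist_dict = {k: [float(v) for v in vals] for k, vals in history.history.items()}  # make values JSON-serialisable
with open('history.json', 'w') as f:   # hypothetical file name
    json.dump(hist_dict, f)

# later, in a separate script: reload and plot without retraining
with open('history.json') as f:
    hist = json.load(f)
plt.plot(hist['accuracy'])
plt.plot(hist['val_accuracy'])
plt.title('model accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend(['train', 'val'], loc='upper left')
plt.show()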

Confused about why something ran once but not another time

So I ran this code last night and it worked fine: it plotted the training loss as a function of epoch. However, when I ran it today (after changing the batch size from 1 to 8) it gave me a 'plt not found' error. I then moved the plotting below the matplotlib import line and it worked. This suggests the import must come before the plotting, but how was I able to plot last night with the plot commands before the import?
This is just part of the complete code; the rest wasn't relevant. This was in a Jupyter notebook, so perhaps I had previously run the code without the plot lines inside the tf.device block, and the import was already loaded or something?
with tf.device(device_name):
    inputx = Input(shape=(7,))
    x = Dense(4, activation='elu', name='x1')(inputx)
    x = Dense(16, activation='elu', name='x2')(x)
    x = Dense(25, activation='elu', name='x3')(x)
    x = Dense(10, activation='elu', name='x4')(x)
    xke = Dense(5, name='x5')(x)
    model = Model(inputx, xke)
    adam = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-6, amsgrad=False)
    model.compile(optimizer=adam,
                  loss=['mean_squared_error', 'mean_squared_error', 'mean_squared_error', 'mean_squared_error', 'mean_squared_error'],
                  loss_weights=[1, 1, 1, 1, 1])
    model.summary()
    history = model.fit(X_train, y_train, batch_size=1, epochs=30, verbose=1)
    plt.plot(history.history['loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train'], loc='upper left')
    plt.show()

from sklearn.metrics import mean_squared_error as mse
train_pred = model.predict(X_train)
train_rmse_sk = np.sqrt(mse(y_train, train_pred, multioutput="raw_values"))
print("The training rmse value is: ", train_rmse_sk, "\n")
import matplotlib.pyplot as plt
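For context, a likely explanation, assuming the Jupyter kernel was not restarted between runs: once import matplotlib.pyplot as plt has been executed in any earlier cell or run, the name plt stays bound in the kernel's namespace, so a later run that reaches the plot calls before its own import line can still succeed. A minimal sketch of the effect:
# Cell 1 (run yesterday): binds plt in the kernel's namespace
import matplotlib.pyplot as plt

# Cell 2 (run today, even though its own import comes later or not at all):
# plt is still defined because the kernel was never restarted
plt.plot([1, 2, 3])
plt.show()

# After Kernel -> Restart, running Cell 2 alone raises NameError: name 'plt' is not defined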

How to optimize a model using the functional API of Keras

I am trying to build a model using the functional API of Keras.
Here is the entire model that I have made. I am not sure if it is correct, and I would be very happy if someone could take a look at it.
I first split the data into train and test sets.
from sklearn.model_selection import train_test_split
X1_train, X1_test, X2_train, X2_test, y_train, y_test = train_test_split(X1_scaled, X2_scaled, end_y, test_size=0.2)
[i.shape for i in (X1_train, X1_test, X2_train, X2_test, y_train, y_test)]
Here is the part where I start to build the model:
from tensorflow.keras import layers, Model, utils
# Build the model
input1 = layers.Input((10, 6))
input2 = layers.Input((10, 2, 5))
x1 = layers.Flatten()(input1)
x2 = layers.Flatten()(input2)
concat = layers.concatenate([x1, x2])
# Add hidden and dropout layers
hidden1 = layers.Dense(64, activation='relu')(concat)
hid1_out = layers.Dropout(0.5)(hidden1)
hidden2 = layers.Dense(32, activation='relu')(hid1_out)
hid2_out = layers.Dropout(0.5)(hidden2)
output = layers.Dense(1, activation='sigmoid')(hid2_out)
model = Model(inputs=[input1, input2], outputs=output)
# summarize layers
print(model.summary())
# compile the model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# fit the keras model on the dataset
history = model.fit([X1_train, X2_train], y_train, epochs=200, batch_size=5, verbose=0, validation_data=([X1_test, X2_test], y_test))
# evaluate the keras model
_, train_accuracy = model.evaluate([X1_train, X2_train], y_train, verbose=0)
_, test_accuracy = model.evaluate([X1_test, X2_test], y_test, verbose=0)
print('Accuracy NN: %.2f' % (train_accuracy*100))
print('Accuracy NN: %.2f' % (test_accuracy*100))
A problem occurs here. No plot is showing.
# Plots
from matplotlib import pyplot
pyplot.subplot(211)
pyplot.title('Loss')
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
# plot accuracy
pyplot.subplot(212)
pyplot.title('Accuracy')
pyplot.plot(history.history['accuracy'], label='train')
pyplot.plot(history.history['val_accuracy'], label='test')
pyplot.legend()
pyplot.show()
Could someone give me any hints on how to manage it?
Thank you for giving me some of your time.
Below is the code for a function that will produce two plots side by side. The first plot shows training loss and validation loss versus epochs; the second shows training accuracy and validation accuracy versus epochs. It also places a dot on the first plot at the epoch with the lowest validation loss and a dot on the second plot at the epoch with the highest validation accuracy.
import numpy as np
import matplotlib.pyplot as plt

def tr_plot(history):
    # Plot the training and validation data
    tacc = history.history['accuracy']
    tloss = history.history['loss']
    vacc = history.history['val_accuracy']
    vloss = history.history['val_loss']
    Epoch_count = len(tacc)
    Epochs = []
    for i in range(Epoch_count):
        Epochs.append(i + 1)
    index_loss = np.argmin(vloss)   # epoch with the lowest validation loss
    val_lowest = vloss[index_loss]  # lowest validation loss value
    index_acc = np.argmax(vacc)     # epoch with the highest validation accuracy
    acc_highest = vacc[index_acc]   # highest validation accuracy value
    plt.style.use('fivethirtyeight')
    sc_label = 'best epoch= ' + str(index_loss + 1)
    vc_label = 'best epoch= ' + str(index_acc + 1)
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 8))
    axes[0].plot(Epochs, tloss, 'r', label='Training loss')
    axes[0].plot(Epochs, vloss, 'g', label='Validation loss')
    axes[0].scatter(index_loss + 1, val_lowest, s=150, c='blue', label=sc_label)
    axes[0].set_title('Training and Validation Loss')
    axes[0].set_xlabel('Epochs')
    axes[0].set_ylabel('Loss')
    axes[0].legend()
    axes[1].plot(Epochs, tacc, 'r', label='Training Accuracy')
    axes[1].plot(Epochs, vacc, 'g', label='Validation Accuracy')
    axes[1].scatter(index_acc + 1, acc_highest, s=150, c='blue', label=vc_label)
    axes[1].set_title('Training and Validation Accuracy')
    axes[1].set_xlabel('Epochs')
    axes[1].set_ylabel('Accuracy')
    axes[1].legend()
    plt.tight_layout()  # call the function; without () it does nothing
    plt.show()
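To use it, pass in the History object returned by model.fit, i.e. the history variable from the training call above:
tr_plot(history)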
The result is a figure with the loss curves on the left and the accuracy curves on the right.

how to plot training error and validation error vs number of epochs?

train_data = generate_arrays_for_training(indexPat, filesPath, end=75)
validation_data = generate_arrays_for_training(indexPat, filesPath, start=75)
model.fit_generator(generate_arrays_for_training(indexPat, filesPath, end=75),                    # takes the first 75%
                    validation_data=generate_arrays_for_training(indexPat, filesPath, start=75),  # takes the last 25%
                    steps_per_epoch=int((len(filesPath) - int(len(filesPath) / 100 * 25))),
                    validation_steps=int((len(filesPath) - int(len(filesPath) / 100 * 75))),
                    verbose=2,
                    epochs=300, max_queue_size=2, shuffle=True, callbacks=[callback])
This might be what you're looking for, but you should provide more details in order to get a more suitable answer
import matplotlib.pyplot as plt
hist = model.fit_generator(...)
plt.figure()
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train','val'], loc = 'upper left')
plt.show()
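A small caveat, assuming a reasonably recent TensorFlow (2.1 or later): Model.fit_generator is deprecated there, and model.fit accepts generators directly, so the same idea can be sketched as:
import matplotlib.pyplot as plt

# same plotting idea, but with model.fit, which accepts generators in TF 2.x
hist = model.fit(generate_arrays_for_training(indexPat, filesPath, end=75),
                 validation_data=generate_arrays_for_training(indexPat, filesPath, start=75),
                 steps_per_epoch=int(len(filesPath) - int(len(filesPath) / 100 * 25)),
                 validation_steps=int(len(filesPath) - int(len(filesPath) / 100 * 75)),
                 epochs=300, verbose=2, callbacks=[callback])

plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()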

Plot graph for either loss or accuracy of the keras predictive model

So I am trying to plot a graph for my model: say I have 20 epochs, the graph should show the accuracy/loss at each epoch. So far I have found this code on the Keras website.
history = model.fit(x_train, y_train, epochs = 30, batch_size = 128,validation_split = 0.2)
plot(history)
I tried using this on my data.
import matplotlib.pyplot as plt
plt.plot(history)
So this is the error I am getting
TypeError: float() argument must be a string or a number, not 'History'
Is there any way of correcting this or any other way of plotting a graph for each epoch?
Thank you.
model_history = model.fit(...)
plt.figure()
plt.subplot(211)
plt.plot(model_history.history['accuracy'])
plt.subplot(212)
plt.plot(model_history.history['loss'])
This code worked for me.
print(history.history.keys())  # shows the keys recorded in history; in my case loss and acc
plt.plot(history.history['acc'])  # here I plot only accuracy; the same works for loss
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.show()
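One thing to watch, depending on which Keras/TensorFlow version is installed: older standalone Keras records the metric under 'acc'/'val_acc', while TF 2.x records it under 'accuracy'/'val_accuracy', so checking history.history.keys() first (as above) avoids a KeyError. A small sketch that handles both spellings:
import matplotlib.pyplot as plt

# pick whichever accuracy key this Keras/TF version actually recorded
acc_key = 'accuracy' if 'accuracy' in history.history else 'acc'
val_key = 'val_' + acc_key

plt.plot(history.history[acc_key], label='train')
if val_key in history.history:
    plt.plot(history.history[val_key], label='val')
plt.title('model accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend(loc='upper left')
plt.show()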
