How to get weights from tensorflow fully_connected with Google Colab - python

I'm trying to extract the weights from a model after training it. Here's my code.
1. How do I save the model to the previously uploaded directory path?
2. How do I get the weights?
3. I have a CSV file with the same columns as the one used for modeling, but without the class column. How can I estimate the classes with this model?
# Preprocess
import pandas as pd
from sklearn.model_selection import train_test_split
# Prepare data
churn = pd.read_csv('Churn_Modelling3.csv')
# Split data into train and test sets
x_train, x_test, y_train, y_test = train_test_split(churn.iloc[:, 0:10], churn.iloc[:, 10], test_size=0.2)
# Create a model
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
# Input layer
model.add(Dense(32, activation=tf.nn.sigmoid, input_dim=x_train.shape[1]))
# Hidden layers
model.add(Dense(64, activation=tf.nn.sigmoid))
model.add(Dense(32, activation=tf.nn.sigmoid))
# Output layer
model.add(Dense(1, activation=tf.nn.sigmoid))
# Compile the model
from tensorflow.keras.optimizers import SGD
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# Pass the configured SGD instance, not the string 'sgd' (which ignores it)
model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])
# Fit the model
history = model.fit(x_train, y_train, epochs=20, batch_size=128, validation_data=(x_test, y_test))
# Evaluate the model
score = model.evaluate(x_test, y_test, batch_size=128)

How to save the model in the previously uploaded directory path? => You can use any of the commands below:
`model.save_weights('./Weights/Weights')`, or
`tf.keras.experimental.export_saved_model`, or
`model.save`
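A minimal sketch of those three options (the './Weights' and 'my_model.h5' paths here are placeholders, not from the original post):
import tensorflow as tf
# 1. Save only the weights (writes checkpoint files under ./Weights/)
model.save_weights('./Weights/Weights')
# 2. Export the model as a SavedModel directory (TF 1.13-2.0 API)
tf.keras.experimental.export_saved_model(model, './saved_model')
# 3. Save the full model (architecture + weights + optimizer state) to HDF5
model.save('my_model.h5')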
How to get weights? => You can load saved weights back with any of the commands below:
`model.load_weights('my_model_weights.h5')`, or
`keras.models.load_model('my_model.h5')`, or
`tf.keras.experimental.load_from_saved_model`
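If by "get weights" you mean inspecting the trained values directly, Keras also exposes them as NumPy arrays; a minimal sketch against the model above:
# All weights of the model, as a list of NumPy arrays
all_weights = model.get_weights()
# Per-layer access: a Dense layer returns [kernel, bias]
kernel, bias = model.layers[0].get_weights()
print(kernel.shape, bias.shape)  # (10, 32) and (32,) for the input layer above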
I have some CSV file, the same as the one used for modeling but without the class column. How can I estimate the classes with this model? => One way is to run the code below on the new data (predict accepts the whole feature matrix at once):
ynew = model.predict(Xnew)
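A minimal sketch of that, assuming the new file (named 'Churn_Modelling_new.csv' here purely for illustration) has the same ten feature columns as the training data:
import pandas as pd
# Hypothetical file name; substitute your actual unlabeled CSV
new_data = pd.read_csv('Churn_Modelling_new.csv')
Xnew = new_data.iloc[:, 0:10]
# Probabilities from the sigmoid output, thresholded to class labels
probs = model.predict(Xnew)
classes = (probs > 0.5).astype(int)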
For more information, you can refer to the links below:
https://www.tensorflow.org/tutorials/keras/save_and_restore_models
https://www.tensorflow.org/guide/saved_model
https://www.tensorflow.org/api_docs/python/tf/keras/Model

Related

Multiple variable output in Neural Network | Why is Keras yielding negative binary_cross_entropy?

I am encountering an issue with a school project.
I have to predict, on a test set and based on textual data, the age and gender of a person. My training dataset has 4 features (ID, keywords, age, sex).
I created a neural network (please see the code below), but when fitting it, my loss values are extremely negative.
Could you please tell me how to fix this issue?
import pandas as pd
import numpy as np
import plotly.express as px
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
#Load the datasets
chunk_train = pd.read_csv('/Users/romeoleon/Downloads/train.csv',chunksize=10**6)
data_train = pd.concat(chunk_train)
#Map the values for sex columns
data_train.sex = data_train.sex.map({'M':0,'F':1})
#Remove the rows with missing data
print('Missing rows represent {} percent of the dataframe'.format(data_train['keywords'].isna().sum()/len(data_train.keywords)*100))
#Drop the missing values
data_train.dropna(inplace=True)
#Plot the distribution of numerical variables
sns.histplot(data_train.age,bins=85)
plt.show()
sns.countplot(x='sex',data=data_train)
plt.show()
#Prepare the data to feed it to the NN
import numpy as np
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
x_train, x_test, y_train, y_test = train_test_split(data_train['keywords'],data_train[["age","sex"]],test_size=0.2)
#Choose parameters
vocab_size = 1000
maxlen = 300
batch_size = 32
embedding_dims = 100
hidden_dims = 5
filters = 250
kernel_size = 3
epochs = 10
#Tokenize the words
tokenizer = Tokenizer(num_words=vocab_size)
tokenizer.fit_on_texts(x_train)
X_train = tokenizer.texts_to_matrix(x_train)
X_test = tokenizer.texts_to_matrix(x_test)
#Pad sequencing : Ensure all sequences have the same length
from tensorflow.keras.preprocessing import sequence
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, MaxPooling1D
from tensorflow.keras.layers import Embedding, LSTM
from tensorflow.keras.layers import Conv1D, Flatten
#Create the model
model = Sequential()
model.add(Embedding(vocab_size, embedding_dims, input_length=maxlen, trainable=True))
model.add(Dropout(0.5))
model.add(Conv1D(filters,
                 kernel_size,
                 padding='valid',
                 activation='relu'))
#model.add(MaxPooling1D(pool_size=4))
model.add(Flatten())
model.add(Dense(hidden_dims, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='sigmoid'))
# Compile neural model
model.compile(loss='binary_crossentropy',  # cross-entropy loss
              optimizer='adam',            # Adam optimizer
              metrics=['accuracy'])        # accuracy performance metric
model.summary()
#Fit the model
model.fit(X_train, y_train,
          batch_size=batch_size,
          epochs=1,
          validation_data=(X_test, y_test), verbose=1)
(A screenshot of the training dataset's structure was attached here.)
When using 'binary_crossentropy' as the loss function, the Dense layer at the output end should have only 1 unit rather than 2 (one unit already has two states, 1 or 0).
Use this instead:
model.add(Dense(1, activation='sigmoid'))
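A minimal sketch of the corrected output end under that suggestion, assuming the binary sex column is the single target (age would need its own output and loss):
# Output layer: one unit for the binary label
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Fit against a single binary target, e.g. the sex column
model.fit(X_train, y_train['sex'],
          batch_size=batch_size,
          epochs=1,
          validation_data=(X_test, y_test['sex']),
          verbose=1)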

In Python: cross-validation in a neural network with multi-class labels in the target variable

Hey everyone, can you please help me find the solution to this problem?
I want to apply cross-validation to a neural network, but it is showing NaN values in the output.
Here is my code:
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
encoder = LabelEncoder()
encoder.fit(y)
encoded_Y = encoder.transform(y)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = np_utils.to_categorical(encoded_Y)
# define baseline model
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(8, input_dim=4, activation='relu'))
    model.add(Dense(3, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
estimator = KerasClassifier(build_fn=baseline_model, epochs=10, batch_size=5, verbose=1)
kfold = KFold(n_splits=10, shuffle=True)
results = cross_val_score(estimator, X, dummy_y, cv=kfold)
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))

Value error while accessing 10,000 .png image files from a folder

I'm trying to train on a large dataset of about 10,000 images using the VGG16 pre-trained network. For this I wrote the code below, but it raises:
ValueError: too many values to unpack (expected 2).
path = "C:/Users/52/.spyder-py3/IAM/train_patches/*.png"
(X_train, y_train), (X_test, y_test) = path  # The error is occurring here
Initially, when I was using it simply, it was working, but now that I'm using the datagen function it's not working. Please kindly help me correct this code.
from keras.models import model_from_json
from keras.applications import VGG16
import numpy as np
import glob
import os
import keras
from keras.utils import to_categorical
from keras import backend as K
from PIL import Image
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from sklearn.model_selection import train_test_split
from keras import optimizers
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
seed=10
np.random.seed(seed)
path= "C:/Users/52/.spyder-py3/IAM/train_patches/*.png"
(X_train, y_train) = path  # The error is occurring here
sample_image = X_train[1,:,:,:]
plt.imshow(sample_image), plt.axis('off')
plt.show()
classes = 651
Y_train = to_categorical(y_train,classes)
X_train = X_train.astype('float32')
X_train = X_train/255
img_rows, img_cols = 500,500
channels=3
#Include_top=False, Does not load the last two fully connected layers which act as the classifier.
#We are just loading the convolutional layers.
vgg_conv = VGG16(weights='imagenet',include_top=False,input_shape=(img_rows,img_cols,3))
# freeze the layer except the last 4 layers
for layer in vgg_conv.layers[:-4]:
    layer.trainable = False
num_classes=10
model = Sequential()
# Add the vgg convolutional base model
model.add(vgg_conv)
# Add new layers
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
# Show a summary of the model. Check the number of trainable parameters
model.summary()
datagen = ImageDataGenerator(rotation_range=40,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode='nearest')
datagen.fit(X_train)
print("Size is: ",X_train.shape[0])
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
#generator=datagen.flow(datagen.flow(X_train,Y_train,batch_size=128))
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=128),
                              steps_per_epoch=X_train.shape[0]/128,
                              epochs=2,
                              verbose=1)
acc = history.history['acc']
loss = history.history['loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'b', label='Training acc')
plt.title('Training accuracy')
plt.legend()
plt.show()
model_json = model.to_json()
open('C:/Users/52/.spyder-py3/IAM/imdata.json','w').write(model_json)
model.save_weights('C:/Users/52/.spyder-py3/IAM/imdata.h5',overwrite=True)
The error means that you're trying to unpack the right-hand side of the = into multiple variables, but a plain path string doesn't unpack that way; nothing has actually read the images yet.
I think it's because you're trying to read the data and labels and make the training/testing split in a single assignment.
Try reading all the images and labels into only two variables and then making the split.
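A minimal sketch of that approach, assuming each patch's label can be derived from its file name (the naming scheme '<class>_<id>.png' below is hypothetical; adapt parse_label to your files):
import glob
import os
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
def parse_label(filename):
    # Hypothetical naming scheme: '<class>_<id>.png'
    return int(os.path.basename(filename).split('_')[0])
images, labels = [], []
for f in glob.glob("C:/Users/52/.spyder-py3/IAM/train_patches/*.png"):
    img = Image.open(f).convert('RGB').resize((500, 500))
    images.append(np.asarray(img))
    labels.append(parse_label(f))
X = np.array(images)
y = np.array(labels)
# Only after loading, make the split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=10)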

Why do I have a small accuracy?

I have code to train on the MNIST dataset as a step toward the Street View House Numbers project, but when I run the code I get acc = 0.1.
Import libraries and modules
import numpy as np
np.random.seed(123)  # for reproducibility
import keras  # needed below for keras.utils.normalize
from keras import backend as K
K.set_image_dim_ordering('th')
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
from keras.models import load_model
from keras.utils import CustomObjectScope
from keras.initializers import glorot_uniform
4. Load pre-shuffled MNIST data into train and test sets
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
X_train = keras.utils.normalize(X_train,axis=1)
X_test = keras.utils.normalize(X_test, axis=1)
7. Define model architecture
model = Sequential()
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
8. Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
9. Fit model on training data
model.fit(np.array(X_train), np.array(Y_train), batch_size=32, epochs=3,verbose=1)
In step 4, are you normalizing the data correctly? If I recall correctly, X_train has shape (batch, width, height). I don't really know what you would want to normalize along axis=1; it doesn't seem like it's supposed to be there. I think you should remove that normalization.
If you still have low accuracy, try training for more than 3 epochs; 3 epochs are not that many.
There are several reasons why you get an accuracy like that:
Your data is not normalized correctly.
You are trying to do image recognition with 3 dense layers in 3 epochs, which will not work well.
There is no tuning or optimization in your code.
Look at https://keras.io/examples/mnist_cnn/. It is the Keras documentation example on working with the MNIST data using a neural network.
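A minimal sketch of the standard preprocessing those answers point toward, scaling pixels to [0, 1] and one-hot encoding the labels so they match the 10-unit softmax with categorical_crossentropy (reusing the model defined above):
from keras.datasets import mnist
from keras.utils import np_utils
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
# Scale pixel values to [0, 1] instead of L2-normalizing along axis=1
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0
# One-hot encode the integer labels for categorical_crossentropy
Y_train = np_utils.to_categorical(Y_train, 10)
Y_test = np_utils.to_categorical(Y_test, 10)
model.fit(X_train, Y_train, batch_size=32, epochs=10, verbose=1)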

'tf' is not defined on load_model() - using lambda

I have a Keras model that I am trying to export and use in a different python code.
Here is my code:
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, GRU, Flatten, Dropout, Lambda
from keras.layers.embeddings import Embedding
import tensorflow as tf
EMBEDDING_DIM = 100
model = Sequential()
model.add(Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=max_length, trainable=False))
model.add(Lambda(lambda x: tf.reduce_mean(x, axis=1)))
model.add(Dense(8, input_dim=4, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train_pad, y_train, batch_size=128, epochs=25, validation_data=(X_val_pad, y_val), verbose=2)
model.save('my_model.h5')
In another file, when I import my_model.h5 :
from keras.models import load_model
from keras.layers import Lambda
import tensorflow as tf
def learning(test_samples):
    model = load_model('my_model.h5')
    # ERROR HERE
    # rest of the code
The error is the following:
in <lambda>
model.add(Lambda(lambda x: tf.reduce_mean(x, axis=1)))
NameError: name 'tf' is not defined
After some research, I learned that my use of a lambda in the model is the reason for this problem, but I added these imports and it didn't help:
from keras.models import load_model
from keras.layers import Lambda
import tensorflow as tf
What could be the problem?
Thank you
When loading the model, you need to explicitly handle custom objects or custom layers (CTRL+f the docs for Handling custom layers):
import tensorflow as tf
import keras
model = keras.models.load_model('my_model.h5', custom_objects={'tf': tf})
It happened to me too. You need to import tensorflow inside your lambda function. So you probably want to put the code in a separate function:
def reduce_mean(x):
    import tensorflow as tf
    return tf.reduce_mean(x, axis=1)
model = Sequential()
model.add(Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=max_length, trainable=False))
model.add(Lambda(reduce_mean))
model.add(Dense(8, input_dim=4, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train_pad, y_train, batch_size=128, epochs=25, validation_data=(X_val_pad, y_val), verbose=2)
model.save('my_model.h5')
Besides, when you try to load the model structure from a json file, passing custom objects solves the problem.
model = model_from_json(open("model_structure.json", "r").read(), custom_objects={'tf': tf})
One weird solution that has worked for me is to import the module used to create the model. Say you have a file, make_model.py, that you use to create and save the model:
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, GRU, Flatten, Dropout, Lambda
from keras.layers.embeddings import Embedding
import tensorflow as tf
EMBEDDING_DIM = 100
model = Sequential()
model.add(Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=max_length, trainable=False))
model.add(Lambda(lambda x: tf.reduce_mean(x, axis=1)))
...
model.fit(X_train_pad, y_train, batch_size=128, epochs=25, validation_data=(X_val_pad, y_val), verbose=2)
model.save('my_model.h5')
If you load the model in another file load_model.py, you may be able to get around the error via import of the first module.
from keras.models import load_model
from keras.layers import Lambda
import tensorflow as tf
import make_model
def learning(test_samples):
    model = load_model('my_model.h5')
This worked for me since the error read
UserWarning: make_model is not loaded, but a Lambda layer uses it. It may cause errors.
...
NameError: name 'tf' is not defined
Happened to me as well; however, in my case the problem was that the code had been refactored and the Lambda layer was replaced by something else. In that situation, you can always roll back to the point where it worked.
The simple workaround, in case you are not able to restore the previous solution, is adding custom_objects={'tf': tf} to the load_model call.
