'Sequential' object has no attribute '_in_multi_worker_mode' - python

I tried to use Google Colab resources to save my CNN model weights, and I get this error. I tried googling it but nothing helps.
'Sequential' object has no attribute '_in_multi_worker_mode'
My code:
checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1)
cnn_model = Sequential()
cnn_model.add(Conv2D(filters = 64, kernel_size = (3,3), activation = "relu", input_shape = Input_shape ))
cnn_model.add(Conv2D(filters = 64, kernel_size = (3,3), activation = "relu"))
cnn_model.add(MaxPooling2D(2,2))
cnn_model.add(Dropout(0.4))
cnn_model.add(Conv2D(filters = 128, kernel_size = (3,3), activation = "relu"))
cnn_model.add(Conv2D(filters = 128, kernel_size = (3,3), activation = "relu"))
cnn_model.add(MaxPooling2D(2,2))
cnn_model.add(Dropout(0.3))
cnn_model.add(Flatten())
cnn_model.add(Dense(units = 512, activation = "relu"))
cnn_model.add(Dense(units = 512, activation = "relu"))
cnn_model.add(Dense(units = 10, activation = "softmax"))
history = cnn_model.fit(X_train, y_train, batch_size = 32, epochs = 1,
                        shuffle = True, callbacks = [cp_callback])
Stack trace:
AttributeError Traceback (most recent call last)
<ipython-input-19-35c1db9636b7> in <module>()
----> 1 history = cnn_model.fit(X_train, y_train, batch_size = 32,epochs = 1, shuffle = True, callbacks = [cp_callback])
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/callbacks.py in on_train_begin(self, logs)
903 def on_train_begin(self, logs=None):
904 # pylint: disable=protected-access
--> 905 if self.model._in_multi_worker_mode():
906 # MultiWorkerTrainingState is used to manage the training state needed
907 # for preemption-recovery of a worker in multi-worker training.
AttributeError: 'Sequential' object has no attribute '_in_multi_worker_mode'

I've recently faced the same issue. Instead of
from tensorflow.keras.callbacks import ModelCheckpoint
use
from keras.callbacks import ModelCheckpoint

Check your TensorFlow version. You actually only need to synchronize your imports: check whether all of your imports use
from keras import ...
or
from tensorflow.keras import ...
and use only one of the two styles for your Keras imports. Mixing both at the same time can cause collisions between the libraries.
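For example, a minimal sketch of the question's checkpoint setup with every import drawn from tensorflow.keras (the layer list and names such as Input_shape are assumed from the question's code):

import os
import tensorflow as tf
from tensorflow.keras.models import Sequential   # not: from keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense

checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# The model built from these layers and the callback below now come from the same
# package, so the callback finds the private attributes (like _in_multi_worker_mode)
# it expects on the model.
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1)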

Instead of
tf.keras.callbacks.ModelCheckpoint
in your model building process, you can use
from keras.callbacks import ModelCheckpoint
in order to import ModelCheckpoint, and then just use ModelCheckpoint in the later code.

Please check whether your version of TensorFlow matches the latest one. In my case the error was solved when I updated it to 2.1.0.
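A quick way to check the installed version, and to upgrade it in a pip-based environment such as Colab:

import tensorflow as tf
print(tf.__version__)  # e.g. should print 2.1.0 or later for this fix

# in a notebook cell:
# !pip install --upgrade tensorflow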

Related

trying to calibrate keras model

I'm trying to calibrate my CNN model with scikit-learn's CalibratedClassifierCV. I tried to wrap it as a KerasClassifier and to override the predict function, but without success.
Could someone tell me what I did wrong?
This is the model code:
def create_model():
    model = Sequential()
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.20))
    model.add(Dense(24, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    return model
This is me trying to calibrate it:
model = KerasClassifier(build_fn=create_model,epochs=5, batch_size=128,validation_data=(evalX_cnn, eval_y_cnn))
model.fit(trainX_cnn, train_y_cnn)
model_c = CalibratedClassifierCV(base_estimator=model, cv='prefit')
model_c.fit(valX_cnn, val_y_cnn)
The output:
-------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-19-3d3ce9ce4fca> in <module>
----> 1 model_c.fit(np.array(valX_cnn), np.array(val_y_cnn))
~\anaconda3\lib\site-packages\sklearn\calibration.py in fit(self, X, y, sample_weight)
286 pred_method, method_name = _get_prediction_method(base_estimator)
287 n_classes = len(self.classes_)
--> 288 predictions = _compute_predictions(pred_method, method_name, X, n_classes)
289
290 calibrated_classifier = _fit_calibrator(
~\anaconda3\lib\site-packages\sklearn\calibration.py in _compute_predictions(pred_method, method_name, X, n_classes)
575 (X.shape[0], 1).
576 """
--> 577 predictions = pred_method(X=X)
578
579 if method_name == "decision_function":
TypeError: predict_proba() missing 1 required positional argument: 'x'
valX_cnn and val_y_cnn are of type np.array.
I even tried to override the method:
keras.models.Model.predict_proba = keras.models.Model.predict
The problem is that predict_proba from KerasClassifier requires x as input, while the predict_proba method from sklearn accepts X as its input argument (note the difference: X is not x).
You can simply work around the problem by wrapping KerasClassifier in a new class that corrects the predict_proba method.
import numpy as np
import tensorflow as tf
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.calibration import CalibratedClassifierCV

samples, classes = 100, 3
X = np.random.uniform(0, 1, (samples, 28, 28, 1))
Y = tf.keras.utils.to_categorical(np.random.randint(0, classes, (samples)))

class MyKerasClassifier(KerasClassifier):
    def predict_proba(self, X):
        return self.model.predict(X)

model = MyKerasClassifier(build_fn=create_model, epochs=3, batch_size=128)
model.fit(X, Y)
model_c = CalibratedClassifierCV(base_estimator=model, cv='prefit')
model_c.fit(X, Y)
The wrappers are going to be deprecated; from tensorflow>=2.7.0 you might want to use scikeras instead.
Package repo: https://github.com/adriangb/scikeras
Code example:
import numpy as np
from sklearn.datasets import make_classification
from tensorflow import keras
from scikeras.wrappers import KerasClassifier

X, y = make_classification(1000, 20, n_informative=10, random_state=0)
X = X.astype(np.float32)
y = y.astype(np.int64)

def get_model(hidden_layer_dim, meta):
    # note that meta is a special argument that will be
    # handed a dict containing input metadata
    n_features_in_ = meta["n_features_in_"]
    X_shape_ = meta["X_shape_"]
    n_classes_ = meta["n_classes_"]
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(n_features_in_, input_shape=X_shape_[1:]))
    model.add(keras.layers.Activation("relu"))
    model.add(keras.layers.Dense(hidden_layer_dim))
    model.add(keras.layers.Activation("relu"))
    model.add(keras.layers.Dense(n_classes_))
    model.add(keras.layers.Activation("softmax"))
    return model

clf = KerasClassifier(
    get_model,
    loss="sparse_categorical_crossentropy",
    hidden_layer_dim=100,
)
clf.fit(X, y)
y_proba = clf.predict_proba(X)
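Because the scikeras wrapper implements the standard scikit-learn estimator API (its predict_proba takes an uppercase X), it should plug straight into CalibratedClassifierCV without any custom wrapper class. A hedged sketch, reusing the clf fitted above:

from sklearn.calibration import CalibratedClassifierCV

# clf was already fitted, so cv='prefit' only fits the calibrator on top of it;
# ideally pass held-out data here rather than the training set
model_c = CalibratedClassifierCV(base_estimator=clf, cv='prefit')
model_c.fit(X, y)
calibrated_proba = model_c.predict_proba(X)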

Opencv readNetFromTensorflow fails with Unknown enumeration DT_VARIANT

I trained a Keras model and saved it as a .pb, as in the script below.
import sys
import os
import tensorflow.compat.v1 as tf
from keras import backend as K
from tensorflow.python.keras.backend import set_session
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dropout, Flatten, Dense, Activation
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras import callbacks
import time
def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
    """
    Freezes the state of a session into a pruned computation graph.

    Creates a new computation graph where variable nodes are replaced by
    constants taking their current value in the session. The new graph will be
    pruned so subgraphs that are not necessary to compute the requested
    outputs are removed.
    @param session The TensorFlow session to be frozen.
    @param keep_var_names A list of variable names that should not be frozen,
                          or None to freeze all the variables in the graph.
    @param output_names Names of the relevant graph outputs.
    @param clear_devices Remove the device directives from the graph for better portability.
    @return The frozen graph definition.
    """
    graph = session.graph
    with graph.as_default():
        freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
        output_names = output_names or []
        output_names += [v.op.name for v in tf.global_variables()]
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            for node in input_graph_def.node:
                node.device = ''
        frozen_graph = tf.graph_util.convert_variables_to_constants(
            session, input_graph_def, output_names, freeze_var_names)
        return frozen_graph
start = time.time()
tf.compat.v1.disable_eager_execution()
DEV = False
argvs = sys.argv
argc = len(argvs)
if argc > 1 and (argvs[1] == "--development" or argvs[1] == "-d"):
    DEV = True
epochs = 2
train_data_path = 'data/train'
validation_data_path = 'data/test'
"""
Parameters
"""
img_width, img_height = 150, 150
batch_size = 32
samples_per_epoch = 24
validation_steps = 24
nb_filters1 = 32
nb_filters2 = 64
conv1_size = 3
conv2_size = 2
pool_size = 2
classes_num = 2
lr = 0.0004
model = Sequential()
model.add(Convolution2D(nb_filters1, conv1_size, conv1_size, padding ="same", input_shape=(img_width, img_height, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(Convolution2D(nb_filters2, conv2_size, conv2_size, padding ="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(classes_num, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(learning_rate=lr),
              metrics=['accuracy'])
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    train_data_path,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    validation_data_path,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical')
"""
Tensorboard log
"""
log_dir = './tf-log/'
tb_cb = callbacks.TensorBoard(log_dir=log_dir, histogram_freq=0)
cbks = [tb_cb]
model.fit(
    train_generator,
    epochs=epochs,
    validation_data=validation_generator,
    callbacks=cbks)
target_dir = './models/'
if not os.path.exists(target_dir):
    os.mkdir(target_dir)
model.save('./mymodels/model')
pred_node_names = [None]
pred = [None]
for i in range(1):
    pred_node_names[i] = "output_node" + str(i)
    pred[i] = tf.identity(model.outputs[i], name=pred_node_names[i])
frozen_graph = freeze_session(tf.keras.backend.get_session(), output_names=[out.op.name for out in model.outputs])
tf.train.write_graph(frozen_graph, './', 'xor.pbtxt', as_text=True)
tf.train.write_graph(frozen_graph, './', 'xor.pb', as_text=False)
# Calculate execution time
end = time.time()
dur = end - start
if dur < 60:
    print("Execution Time:", dur, "seconds")
elif dur > 60 and dur < 3600:
    dur = dur / 60
    print("Execution Time:", dur, "minutes")
else:
    dur = dur / (60 * 60)
    print("Execution Time:", dur, "hours")
After that, I want to load the model in OpenCV to perform predictions, but readNetFromTensorflow() fails with the following error:
[libprotobuf ERROR D:\a\opencv-python\opencv-python\opencv\3rdparty\protobuf\src\google\protobuf\text_format.cc:292] Error parsing text-format opencv_tensorflow.GraphDef: 700:9: Unknown enumeration value of "DT_VARIANT" for field "type".
Traceback (most recent call last):
File "C:\Users\admin\Downloads\Image-Classification-by-Keras-and-Tensorflow-master\Using Tensorflow\classify_opencv.py", line 4, in <module>
tensorflowNet = cv2.dnn.readNetFromTensorflow('xor.pb','xor.pbtxt')
cv2.error: OpenCV(4.5.4-dev) D:\a\opencv-python\opencv-python\opencv\modules\dnn\src\tensorflow\tf_io.cpp:54: error: (-2:Unspecified error) FAILED: ReadProtoFromTextFile(param_file, param). Failed to parse GraphDef file: xor.pbtxt in function 'cv::dnn::ReadTFNetParamsFromTextFileOrDie
I went through the .pbtxt file; it indeed contains the DT_VARIANT type, which I guess is not defined in the third-party protobuf bundled with OpenCV.
How do I solve this error?

Too many failed attempts to build model

I am currently using kerastuner (1.0.0) to fine-tune hyperparameters in a CNN, and it keeps displaying the error 'Too many failed attempts to build model.'. I have tried all the other available solutions, including those on Stack Overflow, but none of them works. Does my model have too many parameters to tune?
from tensorflow.keras.datasets import fashion_mnist
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, Conv2D, Dropout, MaxPooling2D
from tensorflow.keras.initializers import LecunNormal
!pip install keras-tuner==1.0.0
from kerastuner.tuners import RandomSearch
from kerastuner import HyperModel
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = x_test.reshape(-1, 28, 28, 1)
class MCDropout(tf.keras.layers.Dropout):
    def call(self, inputs):
        return super().call(inputs, training=True)
NUM_CLASSES = 10
KERNEL_SIZE = (3, 3)
class CNNHyperModel(HyperModel):
    def __init__(self, num_classes, kernel_size):
        self.num_classes = num_classes
        self.kernel_size = kernel_size

    def build_model(self, hp):
        model = keras.Sequential()
        model.add(Conv2D(ilters=hp.Choice('input_units', values=[32, 16], default=16),
                         kernel_size=(self.kernel_size),
                         activation='selu',
                         kernel_intializer=LecunNormal,
                         input_shape=x_train.shape[1:]))
        model.add(MCDropout(rate=hp.Float('dropout_0', min_value=0.0, max_value=0.2, default=0.1, step=0.1)))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        for i in range(hp.Choice('n_filters', 1, 4)):
            model.add(Conv2D(hp.Choice(f'num_filters_{i}', values=[32, 16, 64], default=32),
                             kernel_size=(self.kernel_size),
                             activation='selu',
                             kernel_intializer=LecunNormal))
            model.add(MCDropout(hp.Float(f'dropout_{i}', min_value=0.0, max_value=0.2, default=0.1, step=0.1)))
            model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(units=hp.Choice('units', values=[128, 256, 512, 1024], defaults=512), activation='selu'))
        model.add(MCDropout(rate=hp.Float('dropout_5', min_value=0.3, max_value=0.7, default=0.4, step=0.1)))
        model.add(Dense(self.num_classes, activation='softmax'))
        model.compile(optimizer=keras.optimizers.Nadam(hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])),
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        return model
hypermodel = CNNHyperModel(num_classes=NUM_CLASSES, kernel_size = KERNEL_SIZE)
SEED = 1
tuner = RandomSearch(
    hypermodel,
    objective='val_accuracy',
    max_trials=1,
    executions_per_trial=1
)
tuner.search(x=x_train,
             y=y_train,
             epochs=1,
             batch_size=64,
             validation_data=(x_test, y_test))
I run this code and it keeps displaying 'Too many failed attempts to build model.' (by the way, I am writing this on Google Colab). Is there any way to fix it?
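Keras Tuner raises 'Too many failed attempts to build model.' when the model-building function itself keeps throwing exceptions during trials, and the snippet above contains several bugs that would do exactly that: Flatten is never imported, the HyperModel method must be named build (not build_model), Conv2D receives ilters and kernel_intializer instead of filters and kernel_initializer, an integer range needs hp.Int rather than hp.Choice('n_filters', 1, 4), and Dense uses defaults instead of default. A hedged sketch of a corrected hypermodel, reusing the question's names (MCDropout and x_train are as defined above; the string initializer and the added padding='same' arguments are assumptions of this sketch, the latter so that deeper trials do not shrink the 28x28 feature maps below the pooling size):

from tensorflow import keras
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten
from kerastuner import HyperModel

class CNNHyperModel(HyperModel):
    def __init__(self, num_classes, kernel_size):
        self.num_classes = num_classes
        self.kernel_size = kernel_size

    def build(self, hp):  # Keras Tuner calls build(); a method named build_model() is never invoked
        model = keras.Sequential()
        model.add(Conv2D(filters=hp.Choice('input_units', values=[32, 16], default=16),
                         kernel_size=self.kernel_size,
                         activation='selu',
                         kernel_initializer='lecun_normal',
                         input_shape=x_train.shape[1:]))
        model.add(MCDropout(rate=hp.Float('dropout_0', min_value=0.0, max_value=0.2, default=0.1, step=0.1)))
        model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
        for i in range(hp.Int('n_filters', 1, 4)):  # hp.Int samples an integer range
            model.add(Conv2D(hp.Choice(f'num_filters_{i}', values=[32, 16, 64], default=32),
                             kernel_size=self.kernel_size,
                             activation='selu',
                             kernel_initializer='lecun_normal',
                             padding='same'))
            model.add(MCDropout(hp.Float(f'dropout_{i}', min_value=0.0, max_value=0.2, default=0.1, step=0.1)))
            model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
        model.add(Flatten())
        model.add(Dense(units=hp.Choice('units', values=[128, 256, 512, 1024], default=512), activation='selu'))
        model.add(MCDropout(rate=hp.Float('dropout_5', min_value=0.3, max_value=0.7, default=0.4, step=0.1)))
        model.add(Dense(self.num_classes, activation='softmax'))
        model.compile(optimizer=keras.optimizers.Nadam(hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])),
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        return model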

Why am I getting an error while using ResNet50 for transfer learning?

So I am trying to use ResNet50 for transfer learning with my Keras model (i.e. Trinity), but the problem is I am getting an error when I try to execute Trinity.fit().
The exact error: "ValueError: Input 0 of layer sequential_8 is incompatible with the layer: expected ndim=4, found ndim=2. Full shape received: [None, 2048]"
What is the reason for this error? How can I solve it?
Here is the Python code:
1) from keras.models import Sequential
from keras.layers import Conv2D,MaxPooling2D
from keras.layers import Activation, Dense, Flatten, Dropout
from keras.preprocessing.image import ImageDataGenerator
import os
2) import keras
import wandb
from wandb.keras import WandbCallback
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.applications.resnet50 import ResNet50, decode_predictions,preprocess_input
import matplotlib.pyplot as plt
3) training_dir = '../input/fruits/fruits-360/Training/'
validation_dir = '../input/fruits/fruits-360/Test/'
test_dir = '../input/fruits/fruits-360/test-multiple_fruits/'
4) image_size = 224
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
train_generator = data_generator.flow_from_directory(
    '../input/fruits/fruits-360/Training/',
    target_size=(image_size, image_size),
    batch_size=24,
    class_mode='categorical')
validation_generator = data_generator.flow_from_directory(
    '../input/fruits/fruits-360/Test/',
    target_size=(image_size, image_size),
    batch_size=24,
    class_mode='categorical')
#OUTPUT
#[Found 67692 images belonging to 131 classes.
#Found 22688 images belonging to 131 classes.]
5) resnet_model = ResNet50(weights="imagenet")
6) x_train_preprocessed = train_generator
x_test_preprocessed = validation_generator
7) last_layer = resnet_model.get_layer("avg_pool")
resnet_layers = keras.Model(inputs=resnet_model.inputs, outputs=last_layer.output)
resnet_layers.summary()
8) x_train_features = resnet_layers.predict(x_train_preprocessed)
x_test_features = resnet_layers.predict(x_test_preprocessed)
9) Trinity = Sequential()
Trinity.add(Conv2D(filters = 16, kernel_size = 2,input_shape=(224,224,3),padding='same'))
Trinity.add(Activation('relu'))
Trinity.add(MaxPooling2D(pool_size=2))
Trinity.add(Conv2D(filters = 32,kernel_size = 2,activation= 'relu',padding='same'))
Trinity.add(MaxPooling2D(pool_size=2))
Trinity.add(Conv2D(filters = 64,kernel_size = 2,activation= 'relu',padding='same'))
Trinity.add(MaxPooling2D(pool_size=2))
Trinity.add(Conv2D(filters = 128,kernel_size = 2,activation= 'relu',padding='same'))
Trinity.add(MaxPooling2D(pool_size=2))
Trinity.add(Dropout(0.3))
Trinity.add(Flatten())
Trinity.add(Dense(132))
Trinity.add(Activation('relu'))
Trinity.add(Dropout(0.4))
Trinity.add(Dense(131,activation = 'softmax'))
Trinity.compile(loss='categorical_crossentropy',optimizer='rmsprop',metrics=['accuracy'])
10) Trinity.fit(x_train_features,epochs=50,validation_data=x_test_features)
#OUTPUT OF 7)
#OUTPUT OF 10)
You should add include_top=False to the ResNet instance, then add your model on top of ResNet; that way it should work. Here is an example:
from tensorflow.keras.applications import ResNet50V2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import Adam

base_model = ResNet50V2(include_top=False, weights="imagenet", input_shape=(224,224,3), pooling="avg")
base_model.summary()
model2 = Sequential()
model2.add(base_model)
model2.add(Dense(64, activation="relu"))
model2.add(Dropout(0.2))
model2.add(Dense(64, activation="relu"))
model2.add(Dropout(0.2))
model2.add(Dense(32, activation="relu"))
model2.add(Dropout(0.2))
model2.add(Dense(32, activation="relu"))
model2.add(Dropout(0.2))
model2.add(Dense(1, activation="sigmoid"))
base_model.trainable = False  # freeze the pretrained weights
model2.summary()
model2.compile(optimizer=Adam(), loss="binary_crossentropy", metrics=["accuracy"])
And here is another example from the Keras website: https://keras.io/examples/vision/image_classification_efficientnet_fine_tuning/
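For the question's fruits-360 setup specifically (131 classes with categorical labels coming from the generators), the same pattern would look roughly like the sketch below; this is a hedged adaptation of the answer above, not code from the original thread:

from tensorflow.keras.applications import ResNet50V2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import Adam

base_model = ResNet50V2(include_top=False, weights="imagenet", input_shape=(224, 224, 3), pooling="avg")
base_model.trainable = False  # freeze the pretrained backbone

Trinity = Sequential()
Trinity.add(base_model)
Trinity.add(Dense(132, activation="relu"))
Trinity.add(Dropout(0.4))
Trinity.add(Dense(131, activation="softmax"))  # one unit per fruit class
Trinity.compile(optimizer=Adam(), loss="categorical_crossentropy", metrics=["accuracy"])

# train on the generators directly instead of on precomputed avg_pool features,
# which is what produced the ndim=2 [None, 2048] input in the question
Trinity.fit(train_generator, epochs=50, validation_data=validation_generator)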

Session keyword arguments are not support during eager execution. You passed: {'learning_rate': 1e-05}

I want to conduct hyperparameter tuning for the learning rate. However, I got an error that I do not know how to solve.
I used the tensorflow.keras package.
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.datasets.mnist import load_data
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Flatten, BatchNormalization, Dropout, Dense)
from keras.wrappers.scikit_learn import KerasClassifier
(x_train_all, y_train_all), (x_test, y_test) = load_data()
x_train, x_valid, x_test = x_train_all[5000:]/255.0, x_train_all[:5000]/255.0, x_test/255.0
y_train, y_valid = y_train_all[5000:], y_train_all[:5000]
tf.cast(x_train, tf.float32)
tf.cast(x_valid, tf.float32)
tf.cast(x_test, tf.float32)
def my_model(learning_rate = 5e-3):
    model = Sequential([
        Flatten(input_shape = (28, 28)),
        BatchNormalization(),
        Dropout(rate = 0.2),
        Dense(300, activation = "elu", kernel_initializer = "he_normal"),
        Dropout(rate = 0.2),
        BatchNormalization(),
        Dense(300, activation = "elu", kernel_initializer = "he_normal"),
        Dropout(rate = 0.2),
        BatchNormalization(),
        Dense(10, activation = "softmax", kernel_initializer = "he_normal")])
    opt = Adam(lr = learning_rate)
    model.summary()
    model.compile(loss = "sparse_categorical_crossentropy", optimizer = opt, learning_rate = learning_rate, metrics = ["accuracy"])
    return model
from sklearn.model_selection import RandomizedSearchCV
keras_classifier = KerasClassifier(my_model)
param_distribs = {"learning_rate": [1e-5, 5e-5, 1e-4, 5e-4, 1e-3, 5e-3]}
rnd_search_cv = RandomizedSearchCV(keras_classifier, param_distribs, n_iter = 10, cv = 3)
rnd_search_cv.fit(x_train, y_train, epochs = 10, validation_data = (x_valid, y_valid))
I got the following ValueError:
ValueError: Session keyword arguments are not support during eager
execution. You passed: {'learning_rate': 1e-05}
Mentioning the solution in this section (even though it is present in the comments section), for the benefit of the community.
The issue is resolved by removing learning_rate = learning_rate from model.compile.
The correct code is mentioned below:
def my_model(learning_rate = 5e-3):
    model = Sequential([
        Flatten(input_shape = (28, 28)),
        BatchNormalization(),
        Dropout(rate = 0.2),
        Dense(300, activation = "elu", kernel_initializer = "he_normal"),
        Dropout(rate = 0.2),
        BatchNormalization(),
        Dense(300, activation = "elu", kernel_initializer = "he_normal"),
        Dropout(rate = 0.2),
        BatchNormalization(),
        Dense(10, activation = "softmax", kernel_initializer = "he_normal")])
    opt = Adam(lr = learning_rate)
    model.summary()
    model.compile(loss = "sparse_categorical_crossentropy", optimizer = opt, metrics = ["accuracy"])
    return model
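As a side note (an observation about the Keras API, not part of the original answer): compile forwards unrecognized keyword arguments toward the backend session machinery, which is why the stray learning_rate is rejected once eager execution is on. The learning rate belongs on the optimizer itself, and recent TensorFlow releases also deprecate the lr alias in favor of learning_rate, so inside my_model one would write:

from tensorflow.keras.optimizers import Adam

opt = Adam(learning_rate = learning_rate)  # preferred over the deprecated lr= alias used above
model.compile(loss = "sparse_categorical_crossentropy", optimizer = opt, metrics = ["accuracy"])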
