I'm trying to run the Saliency Map Method using CleverHans. My model needs to be a Keras Sequential model, so I searched and found cleverhans.utils_keras, where Sequential uses KerasModelWrapper. But I still get an error saying the model should be a cleverhans model. Here's the stack trace:
TypeError                                 Traceback (most recent call last)
<ipython-input> in <module>
      2 # https://github.com/tensorflow/cleverhans/blob/master/cleverhans/utils_keras.py
      3
----> 4 jsma = SaliencyMapMethod(model, sess=sess)
      5 jsma_params = {'theta': 10.0, 'gamma': 0.15,
      6                'clip_min': 0., 'clip_max': 1.,

c:\users\jeredriq\appdata\local\programs\python\python35\lib\site-packages\cleverhans\attacks\__init__.py in __init__(self, model, sess, dtypestr, **kwargs)
    911     """
    912
--> 913     super(SaliencyMapMethod, self).__init__(model, sess, dtypestr, **kwargs)
    914
    915     self.feedable_kwargs = ('y_target',)

c:\users\jeredriq\appdata\local\programs\python\python35\lib\site-packages\cleverhans\attacks\__init__.py in __init__(self, model, sess, dtypestr, **kwargs)
     55
     56     if not isinstance(model, Model):
---> 57       raise TypeError("The model argument should be an instance of"
     58                       " the cleverhans.model.Model class.")
     59

TypeError: The model argument should be an instance of the cleverhans.model.Model class.
And here's my code
import numpy as np
from keras import backend
import tensorflow as tf
from keras.callbacks import ModelCheckpoint
from matplotlib import gridspec
from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix, classification_report
from keras.datasets import mnist
from keras.layers import Dense, Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
from cleverhans.attacks import FastGradientMethod
from cleverhans.attacks import BasicIterativeMethod
from cleverhans.attacks import SaliencyMapMethod
from cleverhans.attacks import DeepFool
from cleverhans.utils_keras import Sequential
sess = backend.get_session()
x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
y = tf.placeholder(tf.float32, shape=(None, 10))
# Managing Mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train/=255
X_test/=255
y_train_cat = np_utils.to_categorical(y_train)
y_test_cat = np_utils.to_categorical(y_test)
num_classes = y_test_cat.shape[1]
### Defining Model ###
model = Sequential() # <----- I use Sequential from CleverHans
model.add(Conv2D(32, (5, 5), input_shape=(28,28,1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
history = model.fit(X_train, y_train_cat, epochs=10, batch_size=1024, verbose=1, validation_split=0.7)
### And the problem part ###
jsma = SaliencyMapMethod(model, sess=sess) # <---- Where I get the exception
jsma_params = {'theta': 10.0, 'gamma': 0.15,
'clip_min': 0., 'clip_max': 1.,
'y_target': None}
sample_size = 20
one_hot_target = np.zeros((sample_size, 10), dtype=np.float32)
one_hot_target[:, 1] = 1
jsma_params['y_target'] = one_hot_target
X_test_small = X_test[0:sample_size,:]
y_test_small = y_test[0:sample_size]
adv_x = jsma.generate_np(X_test_small, **jsma_params)
I've asked the same question on GitHub too.
The Sequential defined in cleverhans.utils_keras is still keras' Sequential model. What is needed is cleverhans.model.Model. A keras model can be wrapped to provide this behaviour by using the KerasModelWrapper class.
Replace
jsma = SaliencyMapMethod(model, sess=sess)
with
jsma = SaliencyMapMethod(KerasModelWrapper(model), sess=sess)
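For completeness, a minimal sketch of the fixed call, assuming the rest of the code in the question stays the same:

from cleverhans.utils_keras import KerasModelWrapper

wrapped_model = KerasModelWrapper(model)            # adapts the Keras model to cleverhans.model.Model
jsma = SaliencyMapMethod(wrapped_model, sess=sess)  # now passes the isinstance check

The model itself can stay a plain Keras Sequential; the wrapper is what satisfies the type check.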
Related
I am currently using Keras Tuner (1.0.0) to fine-tune hyperparameters in a CNN, and it keeps displaying the error 'Too many failed attempts to build model.' I have tried all the other available solutions, including those on Stack Overflow, but none of them works. Does my model have too many parameters to tune?
from tensorflow.keras.datasets import fashion_mnist
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, Conv2D, Dropout, MaxPooling2D
from tensorflow.keras.initializers import LecunNormal
!pip install keras-tuner==1.0.0
from kerastuner.tuners import RandomSearch
from kerastuner import HyperModel
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = x_test.reshape(-1, 28, 28, 1)
class MCDropout(tf.keras.layers.Dropout):
    def call(self, inputs):
        return super().call(inputs, training=True)
NUM_CLASSES = 10
KERNEL_SIZE = (3, 3)
class CNNHyperModel(HyperModel):
    def __init__(self, num_classes, kernel_size):
        self.num_classes = num_classes
        self.kernel_size = kernel_size

    def build_model(self, hp):
        model = keras.Sequential()
        model.add(Conv2D(ilters=hp.Choice('input_units', values=[32, 16], default=16),
                         kernel_size=(self.kernel_size),
                         activation='selu',
                         kernel_intializer = LecunNormal,
                         input_shape=x_train.shape[1:]))
        model.add(MCDropout(rate=hp.Float('dropout_0',min_value=0.0,max_value=0.2,default=0.1,step=0.1)))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        for i in range(hp.Choice('n_filters', 1, 4)):
            model.add(Conv2D(hp.Choice(f'num_filters_{i}', values=[32, 16, 64], default=32),
                             kernel_size=(self.kernel_size),
                             activation='selu',
                             kernel_intializer = LecunNormal))
            model.add(MCDropout(hp.Float(f'dropout_{i}',min_value=0.0,max_value=0.2,default=0.1,step=0.1)))
            model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(units=hp.Choice('units',values = [128, 256, 512, 1024], defaults = 512),activation='selu'))
        model.add(MCDropout(rate=hp.Float('dropout_5',min_value=0.3,max_value=0.7,default=0.4,step=0.1)))
        model.add(Dense(self.num_classes, activation='softmax'))
        model.compile(optimizer=keras.optimizers.Nadam(hp.Choice('learning_rate',
                                                                 values=[1e-2, 1e-3, 1e-4])),
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        return model
hypermodel = CNNHyperModel(num_classes=NUM_CLASSES, kernel_size = KERNEL_SIZE)
SEED = 1
tuner = RandomSearch(
    hypermodel,
    objective='val_accuracy',
    max_trials=1,
    executions_per_trial=1
)
tuner.search(x=x_train,
             y=y_train,
             epochs=1,
             batch_size=64,
             validation_data=(x_test, y_test))
I run this code and it keeps displaying 'Too many failed attempts to build model'. (By the way, I am writing this on Google Colab.) Is there any way to fix it?
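For what it's worth, Keras Tuner raises 'Too many failed attempts to build model' when the build function keeps throwing exceptions during trials. Below is a hedged sketch of a build() that avoids the likely failure points in the code above (assumptions on my part: the hook must be named build rather than build_model, Conv2D takes filters/kernel_initializer rather than ilters/kernel_intializer, the Dense hp.Choice keyword is default rather than defaults, an integer range needs hp.Int rather than hp.Choice, Flatten has to be imported, and the block count is capped so a 28x28 input never pools below 1x1):

from tensorflow.keras.layers import Flatten  # missing from the original imports

class CNNHyperModel(HyperModel):
    def __init__(self, num_classes, kernel_size):
        self.num_classes = num_classes
        self.kernel_size = kernel_size

    def build(self, hp):  # the tuner calls build(), not build_model()
        model = keras.Sequential()
        model.add(Conv2D(filters=hp.Choice('input_units', values=[32, 16], default=16),
                         kernel_size=self.kernel_size,
                         activation='selu',
                         kernel_initializer='lecun_normal',
                         input_shape=x_train.shape[1:]))
        model.add(MCDropout(rate=hp.Float('dropout_0', min_value=0.0,
                                          max_value=0.2, default=0.1, step=0.1)))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        # hp.Int, not hp.Choice, expresses an integer range; capped at 3 blocks
        # so the 28x28 feature map is never pooled below 1x1
        for i in range(hp.Int('n_conv_blocks', 1, 3)):
            # padding='same' keeps small feature maps large enough to pool
            model.add(Conv2D(hp.Choice('num_filters_' + str(i), values=[16, 32, 64], default=32),
                             kernel_size=self.kernel_size,
                             activation='selu',
                             kernel_initializer='lecun_normal',
                             padding='same'))
            model.add(MCDropout(rate=hp.Float('dropout_block_' + str(i), min_value=0.0,
                                              max_value=0.2, default=0.1, step=0.1)))
            model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(units=hp.Choice('units', values=[128, 256, 512, 1024], default=512),
                        activation='selu'))
        model.add(MCDropout(rate=hp.Float('dropout_top', min_value=0.3,
                                          max_value=0.7, default=0.4, step=0.1)))
        model.add(Dense(self.num_classes, activation='softmax'))
        model.compile(optimizer=keras.optimizers.Nadam(
                          hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])),
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        return model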
So I am trying to use ResNet50 for transfer learning with my Keras model (named Trinity), but the problem is I am getting an error when I try to execute Trinity.fit().
The exact error is "ValueError: Input 0 of layer sequential_8 is incompatible with the layer: expected ndim=4, found ndim=2. Full shape received: [None, 2048]".
What is the reason for this error? How can I solve it?
Here is the python code:
1) from keras.models import Sequential
from keras.layers import Conv2D,MaxPooling2D
from keras.layers import Activation, Dense, Flatten, Dropout
from keras.preprocessing.image import ImageDataGenerator
import os
2) import keras
import wandb
from wandb.keras import WandbCallback
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.applications.resnet50 import ResNet50, decode_predictions,preprocess_input
import matplotlib.pyplot as plt
3) training_dir = '../input/fruits/fruits-360/Training/'
validation_dir = '../input/fruits/fruits-360/Test/'
test_dir = '../input/fruits/fruits-360/test-multiple_fruits/'
4) image_size = 224
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
train_generator = data_generator.flow_from_directory(
'../input/fruits/fruits-360/Training/',
target_size=(image_size, image_size),
batch_size=24,
class_mode='categorical')
validation_generator = data_generator.flow_from_directory(
'../input/fruits/fruits-360/Test/',
target_size=(image_size, image_size),
batch_size=24,
class_mode='categorical')
#OUTPUT
#[Found 67692 images belonging to 131 classes.
#Found 22688 images belonging to 131 classes.]
5) resnet_model = ResNet50(weights="imagenet")
6) x_train_preprocessed = train_generator
x_test_preprocessed = validation_generator
7) last_layer = resnet_model.get_layer("avg_pool")
resnet_layers = keras.Model(inputs=resnet_model.inputs, outputs=last_layer.output)
resnet_layers.summary()
8) x_train_features = resnet_layers.predict(x_train_preprocessed)
x_test_features = resnet_layers.predict(x_test_preprocessed)
9) Trinity = Sequential()
Trinity.add(Conv2D(filters = 16, kernel_size = 2,input_shape=(224,224,3),padding='same'))
Trinity.add(Activation('relu'))
Trinity.add(MaxPooling2D(pool_size=2))
Trinity.add(Conv2D(filters = 32,kernel_size = 2,activation= 'relu',padding='same'))
Trinity.add(MaxPooling2D(pool_size=2))
Trinity.add(Conv2D(filters = 64,kernel_size = 2,activation= 'relu',padding='same'))
Trinity.add(MaxPooling2D(pool_size=2))
Trinity.add(Conv2D(filters = 128,kernel_size = 2,activation= 'relu',padding='same'))
Trinity.add(MaxPooling2D(pool_size=2))
Trinity.add(Dropout(0.3))
Trinity.add(Flatten())
Trinity.add(Dense(132))
Trinity.add(Activation('relu'))
Trinity.add(Dropout(0.4))
Trinity.add(Dense(131,activation = 'softmax'))
Trinity.compile(loss='categorical_crossentropy',optimizer='rmsprop',metrics=['accuracy'])
10) Trinity.fit(x_train_features,epochs=50,validation_data=x_test_features)
#OUTPUT OF 7)
#OUTPUT OF 10)
You should add include_top=False to the ResNet instance, then add your own layers on top of ResNet; that way it should work. Here is an example:
from tensorflow.keras.applications import ResNet50V2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import Adam

base_model = ResNet50V2(include_top=False, weights="imagenet", input_shape=(224,224,3), pooling="avg")
base_model.summary()
model2 = Sequential()
model2.add(base_model)
model2.add(Dense(64, activation="relu"))
model2.add(Dropout(0.2))
model2.add(Dense(64, activation="relu"))
model2.add(Dropout(0.2))
model2.add(Dense(32, activation="relu"))
model2.add(Dropout(0.2))
model2.add(Dense(32, activation="relu"))
model2.add(Dropout(0.2))
model2.add(Dense(1, activation="sigmoid"))
base_model.trainable = False
model2.summary()
model2.compile(optimizer=Adam(), loss="binary_crossentropy", metrics=["accuracy"])
and here is another example from keras website: https://keras.io/examples/vision/image_classification_efficientnet_fine_tuning/
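Adapted to the fruits-360 setup in the question (assuming the 131 classes and the train_generator/validation_generator defined there), the same idea would look roughly like this; on older Keras versions you may need fit_generator instead of fit:

from keras.applications.resnet50 import ResNet50
from keras.models import Sequential
from keras.layers import Dense, Dropout

base_model = ResNet50(include_top=False, weights="imagenet",
                      input_shape=(224, 224, 3), pooling="avg")
base_model.trainable = False  # freeze the pretrained weights

Trinity = Sequential()
Trinity.add(base_model)                        # emits a (None, 2048) feature vector
Trinity.add(Dense(132, activation='relu'))
Trinity.add(Dropout(0.4))
Trinity.add(Dense(131, activation='softmax'))  # one unit per fruit class
Trinity.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                metrics=['accuracy'])

Trinity.fit(train_generator, epochs=50, validation_data=validation_generator)
# on older Keras: Trinity.fit_generator(train_generator, epochs=50,
#                                       validation_data=validation_generator)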
I tried to use Google Colab resources to save my CNN model weights, and I get this error. I tried googling it but nothing helps.
'Sequential' object has no attribute '_in_multi_worker_mode'
My code:
checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1)
cnn_model = Sequential()
cnn_model.add(Conv2D(filters = 64, kernel_size = (3,3), activation = "relu", input_shape = Input_shape ))
cnn_model.add(Conv2D(filters = 64, kernel_size = (3,3), activation = "relu"))
cnn_model.add(MaxPooling2D(2,2))
cnn_model.add(Dropout(0.4))
cnn_model = Sequential()
cnn_model.add(Conv2D(filters = 128, kernel_size = (3,3), activation = "relu"))
cnn_model.add(Conv2D(filters = 128, kernel_size = (3,3), activation = "relu"))
cnn_model.add(MaxPooling2D(2,2))
cnn_model.add(Dropout(0.3))
cnn_model.add(Flatten())
cnn_model.add(Dense(units = 512, activation = "relu"))
cnn_model.add(Dense(units = 512, activation = "relu"))
cnn_model.add(Dense(units = 10, activation = "softmax"))
history = cnn_model.fit(X_train, y_train, batch_size = 32,epochs = 1,
shuffle = True, callbacks = [cp_callback])
Stack trace:
AttributeError Traceback (most recent call last)
<ipython-input-19-35c1db9636b7> in <module>()
----> 1 history = cnn_model.fit(X_train, y_train, batch_size = 32,epochs = 1, shuffle = True, callbacks = [cp_callback])
4 frames
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/callbacks.py in on_train_begin(self, logs)
903 def on_train_begin(self, logs=None):
904 # pylint: disable=protected-access
--> 905 if self.model._in_multi_worker_mode():
906 # MultiWorkerTrainingState is used to manage the training state needed
907 # for preemption-recovery of a worker in multi-worker training.
AttributeError: 'Sequential' object has no attribute '_in_multi_worker_mode'
I've recently faced the same issue. Instead of
from tensorflow.keras.callbacks import ModelCheckpoint
use
from keras.callbacks import ModelCheckpoint
Check your TensorFlow version; you actually only need to synchronize your imports. Check whether all of your imports use
from keras import ...
or
from tensorflow.keras import ...
Use only one of the two for your Keras imports; using both at the same time can cause collisions between the libraries.
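For example, staying entirely inside tensorflow.keras (a sketch assuming the layers used in the question; the all-plain-keras version works the same way, as long as you do not mix the two):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
from tensorflow.keras.callbacks import ModelCheckpoint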
Instead of
tf.keras.callbacks.ModelCheckpoint
in your model building process, you can use
from keras.callbacks import ModelCheckpoint
in order to import ModelCheckpoint, and then just use ModelCheckpoint in the later code.
Please check whether your version of TensorFlow matches the latest one. In my case the error was solved when I updated it to 2.1.0.
I'm currently writing an ML vision program to classify pictures into categories.
My CNN doesn't learn at all and just guesses the output.
import os
import shutil
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
from openpyxl import load_workbook
import random
random.seed(40)
import numpy as np
np.random.seed(40) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
import tensorflow as tf
path_labels = '/Users/felix/Desktop/Daten/Labels.xlsx'
#import label data and construct label data frame
workbook = load_workbook(path_labels)
features = []
labels = []
for row in range(2, workbook['Sheet1'].max_row+1):
    cell_Bezeichnung = workbook['Sheet1']["{}{}".format('A', row)].value
    cell_Label = workbook['Sheet1']["{}{}".format("B", row)].value
    features.append(str(cell_Bezeichnung)+'.JPG')
    labels.append(str(cell_Label))
data = pd.DataFrame(data={'Datei': features, 'Label': labels})
data = shuffle(data)
data = data.reset_index(drop=True)
#one hot encoding
targets = data
targets = targets.drop('Datei', 1)
targets = pd.get_dummies(targets)
print(targets.head())
files = data.drop('Label', 1)
print (files.head())
from keras.preprocessing import image
from tqdm import tqdm
os.environ['KMP_DUPLICATE_LIB_OK']='True'
path_images = '/Users/felix/Desktop/Daten/Bilddaten_zugeschnitten/'
def path_to_tensor(img_path):
    # loads RGB image as PIL.Image.Image type
    img_path = path_images + img_path
    img = image.load_img(img_path.item(0), target_size=(224, 224))  # variant with more than 100x100 pixels
    #tf.image.rgb_to_grayscale(img, name=None)
    # convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)
    x = image.img_to_array(img)
    # convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor
    return np.expand_dims(x, axis=0)

def paths_to_tensor(img_paths):
    list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)]
    return np.vstack(list_of_tensors)
#ImageFile.LOAD_TRUNCATED_IMAGES = True
from IPython.display import display
from PIL import Image
# pre-process the data for Keras
tensors = paths_to_tensor(files.values).astype('float32')/255
testing_share = 0.3 #0.01
testing_index = tensors.shape[0] - round(tensors.shape[0]*testing_share)
x_test, y_test = tensors[testing_index:], targets[testing_index:]
x_train, y_train = tensors[:testing_index], targets[:testing_index]
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
from keras.layers import Conv2D, Conv3D, GlobalMaxPooling2D, GlobalAveragePooling2D, BatchNormalization, GlobalMaxPooling3D, AveragePooling2D
from keras.layers import Dropout, Flatten, Dense
from keras.models import Sequential
model = Sequential()
### Define architecture.
model.add(Conv2D(32, 3, strides=(1,1),padding="same",input_shape=(224,224,3), activation="relu"))
model.add(Conv2D(64, 3, strides=(2,2),padding="same", activation="relu"))
model.add(Conv2D(128, 3, strides=(3,3),padding="same", activation="relu"))
model.add(AveragePooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None))
model.add(GlobalMaxPooling2D(data_format=None))
model.add(Dropout(0,2))
model.add(Dense(128, activation="relu"))
model.add(Dense(y_test.shape[1], activation="softmax"))
# dropouts are layers that deactivate functions
# the network should learn to keep working when kernels die
model.summary()
# compile the model
import keras
from keras import optimizers
from keras.optimizers import SGD
from keras.optimizers import rmsprop
from keras.optimizers import adam
#optimizer = optimizers.sgd(lr=0.0001,) #0.1
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['accuracy'])
error = model.predict(x_test)
print(error)
[[0.25006285 0.24996501 0.25002083 0.24995136]
 [0.25008076 0.24997567 0.2500124  0.24993122]
 [0.2500582  0.24996252 0.2500291  0.2499501 ]
 [0.25007743 0.2499672  0.25001416 0.2499412 ]
 [0.25007534 0.24996328 0.25002202 0.24993943]
 ...
 [0.25008234 0.2499697  0.25001442 0.24993351]]
(the remaining rows of the 180x4 output all look the same: every class probability is approximately 0.25, i.e. a uniform guess over the four classes)
new_model = model.fit(x_train, y_train,
batch_size=32, epochs=20, verbose=1)
Fixes tried:
changed the LR from 0.01 to 0.00000001
changed Pixels from 20x20 to 1000x1000
changed batch_size
changed epochs
changed optimizer
changed activation from sigmoid to softmax
changed layer
added batch_normalization
added pooling layers
added augmentation
used different test sizes
Any kind of help is very appreciated!
Thank you for your time and help!
You are calling model.predict() before you call model.fit().
Model.fit is where the training occurs, so you are trying to predict with an untrained model. The fit call is the last line in your code -- move it so it comes immediately after your model.compile() call, before the predict.
It looks like your model doesn't have an Input layer, so it probably doesn't even see the images.
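Concretely, with the names from the question, the order should be roughly:

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=32, epochs=20, verbose=1)  # train first
predictions = model.predict(x_test)  # only then predict, with trained weights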
I am trying to run a Python repository for digit classification on the MNIST dataset in Nengo, but I am unable to get results due to this error: "AttributeError: 'Conv2D' object has no attribute 'subsample'". I have tried hard to get rid of this error but failed. Can anyone suggest a solution?
from __future__ import print_function
import os
os.environ['THEANO_FLAGS'] = 'device=gpu,floatX=float32'
import nengo
import nengo_ocl
import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import (
Dense, Dropout, Activation, Flatten, Convolution2D, AveragePooling2D)
from keras.layers.noise import GaussianNoise
from keras.utils import np_utils
import nengo
from nengo_extras.keras import (
load_model_pair, save_model_pair, SequentialNetwork, SoftLIF)
from nengo_extras.gui import image_display_function
np.random.seed(1)
filename = 'mnist_spiking_cnn'
# --- Load data
img_rows, img_cols = 28, 28
nb_classes = 10
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.astype('float32')/128 - 1
X_test = X_test.astype('float32')/128 - 1
# --- Train model
if not os.path.exists(filename + '.h5'):
    batch_size = 128
    nb_epoch = 6

    # number of convolutional filters to use
    nb_filters = 32
    # size of pooling area for max pooling
    nb_pool = 2
    # convolution kernel size
    nb_conv = 3

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    kmodel = Sequential()
    softlif_params = dict(
        sigma=0.002, amplitude=0.063, tau_rc=0.022, tau_ref=0.002)
    kmodel.add(GaussianNoise(0.1, input_shape=(1, img_rows, img_cols)))
    kmodel.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='valid'))
    kmodel.add(SoftLIF(**softlif_params))
    kmodel.add(Convolution2D(nb_filters, nb_conv, nb_conv))
    kmodel.add(SoftLIF(**softlif_params))
    kmodel.add(AveragePooling2D(pool_size=(nb_pool, nb_pool)))
    kmodel.add(Dropout(0.25))
    kmodel.add(Flatten())
    kmodel.add(Dense(128))
    kmodel.add(SoftLIF(**softlif_params))
    kmodel.add(Dropout(0.5))
    kmodel.add(Dense(nb_classes))
    kmodel.add(Activation('softmax'))

    kmodel.compile(loss='categorical_crossentropy',
                   optimizer='adadelta',
                   metrics=['accuracy'])

    kmodel.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
               verbose=1, validation_data=(X_test, Y_test))
    score = kmodel.evaluate(X_test, Y_test, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])

    save_model_pair(kmodel, filename, overwrite=True)
else:
    kmodel = load_model_pair(filename)
# --- Run model in Nengo
presentation_time = 0.2
model = nengo.Network()
with model:
    u = nengo.Node(nengo.processes.PresentInput(X_test, presentation_time))
    knet = SequentialNetwork(kmodel, synapse=nengo.synapses.Alpha(0.005))
    nengo.Connection(u, knet.input, synapse=None)

    input_p = nengo.Probe(u)
    output_p = nengo.Probe(knet.output)

    # --- image display
    image_shape = kmodel.input_shape[1:]
    display_f = image_display_function(image_shape)
    display_node = nengo.Node(display_f, size_in=u.size_out)
    nengo.Connection(u, display_node, synapse=None)

    # --- output spa display
    vocab_names = ['ZERO', 'ONE', 'TWO', 'THREE', 'FOUR',
                   'FIVE', 'SIX', 'SEVEN', 'EIGHT', 'NINE']
    vocab_vectors = np.eye(len(vocab_names))
    vocab = nengo.spa.Vocabulary(len(vocab_names))
    for name, vector in zip(vocab_names, vocab_vectors):
        vocab.add(name, vector)

    config = nengo.Config(nengo.Ensemble)
    config[nengo.Ensemble].neuron_type = nengo.Direct()
    with config:
        output = nengo.spa.State(len(vocab_names), subdimensions=10, vocab=vocab)
    nengo.Connection(knet.output, output.input)
The error trace is below:
AttributeError Traceback (most recent call last)
<ipython-input-14-6831ac9971de> in <module>()
5 with model:
6 u = nengo.Node(nengo.processes.PresentInput(X_train, presentation_time))
----> 7 knet = SequentialNetwork(kmodel,synapse=nengo.synapses.Alpha(0.001))
8 nengo.Connection(u, knet.input, synapse=None)
9
~\Anaconda3.0\lib\site-packages\nengo_extras\keras.py in __init__(self, model, synapse, lif_type, **kwargs)
79 self.add_data_layer(np.prod(model.input_shape[1:]))
80 for layer in model.layers:
---> 81 self._add_layer(layer)
82
83 def _add_layer(self, layer):
~\Anaconda3.0\lib\site-packages\nengo_extras\keras.py in _add_layer(self, layer)
99 for cls in type(layer).__mro__:
100 if cls in layer_adder:
--> 101 return layer_adder[cls](layer)
102
103 raise NotImplementedError("Cannot build layer type %r" %
~\Anaconda3.0\lib\site-packages\nengo_extras\keras.py in _add_conv2d_layer(self, layer)
112 filters, biases = layer.get_weights()
113 filters = filters[..., ::-1, ::-1] # flip
--> 114 strides = layer.subsample
115
116 nf, nc, ni, nj = filters.shape
AttributeError: 'Conv2D' object has no attribute 'subsample'
See https://forums.fast.ai/t/what-is-subsample-of-convolution2d-doing/3555, which suggests that "subsample" in old versions of Keras has been replaced by "strides" in newer versions.
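Two hedged ways around this: pin a Keras version old enough to still use the Keras 1 API that nengo_extras' keras module expects (e.g. pip install "keras<2"), or, as an untested sketch, alias the new attribute back to the old name before building the SequentialNetwork:

from keras.layers import Conv2D

# untested assumption: give each Conv2D the attribute nengo_extras looks for,
# pointing it at the renamed 'strides' tuple
for layer in kmodel.layers:
    if isinstance(layer, Conv2D) and not hasattr(layer, 'subsample'):
        layer.subsample = layer.strides

knet = SequentialNetwork(kmodel, synapse=nengo.synapses.Alpha(0.005))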