'Sequential' object has no attribute 'in_features' and 'fc' - python

I am building a model with the fine-tuning method; the model is VGG-16. But I got the following error: 'Sequential' object has no attribute 'in_features'. I used classifier, so I changed classifier to fc, but then got the error 'Sequential' object has no attribute 'fc'. Can somebody guide me on what I am doing wrong? I have attached the full traceback of each error below.
**ERROR:'Sequential' object has no attribute 'in_features'**
Traceback (most recent call last):
  File "ct_pretrained.py", line 186, in <module>
    model = build_model().cuda()
  File "ct_pretrained.py", line 42, in build_model
    return models.VGG(is_emr=is_emr)
  File "/data/torch/models/vgg.py", line 19, in __init__
    num_ftrs = self.axial_model.classifier.in_features
  File "/root/miniconda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 778, in __getattr__
    raise ModuleAttributeError("'{}' object has no attribute '{}'".format(
torch.nn.modules.module.ModuleAttributeError: 'Sequential' object has no attribute 'in_features'
**ERROR:'VGG' object has no attribute 'fc'**
Traceback (most recent call last):
  File "ct_pretrained.py", line 186, in <module>
    model = build_model().cuda()
  File "ct_pretrained.py", line 42, in build_model
    return models.VGG(is_emr=is_emr)
  File "/data/torch/models/vgg.py", line 19, in __init__
    num_ftrs = self.axial_model.fc.in_features
  File "/root/miniconda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 778, in __getattr__
    raise ModuleAttributeError("'{}' object has no attribute '{}'".format(
torch.nn.modules.module.ModuleAttributeError: 'VGG' object has no attribute 'fc'
import torch
import torch.nn as nn
from torchvision import models

__all__ = ['VGG']

class VGG(nn.Module):
    def __init__(self, is_emr=False, mode='sum'):
        super().__init__()
        self.is_emr = is_emr
        self.mode = mode

        in_dim = 45

        self.axial_model = models.vgg16(pretrained=True)
        out_channels = self.axial_model.features[0].out_channels
        self.axial_model.features[0] = nn.Conv2d(1, out_channels, kernel_size=7, stride=1, padding=0, bias=False)
        self.axial_model.features[3] = nn.MaxPool2d(1)
        num_ftrs = self.axial_model.classifier.in_features  # error in this line of code
        self.axial_model.classifier = nn.Linear(num_ftrs, 15)

        self.sa_co_model = models.vgg16(pretrained=True)
        self.sa_co_model.features[0] = nn.Conv2d(1, out_channels, kernel_size=7, stride=1, padding=(3,0), bias=False)
        self.sa_co_model.features[3] = nn.MaxPool2d(1)
        self.sa_co_model.classifier = nn.Linear(num_ftrs, 15)

        if self.is_emr:
            self.emr_model = EMRModel()
            if self.mode == 'concat': in_dim = 90

        self.classifier = Classifier(in_dim)

    def forward(self, axial, sagittal, coronal, emr):
        axial = axial[:,:,:-3,:-3]
        sagittal = sagittal[:,:,:,:-3]
        coronal = coronal[:,:,:,:-3]

        axial_feature = self.axial_model(axial)
        sagittal_feature = self.sa_co_model(sagittal)
        coronal_feature = self.sa_co_model(coronal)
        out = torch.cat([axial_feature, sagittal_feature, coronal_feature], dim=1)

        if self.is_emr:
            emr_feature = self.emr_model(emr)
            if self.mode == 'concat':
                out = torch.cat([out, emr_feature], dim=1)
            elif self.mode == 'sum':
                out += emr_feature

        out = self.classifier(out)
        return out

The classifier is a Sequential object, and a Sequential does not have an in_features attribute. If you want to determine the feature count dynamically, you need to access a layer inside the classifier rather than the classifier as a whole: num_ftrs = self.axial_model.classifier[0].in_features. This indexes the first layer of the Sequential, i.e. the Linear layer that determines how many features the whole classifier takes as input.

Alternatively, you can replace the classifier with another layer by working out the necessary number of features by hand. Looking at the PyTorch source code for VGG-16, you can see that the classifier takes 512 * 7 * 7 features as input.
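For example, a minimal sketch of the dynamic version (it assumes, as in torchvision's VGG-16, that the first entry of classifier is the nn.Linear that receives the flattened features):

import torch.nn as nn
from torchvision import models

model = models.vgg16(pretrained=True)
# classifier is an nn.Sequential; index into it to reach its first Linear layer
num_ftrs = model.classifier[0].in_features  # 512 * 7 * 7 = 25088 for VGG-16
# replace the whole classifier with a single 15-class head, as in the question
model.classifier = nn.Linear(num_ftrs, 15)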

Related

Tensorflow model layer connection failed, and can not use shap.DeepExplainer

I am trying to apply shap.DeepExplainer to explain the model output.
My model class is as follows:
import tensorflow as tf
from tensorflow.keras import layers

NEG_INF = -1e9  # masking constant (assumed; its definition is not shown in the original post)

class MyModel(tf.keras.Model):
    def __init__(self,
                 input_dim,
                 emb_dim=128,
                 alpha_hidden_dim_size=128,
                 beta_hidden_dim_size=128,
                 keep_prob_emb=0.25,
                 keep_prob_context=0.25,
                 num_class=1):
        super(MyModel, self).__init__()
        self.embedding = layers.Dense(emb_dim,
                                      use_bias=False,
                                      input_shape=(input_dim, ))
        self.emb_drp = layers.Dropout(keep_prob_emb)
        self.enroll = layers.Dense(emb_dim, activation='tanh')
        self.gru_alpha = layers.Bidirectional(
            layers.LSTM(alpha_hidden_dim_size, return_sequences=True))
        self.gru_beta = layers.Bidirectional(
            layers.LSTM(beta_hidden_dim_size, return_sequences=True))
        self.alpha = layers.Dense(1)
        self.beta = layers.Dense(emb_dim, activation='tanh')
        self.context_drp = layers.Dropout(keep_prob_context)
        self.out = layers.Dense(num_class)

    def call(self, visits, enroll, lengths, **kwargs):
        max_len = lengths[tf.argmax(lengths)]
        visits = visits[:, :max_len]
        emb = self.embedding(visits)
        emb = self.emb_drp(emb, training=kwargs.get('training', False))
        enroll = self.enroll(enroll)
        mask = tf.sequence_mask(lengths)
        h_a = self.gru_alpha(emb, mask=mask)
        h_b = self.gru_beta(emb, mask=mask)
        preAlpha = self.alpha(h_a)
        preAlpha = tf.keras.backend.squeeze(preAlpha, axis=2)
        mask_norm = (1 - tf.cast(mask, tf.float32)) * NEG_INF
        alpha = tf.nn.softmax(preAlpha + mask_norm, axis=1)
        beta = self.beta(h_b)
        c_t = tf.math.reduce_sum(alpha[:, :, None] * beta * emb, axis=1)
        c_t = layers.add([c_t, enroll])
        c_t = self.context_drp(c_t, training=kwargs.get('training', False))
        preY = self.out(c_t)
        return preY, alpha, beta
When I instantiate my model as:

model = MyModel(**flags)

the model is loaded successfully:

print(model)
<__main__.MyModel object at 0x7f51db414400>

Then I try to use:

background = X.loc[10:20]
e = shap.DeepExplainer((model.layers[0].input, model.layers[-1].output), background)

but it gives me the error:

AttributeError: Layer dense is not connected, no input to return.
Traceback (most recent call last):
  File "/home/ANANT/codes/test/env/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py", line 1808, in input
    ' is not connected, no input to return.')

And model.layers[-1].output cannot give a proper output either:

AttributeError: Layer dense_4 has no inbound nodes.
Traceback (most recent call last):
  File "/home/ANANT/test/env/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py", line 1827, in output
    raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')
My package versions are: keras==2.3.1, tensorflow==1.15.3, shap==0.35.0.

I have been stuck on this question for a few days. I tried shap.KernelExplainer as well:

shap.KernelExplainer(model, df_fis, link="logit")

and it gives me a different error:

TypeError: call() missing 2 required positional arguments: 'enroll' and 'lengths'
Traceback (most recent call last):
  File "/home/ANANT/test/env/lib/python3.6/site-packages/shap/explainers/kernel.py", line 97, in __init__
    model_null = match_model_to_data(self.model, self.data)
  File "/home/ANANT/test/env/lib/python3.6/site-packages/shap/common.py", line 89, in match_model_to_data
    out_val = model.f(data.data)
  File "/home/ANANT/test/env/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py", line 968, in __call__
    outputs = self.call(cast_inputs, *args, **kwargs)

Please help, thanks in advance!
I think you missed the softmax part.

PyTorch version:

self.softmax = LogSoftmax(dim=1)

Keras version:

layers.Dense(num_classes, activation="softmax")

Add the line above at the end of your __init__ method and see if it works.
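In the model above, that would mean changing the output layer (a sketch using the question's own num_class parameter):

# before: raw logits
self.out = layers.Dense(num_class)
# after: class probabilities via softmax
self.out = layers.Dense(num_class, activation='softmax')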

Exporting a model with tf.map_fn

I have a TensorFlow model that I need to export as a SavedModel. Below is the simplified code of the model I am trying to export.
import tensorflow as tf

def foo(x):
    return tf.reduce_sum(x)

inputs = tf.keras.layers.Input(shape=(128,128,3))
y = tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding='SAME')(inputs)
y = tf.keras.layers.ReLU()(y)
outputs = tf.map_fn(foo, y, dtype=(tf.float32))
model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
model.save('./export', save_format='tf')
But while exporting the model I get the following error:
/Users/bruce/.venv/bin/python /Users/bruce/test_project/mymodel/test.py
Traceback (most recent call last):
  File "/Users/bruce/test_project/mymodel/test.py", line 12, in <module>
    outputs = tf.map_fn(foo, y, dtype=(tf.float32))
  File "/Users/bruce/.venv/lib/python3.6/site-packages/tensorflow_core/python/ops/map_fn.py", line 228, in map_fn
    for elem in elems_flat]
  File "/Users/bruce/.venv/lib/python3.6/site-packages/tensorflow_core/python/ops/map_fn.py", line 228, in <listcomp>
    for elem in elems_flat]
  File "/Users/bruce/.venv/lib/python3.6/site-packages/tensorflow_core/python/ops/tensor_array_ops.py", line 1078, in __init__
    name=name)
  File "/Users/bruce/.venv/lib/python3.6/site-packages/tensorflow_core/python/ops/tensor_array_ops.py", line 716, in __init__
    self._tensor_array = [None for _ in range(size)]
TypeError: 'Tensor' object cannot be interpreted as an integer
I cannot remove the tf.map_fn part; it performs some essential processing that I need in the saved model when it is deployed.
You need to use a custom layer:
class MyMapLayer(tf.keras.layers.Layer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def foo(self, x):
        return tf.reduce_sum(x)

    def call(self, inputs, **kwargs):
        return tf.map_fn(self.foo, inputs, dtype=tf.float32)
Then, in your model:
inputs = tf.keras.layers.Input(shape=(128,128,3))
y = tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding='SAME')(inputs)
y = tf.keras.layers.ReLU()(y)
outputs = MyMapLayer()(y)
model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
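With the map wrapped in a layer, exporting should go through as in the original script (same ./export path as above):

model.save('./export', save_format='tf')  # traces MyMapLayer instead of a bare tf.map_fn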

How to train data in NiftyNet

I'm trying to train a network using NiftyNet with my own data (CT images and their corresponding labels). I designed the Net class following other trainings on similar sample data, all the NiftyNet documentation I could find, and the parameters of my own data. But I keep getting this error:
"TypeError: __init__() got an unexpected keyword argument 'w_initializer'"
I've tried every change I could think of in my config.ini, Net class, etc., but I can't make it work or find the reason. Can anyone help with this error? Or maybe share some guidelines for training my own network from the beginning, so I can at least start an alternative from zero and see if I find a way out?
Training command:
! net_segment train -c /home/niftynet/extensions/dense_vnet_TC/config.ini --name dense_vnet_TC.net_TC.MyNet
Some values in config.ini:
[NETWORK]
name = dense_vnet
batch_size = 6
volume_padding_size = 0
window_sampling = resize
[TRAINING]
sample_per_volume = 1
lr = 0.001
loss_type = dense_vnet_TC.dice_hinge.dice
starting_iter = 0
save_every_n = 1000
max_iter = 3001
[INFERENCE]
border = (0, 0, 0)
inference_iter = 3000
output_interp_order = 0
spatial_window_size = (512, 512, 40)
save_seg_dir = ./segmentation_output/
############################ Custom configuration
[SEGMENTATION]
image = ct
label = label
label_normalisation = False
output_prob = False
num_classes = 2
Basics of Net class:
from niftynet.network.base_net import BaseNet

class MyNet(BaseNet):
    def __init__(self, num_classes, name='MyNet'):
        super(MyNet, self).__init__(num_classes=num_classes, acti_func=acti_func, name=name)
        # network specific property
        self.hidden_features = 10

    def layer_op(self, images, is_training):
        # create layer instances
        conv_1 = ConvolutionalLayer(self.hidden_features, kernel_size=3, name='conv_input')
        conv_2 = ConvolutionalLayer(self.num_classes, kernel_size=1, acti_func=None, name='conv_output')
        # apply layer instances
        flow = conv_1(images, is_training)
        flow = conv_2(flow, is_training)
        return flow
End of output, after doing some of the processing as expected:
Traceback (most recent call last):
  File "/home/niftynet/bin/net_segment", line 10, in <module>
    sys.exit(main())
  File "/home/niftynet/lib/python3.6/site-packages/niftynet/__init__.py", line 142, in main
    app_driver.run(app_driver.app)
  File "/home/niftynet/lib/python3.6/site-packages/niftynet/engine/application_driver.py", line 189, in run
    is_training_action=self.is_training_action)
  File "/home/niftynet/lib/python3.6/site-packages/niftynet/engine/application_driver.py", line 258, in create_graph
    application.initialise_network()
  File "/home/niftynet/lib/python3.6/site-packages/niftynet/application/segmentation_application.py", line 280, in initialise_network
    acti_func=self.net_param.activation_function)
TypeError: __init__() got an unexpected keyword argument 'w_initializer'
I think you need to change this line (based on a similar problem I had):

super(MyNet, self).__init__(num_classes=num_classes, acti_func=acti_func, name=name)

to this (just add w_regularizer):

super(MyNet, self).__init__(num_classes=num_classes, w_regularizer=w_regularizer, acti_func=acti_func, name=name)

If that doesn't work, also try accepting it in the signature:

def __init__(self, num_classes, w_regularizer=None, name='MyNet'):

I hope it helps.
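Putting both suggestions together, a sketch of an __init__ that accepts the keyword arguments NiftyNet's segmentation application passes in; the parameter list mirrors NiftyNet's ToyNet example, and accepting w_initializer (the keyword named in the traceback) alongside w_regularizer is the point to test:

from niftynet.network.base_net import BaseNet

class MyNet(BaseNet):
    def __init__(self, num_classes, w_initializer=None, w_regularizer=None,
                 b_initializer=None, b_regularizer=None,
                 acti_func='prelu', name='MyNet'):
        # forward every initializer/regularizer keyword to BaseNet so that
        # segmentation_application can pass them without a TypeError
        super(MyNet, self).__init__(num_classes=num_classes,
                                    w_initializer=w_initializer,
                                    w_regularizer=w_regularizer,
                                    b_initializer=b_initializer,
                                    b_regularizer=b_regularizer,
                                    acti_func=acti_func,
                                    name=name)
        # network specific property
        self.hidden_features = 10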

Why do I keep getting an operation attribute error in my TensorFlow code

I am trying to build a hidden layer of a neural network with TensorFlow, but I keep getting the error message:
"'Operation' object has no attribute 'dtype'"
This is where the code throws the error:
codings = tf.layers.dense(X, n_hidden, name="hidden")
This is the entire script:
import numpy as np
import tensorflow as tf
from PIL import Image

data = []
test2 = Image.open("./ters/test2.jpg")
prepared_data = np.asarray(test2.resize((800, 1000), Image.ANTIALIAS))
data.append(prepared_data)
data = np.asarray(data)

saver = tf.train.import_meta_graph("./my_model.ckpt.meta")
batch_size, height, width, channels = data.shape
n_hidden = 400

X = tf.get_default_graph().get_operation_by_name("Placeholder")
training_op = tf.get_default_graph().get_operation_by_name("train/Adam")
codings = tf.layers.dense(X, n_hidden, tf.float32)

n_iterations = 5
with tf.Session() as sess:
    saver.restore(sess, "./my_model.ckpt")
    sess.run(training_op)
    test_img = codings.eval(feed_dict={X: X_test})
    print(test_img)
Note: I have already trained the model, saved it as my_model.ckpt, and I am trying to import and use it here.
This is the error message:

Traceback (most recent call last):
  File "using_img_cleaner.py", line 36, in <module>
    codings = tf.layers.dense(X, n_hidden, tf.float32)
  File "/home/exceptions/env/lib/python3.5/site-packages/tensorflow/python/layers/core.py", line 250, in dense
    dtype=inputs.dtype.base_dtype,
AttributeError: 'Operation' object has no attribute 'dtype'
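A likely cause, offered here as an assumption rather than something stated in the thread: get_operation_by_name returns a tf.Operation, while tf.layers.dense expects a tf.Tensor, and only tensors carry a dtype. Fetching the placeholder's output tensor instead would sidestep the error:

# assumption: the placeholder op's first output (":0") is the tensor to feed
X = tf.get_default_graph().get_tensor_by_name("Placeholder:0")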

Error when using the class_weight parameter in the fit function in Keras

I wanted to test my network on a toy data set: a few examples with two imbalanced classes (0 and 1). Unfortunately, I run into problems when using the class_weight parameter to improve the balance. It looks like I am forgetting something.
import tensorflow as tf
from tensorflow.python.keras.layers import Dense, Dropout
from tensorflow.python.keras.applications.xception import Xception, preprocess_input
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.optimizers import Adam

# parsing images from TFRecords
def parse_function(proto):
    example = {'image_raw': tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.int64)}
    parsed_example = tf.parse_single_example(proto, example)
    image = tf.decode_raw(parsed_example['image_raw'], tf.uint8)
    image = tf.reshape(image, [HEIGHT, WIDTH, DEPTH])
    image = preprocess_input(tf.cast(image, tf.float32))
    return image, parsed_example['label']

def get_data(filepath, schuffle_size=32, batch_size=8, prefetch=1, repeat=None, num_parallel_calls=1):
    dataset = tf.data.TFRecordDataset(filepath)
    if schuffle_size != 0:
        dataset = dataset.shuffle(schuffle_size)
    dataset = dataset.repeat(repeat)
    dataset = dataset.map(parse_function, num_parallel_calls=num_parallel_calls)
    dataset = dataset.batch(batch_size)
    dataset = dataset.prefetch(prefetch)
    iterator = dataset.make_one_shot_iterator()
    return iterator

def build_model(number_of_neurons_in_dense_layer, dropout, learning_rate):
    base_model = Xception(weights='imagenet', include_top=False, pooling='avg', input_shape=(HEIGHT, WIDTH, 3))
    for layer in base_model.layers:
        layer.trainable = True
    x = base_model.output
    x = Dropout(dropout)(x)
    x = Dense(number_of_neurons_in_dense_layer, activation='relu')(x)
    x = Dropout(dropout)(x)
    logits = Dense(NUMBER_OF_CLASSES, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=logits)
    model.compile(optimizer=Adam(lr=learning_rate), loss='sparse_categorical_crossentropy', metrics=['categorical_accuracy'])
    return model

global NUMBER_OF_CLASSES, HEIGHT, WIDTH, DEPTH
NUMBER_OF_CLASSES = 2
...
CLASS_WEIGHTS = {
    0: 1,
    1: 7
}
model = build_model(64, 0.4, 0.001)
train = get_data(..., 8, 2, num_parallel_calls=8)
val = get_data(...., 0, 4, num_parallel_calls=8)
model.fit(train, validation_data=val, epochs=3, steps_per_epoch=8//2,
          validation_steps=8//4, shuffle=False,
          class_weight=CLASS_WEIGHTS)
I am getting the following errors:

Original exception was:
Traceback (most recent call last):
  File "/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py", line 51, in _wrapfunc
    return getattr(obj, method)(*args, **kwds)
AttributeError: 'Tensor' object has no attribute 'reshape'

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/home/usr/model.py", line 147, in main
    class_weight=CLASS_WEIGHTS)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py", line 776, in fit
    shuffle=shuffle)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py", line 2432, in _standardize_user_data
    feed_sample_weight_modes)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py", line 2431, in <listcomp>
    for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
  File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training_utils.py", line 758, in standardize_weights
    y_classes = np.reshape(y, y.shape[0])
  File "/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py", line 279, in reshape
    return _wrapfunc(a, 'reshape', newshape, order=order)
  File "/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py", line 61, in _wrapfunc
    return _wrapit(obj, method, *args, **kwds)
  File "/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py", line 41, in _wrapit
    result = getattr(asarray(obj), method)(*args, **kwds)
TypeError: __index__ returned non-int (type NoneType)
Without the class_weight parameter, the fit function works correctly.
Just for future reference:

I ran into this error too and was able to resolve it by passing an array instead of a dictionary, e.g.

CLASS_WEIGHTS = np.array([1, 7])

instead of:

CLASS_WEIGHTS = {
    0: 1,
    1: 7
}
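Applied to the training call from the question, that looks like:

import numpy as np

CLASS_WEIGHTS = np.array([1, 7])  # index = class label, value = weight
model.fit(train, validation_data=val, epochs=3, steps_per_epoch=8//2,
          validation_steps=8//4, shuffle=False,
          class_weight=CLASS_WEIGHTS)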
