I want to apply Spatial Pyramid Pooling before the Dense layer in a CNN. I implemented it in Keras, with TensorFlow as the backend, but I got an error. What's wrong with my code? Thank you.
Traceback (most recent call last):
File "<pyshell#25>", line 1, in <module>
model.add(SpatialPyramidPooling(pooling_regions, input_shape=Input(shape = (None,None,None,3))))
File "C:\Program Files\Python36\lib\site-packages\spp\SpatialPyramidPooling.py", line 33, in __init__
super(SpatialPyramidPooling, self).__init__(**kwargs)
File "C:\Program Files\Python36\lib\site-packages\keras\engine\topology.py", line 311, in __init__
batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
File "C:\Program Files\Python36\lib\site-packages\tensorflow\python\framework\ops.py", line 439, in __iter__
"Tensor objects are not iterable when eager execution is not "
TypeError: Tensor objects are not iterable when eager execution is not enabled. To iterate over this tensor use tf.map_fn.
Here is the code:
from keras.engine.topology import Layer
from keras.models import Sequential
from keras.layers import Input
import keras.backend as K
import numpy as np

class SpatialPyramidPooling(Layer):
    def __init__(self, pool_list, **kwargs):
        self.dim_ordering = K.image_dim_ordering()
        assert self.dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
        self.pool_list = pool_list
        self.num_outputs_per_channel = sum([i * i for i in pool_list])
        super(SpatialPyramidPooling, self).__init__(**kwargs)

    def call(self, x, mask=None):
        input_shape = K.shape(x)
        print(input_shape)
        print(K.eval(input_shape))
        outputs = K.variable(value=np.random.random((3, 4)))
        return outputs

model = Sequential()
model.add(SpatialPyramidPooling((1, 2, 4), input_shape=Input(shape=(None, None, None, 3))))
I'm pretty sure you should be using input_shape=(None, None, None, 3) instead of input_shape=Input(shape=(None, None, None, 3)). The input_shape argument expects a plain tuple of dimensions, not an Input tensor.
Also, you cannot use any function that demands the presence of actual data in the call method. You're using K.shape and K.eval; both will give you errors when the model is compiled.
If you need information about the input shape, do that in the def build(self, input_shape): method instead.
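A minimal sketch of both fixes (the pooling math is replaced with a hypothetical placeholder; only the shape handling is the point here):

from keras.engine.topology import Layer
from keras.models import Sequential
import keras.backend as K

class SpatialPyramidPooling(Layer):
    def __init__(self, pool_list, **kwargs):
        self.pool_list = pool_list
        self.num_outputs_per_channel = sum(i * i for i in pool_list)
        super(SpatialPyramidPooling, self).__init__(**kwargs)

    def build(self, input_shape):
        # input_shape is a plain tuple here, e.g. (None, None, None, 3),
        # so it is safe to inspect at model-definition time.
        self.nb_channels = input_shape[3]
        super(SpatialPyramidPooling, self).build(input_shape)

    def call(self, x, mask=None):
        # Only symbolic ops belong here: no K.eval and no printing of
        # concrete values. A global average pool stands in for the real
        # spatial-pyramid math.
        return K.mean(x, axis=[1, 2])

    def compute_output_shape(self, input_shape):
        # Matches the placeholder pooling above.
        return (input_shape[0], input_shape[3])

model = Sequential()
# Pass a shape tuple; with Sequential the batch axis is implicit.
model.add(SpatialPyramidPooling((1, 2, 4), input_shape=(None, None, 3)))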
I have a TensorFlow model that I need to export as a SavedModel. Below is simplified code for the model that I am trying to export.
import tensorflow as tf

def foo(x):
    return tf.reduce_sum(x)

inputs = tf.keras.layers.Input(shape=(128, 128, 3))
y = tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding='SAME')(inputs)
y = tf.keras.layers.ReLU()(y)
outputs = tf.map_fn(foo, y, dtype=(tf.float32))
model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
model.save('./export', save_format='tf')
But while exporting the model I get the following error.
/Users/bruce/.venv/bin/python /Users/bruce/test_project/mymodel/test.py
Traceback (most recent call last):
File "/Users/bruce/test_project/mymodel/test.py", line 12, in <module>
outputs = tf.map_fn(foo, y, dtype=(tf.float32))
File "/Users/bruce/.venv/lib/python3.6/site-packages/tensorflow_core/python/ops/map_fn.py", line 228, in map_fn
for elem in elems_flat]
File "/Users/bruce/.venv/lib/python3.6/site-packages/tensorflow_core/python/ops/map_fn.py", line 228, in <listcomp>
for elem in elems_flat]
File "/Users/bruce/.venv/lib/python3.6/site-packages/tensorflow_core/python/ops/tensor_array_ops.py", line 1078, in __init__
name=name)
File "/Users/bruce/.venv/lib/python3.6/site-packages/tensorflow_core/python/ops/tensor_array_ops.py", line 716, in __init__
self._tensor_array = [None for _ in range(size)]
TypeError: 'Tensor' object cannot be interpreted as an integer
I cannot remove the tf.map_fn part; it does essential processing that I need in the saved model when deploying it.
You need to use a custom layer:
class MyMapLayer(tf.keras.layers.Layer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def foo(self, x):
        return tf.reduce_sum(x)

    def call(self, inputs, **kwargs):
        return tf.map_fn(self.foo, inputs, dtype=(tf.float32))
Then, in your model:
inputs = tf.keras.layers.Input(shape=(128,128,3))
y = tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding='SAME')(inputs)
y = tf.keras.layers.ReLU()(y)
outputs = MyMapLayer()(y)
model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
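With the mapping wrapped in a layer, the export from the question should then go through unchanged:

model.save('./export', save_format='tf')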
I'm using Keras (tf.keras) in TensorFlow 2.0.0.
I have a network whose input is an image and whose output is also an image. I want to use a combination of MSE, MSE in VGG feature space, and some other losses that depend on intermediate layer outputs, so I'm defining a custom loss function. I'm able to build the model and compile it with the custom loss, but when I train using fit_generator I get a SymbolicException saying Inputs to eager execution function cannot be Keras symbolic tensors.
Full Code:
Train File:
def __init__(self, gray_images: bool, verbose: bool = True):
    super().__init__(gray_images, verbose)
    self.model = None
    self.vgg_feature_extractor = VggFeaturesExtractor(model_name='vgg16', layers=[3, 6, 10])

def build_model(self):
    image_input = Input(shape=(None, None, num_input_channels))
    out1 = self.build_out1_model(image_input, num_filters, depth_t)
    out2 = self.build_out2_model(image_input, num_filters, depth_n, use_bnorm)
    enhanced_image = ...  # Some function of image_input, out1 and out2
    self.model = Model(inputs=image_input, outputs=enhanced_image)
    self.model.add_loss(loss_weights[1] * self.loss2(out2))
    self.model.compile(optimizer='adam', loss=self.vgg_loss)

def vgg_loss(self, gt_image, est_image):
    gt_features = self.vgg_feature_extractor.extract_features(gt_image)
    est_features = self.vgg_feature_extractor.extract_features(est_image)
    loss = tf.reduce_mean(tf.square(gt_features[0] - est_features[0])) + \
           tf.reduce_mean(tf.square(gt_features[1] - est_features[1])) + \
           tf.reduce_mean(tf.square(gt_features[2] - est_features[2]))
    return loss
VggFeatures.py:
from typing import List

import numpy
from tensorflow import keras

class VggFeaturesExtractor:
    def __init__(self, model_name: str, layers: List[int]):
        self.model_name = model_name
        self.layers = layers
        if model_name == 'vgg16':
            from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
            vgg_model = VGG16(include_top=False)
            self.preprocess_input = preprocess_input
        elif model_name == 'vgg19':
            from tensorflow.keras.applications.vgg19 import VGG19, preprocess_input
            vgg_model = VGG19(include_top=False)
            self.preprocess_input = preprocess_input
        else:
            raise RuntimeError(f'Unknown Model: {model_name}')
        outputs = []
        for layer_num in layers:
            outputs.append(vgg_model.layers[layer_num].output)
        self.feature_extractor = keras.Model(inputs=vgg_model.input, outputs=outputs)

    def extract_features(self, images: numpy.ndarray):
        preprocessed_images = self.preprocess_input(images)
        features = self.feature_extractor(preprocessed_images)
        return features
Stack trace:
Epoch 1/1000
Traceback (most recent call last):
File "/media/nagabhushan/Data02/SoftwareFiles/Anaconda/anaconda3/envs/.../lib/python3.7/site-packages/tensorflow_core/python/eager/execute.py", line 61, in quick_execute
num_outputs)
TypeError: An op outside of the function building code is being passed
a "Graph" tensor. It is possible to have Graph tensors
leak out of the function building context by including a
tf.init_scope in your function building code.
For example, the following function will fail:
@tf.function
def has_init_scope():
    my_constant = tf.constant(1.)
    with tf.init_scope():
        added = my_constant * 2
The graph tensor has name: StridedSliceGrad:0
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/media/nagabhushan/Data02/SNB/IISc/Research/.../Workspace/Ideas/01_Supervised/src/N09.py", line 363, in <module>
main()
File "/media/nagabhushan/Data02/SNB/IISc/Research/.../Workspace/Ideas/01_Supervised/src/N09.py", line 343, in main
args.save_interval)
File "/media/nagabhushan/Data02/SNB/IISc/Research/.../Workspace/Ideas/01_Supervised/src/N09.py", line 92, in train_model
verbose=self.verbose)
File "/media/nagabhushan/Data02/SoftwareFiles/Anaconda/anaconda3/envs/.../lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training.py", line 1297, in fit_generator
steps_name='steps_per_epoch')
File "/media/nagabhushan/Data02/SoftwareFiles/Anaconda/anaconda3/envs/.../lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_generator.py", line 265, in model_iteration
batch_outs = batch_function(*batch_data)
File "/media/nagabhushan/Data02/SoftwareFiles/Anaconda/anaconda3/envs/.../lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training.py", line 973, in train_on_batch
class_weight=class_weight, reset_metrics=reset_metrics)
File "/media/nagabhushan/Data02/SoftwareFiles/Anaconda/anaconda3/envs/.../lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py", line 264, in train_on_batch
output_loss_metrics=model._output_loss_metrics)
File "/media/nagabhushan/Data02/SoftwareFiles/Anaconda/anaconda3/envs/.../lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_eager.py", line 311, in train_on_batch
output_loss_metrics=output_loss_metrics))
File "/media/nagabhushan/Data02/SoftwareFiles/Anaconda/anaconda3/envs/.../lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_eager.py", line 268, in _process_single_batch
grads = tape.gradient(scaled_total_loss, trainable_weights)
File "/media/nagabhushan/Data02/SoftwareFiles/Anaconda/anaconda3/envs/.../lib/python3.7/site-packages/tensorflow_core/python/eager/backprop.py", line 1014, in gradient
unconnected_gradients=unconnected_gradients)
File "/media/nagabhushan/Data02/SoftwareFiles/Anaconda/anaconda3/envs/.../lib/python3.7/site-packages/tensorflow_core/python/eager/imperative_grad.py", line 76, in imperative_grad
compat.as_str(unconnected_gradients.value))
File "/media/nagabhushan/Data02/SoftwareFiles/Anaconda/anaconda3/envs/.../lib/python3.7/site-packages/tensorflow_core/python/eager/function.py", line 911, in _backward_function_wrapper
processed_args, remapped_captures)
File "/media/nagabhushan/Data02/SoftwareFiles/Anaconda/anaconda3/envs/.../lib/python3.7/site-packages/tensorflow_core/python/eager/function.py", line 1224, in _call_flat
ctx, args, cancellation_manager=cancellation_manager)
File "/media/nagabhushan/Data02/SoftwareFiles/Anaconda/anaconda3/envs/.../lib/python3.7/site-packages/tensorflow_core/python/eager/function.py", line 511, in call
ctx=ctx)
File "/media/nagabhushan/Data02/SoftwareFiles/Anaconda/anaconda3/envs/.../lib/python3.7/site-packages/tensorflow_core/python/eager/execute.py", line 75, in quick_execute
"tensors, but found {}".format(keras_symbolic_tensors))
tensorflow.python.eager.core._SymbolicException: Inputs to eager execution function cannot be Keras symbolic tensors, but found [<tf.Tensor 'StridedSliceGrad:0' shape=(16, 64, 64, 3) dtype=float32>]
Process finished with exit code 1
Note:
1. If I replace self.model.compile(optimizer='adam', loss=self.vgg_loss) with self.model.compile(optimizer='adam', loss='mse'), the code works fine, which implies the rest of the code is working correctly.
2. Almost every question I found on SO regarding VGG loss advises appending the VGG network to the main network, setting trainable=False for the VGG network, and then training with MSE loss. But I can't do that, since I have many components in my loss function.
I was able to fix this issue by disabling eager execution. In TensorFlow 2.0, eager execution is enabled by default.
tf.compat.v1.disable_eager_execution()
I didn't understand how this fixed the issue, though. If anybody stumbles on a similar problem, you can try disabling eager execution.
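A minimal sketch of where that call belongs (the script layout is assumed, not from the question; the call must run before any model or loss is constructed):

import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # must come before building the model

# ... then build the model, compile with the custom vgg_loss,
# and call fit_generator as before.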
I am trying to use the PyTorch grouped Conv2d operator on very large images (10k x 10k pixels). I am getting a RuntimeError: offset is too big error when trying to apply a grouped convolution in the network. Does anyone know how to circumvent this?
Code for reproducibility:
import torch
import torch.nn as nn
import pdb

def create_img(size, batch_size=1, channels=3):
    return torch.FloatTensor(batch_size, channels, size, size).uniform_(-1, 1)

class TestModel(nn.Module):
    def __init__(self):
        super(TestModel, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(1, 1), groups=64, bias=False)

    def forward(self, x):
        out = self.conv1(x)
        return out

if __name__ == '__main__':
    model = TestModel()
    data = create_img(5002, channels=64)
    out = model(data)
    pdb.set_trace()
and the error:
Traceback (most recent call last):
File "test.py", line 26, in <module>
out = model(data)
File ".../pipenv/lib/python3.6/site-packages/torch/nn/modules/module.py", line 489, in __call__
result = self.forward(*input, **kwargs)
File "test.py", line 17, in forward
out = self.conv1(x)
File ".../pipenv/lib/python3.6/site-packages/torch/nn/modules/module.py", line 489, in __call__
result = self.forward(*input, **kwargs)
File ".../pipenv/lib/python3.6/site-packages/torch/nn/modules/conv.py", line 320, in forward
self.padding, self.dilation, self.groups)
RuntimeError: offset is too big
I am using Python 3.6 and PyTorch 1.0.0. The strange thing is that this works with smaller images; change the image size from 5002 to 502, for example.
Solved by updating PyTorch to 1.3.0.
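To confirm the upgrade took effect (torch.__version__ is a standard PyTorch attribute):

import torch
print(torch.__version__)  # should print 1.3.0 or later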
I get this error when I use my own layer:
Traceback (most recent call last):
File "E:/fffan/try.py", line 40, in <module>
run(input, 5000)
File "E:/fffan/try.py", line 36, in run
out = SelfAttention(nclass)(output, state)
TypeError: __call__() takes 2 positional arguments but 3 were given
Here is my code; I hope someone can tell me how to fix it.
from keras.engine.topology import Layer
from keras.layers.core import Dense
from keras import backend as K
from keras.layers import Input, CuDNNGRU
from keras.activations import softmax

class SelfAttention(Layer):  # performs poorly on long sequences
    def __init__(self, units, **kwargs):
        self.W1 = Dense(units)
        self.W2 = Dense(units)
        self.V = Dense(1)
        super(SelfAttention, self).__init__(**kwargs)

    def call(self, features, hidden):
        hidden_with_time_axis = K.expand_dims(hidden, 1)
        score = self.V(K.tanh(self.W1(features) + self.W2(hidden_with_time_axis)))
        attention_weights = softmax(score, axis=1)
        context_vector = attention_weights * features
        return context_vector

def GRU(units):
    return CuDNNGRU(units, return_sequences=True,
                    return_state=True,
                    recurrent_initializer='glorot_uniform')

def run(input, nclass):
    output, state = GRU(nclass)(input)
    out = SelfAttention(nclass)(output, state)

if __name__ == '__main__':
    input = Input(shape=(35, 512), name='the_input')
    run(input, 5000)
My TensorFlow version is 1.14.0 and my Keras version is 2.1.5. Does somebody know anything about this issue?
It should be:
def __call__(self, features, hidden):
instead of:
def call(self, features, hidden):
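Applied to the class above, the fix looks like this (a minimal sketch; note that overriding __call__ directly bypasses Keras's usual single-input handling, which is why the two-argument call is then accepted):

class SelfAttention(Layer):
    def __init__(self, units, **kwargs):
        self.W1 = Dense(units)
        self.W2 = Dense(units)
        self.V = Dense(1)
        super(SelfAttention, self).__init__(**kwargs)

    def __call__(self, features, hidden):
        # Same body as before; only the method name changes, so
        # SelfAttention(nclass)(output, state) now resolves here.
        hidden_with_time_axis = K.expand_dims(hidden, 1)
        score = self.V(K.tanh(self.W1(features) + self.W2(hidden_with_time_axis)))
        attention_weights = softmax(score, axis=1)
        context_vector = attention_weights * features
        return context_vector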
I am trying to implement a Faster R-CNN model for object detection written by Yinghan Xu. After I have trained and saved the model with model_all.save('filename.h5'), I am trying to freeze the Keras model as a TensorFlow graph (as .pb) for inference using keras_to_tensorflow.py written by Amir Abdi. But when I try to convert it, I get ValueError: Unknown layer: RoiPoolingConv due to a custom RoiPoolingConv layer:
import tensorflow as tf
from keras import backend as K
from keras.engine.topology import Layer

class RoiPoolingConv(Layer):
    '''ROI pooling layer for 2D inputs.
    See Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition,
    K. He, X. Zhang, S. Ren, J. Sun
    # Arguments
        pool_size: int
            Size of pooling region to use. pool_size = 7 will result in a 7x7 region.
        num_rois: number of regions of interest to be used
    # Input shape
        list of two 4D tensors [X_img, X_roi] with shape:
        X_img:
            `(1, rows, cols, channels)`
        X_roi:
            `(1, num_rois, 4)` list of rois, with ordering (x, y, w, h)
    # Output shape
        3D tensor with shape:
        `(1, num_rois, channels, pool_size, pool_size)`
    '''
    def __init__(self, pool_size, num_rois, **kwargs):
        self.dim_ordering = K.image_dim_ordering()
        self.pool_size = pool_size
        self.num_rois = num_rois
        super(RoiPoolingConv, self).__init__(**kwargs)

    def build(self, input_shape):
        self.nb_channels = input_shape[0][3]

    def compute_output_shape(self, input_shape):
        return None, self.num_rois, self.pool_size, self.pool_size, self.nb_channels

    def call(self, x, mask=None):
        assert (len(x) == 2)
        # x[0] is image with shape (rows, cols, channels)
        img = x[0]
        # x[1] is roi with shape (num_rois, 4) with ordering (x, y, w, h)
        rois = x[1]
        input_shape = K.shape(img)
        outputs = []
        for roi_idx in range(self.num_rois):
            x = rois[0, roi_idx, 0]
            y = rois[0, roi_idx, 1]
            w = rois[0, roi_idx, 2]
            h = rois[0, roi_idx, 3]
            x = K.cast(x, 'int32')
            y = K.cast(y, 'int32')
            w = K.cast(w, 'int32')
            h = K.cast(h, 'int32')
            # Resized roi of the image to pooling size (7x7)
            rs = tf.image.resize_images(img[:, y:y+h, x:x+w, :], (self.pool_size, self.pool_size))
            outputs.append(rs)
        final_output = K.concatenate(outputs, axis=0)
        # Reshape to (1, num_rois, pool_size, pool_size, nb_channels)
        # Might be (1, 4, 7, 7, 3)
        final_output = K.reshape(final_output, (1, self.num_rois, self.pool_size, self.pool_size, self.nb_channels))
        # permute_dimensions is similar to transpose
        final_output = K.permute_dimensions(final_output, (0, 1, 2, 3, 4))
        return final_output

    def get_config(self):
        config = {'pool_size': self.pool_size,
                  'num_rois': self.num_rois}
        base_config = super(RoiPoolingConv, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
I have looked at most of the resources out there, and almost all of them suggest commenting out this layer. But since this layer is important for object detection, I was wondering if a workaround is possible.
The complete error traceback (note: I've saved the file as freezekeras.py; its contents are the same as keras_to_tensorflow.py):
Using TensorFlow backend.
Traceback (most recent call last):
File "freezekeras.py", line 181, in <module>
app.run(main)
File "/usr/local/lib/python3.5/dist-packages/absl/app.py", line 300, in run
_run_main(main, args)
File "/usr/local/lib/python3.5/dist-packages/absl/app.py", line 251, in _run_main
sys.exit(main(argv))
File "freezekeras.py", line 127, in main
model = load_model(FLAGS.input_model, FLAGS.input_model_json, FLAGS.input_model_yaml)
File "freezekeras.py", line 105, in load_model
raise wrong_file_err
File "freezekeras.py", line 62, in load_model
model = keras.models.load_model(input_model_path)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/saving.py", line 419, in load_model
model = _deserialize_model(f, custom_objects, compile)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/saving.py", line 225, in _deserialize_model
model = model_from_config(model_config, custom_objects=custom_objects)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/saving.py", line 458, in model_from_config
return deserialize(config, custom_objects=custom_objects)
File "/usr/local/lib/python3.5/dist-packages/keras/layers/__init__.py", line 55, in deserialize
printable_module_name='layer')
File "/usr/local/lib/python3.5/dist-packages/keras/utils/generic_utils.py", line 145, in deserialize_keras_object
list(custom_objects.items())))
File "/usr/local/lib/python3.5/dist-packages/keras/engine/network.py", line 1022, in from_config
process_layer(layer_data)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/network.py", line 1008, in process_layer
custom_objects=custom_objects)
File "/usr/local/lib/python3.5/dist-packages/keras/layers/__init__.py", line 55, in deserialize
printable_module_name='layer')
File "/usr/local/lib/python3.5/dist-packages/keras/utils/generic_utils.py", line 138, in deserialize_keras_object
': ' + class_name)
ValueError: Unknown layer: RoiPoolingConv
Try to specify the custom layer explicitly:
model = load_model('my_model.h5', custom_objects={'RoiPoolingConv': RoiPoolingConv})
Obviously, you have to rewrite the keras_to_tensorflow.py script. See the "Handling custom layers (or other custom objects) in saved models" section of the Keras FAQ.
Solution
1. Specify the custom layer while loading the model in keras_to_tensorflow.py:
model = keras.models.load_model(input_model_path, custom_objects={'RoiPoolingConv': RoiPoolingConv})
2. Import RoiPoolingConv.py into the keras_to_tensorflow project.
3. Specify default pool_size and num_rois for RoiPoolingConv:
def __init__(self, pool_size=7, num_rois=32, **kwargs):
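A minimal sketch of how the three changes fit together in keras_to_tensorflow.py (the load_model wrapper here is illustrative, since the real script has additional json/yaml branches; RoiPoolingConv.py is assumed to be on the import path):

from keras import models
from RoiPoolingConv import RoiPoolingConv  # step 2: make the class importable

def load_model(input_model_path):
    # Step 1: tell the deserializer how to rebuild the unknown layer.
    return models.load_model(input_model_path,
                             custom_objects={'RoiPoolingConv': RoiPoolingConv})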