I am trying to generate adversarial examples using FGSM, and the code I am using is from the Google Colab notebook (https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/adversarial_fgsm.ipynb#scrollTo=wpYrQ4OQSYWk). The kernel of my Jupyter notebook is Python 3.7. However, when I tried to use my own picture to generate the adversarial example, it failed every time. The only part I changed is image_path = 'cat.jpg'. I searched for the errors on Google, but there seem to be no similar cases. Could you please give me a hand? Thanks a lot!
Here is my code:
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['figure.figsize'] = (8, 8)
mpl.rcParams['axes.grid'] = False
pretrained_model = tf.keras.applications.MobileNetV2(include_top=True,
                                                     weights='imagenet')
pretrained_model.trainable = False
# ImageNet labels
decode_predictions = tf.keras.applications.mobilenet_v2.decode_predictions
# Helper function to preprocess the image so that it can be inputted in MobileNetV2
def preprocess(image):
    image = tf.cast(image, tf.float32)
    image = tf.image.resize(image, (224, 224))
    image = tf.keras.applications.mobilenet_v2.preprocess_input(image)
    image = image[None, ...]
    return image
# Helper function to extract labels from probability vector
def get_imagenet_label(probs):
    return decode_predictions(probs, top=1)[0][0]
image_path = 'cat.jpg'
image_raw = tf.io.read_file(image_path)
image = tf.image.decode_image(image_raw)
image = preprocess(image)
image_probs = pretrained_model.predict(image)
plt.figure()
plt.imshow(image[0]*0.5+0.5) # To change [-1, 1] to [0,1]
_, image_class, class_confidence = get_imagenet_label(image_probs)
plt.title('{} : {:.2f}% Confidence'.format(image_class, class_confidence*100))
plt.show()
loss_object = tf.keras.losses.CategoricalCrossentropy()
def create_adversarial_pattern(input_image, input_label):
    with tf.GradientTape() as tape:
        tape.watch(input_image)
        prediction = pretrained_model(input_image)
        loss = loss_object(input_label, prediction)
    # Get the gradients of the loss w.r.t to the input image.
    gradient = tape.gradient(loss, input_image)
    # Get the sign of the gradients to create the perturbation
    signed_grad = tf.sign(gradient)
    return signed_grad
# Get the input label of the image.
labrador_retriever_index = 208
label = tf.one_hot(labrador_retriever_index, image_probs.shape[-1])
label = tf.reshape(label, (1, image_probs.shape[-1]))
perturbations = create_adversarial_pattern(image, label)
plt.imshow(perturbations[0]*0.5+0.5); # To change [-1, 1] to [0,1]
def display_images(image, description):
    _, label, confidence = get_imagenet_label(pretrained_model.predict(image))
    plt.figure()
    plt.imshow(image[0]*0.5+0.5)
    plt.title('{} \n {} : {:.2f}% Confidence'.format(description,
                                                     label, confidence*100))
    plt.show()
epsilons = [0, 0.02, 0.2, 0.4, 0.8, 0.9, 1.0]
descriptions = [('Epsilon = {:0.3f}'.format(eps) if eps else 'Input')
                for eps in epsilons]

for i, eps in enumerate(epsilons):
    adv_x = image + eps*perturbations
    adv_x = tf.clip_by_value(adv_x, -1, 1)
    display_images(adv_x, descriptions[i])
And the errors are:
---------------------------------------------------------------------------
_FallbackException Traceback (most recent call last)
D:\anaconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\ops\gen_io_ops.py in read_file(filename, name)
601 _ctx._context_handle, _ctx._thread_local_data.device_name, "ReadFile",
--> 602 name, _ctx._post_execution_callbacks, filename)
603 return _result
_FallbackException: This function does not handle the case of the path where all inputs are not already EagerTensors.
During handling of the above exception, another exception occurred:
UnicodeDecodeError Traceback (most recent call last)
<ipython-input-18-4edeac21c176> in <module>
27
28 image_path = 'cat.jpg'
---> 29 image_raw = tf.io.read_file(image_path)
30 image = tf.image.decode_image(image_raw)
31
D:\anaconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\ops\gen_io_ops.py in read_file(filename, name)
605 try:
606 return read_file_eager_fallback(
--> 607 filename, name=name, ctx=_ctx)
608 except _core._SymbolicException:
609 pass # Add nodes to the TensorFlow graph.
D:\anaconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\ops\gen_io_ops.py in read_file_eager_fallback(filename, name, ctx)
654 _attrs = None
655 _result = _execute.execute(b"ReadFile", 1, inputs=_inputs_flat,
--> 656 attrs=_attrs, ctx=_ctx, name=name)
657 _execute.record_gradient(
658 "ReadFile", _inputs_flat, _attrs, _result, name)
D:\anaconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
59 tensors = pywrap_tensorflow.TFE_Py_Execute(ctx._handle, device_name,
60 op_name, inputs, attrs,
---> 61 num_outputs)
62 except core._NotOkStatusException as e:
63 if name is not None:
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xd5 in position 57: invalid continuation byte
I tested your code with some of my own images and everything is fine; it's just about how your image is encoded.
Run it again with some other images, or change the image's encoding.
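If you want to keep using the same picture, one thing to try (my suggestion, not part of the original answer) is to re-encode it with Pillow before handing it to tf.io.read_file, and to make sure the file sits in a folder whose path contains only ASCII characters, since the UnicodeDecodeError is raised while decoding a string. A minimal sketch, assuming Pillow is installed and the file is cat.jpg:

from PIL import Image

# Re-save the picture as a freshly encoded RGB JPEG (ideally into a plain ASCII path)
Image.open('cat.jpg').convert('RGB').save('cat_clean.jpg', 'JPEG')

image_path = 'cat_clean.jpg'
image_raw = tf.io.read_file(image_path)
image = tf.image.decode_image(image_raw, channels=3)
image = preprocess(image)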
Related
Hello, I am working on semantic segmentation with the DeeplabV3Plus architecture and TensorFlow (Keras). It worked well with another dataset, but now I want to do it with my own data. In the first step of loading the data, however, it shows me a strange error. The function is
tf.data.Dataset.from_tensor_slices
and the error is:
ValueError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_20192\306109049.py in <module>
57
58 train_dataset = data_generator(train_images, train_masks)
---> 59 val_dataset = data_generator(val_images, val_masks)
60
61 print("Train Dataset:", train_dataset)
~\AppData\Local\Temp\ipykernel_20192\306109049.py in data_generator(image_list, mask_list)
50
51 def data_generator(image_list, mask_list):
---> 52 dataset = tf.data.Dataset.from_tensor_slices((image_list, mask_list))
53 dataset = dataset.map(load_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
54 dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py in from_tensor_slices(tensors, name)
812 Dataset: A `Dataset`.
813 """
--> 814 return TensorSliceDataset(tensors, name=name)
815
816 class _GeneratorState(object):
~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py in __init__(self, element, is_files, name)
4720 batch_dim.assert_is_compatible_with(
4721 tensor_shape.Dimension(
-> 4722 tensor_shape.dimension_value(t.get_shape()[0])))
4723
4724 variant_tensor = gen_dataset_ops.tensor_slice_dataset(
~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\framework\tensor_shape.py in assert_is_compatible_with(self, other)
298 if not self.is_compatible_with(other):
299 raise ValueError("Dimensions %s and %s are not compatible" %
--> 300 (self, other))
301
302 def merge_with(self, other):
ValueError: Dimensions 37 and 50 are not compatible
The error is "Dimensions 37 and 50 are not compatible". I searched for this but could not find a solution.
Code:
import os
import cv2
import numpy as np
from glob import glob
from scipy.io import loadmat
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
IMAGE_SIZE = 512
BATCH_SIZE = 4
NUM_CLASSES = 20
DATA_DIR = r'C:/Users/Joshi/Desktop/CARLA_0.9.13/WindowsNoEditor/PythonAPI/examples/out'
NUM_TRAIN_IMAGES = 250
NUM_VAL_IMAGES = 50
train_images = sorted(glob(os.path.join(DATA_DIR, "out/*")))[:NUM_TRAIN_IMAGES]
train_masks = sorted(glob(os.path.join(DATA_DIR, "Seman/*")))[:NUM_TRAIN_IMAGES]
val_images = sorted(glob(os.path.join(DATA_DIR, "out/*")))[
    NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES
]
val_masks = sorted(glob(os.path.join(DATA_DIR, "Seman/*")))[
    NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES
]

def read_image(image_path, mask=False):
    image = tf.io.read_file(image_path)
    if mask:
        image = tf.image.decode_png(image, channels=1)
        image.set_shape([None, None, 1])
        image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
    else:
        image = tf.image.decode_png(image, channels=3)
        image.set_shape([None, None, 3])
        image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
        image = image / 127.5 - 1
    return image

def load_data(image_list, mask_list):
    image = read_image(image_list)
    mask = read_image(mask_list, mask=True)
    return image, mask

def data_generator(image_list, mask_list):
    dataset = tf.data.Dataset.from_tensor_slices((image_list, mask_list))
    dataset = dataset.map(load_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
    return dataset
train_dataset = data_generator(train_images, train_masks)
val_dataset = data_generator(val_images, val_masks)
print("Train Dataset:", train_dataset)
print("Val Dataset:", val_dataset)
It turned out the sizes were simply wrong: the number of images did not match the number of masks (37 vs. 50).
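Since tf.data.Dataset.from_tensor_slices slices its inputs along the first dimension, a mismatch like this means the two lists handed to it have different lengths. A quick sanity check before building the dataset (just a sketch, reusing the variable names from the question):

print(len(train_images), len(train_masks))
print(len(val_images), len(val_masks))   # a 37 vs. 50 mismatch here reproduces the error

assert len(train_images) == len(train_masks), "each training image needs exactly one mask"
assert len(val_images) == len(val_masks), "each validation image needs exactly one mask"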
I'm trying to interpret the inference result of an image classification model. I tried using Grad-CAM, but it wasn't very helpful since the heatmap doesn't give pixel-level information. I found a library called shap; according to its documentation (link), it gives pixel-level interpretations. I followed one of its image classification examples and adapted it to my own problem, as shown below:
model = load_model(r'Model.h5', compile=False)
image = cv2.imread(r'img.png')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
test_img = [image]
test_img = [np.expand_dims(img, axis=0) for img in test_img]
test_img = tf.concat(test_img, axis=0)
test_img = tf.image.resize(test_img, [224, 224])
test_img = tf.cast(test_img, tf.float32) / 255.0
def f(X):
    tmp = X.copy()
    # preprocess_input(tmp)
    return model(tmp)
masker = shap.maskers.Image("inpaint_telea", test_img.shape)
explainer = shap.Explainer(f, masker, output_names=class_label_mapping)
shap_values = explainer(test_img,outputs=shap.Explanation.argsort.flip[:1])
shap.image_plot(shap_values)
When I executed the above code, I got the following error:
Output exceeds the size limit. Open the full output data in a text editor
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
c:\research\visualize_cnn\visualize_shap.ipynb Cell 8 in <cell line: 1>()
----> 1 shap_values = explainer(test_img, outputs=shap.Explanation.argsort.flip[:1])
      2 shap.image_plot(shap_values)

File ~\AppData\Roaming\Python\Python38\site-packages\shap\explainers\_partition.py:136, in Partition.__call__(self, max_evals, fixed_context, main_effects, error_bounds, batch_size, outputs, silent, *args)
    132 def __call__(self, *args, max_evals=500, fixed_context=None, main_effects=False, error_bounds=False, batch_size="auto",
    133              outputs=None, silent=False):
    134     """ Explain the output of the model on the given arguments.
    135     """
--> 136     return super().__call__(
    137         *args, max_evals=max_evals, fixed_context=fixed_context, main_effects=main_effects, error_bounds=error_bounds, batch_size=batch_size,
    138         outputs=outputs, silent=silent
    139     )

File ~\AppData\Roaming\Python\Python38\site-packages\shap\explainers\_explainer.py:266, in Explainer.__call__(self, max_evals, main_effects, error_bounds, batch_size, outputs, silent, *args, **kwargs)
    264 feature_names = [[] for _ in range(len(args))]
    265 for row_args in show_progress(zip(*args), num_rows, self.__class__.__name__ + " explainer", silent):
--> 266     row_result = self.explain_row(
    267         *row_args, max_evals=max_evals, main_effects=main_effects, error_bounds=error_bounds,
    268         batch_size=batch_size, outputs=outputs, silent=silent, **kwargs
    269     )
    270 values.append(row_result.get("values", None))
    271 output_indices.append(row_result.get("output_indices", None))
...
---> 91 x = x.ravel()
     93 # if mask is not given then we mask the whole image
     94 if mask is None:

AttributeError: 'tensorflow.python.framework.ops.EagerTensor' object has no attribute 'ravel'
The way I do inference is as below
image = cv2.imread('img.png')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
test_img = [image]
test_img = [np.expand_dims(img, axis=0) for img in test_img]
test_img = tf.concat(test_img, axis=0)
test_img = tf.image.resize(test_img, [224, 224])
test_img = tf.cast(test_img, tf.float32) / 255.0
y_pred_on_hot = model.predict(test_img) # , batch_size=1)
y_pred = np.argmax(y_pred_on_hot, axis=1)
print(y_pred)
print(class_label_mapping[y_pred[0]])
This prints the class label of the test image.
How can I interpret the test image with the shap library without running into this error?
Any help or suggestion on this would be very helpful.
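One thing worth trying (an assumption on my part, not a confirmed fix): the traceback ends in x.ravel() inside the image masker, and ravel() is a NumPy method that an EagerTensor does not have, so handing SHAP a NumPy array instead of a tf.Tensor may get past this particular error. A sketch reusing the names from the question:

# Hypothetical tweak: convert the EagerTensor to NumPy before explaining it
test_img_np = test_img.numpy()            # same values, but now an np.ndarray with .ravel()

masker = shap.maskers.Image("inpaint_telea", test_img_np[0].shape)
explainer = shap.Explainer(f, masker, output_names=class_label_mapping)
shap_values = explainer(test_img_np, outputs=shap.Explanation.argsort.flip[:1])
shap.image_plot(shap_values)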
I have a dataset of 500 images with dimensions (32, 32, 3), each labelled with a single integer, so each label has shape ().
However, when I run my code, it gives an error:
Shapes () and (1, 32, 32, 3) are incompatible
I'm assuming my model requires (32, 32, 3) data, so how should I alter my label (an integer from 0 to 7) into a (1, 32, 32, 3) shape to fit?
Summary of model:
Code I'm running:
for i in range(20):
    n = random.randint(0, len(data_manager.X_test))
    label = data_manager.y_test[n]
    # label_samples = np.zeros((1,32,32,3), dtype=label.dtype)
    img = data_manager.X_test[n]
    img = tf.expand_dims(img, axis=0)
    img = np.array(img)
    img = img.astype('float32')
    true_pred = our_network_skip.predict(img)
    pa = pgd_attack(our_network_skip, img, label,
                    epsilon=0.0313,
                    num_steps=20,
                    step_size=0.002,
                    clip_value_min=0.,
                    clip_value_max=1.0,
                    soft_label=False,
                    from_logits=False)  # error from this function, where the label has to be (1,32,32,3)
The function where the error occurs:
def pgd_attack(model, input_image, input_label=None,
               epsilon=0.0313,
               num_steps=20,
               step_size=0.002,
               clip_value_min=0.,
               clip_value_max=1.0,
               soft_label=False,
               from_logits=False):
    loss_fn = tf.keras.losses.categorical_crossentropy  # compute CE loss from logits or prediction probabilities
    if type(input_image) is np.ndarray:
        input_image = tf.convert_to_tensor(input_image)
    if type(input_label) is np.ndarray:
        input_label = tf.convert_to_tensor(input_label)
    # random initialization around input_image
    random_noise = tf.random.uniform(shape=input_image.shape, minval=-epsilon, maxval=epsilon)
    adv_image = input_image + random_noise
    for _ in range(num_steps):
        with tf.GradientTape(watch_accessed_variables=False) as tape:
            tape.watch(adv_image)
            if not soft_label:
                loss = loss_fn(input_label, adv_image, from_logits=from_logits)  # use ground-truth label to attack
            else:
                pred_label = tf.math.argmax(adv_image, axis=1)
                loss = loss_fn(pred_label, adv_image, from_logits=from_logits)  # use predicted label to attack
        gradient = tape.gradient(loss, adv_image)  # get the gradient of the loss w.r.t. the current point
        adv_image = adv_image + step_size * tf.sign(gradient)  # move the current adversarial example along the gradient direction with step size eta
        adv_image = tf.clip_by_value(adv_image, input_image - epsilon, input_image + epsilon)  # clip to a valid boundary
        adv_image = tf.clip_by_value(adv_image, clip_value_min, clip_value_max)  # clip to a valid range
        adv_image = tf.stop_gradient(adv_image)  # stop the gradient to make the adversarial image a constant input
    return adv_image
Full error:
ValueError Traceback (most recent call last)
Input In [57], in <cell line: 48>()
57 img = img.astype('float32')
60 true_pred = our_network_skip.predict(img)
---> 62 pa = pgd_attack(our_network_skip, img, label,
63 epsilon=0.0313,
64 num_steps=20,
65 step_size=0.002,
66 clip_value_min=0.,
67 clip_value_max=1.0,
68 soft_label=False,
69 from_logits= False)
71 pgd_pred = our_network_skip.predict(pa)
72 print("True label: {}, adversarial label: {}".format(true_pred, pgd_pred))
Input In [57], in pgd_attack(model, input_image, input_label, epsilon, num_steps, step_size, clip_value_min, clip_value_max, soft_label, from_logits)
32 tape.watch(adv_image)
34 if not soft_label:
---> 35 loss = loss_fn(input_label, adv_image, from_logits= from_logits) # use ground-truth label to attack
36 else:
37 pred_label = tf.math.argmax(adv_image, axis=1)
File ~\anaconda3\envs\tf2_cpu\lib\site-packages\tensorflow\python\util\dispatch.py:206, in add_dispatch_support.<locals>.wrapper(*args, **kwargs)
204 """Call target, and fall back on dispatchers if there is a TypeError."""
205 try:
--> 206 return target(*args, **kwargs)
207 except (TypeError, ValueError):
208 # Note: convert_to_eager_tensor currently raises a ValueError, not a
209 # TypeError, when given unexpected types. So we need to catch both.
210 result = dispatch(wrapper, args, kwargs)
File ~\anaconda3\envs\tf2_cpu\lib\site-packages\keras\losses.py:1665, in categorical_crossentropy(y_true, y_pred, from_logits, label_smoothing, axis)
1660 return y_true * (1.0 - label_smoothing) + (label_smoothing / num_classes)
1662 y_true = tf.__internal__.smart_cond.smart_cond(label_smoothing, _smooth_labels,
1663 lambda: y_true)
-> 1665 return backend.categorical_crossentropy(
1666 y_true, y_pred, from_logits=from_logits, axis=axis)
File ~\anaconda3\envs\tf2_cpu\lib\site-packages\tensorflow\python\util\dispatch.py:206, in add_dispatch_support.<locals>.wrapper(*args, **kwargs)
204 """Call target, and fall back on dispatchers if there is a TypeError."""
205 try:
--> 206 return target(*args, **kwargs)
207 except (TypeError, ValueError):
208 # Note: convert_to_eager_tensor currently raises a ValueError, not a
209 # TypeError, when given unexpected types. So we need to catch both.
210 result = dispatch(wrapper, args, kwargs)
File ~\anaconda3\envs\tf2_cpu\lib\site-packages\keras\backend.py:4839, in categorical_crossentropy(target, output, from_logits, axis)
4837 target = tf.convert_to_tensor(target)
4838 output = tf.convert_to_tensor(output)
-> 4839 target.shape.assert_is_compatible_with(output.shape)
4841 # Use logits whenever they are available. `softmax` and `sigmoid`
4842 # activations cache logits on the `output` Tensor.
4843 if hasattr(output, '_keras_logits'):
File ~\anaconda3\envs\tf2_cpu\lib\site-packages\tensorflow\python\framework\tensor_shape.py:1161, in TensorShape.assert_is_compatible_with(self, other)
1149 """Raises exception if `self` and `other` do not represent the same shape.
1150
1151 This method can be used to assert that there exists a shape that both
(...)
1158 ValueError: If `self` and `other` do not represent the same shape.
1159 """
1160 if not self.is_compatible_with(other):
-> 1161 raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes () and (1, 32, 32, 3) are incompatible
Thank you!
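For what it's worth, the reported incompatibility is between the label (shape ()) and whatever is passed as the prediction (shape (1, 32, 32, 3)); categorical cross-entropy expects a one-hot label and a prediction of matching shape, so reshaping the label to the image shape is probably not what is needed. A hedged sketch of the label handling only, mirroring the tf.one_hot + tf.reshape pattern from the FGSM tutorial code at the top of this page and assuming 8 classes:

num_classes = 8                                 # labels are integers 0..7
label = data_manager.y_test[n]                  # scalar, shape ()
label = tf.one_hot(label, num_classes)          # shape (8,)
label = tf.reshape(label, (1, num_classes))     # shape (1, 8), one row per image in the batch

# Inside pgd_attack, the loss would then compare this label with the model's output,
# e.g. loss_fn(input_label, model(adv_image), ...), rather than with the image itself.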
I have a csv file which looks like this:
I want to load the image (from df['Image_location']) and text (from df['Content']) together, so I did the following operations:
df = pd.read_csv(csv_data_dir, encoding= 'cp1252')
features = df[['Content', 'Image_location']]
labels = df['Sentiment']
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
def process_path(x):
    content, image_path = x[0], x[1]
    print(image_path)
    img = tf.io.read_file(image_path)
    img = tf.io.decode_jpeg(img, channels=3)
    return content, img
dataset = dataset.map(lambda x, y: (process_path(x), y))
dataset = dataset.batch(32, drop_remainder = True)
Upon running the training loop:
for step, (x, y) in enumerate(dataset):
    print(f"Step:{step}")
InvalidArgumentError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_19112/3450653832.py in <module>
1 import matplotlib.pyplot as plt
----> 2 for step , (x, y) in enumerate(dataset):
3 print(f"Step:{step}")
4 content = x[0]
5 image = x[1]
~\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\data\ops\iterator_ops.py in __next__(self)
798 def __next__(self):
799 try:
--> 800 return self._next_internal()
801 except errors.OutOfRangeError:
802 raise StopIteration
~\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\data\ops\iterator_ops.py in _next_internal(self)
781 # to communicate that there is no more data to iterate over.
782 with context.execution_mode(context.SYNC):
--> 783 ret = gen_dataset_ops.iterator_get_next(
784 self._iterator_resource,
785 output_types=self._flat_output_types,
~\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\ops\gen_dataset_ops.py in iterator_get_next(iterator, output_types, output_shapes, name)
2842 return _result
2843 except _core._NotOkStatusException as e:
-> 2844 _ops.raise_from_not_ok_status(e, name)
2845 except _core._FallbackException:
2846 pass
~\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\framework\ops.py in raise_from_not_ok_status(e, name)
7105 def raise_from_not_ok_status(e, name):
7106 e.message += (" name: " + name if name is not None else "")
-> 7107 raise core._status_to_exception(e) from None # pylint: disable=protected-access
7108
7109
InvalidArgumentError: Cannot add tensor to the batch: number of elements does not match. Shapes are: [tensor]: [344,500,3], [batch]: [500,333,3] [Op:IteratorGetNext]
Any idea where I'm going wrong, or how to batch this dataset properly? Without dataset = dataset.batch(32, drop_remainder=True) the code works fine.
I can imagine that not all images have the same shape and that is why you are getting mismatches when batch_size > 1. I would recommend resizing all images to the same size. Here is an example:
def process_path(x):
    content, image_path = x[0], x[1]
    img = tf.io.read_file(image_path)
    img = tf.io.decode_png(img, channels=3)
    img = tf.image.resize(img, [120, 120], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    return content, img
Otherwise, you will have to sort your batches by image size and also take care of the labels.
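For completeness, here is how the resizing version slots into the pipeline from the question (just a sketch reusing the question's variable names); once every image comes out as [120, 120, 3], batch() no longer sees mismatched shapes:

dataset = tf.data.Dataset.from_tensor_slices((features, labels))
dataset = dataset.map(lambda x, y: (process_path(x), y))   # process_path now resizes every image
dataset = dataset.batch(32, drop_remainder=True)

for step, (x, y) in enumerate(dataset):
    content, image = x
    print(step, image.shape)                                # (32, 120, 120, 3)
    break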
So I'm trying to train a GAN to colorize images using the new TensorFlow Dataset API, and I can't get it to work. I'm using the simple one-shot iterator for my dataset, and I think it might be causing the problem, but I can't figure out why. So what I'm asking is: can someone tell me what's wrong with the code?
Code:
Creating the dataset:
def get_next():
    # where gray_ls is just a list of image paths
    gray_ds = tf.data.Dataset.from_tensor_slices(gray_ls).shuffle(50).map(in_parser).batch(30).repeat()
    print(f"output types = {gray_ds.output_types}")    # --> output types = <dtype: 'float32'>
    print(f"output shapes = {gray_ds.output_shapes}")  # --> output shapes = (?, ?, ?, ?)
    gray_iter = gray_ds.make_one_shot_iterator()
    next_gray = gray_iter.get_next()
    # next_color is the same as next_gray, just with different images
    return next_color, next_gray
# mapping function
def in_parser(img_path):
    img_file = tf.read_file(img_path)
    img = tf.image.decode_image(img_file, channels=3)
    img = tf.image.random_flip_left_right(img)
    img = tf.image.random_brightness(img, max_delta=0.1)
    img = tf.image.random_contrast(img, lower=0.9, upper=1.1)
    img = tf.cast(img, tf.float32)
    img = img / 255.0
    print(img)
    return img
#some global vars
stddev = 0.02
decay = 0.9
epsilon = 1e-4
k_size = [5,5]
strides = [2,2]
def gen(input, is_train):
    # channel numbers
    c1, c2, c3, c4 = 64, 128, 256, 512
    with tf.variable_scope("gen", reuse=tf.AUTO_REUSE):
        # this is where it crashes
        conv1 = tf.layers.conv2d(input, c1, k_size, strides, 'SAME',
                                 kernel_initializer=tf.truncated_normal_initializer(stddev=stddev),
                                 name='conv1')
        bn1 = tf.contrib.layers.batch_norm(conv1, is_training=is_train, updates_collections=None,
                                           decay=decay, epsilon=epsilon, scope='bn1')
        ac1 = lrelu(bn1, 'ac1')
        # there is more code after this
trying to run it:
next_color, next_gray = get_next()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
foo = sess.run(next_gray)
print(f"foo ndims : {foo.ndim}") # --> foo ndims : 4
gen_image = gen(foo, True)
# some more code after this
Now this raises an error:
AttributeError: 'tuple' object has no attribute 'ndims'
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-1-701a9276e633> in <module>()
94
95
---> 96 train()
<ipython-input-1-701a9276e633> in train()
41 # print(foo.shape)
42 print("==========================+==============")
---> 43 gen_image = gen(foo, True)
44 # gen_image = gen(next_gray, True)
45 print("==========================+==============")
~\Desktop\code\python\image_processing\Untitled Folder\Untitled Folder\testing1_2\my_gen.py in gen(input, is_train)
30 conv1 = tf.layers.conv2d(input,c1,k_size,strides,'SAME',
31 kernel_initializer=tf.truncated_normal_initializer(stddev=stddev),
---> 32 name='conv1')
33
34 bn1 = tf.contrib.layers.batch_norm(conv1,is_training=is_train, updates_collections=None,
~\Anaconda2\envs\image_rec\lib\site-packages\tensorflow\python\layers\convolutional.py in conv2d(inputs, filters, kernel_size, strides, padding, data_format, dilation_rate, activation, use_bias, kernel_initializer, bias_initializer, kernel_regularizer, bias_regularizer, activity_regularizer, kernel_constraint, bias_constraint, trainable, name, reuse)
423 _reuse=reuse,
424 _scope=name)
--> 425 return layer.apply(inputs)
426
427
~\Anaconda2\envs\image_rec\lib\site-packages\tensorflow\python\keras\engine\base_layer.py in apply(self, inputs, *args, **kwargs)
803 Output tensor(s).
804 """
--> 805 return self.__call__(inputs, *args, **kwargs)
806
807 def _set_learning_phase_metadata(self, inputs, outputs):
~\Anaconda2\envs\image_rec\lib\site-packages\tensorflow\python\layers\base.py in __call__(self, inputs, *args, **kwargs)
360
361 # Actually call layer
--> 362 outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
363
364 if not context.executing_eagerly():
~\Anaconda2\envs\image_rec\lib\site-packages\tensorflow\python\keras\engine\base_layer.py in __call__(self, inputs, *args, **kwargs)
718
719 # Check input assumptions set before layer building, e.g. input rank.
--> 720 self._assert_input_compatibility(inputs)
721 if input_list and self._dtype is None:
722 try:
~\Anaconda2\envs\image_rec\lib\site-packages\tensorflow\python\keras\engine\base_layer.py in _assert_input_compatibility(self, inputs)
1408 spec.min_ndim is not None or
1409 spec.max_ndim is not None):
-> 1410 if x.shape.ndims is None:
1411 raise ValueError('Input ' + str(input_index) + ' of layer ' +
1412 self.name + ' is incompatible with the layer: '
AttributeError: 'tuple' object has no attribute 'ndims'
Thanks in advance.
So apparently casting the output to tf.float32 solves the problem:
next_color, next_gray = get_next()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
foo = sess.run(next_gray)
gray_batch = tf.cast(foo, dtype = tf.float32)
gen_image = gen(gray_batch, True)
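My reading of why the cast helps (an inference from the traceback, not part of the original answer): sess.run(next_gray) returns a NumPy array, whose .shape is a plain Python tuple, while the layer's input check accesses shape.ndims, which only exists on a tf.TensorShape. tf.cast implicitly converts the array into a Tensor, so the check passes; tf.convert_to_tensor does the same thing more explicitly:

foo = sess.run(next_gray)                                   # NumPy array: foo.shape is a tuple, no .ndims
gray_batch = tf.convert_to_tensor(foo, dtype=tf.float32)    # Tensor: .shape is a TensorShape with .ndims
gen_image = gen(gray_batch, True)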