I am using Keras with TensorFlow 2.0 and I am classifying images using MobileNetV2.
The code for training and validation works fine. Now, I want to visualize the activations of certain layers.
Here is part of my code:
from __future__ import print_function
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import class_weight
from tensorflow.keras import backend as k
k.set_image_data_format('channels_last')  # for remote run
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img,img_to_array
from tensorflow.keras import optimizers,models
from tensorflow.keras.models import model_from_json
from tensorflow.keras.applications import InceptionResNetV2,MobileNetV2
from tensorflow.keras.applications.inception_resnet_v2 import preprocess_input
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import ReduceLROnPlateau
#import cv2
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.utils import multi_gpu_model
from helper_func import save_batch_info,smooth_curve,save_model_hyperparam,save_plots,get_batch_stats
import tensorflow as tf
import os
# ********** SOME CODE HERE *************************
def Mobilenetv2_finetune_model():
    MobileNet_tuned = MobileNetV2(weights='C:\\ML\\mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_96_no_top.h5',
                                  include_top=False, input_shape=(W_rz, H_rz, nc), alpha=1)
    # If alpha < 1.0, proportionally decreases the number of filters in each layer.
    # If alpha > 1.0, proportionally increases the number of filters in each layer.
    model = models.Sequential()
    model.add(MobileNet_tuned)
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.1))
    model.add(layers.Dense(nclass, activation='softmax'))
    return model, MobileNet_tuned

model, conv_bs = Mobilenetv2_finetune_model()
#****************** Visualizing intermediate activations ***************
layer_outputs = [layer.output for layer in model.layers[0].layers[:17]]
activation_model = models.Model(inputs=model.input, outputs=layer_outputs)
img_pth = 'C://ML//test//52.png'
img_tst = img_to_array(load_img(img_pth, target_size=(96,96)))
activations = activation_model.predict(img_tst)
When I run this, I get an error from the following line:
activation_model = models.Model(inputs=model.input, outputs=layer_outputs)
What am I doing wrong here? I followed François Chollet's book, where this approach is used in much the same way.
Any help is greatly appreciated.
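For reference, a minimal sketch of the usual workaround (an assumption on my part, since the error text is not shown above): when the base model is nested inside a Sequential model, its sub-layers belong to the nested model's graph, so the activation model is typically built from the nested model's own input rather than from model.input, and predict() expects a batch dimension:

from tensorflow.keras import models
import numpy as np

# Build the activation model directly on the nested MobileNetV2 (conv_bs),
# whose layers the requested outputs actually belong to.
layer_outputs = [layer.output for layer in conv_bs.layers[:17]]
activation_model = models.Model(inputs=conv_bs.input, outputs=layer_outputs)

img_tst = img_to_array(load_img(img_pth, target_size=(96, 96)))
img_tst = np.expand_dims(img_tst, axis=0)  # add the batch dimension predict() expects
activations = activation_model.predict(img_tst)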
I have been getting the error 'NoneType' object has no attribute 'register_forward_hook' when I run my code. I believe something is wrong with my function, but I am not sure what the problem is. I am trying to extract feature vectors from a pretrained ResNet model; if there is a better way to get the feature vectors, that would help too. Any help would be much appreciated.
Here is the code.
import torch
import torch.nn as nn
import torchvision.models as models
import numpy as np
from torch.autograd import Variable
from torchvision import datasets, transforms, models
import torch.nn.functional as F
import torchvision.utils as vutils
from io import open
import os
from PIL import Image
import sys
import models.resnet as ResNet
import models.senet as SENet
import pickle
import pandas as pd
import sklearn.metrics
import matplotlib.pyplot as plt
from models import resnet, resnet50_ferplus_dag, resnet50_ft_dag, resnet50_scratch_dag, senet, senet50_ferplus_dag, senet50_ft_dag, senet50_scratch_dag, vgg_face_dag, vgg_m_face_bn_dag
model = resnet50_ft_dag.resnet50_ft_dag(weights_path='Weights/resnet50_ft_dag.pth') # RESNET MS1M
layer = model._modules.get('avgpool')
model.eval()
scaler = transforms.Scale((224, 224))
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
to_tensor = transforms.ToTensor()
def get_vector(image):
    # Load image
    img = Image.open(image)
    # Create PyTorch variable with transformed image
    t_img = Variable(normalize(to_tensor(scaler(img))).unsqueeze(0))
    # Create a vector of zeros that will hold our feature vector
    # Output size of 2048
    my_embedding = torch.zeros(2048)
    # Define a function that will copy the output of a layer
    def copy_data(m, i, o):
        my_embedding.copy_(o.data)
    # Attach that function to our selected layer
    h = layer.register_forward_hook(copy_data)
    # Run model on transformed image
    model(t_img)
    # Detach our copy function from the layer
    h.remove()
    # Return the feature vector
    return my_embedding
get_vector('C:/Users/Public/Documents/DIN_Image/average_images/' + list_dir[2])
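A hedged note on the likely cause: this error usually means model._modules.get('avgpool') returned None, i.e. the loaded model has no submodule named exactly 'avgpool'. A small sketch for locating the real pooling layer (the name used below is only a guess; pick one from the printout):

# List every submodule name so the pooling layer can be identified
for name, module in model.named_modules():
    print(name, type(module).__name__)

layer = model._modules.get('pool5_7x7_s1')  # hypothetical name; replace with one from the printout
assert layer is not None, "no submodule with that name; choose one from the printout"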
I get the error shown below with the following code:
import tensorflow as tf
import numpy as np
import pandas as pd
import seaborn as sns
from random import shuffle
import datetime
from matplotlib import pyplot
from numpy import mean
from numpy import std
from sklearn.model_selection import KFold
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.models import load_model
first_branch= Input(shape=(28,28,1))
first_branch_st1 = Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28,1))(first_branch)
first_branch_st2 = MaxPooling2D((2, 2))(first_branch_st1)
first_branch_st3 = Flatten()(first_branch_st2)
first_branch_st4 = Dense(100, activation='relu')(first_branch_st3)
This produces the following error:
ValueError Traceback (most recent call last)
/tmp/ipykernel_1973/3453405928.py in <module>
2 first_branch_st1 = Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28,1))(first_branch)
3 first_branch_st2 = MaxPooling2D((2, 2))(first_branch_st1)
----> 4 first_branch_st3 = Flatten()(first_branch_st2)
5 first_branch_st4 = Dense(100, activation='relu')(first_branch_st3)
6
ValueError: Attempt to convert a value (None) with an unsupported type (<class 'NoneType'>) to a Tensor.
According to an existing question with the same error, it happens when you mix up keras and tf.keras. But I think I have defined the imports accordingly, so unless there is a clash between imports or a bad definition of them, I do not think that is the problem. Is there another known solution?
There is only one bug I can see: Input is not defined, although that alone is unrelated to the error mentioned. Use the following:
first_branch = tf.keras.Input(shape=(28, 28, 1))
My advice is to check that you are on a recent version of TensorFlow, e.g. 2.4.1.
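For completeness, a minimal runnable version of the branch with that fix applied (a sketch using only the layers shown in the question):

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

first_branch = tf.keras.Input(shape=(28, 28, 1))
first_branch_st1 = Conv2D(32, (3, 3), activation='relu')(first_branch)
first_branch_st2 = MaxPooling2D((2, 2))(first_branch_st1)
first_branch_st3 = Flatten()(first_branch_st2)
first_branch_st4 = Dense(100, activation='relu')(first_branch_st3)
model = tf.keras.Model(inputs=first_branch, outputs=first_branch_st4)
model.summary()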
I had the same error pop up when I updated to the newest version of TF on Google Colab (2.6.0). Previously I was using 2.5 and it was running error-free.
After changing from
from tensorflow.python.keras.applications.efficientnet import EfficientNetB3
to
from tensorflow.keras.applications.efficientnet import EfficientNetB3
it was solved.
I used to generate heatmaps for my convolutional neural networks, based on the stand-alone Keras library on top of TensorFlow 1. That worked fine; however, after my switch to TF 2.0 and the built-in tf.keras implementation (with eager execution), I can no longer use my old heatmap-generation code.
So I rewrote parts of my code for TF 2.0 and ended up with the following:
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.models import load_model
from tensorflow.keras import preprocessing
from tensorflow.keras import backend as K
from tensorflow.keras import models
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
image_size = 150
image_path = "/tmp/images/test-image.jpg"
model_path = "/tmp/models/prototype/basic_vgg16.h5"
# Load pre-trained Keras model and the image to classify
model = load_model(model_path) # VGG16 CNN with custom classifier head
image = load_img(image_path, target_size=(image_size, image_size))
img_tensor = preprocessing.image.img_to_array(image)
img_tensor = np.expand_dims(img_tensor, axis=0)
img_tensor = preprocess_input(img_tensor)
input_layer = model.get_layer("model_input")
conv_layer = model.get_layer("block5_conv3")
heatmap_model = models.Model([model.inputs], [conv_layer.output, model.output])
# Get gradient of the winner class w.r.t. the output of the (last) conv. layer
with tf.GradientTape() as gtape:
    conv_output, predictions = heatmap_model(img_tensor)
    loss = predictions[:, np.argmax(predictions[0])]
    grads = gtape.gradient(loss, conv_output)
    pooled_grads = K.mean(grads, axis=(0, 1, 2))
# Get values of pooled grads and model conv. layer output as Numpy arrays
iterate = K.function([model.inputs], [pooled_grads, conv_layer.output[0]])
pooled_grads_value, conv_layer_output_value = iterate([img_tensor])
# Multiply each channel in the feature-map array by "how important it is"
for i in range(pooled_grads_value.shape[0]):
    conv_layer_output_value[:, :, i] *= pooled_grads_value[i]
# Channel-wise mean of resulting feature-map is the heatmap of class activation
heatmap = np.mean(conv_layer_output_value, axis=-1)
heatmap = np.maximum(heatmap, 0)
max_heat = np.max(heatmap)
if max_heat == 0:
    max_heat = 1e-10
heatmap /= max_heat
# Render heatmap via pyplot
plt.matshow(heatmap)
plt.show()
But now the following line:
iterate = K.function([model.inputs], [pooled_grads, conv_layer.output[0]])
leads to this error message:
AttributeError: Tensor.op is meaningless when eager execution is enabled.
I always used Keras and did not work with TF directly, so I am a bit lost here.
Any ideas what the problem could be?
PS: If you want to copy and paste this code, you can create the VGG16-based model like so:
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras import layers, models, optimizers

# Create Keras model from pre-trained VGG16 and custom classifier
input_layer = layers.Input(shape=(image_size, image_size, 3), name="model_input")
vgg16_model = VGG16(weights="imagenet", include_top=False, input_tensor=input_layer)
model_head = vgg16_model.output
model_head = layers.Flatten(name="model_head_flatten")(model_head)
model_head = layers.Dense(256, activation="relu")(model_head)
model_head = layers.Dense(3, activation="softmax")(model_head)
model = models.Model(inputs=input_layer, outputs=model_head)
model.compile(loss="categorical_crossentropy", optimizer=optimizers.Adam(), metrics=["accuracy"])
At the end of the GradientTape block, conv_output and grads already hold the values. The iterate function is no longer needed to compute them.
Working example below:
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.models import load_model
from tensorflow.keras import preprocessing
from tensorflow.keras import backend as K
from tensorflow.keras import models
import tensorflow as tf
import numpy as np
image_size = 224
# Load pre-trained Keras model and the image to classify
model = tf.keras.applications.vgg16.VGG16()
image = np.random.random((image_size, image_size, 3))
img_tensor = preprocessing.image.img_to_array(image)
img_tensor = np.expand_dims(img_tensor, axis=0)
img_tensor = preprocess_input(img_tensor)
conv_layer = model.get_layer("block5_conv3")
heatmap_model = models.Model([model.inputs], [conv_layer.output, model.output])
# Get gradient of the winner class w.r.t. the output of the (last) conv. layer
with tf.GradientTape() as gtape:
    conv_output, predictions = heatmap_model(img_tensor)
    loss = predictions[:, np.argmax(predictions[0])]
    grads = gtape.gradient(loss, conv_output)
    pooled_grads = K.mean(grads, axis=(0, 1, 2))
heatmap = tf.reduce_mean(tf.multiply(pooled_grads, conv_output), axis=-1)
heatmap = np.maximum(heatmap, 0)
max_heat = np.max(heatmap)
if max_heat == 0:
    max_heat = 1e-10
heatmap /= max_heat
print(heatmap.shape)
One more note: to view the heatmap:
import matplotlib.pyplot as plt

hm = np.squeeze(heatmap)
hm.shape  # (14, 14)
plt.imshow(hm)
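As an optional extra (a sketch continuing from the variables above, not part of the original answer), the 14x14 heatmap can be upsampled and blended over the input image:

import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

hm = np.squeeze(heatmap)  # (14, 14), already normalized to [0, 1] above
hm_img = Image.fromarray(np.uint8(255 * hm)).resize((image_size, image_size), Image.BILINEAR)
plt.imshow(image)                          # the original (un-preprocessed) image from above
plt.imshow(hm_img, cmap='jet', alpha=0.4)  # blend the upsampled heatmap on top
plt.show()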
Can we resize an image from 64x64 to 256x256 without affecting the resolution? Is there a way to add zeros on the new rows and columns of the resized output? I am working on VGG, and I get an error when feeding in my 64x64 input image, because VGGFace is a pretrained model that expects an input size of 224.
Code:
from keras.models import Model, Sequential
from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation
from PIL import Image
import numpy as np
from keras.preprocessing.image import load_img, save_img, img_to_array
from keras.applications.imagenet_utils import preprocess_input
from keras.preprocessing import image
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
# from sup5 import X_test, Y_test
from sklearn.metrics import roc_curve, auc
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
model = VGG16(weights='imagenet', include_top=False)
from keras.models import model_from_json
vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
# import pandas as pd
# test_x_predictions = deep.predict(X_test)
# mse = np.mean(np.power(X_test - test_x_predictions, 2), axis=1)
# error_df = pd.DataFrame({'Reconstruction_error': mse,
# 'True_class': Y_test})
# error_df.describe()
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(224, 224))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)
    return img

def findCosineSimilarity(source_representation, test_representation):
    a = np.matmul(np.transpose(source_representation), test_representation)
    b = np.sum(np.multiply(source_representation, source_representation))
    c = np.sum(np.multiply(test_representation, test_representation))
    return 1 - (a / (np.sqrt(b) * np.sqrt(c)))

def findEuclideanDistance(source_representation, test_representation):
    euclidean_distance = source_representation - test_representation
    euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance))
    euclidean_distance = np.sqrt(euclidean_distance)
    return euclidean_distance
# for encod epsilon = 0.004
epsilon = 0.16
# epsilon = 0.095
retFalse,ret_val, euclidean_distance = verifyFace(str(i)+"test.jpg", str(j)+"train.jpg", epsilon)
verifyFace1(str(i) + "testencod.jpg", str(j) + "trainencod.jpg")
Error:
ValueError: operands could not be broadcast together with
remapped shapes [original->remapped]:
(512,14,14)->(512,newaxis,newaxis) (14,14,512)->(14,newaxis,newaxis)
and requested shape (14,512)
I'm not sure exactly what you mean; here is my solution for you.
First method: if I understand you correctly, to pad with zero values you need to use numpy.pad on each channel of the image.
I will use this image as an example; its shape is 158x84x3.
import numpy as np
import cv2
from matplotlib import pyplot as mlt

image = cv2.imread('zero.png')
shape = image.shape
add_x = int((256 - shape[0]) / 2)
add_y = int((256 - shape[1]) / 2)
temp_img = np.zeros((256, 256, 3), dtype=int)
for i in range(3):
    temp_img[:, :, i] = np.pad(image[:, :, i], ((add_x, add_x), (add_y, add_y)), 'constant', constant_values=0)
mlt.imshow(temp_img)
With this code I can add padding to the picture and get a result like this.
Now its shape is 256x256x3, as you wanted.
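As a side note (not in the original answer), numpy.pad can pad all three channels in one call, which avoids the per-channel loop:

temp_img = np.pad(image, ((add_x, add_x), (add_y, add_y), (0, 0)), 'constant', constant_values=0)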
Another method is to use the Image class from the Pillow library. With it, you can resize the picture without losing too much information, using very simple code:
from PIL import Image
image = Image.fromarray(image)
img = image.resize((256, 256), Image.BILINEAR)
mlt.imshow(img)
That code will give you this result.
Hope my answer helps you solve the problem!
I think the best way to solve your problem is not resizing the image but rather to load the model specifying the input shape of your images.
Assuming you are using keras:
model = VGG16(weights=..., include_top=False, input_shape=(64,64,3))
include_top has to be set to False in order to change the input shape, which means you will need to do some sort of training yourself, as sketched below.
If you need include_top to be True, resizing the input image is the best way to proceed, but a network trained on 224x224 images is probably not going to perform great with upscaled 64x64 images.
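If it helps, here is a minimal sketch of what that training setup could look like (the head size and the class count below are hypothetical):

from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Flatten, Dense

base = VGG16(weights='imagenet', include_top=False, input_shape=(64, 64, 3))
x = Flatten()(base.output)
x = Dense(128, activation='relu')(x)     # hypothetical head size
out = Dense(2, activation='softmax')(x)  # hypothetical number of classes
model = Model(inputs=base.input, outputs=out)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])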
I think you mean resizing (resolution) without increasing the size (amount of data). As far as I'm aware, the answer is no, because making the resolution bigger literally means a higher pixel count. You could increase the resolution without increasing the file size too much, though: there are plenty of programs, websites, and utilities for lightweight photo resizing, so maybe you could integrate such a service into your code.
I was trying to test a network, but I keep getting an annoying error that I am not quite sure I understand.
import keras
from keras.models import Sequential
from keras.optimizers import SGD
from keras.layers.core import Dense, Activation, Lambda, Reshape,Flatten
from keras.layers import Conv1D,Conv2D,MaxPooling2D, MaxPooling1D, Reshape
from keras.utils import np_utils
from keras.models import Model
from keras.layers import Input, Dense
from keras.layers import Dropout
from keras import backend as K
from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import CSVLogger
from keras.callbacks import EarlyStopping
from keras.layers.merge import Concatenate
from keras.callbacks import ModelCheckpoint
import random
import numpy as np
window_height = 8
filter_size=window_height
pooling_size = 28
stride_step = 2
def fws():
    np.random.seed(100)
    input = Input(5, window_height, 1)
    shared_conv = Conv2D(filters=1, kernel_size=(0, window_height, 1))
    output = shared_conv(input)
    print output.shape

fws()
Error message:
File "experiment.py", line 34, in <module>
fws()
File "experiment.py", line 29, in fws
input = Input(5,window_height,1)
File "/usr/local/lib/python2.7/dist-packages/keras/engine/topology.py", line 1426, in Input
input_tensor=tensor)
File "/usr/local/lib/python2.7/dist-packages/keras/legacy/interfaces.py", line 87, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/keras/engine/topology.py", line 1321, in __init__
batch_input_shape = tuple(batch_input_shape)
TypeError: 'int' object is not iterable
Why am I getting this error? In the network I am trying to use a shared convolution layer, as the code shows, and for test purposes I want to see what the output becomes.
Your line:
input = Input(5,window_height,1)
is giving this error.
Compare this with an example from Keras:
https://keras.io/getting-started/functional-api-guide/
inputs = Input(shape=(784,))
The Input object expects an iterable for shape, but you passed it an int. In the example you can see how they handle that for a one-dimensional input.
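A minimal corrected sketch (note that Conv2D's kernel_size must also be two positive integers; the value below is only an illustrative choice):

from keras.layers import Input, Conv2D

window_height = 8

def fws():
    inputs = Input(shape=(5, window_height, 1))
    shared_conv = Conv2D(filters=1, kernel_size=(5, window_height))  # illustrative kernel size
    output = shared_conv(inputs)
    print(output.shape)

fws()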
EDIT:
I don't know why this is a popular answer - if you're getting this error because you're following bad example code somewhere, be sure to raise that with whatever source you're getting it from.