How do I get the intermediate output of a transfer-learning model in Keras? E.g.:
from keras.models import Sequential
from keras.layers import Dense
# ... Other Imports..
from tensorflow.keras.applications.resnet50 import ResNet50
model = Sequential()
resnet = ResNet50(include_top = False, pooling = 'avg', weights = 'imagenet')
model.add(resnet)
model.add(Dense(10, activation = 'softmax'))
model.layers[0].trainable = False
Tried:
import tensorflow as tf

layer_output = model.get_layer('resnet50').output
intermediate_model = tf.keras.models.Model(inputs=model.input, outputs=layer_output)
There's an unresolved issue in TensorFlow about this problem. According to the issue, you need to pass the inputs of both the outer model and the inner model to get the output of the inner model:
import numpy as np
import tensorflow as tf
layer_output = model.get_layer("resnet50").output
intermediate_model = tf.keras.models.Model(inputs=[model.input, resnet.input], outputs=[layer_output])
input_data = np.random.rand(1, 224, 224, 3)
result = intermediate_model.predict([input_data, input_data])
print(result[0].shape)
(7, 7, 2048)
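Alternatively, since resnet is itself a Keras model, a simpler route (my own sketch, not from the linked issue) is to call the inner model directly:
import numpy as np

# Sketch: call the inner ResNet model directly instead of building a
# dual-input intermediate model.
input_data = np.random.rand(1, 224, 224, 3)
features = resnet.predict(input_data)
# With pooling='avg' this is (1, 2048); without pooling it would be
# (1, 7, 7, 2048), matching the per-sample shape printed above.
print(features.shape)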
Related
import keras
print(keras.__version__)
#2.3.0
from keras.models import Sequential
from keras.layers import Input, Dense, TimeDistributed
from keras.models import Model
from keras.applications.resnet50 import ResNet50
model = Sequential()
resnet = ResNet50(include_top = False, pooling = 'avg', weights = 'imagenet')
model.add(resnet)
model.add(Dense(10, activation = 'relu'))
model.add(Dense(6, activation = 'sigmoid'))
model.summary()
# Training: model.fit(...) done
Now, how do I get just the output from the ResNet layer?
model.layers[0]._name='resnet50'
print(model.layers[0].name) # prints resnet50
layer_output = model.get_layer("resnet50").output
intermediate_model = Model(inputs=[model.input, resnet.input], outputs=[layer_output])
result = intermediate_model.predict([x, x])
print(result.shape)
print(result[0].shape)
This raised the following error:
AttributeError: Layer resnet50 has multiple inbound nodes, hence the
notion of "layer output" is ill-defined. Use
get_output_at(node_index) instead.
Try again using tf.keras to import the model and layers:
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, Dense, TimeDistributed
from tensorflow.keras.models import Model
and then run the same:
model.layers[0]._name='resnet50'
print(model.layers[0].name) # prints resnet50
layer_output = model.get_layer("resnet50").output
intermediate_model = Model(inputs=[model.input, resnet.input], outputs=[layer_output])
x = tf.ones((1, 250, 250, 3))
result = intermediate_model.predict([x, x])
print(result.shape)
print(result[0].shape)
Output:
resnet50
(1, 2048)
(2048,)
I'm trying to extract image features with ResNet models pretrained on the ImageNet dataset; the network should give 2048 features. When I experimented with TensorFlow it gave that feature length, but when I try the PyTorch version of ResNet it gives me a length of 1000.
The code is below.
For TensorFlow:
import numpy as np
from numpy.linalg import norm
import pickle
from tqdm import tqdm, tqdm_notebook
import os
import random
import time
import math
import tensorflow
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.applications.mobilenet import MobileNet
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.xception import Xception
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Flatten, Dense, Dropout, GlobalAveragePooling2D
def model_picker(name):
    if name == 'vgg16':
        model = VGG16(weights='imagenet',
                      include_top=False,
                      input_shape=(224, 224, 3),
                      pooling='max')
    elif name == 'vgg19':
        model = VGG19(weights='imagenet',
                      include_top=False,
                      input_shape=(224, 224, 3),
                      pooling='max')
    elif name == 'mobilenet':
        model = MobileNet(weights='imagenet',
                          include_top=False,
                          input_shape=(224, 224, 3),
                          pooling='max',
                          depth_multiplier=1,
                          alpha=1)
    elif name == 'inception':
        model = InceptionV3(weights='imagenet',
                            include_top=False,
                            input_shape=(224, 224, 3),
                            pooling='max')
    elif name == 'resnet':
        model = ResNet50(weights='imagenet',
                         include_top=False,
                         input_shape=(224, 224, 3),
                         pooling='max')
    elif name == 'xception':
        model = Xception(weights='imagenet',
                         include_top=False,
                         input_shape=(224, 224, 3),
                         pooling='max')
    else:
        # without this, `return model` would raise UnboundLocalError
        raise ValueError("Specified model not available")
    return model
model_architecture = 'resnet'
model = model_picker(model_architecture)
def extract_features(img_path, model):
    input_shape = (224, 224, 3)
    img = image.load_img(img_path,
                         target_size=(input_shape[0], input_shape[1]))
    img_array = image.img_to_array(img)
    expanded_img_array = np.expand_dims(img_array, axis=0)
    preprocessed_img = preprocess_input(expanded_img_array)
    features = model.predict(preprocessed_img)
    flattened_features = features.flatten()
    normalized_features = flattened_features / norm(flattened_features)
    return normalized_features
features = extract_features('dog.jpg', model)
print(len(features))
> 2048
As you can see, it gives a length of 2048 features through the ResNet50 model.
Below is the code for PyTorch
import numpy as np
import torch
from torchvision import models, transforms
from PIL import Image
from torch.autograd import Variable

res_model = models.resnet50(pretrained=True)
def image_loader(image, model, use_gpu=False):
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor()
    ])
    img = Image.open(image)
    img = transform(img)
    print(img.shape)
    x = Variable(torch.unsqueeze(img, dim=0).float(), requires_grad=False)
    print(x.shape)
    if use_gpu:
        x = x.cuda()
        model = model.cuda()
    y = model(x).cpu()
    print(y.size())
    y = torch.squeeze(y)
    y = y.data.numpy()
    print(y.shape)
    print(len(y))
    np.savetxt('features.txt', y, delimiter=',')
image_loader('dog.jpg',res_model)
> torch.Size([3, 224, 224])
> torch.Size([1, 3, 224, 224])
> torch.Size([1, 1000])
> (1000,)
> 1000
As you can see, it gives a length of 1000 for the features extracted through the ResNet model in PyTorch.
Why am I getting different lengths? Shouldn't I get the same length of 2048 in both, according to the architecture, or am I doing something wrong?
Printing the layers of the PyTorch ResNet will yield:
(fc): Linear(in_features=2048, out_features=1000, bias=True)
as the last layer of the ResNet in PyTorch, because the model is by default set up for use as a classifier on ImageNet data (1000 classes). If you want 2048 features instead, you can simply delete this last layer:
del model.fc
and your resulting output will then be of the desired dimension.
Edit: it is perhaps better to simply overwrite model.fc with an identity function rather than deleting it, so that it doesn't cause errors when forward is called:
model.fc = torch.nn.Identity()
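As a quick check (a sketch of mine, not from the original answer; the random tensor stands in for a preprocessed image), the Identity swap yields the 2048-dimensional features:
import torch
from torchvision import models

model = models.resnet50(pretrained=True)
model.fc = torch.nn.Identity()  # replace the 1000-way classifier head
model.eval()

with torch.no_grad():
    x = torch.randn(1, 3, 224, 224)  # dummy input in place of a real image
    y = model(x)
print(y.shape)  # torch.Size([1, 2048])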
I am a beginner in text processing techniques and I am trying to execute the below code.
from keras.layers import Dense, Input, GlobalMaxPooling1D
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.layers import Conv2D, MaxPooling2D, Dropout, concatenate
from keras.layers.core import Reshape, Flatten
from keras.callbacks import EarlyStopping
from keras.optimizers import Adam
from keras.models import Model
from keras import regularizers
sequence_length = trn_abs.shape[1]
filter_sizes = [3,4,5]
num_filters = 100
drop = 0.5
inputs = Input(shape=(sequence_length,))
embedding = embedding_layer(inputs)
reshape = Reshape((sequence_length,embedding_dim,1))(embedding)
conv_0 = Conv2D(num_filters, (filter_sizes[0], embedding_dim),activation='relu',kernel_regularizer=regularizers.l2(0.01))(reshape)
conv_1 = Conv2D(num_filters, (filter_sizes[1], embedding_dim),activation='relu',kernel_regularizer=regularizers.l2(0.01))(reshape)
conv_2 = Conv2D(num_filters, (filter_sizes[2], embedding_dim),activation='relu',kernel_regularizer=regularizers.l2(0.01))(reshape)
maxpool_0 = MaxPooling2D((sequence_length - filter_sizes[0] + 1, 1), strides=(1,1))(conv_0)
maxpool_1 = MaxPooling2D((sequence_length - filter_sizes[1] + 1, 1), strides=(1,1))(conv_1)
maxpool_2 = MaxPooling2D((sequence_length - filter_sizes[2] + 1, 1), strides=(1,1))(conv_2)
merged_tensor = concatenate([maxpool_0, maxpool_1, maxpool_2], axis=1)
flatten = Flatten()(merged_tensor)
reshape = Reshape((3*num_filters,))(flatten)
dropout = Dropout(drop)(flatten)
output = Dense(units=3, activation='softmax',kernel_regularizer=regularizers.l2(0.01))(dropout)
# this creates a model that includes
model = Model(inputs, output)
adam = Adam(lr=1e-3)
model.compile(loss='categorical_crossentropy',
optimizer=adam,
metrics=['acc'])
callbacks = [EarlyStopping(monitor='val_loss')]
model.fit(X_trn, trn[target_cols], epochs=100)
and I am getting the following error:
ValueError: A target array with shape (11203, 25) was passed for output of shape (None, 3) while using as loss `categorical_crossentropy`. This loss expects targets to have the same shape as the output.
Could anyone help me with this? I am new to Stack Overflow too, so please accept my apologies for the ill-formatting of the question.
It's really important that the number of neurons at the end of your neural network equals the number of categories you have. So try this:
output = Dense(units=25, activation='softmax'...
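To make the fix robust, a sketch (mine, assuming trn[target_cols] is the 2-D target array from the fit call above, with 25 columns per the error message) that derives the width from the targets:
# Hypothetical: size the output layer from the target array itself so the
# shapes always match (the error reported a target shape of (11203, 25)).
num_classes = trn[target_cols].shape[1]  # 25 here
output = Dense(units=num_classes, activation='softmax',
               kernel_regularizer=regularizers.l2(0.01))(dropout)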
This is my code for joining a ResNet50 model with the model below, which I want to train on my dataset. I want to freeze the layers of the ResNet50 model (see trainable = False in the code).
Here I'm importing the ResNet50 model:
```
import tensorflow.keras
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
resnet50_imagnet_model = tensorflow.keras.applications.resnet.ResNet50(
    weights="imagenet",
    include_top=False,
    input_shape=(150, 150, 3),
    pooling='max')
```
Here I create my model:
```
# freeze feature layers and rebuild model
for l in resnet50_imagnet_model.layers:
l.trainable = False
# build the model
model5 = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(12, activation='softmax')
]
# join the two models
model_using_pre_trained_resnet50 = tf.keras.Sequential(resnet50_imagnet_model.layers + model5 )
```
The last line doesn't work and I get this error:
Input 0 of layer conv2_block1_3_conv is incompatible with the layer: expected axis -1 of input shape to have value 64 but received input with shape [None, 38, 38, 256]
Thanks for the help.
Rebuilding ResNet50 layer-by-layer in a Sequential model fails because its residual blocks have skip connections, which a linear stack of layers cannot represent. You can use Keras' functional API instead, like below:
from tensorflow.keras.applications.resnet50 import ResNet50
import tensorflow as tf
resnet50_imagenet_model = ResNet50(include_top=False, weights='imagenet', input_shape=(150, 150, 3))
#Flatten output layer of Resnet
flattened = tf.keras.layers.Flatten()(resnet50_imagenet_model.output)
#Fully connected layer 1
fc1 = tf.keras.layers.Dense(128, activation='relu', name="AddedDense1")(flattened)
#Fully connected layer, output layer
fc2 = tf.keras.layers.Dense(12, activation='softmax', name="AddedDense2")(fc1)
model = tf.keras.models.Model(inputs=resnet50_imagenet_model.input, outputs=fc2)
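Since the original goal was to freeze the pretrained layers, a minimal sketch reusing the names above (the compile settings are placeholders of mine):
# Freeze the pretrained ResNet50 backbone so only the added Dense layers train.
for layer in resnet50_imagenet_model.layers:
    layer.trainable = False

model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # placeholder settings
              metrics=['accuracy'])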
Also refer to this question.
I have been trying to create a multi-input model using Keras, but got errors. The idea is to combine the text and the corresponding topics to make sentiment predictions. Here's the code:
import numpy as np
from keras.utils import to_categorical
text = np.random.randint(5000, size=(442702, 200), dtype='int32')
topic = np.random.randint(2, size=(442702, 227), dtype='int32')
sentiment = to_categorical(np.random.randint(5, size=442702), dtype='int32')
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Activation, Embedding, Flatten, GlobalMaxPool1D, Dropout, Conv1D, concatenate
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from keras.losses import binary_crossentropy
from keras.optimizers import Adam
from keras.utils import plot_model
text_input = Input(shape=(200,), dtype='int32', name='text')
text_encoded = Embedding(input_dim=5000, output_dim=20, input_length=200)(text_input)
text_encoded = Dropout(0.1)(text_encoded)
text_encoded = Conv1D(300, 3, padding='valid', activation='relu', strides=1)(text_encoded)
text_encoded = GlobalMaxPool1D()(text_encoded)
topic_input = Input(shape=(227,), dtype='int32', name='topic')
concatenated = concatenate([text_encoded, topic_input])
sentiment = Dense(5, activation='softmax')(concatenated)
model = Model(inputs=[text_encoded, topic_input], outputs=sentiment)
# summarize layers
print(model.summary())
# plot graph
plot_model(model)
However, this gives me the below error:
TypeError: Tensors in list passed to 'values' of 'ConcatV2' Op have types [float32, int32] that don't all match.
Now if I change dtype of topic_input from 'int32' to 'float32', I got a different error:
ValueError: Graph disconnected: cannot obtain value for tensor Tensor("text_37:0", shape=(?, 200), dtype=int32) at layer "text". The following previous layers were accessed without issue: []
On the other hand, part of the model works just fine with the sequential API.
model = Sequential()
model.add(Embedding(5000, 20, input_length=200))
model.add(Dropout(0.1))
model.add(Conv1D(300, 3, padding='valid', activation='relu', strides=1))
model.add(GlobalMaxPool1D())
model.add(Dense(227))
model.add(Activation('sigmoid'))
print(model.summary())
Any pointers are highly appreciated.
There are a few issues with your Keras functional API implementation:
1. You should use the Concatenate layer, as in Concatenate(axis=-1)([text_encoded, topic_input]).
2. In the concatenation you are trying to combine an int32 tensor and a float32 tensor, which is not allowed. What you should do is cast the topic input to float32, e.g. from keras.backend import cast and concatenated = Concatenate(axis=-1)([text_encoded, cast(topic_input, 'float32')]). (In the working code below the cast is wrapped in a Lambda layer so that it stays part of the Keras graph.)
3. You have a variable conflict: there are two sentiment variables, one pointing to the to_categorical output and the other to the output of the final Dense layer.
4. Your model inputs cannot be intermediate tensors like text_encoded. They should come from Input layers.
To help with your implementation, here's a working version of your code (I am not sure if this is exactly what you wanted though) in TF 1.13.
import numpy as np
from keras.utils import to_categorical

text = np.random.randint(5000, size=(442702, 200), dtype='int32')
topic = np.random.randint(2, size=(442702, 227), dtype='int32')
sentiment1 = to_categorical(np.random.randint(5, size=442702), dtype='int32')
from keras.models import Sequential
from keras.layers import Input, Dense, Activation, Embedding, Flatten, GlobalMaxPool1D, Dropout, Conv1D, Concatenate, Lambda
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from keras.losses import binary_crossentropy
from keras.optimizers import Adam
from keras.backend import cast
from keras.models import Model
text_input = Input(shape=(200,), dtype='int32', name='text')
text_encoded = Embedding(input_dim=5000, output_dim=20, input_length=200)(text_input)
text_encoded = Dropout(0.1)(text_encoded)
text_encoded = Conv1D(300, 3, padding='valid', activation='relu', strides=1)(text_encoded)
text_encoded = GlobalMaxPool1D()(text_encoded)
topic_input = Input(shape=(227,), dtype='int32', name='topic')
topic_float = Lambda(lambda x:cast(x, 'float32'), name='Floatconverter')(topic_input)
concatenated = Concatenate(axis=-1)([text_encoded, topic_float])
sentiment = Dense(5, activation='softmax')(concatenated)
model = Model(inputs=[text_input, topic_input], outputs=sentiment)
# summarize layers
print(model.summary())
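And, as a hypothetical next step (the learning rate, batch size, and epoch count are placeholders of mine), training with the arrays defined above would look like:
# Hypothetical training call; hyperparameters are illustrative only.
model.compile(optimizer=Adam(lr=1e-3),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit([text, topic], sentiment1, batch_size=128, epochs=1)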
Hope these help.