Why does TensorBoard display the wrong cosine distance? - python

I want to visualize word embeddings in the TensorBoard Projector, but the cosine distances don't seem right.
If I compute the cosine distances via sklearn, I get different results.
Am I using the TensorBoard Projector wrong?
TensorBoard:
https://i.imgur.com/2hRtXym.png
Sklearn:
https://i.imgur.com/49OaiEU.png
import os
import numpy as np
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
LOG_DIR = 'logs'
metadata = os.path.join(LOG_DIR, 'metadata.tsv')
arr = []
# category_embeddings -> dictionary
# category_embeddings["Category 1"] -> array([[...,...,...,...,]]) # 300 dimensions
for category in category_embeddings:
    arr.append(category_embeddings[category][0])
embds_arr = np.asarray(arr)
with open(metadata, 'w', encoding="utf-8") as metadata_file:
    for key in category_embeddings.keys():
        metadata_file.write(key + "\n")
embds = tf.Variable(embds_arr, name='embeds')
with tf.Session() as sess:
    saver = tf.train.Saver([embds])
    sess.run(embds.initializer)
    saver.save(sess, os.path.join(LOG_DIR, 'category.ckpt'))
config = projector.ProjectorConfig()
config.model_checkpoint_path = os.path.join(LOG_DIR, 'checkpoint')
embedding = config.embeddings.add()
embedding.tensor_name = embds.name
embedding.metadata_path = metadata
projector.visualize_embeddings(tf.summary.FileWriter(LOG_DIR), config)

Solved:
I tested it with different datasets and training cycles, and it seems to be a bug within TensorBoard.
Sklearn returns the correct results for the original vector space, while TensorBoard possibly calculates the distance from a reduced-dimensionality projection.
https://github.com/tensorflow/tensorboard/issues/2421
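For reference, a minimal sketch of the sklearn comparison (assuming embds_arr holds the 300-dimensional vectors built in the snippet above):
from sklearn.metrics.pairwise import cosine_similarity
# Pairwise cosine similarity in the original 300-dimensional space;
# cosine distance is 1 minus similarity.
sims = cosine_similarity(embds_arr)
dists = 1.0 - sims
print(dists[0])  # distances from the first category to all the others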

Related

Load Tensorflow model with labels

I stored a model using model.save('model') after following this tutorial:
https://towardsdatascience.com/keras-transfer-learning-for-beginners-6c9b8b7143e
The labels are taken from the directory itself.
Now I would like to load it and do a prediction on an image using the following code:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras.preprocessing import image
new_model = keras.models.load_model('./model/')
# Check its architecture
new_model.summary()
with image.load_img('testpics/mypic.jpg') as img:  # , target_size=(32,32))
    img = image.img_to_array(img)
    img = img.reshape((1,) + img.shape)
    # img = img/255
    # img = img.reshape(-1,784)
    img_class = new_model.predict(img)
    prediction = img_class[0]
    classname = img_class[0]
    print("Class: ", classname)
Sadly the output is just
Class: [1.3706615e-03 2.9885881e-03 1.6783881e-03 3.0293325e-03 2.9168031e-03
7.2344812e-04 2.0196944e-06 2.0119224e-02 2.2996603e-04 1.1960276e-05
3.0794670e-04 6.0808496e-05 1.4892215e-05 1.5410941e-02 1.2452166e-04
8.2580920e-09 2.4049083e-02 3.1140331e-05 7.4609083e-01 1.5793210e-01
2.4283256e-03 1.5755130e-04 2.4227127e-03 2.2325735e-07 7.2101393e-06
7.6298704e-03 2.0922457e-04 1.2269774e-03 5.5882465e-06 2.4516811e-04
8.5745640e-03]
And I cannot figure out how to reload the labels... could someone help me out here :/?
The model does not contain the label names, so they cannot be retrieved this way. You have to save the labels while training; you can then load and use them in the prediction phase.
I have used pickle to store the labels in a file as a serialized array. You can then load them and use the argmax of the predictions as the array index.
Here is the training phase:
import pickle
CLASS_NAMES = ['ClassA', 'ClassB']  # should be dynamic
f = open('labels.pickle', "wb")
f.write(pickle.dumps(CLASS_NAMES))
f.close()
And in the prediction:
CLASS_NAMES = pickle.loads(open('labels.pickle', "rb").read())
predictions = model.predict(predict_image)
result = CLASS_NAMES[predictions.argmax(axis=1)[0]]
So you could just load the classes and map them, no?
with open("classes.txt") as f:
    classes = f.read().splitlines()  # splitlines avoids trailing newlines in the labels
correct_classname = classes[np.argmax(classname)]  # classname is the variable equal to what you set it to in your question
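Since the tutorial derives the labels from the training directory, a minimal sketch for producing such a classes.txt at training time (assuming a hypothetical train_dir with one subfolder per class) would be:
import os
train_dir = 'data/train'  # hypothetical directory with one subfolder per class
classes = sorted(os.listdir(train_dir))  # Keras assigns class indices in sorted order
with open("classes.txt", "w") as f:
    f.write("\n".join(classes))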
I don't think the labels are saved anywhere in your model, unless you implemented that manually. If you really need to save them in the model, you can do something like this (which doesn't require you to retrain your model!):
import tensorflow as tf
import tensorflow_hub as hub
iput = tf.keras.layers.Input(...)
inferred = hub.KerasLayer(path_to_saved_model)(iput)
oput = tf.keras.layers.Lambda(lookup_fn)(inferred)
model = tf.keras.Model(inputs=[iput], outputs=[oput])
You'll then have to figure out the lookup_fn yourself, but a nice starting point is tf.lookup.TextFileInitializer.
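A minimal sketch of such a lookup_fn built on tf.lookup.TextFileInitializer (assuming a hypothetical labels.txt with one class name per line, the line number matching the class index):
import tensorflow as tf
# Table mapping line number (class index) -> class-name string.
initializer = tf.lookup.TextFileInitializer(
    'labels.txt',  # hypothetical labels file
    key_dtype=tf.int64, key_index=tf.lookup.TextFileIndex.LINE_NUMBER,
    value_dtype=tf.string, value_index=tf.lookup.TextFileIndex.WHOLE_LINE)
table = tf.lookup.StaticHashTable(initializer, default_value='unknown')
def lookup_fn(probs):
    # Map the argmax of each prediction row to its class name.
    return table.lookup(tf.argmax(probs, axis=-1))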

convert keras h5 to tensorflow pb for batch inference

I have a problem making batch inference with a tensorflow protobuf graph exported from a keras h5 model. Even though the exported pb graph can accept multiple inputs (samples), it always gives a single output regardless of the number of inputs. Here is a simple example to demonstrate the problem.
from keras.models import Model,load_model
from keras.layers import Dense, Input
from keras import backend as K
import tensorflow as tf
import numpy as np
import os
import os.path as osp
pinput = Input(shape=[10,], name='my_input')
poutput = Dense(1, activation='sigmoid')(pinput)
model = Model(inputs=[pinput], outputs=[poutput])
model.compile(loss='mean_squared_error',optimizer='sgd',metrics=['accuracy'])
data = np.random.random((100, 10))
labels = np.random.randint(2, size=(100, 1))
model.fit(data, labels, epochs=1, batch_size=32)
x = np.random.random((3, 10))
y = model.predict(x)
print y
####################################
# Save keras h5 to tensorflow pb
####################################
K.set_learning_phase(0)
#alias output names
numoutputs = 1
pred = [None]*numoutputs
pred_node_names = [None]*numoutputs
for i in range(numoutputs):
    pred_node_names[i] = 'output'+'_'+str(i)
    pred[i] = tf.identity(model.output[i], name=pred_node_names[i])
print('Output nodes names are: ', pred_node_names)
sess = K.get_session()
# Write the graph in human readable
f = 'graph_def_for_reference.pb.ascii'
tf.train.write_graph(sess.graph.as_graph_def(), '.', f, as_text=True)
input_graph_def = sess.graph.as_graph_def()
#freeze graph
from tensorflow.python.framework.graph_util import convert_variables_to_constants
output_names = pred_node_names
output_names += [v.op.name for v in tf.global_variables()]
constant_graph = convert_variables_to_constants(sess, input_graph_def,output_names)
# Write the graph in binary .pb file
from tensorflow.python.framework import graph_io
graph_io.write_graph(constant_graph, '.', 'model.pb', as_text=False)
def load_graph(frozen_graph_filename):
    # We load the protobuf file from the disk and parse it to retrieve the
    # unserialized graph_def
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    # Then, we import the graph_def into a new Graph and return it
    with tf.Graph().as_default() as graph:
        # The name var will prefix every op/node in your graph
        # Since we load everything in a new graph, this is not needed
        tf.import_graph_def(graph_def, name="prefix")
    return graph
###################################
# Test batch inference with tf
###################################
graph = load_graph("model.pb")
for op in graph.get_operations():
print(op.name)
minput = graph.get_tensor_by_name('prefix/my_input:0')
moutput = graph.get_tensor_by_name('prefix/output_0:0')
with tf.Session(graph=graph) as sess:
y = sess.run(moutput, feed_dict={minput: x})
print y
The output of the run is
Epoch 1/1
100/100 [==============================] - 0s 661us/step - loss: 0.2655 - acc: 0.3900
[[0.62018263]
[0.41664478]
[0.40322617]]
('Output nodes names are: ', ['output_0'])
prefix/my_input
prefix/dense_1/kernel
prefix/dense_1/kernel/read
prefix/dense_1/bias
prefix/dense_1/bias/read
prefix/dense_1/MatMul
prefix/dense_1/BiasAdd
prefix/dense_1/Sigmoid
prefix/SGD/iterations
prefix/SGD/lr
prefix/SGD/momentum
prefix/SGD/decay
prefix/training/SGD/Variable
prefix/training/SGD/Variable_1
prefix/strided_slice/stack
prefix/strided_slice/stack_1
prefix/strided_slice/stack_2
prefix/strided_slice
prefix/output_0
[0.62018263]
You can see the keras h5 graph gives 3 outputs while the tensorflow pb graph just gives the first output. What am I doing wrong? I would like to
modify the h5-to-pb conversion process so that I can do batch inference using the pb graph with the Python and C++ tensorflow backends.
It turns out this is due to a bug I inherited from k2tf_convert:
pred[i] = tf.identity(model.output[i], name=pred_node_names[i])
should be
pred[i] = tf.identity(model.outputs[i], name=pred_node_names[i])
It seems the keras model class has both 'output' and 'outputs' members, which makes this bug hard to track.
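A quick way to see the difference (a sketch, assuming the single-output model built above): model.output is the output tensor itself, so indexing it slices the batch dimension, while model.outputs is the list of output tensors. That also explains the prefix/strided_slice ops in the frozen graph and the single-row result.
print(model.output)      # Tensor("dense_1/Sigmoid:0", shape=(?, 1), ...): the output tensor
print(model.output[0])   # strided_slice: row 0 of the batch only, shape (1,)
print(model.outputs[0])  # same tensor as model.output, full shape (?, 1)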

principal component analysis in RNN

If I want to project the feature sequences A, B, and C to a target sequence with a tensorflow LSTM, how can I know how much each feature affected the target? Does principal component analysis help? If PCA can help, how do I use it?
The structure (columns) of the data set is as below:
A sequence
B sequence
C sequence
Target sequence
What would the principal components of this sequence be? What you could do is take the PCA of A sequence, B sequence and C sequence and visualise that (see the sketch after the tutorial code below)...
Here is a simple tutorial on visualising PCA with Tensorboard: http://www.pinchofintelligence.com/simple-introduction-to-tensorboard-embedding-visualisation/
%matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import os
from tensorflow.contrib.tensorboard.plugins import projector
from tensorflow.examples.tutorials.mnist import input_data
LOG_DIR = 'minimalsample'
NAME_TO_VISUALISE_VARIABLE = "mnistembedding"
TO_EMBED_COUNT = 500
path_for_mnist_sprites = os.path.join(LOG_DIR,'mnistdigits.png')
path_for_mnist_metadata = os.path.join(LOG_DIR,'metadata.tsv')
mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
batch_xs, batch_ys = mnist.train.next_batch(TO_EMBED_COUNT)
embedding_var = tf.Variable(batch_xs, name=NAME_TO_VISUALISE_VARIABLE)
summary_writer = tf.summary.FileWriter(LOG_DIR)
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
# Specify where you find the metadata
embedding.metadata_path = path_for_mnist_metadata #'metadata.tsv'
# Specify where you find the sprite (we will create this later)
embedding.sprite.image_path = path_for_mnist_sprites #'mnistdigits.png'
embedding.sprite.single_image_dim.extend([28,28])
# Say that you want to visualise the embeddings
projector.visualize_embeddings(summary_writer, config)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"), 1)
with open(path_for_mnist_metadata,'w') as f:
    f.write("Index\tLabel\n")
    for index,label in enumerate(batch_ys):
        f.write("%d\t%d\n" % (index,label))
Hope this helps you think about PCA!
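If the goal is the principal components themselves rather than the TensorBoard visualisation, a minimal sklearn sketch (assuming each row is a time step and the columns are the A, B, and C features) would be:
import numpy as np
from sklearn.decomposition import PCA
X = np.random.rand(1000, 3)  # hypothetical data: 1000 time steps, columns A, B, C
pca = PCA(n_components=3)
pca.fit(X)
print(pca.explained_variance_ratio_)  # fraction of variance each component explains
print(pca.components_)  # loadings: how strongly A, B and C enter each component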

Attempting to use uninitialized value Variable in Tensorflow (sess.run(tf.global_variables_initializer()) is used!)

I am trying to separate my neural network model from a restore() function that sets randomly chosen weights to zero.
Here's the model code: http://pastebin.com/TqN6kkeb
(it works properly).
And here's the function:
from __future__ import print_function
import tensorflow as tf
tf.GraphKeys.VARIABLES = tf.GraphKeys.GLOBAL_VARIABLES
import random
from LogReg import accuracy
from LogReg import W
from LogReg import x, y
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
def restore(model_file):
    with tf.Session() as sess:
        new_saver = tf.train.import_meta_graph(model_file + ".meta")
        new_saver.restore(sess, model_file)
        with tf.variable_scope("foo", reuse=True):
            temp_var = tf.get_variable("W")
            size_2a = tf.get_variable("b")
        size_2 = tf.shape(size_2a).eval()[0]
        size_1 = tf.shape(temp_var).eval()[0]
        ones_mask = tf.Variable(tf.ones([size_1, size_2]))
        arg = random.sample(xrange(size_1), size_1/2)
        index_num = tf.convert_to_tensor(arg, dtype=tf.int32)
        print("om", ones_mask)
        print("index", index_num)
        print(W)
        zeroes = tf.zeros([size_1/2, size_2])
        update = tf.scatter_update(ones_mask, index_num, zeroes)
        print(update)
        assign_op = W.assign(tf.mul(W, update))
        sess.run(update)
        sess.run(assign_op)
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        new_saver.save(sess, model_file)
        print("Accuracy_new:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
restore('./MyModel2')
The problems are:
1) It keeps giving me
FailedPreconditionError (see above for traceback): Attempting to use uninitialized value Variable in this line:
update = tf.scatter_update(ones_mask, index_num, zeroes)
no matter what. I have read these topics: Prettytensor: Attempting to use uninitialized value and Update a subset of weights in TensorFlow (and many others), but the advice from there didn't help to fix my bug.
And I don't understand what the problem with the initialization is, as long as I run tf.global_variables_initializer();
2) all of the weights seem to be set to zero instead of half of them, and I can't understand why.
Please help, I'm really stuck.
Just for the record (and for others finding this post), the method name has changed, as per the page here: https://www.tensorflow.org/versions/r0.10/how_tos/variables/#initialization
You should run the initialize_all_variables() method like this:
import tensorflow as tf
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
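Note also the ordering in the question's restore(): sess.run(update) executes before the initializer, and a variable can only be read after its initializer has run. A minimal sketch of the fix under that assumption, using the names from the question:
ones_mask = tf.Variable(tf.ones([size_1, size_2]))
sess.run(ones_mask.initializer)  # initialize BEFORE any op reads the variable
update = tf.scatter_update(ones_mask, index_num, zeroes)
sess.run(update)  # no longer raises FailedPreconditionError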

How to load pre-trained tensorflow model named inception by Google?

I have downloaded a tensorflow checkpoint model named inception_resnet_v2_2016_08_30.ckpt.
Do I need to create a graph (with all the variables) that were used when this checkpoint was created?
How do I make use of this model?
First of all, you have to get the network architecture in memory. You can get the network architecture from here.
Once you have this program with you, use the following approach to use the model:
import tensorflow as tf
slim = tf.contrib.slim
from inception_resnet_v2 import inception_resnet_v2, inception_resnet_v2_arg_scope
height = 299
width = 299
channels = 3
X = tf.placeholder(tf.float32, shape=[None, height, width, channels])
with slim.arg_scope(inception_resnet_v2_arg_scope()):
    logits, end_points = inception_resnet_v2(X, num_classes=1001, is_training=False)
With this you have the whole network in memory. Now you can initialize the network with the checkpoint file (ckpt) using tf.train.Saver:
saver = tf.train.Saver()
sess = tf.Session()
saver.restore(sess, "/home/pramod/Downloads/inception_resnet_v2_2016_08_30.ckpt")
If you want to do bottleneck feature extraction, it's simple. Say you want to get features from the last layer; then you simply have to declare predictions = end_points["Logits"]. If you want to get features from another intermediate layer, you can get those names from the above program, inception_resnet_v2.py.
After that you can call: output = sess.run(predictions, feed_dict={X: batch_images})
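Putting those pieces together, a minimal sketch of running a batch through the restored network (assuming batch_images is a NumPy array of shape [N, 299, 299, 3], already preprocessed to the range the network expects):
import numpy as np
batch_images = np.zeros([2, height, width, channels], dtype=np.float32)  # hypothetical batch
predictions = end_points["Logits"]
output = sess.run(predictions, feed_dict={X: batch_images})
print(output.shape)  # (2, 1001): one logits vector per image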
Do I need to create a graph (with all the variables) that were used when this checkpoint was created?
No, you don't.
As for how to use the checkpoint (ckpt) file:
1. This article (TensorFlow-Slim image classification library) tells you how to train your model from scratch.
2. The following is example code from the Google blog:
import numpy as np
import os
import tensorflow as tf
import urllib2
import matplotlib.pyplot as plt
from datasets import imagenet
from nets import inception
from preprocessing import inception_preprocessing
slim = tf.contrib.slim
batch_size = 3
image_size = inception.inception_v3.default_image_size
checkpoints_dir = '/root/code/model'
checkpoints_filename = 'inception_resnet_v2_2016_08_30.ckpt'
model_name = 'InceptionResnetV2'
sess = tf.InteractiveSession()
graph = tf.Graph()
graph.as_default()
def classify_from_url(url):
    image_string = urllib2.urlopen(url).read()
    image = tf.image.decode_jpeg(image_string, channels=3)
    processed_image = inception_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
    processed_images = tf.expand_dims(processed_image, 0)
    # Create the model, use the default arg scope to configure the batch norm parameters.
    with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
        logits, _ = inception.inception_resnet_v2(processed_images, num_classes=1001, is_training=False)
    probabilities = tf.nn.softmax(logits)
    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, checkpoints_filename),
        slim.get_model_variables(model_name))
    init_fn(sess)
    np_image, probabilities = sess.run([image, probabilities])
    probabilities = probabilities[0, 0:]
    sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x: x[1])]
    plt.figure()
    plt.imshow(np_image.astype(np.uint8))
    plt.axis('off')
    plt.show()
    names = imagenet.create_readable_names_for_imagenet_labels()
    for i in range(5):
        index = sorted_inds[i]
        print('Probability %0.2f%% => [%s]' % (probabilities[index], names[index]))
Another way of loading a pre-trained Imagenet model is
ResNet50
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
model = ResNet50()
model.summary()
InceptionV3
import tensorflow as tf
from tensorflow.keras.applications.inception_v3 import InceptionV3
model = InceptionV3()
model.summary()
You can check a detailed explanation related to this here
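As a usage note, the Keras applications models expect their own input preprocessing; a minimal sketch for classifying a single image with the ResNet50 loaded above (assuming a hypothetical elephant.jpg on disk):
import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
img = image.load_img('elephant.jpg', target_size=(224, 224))  # hypothetical image file
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])  # top-3 human-readable ImageNet labels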
