Tensorflow: The prediction of my model is always the same - python

I have trained a deep CNN that predicts a one-dimensional array and saved the weight variables in .ckpt format. But when I give the model new inputs, it always outputs the same array. I have already checked the preprocessing of the inputs and I'm sure it is correct. Here is my prediction code:
import pandas as pd
import numpy as np
import os
import tensorflow as tf

sess = tf.Session()
sess.run(tf.global_variables_initializer())
filename = os.listdir("D:/project/test datasets/image")
new_dir = "D:/project/test datasets/"
for img in filename:
    img = os.path.splitext(img)[0]
    xs = pd.read_csv(new_dir+img+'.csv', index_col=0)
    xs = xs.values.flatten()
    xs = np.expand_dims(xs, 0)
    saver = tf.train.import_meta_graph('model.ckpt.meta')
    saver.restore(sess, 'model.ckpt')
    graph = tf.get_default_graph()
    x = graph.get_tensor_by_name("x:0")
    keep_prob = graph.get_tensor_by_name("keep_prob:0")
    y_conv = graph.get_tensor_by_name("y_conv:0")
    print(sess.run(y_conv, feed_dict={x: xs, keep_prob: 1.0}))
I also find that when I add the statement y_conv = tf.constant(0) at the end of the loop, all subsequent outputs are 0, which means my prediction y_conv does not update on each iteration.
I have no idea what is wrong. Any feedback or advice would be greatly appreciated.

Your code looks fine to me. Can you please try it in the format below?
with tf.Session() as sess:
    saver = tf.train.import_meta_graph(savefile)
    saver.restore(sess, tf.train.latest_checkpoint(savedir))
    graph = tf.get_default_graph()
    input_x = graph.get_tensor_by_name("input_x:0")
    result = graph.get_tensor_by_name("result:0")
    feed_dict = {input_x: x_data}
    predictions = result.eval(feed_dict=feed_dict)
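If the output is still identical for every image, one more thing worth trying (a sketch assembled from the same file paths and tensor names as in the question, not a confirmed fix): restore the meta graph and the checkpoint once, before the loop, so the graph is not re-imported on every iteration and only the fed CSV changes:
import os
import pandas as pd
import numpy as np
import tensorflow as tf

new_dir = "D:/project/test datasets/"
filenames = os.listdir("D:/project/test datasets/image")

with tf.Session() as sess:
    # restore the graph and the trained weights once
    saver = tf.train.import_meta_graph('model.ckpt.meta')
    saver.restore(sess, 'model.ckpt')
    graph = tf.get_default_graph()
    x = graph.get_tensor_by_name("x:0")
    keep_prob = graph.get_tensor_by_name("keep_prob:0")
    y_conv = graph.get_tensor_by_name("y_conv:0")

    # only the input changes per iteration
    for img in filenames:
        name = os.path.splitext(img)[0]
        xs = pd.read_csv(new_dir + name + '.csv', index_col=0)
        xs = np.expand_dims(xs.values.flatten(), 0)
        print(sess.run(y_conv, feed_dict={x: xs, keep_prob: 1.0}))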

Related

deploying the Tensorflow model in Python

I need help implementing a Tensorflow model in real time.
While training everything works fine, but when I move on to a real-time forecast or prediction, the output I receive is way off.
I do not know why this is happening.
I used the code from here as a reference: https://www.kaggle.com/raoulma/ny-stock-price-prediction-rnn-lstm-gru/notebook
and tried to implement and deploy it with a few changes.
See the following code:
import numpy as np
import pandas as pd
import sklearn
import sklearn.preprocessing
import datetime
import os
import tensorflow as tf

df = pd.read_csv("Realtime_Values.csv", index_col=0)
df.info()

def load_data(stock, seq_len):
    data_raw = stock.as_matrix()  # convert to numpy array
    data = []
    for index in range(len(data_raw) - seq_len):
        data.append(data_raw[index: index + seq_len])
    #print(len(data))
    data = np.array(data)
    x_forecast = data[:, :-1, :]
    return x_forecast

def normalize_data(df):
    cols = list(df.columns.values)
    min_max_scaler = sklearn.preprocessing.MinMaxScaler()
    df = pd.DataFrame(min_max_scaler.fit_transform(df.values))
    df.columns = cols
    return df

model_path = "modelsOHLC"
seq_len = 9

# parameters
n_steps = seq_len - 1
n_inputs = 4
n_neurons = 100
n_outputs = 4
n_layers = 4
learning_rate = 0.01
batch_size = 10
n_epochs = 1000

tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_outputs])
layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.elu)
          for layer in range(n_layers)]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])
outputs = outputs[:, n_steps-1, :]  # keep only last output of sequence
loss = tf.reduce_mean(tf.square(outputs - y))  # loss function = mean squared error
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
if tf.train.checkpoint_exists(tf.train.latest_checkpoint(model_path)):
    saver.restore(sess, tf.train.latest_checkpoint(model_path))
df = normalize_data(df)
x_forecast = load_data(df, seq_len)
y_forecast_pred = sess.run(outputs, feed_dict={X: x_forecast})
print(y_forecast_pred)
Can anyone help me get the above code running in real time without any issues?
There is a possibility that the code failed to find the weights saved when the program trained the model; in that case the predictions are being generated from an untrained state. Your code for restoring the trained model is:
if tf.train.checkpoint_exists(tf.train.latest_checkpoint(model_path)):
    saver.restore(sess, tf.train.latest_checkpoint(model_path))
To fix this problem:
Add debugging output such as print("checkpoint exists!") inside the if branch, as shown in the sketch after this list.
Set a breakpoint in a debugger before or after saver.restore(...) to confirm that a checkpoint is actually found and restored.
Check model_path to make sure your checkpoints are actually saved there.
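A minimal sketch of that check, using the same model_path, saver, and sess names as in the question:
ckpt = tf.train.latest_checkpoint(model_path)
if ckpt is not None and tf.train.checkpoint_exists(ckpt):
    print("checkpoint exists! restoring from:", ckpt)
    saver.restore(sess, ckpt)
else:
    print("no checkpoint found in", model_path,
          "- predictions will come from untrained weights")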

How to run Keras.model() for prediction inside a tensorflow session?

I am currently having an issue executing my Keras model's predict() inside a TensorFlow session.
with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer())
    ## want to know how to add model.predict() inside this condition
    predictions = model.predict(#my_model)
    #predictions output is same not appending
Any alternative method would also be helpful.
Any help would be appreciated.
import tensorflow as tf
from keras import backend as K
from keras.models import load_model

with tf.Graph().as_default():
    with tf.Session() as sess:
        K.set_session(sess)
        model = load_model(model_path)
        preds = model.predict(in_data)
import tensorflow as tf
from keras import backend as K
from keras.models import load_model

with tf.Session(graph=K.get_session().graph) as session:
    session.run(tf.global_variables_initializer())
    model = load_model('model.h5')
    predictions = model.predict(input)
The above code works for me. I am using Keras MobileNet inside TensorFlow.
You have to declare the placeholder first, then load the model:
import numpy as np
import tensorflow as tf

input_img = tf.placeholder(tf.float32, (1, 12, 8, 3), name='image')
CnnClassifier = tf.keras.models.load_model('model.h5', custom_objects=None, compile=True)
output = CnnClassifier(input_img)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    output_val = sess.run(output, {input_img: np.expand_dims(img, 0)})
If I'm not mistaken, you could replace
with tf.Session() as sess:
simply with
sess = K.get_session()
(where K is keras.backend imported).
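A sketch of that replacement applied to the previous snippet. K is taken from tensorflow.keras here so that it matches the tf.keras model above (an assumption on my part), and 'model.h5' and img are the same hypothetical file and input array as before. The global initializer call is dropped because load_model already sets the weights in the session Keras manages, and re-initializing would overwrite them:
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

input_img = tf.placeholder(tf.float32, (1, 12, 8, 3), name='image')
CnnClassifier = tf.keras.models.load_model('model.h5', custom_objects=None, compile=True)
output = CnnClassifier(input_img)

sess = K.get_session()  # reuse the session in which Keras loaded the weights
# img: a preprocessed input array of shape (12, 8, 3), as in the snippet above
output_val = sess.run(output, {input_img: np.expand_dims(img, 0)})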

Basic Tensorflow Example - Prediction of a Line

I'm trying to create this super simple example with Tensorflow and I clearly don't fully understand the API for Tensorflow.
I have the following code. It's not mine originally - I found it in some demo, but I can't remember where, or else I would give the author credit. Apologies.
Saving the Trained Line Model
import tensorflow as tf
import numpy as np

# Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# Try to find values for W and b that compute y_data = W * x_data + b
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name='W')
b = tf.Variable(tf.zeros([1]), name='b')
y = W * x_data + b

# Minimize the mean squared errors.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

# Before starting, initialize the variables. We will 'run' this first.
init = tf.global_variables_initializer()

# Create a session saver
saver = tf.train.Saver()

# Launch the graph.
sess = tf.Session()
sess.run(init)

# Fit the line.
for step in range(201):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(W), sess.run(b))

saver.save(sess, 'linemodel')
Ok that's all fine. I just want to load in the model and then query my model to get a predicted value. Here is my attempted code:
Loading and Querying the Trained Line Model
# This is going to load the line model
import tensorflow as tf

sess = tf.Session()
new_saver = tf.train.import_meta_graph('linemodel.meta')
new_saver.restore(sess, tf.train.latest_checkpoint('./'))  # latest checkpoint
all_vars = tf.global_variables()
for v in all_vars:
    v_ = sess.run(v)
    print("This is {} with value: {}".format(v.name, v_))
# this works

# None of the below works
# Tried this as well
#fetches = {
#    "input": tf.constant(10, name='input')
#}
#feed_dict = {"input": tf.constant(10, name='input')}
#vals = sess.run(fetches, feed_dict=feed_dict)

# Tried this and it didn't work
# query_value = tf.constant(10, name='query')
# print(sess.run(query_value))
This is a really basic question, but how can I just pass in a value and use my line almost like a function? Do I need to change the way the line model is being constructed? My guess is that the computation graph is not set up so that the output is an actual variable we can get. Is this correct? If so, how should I modify this program?
You have to create the TensorFlow graph again and load the saved weights into it. I added a couple of lines to your code and it gives the desired outputs. Please check it.
import tensorflow as tf
import numpy as np

sess = tf.Session()
new_saver = tf.train.import_meta_graph('linemodel.meta')
new_saver.restore(sess, tf.train.latest_checkpoint('./'))  # latest checkpoint
all_vars = tf.global_variables()

# load saved weights into new variables
W = all_vars[0]
b = all_vars[1]

# build TF graph
x = tf.placeholder(tf.float32)
y = tf.add(tf.multiply(W, x), b)

# Session
init = tf.global_variables_initializer()
print(sess.run(all_vars))
sess.run(init)
for i in range(2):
    x_ip = np.random.rand(10).astype(np.float32)  # batch_size : 10
    vals = sess.run(y, feed_dict={x: x_ip})
    print(vals)
Output:
[array([ 0.1000001], dtype=float32), array([ 0.29999995], dtype=float32)]
[-0.21707924 -0.18646611 -0.00732027 -0.14248954 -0.54388255 -0.33952206 -0.34291503 -0.54771954 -0.60995424 -0.91694558]
[-0.45050886 -0.01207681 -0.38950539 -0.25888413 -0.0103816 -0.10003483 -0.04783082 -0.83299863 -0.53189355 -0.56571382]
I hope this helps.

Attempting to use uninitialized value Variable in Tensorflow (sess.run(tf.global_variables_initializer()) is used!)

I am trying to split my neural network model from a restore() function that sets a random half of the weights to zero.
Here's the model code: http://pastebin.com/TqN6kkeb
(it works properly).
And here's the function:
from __future__ import print_function
import tensorflow as tf
tf.GraphKeys.VARIABLES = tf.GraphKeys.GLOBAL_VARIABLES
import random
from LogReg import accuracy
from LogReg import W
from LogReg import x, y

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

def restore(model_file):
    with tf.Session() as sess:
        new_saver = tf.train.import_meta_graph(model_file + ".meta")
        new_saver.restore(sess, model_file)
        with tf.variable_scope("foo", reuse=True):
            temp_var = tf.get_variable("W")
            size_2a = tf.get_variable("b")
        size_2 = tf.shape(size_2a).eval()[0]
        size_1 = tf.shape(temp_var).eval()[0]
        ones_mask = tf.Variable(tf.ones([size_1, size_2]))
        arg = random.sample(xrange(size_1), size_1/2)
        index_num = tf.convert_to_tensor(arg, dtype=tf.int32)
        print("om", ones_mask)
        print("index", index_num)
        print(W)
        zeroes = tf.zeros([size_1/2, size_2])
        update = tf.scatter_update(ones_mask, index_num, zeroes)
        print(update)
        assign_op = W.assign(tf.mul(W, update))
        sess.run(update)
        sess.run(assign_op)
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        new_saver.save(sess, model_file)
        print("Accuracy_new:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

restore('./MyModel2')
The problems are:
1) It keeps giving me
FailedPreconditionError (see above for traceback): Attempting to use uninitialized value Variable on this line:
update = tf.scatter_update(ones_mask, index_num, zeroes)
no matter what. I have read these topics: Prettytensor: Attempting to use uninitialized value and Update a subset of weights in TensorFlow (and many others), but the advice there didn't help fix my bug.
And I don't understand what the problem with initialization is, given that I do run tf.global_variables_initializer();
2) all of the weights seem to be set to zero instead of half of them, and I can't understand why.
Please help, I'm really stuck.
Just for the record (and others finding this post), the method name has changed, as per the page here: https://www.tensorflow.org/versions/r0.10/how_tos/variables/#initialization
you should run the initialize_all_variables() method like this:
import tensorflow as tf
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)

open tensorflow graph from file

I'm trying to use TensorFlow for study and I don't understand how to open and use a graph of type tf.Graph that I saved to a file earlier. Something like this:
import tensorflow as tf

my_graph = tf.Graph()
with my_graph.as_default():
    x = tf.Variable(0)
    b = tf.constant(-5)
    k = tf.constant(2)
    y = k * x + b
    tf.train.write_graph(my_graph, '.', 'graph.pbtxt')

f = open('graph.pbtxt', "r")
# Do something with "f" to get my saved graph and use it below in
# tf.Session(graph=...) instead of dots

with tf.Session(graph=...) as sess:
    tf.initialize_all_variables().run()
    y1 = sess.run(y, feed_dict={x: 5})
    y2 = sess.run(y, feed_dict={x: 10})
    print(y1, y2)
You have to load the file contents, parse them into a GraphDef, and then import it.
It will be imported into the current graph. You may want to wrap the import with a graph.as_default(): context manager.
import tensorflow as tf
from tensorflow.core.framework import graph_pb2 as gpb
from google.protobuf import text_format as pbtf

gdef = gpb.GraphDef()

with open('my-graph.pbtxt', 'r') as fh:
    graph_str = fh.read()

pbtf.Parse(graph_str, gdef)
tf.import_graph_def(gdef)
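A small follow-up sketch (not part of the original answer): tf.import_graph_def prefixes imported node names with "import/" unless a name argument is passed, so fetching a tensor afterwards looks roughly like this, where "my_node" is a hypothetical name standing in for a real node in the saved graph:
graph = tf.get_default_graph()
# pass name="" to tf.import_graph_def above to keep the original node names instead
t = graph.get_tensor_by_name("import/my_node:0")

with tf.Session(graph=graph) as sess:
    print(sess.run(t))  # works for nodes that do not depend on uninitialized variables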
One option: take a look at the Tensorflow MetaGraph save/restore support, documented here: https://www.tensorflow.org/versions/r0.11/how_tos/meta_graph/index.html
I solved this problem this way: first, I name the calculation I need in my graph "output", and then I save the model with the code below...
import tensorflow as tf

x = tf.placeholder(dtype=tf.float64, shape=[], name="input")
a = tf.Variable(111, name="var1", dtype=tf.float64)
b = tf.Variable(-666, name="var2", dtype=tf.float64)
y = tf.add(x, a, name="output")

saver = tf.train.Saver()
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    print(sess.run(y, feed_dict={x: 555}))
    save_path = saver.save(sess, "model.ckpt", meta_graph_suffix='meta', write_meta_graph=True)
    print("Model saved in file: %s" % save_path)
Second, I need to run a certain operation in the graph, which I know by the name "output". So I just restore the model in another script and run the restored calculation, fetching the necessary parts of the graph by their names "input" and "output":
import tensorflow as tf

# Restore graph to another graph (and make it default graph) and variables
graph = tf.Graph()
with graph.as_default():
    saver = tf.train.import_meta_graph("model.ckpt.meta")
    y = graph.get_tensor_by_name("output:0")
    x = graph.get_tensor_by_name("input:0")

    with tf.Session() as sess:
        saver.restore(sess, "model.ckpt")
        print(sess.run(y, feed_dict={x: 888}))
        # Variable out:
        for var in tf.all_variables():
            print("%s %.2f" % (var.name, var.eval()))
