I am trying to create a web application (REST API) for image recognition with TensorFlow + Keras in Flask. I followed some resources available on the internet and came up with the script below:
from imageai.Prediction import ImagePrediction
import os

execution_path = os.getcwd()

prediction = ImagePrediction()
prediction.setModelTypeAsResNet()
prediction.setModelPath(execution_path + "\\resnet50_weights_tf_dim_ordering_tf_kernels.h5")
prediction.loadModel()

for i in range(3):
    predictions, percentage_probabilities = prediction.predictImage("C:\\Users\\Administrator\\Downloads\\pics\\banana" + str(i) + ".jpg", result_count=5)
    for index in range(len(predictions)):
        print(predictions[index], " : ", percentage_probabilities[index])
This worked fine as a standalone script. Then I tried to convert it to a Flask application, and it started failing:
import flask
from imageai.Prediction import ImagePrediction
import os
import json
import keras

# initialize our Flask application and the Keras model
app = flask.Flask(__name__)

def init():
    global model
    execution_path = os.getcwd()
    model = ImagePrediction()
    model.setModelTypeAsResNet()
    model.setModelPath(os.path.join(os.getcwd(), "models", "resnet50_weights_tf_dim_ordering_tf_kernels.h5"))
    model.loadModel()

# API for prediction
@app.route("/predict", methods=["GET"])
def predict():
    predictions, percentage_probabilities = model.predictImage(os.path.join(os.getcwd(), "pics", "banana.jpg"), result_count=5)
    mylist = []
    for index in range(len(predictions)):
        mydict = {}
        mydict[predictions[index]] = percentage_probabilities[index]
        mylist.append(mydict)
    keras.backend.clear_session()
    return sendResponse(json.dumps(mylist))

# Cross-origin support
def sendResponse(responseObj):
    response = flask.jsonify(responseObj)
    response.headers.add('Access-Control-Allow-Origin', '*')
    response.headers.add('Access-Control-Allow-Methods', 'GET')
    response.headers.add('Access-Control-Allow-Headers', 'accept,content-type,Origin,X-Requested-With,Content-Type,access_token,Accept,Authorization,source')
    response.headers.add('Access-Control-Allow-Credentials', True)
    return response

# if this is the main thread of execution, first load the model and then start the server
if __name__ == "__main__":
    print(("* Loading Keras model and Flask starting server..."
           "please wait until server has fully started"))
    init()
    app.run(threaded=True)
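For testing, a minimal client sketch that exercises the route once the app is running (assuming Flask's default port 5000; adjust if needed):

import requests

# call the /predict route of the locally running Flask app
resp = requests.get("http://127.0.0.1:5000/predict")
print(resp.status_code, resp.json())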
I have created a TensorFlow model, saved it, and tested it. I really don't know how to use TensorFlow Serving, and I'm not sure about the input and output nodes of my model, which I would need in order to convert it to protobuf and then serve it. So I wanted to know: can I directly use the prediction function in Flask and load the model there to make predictions? I am really confused as to why we have to use TensorFlow Serving to deploy TensorFlow models. Is there an easier, more direct way?
You can, but you will need to set up a TensorFlow Serving server; then you send a POST request to the server.
Reference link here: Deploying-keras-models-using-tensorflow-serving-and-flask
Reference link here: Serving-TensorFlow flask client
"""This script wraps the client into a Flask server. It receives POST request with
prediction data, and forward the data to tensorflow server for inference.
"""
from flask import Flask, render_template, request, url_for, jsonify,Response
import json
import tensorflow as tf
import numpy as np
import os
import argparse
import sys
from datetime import datetime
from grpc.beta import implementations
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
tf.app.flags.DEFINE_string('server', 'localhost:9000', 'PredictionService host:port')
FLAGS = tf.app.flags.FLAGS
app = Flask(__name__)
class mainSessRunning():
def __init__(self):
host, port = FLAGS.server.split(':')
channel = implementations.insecure_channel(host, int(port))
self.stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
self.request = predict_pb2.PredictRequest()
self.request.model_spec.name = 'example_model'
self.request.model_spec.signature_name = 'prediction'
def inference(self, val_x):
# temp_data = numpy.random.randn(100, 3).astype(numpy.float32)
temp_data = val_x.astype(np.float32).reshape(-1, 3)
print("temp_data is:", temp_data)
data, label = temp_data, np.sum(temp_data * np.array([1, 2, 3]).astype(np.float32), 1)
self.request.inputs['input'].CopyFrom(
tf.contrib.util.make_tensor_proto(data, shape=data.shape))
result = self.stub.Predict(self.request, 5.0)
return result, label
run = mainSessRunning()
print("Initialization done. ")
# Define a route for the default URL, which loads the form
#app.route('/inference', methods=['POST'])
def inference():
request_data = request.json
input_data = np.expand_dims(np.array(request_data), 0)
result, label = run.inference(input_data)
di={"result":str(result),'label': label[0].tolist()}
return Response(json.dumps(di), mimetype='application/json')
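A minimal sketch of a client for the /inference route (host and port are assumptions; the payload is a single 3-value sample, matching the reshape(-1, 3) inside inference()):

import requests

payload = [0.1, 0.2, 0.3]  # one sample with three features
resp = requests.post("http://127.0.0.1:5000/inference", json=payload)
print(resp.json())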
I am currently developing a machine learning web application using TensorFlow/Keras and Flask. For deployment I am using Google App Engine (GAE).
Locally I can run my application, get some data via the UI, and display the prediction using my loaded models (there are four of them, all .h5 files).
I can deploy it on GAE without any errors, but when I enter the link in my browser I get a '502 Bad Gateway' error, and when I inspect the browser (Chrome) I can see three error messages saying 'failed to load resource'.
I am new to web development, so I don't fully understand what and where the problem is. Do I need to declare my models in the .yaml file or load them in a specific way?
My .yaml file:
env: flex
runtime: python
runtime_config:
  python_version: 3
My main.py file:
import pandas as pd
import tensorflow as tf
from flask import Flask, request, redirect, url_for, render_template
from keras.models import load_model
from keras.backend import set_session
from utils import instagram_scraper
from utils import image_preprocessing
from utils import label_encoding
from utils import overall_class_label
from sklearn.preprocessing import LabelEncoder

global sess
global graph
sess = tf.compat.v1.Session()
graph = tf.compat.v1.get_default_graph()
tf.compat.v1.keras.backend.set_session(sess)

num_attributes = 4
model = [[] for i in range(num_attributes)]
model[0] = load_model('./model/savemodels/glamorous_model.h5')
model[1] = load_model('./model/savemodels/rugged_model.h5')
model[2] = load_model('./model/savemodels/fun_model.h5')
model[3] = load_model('./model/savemodels/healthy_model.h5')

app = Flask(__name__)

def data_collection(brandname):
    url = 'https://www.instagram.com/' + brandname + '/?hl=en'
    scraper = instagram_scraper.InstagramScraper()
    official_images = scraper.profile_page_posts(url)
    return official_images

def data_preprocessing(official_images):
    preprocessed_data = image_preprocessing.preprocessing(official_images)
    return preprocessed_data

def make_prediction(preprocessed_data):
    X_test = preprocessed_data
    with graph.as_default():
        tf.compat.v1.keras.backend.set_session(sess)
        y_pred = [[] for i in range(num_attributes)]
        for i in range(num_attributes):
            y_pred[i] = model[i].predict(X_test)
    y_pred_label = overall_class_label.give_ovr_class_label_output(y_pred)
    # convert from encoded label to label name
    y_pred_lst = y_pred_label.tolist()
    # map back to original label name
    code2label = {0: 'glamorous', 1: 'rugged', 2: 'fun', 3: 'healthy'}
    y_pred_lbnm = map(code2label.get, y_pred_lst)
    y_pred_lbnm = list(y_pred_lbnm)
    prediction = pd.Series(y_pred_lbnm).value_counts()
    return prediction

@app.route("/", methods=["POST", "GET"])
def index():
    if request.method == "POST":
        brandname = request.form["brandname"]
        return redirect(url_for("predict", brandname=brandname))
    else:
        return render_template("index.html")

@app.route("/predict/<brandname>", methods=["POST", "GET"])
def predict(brandname):
    official_images = data_collection(brandname)
    preprocessed_data = data_preprocessing(official_images)
    prediction = make_prediction(preprocessed_data)
    return render_template("predict.html", prediction=prediction)

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8080, debug=True)
My app structure:
parent
--model
  --savemodels
    fun_model.h5
    glamorous_model.h5
    healthy_model.h5
    rugged_model.h5
--static
  style.css
--templates
  index.html
  predict.html
--utils
  image_preprocessing.py
  instagram_scraper.py
  label_encoding.py
  overall_class_label.py
app.yaml
main.py
requirements.txt
Any help is very much appreciated, thank you!
All the best,
snowe
From Postman, I'm trying to send a GET request with params Key: path, Value: home/guest/images/abc.jpeg.
I want to retrieve that value and use it as the argument of my function.
Somehow I'm unable to do so, and the API is not being called. Any input would be helpful.
I'm new to writing REST APIs using Flask and Python. I want to get the value of the params key path as TEST_PATH, so that the rest of the code runs as it should and returns the JSONP_data.
from flask import Flask
from flask_cors import CORS
import pandas as pd
import datetime
import numpy as np
from flask import jsonify
import json
from pymongo import MongoClient

# set the project root directory as the static folder, you can set others.
app = Flask(__name__)
CORS(app)
counter = 0

@app.route('/')
def api_root():
    return ('Welcome')

@app.route('/generateTags/<id>', methods=['GET'])
def generateTags1(id):
    file = json.loads(create_task(id))  # create_task is defined elsewhere
    TEST_PATH = file['path']
    print(TEST_PATH)
    import DenseNet
    import Inception
    import ResNet
    import SqueezNet
    from imageai.Prediction import ImagePrediction
    prediction = ImagePrediction()
    # TEST_PATH = '/home/anit/Documents/Image Analysis/ImageAI-master/Dataset'
    dense_list = DenseNet.tag(TEST_PATH, DenseNet.prediction)
    dense_list = ['DenseNet'] + dense_list.tolist()[0]
    res_list = ResNet.tag(TEST_PATH, ResNet.prediction)
    res_list = ['ResNet'] + res_list.tolist()[0]
    incept_list = Inception.tag(TEST_PATH, Inception.prediction)
    incept_list = ['Inception'] + incept_list.tolist()[0]
    squeez_list = SqueezNet.tag(TEST_PATH, SqueezNet.prediction)
    squeez_list = ['SqueezNet'] + squeez_list.tolist()[0]
    d = {'filename': dense_list[1], 'models': {}}
    for l in [dense_list, res_list, incept_list, squeez_list]:
        # breakpoint()
        d['models'][l[0]] = l[2:]
    JSONP_data = jsonify(d)
    return JSONP_data

if __name__ == '__main__':
    app.run()
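As a side note: if the goal is simply to read the Postman query parameter, Flask exposes it via request.args. A minimal sketch (the route name and behaviour here are illustrative, not the final code):

from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/generateTags', methods=['GET'])
def generate_tags():
    # reads ?path=home/guest/images/abc.jpeg from the query string
    test_path = request.args.get('path')
    if test_path is None:
        return jsonify({'error': 'missing "path" query parameter'}), 400
    return jsonify({'path': test_path})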
I'm deploying a Keras model and sending the test data to the model via a Flask API. I have two files.
First, my Flask app:
import os
import numpy as np
from flask import Flask, request, jsonify
from keras.models import model_from_json

# Let's start up the Flask application
app = Flask(__name__)

# Model reload from JSON:
print('Load model...')
json_file = open('models/model_temp.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
keras_model_loaded = model_from_json(loaded_model_json)
print('Model loaded...')

# Weights reloaded from .h5 inside the model
print('Load weights...')
keras_model_loaded.load_weights("models/Model_temp.h5")
print('Weights loaded...')

# URL that we'll use to make predictions using GET and POST
@app.route('/predict', methods=['GET', 'POST'])
def predict():
    data = request.get_json(force=True)
    predict_request = [data["month"], data["day"], data["hour"]]
    predict_request = np.array(predict_request)
    predict_request = predict_request.reshape(1, -1)
    y_hat = keras_model_loaded.predict(predict_request, batch_size=1, verbose=1)
    return jsonify({'prediction': str(y_hat)})

if __name__ == "__main__":
    # Choose the port
    port = int(os.environ.get('PORT', 9000))
    # Run locally
    app.run(host='127.0.0.1', port=port)
Second, the file I'm using to send the JSON data to the API endpoint:
import datetime
import json
import requests as rq

response = rq.get('api url has been removed')
data = response.json()

currentDT = datetime.datetime.now()
month = currentDT.month
day = currentDT.day
hour = currentDT.hour

url = "http://127.0.0.1:9000/predict"
post_data = json.dumps({'month': month, 'day': day, 'hour': hour})
r = rq.post(url, post_data)
I'm getting this response from Flask regarding TensorFlow:
ValueError: Tensor Tensor("dense_6/BiasAdd:0", shape=(?, 1), dtype=float32) is not an element of this graph.
My Keras model is a simple six-dense-layer model and it trains with no errors.
Any ideas?
Flask uses multiple threads. The problem you are running into is that the TensorFlow model is not loaded and used in the same thread. One workaround is to force TensorFlow to use the global default graph.
Add this after you load your model
global graph
graph = tf.get_default_graph()
And inside your predict
with graph.as_default():
    y_hat = keras_model_loaded.predict(predict_request, batch_size=1, verbose=1)
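Putting the two pieces together, a minimal sketch of the full pattern (the file names and the input layout are taken from the question above and are placeholders):

import numpy as np
import tensorflow as tf
from flask import Flask, request, jsonify
from keras.models import model_from_json

app = Flask(__name__)

# load the model once at startup
with open('models/model_temp.json') as f:
    keras_model_loaded = model_from_json(f.read())
keras_model_loaded.load_weights('models/Model_temp.h5')

# capture the graph the model was built in
graph = tf.get_default_graph()

@app.route('/predict', methods=['POST'])
def predict():
    data = request.get_json(force=True)
    x = np.array([data['month'], data['day'], data['hour']]).reshape(1, -1)
    with graph.as_default():
        y_hat = keras_model_loaded.predict(x, batch_size=1)
    return jsonify({'prediction': str(y_hat)})

if __name__ == '__main__':
    app.run(threaded=True)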
It's much simpler to wrap your Keras model in a class, and have that class keep track of its own graph and session. This prevents the problems that having multiple threads/processes/models can cause, which is almost certainly the cause of your issue. While other solutions will work, this is by far the most general, scalable catch-all. Use this one:
import os
from keras.models import model_from_json
from keras import backend as K
import tensorflow as tf
import logging

logger = logging.getLogger('root')

class NeuralNetwork:
    def __init__(self):
        self.session = tf.Session()
        self.graph = tf.get_default_graph()
        # the folder in which the model and weights are stored
        self.model_folder = os.path.join(os.path.abspath("src"), "static")
        self.model = None
        # for some reason in a flask app the graph/session needs to be used in the init else it hangs on other threads
        with self.graph.as_default():
            with self.session.as_default():
                logging.info("neural network initialised")

    def load(self, file_name=None):
        """
        :param file_name: [model_file_name, weights_file_name]
        :return:
        """
        with self.graph.as_default():
            with self.session.as_default():
                try:
                    model_name = file_name[0]
                    weights_name = file_name[1]

                    if model_name is not None:
                        # load the model
                        json_file_path = os.path.join(self.model_folder, model_name)
                        json_file = open(json_file_path, 'r')
                        loaded_model_json = json_file.read()
                        json_file.close()
                        self.model = model_from_json(loaded_model_json)

                    if weights_name is not None:
                        # load the weights
                        weights_path = os.path.join(self.model_folder, weights_name)
                        self.model.load_weights(weights_path)

                    logging.info("Neural Network loaded: ")
                    logging.info('\t' + "Neural Network model: " + model_name)
                    logging.info('\t' + "Neural Network weights: " + weights_name)
                    return True
                except Exception as e:
                    logging.exception(e)
                    return False

    def predict(self, x):
        with self.graph.as_default():
            with self.session.as_default():
                y = self.model.predict(x)
        return y
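For completeness, a hedged usage sketch (the file names and the input shape are placeholders):

import numpy as np

nn = NeuralNetwork()
if nn.load(["model_architecture.json", "model_weights.h5"]):
    x = np.zeros((1, 3), dtype="float32")  # dummy input; shape depends on the model
    prediction = nn.predict(x)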
Just after loading the model, add model._make_predict_function():

# Model reload from JSON:
print('Load model...')
json_file = open('models/model_temp.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
keras_model_loaded = model_from_json(loaded_model_json)
print('Model loaded...')
# Weights reloaded from .h5 inside the model
print('Load weights...')
keras_model_loaded.load_weights("models/Model_temp.h5")
print('Weights loaded...')
keras_model_loaded._make_predict_function()
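A short note on why this can help: _make_predict_function() builds Keras's internal predict function up front, on the thread that loaded the model, so later predict calls from Flask's worker threads do not have to finalize the graph concurrently.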
It turns out this approach does not need a clear_session call, and it is at the same time configuration-friendly: use the graph object from the configured session, session = tf.Session(config=_config); self.graph = session.graph, and predict with the created graph as default, with self.graph.as_default():. This offers a clean approach.
from keras.backend.tensorflow_backend import set_session
...
def __init__(self):
    config = self.keras_resource()
    self.init_model(config)

def init_model(self, _config, *args):
    session = tf.Session(config=_config)
    self.graph = session.graph
    # set configured session
    set_session(session)
    self.model = load_model(file_path)

def keras_resource(self):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    return config

def predict_target(self, to_predict):
    with self.graph.as_default():
        predict = self.model.predict(to_predict)
    return predict
I had the same problem. It was resolved by changing from TensorFlow 1 to TensorFlow 2: just uninstall version 1 and install version 2.
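For illustration, a minimal TF 2.x sketch (the model path and input shape are placeholders): eager execution is the default in TF 2, so no explicit graph or session handling is needed in the Flask handler.

import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model("model.h5")  # placeholder path
x = np.random.rand(1, 3).astype(np.float32)     # dummy input; shape depends on the model
preds = model.predict(x)
print(preds)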
Yes, there is a bug when you predict from a model with Keras: Keras may fail to build the graph due to an error. Try to predict images from the model with the help of TensorFlow, replacing this line of code.

Keras code:

features = model_places.predict( img )

TensorFlow code:

import tensorflow as tf
graph = tf.get_default_graph()

Import this in your code and replace the prediction line:

with graph.as_default():
    features = model_places.predict( img ).tolist()

If the problem is still not solved, try refreshing the graph.
As your code is fine, running in a clean environment should solve it:

- Clear the Keras cache at ~/.keras/
- Run in a new environment with the right packages (easily done with Anaconda)
- Make sure you are in a fresh session; keras.backend.clear_session() should remove all existing TF graphs.
Keras Code:
keras.backend.clear_session()
features = model_places.predict( img )
TensorFlow Code:
import tensorflow as tf
with tf.Session() as sess:
    tf.reset_default_graph()
The simplest solution is to use TensorFlow 2.0: run your code in a TensorFlow 2.0 environment and it will work.
I was facing the same issue while exposing a pre-trained model via a REST server. I was loading the model at server startup and later using it to make predictions via POST/GET requests. While predicting, it generated an error because the session was not saved between predict calls, although it worked fine when I loaded the model anew for every prediction.
Then, to avoid this session issue, I simply ran the code in a TF 2.0 environment and it ran fine.
I'm new to Python and am trying to build an app with TensorFlow. Basically, what I need is to get features from a loaded neural network, which takes around 3 minutes to load.
I would like the script below to load the neural network just once at startup, so that I can call a REST function just to feed an image to the network.
from flask import Flask, request
from flask_restful import Resource, Api
from scipy import misc
import tensorflow as tf
import numpy as np
import sys
import os
import argparse
import facenet  # used below to locate and load the model

class ImageFeatures(Resource):
    def get(self, img):
        image = misc.imread(os.path.expanduser("Img/Abc_001.jpg"))
        feed_dict = {images_placeholder: image, phase_train_placeholder: False}
        emb = sess.run(embeddings, feed_dict=feed_dict)
        return (emb)

with tf.Graph().as_default():
    with tf.Session() as sess:
        model_dir = 'Model/'
        meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(model_dir))
        facenet.load_model(model_dir, meta_file, ckpt_file)

        images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

        print("Rest Running")

        app = Flask(__name__)
        api = Api(app)
        api.add_resource(ImageFeatures, '/getFeatures/<img>')

        if __name__ == '__main__':
            app.run(port='5002')
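A minimal sketch of calling the resource once the server is up (the image name is just the example hard-coded above):

import requests

resp = requests.get("http://127.0.0.1:5002/getFeatures/Abc_001.jpg")
print(resp.status_code, resp.text)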
Check out https://github.com/PipelineAI/pipeline
We package your TensorFlow model (or any type of model) in a REST-based runtime.