TensorFlow: Can I run my TensorFlow model directly in Flask? - python

I have created a TensorFlow model, saved it, and tested it. I don't know how to use TensorFlow Serving, and I'm not sure about the input and output nodes of my model, so I can't convert it to a protobuf and serve it with TensorFlow Serving. Can I instead load the model directly in Flask and call the prediction function there to make predictions? I'm confused about why TensorFlow Serving should be the only way to deploy TensorFlow models. Is there an easier, more direct way?
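
For illustration only, here is a minimal sketch of the "load the model directly in Flask" approach the question asks about, assuming a Keras model saved as my_model.h5 and a JSON body shaped like the model's input; the path, route, and payload format are placeholders, not taken from the question:

from flask import Flask, request, jsonify
import numpy as np
import tensorflow as tf

app = Flask(__name__)

# Load the model once at startup, not inside the request handler.
model = tf.keras.models.load_model('my_model.h5')  # placeholder path

@app.route('/predict', methods=['POST'])
def predict():
    # Expect a JSON body like {"instances": [[...], [...]]}
    data = np.array(request.json['instances'], dtype=np.float32)
    preds = model.predict(data)
    return jsonify({'predictions': preds.tolist()})

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)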

You can, but you will need to set up a TensorFlow Serving server; the Flask app then receives the POST request and forwards the data to TensorFlow Serving for inference.
Reference: Deploying-keras-models-using-tensorflow-serving-and-flask
Reference: Serving-TensorFlow Flask client
"""This script wraps the client into a Flask server. It receives POST request with
prediction data, and forward the data to tensorflow server for inference.
"""
from flask import Flask, render_template, request, url_for, jsonify,Response
import json
import tensorflow as tf
import numpy as np
import os
import argparse
import sys
from datetime import datetime
from grpc.beta import implementations
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
tf.app.flags.DEFINE_string('server', 'localhost:9000', 'PredictionService host:port')
FLAGS = tf.app.flags.FLAGS
app = Flask(__name__)
class mainSessRunning():
def __init__(self):
host, port = FLAGS.server.split(':')
channel = implementations.insecure_channel(host, int(port))
self.stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
self.request = predict_pb2.PredictRequest()
self.request.model_spec.name = 'example_model'
self.request.model_spec.signature_name = 'prediction'
def inference(self, val_x):
# temp_data = numpy.random.randn(100, 3).astype(numpy.float32)
temp_data = val_x.astype(np.float32).reshape(-1, 3)
print("temp_data is:", temp_data)
data, label = temp_data, np.sum(temp_data * np.array([1, 2, 3]).astype(np.float32), 1)
self.request.inputs['input'].CopyFrom(
tf.contrib.util.make_tensor_proto(data, shape=data.shape))
result = self.stub.Predict(self.request, 5.0)
return result, label
run = mainSessRunning()
print("Initialization done. ")
# Define a route for the default URL, which loads the form
#app.route('/inference', methods=['POST'])
def inference():
request_data = request.json
input_data = np.expand_dims(np.array(request_data), 0)
result, label = run.inference(input_data)
di={"result":str(result),'label': label[0].tolist()}
return Response(json.dumps(di), mimetype='application/json')
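
As a quick usage sketch (not part of the original answer), a client could POST a JSON array to the /inference route above. The payload shape follows the reshape(-1, 3) in the server code, and the port is an assumption since the snippet never calls app.run:

import requests

# Hypothetical client call; the server reshapes the input to (-1, 3),
# so send a flat list whose length is a multiple of 3.
resp = requests.post('http://localhost:5000/inference', json=[1.0, 2.0, 3.0])
print(resp.json())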

Related

Google App Engine: Failed to load resources using Keras/TensorFlow/Flask

I am currently developing a machine learning web application using TensorFlow/Keras and Flask. For deployment I am using Google App Engine (GAE).
Locally I can run my application, get some data via the UI, and display the prediction using my loaded models (there are four of them, all .h5 files).
I can deploy it on GAE without any errors, but when I open the link in my browser I get a '502 Bad Gateway' error, and when I inspect the page (Chrome) I see three error messages saying 'failed to load resource'.
I am new to web development, so I don't fully understand what and where the problem is. Do I need to declare my models in the .yaml file or load them in some specific way?
My .yaml file:
env: flex
runtime: python
runtime_config:
  python_version: 3
My main.py file:
import pandas as pd
import tensorflow as tf
from flask import Flask, request, redirect, url_for, render_template  # Flask imports used by the routes below
from keras.models import load_model
from keras.backend import set_session
from utils import instagram_scraper
from utils import image_preprocessing
from utils import label_encoding
from utils import overall_class_label
from sklearn.preprocessing import LabelEncoder

global sess
global graph
sess = tf.compat.v1.Session()
graph = tf.compat.v1.get_default_graph()
tf.compat.v1.keras.backend.set_session(sess)

num_attributes = 4
model = [[] for i in range(num_attributes)]
model[0] = load_model('./model/savemodels/glamorous_model.h5')
model[1] = load_model('./model/savemodels/rugged_model.h5')
model[2] = load_model('./model/savemodels/fun_model.h5')
model[3] = load_model('./model/savemodels/healthy_model.h5')

app = Flask(__name__)

def data_collection(brandname):
    url = 'https://www.instagram.com/'+brandname+'/?hl=en'
    scraper = instagram_scraper.InstagramScraper()
    official_images = scraper.profile_page_posts(url)
    return official_images

def data_preprocessing(official_images):
    preprocessed_data = image_preprocessing.preprocessing(official_images)
    return preprocessed_data

def make_prediction(preprocessed_data):
    X_test = preprocessed_data
    with graph.as_default():
        tf.compat.v1.keras.backend.set_session(sess)
        y_pred = [[] for i in range(num_attributes)]
        for i in range(num_attributes):
            y_pred[i] = model[i].predict(X_test)
    y_pred_label = overall_class_label.give_ovr_class_label_output(y_pred)
    # convert from encoded label to label name
    # encoded label
    y_pred_lst = y_pred_label.tolist()
    # map back to original label name
    code2label = {0: 'glamorous', 1: 'rugged', 2: 'fun', 3: 'healthy'}
    y_pred_lbnm = map(code2label.get, y_pred_lst)
    y_pred_lbnm = list(y_pred_lbnm)
    prediction = pd.Series(y_pred_lbnm).value_counts()
    return prediction

@app.route("/", methods=["POST", "GET"])
def index():
    if request.method == "POST":
        brandname = request.form["brandname"]
        return redirect(url_for("predict", brandname=brandname))
    else:
        return render_template("index.html")

@app.route("/predict/<brandname>", methods=["POST", "GET"])
def predict(brandname):
    official_images = data_collection(brandname)
    preprocessed_data = data_preprocessing(official_images)
    prediction = make_prediction(preprocessed_data)
    return render_template("predict.html", prediction=prediction)

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8080, debug=True)
My app structure:
parent
  --model
    --savemodels
      fun_model.h5
      glamorous_model.h5
      healthy_model.h5
      rugged_model.h5
  --static
    style.css
  --templates
    index.html
    predict.html
  --utils
    image_preprocessing.py
    instagram_scraper.py
    label_encoding.py
    overall_class_label.py
  app.yaml
  main.py
  requirements.txt
Any help is very much appreciated, thank you!
All the best,
snowe
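
As an illustrative sketch only (an assumption, not a confirmed fix for this question): the App Engine flexible environment lets you request a larger instance in app.yaml, which can matter when several .h5 models are loaded at startup, since running out of memory is one common cause of 502 responses behind the GAE gateway:

# Sketch of a flex app.yaml with explicit resources; the values are placeholders.
env: flex
runtime: python
runtime_config:
  python_version: 3
resources:
  cpu: 2
  memory_gb: 4
  disk_size_gb: 10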

How to log from a custom AI Platform model

I recently deployed a custom model to Google Cloud's AI Platform, and I am trying to debug some parts of my preprocessing logic. However, my print statements are not being logged to the Stackdriver output. I have also tried using the logging client imported from google.cloud, to no avail. Here is my custom prediction file:
import os
import pickle
import numpy as np
from sklearn.datasets import load_iris
import tensorflow as tf
from google.cloud import logging

class MyPredictor(object):
    def __init__(self, model, preprocessor):
        self.logging_client = logging.Client()
        self._model = model
        self._preprocessor = preprocessor
        self._class_names = ["Snare", "Kicks", "ClosedHH", "ClosedHH", "Clap", "Crash", "Perc"]

    def predict(self, instances, **kwargs):
        log_name = "Here I am"
        logger = self.logging_client.logger(log_name)
        text = 'Hello, world!'
        logger.log_text(text)
        print('Logged: {}'.format(text), kwargs.get("sr"))
        inputs = np.asarray(instances)
        outputs = self._model.predict(inputs)
        if kwargs.get('probabilities'):
            return outputs.tolist()
            # return "[]"
        else:
            return [self._class_names[index] for index in np.argmax(outputs.tolist(), axis=1)]

    @classmethod
    def from_path(cls, model_dir):
        model_path = os.path.join(model_dir, 'model.h5')
        model = tf.keras.models.load_model(model_path, custom_objects={"adam": tf.keras.optimizers.Adam,
            "categorical_crossentropy": tf.keras.losses.categorical_crossentropy, "lr": 0.01, "name": "Adam"})
        preprocessor_path = os.path.join(model_dir, 'preprocessor.pkl')
        with open(preprocessor_path, 'rb') as f:
            preprocessor = pickle.load(f)
        return cls(model, preprocessor)
I can't find anything online about why my logs are not showing up in Stackdriver (neither the print statements nor the logging-library calls). Has anyone faced this issue?
Thanks,
Nikita
NOTE: If you have enough rep to create tags, please add the google-ai-platform tag to this post. I think it would really help people who are in my position. Thanks!
From the documentation:
If you want to enable online prediction logging, you must configure it when you create a model resource or when you create a model version resource, depending on which type of logging you want to enable. There are three types of logging, which you can enable independently:
- Access logging, which logs information like timestamp and latency for each request to Stackdriver Logging. You can enable access logging when you create a model resource.
- Stream logging, which logs the stderr and stdout streams from your prediction nodes to Stackdriver Logging, and can be useful for debugging. This type of logging is in beta, and it is not supported by Compute Engine (N1) machine types. You can enable stream logging when you create a model resource.
- Request-response logging, which logs a sample of online prediction requests and responses to a BigQuery table. This type of logging is in beta. You can enable request-response logging by creating a model version resource, then updating that version.
For your use case, please use the following template to log custom information into Stackdriver:
Model:
gcloud beta ai-platform models create {MODEL_NAME} \
  --regions {REGION} \
  --enable-logging \
  --enable-console-logging
Model version:
gcloud beta ai-platform versions create {VERSION_NAME} \
  --model {MODEL_NAME} \
  --origin gs://{BUCKET}/{MODEL_DIR} \
  --python-version 3.7 \
  --runtime-version 1.15 \
  --package-uris gs://{BUCKET}/{PACKAGES_DIR}/custom-model-0.1.tar.gz \
  --prediction-class=custom_prediction.CustomModelPrediction \
  --service-account custom@project_id.iam.gserviceaccount.com
I tried this and it worked fine. I made some modifications to the constructor due to the @classmethod decorator.
- Create a service account and grant it the "Stackdriver Debugger User" role; use it during model version creation.
- Add the google-cloud-logging library to your setup.py.
- Consider the extra cost of enabling Stackdriver logging.
- When using log_struct, check that the correct type is passed (if using str, make sure you convert bytes to str in Python 3 using .decode('utf-8')).
- Define the project_id parameter during Stackdriver client creation, logging.Client(), otherwise you will get:
  ERROR:root:Prediction failed: 400 Name "projects//logs/my-custom-prediction-log" is missing the parent component. Expected the form projects/[PROJECT_ID]/logs/[ID]"
Code below:
%%writefile cloud_logging.py
import os
import pickle
import numpy as np
from datetime import date
from google.cloud import logging
import tensorflow.keras as keras

LOG_NAME = 'my-custom-prediction-log'

class CustomModelPrediction(object):
    def __init__(self, model, processor, client):
        self._model = model
        self._processor = processor
        self._client = client

    def _postprocess(self, predictions):
        labels = ['negative', 'positive']
        return [
            {
                "label": labels[int(np.round(prediction))],
                "score": float(np.round(prediction, 4))
            } for prediction in predictions]

    def predict(self, instances, **kwargs):
        logger = self._client.logger(LOG_NAME)
        logger.log_struct({'instances': instances})
        preprocessed_data = self._processor.transform(instances)
        predictions = self._model.predict(preprocessed_data)
        labels = self._postprocess(predictions)
        return labels

    @classmethod
    def from_path(cls, model_dir):
        client = logging.Client(project='project_id')  # Change to your project
        model = keras.models.load_model(
            os.path.join(model_dir, 'keras_saved_model.h5'))
        with open(os.path.join(model_dir, 'processor_state.pkl'), 'rb') as f:
            processor = pickle.load(f)
        return cls(model, processor, client)

# Verify model locally
from cloud_logging import CustomModelPrediction
classifier = CustomModelPrediction.from_path('.')
requests = ["God I hate the north", "god I love this"]
response = classifier.predict(requests)
response
Then I check with the sample library:
python snippets.py my-custom-prediction-log list
Listing entries for logger my-custom-prediction-log:
* 2020-02-19T19:51:45.809767+00:00: {u'instances': [u'God I hate the north', u'god I love this']}
* 2020-02-19T19:57:18.615159+00:00: {u'instances': [u'God I hate the north', u'god I love this']}
To view the logs, go to Stackdriver > Logging and select Global and your log name; if you want to see model logs, you should be able to select Cloud ML Model version.
You can use my files here: model and pre-processor
If you just want your print statements to work without using the logging approach above, you can add the flush flag to your print call:
print("logged", flush=True)

Retrieving params from a GET request to fetch the value (Postman)

From Postman, I'm trying to send a GET request with a query param whose key is path and whose value is home/guest/images/abc.jpeg.
I want to retrieve that value and use it as the argument of my function.
Somehow I'm unable to do so; the API is not being called. Any input would be helpful.
I'm new to writing REST APIs with Flask in Python. I want to read the value of the path param into TEST_PATH so that the rest of the code runs and returns the JSONP_data.
from flask import Flask
from flask_cors import CORS
import pandas as pd
import datetime
import numpy as np
from flask import jsonify
import json
from pymongo import MongoClient

# set the project root directory as the static folder, you can set others.
app = Flask(__name__)
CORS(app)
counter = 0

@app.route('/')
def api_root():
    return ('Welcome')

@app.route('/generateTags/<id>', methods=['GET'])
def generateTags1(id):
    file = json.loads(create_task(id))
    TEST_PATH = file['path']
    print(TEST_PATH)
    import DenseNet
    import Inception
    import ResNet
    import SqueezNet
    from imageai.Prediction import ImagePrediction
    prediction = ImagePrediction()
    # TEST_PATH = '/home/anit/Documents/Image Analysis/ImageAI-master/Dataset'
    dense_list = DenseNet.tag(TEST_PATH, DenseNet.prediction)
    dense_list = ['DenseNet'] + dense_list.tolist()[0]
    res_list = ResNet.tag(TEST_PATH, ResNet.prediction)
    res_list = ['ResNet'] + res_list.tolist()[0]
    incept_list = Inception.tag(TEST_PATH, Inception.prediction)
    incept_list = ['Inception'] + incept_list.tolist()[0]
    squeez_list = SqueezNet.tag(TEST_PATH, SqueezNet.prediction)
    squeez_list = ['SqueezNet'] + squeez_list.tolist()[0]
    d = {'filename': dense_list[1], 'models': {}}
    for l in [dense_list, res_list, incept_list, squeez_list]:
        # breakpoint()
        d['models'][l[0]] = l[2:]
    JSONP_data = jsonify(d)
    return JSONP_data

if __name__ == '__main__':
    app.run()
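
As an illustrative sketch only (not taken from the question): in Flask, a query parameter sent by Postman with key path can be read with request.args; the route and variable names below are placeholders:

from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/generateTags', methods=['GET'])
def generate_tags():
    # Read the "path" query parameter, e.g. GET /generateTags?path=home/guest/images/abc.jpeg
    test_path = request.args.get('path')
    if test_path is None:
        return jsonify({'error': 'missing "path" query parameter'}), 400
    return jsonify({'path': test_path})

if __name__ == '__main__':
    app.run()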

Image Recognition with TensorFlow + Keras + Flask

I was trying to create a web application (REST API) for image recognition with TensorFlow + Keras in Flask. I followed some resources available on the internet and came up with the script below:
from imageai.Prediction import ImagePrediction
import os

execution_path = os.getcwd()
prediction = ImagePrediction()
prediction.setModelTypeAsResNet()
prediction.setModelPath(execution_path + "\\resnet50_weights_tf_dim_ordering_tf_kernels.h5")
prediction.loadModel()

for i in range(3):
    predictions, percentage_probabilities = prediction.predictImage("C:\\Users\\Administrator\\Downloads\\pics\\banana" + str(i) + ".jpg", result_count=5)
    for index in range(len(predictions)):
        print(predictions[index], " : ", percentage_probabilities[index])
This worked fine as a standalone script. Then I tried to convert it into a Flask application, and it starts failing.
import flask
from imageai.Prediction import ImagePrediction
import os
import json
import keras

# initialize our Flask application and the Keras model
app = flask.Flask(__name__)

def init():
    global model
    execution_path = os.getcwd()
    model = ImagePrediction()
    model.setModelTypeAsResNet()
    model.setModelPath(os.path.join(os.getcwd(), "models", "resnet50_weights_tf_dim_ordering_tf_kernels.h5"))
    model.loadModel()

# API for prediction
@app.route("/predict", methods=["GET"])
def predict():
    predictions, percentage_probabilities = model.predictImage(os.path.join(os.getcwd(), "pics", "banana.jpg"), result_count=5)
    mylist = []
    for index in range(len(predictions)):
        mydict = {}
        mydict[predictions[index]] = percentage_probabilities[index]
        mylist.append(mydict)
    keras.backend.clear_session()
    return sendResponse(json.dumps(mylist))

# Cross origin support
def sendResponse(responseObj):
    response = flask.jsonify(responseObj)
    response.headers.add('Access-Control-Allow-Origin', '*')
    response.headers.add('Access-Control-Allow-Methods', 'GET')
    response.headers.add('Access-Control-Allow-Headers', 'accept,content-type,Origin,X-Requested-With,Content-Type,access_token,Accept,Authorization,source')
    response.headers.add('Access-Control-Allow-Credentials', True)
    return response

# if this is the main thread of execution first load the model and then start the server
if __name__ == "__main__":
    print(("* Loading Keras model and Flask starting server..."
           "please wait until server has fully started"))
    init()
    app.run(threaded=True)
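
A hedged sketch, not an authoritative fix: keras.backend.clear_session() inside the request handler destroys the graph the weights were loaded into, and a threaded Flask server together with TF1-style Keras often needs the default graph captured at load time and reused per request. The rewrite below illustrates that pattern; the model path and route are kept from the question, everything else is an assumption:

import os
import flask
import tensorflow as tf
from imageai.Prediction import ImagePrediction

app = flask.Flask(__name__)
model = None
graph = None

def init():
    global model, graph
    model = ImagePrediction()
    model.setModelTypeAsResNet()
    model.setModelPath(os.path.join(os.getcwd(), "models", "resnet50_weights_tf_dim_ordering_tf_kernels.h5"))
    model.loadModel()
    graph = tf.get_default_graph()   # remember the graph the weights were loaded into (TF1-style)

@app.route("/predict", methods=["GET"])
def predict():
    # Run inference in the graph captured at startup; no keras.backend.clear_session() here,
    # since clearing the session would invalidate the already-loaded model.
    with graph.as_default():
        predictions, probabilities = model.predictImage(os.path.join(os.getcwd(), "pics", "banana.jpg"), result_count=5)
    return flask.jsonify([{p: float(s)} for p, s in zip(predictions, probabilities)])

if __name__ == "__main__":
    init()
    app.run(threaded=True)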

How to feed a network through REST

I'm new to Python and trying to build an app with TensorFlow. Basically, what I need is to get features from a loaded neural network, which takes around 3 minutes to load.
I would like my script below to load the neural network just once at startup, so that I can call a REST function just to feed an image to the network.
from flask import Flask, request
from flask_restful import Resource, Api
from scipy import misc
import tensorflow as tf
import numpy as np
import sys
import os
import argparse
import facenet  # provides get_model_filenames and load_model used below

class ImageFeatures(Resource):
    def get(self, img):
        image = misc.imread(os.path.expanduser("Img/Abc_001.jpg"))
        feed_dict = {images_placeholder: image, phase_train_placeholder: False}
        emb = sess.run(embeddings, feed_dict=feed_dict)
        return (emb)

with tf.Graph().as_default():
    with tf.Session() as sess:
        model_dir = 'Model/'
        meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(model_dir))
        facenet.load_model(model_dir, meta_file, ckpt_file)
        images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
        print("Rest Running")
        app = Flask(__name__)
        api = Api(app)
        api.add_resource(ImageFeatures, '/getFeatures/<img>')
        if __name__ == '__main__':
            app.run(port='5002')
Check out https://github.com/PipelineAI/pipeline
We package your TensorFlow model (or any type of model) in a REST-based runtime.
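
As a small usage sketch (an assumption, not taken from the answer): once the Flask-RESTful app above is running on port 5002, the /getFeatures/<img> endpoint can be exercised like this:

import requests

# Hypothetical client call against the /getFeatures/<img> route defined above.
resp = requests.get('http://localhost:5002/getFeatures/Abc_001.jpg')
print(resp.status_code)
print(resp.text)  # the embedding returned by the resource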
