Getting <Response [400]> from the TF Serving API - Python

I have trained a model to classify leaves using TF and TFX, and I am using TFServing to deploy the model in a local environment. I have been able to deploy the model, but when I make a POST request to it, it returns <Response [400]> or requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: http://localhost:8501/v1/models/leaf_classification_native_keras_8/versions/1:predict
Below is the command used to deploy the model:
docker run -p 8501:8501 --name tfserving_leaf_classification --mount type=bind,source=/home/bhargavpatel/Desktop/Image_Classification_tfx/Image_Classification/tfx/pipelines/leaf_classification_native_keras_8/Pusher/pushed_model,target=/models/leaf_classification_native_keras_8 -e MODEL_NAME=leaf_classification_native_keras_8 -t tensorflow/serving &
Python code to make the POST request:
import base64
import io
import json
import numpy as np
from PIL import Image
import requests
import tensorflow as tf
import cv2
import pprint

SERVER_URL = "http://localhost:8501/v1/models/leaf_classification_native_keras_8:predict"
IMAGE_PATH = (
    "/home/bhargavpatel/Desktop/Image_Classification_tfx/Image_Classification/raw_data/test/healthy/healthy_test.0.jpg"
)

def main():
    headers = {"content-type": "serving_default"}
    img = cv2.imread(IMAGE_PATH, 1)
    img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_AREA)
    img = tf.keras.applications.mobilenet.preprocess_input(img)
    image_content = img.astype("uint8").tolist()
    data = json.dumps({"signature_name": "serving_default", "instances": image_content})
    for _ in range(3):
        response = requests.post(SERVER_URL, json=data, headers=headers)
        pprint.pprint(response)
    total_time = 0
    num_requests = 10
    for _ in range(num_requests):
        response = requests.post(SERVER_URL, json=data, headers=headers)
        response.raise_for_status()
        total_time += response.elapsed.total_seconds()
        prediction = response.json()["predictions"][0]
    print("Prediction class: {}, avg latency: {} ms".format(
        np.argmax(prediction), (total_time * 1000) / num_requests))

if __name__ == "__main__":
    main()
I checked whether the Docker container is reachable using the command below:
curl http://localhost:8501/v1/models/leaf_classification_native_keras_8
and received the following response:
{
  "model_version_status": [
    {
      "version": "215",
      "state": "AVAILABLE",
      "status": {
        "error_code": "OK",
        "error_message": ""
      }
    }
  ]
}
But when I POST a request for a prediction, it returns the 400 error.
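A note on the client code above: several details can each trigger a 400 on their own. data is already a JSON string, so passing it via json= makes requests JSON-encode it a second time; the content-type header should be application/json (or simply omitted so that requests sets it); mobilenet.preprocess_input returns float32 values in [-1, 1], so casting to uint8 afterwards destroys the input; and "instances" expects a batch, i.e. a list of instances. Also, the model status above reports version "215", so a URL pinned to /versions/1 would fail as well. A corrected sketch, assuming the signature takes a raw (224, 224, 3) float image:

import json
import cv2
import numpy as np
import requests
import tensorflow as tf

SERVER_URL = "http://localhost:8501/v1/models/leaf_classification_native_keras_8:predict"

img = cv2.imread(IMAGE_PATH, 1)
img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_AREA)
img = tf.keras.applications.mobilenet.preprocess_input(img)  # float32 in [-1, 1]

payload = {
    "signature_name": "serving_default",
    "instances": [img.tolist()],  # a batch containing one instance
}
# Let requests serialize the dict and set Content-Type: application/json itself.
response = requests.post(SERVER_URL, json=payload)
response.raise_for_status()
print(np.argmax(response.json()["predictions"][0]))

If the TFX-pushed signature instead expects serialized tf.Example protos (common for Pusher output), the instances must carry base64-encoded examples rather than a raw tensor; saved_model_cli show --dir <pushed_model_dir> --all shows what the exported signature actually accepts.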

Related

Request to TFServing fails with std::bad_alloc

I have a problem. I want to make a prediction with TFServing, but unfortunately, as soon as I call the TFServing API, the Docker container crashes with the following error:
2022-10-05 08:22:19.091237: I tensorflow_serving/model_servers/server.cc:442] Exporting HTTP/REST API at:localhost:8601 ...
terminate called after throwing an instance of 'std::bad_alloc'
what(): std::bad_alloc
I am using TFServing inside a Docker container, and the call comes from a Flask server. What is causing this? The VM has 16 GB of RAM.
server.py
from flask import current_app, flash, jsonify, make_response, redirect, request, url_for
from keras_preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from dotenv import load_dotenv
from loguru import logger
from pathlib import Path
from flask import Flask
import tensorflow as tf
import numpy as np
import requests
import string
import pickle5 as pickle
import nltk
import re
import os

app = Flask(__name__)
load_dotenv()

@app.route("/test")
def index():
    txt = "This is a text"
    output = get_prediction_probability(txt)
    return output

def text_wragling(text):
    x = text.lower()
    x = remove_URL(x)
    x = remove_punct(x)
    x = remove_stopwords(x)
    with open('tokenizer.pickle', 'rb') as handle:
        tokenizer = pickle.load(handle)
    x = tokenizer.texts_to_sequences([x])
    # pad
    x = pad_sequences(x, maxlen=int(os.getenv('NLP__MAXLEN')))
    return x

def remove_URL(text):
    url = re.compile(r"https?://\S+|www\.\S+")
    return url.sub(r"", text)

def remove_punct(text):
    translator = str.maketrans("", "", string.punctuation)
    return text.translate(translator)

def remove_stopwords(text):
    # nltk.download()
    nltk.download('stopwords')
    from nltk.corpus import stopwords
    stop = set(stopwords.words("english"))
    filtered_words = [word.lower() for word in text.split() if word.lower() not in stop]
    return " ".join(filtered_words)

def get_prediction_probability(txt):
    x = text_wragling(txt)
    logger.info("Txt wragling")
    data = {
        "instances": [
            x.tolist()
        ]
    }
    # logger.info(data)
    logger.info("Get prediction from model")
    response = requests.post("http://localhost:8601/v1/models/nlp_model/labels/production:predict", json=data)
    probability = (np.asarray(response.json()['predictions']).max(axis=1))
    pred = np.asarray(response.json()['predictions']).argmax(axis=1)
    with open('labelenconder.pickle', 'rb') as handle:
        le = pickle.load(handle)
    pred = le.classes_[pred]
    prediction = pred[0]
    return {
        "prediction": prediction,
        "probability": probability[0]
    }

if __name__ == '__main__':
    # test()
    app.run(host='0.0.0.0')
Dockerfile
FROM tensorflow/serving
EXPOSE 8601
docker-compose.yml
version: '3'
services:
  tfserving:
    container_name: tfserving
    build: ..
    ports:
      - "8601:8601"
    volumes:
      - ./model.config:/models/model.config
      - ../model:/models/model
    environment:
      - TENSORFLOW_SERVING_REST_API_PORT=8061
      - TENSORFLOW_SERVING_MODEL_NAME=model
      - TENSORFLOW_MODEL_BASE_PATH=/models/model/
    entrypoint: [ "bash", "-c", "tensorflow_model_server --rest_api_port=8601 --allow_version_labels_for_unavailable_models --model_config_file=/models/model.config" ]
model.config
model_config_list {
  config {
    name: 'nlp_model'
    base_path: '/models/model/'
    model_platform: 'tensorflow'
    model_version_policy {
      specific {
        versions: 1
        versions: 2
      }
    }
    version_labels {
      key: 'production'
      value: 1
    }
    version_labels {
      key: 'beta'
      value: 2
    }
  }
}
This is a known bug; it has already been reported and fixed for TensorFlow Serving 2.11 (not yet released).
Until then, you can use the nightly release from Docker Hub.
You can find the issue here: #2048.
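Assuming the standard image name on Docker Hub, pulling the nightly build looks like this:

docker pull tensorflow/serving:nightly

The container can then be started from tensorflow/serving:nightly instead of tensorflow/serving.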

Failed to load resource: the server responded with a status of 422 (unprocessable entity) in reactjs with python api

I am working on React.js with a Python API and OpenCV, which, after a photo is uploaded, returns the result with a green rectangle around the face. On clicking "upload photo", it returns 422 (Unprocessable Entity). There are three main parts: Upload.js for the frontend upload, main.py for the image API, and face_detector.py for the OpenCV part.
Upload.js
import React, { useState } from 'react'
import './Upload.css'
import axios from 'axios';

const Upload = () => {
    const [file, setFile] = useState();
    const handleChange = (event) => {
        setFile(URL.createObjectURL(event.target.files[0]))
    }
    const submitForm = () => {
        const formData = new FormData();
        formData.append('file', file);
        axios
            .post('http://127.0.0.1:8000/images', formData, {
                headers: {
                    accept: 'multipart/form-data',
                }
            })
            .then(() => {
                alert('file upload succcess');
            })
            .catch(() => alert("File Upload Error"))
        return formData
    }
    return (
        <>
            <input className='img_choose' type="file" onChange={handleChange} />
            <img src={file} className='prev_img' alt='img' />
            <button className='btn_upload' onClick={submitForm}>Upload</button>
        </>
    );
}
export default Upload
main.py
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import Response
from random import randint
from starlette.requests import Request
import uuid
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()
db = []
origins = [
    "http://localhost:3000",
    "http://127.0.0.1:8000/"
]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

@app.get("/main")
def main():
    return {"message": "Welcome"}

@app.post("/images/")
async def create_upload_file(file: UploadFile = File(...)):
    file.filename = f"{uuid.uuid4()}.jpg"
    contents = await file.read()  # <-- Important!
    db.append(contents)
    return {"filename": file.filename}

@app.get("/images/")
async def read_random_file():
    # get a random file from the image db
    random_index = randint(0, len(db) - 1)
    response = Response(content=db[random_index])
    return response
Face_detector.py
import cv2
import urllib.request
import numpy as np

url = [
    "http://127.0.0.1:8000/images/"
]

def url_to_image(url):
    # download the image, convert it to a NumPy array, and then read
    # it into OpenCV format
    resp = urllib.request.urlopen(url)
    image = np.asarray(bytearray(resp.read()), dtype="uint8")
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    # return the image
    return image

for url in url:
    trained_face_data = cv2.CascadeClassifier(
        'haarcascade_frontalface_default.xml')
    x = y = w = h = int
    image = url_to_image(url)
    face_coordinates = trained_face_data.detectMultiScale(image,
                                                          scaleFactor=1.1,
                                                          minNeighbors=5,
                                                          minSize=(30, 30),
                                                          flags=cv2.CASCADE_SCALE_IMAGE)
    for (x, y, w, h) in face_coordinates:
        cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
    cv2.imshow("Image", image)
    cv2.waitKey(0)
This is where you are going wrong:
setFile(URL.createObjectURL(event.target.files[0]))
You are attaching the file's object URL to the formData instead of the file itself, so FastAPI receives a plain string where it expects an UploadFile and rejects the request with 422.
Use this instead:
setFile(event.target.files[0])
(If you still want the image preview, keep the object URL in a separate piece of state instead of overwriting the file.)

Why is the Socket.IO client side not working?

I am trying to make a video streamer that streams the client camera using Socket.IO and Flask, but my Socket.IO client seems not to be responding at all, as it never logs Connected...! to my console. What is the cause of this? Please help, thanks.
Here is my app.py
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
# imports needed by the image handler below
import base64
import io
from io import StringIO
import cv2
import imutils
import numpy as np
from PIL import Image

app = Flask(__name__)
socketio = SocketIO(app)

@app.route('/', methods=['POST', 'GET'])
def index():
    return render_template('index.html')

@socketio.on('image')
def image(data_image):
    sbuf = StringIO()
    sbuf.write(data_image)
    # decode and convert into image
    b = io.BytesIO(base64.b64decode(data_image))
    pimg = Image.open(b)
    ## converting RGB to BGR, as opencv standards
    frame = cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)
    # Process the image frame
    frame = imutils.resize(frame, width=700)
    frame = cv2.flip(frame, 1)
    imgencode = cv2.imencode('.jpg', frame)[1]
    # base64 encode
    stringData = base64.b64encode(imgencode).decode('utf-8')
    b64_src = 'data:image/jpg;base64,'
    stringData = b64_src + stringData
    # emit the frame back
    emit('response_back', stringData)

if __name__ == "__main__":
    socketio.run(app, host='127.0.0.1')
And here is my index.html. I plan to put the response on the img with id image using the Socket.IO JavaScript client on my client side.
<div class='video'>
    <img id="image">
</div>
<script>
    var socket = io('http://localhost:5000');
    socket.on('connect', function () {
        console.log("Connected...!", socket.connected)
    });
    const video = document.querySelector("#videoElement");
    video.width = 500;
    video.height = 375;
    if (navigator.mediaDevices.getUserMedia) {
        navigator.mediaDevices.getUserMedia({ video: true })
            .then(function (stream) {
                video.srcObject = stream;
                video.play();
            })
            .catch(function (err0r) {
                console.log(err0r)
                console.log("Something went wrong!");
            });
    }
    let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
    let dst = new cv.Mat(video.height, video.width, cv.CV_8UC1);
    let cap = new cv.VideoCapture(video);
    const FPS = 22;
    setInterval(() => {
        cap.read(src);
        var type = "image/png"
        var data = document.getElementById("canvasOutput").toDataURL(type);
        data = data.replace('data:' + type + ';base64,', ''); // split off junk at the beginning
        socket.emit('image', data);
    }, 10000 / FPS);
    socket.on('response_back', function (image) {
        const image_id = document.getElementById('image');
        image_id.src = image;
    });
</script>
This is the only response my Flask server gives. It is strange that my Flask server completely stops after that GET. What I expected is for the server to emit the response back, but nothing happens:
* Serving Flask app "app.py" (lazy loading)
* Environment: development
* Debug mode: on
* Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
* Restarting with stat
* Debugger is active!
* Debugger PIN: 268-223-757
127.0.0.1 - - [25/Mar/2021 09:41:49] "GET / HTTP/1.1" 200 -
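Two observations beyond what the original post states: the HTML shown contains no <video id="videoElement"> or <canvas id="canvasOutput"> element, so document.querySelector("#videoElement") returns null and the script throws before it ever emits a frame; and Flask-SocketIO only works with a matching generation of the Socket.IO JavaScript client (python-socketio 4.x pairs with the 2.x JS client, 5.x with the 3.x/4.x clients), so a mismatched client fails the handshake silently and Connected...! never logs.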

Client is gone, closing socket in flask-socketio

I'm currently trying to understand how sockets work. I'm using Flask-SocketIO and a Python Socket.IO client and am running through a basic example. Here is what I have done so far.
Requirements
-i https://pypi.org/simple
certifi==2019.6.16
chardet==3.0.4
click==7.0
confluent-kafka==1.1.0
dependency-injector==3.14.7
docker==4.0.2
flask-socketio==4.2.1
flask==1.1.1
idna==2.8
itsdangerous==1.1.0
jinja2==2.10.1
markupsafe==1.1.1
python-engineio==3.10.0
python-socketio==4.4.0
requests==2.22.0
six==1.12.0
urllib3==1.25.3
websocket-client==0.56.0
werkzeug==0.15.5
server.py
import json
import logging
import os
import sys
import threading
from threading import Lock

from containers import Configs, Consumers, Managers
from errors import ObjectNotFound
from flask import Response
from flask_socketio import SocketIO, emit, join_room, leave_room, \
    close_room, rooms, disconnect
from flask import Flask, render_template, session, request, \
    copy_current_request_context
import sys, traceback

# Configure logger
logging.basicConfig(
    level=logging.ERROR,
    format='%(name)s - %(levelname)s - %(message)s'
)

broker = 'localhost:9092'
# Check environment variable
if 'KAFKA_BROKER' in os.environ:
    broker = os.environ['KAFKA_BROKER']
elif len(sys.argv) > 1 and sys.argv[1]:
    node_id = sys.argv[1]

# Override configuration
Configs.config.override({
    'broker': broker,
    'groupId': 'grpactconsumer'
})

async_mode = None
api = Flask(__name__)
api.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(
    api,
    async_handlers=False,
    ping_timeout=60,
    async_mode=async_mode,
    cors_allowed_origins="*",
    always_connect=True,
    engineio_logger=True
)
thread = None
thread_lock = Lock()
logging.getLogger('engineio').setLevel(logging.ERROR)

stat_consumer = Consumers.consumer()

def worker(topic):
    def on_receive(value):
        socketio.sleep(0)
        socketio.emit('object_stat', value, namespace='/event')
    stat_consumer.consume([topic], on_receive)

worker_thread = threading.Thread(target=worker, args=['Stats'])
worker_thread.start()

def on_event_received():
    socketio.emit('object_event', {}, namespace='/event')

# Start listening events
object_manager = Managers.object_manager()
object_manager.listen_events(['Events'], on_event_received)

@socketio.on('disconnect', namespace='/event')
def test_disconnect():
    print('Client disconnected', request.sid)

@socketio.on('my event')
def handle_my_custom_event(json):
    logging.error('received json: ' + str(json))
    print('received json: ' + str(json))

# CORS Policy handlers
@api.after_request
def after_request(response):
    header = response.headers
    header['Access-Control-Allow-Origin'] = '*'
    return response

# Object not found error handler for api
@api.errorhandler(ObjectNotFound)
def not_found_exception(error):
    return json.dumps({}), 404, {'ContentType': 'application/json'}

# Unhandled error handler for api
@api.errorhandler(Exception)
def unhandled_exception(error):
    return json.dumps({}), 500, {'ContentType': 'application/json'}

# Node Endpoints
@api.route('/node/list', methods=['GET'])
def list_nodes():
    nodes = object_manager.node.list()
    objects = {'nodes': nodes}
    return json.dumps(objects, indent=4), 200, {'ContentType': 'application/json'}

# Service Endpoints
@api.route('/service', methods=['POST'])
def create_service():
    service_id = object_manager.service.create(
        request.json['name'],
        request.json['image'],
        request.json['command'],
        request.json['node_labels'],
        request.json['customer_id'],
        request.json['application_id'],
        request.json['limit']
    )
    return json.dumps({'success': True, 'id': service_id}), 200, {'ContentType': 'application/json'}

@api.route('/service/<service_id>', methods=['GET'])
def get_service(service_id):
    service = object_manager.service.get(service_id)
    return Response(json.dumps(service, indent=4), mimetype='application/json')

@api.route('/service/<service_id>/migrate', methods=['PUT'])
def migrate_service(service_id):
    object_manager.service.migrate(
        service_id,
        request.json['node_labels']
    )
    return json.dumps({'success': True}), 200, {'ContentType': 'application/json'}

@api.route('/service/<service_id>/scale', methods=['PUT'])
def scale_service(service_id):
    object_manager.service.scale(
        service_id,
        request.json['replica_count']
    )
    return json.dumps({'success': True}), 200, {'ContentType': 'application/json'}

@api.route('/service/<service_id>', methods=['DELETE'])
def remove_service(service_id):
    object_manager.service.remove(service_id)
    return json.dumps({'success': True}), 200, {'ContentType': 'application/json'}

@api.route('/service/list', methods=['GET'])
def list_services():
    services = object_manager.service.list()
    objects = {'services': services}
    return json.dumps(objects, indent=4), 200, {'ContentType': 'application/json'}

# Container Endpoints
@api.route('/container/list', methods=['GET'])
def list_containers():
    containers = object_manager.container.list()
    objects = {'containers': containers}
    return json.dumps(objects, indent=4), 200, {'ContentType': 'application/json'}

# Start flask server
socketio.run(api, debug=True, use_reloader=False, host='0.0.0.0')
client.js
/**
 * This file is used for socket connection. Any data received by this class is only shared with data.js.
 */
let namespace = 'http://0.0.0.0:5000/event';
let socket = io.connect(namespace, { 'forceNew': true });

socket.on('object_event', function (msg, cb) {
    $.get('http://0.0.0.0:5000/node/list', function (response) {
        node_data_source = JSON.parse(response)['nodes'];
        for (let data_table_id of Object.keys(data_table_listeners)) {
            if ($.fn.DataTable.isDataTable('#' + data_table_id)) {
                data_table_listeners[data_table_id]();
            }
        }
    });
    if (cb)
        cb();
});

socket.on('object_stat', function (msg, cb) {
    let data = JSON.parse(msg);
    if (stat_data_source) {
        let node_stats = stat_data_source[data['node']];
        node_stats = (typeof node_stats != 'undefined' && node_stats instanceof Array) ?
            node_stats : [];
        node_stats[data['container']] = data;
        stat_data_source[data['node']] = node_stats;
    }
    console.log("Object_stats Socket.js");
    console.log(data);
    for (let stat_listener of Object.keys(stat_data_listeners)) {
        if (stat_data_listeners[stat_listener]) {
            stat_data_listeners[stat_listener]();
        }
    }
    if (cb)
        cb();
});
Errors
linkp-master.0.dey9j12wugl1#vm | INFO:engineio.server:3f2175ed5d72425da816e24ffeddb275: Sending packet MESSAGE data 2/event,["object_stat","{\"ram_limit\": 61254823575.552, \"customer_id\": \"-\", \"time\": 1575638732170, \"io_limit\": 0.0, \"container\": \"b703eadb700f\", \"node\": \"953mxmlwyvltfrx88ujlpkx3k\", \"io_usage\": 0.0, \"application_id\": \"-\", \"cpu_percent\": \"1.38\", \"ram_usage\": 407266918.4, \"network_limit\": 6144000.0, \"network_usage\": 3768000.0, \"pids\": \"25\"}"]
linkp-master.0.dey9j12wugl1#vm | f603fce7e2464586ab77636d127d92dc: Client is gone, closing socket
linkp-master.0.dey9j12wugl1#vm | INFO:engineio.server:f603fce7e2464586ab77636d127d92dc: Client is gone, closing socket
linkp-master.0.dey9j12wugl1#vm | f603fce7e2464586ab77636d127d92dc: Client is gone, closing socket
linkp-master.0.dey9j12wugl1#vm | INFO:engineio.server:f603fce7e2464586ab77636d127d92dc: Client is gone, closing socket
linkp-master.0.dey9j12wugl1#vm | emitting event "object_stat" to all [/event]
linkp-master.0.dey9j12wugl1#vm | INFO:socketio.server:emitting event "object_stat" to all [/event]
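A hedged observation, not from the original post: "Client is gone, closing socket" is engineio reporting a write to a client whose connection has already dropped, and a common trigger is a long-running background job starving the server's event loop so that ping/pong traffic cannot be serviced in time. Since the Kafka consumer here runs in a raw threading.Thread, one thing worth trying is letting Flask-SocketIO schedule it, so it cooperates with whichever async mode is active:

# Instead of threading.Thread(target=worker, args=['Stats']).start():
worker_thread = socketio.start_background_task(worker, 'Stats')

start_background_task is part of the Flask-SocketIO API and falls back to a plain thread under the threading async mode, so the change is low-risk.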

Keras model deployed in tf-serving giving NaN outputs

I have built a Keras classifier that takes in an image (224, 224, 3) and a text encoding (5000) and gives one output. The model works fine when using Keras's predict function.
I saved the model using
tf.saved_model.simple_save(
    sess,
    export_path,
    inputs={'input_image': model.input[0], 'input_text': model.input[1]},
    outputs={t.name: t for t in model.outputs})
When I run it through the REST API, it returns:
{u'predictions': [[nan, nan]]}
The way I am sending data is:
import json
import cv2
import numpy as np
import requests
from tensorflow.python.saved_model import signature_constants

img = cv2.imread(image_path)
# rescale image
img = cv2.resize(img, (scale_size[0], scale_size[1]))
img = img.astype(np.float32)
img = img.astype('float32')
# subtract mean (`mean` is the training-set mean, defined elsewhere)
img -= mean

text = np.load(text_path)
text = text.astype(np.float32)

payload = {
    "signature_name": signature_constants.CLASSIFY_METHOD_NAME,
    "instances": [
        {
            "input_image": img.tolist(),
            "input_text": text.tolist()
        }
    ]
}
# sending post request to TensorFlow Serving server
r = json.dumps(payload)
results = requests.post('http://localhost:9000/v1/models/<model-name>:predict', data=r)
pred = json.loads(results.content.decode('utf-8'))
print(pred)
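Two things worth checking, as observations beyond the original post: tf.saved_model.simple_save registers the signature under the default key "serving_default" (signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY), whereas signature_constants.CLASSIFY_METHOD_NAME is the method name "tensorflow/serving/classify" rather than a signature key, so the payload may be addressing the wrong signature; and NaN outputs often indicate a preprocessing mismatch (for example a mean that differs from the one used in training). A sketch of the payload using the default key:

from tensorflow.python.saved_model import signature_constants

payload = {
    # "serving_default" -- the key simple_save exports under
    "signature_name": signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
    "instances": [
        {"input_image": img.tolist(), "input_text": text.tolist()}
    ]
}

Running saved_model_cli show --dir <export_path> --all against the export confirms the signature names and the expected input dtypes.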
