Request to TFServing fails with std::bad_alloc - python

I have a problem. I want to make a prediction with TFServing, but unfortunately, as soon as I call the TFServing API, the Docker container crashes with the following error:
2022-10-05 08:22:19.091237: I tensorflow_serving/model_servers/server.cc:442] Exporting HTTP/REST API at:localhost:8601 ...
terminate called after throwing an instance of 'std::bad_alloc'
what(): std::bad_alloc
I am using TFServing inside a Docker container and the call comes from a Flask server. What is causing this? The VM has 16 GB of RAM.
server.py
from flask import current_app, flash, jsonify, make_response, redirect, request, url_for
from keras_preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from dotenv import load_dotenv
from loguru import logger
from pathlib import Path
from flask import Flask
import tensorflow as tf
import numpy as np
import requests
import string
import pickle5 as pickle
import nltk
import re
import os
app = Flask(__name__)
load_dotenv()

@app.route("/test")
def index():
    txt = "This is a text"
    output = get_prediction_probability(txt)
    return output

def text_wragling(text):
    x = text.lower()
    x = remove_URL(x)
    x = remove_punct(x)
    x = remove_stopwords(x)
    with open('tokenizer.pickle', 'rb') as handle:
        tokenizer = pickle.load(handle)
    x = tokenizer.texts_to_sequences([x])
    # pad
    x = pad_sequences(x, maxlen=int(os.getenv('NLP__MAXLEN')))
    return x

def remove_URL(text):
    url = re.compile(r"https?://\S+|www\.\S+")
    return url.sub(r"", text)

def remove_punct(text):
    translator = str.maketrans("", "", string.punctuation)
    return text.translate(translator)

def remove_stopwords(text):
    # nltk.download()
    nltk.download('stopwords')
    from nltk.corpus import stopwords
    stop = set(stopwords.words("english"))
    filtered_words = [word.lower() for word in text.split() if word.lower() not in stop]
    return " ".join(filtered_words)

def get_prediction_probability(txt):
    x = text_wragling(txt)
    logger.info("Txt wragling")
    data = {
        "instances": [
            x.tolist()
        ]
    }
    # logger.info(data)
    logger.info("Get prediction from model")
    response = requests.post("http://localhost:8601/v1/models/nlp_model/labels/production:predict", json=data)
    probability = np.asarray(response.json()['predictions']).max(axis=1)
    pred = np.asarray(response.json()['predictions']).argmax(axis=1)
    with open('labelenconder.pickle', 'rb') as handle:
        le = pickle.load(handle)
    pred = le.classes_[pred]
    prediction = pred[0]
    return {
        "prediction": prediction,
        "probability": probability[0]
    }

if __name__ == '__main__':
    # test()
    app.run(host='0.0.0.0')
Dockerfile
FROM tensorflow/serving
EXPOSE 8601
docker-compose.yml
version: '3'
services:
  tfserving:
    container_name: tfserving
    build: ..
    ports:
      - "8601:8601"
    volumes:
      - ./model.config:/models/model.config
      - ../model:/models/model
    environment:
      - TENSORFLOW_SERVING_REST_API_PORT=8061
      - TENSORFLOW_SERVING_MODEL_NAME=model
      - TENSORFLOW_MODEL_BASE_PATH=/models/model/
    entrypoint: [ "bash", "-c", "tensorflow_model_server --rest_api_port=8601 --allow_version_labels_for_unavailable_models --model_config_file=/models/model.config" ]
model.config
model_config_list {
  config {
    name: 'nlp_model'
    base_path: '/models/model/'
    model_platform: 'tensorflow'
    model_version_policy {
      specific {
        versions: 1
        versions: 2
      }
    }
    version_labels {
      key: 'production'
      value: 1
    }
    version_labels {
      key: 'beta'
      value: 2
    }
  }
}

This is a known error; it has already been reported and fixed for TensorFlow Serving 2.11 (not yet released).
Until then, you can use the nightly release from Docker Hub.
You can find the issue here: #2048.
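If you want to try the fix before 2.11 ships, one option (a sketch, assuming the nightly tag is available for your platform) is to point the Dockerfile shown above at the nightly image:

FROM tensorflow/serving:nightly
EXPOSE 8601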

Related

Getting <Response [400]> from the TF serving api

I have trained a model to classify leaves using TF and TFX. I am using TFServing to deploy the model in a local environment. I have been able to deploy the model, but when I make a POST request to it, it returns <Response [400]> or requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: http://localhost:8501/v1/models/leaf_classification_native_keras_8/versions/1:predict
Below is the command to deploy the model:
docker run -p 8501:8501 --name tfserving_leaf_classification --mount type=bind,source=/home/bhargavpatel/Desktop/Image_Classification_tfx/Image_Classification/tfx/pipelines/leaf_classification_native_keras_8/Pusher/pushed_model,target=/models/leaf_classification_native_keras_8 -e MODEL_NAME=leaf_classification_native_keras_8 -t tensorflow/serving &
Python code to make the POST request:
import base64
import io
import json
import numpy as np
from PIL import Image
import requests
import tensorflow as tf
import cv2
import pprint

SERVER_URL = "http://localhost:8501/v1/models/leaf_classification_native_keras_8:predict"
IMAGE_PATH = (
    "/home/bhargavpatel/Desktop/Image_Classification_tfx/Image_Classification/raw_data/test/healthy/healthy_test.0.jpg"
)

def main():
    headers = {"content-type": "serving_default"}
    img = cv2.imread(IMAGE_PATH, 1)
    img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_AREA)
    img = tf.keras.applications.mobilenet.preprocess_input(img)
    image_content = img.astype("uint8").tolist()
    data = json.dumps({"signature_name": "serving_default", "instances": image_content})
    for _ in range(3):
        response = requests.post(SERVER_URL, json=data, headers=headers)
        pprint.pprint(response)
    total_time = 0
    num_requests = 10
    for _ in range(num_requests):
        response = requests.post(SERVER_URL, json=data, headers=headers)
        response.raise_for_status()
        total_time += response.elapsed.total_seconds()
        prediction = response.json()["predictions"][0]
    print("Prediction class: {}, avg latency: {} ms".format(np.argmax(prediction), (total_time * 1000) / num_requests))

if __name__ == "__main__":
    main()
I checked whether the Docker container is reachable using the command below:
curl http://localhost:8501/v1/models/leaf_classification_native_keras_8
and received the response below:
{
  "model_version_status": [
    {
      "version": "215",
      "state": "AVAILABLE",
      "status": {
        "error_code": "OK",
        "error_message": ""
      }
    }
  ]
}
But when I post a request for a prediction, it returns the 400 error response.
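A likely culprit (an educated guess from the code, not confirmed by the poster): the payload is serialized twice. json.dumps(...) already produces a string, and passing that string to requests.post(..., json=data) encodes it again, so TF Serving receives a JSON string instead of a JSON object and rejects it with 400. The non-standard "serving_default" content-type header doesn't help either. A minimal sketch of the fix:

payload = {"signature_name": "serving_default", "instances": image_content}
# let requests serialize the dict exactly once
response = requests.post(
    SERVER_URL,
    json=payload,  # not json=json.dumps(payload)
    headers={"content-type": "application/json"},
)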

Heroku Returned a H10 Error for Python App Deployment

I'm trying to deploy a Python app on Heroku, but Heroku returned an H10 error. Here is my Python code:
import os
import requests
import random
from flask import Flask, jsonify, request
from flask_cors import CORS
from system.backend.data import Data
from system.backend.folder import Folder
from system.wallet.data_pool import DataPool
from system.wallet.exchange import Exchange
from system.pubsub import PubSub

app = Flask(__name__)
CORS(app, resources={ r'/*': { 'origins': 'http://localhost:8080' } })

data = Data()
data_pool = DataPool()
folder = Folder(data)
pubsub = PubSub(data, data_pool)

@app.route('/')
def default():
    return 'Welcome to mypython app'

@app.route('/main')
def route_mainn():
    return jsonify(data.to_json())

@app.route('/main/range')
def route_main_range():
    start = int(request.args.get('start'))
    end = int(request.args.get('end'))
    return jsonify(data.to_json()[::-1][start:end])

@app.route('/main/datalength')
def route_main_datalength():
    return jsonify(len(data.length))

@app.route('/myapp/data')
def route_myapp_data():
    app_data = data_pool.app_data()
    exchange_data.append(Exchange.reward_exchange(folder).to_json())
    data.add_data(exchange_data)
    folder = data.length[-1]
    pubsub.broadcast_folder(folder)
    data_pool.clear_data_exchange(data)
    return jsonify(data.to_json())

@app.route('/folder/exchange', methods=['POST'])
def route_folder_exchange():
    exchange_data = request.get_json()
    exchange = data_pool.existing_exchange(folder.address)
    if exchange:
        exchange.update(
            folder,
            exchange_data['recipient'],
            exchange_data['sender']
        )
    else:
        exchange = Exchange(
            folder,
            exchange_data['recipient'],
            exchange_data['sender']
        )
    pubsub.broadcast_exchange(exchange)
    data_pool.set_exchange(exchange)
    return jsonify(exchange.to_json())

@app.route('/folder/info')
def route_folder_info():
    return jsonify({'address': folder.address, 'data': folder.balance})

@app.route('/known-addresses')
def route_known_addresses():
    known_addresses = set()
    for data in main.length:
        for exchange in main.data:
            exchange['output'].keys()
            known_addresses.update(exchange['output'].keys())
    return jsonify(list(known_addresses))

@app.route('/exchange')
def route_exchanges():
    return jsonify(exchange_pool.exchange_data())

ROOT_PORT = 8080
PORT = ROOT_PORT
if os.environ.get('PEER') == 'True':
    PORT = random.randint(8081, 9000)
    result = requests.get(f'http://localhost:{ROOT_PORT}/main')
    print(f'result.json(): {result.json()}')
    result_main = Data.from_json(result.json())
    try:
        data.replace_length(result_data.length)
        print('\n -- Successfully synchronized the local data')
    except Exception as e:
        print(f'\n -- Error synchronizing: {e}')

if os.environ.get('SEED_DATA') == 'True':
    for i in range(10):
        main.add_folder([
            Exchange(Folder(), Folder().address, random.randint(500, 1000)).to_json(),
            Exchange(Folder(), Folder().address, random.randint(500, 1000)).to_json()
        ])
    for i in range(3):
        data_pool.set_exchange(
            Exchange(Folder(), Folder().address, random.randint(500, 1000))
        )

app.run(port=PORT)
I also made a worker.py file with the following code:
import os
import redis
from rq import Worker, Queue, Connection

listen = ['high', 'default', 'low']
redis_url = os.getenv('REDIS_URL', 'redis://localhost:8080')
conn = redis.from_url(redis_url)

if __name__ == '__main__':
    with Connection(conn):
        worker = Worker(map(Queue, listen))
        worker.work()
The Mac terminal recommended using waitress for Python deployment, but I'm not sure how to implement waitress within the code shown above.
To use waitress:
Just do pip install waitress
And add this snippet:
if __name__ == '__main__':
    from waitress import serve
    serve(app, host='0.0.0.0', port=8000)
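One thing worth checking for the H10 itself (an assumption, since the Procfile isn't shown): Heroku assigns the port a web dyno must bind to via the PORT environment variable, and binding to a hardcoded port is a common cause of H10 crashes. A sketch that respects that:

import os

if __name__ == '__main__':
    from waitress import serve
    # bind to the port Heroku injects; fall back to 8000 for local runs
    serve(app, host='0.0.0.0', port=int(os.environ.get('PORT', 8000)))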

Python nornir unittest mocking how to?

I'm using the nornir (3.3.0) automation framework with Python 3.8. I'd like to mock the SSH access to the devices in order to do testing without having some real or virtual network equipment online. How would I use patch or Mock/MagicMock from unittest.mock to mock netmiko_send_command (ssh interaction with device)?
I have the following nornir task function:
# dbb_automation/tasks.py
from nornir.core.task import Task, Result
from nornir_netmiko.tasks import netmiko_send_command

def get_interfaces_with_ip(task: Task):
    log.debug(f"{task.name}: Getting result on host {task.host}")
    result: MultiResult = task.run(name="show ip int br | e unass", task=netmiko_send_command,
                                   command_string="show ip int br | e unass")
    content_str = result[0].result
    task.run(
        task=write_file,
        filename=f"outputs/{task.host}-{purpose}.{ending}",
        content=content_str
    )
    return Result(
        host=task.host,
        result=f"{task.host.name} got ip result"
    )
and the following test case (work in progress):
# tests/test_tasks.py
from dbb_automation.tasks import get_interfaces_with_ip
from nornir import InitNornir
from nornir.core.filter import F
from tests.settings import *

def test_get_interfaces_with_ip():
    # [x] init nornir with fake host
    # [ ] patch/mock netmiko_send_command
    # [ ] check file contents with patched return string of netmiko_send_command
    nr = InitNornir(
        core={
            "raise_on_error": True
        },
        runner={
            "plugin": "threaded",
            "options": {
                "num_workers": 1,
            }
        },
        inventory={
            "plugin": "SimpleInventory",
            "options": {
                "host_file": DNAC_HOSTS_YAML,
                "group_file": DNAC_GROUPS_YAML,
                "defaults_file": DNAC_DEFAULT_YAML
            }
        },
        logging={
            "log_file": "logs/nornir.log"
        }
    )
    result = nr.filter(F(has_parent_group="Borders")).run(name="get_interfaces_with_ip", task=get_interfaces_with_ip)
    # todo: test code
    assert False
Regards,
Gérard
I think I found the solution. The key was to patch where the imported function is used, not where it is defined, and to set the return value on the mock object.
#patch("dbb_automation.tasks.netmiko_send_command")
def test_get_interfaces_with_ip(mock_netmiko_send_command, nr):
...
mock_netmiko_send_command.return_value = """Interface IP-Address OK? Method Status Protocol
GigabitEthernet22 10.1.54.146 YES TFTP up up
Loopback0 10.150.32.2 YES other up up
Port-channel1.2 10.150.33.65 YES manual up up
...
"""
import pytest
import os
import shutil
from unittest.mock import patch
from dbb_automation.tasks import get_interfaces_with_ip
from nornir import InitNornir
from nornir.core.filter import F
from tests.settings import *

@pytest.fixture()
def nr():
    nr = InitNornir(
        core={
            "raise_on_error": True
        },
        runner={
            "plugin": "threaded",
            "options": {
                "num_workers": 1,
            }
        },
        inventory={
            "plugin": "SimpleInventory",
            "options": {
                "host_file": DNAC_HOSTS_YAML,
                "group_file": DNAC_GROUPS_YAML,
                "defaults_file": DNAC_DEFAULT_YAML
            }
        },
        logging={
            "log_file": "logs/nornir.log"
        }
    )
    return nr

@patch("dbb_automation.tasks.netmiko_send_command")
def test_get_interfaces_with_ip(mock_netmiko_send_command, nr):
    output_folder_name = "outputs"
    shutil.rmtree(output_folder_name)
    os.mkdir(output_folder_name)
    mock_netmiko_send_command.return_value = """Interface IP-Address OK? Method Status Protocol
GigabitEthernet22 10.1.54.146 YES TFTP up up
Loopback0 10.150.32.2 YES other up up
Port-channel1.2 10.150.33.65 YES manual up up
"""
    nr.filter(F(has_parent_group="Borders")).run(name="get_interfaces_with_ip", task=get_interfaces_with_ip)
    # test code
    count = 0
    files_found = None
    for root_dir, cur_dir, files in os.walk(output_folder_name):
        count += len(files)
        assert files_found is None  # make sure there are no subdirectories
        files_found = files
    assert count == 4  # we expect a file for each host
    for file_name in files_found:
        with open(f"{output_folder_name}/{file_name}") as f:
            assert f.read() == mock_netmiko_send_command.return_value
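One detail worth noting (standard unittest.mock/pytest behavior, not specific to nornir): when @patch decorates a pytest test, the mock is injected as a leading positional argument, before any fixtures, which is why mock_netmiko_send_command precedes the nr fixture in the signature. A minimal signature sketch, with a hypothetical test_example:

from unittest.mock import patch

@patch("dbb_automation.tasks.netmiko_send_command")
def test_example(mock_netmiko_send_command, nr):
    # mocks from @patch come first (bottom-up if stacked); fixtures follow by name
    ...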

How to update the Google Cloud scheduler with Python api

I am trying to update a Google Cloud Scheduler job, but it does not work. How do I do this?
Now I understand what the problem is: the option class is wrong.
How should I write this part?
Target(topic_name="projects/aaa/topics/bbb",data="test".encode("utf-8"))
The error message.
TypeError: Parameter to MergeFrom() must be instance of same class: expected google.cloud.scheduler.v1.PubsubTarget got PubsubTarget.
My code:
import os
from google.cloud import scheduler_v1
from google.cloud.scheduler_v1 import PubsubTarget as Target
from google.protobuf import field_mask_pb2

pub = Target(topic_name="projects/aaa/topics/bbb", data="test".encode("utf-8"))
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = './test.json'
client = scheduler_v1.CloudSchedulerClient()
job = {
    'name': "projects/aaa/locations/us-central1/jobs/test",
    "description": "c",
    "schedule": "59 * * * *",
    "pubsub_target": pub
}
update_mask = field_mask_pb2.FieldMask(paths=['description', 'schedule', 'pubsub_target'])
response = client.update_job(job=job, update_mask=update_mask)
print(response)
import os
from google.cloud import scheduler_v1
from google.cloud.scheduler_v1 import Job, PubsubTarget as Target  # Job import added; the original omitted it

os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = './test.json'
client = scheduler_v1.CloudSchedulerClient()
job = Job()
job.name = 'projects/aaa/locations/us-central1/jobs/test'
job.description = 'c'
job.schedule = '59 * * * *'
job.time_zone = 'America/Los_Angeles'
pt = Target()
pt.topic_name = 'projects/aaa/topics/bbb'
pt.data = 'test'.encode('utf-8')
job.pubsub_target = pt
job = client.create_job(parent='projects/aaa/locations/us-central1', job=job)
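For the update itself, the MergeFrom() error typically means the PubsubTarget instance comes from a different generated proto class than the one the client library expects. Building the whole Job from scheduler_v1 types should avoid the mismatch; a sketch (untested, reusing the names from the question):

import os
from google.cloud import scheduler_v1
from google.protobuf import field_mask_pb2

os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = './test.json'
client = scheduler_v1.CloudSchedulerClient()

# build every message from scheduler_v1 so the proto classes match
job = scheduler_v1.Job(
    name='projects/aaa/locations/us-central1/jobs/test',
    description='c',
    schedule='59 * * * *',
    pubsub_target=scheduler_v1.PubsubTarget(
        topic_name='projects/aaa/topics/bbb',
        data='test'.encode('utf-8'),
    ),
)
update_mask = field_mask_pb2.FieldMask(paths=['description', 'schedule', 'pubsub_target'])
response = client.update_job(job=job, update_mask=update_mask)
print(response)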

Swift Client with DecisionTreeRegressor

I am working with a Bluemix object storage container. What I want to do is store my RandomForestRegressor in a pkl file with joblib. But when I run the code with the Swift client I receive the error:
TypeError: object of type 'DecisionTreeRegressor' has no len()
Here is my code, please help.
import os
from flask import Flask, render_template, request, json
from flask.ext.cors import CORS
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
import random
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error
import os
from sklearn.externals import joblib
import pickle
import sys
import json
import csv
import swiftclient

app = Flask(__name__)
CORS(app)

cloudant_service = json.loads(os.environ['VCAP_SERVICES'])['Object-Storage'][0]
objectstorage_creds = cloudant_service['credentials']

if objectstorage_creds:
    auth_url = objectstorage_creds['auth_url'] + '/v3'  # authorization URL
    password = objectstorage_creds['password']          # password
    project_id = objectstorage_creds['projectId']       # project id
    user_id = objectstorage_creds['userId']             # user id
    region_name = objectstorage_creds['region']         # region name

def predict_joblib():
    conn = swiftclient.Connection(key=password,
                                  authurl=auth_url,
                                  auth_version='3',
                                  os_options={"project_id": project_id,
                                              "user_id": user_id,
                                              "region_name": region_name})
    container_name = 'my-container'
    # File name for testing
    file_name = 'example_file.txt'
    # Create a new container
    conn.put_container(container_name)
    print("\nContainer %s created successfully." % container_name)
    # List your containers
    print("\nContainer List:")
    for container in conn.get_account()[1]:
        print(container['name'])
    # List objects in a container, and print out each object name, the file size, and last modified date
    print("\nObject List:")
    for container in conn.get_account()[1]:
        for data in conn.get_container(container['name'])[1]:
            print('object: {0}\t size: {1}\t date: {2}'.format(data['name'], data['bytes'], data['last_modified']))
    print("-----------LEARN-----------\n")
    with open('training_set.json') as json_data:
        df_train = pd.read_json(json_data)
    train_X = df_train.drop('Price', 1)
    train_y = df_train['Price']
    print("Training...")
    rfreg = RandomForestRegressor(n_estimators=100, n_jobs=-1)
    rfreg.fit(train_X, train_y)
    print("\nPerformance on training set:")
    print('R^2: %f' % rfreg.score(train_X, train_y))
    # print('MSE: %f' % mean_squared_error(rfreg.predict(train_X), train_y))
    # print('ABS: %f' % mean_absolute_error(rfreg.predict(train_X), train_y))
    importances = rfreg.feature_importances_
    std = np.std([tree.feature_importances_ for tree in rfreg.estimators_], axis=0)
    indices = np.argsort(importances)[::-1]
    # Print the feature ranking
    print("\nFeature ranking:")
    for f in range(len(importances)):
        print("%d. feature %d %s (%f)" % (f + 1, indices[f], df_train.columns[indices[f]], importances[indices[f]]))
    # SERIALIZE MODEL USING joblib
    print("Serializing models using joblib...")
    conn.put_object(container_name, 'v3.pkl', contents=rfreg)
    print("Serializing vectorizers using joblib...")
    for feature in ['Fluorescence', 'Culet']:
        conn.put_object(container_name, feature + '_v3.pkl', contents=vectorizers[feature])
    return rfreg, vectorizers

@app.route('/')
def hello():
    predict_joblib()
    return 'Welcome to Python Flask!'

@app.route('/signUp')
def signUp():
    return 'signUp'

port = os.getenv('PORT', '5000')
if __name__ == "__main__":
    app.debug = True
    app.run(host='0.0.0.0', port=int(port))
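As for the TypeError: swiftclient's put_object expects contents to be a string, bytes, or file-like object, and here it is handed the fitted RandomForestRegressor itself, which is what triggers "has no len()". Serializing the model to bytes first, as the joblib comments in the post intend, should fix it; a minimal sketch using the joblib already imported above:

import io

# dump the fitted model into an in-memory buffer, then upload the raw bytes
buf = io.BytesIO()
joblib.dump(rfreg, buf)
conn.put_object(container_name, 'v3.pkl', contents=buf.getvalue())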
