Error while deploying a machine learning Python application

I am trying to deploy my XGBoost model to Kubernetes. I am facing a problem writing the Flask code. Here is the code (imported from GitHub). Whenever I try to deploy it to the web server, I get the error message: Invalid Parameters. Please help me solve this issue, and thank you in advance.
'''
import json
import pickle

import numpy as np
from flask import Flask, request

flask_app = Flask(__name__)

# ML model path
model_path = "Y:/Docker_Tests/Deploy-ML-model-master/Deploy-ML-model-master/ML_Model/model2.pkl"

@flask_app.route('/', methods=['GET'])
def index_page():
    return_data = {
        "error": "0",
        "message": "Successful"
    }
    return flask_app.response_class(response=json.dumps(return_data), mimetype='application/json')

@flask_app.route('/predict', methods=['GET'])
def model_deploy():
    try:
        age = request.form.get('age')
        bs_fast = request.form.get('BS_Fast')
        bs_pp = request.form.get('BS_pp')
        plasma_r = request.form.get('Plasma_R')
        plasma_f = request.form.get('Plasma_F')
        HbA1c = request.form.get('HbA1c')
        fields = [age, bs_fast, bs_pp, plasma_r, plasma_f, HbA1c]
        if not None in fields:
            # Data preprocessing: convert the values to float
            age = float(age)
            bs_fast = float(bs_fast)
            bs_pp = float(bs_pp)
            plasma_r = float(plasma_r)
            plasma_f = float(plasma_f)
            hbA1c = float(HbA1c)
            result = [age, bs_fast, bs_pp, plasma_r, plasma_f, HbA1c]
            # Passing data to model & loading the model from disk
            classifier = pickle.load(open(model_path, 'rb'))
            prediction = classifier.predict([result])[0]
            conf_score = np.max(classifier.predict_proba([result])) * 100
            return_data = {
                "error": '0',
                "message": 'Successfull',
                "prediction": prediction,
                "confidence_score": conf_score.round(2)
            }
        else:
            return_data = {
                "error": '1',
                "message": "Invalid Parameters"
            }
    except Exception as e:
        return_data = {
            'error': '2',
            "message": str(e)
        }
    return flask_app.response_class(response=json.dumps(return_data), mimetype='application/json')

if __name__ == "__main__":
    flask_app.run(host='0.0.0.0', port=9091, debug=True)
'''
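A likely cause: both routes are declared with methods=['GET'], but request.form only carries data from the body of a POST request, so on a plain GET every field comes back as None and the handler falls into the "Invalid Parameters" branch. Below is a minimal sketch of the /predict route reworked as a POST endpoint; it reuses the imports and model_path from the question, and the int() cast on the prediction is an assumption (adjust it if your model's labels are strings):

@flask_app.route('/predict', methods=['POST'])
def model_deploy():
    field_names = ['age', 'BS_Fast', 'BS_pp', 'Plasma_R', 'Plasma_F', 'HbA1c']
    values = [request.form.get(name) for name in field_names]
    if None in values:
        return_data = {"error": '1', "message": "Invalid Parameters"}
    else:
        result = [float(v) for v in values]
        # Loading the model on every request works but is slow; consider
        # loading it once at module import instead.
        classifier = pickle.load(open(model_path, 'rb'))
        prediction = classifier.predict([result])[0]
        conf_score = float(np.max(classifier.predict_proba([result])) * 100)
        return_data = {
            "error": '0',
            "message": "Successful",
            # numpy scalars are not JSON serializable; cast to plain Python types
            "prediction": int(prediction),
            "confidence_score": round(conf_score, 2),
        }
    return flask_app.response_class(response=json.dumps(return_data),
                                    mimetype='application/json')

If you want to keep using GET, read the values from the query string with request.args.get(...) instead of request.form.get(...). Note also that the original code converts HbA1c into a new variable hbA1c but then puts the unconverted HbA1c into result.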

Related

trying to print output from an ECR image with python boto3

I am trying to print output from an ECR image with Python boto3. I can get it to print out the imageDigest, but would like to add the imageTag. Can anyone think of a way to add the imageTag? Every way I have tried has errored out.
import json
import boto3

def get_reponames():
    client = boto3.client('ecr')
    reponames = [repo['repositoryName'] for repo in client.describe_repositories()['repositories']]
    return reponames

def get_imageids(prepo):
    client = boto3.client('ecr')
    imageids = [img['imageDigest'] for img in client.list_images(repositoryName=prepo)['imageIds']]
    return imageids

def lambda_handler(event, context):
    output = get_reponames()
    for rn in output:
        print(rn)
        outputii = get_imageids(rn)
        for ii in outputii:
            print(ii)
    return {
        'body': json.dumps("hello world")
    }
I'll post the output for list_images below. The above code works to display imageDigest, but I want to add imageTag too.
{
    'imageIds': [
        {
            'imageDigest': 'sha256:764f63476bdff6d83a09ba2a818f0d35757063724a9ac3ba5019c56f74ebf42a',
            'imageTag': 'precise',
        },
    ],
    'ResponseMetadata': {
        '...': '...',
    },
}
Your get_imageids function only returns imageDigest, so you cannot access imageTag in the lambda_handler function. You need to return imageTag as well so that lambda_handler can read it.
import json
import boto3

def get_reponames():
    client = boto3.client('ecr')
    reponames = [repo['repositoryName'] for repo in client.describe_repositories()['repositories']]
    return reponames

def get_imageids(prepo):
    client = boto3.client('ecr')
    imageids = [
        {"digest": img['imageDigest'], "tag": img.get('imageTag', None)} for img in
        client.list_images(repositoryName=prepo)['imageIds']
    ]
    return imageids

def lambda_handler(event, context):
    output = get_reponames()
    for rn in output:
        print(rn)
        outputii = get_imageids(rn)
        for ii in outputii:
            print(f"digest : {ii['digest']}, tag: {ii['tag']}")
    return {
        'body': json.dumps("hello world")
    }
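One caveat worth noting: list_images returns at most 100 image IDs per call, so large repositories need pagination. A hedged sketch of the same helper using a boto3 paginator (same output shape as get_imageids above):

import boto3

def get_imageids_paginated(prepo):
    client = boto3.client('ecr')
    paginator = client.get_paginator('list_images')
    imageids = []
    for page in paginator.paginate(repositoryName=prepo):
        for img in page['imageIds']:
            # imageTag is absent for untagged images, hence .get()
            imageids.append({"digest": img['imageDigest'], "tag": img.get('imageTag')})
    return imageids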

Chalice AWS 504

I have a Chalice project (1.26.2) which always times out when deployed; chalice local works fine.
This is my project structure:
This is the code in my app.py:
from datetime import datetime
from decimal import Decimal

import boto3
import firebase_admin
from chalice import Chalice, AuthResponse
from chalice.app import AuthRequest
from firebase_admin import credentials, auth

from chalicelib import crud
from chalicelib import models
from chalicelib import schemas
from chalicelib.auth import full_user_from_context, user_from_context
from chalicelib.db import SessionLocal, engine

app = Chalice(app_name='server-aws')

BUCKET = 'meet-app'
s3_client = boto3.client('s3')

cred = credentials.Certificate('chalicelib/serviceAccountKey.json')
firebase_admin.initialize_app(cred)

models.Base.metadata.create_all(bind=engine)

DISTANCE_IN_METERS = 100

_DB = None

def get_db():
    global _DB
    if _DB is None:
        _DB = SessionLocal()
    return _DB

@app.lambda_function(name='test-function')
def create_user(event, context):
    return {'hello': 'world'}

@app.route('/health')
def health():
    return {'status': 'ok'}

@app.authorizer()
def token_authorizer(auth_request: AuthRequest) -> AuthResponse:
    token = auth_request.token
    try:
        decoded_token = auth.verify_id_token(token)
        decoded = decoded_token
        allowed_routes = [
            '/auth',
            '/me',
        ]
        if 'permission_verified' in decoded and decoded['permission_verified'] is True:
            allowed_routes.append('/me/location')
            allowed_routes.append('/nearby')
            allowed_routes.append('/me/profile-image')
        print('routes', allowed_routes)
        return AuthResponse(routes=allowed_routes, principal_id=decoded['sub'], context=decoded)
    except Exception as e:
        print('error', e)
        return AuthResponse(routes=[], principal_id='non-user')

@app.route('/auth', methods=['GET'], authorizer=token_authorizer)
def authorize():
    u = user_from_context(app.current_request.context)
    user = crud.get_user(get_db(), u['uid'])
    if user is None:
        user = crud.create_user(get_db(), schemas.UserCreate(
            uid=u['uid'],
            phone=u['phone_number'],
            permission_verified=True  # TODO: find verification method
        ))
    token = auth.create_custom_token(user.uid, {
        "permission_verified": user.permission_verified,
        "uid": user.uid,
        "phone": user.phone,
        "name": user.name,
        "linkedin": user.linkedin,
        "instagram": user.instagram,
    })
    return {
        'user': user.__json__(),
        'token': token.decode()
    }

@app.route('/me', methods=["PUT"], authorizer=token_authorizer)
def update_me():
    r = app.current_request
    u = full_user_from_context(r.context)
    data = r.json_body
    u = crud.update_user(get_db(), schemas.UserUpdate(
        uid=u.uid,
        name=data.get('name'),
        phone=data.get('phone'),
        instagram=data.get('instagram'),
        linkedin=data.get('linkedin'),
    ))
    if u is None:  # todo: code
        return {
            "error": "could not update"
        }
    return {
        "user": u.__json__()
    }

@app.route('/me/profile-image', methods=["PUT"], content_types=['application/octet-stream'],
           authorizer=token_authorizer)
def update_me():
    r = app.current_request
    u = full_user_from_context(r.context)
    data = r.raw_body
    file_name = u.uid
    tmp_file_name = '/tmp/' + file_name + '.jpg'
    with open(tmp_file_name, 'wb') as tmp_file:
        tmp_file.write(data)
    key = 'profile-images/' + file_name
    try:
        s3_client.upload_file(tmp_file_name, BUCKET, key, ExtraArgs={'ACL': 'public-read'})
    except Exception as e:
        app.log.error(e)
        return {
            "error": str(e)
        }
    url = f'https://{BUCKET}.s3.amazonaws.com/{key}'
    u = crud.update_user(get_db(), schemas.UserUpdate(
        uid=u.uid,
        profile_image_url=url
    ))
    return {
        "url": url
    }

@app.route('/me/location', methods=["PUT"], authorizer=token_authorizer)
def update_me_location():
    r = app.current_request
    u = full_user_from_context(r.context)
    data = r.json_body
    lat = Decimal(str(data.get('latitude')))
    lng = Decimal(str(data.get('longitude')))
    loc = crud.update_user_location(get_db(), schemas.UserLocationUpdate(
        uid=u.uid,
        latitude=lat,
        longitude=lng,
        timestamp=datetime.now(),
        geo=f'POINT({lng} {lat})'
    ))
    if loc is None:
        loc = crud.create_user_location(get_db(), schemas.UserLocationCreate(
            uid=u.uid,
            latitude=lat,
            longitude=lng
        ))
    loc = schemas.UserLocationGet(
        uid=u.uid,
        user=schemas.UserGet(
            uid=u.uid,
            name=u.name,
            linkedin=u.linkedin,
            instagram=u.instagram,
            profile_image_url=u.profile_image_url
        ),
        latitude=loc.latitude,
        longitude=loc.longitude,
        timestamp=loc.timestamp.isoformat()
    )
    return {
        'loc': loc.json()
    }

@app.route('/nearby', methods=["GET"], authorizer=token_authorizer)
def nearby():
    r = app.current_request
    u = full_user_from_context(r.context)
    user_location = crud.get_user_location(get_db(), u.uid)
    if user_location is None:
        return {
            "error": "no user location"
        }  # todo: better error
    nearby_users = crud.get_nearby_users(get_db(), u.uid, schemas.UserLocationGet(
        uid=user_location.user_id,
        latitude=user_location.latitude,
        longitude=user_location.longitude,
        user=u,
        timestamp=user_location.timestamp.isoformat()
    ), DISTANCE_IN_METERS)
    return {
        "nearby_distance_in_meters": DISTANCE_IN_METERS,
        "nearby_users": list(map(lambda x: schemas.UserLocationGet(
            uid=x.user_id,
            latitude=x.latitude,
            longitude=x.longitude,
            timestamp=x.timestamp.isoformat(),
            user=schemas.UserGet(
                uid=x.user.uid,
                name=x.user.name,
                instagram=x.user.instagram,
                linkedin=x.user.linkedin,
                profile_image_url=x.user.profile_image_url
            )
        ).dict(), nearby_users))
    }
This is the response when I invoke the test-function from the AWS console:
This is what I get when I do chalice logs:
Traceback (most recent call last):[ERROR] Runtime.ImportModuleError: Unable to import module 'app': No module named 'crud'
Traceback (most recent call last):[ERROR] Runtime.ImportModuleError: Unable to import module 'app': No module named 'crud'
2021-11-14 19:29:04.197000 88eb68 2021-11-14T19:29:04.197Z c143dcd4-e212-41f1-a786-7d86e4b58e59 Task timed out after 60.06 seconds
This is my requirements.txt:
chalice~=1.26.2
firebase-admin
boto3~=1.18.53
botocore
sqlalchemy
pydantic
# psycopg2
GeoAlchemy2
psycopg2-binary
I need to use psycopg2 so maybe that's the problem.
Every HTTP request results in a 504 timeout.
In local mode everything works fine.
Thanks in advance.
Your chalicelib package folder's __init__.py (it must be written exactly like this) has an extra underscore at the end of it, so Python's import system doesn't recognize the folder as a package, hence the error.
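For reference, a Chalice project that vendors shared code keeps it in a folder named exactly chalicelib with a correctly named __init__.py. A rough sketch of the expected layout (the module names are inferred from the imports in app.py):

server-aws/
├── app.py
├── requirements.txt
└── chalicelib/
    ├── __init__.py      # exactly this name, no trailing extra underscore
    ├── serviceAccountKey.json
    ├── auth.py
    ├── crud.py
    ├── db.py
    ├── models.py
    └── schemas.py

With the file misnamed, Chalice still packages everything, but Python cannot import chalicelib.crud at runtime, which matches the Runtime.ImportModuleError in the logs; every request then times out with a 504.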

How to Connect an iOS App With a MySQL Database Using Python Instead of PHP

I'm trying to access a MySQL database inside an iOS app, and I would like to use Python to read and write to the database when called from my app.
PHP code I would like to turn into a Python script:
I am not sure how to write or read data from my application using mysql-connector.
I believe this is done with $_POST['a']; in PHP, but I am not sure how to get the same results using Python.
I also have a sample of how I am trying to send data from my app to my database, written in SwiftUI.
'''
import Foundation
import SwiftUI

struct CreateEventButton: View {
    @State private var isPresentedEvent = false
    @State private var eventid: Int = 0
    @State private var eventName: String = ""
    @State private var eventDescription: String = ""
    @State private var selectedStartTime = Date()
    @State private var selectedEndTime = Date()
    @Binding var annotationSelected: Bool

    func send(_ sender: Any) {
        let request = NSMutableURLRequest(url: NSURL(string: "http://YOUR FILE PATH")! as URL)
        request.httpMethod = "POST"
        let postString = "a=\(self.eventid)&b=\(self.eventName)&=c\(self.eventDescription)&=d\(self.selectedStartTime)&=e\(self.selectedEndTime)"
        request.httpBody = postString.data(using: String.Encoding.utf8)
        let task = URLSession.shared.dataTask(with: request as URLRequest) {
            data, response, error in
            if error != nil {
                print("error=\(String(describing: error))")
                return
            }
            print("response = \(String(describing: response))")
            let responseString = NSString(data: data!, encoding: String.Encoding.utf8.rawValue)
            print("responseString = \(String(describing: responseString))")
        }
        task.resume()
        self.eventName = ""
        self.eventDescription = ""
        self.selectedStartTime = Date()
        self.selectedEndTime = Date()
    }

    var body: some View {
        Button(action: {
            self.isPresentedEvent.toggle() // trigger modal
        }, label: {
            Text("Create Event")
        })
        .foregroundColor(.secondary)
        .background(Color(.secondarySystemBackground))
        .cornerRadius(50.0)
        .sheet(isPresented: $isPresentedEvent, content: {
            VStack {
                TextField("Event Name", text: self.$eventName).padding()
                TextField("Event Description", text: self.$eventDescription).padding()
                Form {
                    DatePicker("When your event starts: ", selection: self.$selectedStartTime, in: Date()...)
                }
                Form {
                    DatePicker("When your event ends: ", selection: self.$selectedEndTime, in: Date()...)
                }
                HStack {
                    Button(action: {
                        self.isPresentedEvent.toggle()
                        self.annotationSelected = false
                        self.eventid += 1
                        print("Start: \(self.selectedStartTime)")
                        print("End: \(self.selectedEndTime)")
                        //send()
                    }, label: {
                        Text("Create Event")
                    })
                    Button(action: {
                        self.isPresentedEvent.toggle()
                    }, label: {
                        Text("Cancel")
                    })
                }
                Text("Create Event Button (Non Functional)").padding()
            }
        })
    }
}
'''
I am not sure what parameters to put in the sender function created here.
Any insight would be greatly appreciated.
In order to receive POST requests from the SwiftUI app, you need to run a Python web server like Flask.
1. Create a Python module (file) something like this:
app.py
from flask import Flask
from flask import make_response
from flask import request
import mysql.connector

server_name = "localhost"
username = "flask"
password = "flask"
dbname = "flask"

# Create and check connection
try:
    conn = mysql.connector.connect(
        host=server_name,
        user=username,
        passwd=password,
        database=dbname
    )
    print("Connection OK")
except mysql.connector.Error as e:
    print("Connection failed: ", e)

mycursor = conn.cursor()

app = Flask(__name__)

@app.route('/', methods=['POST'])
def register():
    name = request.form['a']
    age = request.form['b']
    sql = "INSERT INTO users (name, age) VALUES (%s, %s)"
    val = (name, age)
    try:
        mycursor.execute(sql, val)
        conn.commit()
        # note: don't close the connection here, or every request after
        # the first one will fail
    except mysql.connector.Error as e:
        print("Error: ", e)
    return make_response("Success!", 200)
2. Make sure you have the necessary libraries installed:
pip install flask
pip install mysql-connector-python
You need to make sure your MySQL database is up and that you fill in the real credentials for the database in the code. (Default MySQL port = 3306)
3. Start the Flask development web server
export FLASK_APP=app.py # OR set FLASK_APP=app.py (for Windows)
python -m flask run
Your server should start at: http://localhost:5000
Replace http://YOUR FILE PATH with http://localhost:5000/ in your Swift code example and voilà!
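Before touching the Swift side, you can sanity-check the endpoint from Python; a small test using the requests library, with arbitrary sample values:

import requests

# POST the same form-encoded fields the Swift code will send
resp = requests.post("http://localhost:5000/", data={"a": "Lance", "b": "35"})
print(resp.status_code, resp.text)  # expect: 200 Success!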
UPDATE: I tested this and it works. Here is the Swift code I generated with Postman:
import Foundation

var semaphore = DispatchSemaphore(value: 0)

let parameters = "a=Lance&b=35"
let postData = parameters.data(using: .utf8)

var request = URLRequest(url: URL(string: "http://localhost:5000/")!, timeoutInterval: Double.infinity)
request.addValue("application/x-www-form-urlencoded", forHTTPHeaderField: "Content-Type")
request.httpMethod = "POST"
request.httpBody = postData

let task = URLSession.shared.dataTask(with: request) { data, response, error in
    guard let data = data else {
        print(String(describing: error))
        return
    }
    print(String(data: data, encoding: .utf8)!)
    semaphore.signal()
}

task.resume()
semaphore.wait()

How to describe and visualize continuously changing data from MongoDB using pandas

I'm setting up an API using Flask, and I post some JSON (using Postman) that gets transferred to a MongoDB database.
Then I need to visualize statistics of the real-time data from the database in, let's say, data_analysis_script.py, meaning that if I post some JSON from Postman, the statistics should change because data has been added.
Any suggestions about functions or libraries I could use in the script to show the data statistics?
I have tried using a manager that could run both app.run() and the script, but it did not print the output.
API code
from flask import Flask, jsonify, request
from flask_pymongo import PyMongo
import pandas as pd

app = Flask(__name__)
app.config['MONGO_DBNAME'] = 'db'
app.config['MONGO_URI'] = 'mongodb://localhost:8000/db'
mongo = PyMongo(app)

@app.route('/stocks', methods=['GET'])
def get_all_stocks():
    stocks = mongo.db.stocks
    output = []
    for i in stocks.find():
        output.append({'name': i['name'], 'item': i['item']})
    return jsonify({'Here yours': output})

@app.route('/add', methods=['POST'])
def add_stocks():
    stocks = mongo.db.stocks
    name = request.json['name']
    item = request.json['item']
    item_id = stocks.insert({'name': name, 'item': item})
    new_stocks = stocks.find_one({'_id': item_id})
    output = {'name': new_stocks['name'], 'item': new_stocks['item']}
    return jsonify({'Here yours': output})

@app.route('/stocks/<name>', methods=['GET'])
def get_one_stocks(name):
    stocks = mongo.db.stocks
    c = stocks.find_one({'name': name})
    if c:
        output = {'name': c['name'], 'item': c['item']}
    else:
        output = "Nothing"
    return jsonify({'Here yours': output})

if __name__ == '__main__':
    app.run(debug=True)
I expect the data visualization and statistics to change whenever a new JSON entry is posted.
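One lightweight approach is to poll the collection from a separate script and rebuild a pandas DataFrame on each cycle. A minimal sketch, assuming the same MongoDB URI and stocks collection as the API above (the 5-second interval is arbitrary):

import time

import pandas as pd
from pymongo import MongoClient

client = MongoClient('mongodb://localhost:8000')
collection = client['db']['stocks']

while True:
    # Re-read the collection so documents posted via /add are picked up
    df = pd.DataFrame(list(collection.find({}, {'_id': 0})))
    if not df.empty:
        print(df.describe(include='all'))  # summary statistics of current data
    time.sleep(5)

For push-style updates instead of polling, MongoDB change streams (collection.watch(), available on replica sets) let the script react to every insert, and a plotting library such as matplotlib can redraw from the refreshed DataFrame.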

ElasticSearch and Python: Issue with search function

I'm trying to use ElasticSearch 6.4 for the first time with an existing web application written in Python/Django. I have some issues and I would like to understand why and how I can solve them.
###########
# Existing : #
###########
In my application, it's possible to upload document files (.pdf or .doc, for example). Then, I have a search function in my application which lets users search over documents indexed by ElasticSearch when they are uploaded.
Document titles are always written the same way:
YEAR - DOC_TYPE - ORGANISATION - document_title.extension
For example :
1970_ANNUAL_REPORT_APP-TEST_1342 - loremipsum.pdf
The search is always done among doc_type = ANNUAL_REPORT, because there are several doc_types (ANNUAL_REPORT, OTHERS, ...).
##################
# My environment : #
##################
Here is some data from my ElasticSearch setup (I'm learning ES commands too).
$ curl -XGET http://127.0.0.1:9200/_cat/indices?v
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
yellow open app 5T0HZTbmQU2-ZNJXlNb-zg 5 1 742 2 396.4kb 396.4kb
So my index is app
For the above example, if I search this document: 1970_ANNUAL_REPORT_APP-TEST_1342 - loremipsum.pdf, I have:
$ curl -XGET http://127.0.0.1:9200/app/annual-report/1343?pretty
{
  "_index" : "app",
  "_type" : "annual-report",
  "_id" : "1343",
  "_version" : 33,
  "found" : true,
  "_source" : {
    "attachment" : {
      "date" : "2010-03-04T12:08:00Z",
      "content_type" : "application/pdf",
      "author" : "manshanden",
      "language" : "et",
      "title" : "Microsoft Word - Test document Word.doc",
      "content" : "some text ...",
      "content_length" : 3926
    },
    "relative_path" : "app_docs/APP-TEST/1970_ANNUAL_REPORT_APP-TEST_1342.pdf",
    "title" : "1970_ANNUAL_REPORT_APP-TEST_1342 - loremipsum.pdf"
  }
}
Now, with the search part of my web application, I would like to find this document with this search: 1970.
def search_in_annual(self, q):
    try:
        response = self.es.search(
            index='app', doc_type='annual-report',
            q=q, _source_exclude=['data'], size=5000)
    except ConnectionError:
        return -1, None
    total = 0
    hits = []
    if response:
        for hit in response["hits"]["hits"]:
            hits.append({
                'id': hit['_id'],
                'title': hit['_source']['title'],
                'file': hit['_source']['relative_path'],
            })
        total = response["hits"]["total"]
    return total, hits
But when q=1970, the result is 0
If I write:
response = self.es.search(
    index='app', doc_type='annual-report',
    q="q*", _source_exclude=['data'], size=5000)
It returns my document, but also many documents with no 1970 in the title or the document content.
#################
# My global code : #
#################
This is the global class which manages the indexing functions:
class EdqmES(object):
    host = 'localhost'
    port = 9200
    es = None

    def __init__(self, *args, **kwargs):
        self.host = kwargs.pop('host', self.host)
        self.port = kwargs.pop('port', self.port)
        # Connect to ElasticSearch server
        self.es = Elasticsearch([{
            'host': self.host,
            'port': self.port
        }])

    def __str__(self):
        return self.host + ':' + self.port

    @staticmethod
    def file_encode(filename):
        with open(filename, "rb") as f:
            return b64encode(f.read()).decode('utf-8')

    def create_pipeline(self):
        body = {
            "description": "Extract attachment information",
            "processors": [
                {"attachment": {
                    "field": "data",
                    "target_field": "attachment",
                    "indexed_chars": -1
                }},
                {"remove": {"field": "data"}}
            ]
        }
        self.es.index(
            index='_ingest',
            doc_type='pipeline',
            id='attachment',
            body=body
        )

    def index_document(self, doc, bulk=False):
        filename = doc.get_filename()
        try:
            data = self.file_encode(filename)
        except IOError:
            data = ''
            print('ERROR with ' + filename)
            # TODO: log error
        item_body = {
            '_id': doc.id,
            'data': data,
            'relative_path': str(doc.file),
            'title': doc.title,
        }
        if bulk:
            return item_body
        result1 = self.es.index(
            index='app', doc_type='annual-report',
            id=doc.id,
            pipeline='attachment',
            body=item_body,
            request_timeout=60
        )
        print(result1)
        return result1

    def index_annual_reports(self):
        list_docs = Document.objects.filter(category=Document.OPT_ANNUAL)
        print(list_docs.count())
        self.create_pipeline()
        bulk = []
        inserted = 0
        for doc in list_docs:
            inserted += 1
            bulk.append(self.index_document(doc, True))
            if inserted == 20:
                inserted = 0
                try:
                    print(helpers.bulk(self.es, bulk, index='app',
                                       doc_type='annual-report',
                                       pipeline='attachment',
                                       request_timeout=60))
                except BulkIndexError as err:
                    print(err)
                bulk = []
        if inserted:
            print(helpers.bulk(
                self.es, bulk, index='app',
                doc_type='annual-report',
                pipeline='attachment', request_timeout=60))
My document is indexed when it's submitted, thanks to a Django form with a signal:
@receiver(signals.post_save, sender=Document, dispatch_uid='add_new_doc')
def add_document_handler(sender, instance=None, created=False, **kwargs):
    """ When a document is created index new annual report (only) with Elasticsearch and update conformity date if the
    document is a new declaration of conformity

    :param sender: Class which is concerned
    :type sender: the model class
    :param instance: Object which was just saved
    :type instance: model instance
    :param created: True for a creation, False for an update
    :type created: boolean
    :param kwargs: Additional parameter of the signal
    :type kwargs: dict
    """
    if not created:
        return
    # Index only annual reports
    elif instance.category == Document.OPT_ANNUAL:
        es = EdqmES()
        es.index_document(instance)
This is what I've done and it seems to work:
def search_in_annual(self, q):
    try:
        response = self.es.search(
            index='app', doc_type='annual-report', q=q, _source_exclude=['data'], size=5000)
        if response['hits']['total'] == 0:
            response = self.es.search(
                index='app', doc_type='annual-report',
                body={
                    "query":
                        {"prefix": {"title": q}},
                }, _source_exclude=['data'], size=5000)
    except ConnectionError:
        return -1, None
    total = 0
    hits = []
    if response:
        for hit in response["hits"]["hits"]:
            hits.append({
                'id': hit['_id'],
                'title': hit['_source']['title'],
                'file': hit['_source']['relative_path'],
            })
        total = response["hits"]["total"]
    return total, hits
It lets me search over title prefix and content to find my document. (A likely reason q=1970 returned nothing: the standard analyzer keeps underscore-joined runs such as 1970_annual_report_app as single tokens, so 1970 never exists as a standalone term, while the prefix query on the title does match it.)