multiple functions in one endpoint fastapi - python

I am trying to create an audio server where I can upload various audio files. I have a requirement that I can only create one endpoint for creating entries. I have come up with the following, but it does not show the request form to input data.
class AudioType(str, Enum):
    Song = "Song"
    Podcast = "Podcast"
    Audiobook = "Audiobook"

@app.post("/{audio_type}", status_code=status.HTTP_200_OK)
def audio(audio_type: AudioType):
    if audio_type == AudioType.Song:
        def create_song(request: schemas.Song, db: Session = Depends(database.get_db)):
            new_song = models.Song(name=request.name, duration=request.duration, uploadTime=request.uploadTime)
            db.add(new_song)
            db.commit()
            db.refresh(new_song)
            return new_song
    elif audio_type == AudioType.Podcast:
        def create_podcast(request: schemas.Podcast, db: Session = Depends(database.get_db)):
            new_podcast = models.Podcast(name=request.name, duration=request.duration, uploadTime=request.uploadTime, host=request.host)
            db.add(new_podcast)
            db.commit()
            db.refresh(new_podcast)
            return new_podcast
    elif audio_type == AudioType.Audiobook:
        def create_audiobook(request: schemas.Audiobook, db: Session = Depends(database.get_db)):
            new_audiobook = models.Audiobook(title=request.title, author=request.author, narrator=request.narrator, duration=request.duration, uploadTime=request.uploadTime)
            db.add(new_audiobook)
            db.commit()
            db.refresh(new_audiobook)
            return new_audiobook

Your method doesn't accept the request object, only the audio_type.
Also, from what I understand from your code, you may have multiple request bodies (schemas, as you refer to them).
There are two options for what you want.
First, you can declare your endpoint as follows:
from typing import Union

@app.post("/{audio_type}", status_code=status.HTTP_200_OK)
def audio(
    request: Union[schemas.Song, schemas.Podcast, schemas.Audiobook],
    audio_type: AudioType
):
    ... Your method ...
But the auto-generated Swagger docs of FastAPI will not provide a schema example, and you will have to provide examples manually (which may or may not be possible; I don't really know and haven't tried it).
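One way to provide such examples that might work (a sketch assuming Pydantic v1-style models; the field names here are made up, so adjust them to your real schemas) is attaching an example to each schema via schema_extra, so each member of the Union carries its own documented example:

from pydantic import BaseModel

class Song(BaseModel):
    name: str
    duration: int
    uploadTime: str  # hypothetical field types; adjust to your real schemas

    class Config:
        # Pydantic v1: embeds an example into the generated JSON schema,
        # which the docs UI can display
        schema_extra = {
            "example": {"name": "My Song", "duration": 180, "uploadTime": "2021-01-01"}
        }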
Or you can have a schema that accepts everything as Optional, plus the audio_type parameter:
from typing import Optional

class EndpointSchema(BaseModel):
    audio_type: AudioType
    song: Optional[schemas.Song]
    podcast: Optional[schemas.Podcast]
    audiobook: Optional[schemas.Audiobook]

@app.post("/audio", status_code=status.HTTP_200_OK)
def audio(request_body: EndpointSchema):
    if request_body.audio_type == AudioType.Song:
        ... Continue with your request processing ...
Finally, and very important: you are declaring internal methods (create_song etc.) that you never call afterward, so your code will do nothing. You don't need to do that; put the code that creates a song, podcast, or audiobook directly inside the if/elif blocks!
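Putting the second option together with that last point, a sketch of the whole endpoint could look like the following (it reuses your models, schemas, and database modules, and is untested):

@app.post("/audio", status_code=status.HTTP_200_OK)
def audio(request_body: EndpointSchema, db: Session = Depends(database.get_db)):
    # Do the persistence work directly in the branches instead of
    # declaring inner functions that are never called
    if request_body.audio_type == AudioType.Song:
        song = request_body.song
        new_item = models.Song(name=song.name, duration=song.duration,
                               uploadTime=song.uploadTime)
    elif request_body.audio_type == AudioType.Podcast:
        podcast = request_body.podcast
        new_item = models.Podcast(name=podcast.name, duration=podcast.duration,
                                  uploadTime=podcast.uploadTime, host=podcast.host)
    else:
        audiobook = request_body.audiobook
        new_item = models.Audiobook(title=audiobook.title, author=audiobook.author,
                                    narrator=audiobook.narrator,
                                    duration=audiobook.duration,
                                    uploadTime=audiobook.uploadTime)
    db.add(new_item)
    db.commit()
    db.refresh(new_item)
    return new_item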

Related

Set Optional params in PUT method using fastAPI/mongodb

I am trying to make some params optional in a PUT method of my API.
Using FastAPI and MongoDB I've built a simple API to insert students and delete them; now I am looking to allow updating entries without making the params mandatory.
I've checked the question Fastapi: put method, and it looks like what I am looking for, but for MongoDB.
And this response from art049 in MongoDb with FastAPI looks similar to what I already have in my @api_router.put('/update-student/{id}', tags=['Student']).
As example for my question here I have this structure:
Models:
class Student(BaseModel):
    age: int
    name: str
    address: str

class UpdateStudent(BaseModel):
    age: Optional[int] = None
    name: Optional[str] = None
    address: Optional[str] = None
Schemas:
def serializeDict(a) -> dict:
    return {**{i: str(a[i]) for i in a if i == '_id'}, **{i: a[i] for i in a if i != '_id'}}

def serializeList(entity) -> list:
    return [serializeDict(a) for a in entity]
Routes:
@api_router.post('/create-student', tags=['Students'])
async def create_students(student: Student):
    client.collegedb.students_collection.insert_one(dict(student))
    return serializeList(client.collegedb.students_collection.find())
Also I know I can update the entry without problems in this way:
@api_router.put('/update-student/{id}', tags=['Student'])
async def update_student(id, ustudent: UpdateStudent):
    client.collegedb.students_collection.find_one_and_update({"_id": ObjectId(id)}, {
        "$set": dict(ustudent)
    })
    return serializeDict(client.collegedb.students_collection.find_one({"_id": ObjectId(id)}))
My problem, as you can see from my Models, is that I need a way to detect which params were actually sent and update only those:
If right now I update, for example, only the age, then since the other params are not required, name and address will be stored as None (null, actually), because that is the default I set in my model.
Maybe I can do something like this:
if ustudent.age != None:
    students_collection[ObjectId(id)] = ustudent.age
if ustudent.name != None:
    students_collection[ObjectId(id)] = ustudent.name
if ustudent.address != None:
    students_collection[ObjectId(id)] = ustudent.address
I know I can do this with a simple dictionary, but I have never tried it with a MongoDB collection, since Pydantic does not support ObjectId for iterations; that's why serializeDict was created.
I would really appreciate it if somebody could give me a hint about this.
You can use the exclude_unset=True argument, as suggested in the FastAPI documentation:
@api_router.put('/update-student/{id}', tags=['Student'])
async def update_student(id, ustudent: UpdateStudent):
    client.collegedb.students_collection.find_one_and_update({"_id": ObjectId(id)}, {
        "$set": ustudent.dict(exclude_unset=True)
    })
    return serializeDict(client.collegedb.students_collection.find_one({"_id": ObjectId(id)}))
Here is the documentation for exporting Pydantic models.
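To illustrate what exclude_unset does with the UpdateStudent model above (a quick sketch): fields the client never sent are dropped from the exported dict, so they cannot overwrite existing values with None:

ustudent = UpdateStudent(age=21)          # client sent only 'age'
print(ustudent.dict())                    # {'age': 21, 'name': None, 'address': None}
print(ustudent.dict(exclude_unset=True))  # {'age': 21}  -> safe to pass to $set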

Hazelcast and python there is no suitable de-serializer for type -120

Hello, I guess I have a problem with the client and member config; which config should I use? As you can see, I am inserting JSON as data. When I call get_data it returns with no problem, but when I try to use predicate-sql it gives me the error: "hazelcast.errors.HazelcastSerializationError: Exception from server: com.hazelcast.nio.serialization.HazelcastSerializationException: There is no suitable de-serializer for type -120. This exception is likely caused by differences in the serialization configuration between members or between clients and members."
@app.route('/insert_data/<database_name>/<collection_name>', methods=['POST'])
def insert_data(database_name, collection_name):
    client = hazelcast.HazelcastClient(cluster_members=[url])
    dbname_map = client.get_map(f"{database_name}-{collection_name}").blocking()
    if request.json:
        received_json_data = request.json
        received_id = received_json_data["_id"]
        del received_json_data["_id"]
        dbname_map.put(received_id, received_json_data)
        client.shutdown()
        return jsonify()
    else:
        client.shutdown()
        abort(400)

@app.route('/get_data/<database_name>/<collection_name>', methods=['GET'])
def get_all_data(database_name, collection_name):
    client = hazelcast.HazelcastClient(cluster_members=[url])
    dbname_map = client.get_map(f"{database_name}-{collection_name}").blocking()
    entry_set = dbname_map.entry_set()
    datas = []
    for key, value in entry_set:
        value['_id'] = key
        datas.append(value)
    client.shutdown()
    return jsonify({"Result": datas})

@bp.route('/get_query/<database_name>/<collection_name>/<name>', methods=['GET'])
def get_query_result(database_name, collection_name, name):
    client = hazelcast.HazelcastClient(cluster_members=[url])
    predicate_map = client.get_map(f"{database_name}-{collection_name}").blocking()
    predicate = and_(sql(f"name like {name}%"))
    entry_set = predicate_map.values(predicate)
    # entry_set = predicate_map.entry_set(predicate)
    send_all_data = ""
    for x in entry_set:
        send_all_data += x.to_string()
        send_all_data += "\n"
    print(send_all_data)
    # print("Retrieved %s values whose age is less than 30." % len(result))
    # print("Entry is", result[0].to_string())
    # value = predicate_map.get(70)
    # print(value)
    return jsonify()
I tried to change hazelcast.xml according to hazelcast-full-example.xml, but I can't start Hazelcast after the changes. And do I really have to use serialization? Hazelcast version: 4.1, Python: 3.9.
This is most likely happening because you are putting entries of type dictionary into the map, which are serialized by pickle because you didn't specify a serializer for that type and the client does not know how to handle it, so it falls back to the default serializer. However, since pickle serialization is Python-specific, the servers cannot deserialize it and throw such an exception.
There are possible solutions to that; see the https://hazelcast.readthedocs.io/en/stable/serialization.html chapter for details.
I think the most appropriate solution for your use case would be Portable serialization, which does not require a configuration change or code on the server side. See https://hazelcast.readthedocs.io/en/stable/serialization.html#portable-serialization
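A minimal sketch of what Portable could look like with the Hazelcast Python client (the Record class and its fields are made up for illustration; double-check the exact reader/writer method names against the docs for your client version):

import hazelcast
from hazelcast.serialization.api import Portable

class Record(Portable):
    FACTORY_ID = 1
    CLASS_ID = 1

    def __init__(self, name=None, duration=None):
        self.name = name
        self.duration = duration

    def write_portable(self, writer):
        writer.write_string("name", self.name)
        writer.write_int("duration", self.duration)

    def read_portable(self, reader):
        self.name = reader.read_string("name")
        self.duration = reader.read_int("duration")

    def get_factory_id(self):
        return self.FACTORY_ID

    def get_class_id(self):
        return self.CLASS_ID

# Register the factory so the client knows how to serialize/deserialize Record
client = hazelcast.HazelcastClient(
    portable_factories={Record.FACTORY_ID: {Record.CLASS_ID: Record}}
)

Since Portable writes each field individually, the members can evaluate predicates on those fields without needing your Python class on the server side.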
By the way, client objects are quite heavyweight, so you shouldn't be creating them on demand like this. You can construct one once in your application and share and use it in your endpoints or business-logic code freely, since it is thread-safe. The same applies to the map proxy you get from the client; it can also be re-used.
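For example, a sketch of that idea applied to the Flask app above (the cluster address is an assumption; adjust it to yours):

import hazelcast
from flask import Flask, jsonify

app = Flask(__name__)

url = "localhost:5701"  # assumed cluster member address

# Create the client once at module level; it is thread-safe and can be
# shared by all request handlers instead of being rebuilt per request.
client = hazelcast.HazelcastClient(cluster_members=[url])

@app.route('/get_data/<database_name>/<collection_name>', methods=['GET'])
def get_all_data(database_name, collection_name):
    # The map proxy can be re-used as well; no client.shutdown() per request
    dbname_map = client.get_map(f"{database_name}-{collection_name}").blocking()
    datas = [dict(value, _id=key) for key, value in dbname_map.entry_set()]
    return jsonify({"Result": datas})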

How to add a new parameter to a Python API created using Flask

I am writing my Python API using Flask. This API accepts only one parameter, called questionID. I would like it to accept a second parameter called lastDate. I tried to look around for how to add this parameter but couldn't find a good method to do this. My current code looks as follows:
from flask import Flask, request
from flask_restful import Resource, Api, reqparse
from sqlalchemy import create_engine
from json import dumps
from flask_jsonpify import jsonify
import psycopg2
from pandas import read_sql

connection_string = "DB Credentials"

app = Flask(__name__)
api = Api(app)

class GetUserAnswers(Resource):
    def get(self, questionID):
        conn = psycopg2.connect(connection_string)
        cursor = conn.cursor()
        userAnswers = read_sql('''
            select * from <tablename> where questionid = ''' + "'" + questionID + "' order by timesansweredincorrectly desc limit 15" + '''
            ''', con=conn)
        conn.commit()
        conn.close()
        result = {}
        for index, row in userAnswers.iterrows():
            result[index] = dict(row)
        return jsonify(result)

api.add_resource(GetUserAnswers, '/GetUserAnswers/<questionID>')

if __name__ == '__main__':
    app.run(port='5002')
Question 1: I'm guessing I can accept the second parameter in the get definition. If this is not true, how should I accept the second parameter?
Question 2: How do I modify the api.add_resource() call to accept the second parameter?
Question 3: I currently use http://localhost:5002/GetUserAnswers/<some question ID> to call this API from the browser. How would this call change with a second parameter?
I have never developed an API before, so any help would be much appreciated.
If you want to add multiple parameters within the URL path, for example:
http://localhost:5002/GetUserAnswers/<question_id>/answers/<answer_id>
Then you need to add multiple parameters to your get method:
def get(self, question_id, answer_id):
    # your code here
But if you instead want to add multiple query parameters to the URL, for example:
http://localhost:5002/GetUserAnswers/<question_id>?lastDate=2020-01-01&totalCount=10
Then you can use request arguments:
def get(self, question_id):
    lastDate = request.args.get('lastDate')
    totalCount = request.args.get('totalCount')
    # your code here
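As a side note, request.args.get also accepts default and type arguments, which is handy for optional or numeric query parameters (a small sketch; the parameter names are just illustrative):

def get(self, question_id):
    # Returns 10 when totalCount is absent; converts the value to int when present
    total_count = request.args.get('totalCount', default=10, type=int)
    last_date = request.args.get('lastDate')  # None when absent
    # your code here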
Consider several adjustments to your code:
For a simpler implementation like yours, use route decorators in Flask and avoid the need to initialize and register the class object;
Use parameterization in SQL and avoid the potentially dangerous and messy string concatenation;
Avoid the heavy data analytics library pandas and its inefficient row-by-row iterrows loop. Instead, handle everything with the cursor object; specifically, use DictCursor in psycopg2.
Refactored Python code (adjust the assumption of how to use lastDate):

# ... leave out the heavy pandas ...
from flask import Flask, request, jsonify
import psycopg2
import psycopg2.extras  # needed for DictCursor

connection_string = "DB Credentials"

app = Flask(__name__)

@app.route('/GetUserAnswers', methods=['GET'])
def GetUserAnswers():
    questionID = request.args.get('questionID', None)
    lastDate = request.args.get('lastDate', None)
    conn = psycopg2.connect(connection_string)
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    userAnswers = '''SELECT * FROM <tablename>
                     WHERE questionid = %s
                       AND lastdate = %s
                     ORDER BY timesansweredincorrectly DESC
                     LIMIT 15
                  '''
    # EXECUTE SQL WITH PARAMS
    cur.execute(userAnswers, (questionID, lastDate))
    # SAVE TO LIST OF DICTIONARIES
    result = [dict(row) for row in cur.fetchall()]
    cur.close()
    conn.close()
    return jsonify(result)

if __name__ == '__main__':
    app.run(port='5002')
Browser Call
http://localhost:5002/GetUserAnswers?questionID=8888&lastDate=2020-01-08

Using A Query string value in my AWS Lambda function

So I have followed this guide from another Stack Overflow question:
The steps to get this working are:
Go to Resources -> Integration Request.
Click on the plus or edit icon next to the templates dropdown (odd, I know, since the template field is already open and the button here looks greyed out).
Explicitly type application/json in the content-type field, even though it shows a default (if you don't do this it will not save and will not give you an error message).
Put this in the input mapping: { "name": "$input.params('name')" }
Click on the check box next to the templates dropdown (I'm assuming this is what finally saves it).
I understand this; however, I don't understand how I can then use this parameter within my Lambda function (Python).
I have tried input.name with no success.
You can use query strings like this:
UserName = event["UserName"]
Here is an example with Python:
import boto3
import json
import decimal

# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, decimal.Decimal):
            if o % 1 > 0:
                return float(o)
            else:
                return int(o)
        return super(DecimalEncoder, self).default(o)

def lambda_handler(event, context):
    dynamodb = boto3.resource('dynamodb', region_name='ap-southeast-1',
                              endpoint_url="http://dynamodb.ap-southeast-1.amazonaws.com")
    table = dynamodb.Table('TableUsers')
    UserName = event["UserName"]
    UserId = event["UserId"]
    UserPassword = event["UserPassword"]
    response = table.put_item(
        Item={
            'UserName': UserName,
            'UserId': UserId,
            'UserPassword': UserPassword
        }
    )
    return "Register Successfully"

How to map a Class to multiple databases in sqlalchemy orm

I am using a sqlite database as my application file through sqlalchemy. I have a separate configuration file.
There are some classes whose information I persist on my application file that I would like to replicate on my configuration file. The thing is that I would load it alternatively from one or the other source depending on availability.
I saw the mention below in the documentation, but I think it does not directly apply, as the secondary mapping will not persist the information. Also, the notion of which one would be the primary is blurry. Both databases would carry the same information, though maybe not the same version of it.
http://sqlalchemy.readthedocs.org/en/rel_1_0/orm/nonstandard_mappings.html#multiple-mappers-for-one-class
I will try to make it clearer with an example:
I have a class A which represents a multi-field user input. I save this on my application file.
A class B, also on my application file, is composed of an instance of Class A.
The same instance from Class A may compose several suitable instances of Class B. These are all stored on my application file.
My problem is that on another session, with a brand new configuration file I might want to reuse that Class A instance. I can not have it only on the application file, because if it gets updated, it will be relevant across all application files that use it.
On the other hand, it can not be only in the configuration file, as a user might share his application file with another, and the latter might not have a suitable configuration and would have to set it up manually.
I need to have it in both places, be able to choose which database will be the source at runtime and have all changes persist on both databases at once.
Can it be done in sqlalchemy+sqlite? Is it a good idea? Are there classic solutions for this?
EDIT:
I think I am describing something that looks like a cache, which sqlalchemy does not do. Does any other approach come to mind?
Does sqlalchemy allow me to map an instance to a database upon instance creation? This would allow for two instances of the same class to be mapped against different databases. Then I would listen for an update event by sqlalchemy and issue the same sql to the other database. I also do not know how to do this.
Another option: map my class against a union query. SQLAlchemy might allow that, as it does for arbitrary selects, but then there is the persistence issue.
Another option: add a layer to the engine so that it connects to two databases simultaneously, issuing the same commands to both for reading and writing. I could deal with the duplicated returns.
I came up with the mixin below. It does not handle expunge or rollback, as I do not use those in my application, nor do I know how to go about them.
It looks like it is working. I will proceed to expand it to handle collections.
import os
from sqlalchemy import Column, Float, String, Enum, Integer, event
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import orm
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

class ReplicateMixin:
    @classmethod
    def get_or_create(cls, prime_session, sessoes=None, **kwargs):
        if sessoes is None:
            sessoes = []
        if not isinstance(sessoes, list):
            sessoes = [sessoes]
        # They are passed separately just to make explicit that the first might receive different treatment
        sessoes = [prime_session] + sessoes
        replicas = []
        for sessao in sessoes:  # Gets a result or creates a new instance from each database
            instance = sessao.query(cls).filter_by(**kwargs).first()
            if instance is None:
                instance = cls(**kwargs)
                setattr(instance, "__new", True)
                sessao.add(instance)
            instance.sessao = sessao
            replicas.append(instance)
        fittest = cls.__select_fittest(replicas)  # Selects the instance whose data will prevail
        prime = replicas.pop(0)  # Instance from the session we will be issuing commits to. The others must simply follow.
        cls.__copy_data(fittest, prime, ReplicateMixin.__get_primary_keys(prime))
        setattr(prime, "__replicas", replicas)  # The object will carry references to its copies
        return prime

    @staticmethod
    def __select_fittest(instances):
        """This method should contain the logic for choosing the instance that has
        the most relevant information. It may be overridden by child classes."""
        if getattr(instances[0], "__new", False):
            return instances[1]
        else:
            return instances[0]

    @staticmethod
    def __copy_data(source, dest, primary_keys=None):
        primary_keys = [] if primary_keys is None else primary_keys
        for prop in orm.class_mapper(type(source)).iterate_properties:
            if (isinstance(prop, orm.ColumnProperty)
                    and prop.key not in primary_keys):
                setattr(dest, prop.key, getattr(source, prop.key))

    @staticmethod
    def __replicate(mapper, connection, original_obj):
        replicants = getattr(original_obj, "__replicas", [])  # if it IS a replicant it will not have a __replicas attribute
        primary_keys = ReplicateMixin.__get_primary_keys(original_obj)
        for objeto in replicants:
            ReplicateMixin.__copy_data(original_obj, objeto, primary_keys)
            objeto.sessao.commit()

    @staticmethod
    def __replicate_del(mapper, connection, original_obj):
        replicants = getattr(original_obj, "__replicas", [])  # if it IS a replicant it will not have a __replicas attribute
        for objeto in replicants:
            if objeto in objeto.sessao.new:
                objeto.sessao.expunge(objeto)
            else:
                objeto.sessao.delete(objeto)
            objeto.sessao.commit()

    @staticmethod
    def __get_primary_keys(mapped_object):
        return [key.name for key in orm.class_mapper(type(mapped_object)).primary_key]

    @classmethod
    def __declare_last__(cls):
        """Binds the replication handlers to mapper events."""
        event.listen(cls, "before_insert", cls.__replicate)
        event.listen(cls, "before_update", cls.__replicate)
        event.listen(cls, "before_delete", cls.__replicate_del)
        # FIXME: might not play well with rollback
Example:
DeclarativeBase = declarative_base()

class Datum(ReplicateMixin, DeclarativeBase):
    __tablename__ = "xUnitTestData"
    Key = Column(Integer, primary_key=True)
    Value = Column(Float)
    nome = Column(String(10))

    def __repr__(self):
        return "{}; {}; {}".format(self.Key, self.Value, self.nome)

end_local = os.path.join(os.path.expanduser("~"), "Desktop", "local.bd")
end_remoto = os.path.join(os.path.expanduser("~"), "Desktop", "remoto.bd")

src_engine = create_engine('sqlite:///' + end_local, echo=False)
dst_engine = create_engine('sqlite:///' + end_remoto, echo=False)

DeclarativeBase.metadata.create_all(src_engine)
DeclarativeBase.metadata.create_all(dst_engine)

SessionSRC = sessionmaker(bind=src_engine)
SessionDST = sessionmaker(bind=dst_engine)

session1 = SessionSRC()
session2 = SessionDST()

item = Datum.get_or_create(session1, session2, Value=0.5, nome="terceiro")
item.Value = item.Value / 2
print(item)

session1.delete(item)
session1.commit()
session1.close()
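For reference, the mapper events the mixin hooks into can also be registered standalone, which may help when experimenting (a minimal sketch using the Datum class above; log_update is a hypothetical handler):

from sqlalchemy import event

def log_update(mapper, connection, target):
    # Fires just before an UPDATE statement is emitted for 'target'
    print("Updating:", target)

event.listen(Datum, "before_update", log_update)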
