I'm fairly new to working with Flask and want to know how to insert a dictionary into a database column.
I am making a shopping website and the dictionary I want to insert into the order table is shown below:
# Snapshot of the product the user is ordering, keyed by its product id.
# Scalar fields (quantity, size, color) come from the user's selection;
# the rest are copied from the product record.
product_features = {
    product_id: dict(
        name=product.product_name,
        price=product.product_price,
        quantity=product_quantity,
        discount=product.product_discount,
        information=product.product_information,
        size=product_size,
        color=product_color,
        image=product.product_image_1,
        all_colors=product.product_color,
        all_sizes=product.product_size,
        max_quantity=product.product_quantity,
    )
}
# Persist the cart in the user's session under the 'cart' key.
session['cart'] = product_features
I then store this in a session called cart.
This represents the goods that the user is ordering. Once the user makes an order I then want to add this session data into a column called orders in a table called CustomerOrder.
In a previous thread I saw the code below being used:
class Person(db.Model):
    """Example model whose fancy_name column stores a JSON-encoded dict."""
    __tablename__ = 'persons'
    id = db.Column(db.Integer, primary_key=True)
    # JsonEncodedDict (a TypeDecorator, defined elsewhere in this file)
    # serialises a Python dict to JSON text on write and back on read.
    fancy_name = db.Column(JsonEncodedDict)
import json
from sqlalchemy.ext import mutable

# Flask-SQLAlchemy handle; models and the custom JSON type hang off it.
db = SQLAlchemy()
class JsonEncodedDict(db.TypeDecorator):
    """Enables JSON storage by encoding and decoding on the fly."""

    # Backing column type: plain TEXT holding a JSON document.
    impl = db.Text

    def process_bind_param(self, value, dialect):
        # Outgoing (Python -> DB): serialise; None is stored as '{}'.
        return '{}' if value is None else json.dumps(value)

    def process_result_value(self, value, dialect):
        # Incoming (DB -> Python): deserialise; NULL becomes an empty dict.
        return {} if value is None else json.loads(value)


# Track in-place mutations of the stored dict so SQLAlchemy flushes them.
mutable.MutableDict.associate_with(JsonEncodedDict)
I am unsure of exactly how this works and do not want to just copy code that I do not understand. Any advice on this would be very much appreciated, as I have looked on YouTube and elsewhere online and there is not much information on this.
Really new to python and coding in general - would really help if someone could point me in the right direction with some code.
So to start off with I am making a proof of concept for a car park running AWS Rekognition and I need some help with updating the database. As you can see with the code below it inputs the reg_plate, entry_time and exit_time into the database all okay. But, what I am trying to do is when Rekognition is invoked a second time with the same reg_plate it updates the exit_time only for the current record in the database.
import boto3
import time
def detect_text(photo, bucket):
    """Return the first LINE-type text detected in the S3 image, or False."""
    rek = boto3.client('rekognition')
    resp = rek.detect_text(Image={'S3Object': {'Bucket': bucket, 'Name': photo}})
    # Scan detections lazily and stop at the first full line of text;
    # fall back to False when no LINE detection exists.
    return next(
        (d['DetectedText'] for d in resp['TextDetections'] if d['Type'] == 'LINE'),
        False,
    )
def main(event, context):
    """Lambda handler: OCR a plate image and record the plate in DynamoDB.

    Writes an item with reg_plate/entry_time and a sentinel exit_time of "0".
    Returns early (no item written) when no text is detected.
    """
    bucket = ''
    photo = 'regtest.jpg'
    text_detected = detect_text(photo, bucket)
    if text_detected is False:
        # FIX: the original called exit() here. Raising SystemExit inside a
        # Lambda handler is reported as an invocation error; returning from
        # the handler is the correct way to stop early.
        return
    print("Text detected: " + str(text_detected))
    # Entry time as a unix-epoch string (DynamoDB item uses string types).
    entry_time = str(int(time.time()))
    dynamodb = boto3.client('dynamodb')
    table_name = 'Customer_Plate_DB'
    item = {
        'reg_plate': {'S': str(text_detected)},
        'entry_time': {'S': entry_time},
        # "0" marks a vehicle that has entered but not yet exited.
        'exit_time': {'S': str(0)},
    }
    dynamodb.put_item(TableName=table_name, Item=item)
Tried various if statements but with no luck as whenever I try it just keeps making new records in the database and the exit_time is never updated.
In DynamoDB a PutItem will overwrite/insert data, so it's not what you need if you want to update a single attribute. You will need to use UpdateItem:
# UpdateItem via the low-level client. FIX: with boto3's low-level client,
# every value in ExpressionAttributeValues must be a typed AttributeValue
# map (e.g. {'S': ...}); the original passed a bare Python string, which
# fails client-side parameter validation.
response = dynamodb.update_item(
    TableName=table_name,
    Key={
        'reg_plate': {'S': str(text_detected)},
        'entry_time': {'S': entry_time}
    },
    UpdateExpression="SET #t = :t",
    ExpressionAttributeValues={":t": {"S": str(0)}},
    # exit_time is aliased because attribute names in expressions are
    # safest referenced through ExpressionAttributeNames.
    ExpressionAttributeNames={"#t": "exit_time"},
    # Only update an existing record; never create a new one here.
    ConditionExpression='attribute_exists(reg_plate)'
)
Your question does not say what your partition and/or sort keys are, so the example above assumes a composite primary key of reg_plate (partition key) and entry_time (sort key), matching the put_item call in your code.
In the example, exit_time is set on an item only if that item already exists in your table (enforced by the ConditionExpression).
I need to read keys in the Json file to later use them as columns and insert/update with the values pertaining to those Json file keys. The problem is that my Json has the first element as a Json Object (see code below).
Json:
{
"metadata":
{
"namespace": "5.2.0",
"message_id": "3c80151b-fcf3-4cc3-ada0-635be5b5c95f",
"transmit_time": "2020-01-30T11:25:47.247394-06:00",
"message_type": "pricing",
"domain": "Pricing Service",
"version": "1.0.0"
}
,
"prices": [
{
"price": 24.99,
"effective_date": "2019-06-01T00:00:00-05:00",
"strikethrough": 34.99,
"expiration_date": "2019-06-01T00:00:00-05:00",
"modified_date": "2019-08-30T02:14:39.044968-05:00",
"base_price": 25.99,
"sku_id": 341214,
"item_number": 244312,
"trade_base_price": 14.99,
"competitive_price": 20.00
},
{
"price": 24.99,
"effective_date": "2019-06-01T00:00:00-05:00",
"strikethrough": 34.99,
"expiration_date": "2019-06-01T00:00:00-05:00",
"modified_date": "2019-08-30T02:14:39.044968-05:00",
"base_price": 25.99,
"sku_id": 674523,
"item_number": 279412,
"trade_base_price": 14.99,
"competitive_price": 20.00
}
]
}
So when I read the "metadata" using get_data function below
SQL Postgres Table:
-- Recreate the pricing table from scratch (development convenience; this
-- drops any existing data).
DROP TABLE MyTable;
CREATE TABLE IF NOT EXISTS MyTable
(
    -- Columns filled from each element of the "prices" JSON array.
    price numeric(5,2),
    effective_date timestamp without time zone,
    strikethrough numeric(5,2),
    expiration_date timestamp without time zone,
    modified_date timestamp without time zone,
    base_price numeric(5,2),
    sku_id integer CONSTRAINT PK_MyPK PRIMARY KEY NOT NULL,
    item_number integer,
    trade_base_price numeric(5,2),
    competitive_price numeric(5,2),
    -- Columns filled from the single "metadata" JSON object (applied to
    -- every row by a follow-up UPDATE).
    namespace character varying(50),
    message_id character varying(50),
    transmit_time timestamp without time zone,
    message_type character varying(50),
    domain character varying(50),
    version character varying(50)
)
Python 3.9:
import psycopg2
import json
# import the psycopg2 database adapter for PostgreSQL
from psycopg2 import connect, Error
# Load the pricing feed from disk.
with open("./Pricing_test.json") as arq_api:
    read_data = json.load(arq_api)

# FIX: wrap the single "metadata" object in a one-element list so the
# downstream loop can iterate it like "prices". The previous per-key
# comprehension ([{k: v} for k, v in ...]) split metadata into six one-key
# dicts, so each loop pass saw only one key and every other column came
# out None — the NULL-columns symptom described below.
read_data["metadata"] = [read_data["metadata"]]
#print(read_data)
data_pricing = []

def get_PricingData():
    """Flatten each entry of read_data["prices"] into a row for MyTable."""
    price_keys = (
        "price", "effective_date", "strikethrough", "expiration_date",
        "modified_date", "base_price", "sku_id", "item_number",
        "trade_base_price", "competitive_price",
    )
    for entry in read_data["prices"]:
        # Six trailing Nones are placeholders for the metadata columns,
        # which post_gre() fills in afterwards with an UPDATE.
        data_pricing.append([entry.get(key) for key in price_keys] + [None] * 6)

get_PricingData()
data_metadata = []

def get_Metadata():
    """Collect the metadata fields into a single row for the UPDATE step.

    FIX: read_data["metadata"] may arrive as a list of one-key dicts
    (produced by the [{k: v} for k, v in ...] transform). The original code
    appended one mostly-None row per fragment, so only the last key
    ("version") survived the final UPDATE. Merging the fragments back into
    one dict yields a single complete row with every column populated.
    """
    meta = read_data["metadata"]
    if isinstance(meta, list):
        merged = {}
        for part in meta:
            merged.update(part)
        meta = merged
    data_metadata.append([
        meta.get("namespace"),
        meta.get("message_id"),
        meta.get("transmit_time"),
        meta.get("message_type"),
        meta.get("domain"),
        meta.get("version"),
    ])

get_Metadata()
# Connection settings for the target PostgreSQL database.
_db_settings = dict(
    host="MyHost",
    database="MyDB",
    user="MyUser",
    password="MyPassword",
    # attempt to connect for 3 seconds then raise exception
    connect_timeout=3,
)
conn = connect(**_db_settings)
cur = conn.cursor()
# Start from an empty table so re-running the script cannot violate the
# sku_id primary key (comment this out to keep existing rows).
cur.execute("TRUNCATE TABLE MyTable")
def post_gre():
    """Insert all pricing rows, then stamp the shared metadata on every row."""
    for pricing_row in data_pricing:
        cur.execute(
            "INSERT INTO MyTable VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
            tuple(pricing_row),
        )
    # Metadata is identical for every price row, so an unfiltered UPDATE
    # (no WHERE clause) deliberately applies it table-wide.
    for meta_row in data_metadata:
        cur.execute(
            "UPDATE MyTable SET namespace = %s, message_id = %s, transmit_time = %s, message_type = %s, domain = %s, version = %s",
            tuple(meta_row),
        )

post_gre()
# Persist all inserts/updates in one transaction, then release the connection.
conn.commit()
conn.close()
it throws the following error:
namespace = dic.get("namespace") AttributeError: 'str' object has no attribute 'get'
But if I wrap the metadata Json object with array brackets [] (see pic below) it works perfectly fine - It reads every key in the metadata as a separate column (namespace, message_id, transmit_time, message_type, domain, version)
But since I should not modify the JSon source file itself I need to interpret "metadata" to a python List type, so that it could read the keys.
P.S.
Almost right Solution:
read_data["metadata"] = [{key:value} for key,value in read_data["metadata"].items()]
The suggestion provided by @Suraj works, but for some reason it inserts NULL into all of the "metadata" key columns (namespace, message_id, transmit_time, message_type, domain) except for "version". Any idea why? It does insert correct values when the JSON is changed by adding [], but the source file should not be modified.
I was able to narrow down the issue with not reading the other keys in "metadata": it basically reads only the very last key, which happens to be "version" — and if you change the order it reads whichever key comes last (e.g. "domain").
How about now ?
import pandas as pd  # NOTE(review): pandas is imported but unused in this snippet
import json

with open('stak_flow.json') as f:
    data = json.load(f)

# FIX: wrap the whole "metadata" object in a single-element list instead of
# splitting it into one-key dicts — the per-key comprehension made every
# downstream lookup except the last key return None (the NULL-columns bug
# reported above).
data['metadata'] = [data['metadata']]
print(data)
Here is my sample code
import boto3
import os
# AWS profile name used for credentials.
ENV = "dev"
# DynamoDB endpoint URL (e.g. a local/dev endpoint).
DB = "http://awsservice.com"
REGION = "us-east-1"
TABLE = "traffic-count"

def main():
    """Increment count.car on one item while setting parentKey/objectKey.

    NOTE(review): 'ADD #count.#car :delta' updates a nested path, which
    only works when the top-level 'count' map already exists on the item.
    When it does not, DynamoDB rejects the call with "The document path
    provided in the update expression is invalid for update" — the exact
    error discussed below. A flat attribute (as in the earlier
    if_not_exists example) does not have this problem.
    """
    os.environ["AWS_PROFILE"] = ENV
    client = boto3.resource("dynamodb", endpoint_url=DB, region_name=REGION)
    kwargs = {'Key': {'id': 'D-D0000012345-P-1'},
              'UpdateExpression': 'ADD #count.#car :delta \n SET #parentKey = :parent_key, #objectKey = :object_key',
              'ExpressionAttributeValues': {':delta': 1, ':parent_key': 'District-D0000012345', ':object_key': 'Street-1'},
              'ExpressionAttributeNames': {'#car': 'car', '#count': 'count', '#parentKey': 'parentKey', '#objectKey': 'objectKey'}}
    client.Table(TABLE).update_item(**kwargs)

if __name__ == "__main__":
    main()
What I want to achieve is this:
With a single API call (in this update_item), I want to be able to
If the item does not exist, create an item with a map count, initialise it with {'car': 1}, and set the fields parent_key and object_key.
or
If the item already exists, update the field to {'car': 2} (if the original count is 1)
Previously, if I did not use a map, I can successfully update with this expression,
SET #count = if_not_exist(#count, :zero) + :delta,
#parentKey = :parent_key, #objectKey = :object_key
However I am getting this error:
botocore.exceptions.ClientError: An error occurred
(ValidationException) when calling the UpdateItem operation: The
document path provided in the update expression is invalid for update
Which document path is causing the problem? How can I fix it?
For those who landed on this page with similar error:
The document path provided in the update expression is invalid for update
The reason may be:
for the item on which the operation is being performed,
this attribute (count, for example) is not yet set.
Considering the sample code from question,
The exception could be coming from all those items where count is empty or not set. So the update query doesn't know on which map the new value(s) (car, for example) needs to be set or updated.
In the question, it was possible for the OP in the beginning because, the attribute is not a map and the process is simply setting the value to count as is. It's not trying to access a key of an unknown map to set the value.
This can be handled by catching the exception. For example:
from botocore.exceptions import ClientError
...
# Try the nested update first; if the 'count' map does not exist yet the
# document path is invalid and DynamoDB raises a ValidationException, in
# which case we create the whole map instead.
# FIXES vs. the original snippet: the UpdateExpression string literal was
# unterminated ("set count.car = :c,); 'count' is a DynamoDB reserved word
# and must go through ExpressionAttributeNames; and the fallback map used
# the placeholder ':c' as a key where the attribute name 'car' was meant.
try:
    response = table.update_item(
        Key={
            "pk": pk
        },
        UpdateExpression="SET #cnt.car = :c",
        ExpressionAttributeNames={"#cnt": "count"},
        ExpressionAttributeValues={
            ':c': "some car"
        },
        ReturnValues="UPDATED_NEW"
    )
except ClientError as e:
    if e.response['Error']['Code'] == 'ValidationException':
        # The 'count' map was missing — create it with the initial entry.
        response = table.update_item(
            Key={
                "pk": pk
            },
            UpdateExpression="SET #cnt = :count",
            ExpressionAttributeNames={"#cnt": "count"},
            ExpressionAttributeValues={
                ':count': {
                    'car': "some car"
                }
            },
            ReturnValues="UPDATED_NEW"
        )
I run a web service with an api function which uses a method I created to interact with MongoDB, using pymongo.
The json data comes with post may or may not include a field: firm. I don't want to create a new method for posts that does not include a firm field.
So I want to use that firm in pymongo.find if it does exists, or I want to just skip it if it doesn't. How can I do this with using one api function and one pymongo method?
API function:
@app.route(f'/{API_PREFIX}/wordcloud', methods=['POST'])
def generate_wc():
    """Build a word cloud from tweets; the 'firm' field is optional.

    FIXES: the decorator was commented out ('#app.route' instead of
    '@app.route'), and request_.get("firm").lower() raised AttributeError
    whenever the posted JSON had no 'firm' field, because .get() returns
    None in that case.
    """
    request_ = request.get_json()
    firm = request_.get("firm")
    # Lower-case only when present; pass None through so get_tweets can
    # skip the firm filter entirely.
    firm = firm.lower() if firm else None
    source = request_["source"]
    since = datetime.strptime(request_["since"], "%Y-%m-%d")
    until = datetime.strptime(request_["until"], "%Y-%m-%d")
    items = mongo.get_tweets(firm, since, until)
    ...
The pymongo method:
def get_tweets(self, firm, since: datetime, until: datetime):
    """Fetch tweets in [since, until], optionally restricted to one firm.

    firm may be None or empty (the request carried no 'firm' field); the
    query is then built from the date range alone, so this one method
    serves both cases. The parameter order is unchanged for existing
    callers.
    """
    query = {'date': {'$gte': since, '$lte': until}}
    if firm:
        # Only constrain by firm when the caller actually provided one.
        query['firm'] = firm
    tweets = self.DB.tweets.find(query)
    ...
In the second code snippet, see the comment line inside find() marking where firm should be used.
Thanks.
Since it involves two different queries: {date: ...} and {date: ..., firm: ...} depending on the existence of firm in the input, you would have to check if firm is not None in get_tweets and execute the proper query.
For example:
def get_tweets(self, since, until, firm=None):
    """Fetch tweets in the date range, adding a firm filter only when given.

    firm defaults to None (and therefore comes last in the parameter
    list); when it is provided the query gains a 'firm' criterion.

    FIX: the snippet previously ended with '....', which is a syntax
    error; it is replaced by a valid '...' placeholder.
    """
    query = {'date': {'$gte': since, '$lte': until}}
    if firm is not None:
        query['firm'] = firm
    tweets = self.DB.tweets.find(query)
    ...
Note that since firm has a default value, it needs to be last in the get_tweets parameter list.