Children count not fetched although query fetches everything else - python

I have a function like so:
@app.route('/search/')
def search():
    # Handle search conditions
    query = db.session.query(
        Parent.id,
        Parent.name,
        func.count(Children.id),
    )
    query = query.filter(and_(*conditions))  # This comes from "Handle search conditions"
    query = query.outerjoin((Children, Parent.children))
    return jsonify([row._asdict() for row in query.all()])
And when called, it properly returns the parents' data:
[
    {
        'id': 1,
        'name': 'Example Parent',
    },
    {
        'id': 2,
        'name': 'Another Parent',
    }
]
But it doesn't return the count of each parent's children.
What did I do wrong, and how do I make it also fetch the child count?

You need to set a label on the count column:
query = db.session.query(
    Parent.id,
    Parent.name,
    func.count(Children.id).label('count'),
)
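For completeness, a minimal sketch of the whole route body under the same assumptions as the question (Parent and Children are the usual declarative models with a one-to-many relationship); note the group_by, without which the aggregate would collapse all matching rows into a single result:
query = (
    db.session.query(
        Parent.id,
        Parent.name,
        func.count(Children.id).label('count'),
    )
    .outerjoin(Children, Parent.children)  # LEFT JOIN so parents without children still appear
    .filter(and_(*conditions))
    .group_by(Parent.id, Parent.name)      # one row, and one count, per parent
)
return jsonify([row._asdict() for row in query.all()])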

Related

How to insert documents with collection.update_many() into a collection (MongoDB) using PyMongo without duplicates

I insert documents into a collection with collection.update() because each post has a distinct postID. When I run the script again, a post that already exists in MongoDB should be updated rather than inserted as a new post with a postID that duplicates the first one. This is the structure of my data:
comment1 = [
    {
        'commentParentId': parent_content.text,
        'parentId': parent_ID,
        'posted': child_time.text,
        'postID': child_ID,
        'author': {
            'name': child_name.text
        },
        'content': child_content.text
    },
    ...............
]
This is the code I use to insert the data:
from pymongo import MongoClient

client = MongoClient()
db = client['comment_data2']
db.collection_data = db['comments']

for i in data_comment:
    db.collection_data.update_many(
        {db.collection_data.find({"postID": {"$in": i["postID"]}})},
        {"$set": i},
        {'upsert': True}
    )
But I get an error: TypeError: filter must be an instance of dict, bson.son.SON, or other type that inherits from collections.Mapping on the {'upsert': True} line. And is {db.collection_data.find({"postID": {"$in": i["postID"]}})} the right filter?
The filter must be a plain dict describing the match (not the result of a find() call), and upsert is a keyword argument, not a third positional dict. You can use this code:
db.collection_data.update_many(
    {"postID": i["postID"]},
    {"$set": i},
    upsert=True
)
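Since each postID identifies exactly one post, update_one does the same job. A minimal end-to-end sketch (database and collection names are taken from the question; data_comment is assumed to be the scraped list):
from pymongo import MongoClient

client = MongoClient()
collection = client['comment_data2']['comments']

for i in data_comment:
    # Match on the unique postID; insert the document if no match exists.
    collection.update_one(
        {"postID": i["postID"]},
        {"$set": i},
        upsert=True,
    )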

mediaItems.search not working with albumId

When I run the following code I get this error.
{'error': {'code': 400, 'message': 'Invalid JSON payload received. Unknown name "album_id": Proto field is not repeating, cannot start list.', 'status': 'INVALID_ARGUMENT', 'details': [{'@type': 'type.googleapis.com/google.rpc.BadRequest', 'fieldViolations': [{'description': 'Invalid JSON payload received. Unknown name "album_id": Proto field is not repeating, cannot start list.'}]}]}}
If I remove the "albumId": ["albumid code"] it works fine and returns
10 new items, total 10
def _actually_list_media_items(session):
    ret = []
    params = {
        'fields': 'mediaItems(id,baseUrl,filename,mimeType,productUrl),nextPageToken',
    }
    search_json = {
        "pageSize": 10,
        "albumId": ["<albumid code>"],
        "filters": {
            "includeArchivedMedia": False,
            "contentFilter": {
                "excludedContentCategories": [
                    "DOCUMENTS",
                    "RECEIPTS",
                    "SCREENSHOTS",
                    "UTILITY",
                    "WHITEBOARDS",
                ]
            },
            "mediaTypeFilter": {
                "mediaTypes": [
                    "PHOTO",
                ],
            },
        },
    }
    tmp = 0
    while tmp < 1:
        rsp = session.post(
            'https://photoslibrary.googleapis.com/v1/mediaItems:search',
            params=params,
            json=search_json,
        ).json()
        if 'error' in rsp:
            print(rsp)
        cur = [m for m in rsp.get('mediaItems', [])]
        ret += cur
        print(f'{len(cur)} new items, total {len(ret)}')
        pageToken = rsp.get('nextPageToken')
        if pageToken is None:
            break
        params['pageToken'] = pageToken
        tmp = tmp + 1
    return ret
The comment about albumId and filters being exclusive is correct, so you need to pick one or the other. However, assuming you want to use the albumId by itself, you need to remove the square brackets around your albumid code. Here's a clip from my code:
searchbody = {
    "albumId": album_id,
    "pageSize": 10
}
print(searchbody)
mediaresults = gAPIservice.mediaItems().search(body=searchbody).execute()
mediaitems = mediaresults.get('mediaItems', [])
for item in mediaitems:
    print(u'{0} ({1})'.format(item['filename'], item['id']))
Edit:
Apparently you can't use albumId and filters together: source
filters: object(Filters)
Filters to apply to the request. Can't be set in conjunction with an albumId.
Aside from that, albumId is supposed to be a string, not an array: source
"albumId": "<albumid code>",

Update nested map dynamodb

I have a DynamoDB table with an attribute containing a nested map, and I would like to update a specific inventory item that is filtered via a filter expression yielding a single item from this map.
How do I write an update expression that sets the location to "in place three" on the item with name=opel whose tags include "x1" (and possibly also "f3")?
This should just update the first list element's location attribute.
{
    "inventory": [
        {
            "location": "in place one",  # I want to update this
            "name": "opel",
            "tags": ["x1", "f3"]
        },
        {
            "location": "in place two",
            "name": "abc",
            "tags": ["a3", "f5"]
        }
    ],
    "User": "test"
}
Updated Answer - based on updated question statement
You can update attributes in a nested map using update expressions such that only a part of the item gets updated (i.e., DynamoDB applies the equivalent of a patch to your item), but, because DynamoDB is a document database, all operations (Put, Get, Update, Delete, etc.) work on the item as a whole.
So, in your example, assuming User is the partition key and that there is no sort key (I didn't see any attribute that could be a sort key in that example), an Update request might look like this:
table.update_item(
    Key={
        'User': 'test'
    },
    UpdateExpression="SET #inv[0].#loc = :locVal",
    ExpressionAttributeNames={
        '#inv': 'inventory',
        '#loc': 'location'
    },
    ExpressionAttributeValues={
        ':locVal': 'in place three',
    },
)
That said, you do have to know what the item schema looks like and which attributes within the item should be updated exactly.
DynamoDB does NOT have a way to operate on sub-items. Meaning, there is no way to tell DynamoDB to execute an operation such as "update item, set 'location' property of elements of the 'inventory' array that have a property of 'name' equal to 'opel'".
This is probably not the answer you were hoping for, but it is what's available today. You may be able to get closer to what you want by changing the schema a bit.
If you need to reference the sub-items by name, perhaps store something like:
{
    "inventory": {
        "opel": {
            "location": "in place one",  # I want to update this
            "tags": ["x1", "f3"]
        },
        "abc": {
            "location": "in place two",
            "tags": ["a3", "f5"]
        }
    },
    "User": "test"
}
Then your query would be:
table.update_item(
    Key={
        'User': 'test'
    },
    UpdateExpression="SET #inv.#brand.#loc = :locVal",
    ExpressionAttributeNames={
        '#inv': 'inventory',
        '#loc': 'location',
        '#brand': 'opel'
    },
    ExpressionAttributeValues={
        ':locVal': 'in place three',
    },
)
But YMMV, as even this has limitations: you are still limited to identifying inventory items by name (i.e., you still can't say "update the inventory item with tag 'x1'").
Ultimately you should carefully consider why you need Dynamo to perform these complex operations for you as opposed to you being specific about what you want to update.
You can update the nested map as follows:
First, create an empty item attribute of type map. In the example, graph is the empty item attribute.
dynamoTable = dynamodb.Table('abc')
dynamoTable.put_item(
    Item={
        'email': email_add,
        'graph': {},
    }
)
Then update the nested map as follows:
brand_name = 'opel'
dynamoTable = dynamodb.Table('abc')
dynamoTable.update_item(
    Key={
        'email': email_add,
    },
    UpdateExpression="SET #Graph.#brand = :name",
    ExpressionAttributeNames={
        '#Graph': 'inventory',
        '#brand': str(brand_name),
    },
    ExpressionAttributeValues={
        ':name': {
            "location": "in place two",
            'tag': {
                'graph_type': 'a3',
                'graph_title': 'f5'
            }
        }
    }
)
Updating Mike's answer, because that way doesn't work any more (at least for me).
It is working like this now (note the UpdateExpression and ExpressionAttributeNames):
table.update_item(
    Key={
        'User': 'test'
    },
    UpdateExpression="SET inv.#brand.loc = :locVal",
    ExpressionAttributeNames={
        '#brand': 'opel'
    },
    ExpressionAttributeValues={
        ':locVal': 'in place three',
    },
)
And whatever goes in Key={} is always the partition key (and the sort key, if any).
EDIT:
Seems like this way only works with 2-level nested properties. In this case you would only use ExpressionAttributeNames for the "middle" property (in this example, #brand in inv.#brand.loc). I'm not yet sure what the real rule is.
DynamoDB UpdateExpression does not search the database for matching cases like SQL does (where you can update all items that match some condition). To update an item you first need to identify it and get its primary key or composite key; if many items match your criteria, you have to update them one by one.
The issue in updating nested objects is then to define the UpdateExpression, ExpressionAttributeValues and ExpressionAttributeNames to pass to the DynamoDB update API.
I use a recursive function to update nested objects on DynamoDB. You asked for Python, but I use JavaScript; I think it is easy to read this code and implement it in Python:
https://gist.github.com/crsepulv/4b4a44ccbd165b0abc2b91f76117baa5
/**
 * Recursive function to get UpdateExpression, ExpressionAttributeValues & ExpressionAttributeNames to update a nested object on dynamoDB.
 * All levels of the nested object must exist previously on dynamoDB; this only updates the value, it does not create the branch.
 * Only works with objects of objects, not tested with Arrays.
 * @param obj , the object to update.
 * @param k , the seed is any value, takes sense on the last iteration.
 */
function getDynamoExpression(obj, k) {
    const key = Object.keys(obj);
    let UpdateExpression = 'SET ';
    let ExpressionAttributeValues = {};
    let ExpressionAttributeNames = {};
    let response = {
        UpdateExpression: ' ',
        ExpressionAttributeNames: {},
        ExpressionAttributeValues: {}
    };
    // https://stackoverflow.com/a/16608074/1210463
    /**
     * true when the input is an object; this means on all levels except the last one.
     */
    if ((!!obj) && (obj.constructor === Object)) {
        response = getDynamoExpression(obj[key[0]], key);
        UpdateExpression = 'SET #' + key + '.' + response['UpdateExpression'].substring(4); // substring deletes 'SET ' for the mid-level values
        ExpressionAttributeNames = {['#' + key]: key[0], ...response['ExpressionAttributeNames']};
        ExpressionAttributeValues = response['ExpressionAttributeValues'];
    } else {
        UpdateExpression = 'SET = :' + k;
        ExpressionAttributeValues = {
            [':' + k]: obj
        };
    }
    // removes the trailing dot on the last level
    if (UpdateExpression.indexOf(". ") !== -1) {
        UpdateExpression = UpdateExpression.replace(". ", "");
    }
    return {UpdateExpression, ExpressionAttributeValues, ExpressionAttributeNames};
}

// you can try many levels
const obj = {
    level1: {
        level2: {
            level3: {
                level4: 'value'
            }
        }
    }
};
I had the same need.
Hope this code helps. You only need to invoke compose_update_expression_attr_name_values, passing the dictionary containing the new values.
import random


def compose_update_expression_attr_name_values(data: dict) -> (str, dict, dict):
    """ Constructs UpdateExpression, ExpressionAttributeNames, and ExpressionAttributeValues for updating an entry of a DynamoDB table.

    :param data: the dictionary of attribute_values to be updated
    :return: a tuple (UpdateExpression: str, ExpressionAttributeNames: dict(str: str), ExpressionAttributeValues: dict(str: str))
    """
    # prepare recursion input
    expression_list = []
    value_map = {}
    name_map = {}
    # navigate the dict and fill expressions and dictionaries
    _rec_update_expression_attr_name_values(data, "", expression_list, name_map, value_map)
    # compose update expression from single paths
    expression = "SET " + ", ".join(expression_list)
    return expression, name_map, value_map


def _rec_update_expression_attr_name_values(data: dict, path: str, expressions: list, attribute_names: dict,
                                            attribute_values: dict):
    """ Recursively navigates the input and injects contents into expressions, names, and attribute_values.

    :param data: the data dictionary with updated data
    :param path: the navigation path in the original data dictionary to this recursive call
    :param expressions: the list of update expressions constructed so far
    :param attribute_names: a map associating "expression attribute name identifiers" to their actual names in ``data``
    :param attribute_values: a map associating "expression attribute value identifiers" to their actual values in ``data``
    :return: None, since ``expressions``, ``attribute_names``, and ``attribute_values`` get updated during the recursion
    """
    for k in data.keys():
        # generate non-ambiguous identifiers
        rdm = random.randrange(0, 1000)
        attr_name = f"#k_{rdm}_{k}"
        while attr_name in attribute_names.keys():
            rdm = random.randrange(0, 1000)
            attr_name = f"#k_{rdm}_{k}"
        attribute_names[attr_name] = k
        _path = f"{path}.{attr_name}"
        # recursion
        if isinstance(data[k], dict):
            # recursive case
            _rec_update_expression_attr_name_values(data[k], _path, expressions, attribute_names, attribute_values)
        else:
            # base case
            attr_val = f":v_{rdm}_{k}"
            attribute_values[attr_val] = data[k]
            expression = f"{_path} = {attr_val}"
            # remove the initial "."
            expressions.append(expression[1:])
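For example, applying the opel update from the question would look roughly like this (a sketch; table and the User key come from the earlier answers):
new_values = {"inventory": {"opel": {"location": "in place three"}}}
expression, names, values = compose_update_expression_attr_name_values(new_values)

table.update_item(
    Key={'User': 'test'},
    # e.g. "SET #k_42_inventory.#k_7_opel.#k_991_location = :v_991_location"
    UpdateExpression=expression,
    ExpressionAttributeNames=names,
    ExpressionAttributeValues=values,
)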

Google DLP: "ValueError: Protocol message Value has no "stringValue" field."

I have a method where I build a table of multiple items for Google's DLP inspect API, which can take either a ContentItem or a table of values.
Here is how the request is constructed:
def redact_text(text_list):
    dlp = google.cloud.dlp.DlpServiceClient()
    project = 'my-project'
    parent = dlp.project_path(project)
    items = build_item_table(text_list)
    info_types = [{'name': 'EMAIL_ADDRESS'}, {'name': 'PHONE_NUMBER'}]
    inspect_config = {
        'min_likelihood': "LIKELIHOOD_UNSPECIFIED",
        'include_quote': True,
        'info_types': info_types
    }
    response = dlp.inspect_content(parent, inspect_config, items)
    return response


def build_item_table(text_list):
    rows = []
    for item in text_list:
        row = {"values": [{"stringValue": item}]}
        rows.append(row)
    table = {"table": {"headers": [{"name": "something"}], "rows": rows}}
    return table
When I run this I get back the error ValueError: Protocol message Value has no "stringValue" field., even though this example and the docs say otherwise.
Is there something off in how I build the request?
Edit: Here's the output from build_item_table
{
    'table': {
        'headers': [
            {'name': 'value'}
        ],
        'rows': [
            {
                'values': [
                    {
                        'stringValue': 'My name is Jenny and my number is (555) 867-5309, you can also email me at anemail@gmail.com, another email you can reach me at is email@email.com. '
                    }
                ]
            },
            {
                'values': [
                    {
                        'stringValue': 'Jimbob Doe (555) 111-1233, that one place down the road some_email@yahoo.com'
                    }
                ]
            }
        ]
    }
}
Try string_value.... Python uses the protobuf field names (string_value), not the camelCase JSON names (stringValue).
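Applied to build_item_table from the question, a minimal sketch of the fix:
def build_item_table(text_list):
    # The Python client expects the protobuf field name "string_value",
    # not the camelCase JSON name "stringValue".
    rows = [{"values": [{"string_value": item}]} for item in text_list]
    return {"table": {"headers": [{"name": "something"}], "rows": rows}}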

Combination of two fields to be unique in Python Eve

In the Python Eve framework, is it possible to have a condition which checks a combination of two fields to be unique?
For example, the definition below only restricts firstname and lastname to each be unique across items in the resource.
people = {
    # 'title' tag used in item links.
    'item_title': 'person',
    'schema': {
        'firstname': {
            'type': 'string',
            'required': True,
            'unique': True
        },
        'lastname': {
            'type': 'string',
            'required': True,
            'unique': True
        }
    }
}
Instead, is there a way to restrict firstname and lastname combination to be unique?
Or is there a way to implement a CustomValidator for this?
You can probably achieve what you want by overloading _validate_unique and implementing custom logic there, taking advantage of self.document in order to retrieve the other field value.
However, since _validate_unique is called for every unique field, you would end up performing your custom validation twice, once for firstname and then for lastname. Not really desirable. Of course, the easy way out is setting up a fullname field, but I guess that's not an option in your case.
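For illustration, a minimal sketch of that overloading idea (hypothetical: the field names are hardcoded, and the combined lookup is restricted to one of the two fields so it doesn't run twice):
from flask import current_app as app
from eve.io.mongo import Validator

class NameComboValidator(Validator):
    def _validate_unique(self, unique, field, value):
        """ {'type': 'boolean'} """
        if unique and field == 'lastname':
            # check the firstname/lastname pair as a whole
            query = {'lastname': value, 'firstname': self.document.get('firstname')}
            if app.data.driver.db['people'].find_one(query):
                self._error(field, "firstname/lastname combination is not unique")
        else:
            super()._validate_unique(unique, field, value)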
Have you considered going for a slightly different design? Something like:
{'name': {'first': 'John', 'last': 'Doe'}}
Then all you need is make sure that name is required and unique:
{
    'name': {
        'type': 'dict',
        'required': True,
        'unique': True,
        'schema': {
            'first': {'type': 'string'},
            'last': {'type': 'string'}
        }
    }
}
Inspired by Nicola and _validate_unique.
from eve.io.mongo import Validator
from eve.utils import config
from flask import current_app as app


class ExtendedValidator(Validator):
    def _validate_unique_combination(self, unique_combination, field, value):
        """ {'type': 'list'} """
        self._is_combination_unique(unique_combination, field, value, {})

    def _is_combination_unique(self, unique_combination, field, value, query):
        """ Test if the value combination is unique. """
        if unique_combination:
            query = {k: self.document[k] for k in unique_combination}
            query[field] = value

            resource_config = config.DOMAIN[self.resource]

            # exclude soft deleted documents if applicable
            if resource_config['soft_delete']:
                query[config.DELETED] = {'$ne': True}

            if self.document_id:
                id_field = resource_config['id_field']
                query[id_field] = {'$ne': self.document_id}

            datasource, _, _, _ = app.data.datasource(self.resource)
            if app.data.driver.db[datasource].find_one(query):
                key_names = ', '.join([k for k in query])
                self._error(field, "value combination of '%s' is not unique" % key_names)
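A sketch of how this could be wired into the domain, assuming the people resource from the question (the unique_combination rule name comes from the validator above):
from eve import Eve

people = {
    'item_title': 'person',
    'schema': {
        'firstname': {'type': 'string', 'required': True},
        'lastname': {
            'type': 'string',
            'required': True,
            # validated together with firstname by _validate_unique_combination
            'unique_combination': ['firstname'],
        },
    },
}

app = Eve(validator=ExtendedValidator, settings={'DOMAIN': {'people': people}})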
The way I solved this issue is by creating a dynamic field, using a combination of functions and lambdas to create a hash from whichever fields you provide:
import hashlib

import jmespath


def unique_record(fields):
    def is_lambda(field):
        # Test if a variable is a lambda
        return callable(field) and field.__name__ == "<lambda>"

    def default_setter(doc):
        # Generate the composite list
        r = [
            str(field(doc)
                # Check whether it is a lambda
                if is_lambda(field)
                # jmespath is not required, but it enables using nested doc values
                else jmespath.search(field, doc))
            for field in fields
        ]
        # Generate an MD5 hash from the composite string (keep it clean)
        return hashlib.md5(''.join(r).encode()).hexdigest()

    return {
        'type': 'string',
        'unique': True,
        'default_setter': default_setter
    }
Practical Implementation
My use case was to create a collection that limits the number of key-value pairs a user can create within the collection:
domain = {
    'schema': {
        'key': {
            'type': 'string',
            'minlength': 1,
            'maxlength': 25,
            'required': True,
        },
        'value': {
            'type': 'string',
            'minlength': 1,
            'required': True
        },
        'hash': unique_record([
            'key',
            lambda doc: request.USER['_id']
        ]),
        'user': {
            'type': 'objectid',
            'default_setter': lambda doc: request.USER['_id']  # user tenant ID
        }
    }
}
The function receives a list of strings or lambda functions for dynamic value setting at request time, in my case the user's "_id".
The function supports JSON queries via the jmespath package; this isn't mandatory, but it leaves the door open for using nested doc values in other use cases.
NOTE: this will only work with values that are set by the user at request time or injected into the request body using a pre-request trigger pattern, like the USER object I inject in the pre_GET trigger, which represents the user currently making the request.
