Elasticsearch Cosine similarity exception - python

I am using Elasticsearch 7.15.2
I have a dense vector field of size 1024 that is saved in field vector. My query goes like this,
{
"size": 100,
"min_score": 0.75,
"query": {
"script_score": {
"query": {
"bool": {
"must": [],
"must_not": [
{
"terms": {
"id": [
"12"
]
}
}
],
"filter": [
]
}
},
"script": {
"source": "doc['vector'].size() == 0 ? 0 : cosineSimilarity(params.query_vector, 'vector')",
"params": {
"query_vector": [
10.798686228610265,
4.186900536065757,
0.19701037630829776,
0.20834632696963679,
......
......
]
}
}
}
}
}
when I execute this I am getting,
{
"error": {
"root_cause": [
{
"type": "script_exception",
"reason": "runtime error",
"script_stack": [
"org.elasticsearch.xpack.vectors.query.ScoreScriptUtils$DenseVectorFunction.<init>(ScoreScriptUtils.java:65)",
"org.elasticsearch.xpack.vectors.query.ScoreScriptUtils$CosineSimilarity.<init>(ScoreScriptUtils.java:172)",
"doc['vector'].size() == 0 ? 0 : cosineSimilarity(params.query_vector, 'vector')",
" ^---- HERE"
],
"script": "doc['vector'].size() == 0 ? 0 : cosineSimilarity(params.query_vector, 'vector')",
"lang": "painless",
"position": {
"offset": 70,
"start": 0,
"end": 79
}
}
],
"type": "search_phase_execution_exception",
"reason": "all shards failed",
"phase": "query",
"grouped": true,
"failed_shards": [
{
"shard": 0,
"index": "candidate_saas",
"node": "itz4QoZERGCHYk65uiTUBg",
"reason": {
"type": "script_exception",
"reason": "runtime error",
"script_stack": [
"org.elasticsearch.xpack.vectors.query.ScoreScriptUtils$DenseVectorFunction.<init>(ScoreScriptUtils.java:65)",
"org.elasticsearch.xpack.vectors.query.ScoreScriptUtils$CosineSimilarity.<init>(ScoreScriptUtils.java:172)",
"doc['vector'].size() == 0 ? 0 : cosineSimilarity(params.query_vector, 'vector')",
" ^---- HERE"
],
"script": "doc['vector'].size() == 0 ? 0 : cosineSimilarity(params.query_vector, 'vector')",
"lang": "painless",
"position": {
"offset": 70,
"start": 0,
"end": 79
},
"caused_by": {
"type": "class_cast_exception",
"reason": "class org.elasticsearch.index.fielddata.ScriptDocValues$Doubles cannot be cast to class org.elasticsearch.xpack.vectors.query.VectorScriptDocValues$DenseVectorScriptDocValues (org.elasticsearch.index.fielddata.ScriptDocValues$Doubles is in unnamed module of loader 'app'; org.elasticsearch.xpack.vectors.query.VectorScriptDocValues$DenseVectorScriptDocValues is in unnamed module of loader java.net.FactoryURLClassLoader #af9a89f)"
}
}
}
]
},
"status": 400
}
Please help

Related

json.decoder.JSONDecodeError - while converting JSON to CSV output

While trying to convert a JSON output below to CSV, getting error
Here is the JSON output
{
"data": [
{
"id": "-1000100591151294842",
"type": "fres",
"attributes": {
"operationState": "In Service",
"deploymentState": "discovered",
"displayData": {
"operationState": "Up",
"adminState": "Enabled",
"displayTopologySource": "Protocol,Derived",
"displayPhotonicSpectrumData": [
{
"frequency": "194.950000",
"wavelength": "1537.79",
"channel": "CH-20"
}
],
"displayDeploymentState": "Discovered",
"displayName": "J-BBEG-CHLC-P109"
},
"utilizationData": {
"totalCapacity": "100.0",
"usedCapacity": "100.0",
"utilizationPercent": "100",
"capacityUnits": "Gbps"
},
"resourceState": "discovered",
"serviceClass": "OTU",
"linkLabel": "BBEG-ROADM-0101:5-4-1,CHLC-ROADM-0401:7-35-1",
"lastUpdatedAdminStateTimeStamp": "2021-05-03T00:29:24.444Z",
"lastUpdatedOperationalStateTimeStamp": "2022-12-08T22:42:21.567Z",
"userLabel": "J-BBEG-CHLC-P109",
"mgmtName": "",
"nativeName": "",
"awarenessTime": "2022-12-08T22:42:22.123Z",
"layerRate": "OTU4",
"layerRateQualifier": "OTU4",
"supportedByLayerRatePackageList": [
{
"layerRate": "OTSi",
"layerRateQualifier": "100G"
}
],
"networkRole": "FREAP",
"directionality": "bidirectional",
"topologySources": [
"adjacency",
"stitched"
],
"adminState": "In Service",
"photonicSpectrumPackageList": [
{
"frequency": "194.950000",
"width": "37.5"
}
],
"active": true,
"additionalAttributes": {
"isActual": "true",
"hasLowerTopology": "true"
},
"reliability": "auto",
"resilienceLevel": "unprotected"
},
"relationships": {
"freDiscovered": {
"data": {
"type": "freDiscovered",
"id": "-1000100591151294842"
}
},
"supportedByServices": {
"data": [
{
"type": "fres",
"id": "6765278351459212874"
}
]
},
"endPoints": {
"data": [
{
"type": "endPoints",
"id": "-1000100591151294842:1"
},
{
"type": "endPoints",
"id": "-1000100591151294842:2"
}
]
},
"partitionFres": {
"data": [
{
"type": "fres",
"id": "7147507956181395827"
}
]
}
}
},
{
"id": "-1013895107051577774",
"type": "fres",
"attributes": {
"operationState": "In Service",
"deploymentState": "discovered",
"displayData": {
"operationState": "Up",
"adminState": "Enabled",
"displayTopologySource": "Protocol,Derived",
"displayPhotonicSpectrumData": [
{
"frequency": "191.600000",
"wavelength": "1564.68",
"channel": "CH-87"
}
],
"displayDeploymentState": "Discovered",
"displayName": "J-KFF9-PNTH-P101"
},
"utilizationData": {
"totalCapacity": "100.0",
"usedCapacity": "90.0",
"utilizationPercent": "90",
"capacityUnits": "Gbps"
},
"resourceState": "discovered",
"serviceClass": "OTU",
"tags": [
"J-KFF9-PNTH-P101"
],
"linkLabel": "KFF9-ROADM-0301:1-1-1,PNTH-ROADM-0101:1-1-1",
"lastUpdatedAdminStateTimeStamp": "2021-09-12T20:22:59.334Z",
"lastUpdatedOperationalStateTimeStamp": "2022-10-12T14:20:44.779Z",
"userLabel": "J-KFF9-PNTH-P101",
"mgmtName": "",
"nativeName": "",
"awarenessTime": "2022-10-12T14:20:45.417Z",
"layerRate": "OTU4",
"layerRateQualifier": "OTU4",
"supportedByLayerRatePackageList": [
{
"layerRate": "OTSi",
"layerRateQualifier": "100G"
}
],
"networkRole": "FREAP",
"directionality": "bidirectional",
"topologySources": [
"adjacency",
"stitched"
],
"adminState": "In Service",
"photonicSpectrumPackageList": [
{
"frequency": "191.600000",
"width": "37.5"
}
],
"active": true,
"additionalAttributes": {
"isActual": "true",
"hasLowerTopology": "true"
},
"reliability": "auto",
"resilienceLevel": "unprotected"
},
"relationships": {
"freDiscovered": {
"data": {
"type": "freDiscovered",
"id": "-1013895107051577774"
}
},
"supportedByServices": {
"data": [
{
"type": "fres",
"id": "6055685088078365419"
}
]
},
"endPoints": {
"data": [
{
"type": "endPoints",
"id": "-1013895107051577774:1"
},
{
"type": "endPoints",
"id": "-1013895107051577774:2"
}
]
},
"partitionFres": {
"data": [
{
"type": "fres",
"id": "-6727082893715936342"
}
]
}
}
}
] }
I am getting the error below, and I am not sure what is missing.
Here is the Python script I used. I have been trying different variations, but with no luck — I get different errors in all the other instances.
# The file is a single, valid JSON document, so there is no need to split
# the raw text on '[' / ']' and re-assemble pieces by hand -- that approach
# breaks as soon as the document contains nested arrays.  Parse it directly
# with json.load() and pull the records out of the top-level "data" key.
filename = Path('fre.json')
data = []
with open(filename, 'r') as json_file:
    doc = json.load(json_file)

# Each element of doc["data"] is one record; collect them all.
for each in doc.get('data', []):
    data.append(each)
what is wrong?

update nested json object in python

I have a json file name input which as follows
{
"abc": {
"dbc": {
"type": "string",
"metadata": {
"description": "Name of the namespace"
}
},
"fgh": {
"type": "string",
"metadata": {
"description": "Name of the Topic"
}
}
},
"resources": [
{
"sku": {
"name": "[parameters('sku')]"
},
"properties": {},
"resources": [
{
"resources": [
{
"resources": [
{
"properties": {
"filterType": "SqlFilter",
"sqlFilter": {
"sqlExpression": "HAI"
}
}
}
]
}
]
}
]
}
]
}
I want "sqlExpression": "HAI" value to be replaced with BYE as below
"sqlExpression": "BYE"
I want python code to do it, I tried the below code but not working
# NOTE(review): this line fails for several reasons -- `properties`,
# `sqlFilter` and `sqlExpression` are bare (unquoted) names, so Python
# raises NameError before any lookup happens; those keys also hold dicts,
# not lists, so the trailing [0] indexes are wrong; and `input` shadows
# the builtin of the same name.  See the corrected assignment below.
input['resources'][0]['resources'][0]['resources'][0]['resources'][0][properties][0][sqlFilter][0][sqlExpression][0]='BYE'
# Same template document as in the question; the goal is to replace the
# deeply nested sqlExpression value "HAI" with "BYE" in place.
inp = {
    "abc": {
        "dbc": {
            "type": "string",
            "metadata": {
                "description": "Name of the namespace"
            }
        },
        "fgh": {
            "type": "string",
            "metadata": {
                "description": "Name of the Topic"
            }
        }
    },
    "resources": [
        {
            "sku": {
                "name": "[parameters('sku')]"
            },
            "properties": {},
            "resources": [
                {
                    "resources": [
                        {
                            "resources": [
                                {
                                    "properties": {
                                        "filterType": "SqlFilter",
                                        "sqlFilter": {
                                            "sqlExpression": "HAI"
                                        }
                                    }
                                }
                            ]
                        }
                    ]
                }
            ]
        }
    ]
}

# Walk down the four nested "resources" levels (each is a one-element
# list), then assign into the sqlFilter dict.  Keys are quoted strings and
# only the list levels are indexed with [0].
node = inp['resources'][0]
for _ in range(3):
    node = node['resources'][0]
node['properties']['sqlFilter']['sqlExpression'] = 'BYE'
print(inp)
Result
{'abc': {'dbc': ...truncated... {'sqlExpression': 'BYE'}}}]}]}]}]}

Is there a way to add curly brackets around a list of dictionaries already existing within a JSON file?

I currently have two JSONS that I want to merge into one singular JSON, additionally I want to add in a slight change.
Firstly, these are the two JSONS in question.
An intents JSON:
[
{
"ID": "G1",
"intent": "password_reset",
"examples": [
{
"text": "I forgot my password"
},
{
"text": "I can't log in"
},
{
"text": "I can't access the site"
},
{
"text": "My log in is failing"
},
{
"text": "I need to reset my password"
}
]
},
{
"ID": "G2",
"intent": "account_closure",
"examples": [
{
"text": "I want to close my account"
},
{
"text": "I want to terminate my account"
}
]
},
{
"ID": "G3",
"intent": "account_creation",
"examples": [
{
"text": "I want to open an account"
},
{
"text": "Create account"
}
]
},
{
"ID": "G4",
"intent": "complaint",
"examples": [
{
"text": "A member of staff was being rude"
},
{
"text": "I have a complaint"
}
]
}
]
and an entities JSON:
[
{
"ID": "K1",
"entity": "account_type",
"values": [
{
"type": "synonyms",
"value": "business",
"synonyms": [
"corporate"
]
},
{
"type": "synonyms",
"value": "personal",
"synonyms": [
"vanguard",
"student"
]
}
]
},
{
"ID": "K2",
"entity": "beverage",
"values": [
{
"type": "synonyms",
"value": "hot",
"synonyms": [
"heated",
"warm"
]
},
{
"type": "synonyms",
"value": "cold",
"synonyms": [
"ice",
"freezing"
]
}
]
}
]
The expected outcome is to create a JSON file that mimics this structure:
{
"intents": [
{
"intent": "password_reset",
"examples": [
{
"text": "I forgot my password"
},
{
"text": "I want to reset my password"
}
],
"description": "Reset a user password"
}
],
"entities": [
{
"entity": "account_type",
"values": [
{
"type": "synonyms",
"value": "business",
"synonyms": [
"company",
"corporate",
"enterprise"
]
},
{
"type": "synonyms",
"value": "personal",
"synonyms": []
}
],
"fuzzy_match": true
}
],
"metadata": {
"api_version": {
"major_version": "v2",
"minor_version": "2018-11-08"
}
},
"dialog_nodes": [
{
"type": "standard",
"title": "anything_else",
"output": {
"generic": [
{
"values": [
{
"text": "I didn't understand. You can try rephrasing."
},
{
"text": "Can you reword your statement? I'm not understanding."
},
{
"text": "I didn't get your meaning."
}
],
"response_type": "text",
"selection_policy": "sequential"
}
]
},
"conditions": "anything_else",
"dialog_node": "Anything else",
"previous_sibling": "node_4_1655399659061",
"disambiguation_opt_out": true
},
{
"type": "event_handler",
"output": {
"generic": [
{
"title": "What type of account do you hold with us?",
"options": [
{
"label": "Personal",
"value": {
"input": {
"text": "personal"
}
}
},
{
"label": "Business",
"value": {
"input": {
"text": "business"
}
}
}
],
"response_type": "option"
}
]
},
"parent": "slot_9_1655398217028",
"event_name": "focus",
"dialog_node": "handler_6_1655398217052",
"previous_sibling": "handler_7_1655398217052"
},
{
"type": "event_handler",
"output": {},
"parent": "slot_9_1655398217028",
"context": {
"account_type": "#account_type"
},
"conditions": "#account_type",
"event_name": "input",
"dialog_node": "handler_7_1655398217052"
},
{
"type": "standard",
"title": "business_account",
"output": {
"generic": [
{
"values": [
{
"text": "We have notified your corporate security team, they will be in touch to reset your password."
}
],
"response_type": "text",
"selection_policy": "sequential"
}
]
},
"parent": "node_3_1655397279884",
"next_step": {
"behavior": "jump_to",
"selector": "body",
"dialog_node": "node_4_1655399659061"
},
"conditions": "#account_type:business",
"dialog_node": "node_1_1655399028379",
"previous_sibling": "node_3_1655399027429"
},
{
"type": "standard",
"title": "intent_collection",
"output": {
"generic": [
{
"values": [
{
"text": "Thank you for confirming that you want to reset your password."
}
],
"response_type": "text",
"selection_policy": "sequential"
}
]
},
"next_step": {
"behavior": "jump_to",
"selector": "body",
"dialog_node": "node_3_1655397279884"
},
"conditions": "#password_reset",
"dialog_node": "node_3_1655396920143",
"previous_sibling": "Welcome"
},
{
"type": "frame",
"title": "account_type_confirmation",
"output": {
"generic": [
{
"values": [
{
"text": "Thank you"
}
],
"response_type": "text",
"selection_policy": "sequential"
}
]
},
"parent": "node_3_1655396920143",
"context": {},
"next_step": {
"behavior": "skip_user_input"
},
"conditions": "#password_reset",
"dialog_node": "node_3_1655397279884"
},
{
"type": "standard",
"title": "personal_account",
"output": {
"generic": [
{
"values": [
{
"text": "We have sent you an email with a password reset link."
}
],
"response_type": "text",
"selection_policy": "sequential"
}
]
},
"parent": "node_3_1655397279884",
"next_step": {
"behavior": "jump_to",
"selector": "body",
"dialog_node": "node_4_1655399659061"
},
"conditions": "#account_type:personal",
"dialog_node": "node_3_1655399027429"
},
{
"type": "standard",
"title": "reset_confirmation",
"output": {
"generic": [
{
"values": [
{
"text": "Do you need assistance with anything else today?"
}
],
"response_type": "text",
"selection_policy": "sequential"
}
]
},
"digress_in": "does_not_return",
"dialog_node": "node_4_1655399659061",
"previous_sibling": "node_3_1655396920143"
},
{
"type": "slot",
"output": {},
"parent": "node_3_1655397279884",
"variable": "$account_type",
"dialog_node": "slot_9_1655398217028",
"previous_sibling": "node_1_1655399028379"
},
{
"type": "standard",
"title": "welcome",
"output": {
"generic": [
{
"values": [
{
"text": "Hello. How can I help you?"
}
],
"response_type": "text",
"selection_policy": "sequential"
}
]
},
"conditions": "welcome",
"dialog_node": "Welcome"
}
],
"counterexamples": [],
"system_settings": {
"off_topic": {
"enabled": true
},
"disambiguation": {
"prompt": "Did you mean:",
"enabled": true,
"randomize": true,
"max_suggestions": 5,
"suggestion_text_policy": "title",
"none_of_the_above_prompt": "None of the above"
},
"human_agent_assist": {
"prompt": "Did you mean:"
},
"intent_classification": {
"training_backend_version": "v2"
},
"spelling_auto_correct": true
},
"learning_opt_out": false,
"name": "Reset Password",
"language": "en",
"description": "Basic Password Reset Request"
}
So what I am missing in my original files, is essentially:
"intents":
and for the entities file:
"entities"
at the start of each list of dictionaries.
Additionally, I would need to wrap the whole thing in curly braces to comply with json formatting.
As seen, the final goal is not just appending these two to one another but the file technically continues with some other JSON code that I have yet to write and deal with.
My question now is as follows; by what method can I either add in these words and the braces to the individual files, then combine them into a singular JSON or alternatively by what method can I read in these files and combine them with the changes all in one go?
The new output file closing on a curly brace after the entities list of dicts is an acceptable outcome for me at the time, so that I can continue to make changes and hopefully further learn from this how to do these changes in future when I get there.
TIA
JSON is only a string format: you can load it into a language's native structures — in Python, lists and dicts — do what you need, then dump it back out. So you don't "add strings" or "add brackets"; you modify the structure.
# Load the two top-level JSON arrays, wrap them in a single object, and
# write the combined document out.
#
# BUG in the original: it reused the variable `file`, which still pointed
# at 'entities.txt' when json.dump() was called, so the entities SOURCE
# file was silently overwritten.  The original also never closed its file
# handles.  Write to a dedicated output file via context managers instead.
with open('intents.txt') as fh:
    intents = json.load(fh)    # a list of intent dicts
with open('entities.txt') as fh:
    entities = json.load(fh)   # a list of entity dicts

# Build the enclosing object -- this is how you "add the braces": you
# create a dict, not by concatenating strings.
content = {
    "intents": intents,
    "entities": entities,
}

with open('combined.json', 'w') as fh:
    json.dump(content, fh, indent=2)
If you're reading all the JSON in as a string, you can just prepend '{"intents":' to the start and append a closing '}'.
# Wrap an already-serialized JSON array string in an "intents" object by
# plain string concatenation.
# NOTE(review): this only yields valid JSON if myJson is itself a complete
# JSON value; building JSON by concatenation is fragile -- prefer parsing
# with json.load and re-serializing with json.dump as shown above.
myJson = "your json string"
myWrappedJson = '{"intents":' + myJson + "}"

pymongo group multiple conditions

# Timezone setup: localize "now" in the model's timezone and compute an
# expiry horizon five days out.
CURRENT_TZ = timezone(bp.BaseModel.__timezone__ or "Asia/Shanghai")
NOW = CURRENT_TZ.localize(datetime.utcnow())
EXPIRY_DATE = NOW + relativedelta(days=5)
# NOTE(review): this pipeline is the code that raises the OperationFailure
# quoted below.  Two problems:
#  * $cond takes exactly [condition, then, else]; this array packs four
#    condition/result pairs into one $cond, and the bare objects with two
#    keys ({"$gte": ..., "$lt": ...}) are expressions with more than one
#    field, which MongoDB rejects.
#  * Field paths inside aggregation expressions need a "$" prefix
#    ("$pm_date"); "pm_date" is just a literal string here.
# The working $switch version appears in the update further down.
res = await Fixture.aggregate(
[
{"$match": dict(eol={"$nin": [True, ""]})},
{
"$group": {
"_id": {
"$cond": [
{"$lt": ["pm_date", start_date]},
"PENDING",
{
"$gte": ["pm_date", start_date],
"$lt": ["pm_date", end_date],
},
"DONE",
{
"$gte": ["pm_due_date", start_date],
"$lte": ["pm_due_date", EXPIRY_DATE],
},
"WILL EXPIRED",
{"$lte": ["pm_due_date", NOW]},
"EXPIRED",
]
},
"count": {"$sum": 1},
}
},
]
)
from the above code, I expected output for example like
{
"_id" : "PENDING",
"qty": 50
},
{
"_id" : "DONE",
"qty": 50
},
{
"_id" : "WILL EXPIRED",
"qty": 40
}
{
"_id" : "EXPIRED",
"qty": 10
}
but my console shows the error below — can someone help me fix the pymongo pipeline for grouping on multiple conditions?
raise OperationFailure(msg % errmsg, code, response)
pymongo.errors.OperationFailure: An object representing an expression must have exactly one field: { $gte: [ "pm_date", new Date(1596240000000) ], $lt: [ "pm_date", new Date(1598918400000) ] }
Update: I got the result by using $switch (aggregation)
Refer to: https://docs.mongodb.com/manual/reference/operator/aggregation/switch/
# Working version: compute a per-document "status" in a $project stage
# using $switch (first matching branch wins, so order matters: EXPIRED is
# tested before WILL EXPIRE), then $group on that computed status.
# Field paths are correctly "$"-prefixed here ("$pm_due_date", "$pm_date").
res = await Fixture.aggregate(
[
{"$match": dict(eol={"$nin": [True, ""]})},
{
"$project": {
"pm_due_date": 1,
"status": {
"$switch": {
"branches": [
{
"case": {"$lt": ["$pm_due_date", NOW]},
"then": "EXPIRED",
},
{
"case": {
"$and": [
{
"$gte": [
"$pm_due_date",
start_date,
]
},
{
"$lte": [
"$pm_due_date",
EXPIRY_DATE,
]
},
]
},
"then": "WILL EXPIRE",
},
{
"case": {"$lt": ["$pm_date", start_date]},
"then": "PENDING",
},
{
"case": {
"$and": [
{"$gte": ["$pm_date", start_date]},
{"$lt": ["$pm_date", end_date]},
]
},
"then": "DONE",
},
],
# Documents matching none of the branches fall through to "NA".
"default": "NA",
}
},
}
},
{"$group": {"_id": "$status", "count": {"$sum": 1}}},
]
)
You should put your $cond in a $project stage instead of the $group
# Nested-$cond alternative: each $cond takes exactly [if, then, else], so
# the four buckets are expressed by nesting a $cond in the else slot.
# FIX: field paths inside aggregation expressions must be "$"-prefixed
# ("$pm_date"); without the "$", MongoDB compares the literal string
# "pm_date" against the dates and every document lands in the same bucket.
[
    {"$match": dict(eol={"$nin": [True, ""]})},
    {"$project": {
        "status": {
            "$cond": [
                {"$lt": ["$pm_date", start_date]},
                "PENDING",
                {"$cond": [
                    {
                        "$and": [
                            {"$gte": ["$pm_date", start_date]},
                            {"$lt": ["$pm_date", end_date]}
                        ]
                    },
                    "DONE",
                    {"$cond": [
                        {
                            "$and": [
                                {"$gte": ["$pm_date", start_date]},
                                {"$lt": ["$pm_date", EXPIRY_DATE]}
                            ]
                        },
                        "WILL EXPIRED",
                        "EXPIRED"
                    ]}
                ]}
            ]
        }
    }},
    {
        "$group": {
            "_id": "$status",
            "count": {"$sum": 1},
        }
    },
]

JSON manipulation for substituting values python

I have a scenario where I am substituting the values in a payload(data1) by manipulating a json payload(data2).
data2:
[
{
"eqid": 71430,
"tags": [
{
"id": 135853,
"content": "content1",
"class_id": 13733,
"class_name": "reEs"
},
{
"id": 1358341,
"content": "content2",
"class_id": 13734447,
"class_name": "reEp"
},
{
"id": 135832561,
"content": "content3",
"class_id": 137342347,
"class_name": "reEj"
},
{
"id": 1358234561,
"content": "content4",
"class_id": 137123347,
"class_name": "reEk"
},
{
"id": 1355678561,
"content": "content5",
"class_id": 137432347,
"class_name": "reEm"
},
{
"id": 1352348561,
"content": "content6",
"class_id": 137786347,
"class_name": "reEn"
}
],
"auth": false
},
{
"eqid": 243582,
"tags": [
{
"id": 1358456561,
"content": "content1",
"class_id": 137213347,
"class_name": "reEl"
},
{
"id": 13584567561,
"content": "content2",
"class_id": 13745347,
"class_name": "reEt"
},
{
"id": 1353218561,
"content": "content3",
"class_id": 137980347,
"class_name": "reEf"
},
{
"id": 13589758561,
"content": "content4",
"class_id": 1375678347,
"class_name": "reEb"
}
],
"auth": false
},
{
"eqid": 243672,
"tags": [
{
"id": 1358456561,
"content": "content1",
"class_id": 137213347,
"class_name": "reEl"
},
{
"id": 13589758561,
"content": "content4",
"class_id": 1375678347,
"class_name": "reEb"
}
],
"auth": false
}
]
data 1 -
data1 = {
"data": [
{
"name": "app-pp",
"ck_name": "App1",
"eid": 71430,
"Iar": "Osk",
"sps": "Active",
"tgs": "tobedonetages",
"sid": "tobedoneservice",
"last_checked": "19-05-2020"
},
{
"name": "app-pq",
"ck_name": "App2",
"eid": 243582,
"Iar": "Osk",
"sps": "Active",
"tgs": "tobedonetages",
"sid": "tobedoneservice",
"last_checked": "19-05-2020"
}
]
}
Now, whenever the eid of a record in data1 equals the eqid of an entry in data2,
replace the values of the two keys tgs and sid in that data1 record with, respectively, the joined content values (under tags) and the auth value from data2.
What I have tried :
# NOTE(review): this attempt has several problems --
#  * `for k in data1` iterates the dict's keys (just "data"), yet the body
#    reads from `tempkey`, which is never defined (NameError);
#  * it looks up "tas" although the records use the key "tgs";
#  * rebinding tempserv/temptag to new values does not modify the record;
#  * it appends a new dict to `data1` (a dict, not a list) instead of
#    updating the existing entries of data1["data"].
# Iterate data1["data"] directly and assign into each record instead, as
# in the answers below.
for tes in data2:
tempmed = tes["eqid"]
tempservice = tes["auth"]
tempservicel = tes["tags"]
for k in data1:
templand= tempkey["name"]
temphck= tempkey["ck_name"]
tempevalid= tempkey["eid"]
tempiaas= tempkey["Iar"]
tempspc= tempkey["sps"]
temptag= tempkey["tas"]
tempserv= tempkey["sid"]
templc = tempkey["last_checked"]
if tempmed == tempevalid:
tempserv = tempservice
temptag = tempservicel
data1.append({'name': templand, 'ck_name': temphck, 'eid': tempevalid, 'Iar': tempiaas, 'sps': tempspc, 'tgs': temptag, 'sid': tempserv, 'last_checked': templc})
I am not sure what the approach should be to achieve this, as my current approach doesn't work as expected.
expected O/P :
{"data":[
{
"name":"app-pp",
"ck_name":"App1",
"eid":71430,
"Iar":"Osk",
"sps":"Active",
"tgs":"content1,content2,content3,content4,content5,content6",
"sid":"false",
"last_checked":"19-05-2020"
},
{
"name":"app-pq",
"ck_name":"App2",
"eid":243582,
"Iar":"Osk",
"sps":"Active",
"tgs":"content1,content2,content3,content4",
"sid":"false",
"last_checked":"19-05-2020"
}
]}
Any help would be great !
It is not optimal, but it works, and it can be more readable for a beginner.
# For every record in data1, find the data2 entry whose eqid matches the
# record's eid, then copy its auth flag into 'sid' and a comma-joined
# string of its tag contents into 'tgs' (in place).
for record in data1['data']:
    for source in data2:
        if source['eqid'] != record['eid']:
            continue
        record['sid'] = source['auth']
        contents = [entry['content'] for entry in source['tags']]
        record['tgs'] = ','.join(contents)
print(data1)
For bigger data it would be better to first build, in one loop, a structure holding only the content and auth values from data2, and then use a second loop to apply it to data1. That way fewer nested iterations run.
Full working example
data2 = [
{
"eqid": 71430,
"tags": [
{
"id": 135853,
"content": "content1",
"class_id": 13733,
"class_name": "reEs"
},
{
"id": 1358341,
"content": "content2",
"class_id": 13734447,
"class_name": "reEp"
},
{
"id": 135832561,
"content": "content3",
"class_id": 137342347,
"class_name": "reEj"
},
{
"id": 1358234561,
"content": "content4",
"class_id": 137123347,
"class_name": "reEk"
},
{
"id": 1355678561,
"content": "content5",
"class_id": 137432347,
"class_name": "reEm"
},
{
"id": 1352348561,
"content": "content6",
"class_id": 137786347,
"class_name": "reEn"
}
],
"auth": False
},
{
"eqid": 243582,
"tags": [
{
"id": 1358456561,
"content": "content1",
"class_id": 137213347,
"class_name": "reEl"
},
{
"id": 13584567561,
"content": "content2",
"class_id": 13745347,
"class_name": "reEt"
},
{
"id": 1353218561,
"content": "content3",
"class_id": 137980347,
"class_name": "reEf"
},
{
"id": 13589758561,
"content": "content4",
"class_id": 1375678347,
"class_name": "reEb"
}
],
"auth": False
},
{
"eqid": 243672,
"tags": [
{
"id": 1358456561,
"content": "content1",
"class_id": 137213347,
"class_name": "reEl"
},
{
"id": 13589758561,
"content": "content4",
"class_id": 1375678347,
"class_name": "reEb"
}
],
"auth": False
}
]
data1 = {
"data": [
{
"name": "app-pp",
"ck_name": "App1",
"eid": 71430,
"Iar": "Osk",
"sps": "Active",
"tgs": "tobedonetages",
"sid": "tobedoneservice",
"last_checked": "19-05-2020"
},
{
"name": "app-pq",
"ck_name": "App2",
"eid": 243582,
"Iar": "Osk",
"sps": "Active",
"tgs": "tobedonetages",
"sid": "tobedoneservice",
"last_checked": "19-05-2020"
}
]
}
# Walk the records of data1; whenever a data2 entry shares the same id,
# overwrite 'sid' with its auth flag and 'tgs' with the comma-joined tag
# contents.  The prints trace which ids matched and which contents were
# collected.
for rec in data1['data']:
    print("item1['eid']: ", rec['eid'])
    for src in data2:
        if rec['eid'] != src['eqid']:
            continue
        print("item2['eqid']:", src['eqid'])
        rec['sid'] = src['auth']
        parts = []
        for entry in src['tags']:
            print(entry['content'])
            parts.append(entry['content'])
        rec['tgs'] = ','.join(parts)
print(data1)
try this
# Two-pass variant: first index data2 by eqid so the second pass is a
# constant-time lookup instead of a nested scan.
lookup = {}
for entry in data2:
    joined = ",".join(t["content"] for t in entry["tags"])
    lookup[entry["eqid"]] = [joined, entry["auth"]]

# Patch matching records of data1 in place.
for rec in data1["data"]:
    hit = lookup.get(rec["eid"])
    if hit is not None:
        rec["tgs"] = hit[0]
        rec["sid"] = hit[1]
print(data1)

Categories