Get value from JSON with JMESPath - Python

I'm trying to get all values where "_tradeIdScheme": "mhi:MUREX", e.g. "#value": "37066751".
Part of JSON:
"trade": {
"tradeHeader": {
"partyTradeIdentifier": [{
"tradeId": [{
"#value": "5fbbc10b32a3adbcc7bb6fc0",
"_tradeIdScheme": "mhi:trade-id",
"_xsi:type": "TradeId"
}, {
"#value": "37066751",
"_tradeIdScheme": "mhi:MUREX",
"_xsi:type": "TradeId"
}
}]
}]
This is what I am trying, but it returns only one value and I need all of them:
filepath = jsonEod + "\\" + "MHEUeodTrades.json"
f_open = open(filepath).read().replace("\n", "")
json_obj = json.loads(f_open, strict=False)

for doc in json_obj:
    tradeId = jmespath.search(
        "trade.tradeHeader.partyTradeIdentifier[].tradeId[?_tradeIdScheme == 'mhi:MUREX']",
        doc,
    )
    list_mx.append(tradeId)
Does anyone know which part of my path is wrong?

for file in cdwEodTrades:
    filepath = jsonEod + "\\" + file
    f_open = open(filepath).read().replace("\n", "")
    json_obj = json.loads(f_open, strict=False)
    for doc in json_obj:
        tradeId = jmespath.search("trade.tradeHeader.partyTradeIdentifier[]", doc)
        for i, x in enumerate(tradeId):  # iterate over the partyTradeIdentifier entries
            mxId = jmespath.search(
                f"trade.tradeHeader.partyTradeIdentifier[{i}].tradeId[?_tradeIdScheme == 'mhi:MUREX']",
                doc,
            )[0]["#value"]
            list_mx.append(mxId)

list_mx = list(set(list_mx))  # de-duplicate the collected values
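A single JMESPath expression can also do the filtering and pull out the values in one pass. A minimal sketch, assuming the document structure shown above (the pipe stops the projection so the filter runs over the flattened list of tradeId entries):

import jmespath

expr = (
    "trade.tradeHeader.partyTradeIdentifier[].tradeId[]"
    " | [?_tradeIdScheme == 'mhi:MUREX'].\"#value\""
)
for doc in json_obj:
    list_mx.extend(jmespath.search(expr, doc) or [])  # search returns None when nothing matches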

Related

Python - Add nested items to one nested list

I am trying to create one nested Python dictionary called Results.
I am using AWS Rekognition to get an image and output the results.
The results_dict only contains one result after the loop completes, and I want all of the results collected in one nested list.
I'm trying to get:
{
    "Results": [
        {
            "Name": "Human",
            "Confidence": 98.87621307373047
        },
        {
            "Name": "Face",
            "Confidence": 98.87621307373047
        },
        {
            "Name": "Person",
            "Confidence": 98.87621307373047
        }
    ]
}
But I'm getting:
{
    'Results': {
        'Name': 'Paper',
        'Confidence': 57.299766540527344
    }
}
The code keeps overwriting the values, but I want to add another set of Name and Confidence on every iteration. My code is:
import boto3
import json

BUCKET = "*****"
FOLDER = 'testing/'
JOEY = FOLDER + "Joey_30_Sept.png"
BEYONCE = FOLDER + "beyonce_rekognition_moderation_testing.jpg"
MANBEARD = FOLDER + "man_beard.jpg"
MEN = FOLDER + "men_group.jpg"

client = boto3.client('rekognition')
response = client.detect_labels(
    Image={'S3Object': {'Bucket': BUCKET, 'Name': JOEY}},
    MaxLabels=10,
    MinConfidence=0)

results_dict = {}
results_dict['Results'] = {}
results_dict['Results']['Name'] = ""
results_dict['Results']['Confidence'] = ""

for label in response['Labels']:
    name = label['Name']  # to get the whole bounding box.
    confidence = label['Confidence']
    name_str = str(name)
    conf_str = str(confidence)
    results_dict["Results"]["Name"] = label['Name']
    results_dict["Results"]["Confidence"] = label['Confidence']
    print(results_dict)
You defined results_dict['Results'] as a dict, not a list. Make it a list and append a new dict for each label:
...
results_dict = {}
results_dict['Results'] = []

for label in response['Labels']:
    name_str = str(label['Name'])
    conf_str = str(label['Confidence'])
    results_dict['Results'].append({"Name": name_str, "Confidence": conf_str})

print(results_dict)
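Equivalently, a short sketch (assuming the same detect_labels response as above) builds the whole list with a comprehension and keeps Confidence as a number, matching the desired output:

results_dict = {
    "Results": [
        {"Name": label["Name"], "Confidence": label["Confidence"]}
        for label in response["Labels"]
    ]
}
print(json.dumps(results_dict, indent=4))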

Modify a sub value of json file using python

I'm trying to create multiple JSON files that only differ in a couple of numbered values. This is my code:
import json

json_dict = {
    "assetName": "GhostCastle#",
    "previewImageNft": {
        "mime_Type": "png",
        "description": "#",
        "fileFromIPFS": "QmNuFreEoJy9CHhXchxaDAwuFXPHu84KYWY9U7S2banxFS/#.png",
        "metadataPlaceholder": [
            {
                "": ""
            }
        ]
    }
}

n = 10
for i in range(1, n + 1):
    json_dict["assetName"] = f"GhostCastle{i}"
    json_dict[#What to put here to choose "fileFromIPFS"] = f"QmNuFreEoJy9CHhXchxaDAwuFXPHu84KYWY9U7S2banxFS/{i}.png"
    with open(f"{i}.json", 'w') as json_file:
        # json.dump() saves the dict as JSON to the file
        json.dump(json_dict, json_file)
So what do I put there to reach "fileFromIPFS" in the nested part of json_dict?
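"fileFromIPFS" sits one level down, inside the nested "previewImageNft" dict, so the lookup has to chain both keys. A minimal sketch of that line inside the loop, assuming the template shown above:

json_dict["previewImageNft"]["fileFromIPFS"] = f"QmNuFreEoJy9CHhXchxaDAwuFXPHu84KYWY9U7S2banxFS/{i}.png"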

Iterate / Loop thru a json file using python multiple times

I have a JSON file:
{
"IGCSE":[
{
"rolename": "igcsesubject1",
"roleid": 764106550863462431
},
{
"rolename": "igcsesubject2",
"roleid": 764106550863462431
}
],
"AS":[
{
"rolename": "assubject1",
"roleid": 854789476987546
},
{
"rolename": "assubject2",
"roleid": 854789476987546
}
],
"A2":[
{
"rolename": "a2subject1",
"roleid": 854789476987856
},
{
"rolename": "a2subject2",
"roleid": 854789476987856
}
]
}
I want to fetch the keys (IGCSE, AS, A2, ...) and then fetch the rolename and roleid entries under each specific key. How do I do it?
Below is the Python code I used when the file had no top-level keys.
with open(fileloc) as f:
    data = json.load(f)

for s in range(len(data)):
    d1 = data[s]
    rname = d1["rolename"]
    rid = d1["roleid"]
any help would be appreciated :)
First, keep a list of the keys you want to read:
keys = ['IGCSE', 'AS']
Then iterate like this:
for x in data:
    if x in keys:
        for y in range(len(data[x])):
            print(data[x][y]['rolename'])
            print(data[x][y]['roleid'])
Hi, you can use a for loop to get the keys, then loop over the list stored under each key:
with open(fileloc) as f:
    data = json.load(f)

for s in data:
    for d1 in data[s]:
        rname = d1["rolename"]
        rid = d1["roleid"]
The following would work for what you need:
with open(file) as f:
    json_dict = json.load(f)

for key in json_dict:
    value_list = json_dict[key]
    for item in value_list:
        rname = item["rolename"]
        rid = item["roleid"]
If you need to filter for specific keys in the JSON, you can have a list of keys you want to obtain and filter for those keys as you iterate through the keys (similar to Wasif Hasan's suggestion above).
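A small sketch of that filtering, where wanted_keys is a hypothetical list of the sections you care about:

wanted_keys = ["IGCSE", "AS"]
for key in json_dict:
    if key not in wanted_keys:
        continue
    for item in json_dict[key]:
        print(key, item["rolename"], item["roleid"])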

Python - why does each json element in my json array have the same value

So I have a JSON template and I am reading from a CSV to update some of the values of the JSON properties. I then put all the JSON objects into an array and write it to a file, but in the file every element has the same value.
The old values are somehow being overwritten. How should I fix that?
def main():
    df = pd.read_csv("Daily_EXRATE.csv")
    df = df.loc[df['Field1'] == '04']
    opdb = {
        "sell_rate": 1.2676,
        "type": "currency_exchange",
        "version": "1"
    }
    opdbarray = []
    for index, rowsr in df.iterrows():
        data = {}
        data = rowsr.to_json()
        data = json.loads(data)
        opdb["sell_rate"] = data["Field11"]
        opdbarray.append(opdb)
        print(json.dumps(opdb, indent=4))

    # now write output to a file
    jsonDataFile = open("ccData_1.json", "w")
    jsonDataFile.write(json.dumps(opdbarray, indent=4, sort_keys=True))
    jsonDataFile.close()
The outputs are all the same:
[
    {
        "sell_rate": "2.1058000000",
        "type": "currency_exchange",
        "version": "1"
    },
    {
        "sell_rate": "2.1058000000",
        "type": "currency_exchange",
        "version": "1"
    },
    {
        "sell_rate": "2.1058000000",
        "type": "currency_exchange",
        "version": "1"
    },
You're appending the same opdb dictionary to opdbarray each time through the loop, just replacing its sell_rate element. You need to create a new dictionary on each iteration.
def main():
    df = pd.read_csv("Daily_EXRATE.csv")
    df = df.loc[df['Field1'] == '04']
    opdbarray = []
    for index, rowsr in df.iterrows():
        data = rowsr.to_json()
        data = json.loads(data)
        # build a fresh dict on every iteration
        opdb = {
            "type": "currency_exchange",
            "version": "1",
            "sell_rate": data["Field11"]
        }
        opdbarray.append(opdb)
        print(json.dumps(opdb, indent=4))

    # now write output to a file
    jsonDataFile = open("ccData_1.json", "w")
    jsonDataFile.write(json.dumps(opdbarray, indent=4, sort_keys=True))
    jsonDataFile.close()
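If you would rather keep a shared template, another sketch (assuming the same DataFrame and Field11 column as above) copies it per row instead of mutating one dict:

template = {"type": "currency_exchange", "version": "1"}
opdbarray = [
    {**template, "sell_rate": json.loads(row.to_json())["Field11"]}
    for _, row in df.iterrows()
]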

Iterate through a list of dictionaries and save duplicate data

I would like to iterate through a list of dictionaries and save the values of certain keys (in my case "consumerKey" and "consumerSecret") into another dictionary, as many times as they are present.
Problem: I'm able to iterate through the list, but my code is not saving the second consumer key and consumer secret; instead it saves the first consumer key and consumer secret twice.
Input:
{
"accessType": "",
"apiProducts": [],
"appFamily": "default",
"appId": "ac56c8b2-6ac1-4971-a1d3-4bf97893c067",
"attributes": [
{
"name": "DisplayName",
"value": "quotaapp"
},
{
"name": "Notes",
"value": ""
}
],
"callbackUrl": "",
"createdAt": 1549274952045,
"createdBy": "suraj.pai.airody#sap.com",
"credentials": [
{
"apiProducts": [
{
"apiproduct": "apiprod",
"status": "approved"
}
],
"attributes": [],
"consumerKey": "xyz",
"consumerSecret": "abc",
"expiresAt": -1,
"issuedAt": 1549274952051,
"scopes": [],
"status": "approved"
},
{
"apiProducts": [
{
"apiproduct": "ouathTest-Product",
"status": "approved"
}
],
"attributes": [],
"consumerKey": "pqr",
"consumerSecret": "wmn",
"expiresAt": -1,
"issuedAt": 1554802431452,
"scopes": [],
"status": "approved"
}
],
"developerId": "xyz",
"lastModifiedAt": 1554802431662,
"lastModifiedBy": "suraj.pai.airody#sap.com",
"name": "quotaapp",
"scopes": [],
"status": "approved"
}
Code:
import requests
import json
from requests.auth import HTTPBasicAuth
import csv


def get_v2details():
    a = 'orgID1'
    b = 'appID1'
    c = 'ConKey1'
    d = 'ConSecret1'
    e = 'appName1'
    org_lst = []
    some_dict = {}
    con_blst = []  # variable to append the dictionary at app level
    n = int(input("Enter number of orgs from Landscape 1: "))
    for i in range(0, n):
        ele = str(input())
        org_lst.append(ele)
    cmp_orglst = list(org_lst)
    print(cmp_orglst)
    for j in cmp_orglst:
        url = "https://canarydevmgmtsrv.dmzmo.sap.corp/v1/o/" + str(j) + "/apps/"
        headers = {'Content-Type': 'application/json'}
        response = requests.get(url, auth=HTTPBasicAuth('xyz', 'xyz'), headers=headers, verify=False)
        app_data = json.loads(response.text)
        print(app_data)
        for k in app_data:
            url1 = "https://canarydevmgmtsrv.dmzmo.sap.corp/v1/o/" + str(j) + "/apps/" + str(k)
            headers = {'Content-Type': 'application/json'}
            response1 = requests.get(url1, auth=HTTPBasicAuth('xyz', 'xyz'), headers=headers, verify=False)
            consumer_data = json.loads(response1.text)
            print(" Consumer Data is ", consumer_data)
            for l in range(len(consumer_data['credentials'])):
                some_dict[a] = str(j)
                some_dict[b] = consumer_data['appId']
                some_dict[e] = consumer_data['name']
                some_dict[c] = consumer_data['credentials'][0]['consumerKey']
                some_dict[d] = consumer_data['credentials'][0]['consumerSecret']
                print(some_dict)  # Print dictionary of each app ID
                con_blst.append(some_dict.copy())
    print(con_blst)

    csv_columns = ['orgID1', 'appName1', 'appID1', 'ConKey1', 'ConSecret1']
    csv_file = "Names1.csv"
    try:
        with open(csv_file, 'w', newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
            writer.writeheader()
            for data in con_blst:
                writer.writerow(data)
    except IOError:
        print("I/O error")
Expected result:
orgID1 appName1 appID1 ConKey1 ConSecret1
VALIDATE quotaapp 4bf97893c067 xyz abc
VALIDATE quotaapp 4bf97893c067 pqr wmn
Actual result:
orgID1 appName1 appID1 ConKey1 ConSecret1
VALIDATE quotaapp 4bf97893c067 xyz abc
VALIDATE quotaapp 4bf97893c067 xyz abc
It seems you just made a small error.
for l in range(len(consumer_data['credentials'])):
    some_dict[a] = str(j)
    some_dict[b] = consumer_data['appId']
    some_dict[e] = consumer_data['name']
    some_dict[c] = consumer_data['credentials'][0]['consumerKey']  # this line
    some_dict[d] = consumer_data['credentials'][0]['consumerSecret']  # and this line
    print(some_dict)  # Print dictionary of each app ID
    con_blst.append(some_dict.copy())
Should be
for l in range(len(consumer_data['credentials'])):
    some_dict[a] = str(j)
    some_dict[b] = consumer_data['appId']
    some_dict[e] = consumer_data['name']
    some_dict[c] = consumer_data['credentials'][l]['consumerKey']  # Here
    some_dict[d] = consumer_data['credentials'][l]['consumerSecret']  # Here
    print(some_dict)  # Print dictionary of each app ID
    con_blst.append(some_dict.copy())
You weren't looping through consumer_data['credentials'], you were just storing consumer_data['credentials'][0] twice
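A slightly more idiomatic sketch of the same fix (reusing the surrounding variables from the question) iterates over the credentials directly instead of indexing:

for cred in consumer_data['credentials']:
    some_dict[a] = str(j)
    some_dict[b] = consumer_data['appId']
    some_dict[e] = consumer_data['name']
    some_dict[c] = cred['consumerKey']
    some_dict[d] = cred['consumerSecret']
    con_blst.append(some_dict.copy())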
