How to add values from different keys with the same name - Python

Here is my data:
{
    "data": [
        {
            "date": 1577836800000,
            "#NOTIFICATION_SENT": 62629,
            "#NOTIFICATION_OPENED": 404
        },
        {
            "date": 1577923200000,
            "#NOTIFICATION_OPENED": 734
        }
    ]
}
How can I add up all the "#NOTIFICATION_OPENED" values to get the total number of notifications opened? If that's not possible, how can I select only the first "#NOTIFICATION_OPENED" key?
With my code, I print the value of the last duplicate key.
Here is my code:
def create_json(id, notificationSent, notificationOpened):
    return {(id): {
        'id': id,
        'notificationSent': notificationSent,
        'notificationOpened': notificationOpened,
    }}

statUrl = 'myapiurl'
with urlopen(statUrl) as response:
    sourcedata = response.read()
statdata = json.loads(sourcedata)

def push_data():
    newJsonx = dict()
    for item in statdata["data"]:
        try:
            notificationOpened = item["#NOTIFICATION_OPENED"]
        except:
            notificationOpened = '0'
        print(notificationOpened)
        try:
            notificationSent = item["#NOTIFICATION_SENT"]
        except:
            notificationSent = '0'
        # JSON DATA
        newJson = create_json(notificationSent, notificationOpened)
        newJsonx.update(newJson)
    with open('myfile.json', 'w', encoding='utf-8') as json_file:
        json.dump(newJsonx, json_file, ensure_ascii=False, sort_keys=True, indent=2)
    print('JSON: %s' % json.dumps(newJsonx, sort_keys=True, indent=2))

push_data()

# ...snipped for brevity...
with urlopen(statUrl) as response:
    sourcedata = response.read()
statdata = json.loads(sourcedata)
print(statdata)
# should be this format, as in your question post:
# {
#     "data": [{
#         "date": 1577836800000,
#         "#NOTIFICATION_SENT": 62629,
#         "#NOTIFICATION_OPENED": 404
#     },
#     {
#         "date": 1577923200000,
#         "#NOTIFICATION_OPENED": 734
#     }]
# }
notificationSent = []
notificationOpened = []
for d in statdata['data']:
    # .get() falls back to 0 when a key is missing from a record
    notificationOpened.append(d.get('#NOTIFICATION_OPENED', 0))
    notificationSent.append(d.get('#NOTIFICATION_SENT', 0))
print(sum(notificationOpened))
1138
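The same total can also be computed in one pass with a generator expression (equivalent to the loop above):

sum(d.get('#NOTIFICATION_OPENED', 0) for d in statdata['data'])  # -> 1138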

Here's a variant of your original code that will create the dictionary you're after.
# create data
statdata = {
    "data": [
        {
            "date": 1577836800000,
            "#NOTIFICATION_SENT": 62629,
            "#NOTIFICATION_OPENED": 404
        },
        {
            "date": 1577923200000,
            "#NOTIFICATION_OPENED": 734
        }
    ]
}

# use your create_json function
def create_json(id, notificationSent, notificationOpened):
    return {(id): {
        'id': id,
        'notificationSent': notificationSent,
        'notificationOpened': notificationOpened,
    }}

# define an ID value (needed for the create_json function)
id = 0
# initialise output dictionary
newJsonx = dict()
# add one entry to the dictionary per record
for elem in statdata["data"]:
    try:
        notificationOpened = elem["#NOTIFICATION_OPENED"]
    except KeyError:
        notificationOpened = '0'
    print(notificationOpened)
    try:
        notificationSent = elem["#NOTIFICATION_SENT"]
    except KeyError:
        notificationSent = '0'
    newJson = create_json(id, notificationSent, notificationOpened)
    newJsonx.update(newJson)
    id += 1
This gives:
newJsonx
{0: {'id': 0, 'notificationSent': 62629, 'notificationOpened': 404}, 1: {'id': 1, 'notificationSent': '0', 'notificationOpened': 734}}
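For what it's worth, the same dictionary can be built without the try/except blocks by using dict.get with a default, as in the first answer (a sketch that produces the output above):

newJsonx = {
    i: {
        'id': i,
        'notificationSent': d.get('#NOTIFICATION_SENT', '0'),
        'notificationOpened': d.get('#NOTIFICATION_OPENED', '0'),
    }
    for i, d in enumerate(statdata['data'])
}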

Related

Get value from json with Jmespath - Python

I'm trying to get all values where "_tradeIdScheme": "mhi:MUREX",
e.g. "#value": "37066751".
Part of JSON:
"trade": {
"tradeHeader": {
"partyTradeIdentifier": [{
"tradeId": [{
"#value": "5fbbc10b32a3adbcc7bb6fc0",
"_tradeIdScheme": "mhi:trade-id",
"_xsi:type": "TradeId"
}, {
"#value": "37066751",
"_tradeIdScheme": "mhi:MUREX",
"_xsi:type": "TradeId"
}
}]
}]
I am trying the following, but it returns only one value; I need to get all of them.
filepath = jsonEod + "\\" + "MHEUeodTrades.json"
f_open = open(filepath).read().replace("\n", "")
json_obj = json.loads(f_open, strict=False)
for doc in json_obj:
    tradeId = jmespath.search(
        "trade.tradeHeader.partyTradeIdentifier[].tradeId[?_tradeIdScheme == 'mhi:MUREX']",
        doc,
    )
    list_mx.append(tradeId)
Does anyone know which part of my path is wrong?
for file in cdwEodTrades:
    filepath = jsonEod + "\\" + file
    f_open = open(filepath).read().replace("\n", "")
    json_obj = json.loads(f_open, strict=False)
    for doc in json_obj:
        tradeId = jmespath.search("trade.tradeHeader.partyTradeIdentifier[]", doc)
        for i, x in enumerate(tradeId):  # iterate over the partyTradeIdentifier entries
            mxId = jmespath.search(
                f"trade.tradeHeader.partyTradeIdentifier[{i}].tradeId[?_tradeIdScheme == 'mhi:MUREX']",
                doc,
            )[0]["#value"]
            list_mx.append(mxId)
list(set(list_mx))
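As a possible shortcut (a sketch, untested against your full document): JMESPath can flatten the nested projection and pull the values in a single expression, using a quoted identifier for the "#value" key. The extra [] after the filter flattens the per-identifier lists, so the result should come back as a flat list like ['37066751', ...]:

mx_ids = jmespath.search(
    "trade.tradeHeader.partyTradeIdentifier[].tradeId[?_tradeIdScheme == 'mhi:MUREX'][].\"#value\"",
    doc,
)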

Python: Fetching postgresql result and loading it to JSON

I'm trying to get details about some cameras from PostgreSQL, and I need to return them all in one JSON response, but I can't work out how it should be done, because for row in self.Data: processes one row at a time. How can I add them all to one JSON dump?
I imagine the JSON dump looking like this:
{
    "status": "ok",
    "total_cameras": 3,
    "cameras": [{
        "camera_id": 1,
        "camera_name": "hikvision 1",
        "camera_ip": "42.51.56.0"
    },
    {
        "camera_id": 2,
        "camera_name": "hikvision 2",
        "camera_ip": "42.51.56.5"
    },
    {
        "camera_id": 3,
        "camera_name": "hikvision 3",
        "camera_ip": "2.1.58.5"
    }]
}
My code, which I use to get the information from PostgreSQL:
if not self.Data:
    self.RES = {'status': 'nocameras'}
    return web.Response(text=json.dumps(self.RES), status=403)
else:
    self.rows = self.cursor.rowcount
    for row in self.Data:
        if self.rows > 1:
            # Authorizing objects
            print(row)
            self.Camera_ID = row[0]
            self.Camera_Name = row[1]
            self.Camera_IP = row[2]
            self.Camera_LAT = row[3]
            self.Camera_LOG = row[4]
            self.Camera_Street = row[5]
            self.Camera_Last_Updated = row[6]
            self.Camera_API_key = row[7]
            print(self.Camera_ID, self.Camera_Name)
        else:
            self.RES = {'status': 'row_error'}
            return web.Response(text=json.dumps(self.RES), status=500)
I would first use the returned rows to build a list of dictionaries:
cameras = [
    dict(
        camera_id=c_id,
        camera_name=c_name,
        camera_ip=c_ip
    )
    for c_id, c_name, c_ip, *_ in self.Data  # row layout per your code: id, name, ip, lat, log, ...
]
And then create the final JSON object:
json.dumps({
    'status': 'ok',
    'total_cameras': len(cameras),
    'cameras': cameras
})
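Wired into the handler from the question, the final response might look like this (a sketch, reusing your web.Response pattern):

self.RES = {
    'status': 'ok',
    'total_cameras': len(cameras),
    'cameras': cameras
}
return web.Response(text=json.dumps(self.RES), content_type='application/json')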

Convert recursive POST data to JSON object

I'm POSTing JSON via JavaScript to an endpoint as form data, not in the body. I'm doing this to avoid an extra CORS OPTIONS (preflight) request. The JSON is formatted as:
$.ajax({
    url: 'somewhere',
    type: 'POST',
    data: {
        "foo": [
            "a",
            "b"
        ],
        "bar": [
            {
                "biz": [
                    "c",
                    "d"
                ]
            }
        ]
    },
    success: function(response) {
        // something
    }
});
When the query string comes through, it looks like foo%5B%5D=a&foo%5B%5D=b&bar%5B0%5D%5Bbiz%5D%5B%5D=c&bar%5B0%5D%5Bbiz%5D%5B%5D=d
I parse the query string with:
data = parse.unquote(data)
data = parse.parse_qs(data)
print(json.dumps(data))
But the resulting dict/json is
{
    "foo[]": [
        "a",
        "b"
    ],
    "bar[0][biz][]": [
        "c",
        "d"
    ]
}
This is problematic since I can't traverse the original keys. How can I convert the received value back into its original JSON form?
This is what I have so far
def findValue(value):
    if isinstance(value, dict):
        value = findValue(list(value.keys())[0])
    elif isinstance(value, list):
        value = findValue(value[0])
    return prepareValue(value)

def prepareKey(data, value=None):
    print(data)
    if isinstance(data, str):
        match = re.findall(r'([a-zA-Z0-9_-]+)\[([a-zA-Z0-9_-]+)\](.*)', data)
        return match[0]
    if data[2] != '':
        additional = prepareKey(data[2], value)
    else:
        additional = value
    return {data[0]: {data[1]: additional}}

def prepareValue(value):
    if len(value) == 1:
        value = value[0]
    return value

def preparePost(data, newData=False):
    if isinstance(data, dict):
        if not newData:
            newData = {}
        for key, value in data.items():
            match = re.findall(r'([a-zA-Z0-9_-]+)\[([a-zA-Z0-9_-]+)\](.*)', key)
            if len(match):
                assignment = findValue(value)
                thisData = prepareKey(match[0], assignment)
                newData.update(thisData)
            else:
                newData[key] = prepareValue(value)
    return newData
data = parse.unquote(data)
data = parse.parse_qs(data)
data = preparePost(data)
print(data)
It works moderately well but fails on the deeply nested values.
This is the real query I'm working with: foo=bar&cart%5Bcurrency%5D=USD&cart%5Banalytics%5D%5Blib%5D%5Bintegrations%5D%5B0%5D%5Boptions%5D%5BdefaultAttributes%5D%5BthemeId%5D=38654869555&cart%5Banalytics%5D%5Blib%5D%5Bintegrations%5D%5B0%5D%5Boptions%5D%5BdefaultAttributes%5D%5BcontentLanguage%5D=en
I can get as far as {'foo': 'bar', 'cart': {'currency': 'USD'}} but it fails on cart%5Banalytics%5D%5Blib%5D%5Bintegrations%5D%5B0%5D%5Boptions%5D%5BdefaultAttributes%5D%5BthemeId%5D=38654869555
Solution
Stringify the JSON and send the correct headers:
$.ajax({
    url: 'somewhere',
    type: 'POST',
    contentType: 'application/x-www-form-urlencoded',
    data: JSON.stringify({
        "foo": [
            "a",
            "b"
        ],
        "bar": [
            {
                "biz": [
                    "c",
                    "d"
                ]
            }
        ]
    }),
    success: function(response) {
        // something
    }
});
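On the Python side the body now arrives as one raw JSON string instead of key/value pairs, so the parse_qs step goes away entirely (a minimal sketch; how you read the raw request body depends on your framework):

import json

def parse_post(body):
    # body is the JSON.stringify()'d payload; decode it directly
    return json.loads(body)

data = parse_post(data)
print(data)  # {'foo': ['a', 'b'], 'bar': [{'biz': ['c', 'd']}]}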

Iterate through a list of dictionaries and save duplicate data

I would like to iterate through a list of dictionaries and save the values of certain keys (in my case "consumerKey" and "consumerSecret") into another dictionary, as many times as they are present.
Problem: I'm able to iterate through the list, but my code is not saving the second consumer key and consumer secret; instead it saves the first consumer key and consumer secret twice.
Input:
{
    "accessType": "",
    "apiProducts": [],
    "appFamily": "default",
    "appId": "ac56c8b2-6ac1-4971-a1d3-4bf97893c067",
    "attributes": [
        {
            "name": "DisplayName",
            "value": "quotaapp"
        },
        {
            "name": "Notes",
            "value": ""
        }
    ],
    "callbackUrl": "",
    "createdAt": 1549274952045,
    "createdBy": "suraj.pai.airody@sap.com",
    "credentials": [
        {
            "apiProducts": [
                {
                    "apiproduct": "apiprod",
                    "status": "approved"
                }
            ],
            "attributes": [],
            "consumerKey": "xyz",
            "consumerSecret": "abc",
            "expiresAt": -1,
            "issuedAt": 1549274952051,
            "scopes": [],
            "status": "approved"
        },
        {
            "apiProducts": [
                {
                    "apiproduct": "ouathTest-Product",
                    "status": "approved"
                }
            ],
            "attributes": [],
            "consumerKey": "pqr",
            "consumerSecret": "wmn",
            "expiresAt": -1,
            "issuedAt": 1554802431452,
            "scopes": [],
            "status": "approved"
        }
    ],
    "developerId": "xyz",
    "lastModifiedAt": 1554802431662,
    "lastModifiedBy": "suraj.pai.airody@sap.com",
    "name": "quotaapp",
    "scopes": [],
    "status": "approved"
}
Code:
import requests
import json
from requests.auth import HTTPBasicAuth
import csv

def get_v2details():
    a = 'orgID1'
    b = 'appID1'
    c = 'ConKey1'
    d = 'ConSecret1'
    e = 'appName1'
    org_lst = []
    some_dict = {}
    con_blst = []  # list collecting one dictionary per app-level credential
    n = int(input("Enter number of orgs from Landscape 1: "))
    for i in range(0, n):
        ele = str(input())
        org_lst.append(ele)
    cmp_orglst = list(org_lst)
    print(cmp_orglst)
    for j in cmp_orglst:
        url = "https://canarydevmgmtsrv.dmzmo.sap.corp/v1/o/" + str(j) + "/apps/"
        headers = {'Content-Type': 'application/json'}
        response = requests.get(url, auth=HTTPBasicAuth('xyz', 'xyz'), headers=headers, verify=False)
        app_data = json.loads(response.text)
        print(app_data)
        for k in app_data:
            url1 = "https://canarydevmgmtsrv.dmzmo.sap.corp/v1/o/" + str(j) + "/apps/" + str(k)
            headers = {'Content-Type': 'application/json'}
            response1 = requests.get(url1, auth=HTTPBasicAuth('xyz', 'xyz'), headers=headers, verify=False)
            consumer_data = json.loads(response1.text)
            print(" Consumer Data is ", consumer_data)
            for l in range(len(consumer_data['credentials'])):
                some_dict[a] = str(j)
                some_dict[b] = consumer_data['appId']
                some_dict[e] = consumer_data['name']
                some_dict[c] = consumer_data['credentials'][0]['consumerKey']
                some_dict[d] = consumer_data['credentials'][0]['consumerSecret']
                print(some_dict)  # print the dictionary for each app ID
                con_blst.append(some_dict.copy())
    print(con_blst)
    csv_columns = ['orgID1', 'appName1', 'appID1', 'ConKey1', 'ConSecret1']
    csv_file = "Names1.csv"
    try:
        with open(csv_file, 'w', newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
            writer.writeheader()
            for data in con_blst:
                writer.writerow(data)
    except IOError:
        print("I/O error")
Expected result:
orgID1 appName1 appID1 ConKey1 ConSecret1
VALIDATE quotaapp 4bf97893c067 xyz abc
VALIDATE quotaapp 4bf97893c067 pqr wmn
Actual result:
orgID1 appName1 appID1 ConKey1 ConSecret1
VALIDATE quotaapp 4bf97893c067 xyz abc
VALIDATE quotaapp 4bf97893c067 xyz abc
It seems you just made a small error.
for l in range(len(consumer_data['credentials'])):
    some_dict[a] = str(j)
    some_dict[b] = consumer_data['appId']
    some_dict[e] = consumer_data['name']
    some_dict[c] = consumer_data['credentials'][0]['consumerKey']  # this line
    some_dict[d] = consumer_data['credentials'][0]['consumerSecret']  # and this line
    print(some_dict)  # print dictionary of each app ID
    con_blst.append(some_dict.copy())
Should be
for l in range(len(consumer_data['credentials'])):
    some_dict[a] = str(j)
    some_dict[b] = consumer_data['appId']
    some_dict[e] = consumer_data['name']
    some_dict[c] = consumer_data['credentials'][l]['consumerKey']  # here
    some_dict[d] = consumer_data['credentials'][l]['consumerSecret']  # here
    print(some_dict)  # print dictionary of each app ID
    con_blst.append(some_dict.copy())
You weren't looping through consumer_data['credentials']; you were just storing consumer_data['credentials'][0] twice.
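As a side note, iterating over the list directly avoids the index bookkeeping altogether (a sketch using the same column names as your CSV writer):

for cred in consumer_data['credentials']:
    con_blst.append({
        'orgID1': str(j),
        'appID1': consumer_data['appId'],
        'appName1': consumer_data['name'],
        'ConKey1': cred['consumerKey'],
        'ConSecret1': cred['consumerSecret'],
    })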

Load data from MongoDb to Elasticsearch through python

I have some JSON data loaded in MongoDB, e.g. doc1 = {"id": 1, "name": "x1"}, doc2 = {"id": 2, "name": "x2"}, doc3 = {"id": 3, "name": "x3"}. Now I want to import this data from MongoDB into Elasticsearch. I wrote this piece of code:
mgclient = MongoClient()
db = mgclient['light-test']
col = db['test']

es1 = Elasticsearch()
print("Connected", es1.info())
es1.indices.create(index='light-test', ignore=400)

# Pull from mongo and dump into ES using bulk API
actions = []
for data in tqdm(col.find(), total=col.count()):
    data.pop('_id')
    action = {
        "_index": 'light-test',
        "_type": 'test',
        "_source": data
    }
    actions.append(action)
    print("complete")
    # Dump x number of objects at a time
    if len(actions) >= 100:
        deque(parallel_bulk(es1, actions), maxlen=0)
        actions = []
print("done")

a = es1.search(index='light-test', body={
    'query': {
        'match_all': {}
    }
})
print(a)
The problem is in the query result: the hits come back empty, whereas they should contain the indexed documents. Help me import the data from MongoDB into Elasticsearch.
app = Flask(__name__)

MONGO_URL = '...'
mgclient = MongoClient(MONGO_URL, ssl=True, ssl_cert_reqs=ssl.CERT_NONE)
db = mgclient['light']
col = db['task']

doc1 = {...}
doc2 = {...}
doc3 = {...}
post_id = col.insert_many([doc1, doc2, doc3])
print(col.count())

es1 = Elasticsearch(...)
ESinfo = (es1.info())

# Pull from mongo and dump into ES using bulk API
actions = []
for data in tqdm(col.find(), total=col.count()):
    data.pop('_id')
    action = {
        "index": {
            "_index": 'light',
            "_type": 'task',
        }
    }
    actions.append(action)
    actions.append(data)

#delete = es1.indices.delete(index='light')
request_body = {
    "settings": {
        "number_of_shards": 1,
        "number_of_replicas": 0
    }
}
es1.indices.create(index='light', body=request_body, ignore=400)
res = es1.bulk(index='light', body=actions, refresh=True)

result = col.find()
names = []
for obj in col.find():
    name = obj['name']
    names.append(name)
print(names)

@app.route('/query')
def Query():
    a = es1.search(index='light', body={
        'query': {
            'match': {
                'name': '...',
            }
        }
    })
    return jsonify(query=a)

if __name__ == "__main__":
    app.run(host='0.0.0.0', port=1024)
This has helped. thank you :)
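For reference, the action dicts from the first snippet (with "_index"/"_source") are the format the elasticsearch-py bulk helpers expect, so another option is to let the helper do the batching (a minimal sketch, assuming the same index name and collection):

from elasticsearch.helpers import bulk

actions = [
    {"_index": "light", "_type": "task", "_source": doc}
    for doc in col.find({}, {"_id": 0})  # project away Mongo's ObjectId, which isn't JSON-serializable
]
bulk(es1, actions, refresh=True)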
