I have the following JSON file:
json_data = {
    "action": "postRecord",
    "data": {
        "data": [
            {
                "info": {
                    "lid": 999,
                    "cid": 1234
                },
                "info": {
                    "lid": 111,
                    "cid": "6789"
                }
            }
        ]
    }
}
I tried using json.load:
output = json.load(json_data)
but it is returning a string. I want to access the value of cid in a way like output['data']['data']['info']['cid'].
Isn't your data already in the format you want?
print(json_data['data']['data'][0]['info']['cid'])
Uh. You're saying that you've got a JSON array as plain text in a json_data variable?
You should put the JSON data into the json_data variable as a string and use json.loads(json_data).
Since JSON is just text, you should treat it like a string, unless you have opened a JSON file from disk, in which case json.load() works with the file object itself.
In [5]: json_data = '{ "action":"postRecord", "data":{ "data":[ { "info":{ "lid":999, "cid":1234 }, "info":{ "lid":111, "cid":"6789" } } ] } }'
In [6]: output = json.loads(json_data)
In [7]: output['data']['data'][0]['info']['cid']
Out[7]: u'6789'
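The file-object case looks like this (a minimal sketch; data.json is just a hypothetical filename):
import json

# json.loads parses a JSON string...
output = json.loads('{"action": "postRecord"}')
print(output['action'])  # postRecord

# ...while json.load reads from an open file object
with open('data.json') as f:  # hypothetical file containing the same JSON
    output = json.load(f)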
It seems the original JSON string has a problem: there are duplicate info keys in one element. Duplicate keys are not preserved, so after parsing only the last one survives, and the result is the second cid, 6789.
json_data = { "action":"postRecord", "data":{ "data":[ { "info":{ "lid":999, "cid":1234 }, "info":{ "lid":111, "cid":"6789" } } ] } }
data = json.dumps(json_data)
json_to_python = json.loads(data)
print (json_to_python)
print (json_to_python['data']['data'][0]['info']['cid'])
======>
{'action': 'postRecord', 'data': {'data': [{'info': {'lid': 111, 'cid': '6789'}}]}}
6789
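If you want to at least detect the duplicate keys instead of silently losing one, json.loads accepts an object_pairs_hook; a minimal sketch:
import json

def detect_duplicates(pairs):
    # pairs is a list of (key, value) tuples for one JSON object, in document order
    keys = [key for key, _ in pairs]
    duplicates = {key for key in keys if keys.count(key) > 1}
    if duplicates:
        print("duplicate keys found:", duplicates)
    return dict(pairs)  # default behaviour: the last value wins

json_string = '{ "info": {"lid": 999, "cid": 1234}, "info": {"lid": 111, "cid": "6789"} }'
output = json.loads(json_string, object_pairs_hook=detect_duplicates)
print(output)  # only the second "info" survives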
Related
I am trying to add data into a JSON key from a CSV file and maintain the original structure as is. The JSON file looks like this:
{
    "inputDocuments": {
        "gcsDocuments": {
            "documents": [
                {
                    "gcsUri": "gs://test/.PDF",
                    "mimeType": "application/pdf"
                }
            ]
        }
    },
    "documentOutputConfig": {
        "gcsOutputConfig": {
            "gcsUri": "gs://test"
        }
    },
    "skipHumanReview": false
}
The CSV file I am trying to load has the following structure; note that the mimeType is not included in the CSV file.
I already have code that can do this, however it's a bit manual, and I am looking for a simpler approach that would just require a CSV file with the values, which would then be added into the JSON structure. The expected outcome should look like this:
{
    "inputDocuments": {
        "gcsDocuments": {
            "documents": [
                {
                    "gcsUri": "gs://sampleinvoices/Handwritten/1.pdf",
                    "mimeType": "application/pdf"
                },
                {
                    "gcsUri": "gs://sampleinvoices/Handwritten/2.pdf",
                    "mimeType": "application/pdf"
                }
            ]
        }
    },
    "documentOutputConfig": {
        "gcsOutputConfig": {
            "gcsUri": "gs://test"
        }
    },
    "skipHumanReview": false
}
The code that I am currently using, which is a bit manual, looks like this:
import json

# function to add to JSON
def write_json(new_data, filename='keyvalue.json'):
    with open(filename, 'r+') as file:
        # load existing data into a dict
        file_data = json.load(file)
        # join new_data with file_data inside documents
        file_data["inputDocuments"]["gcsDocuments"]["documents"].append(new_data)
        # set the file's current position at offset 0
        file.seek(0)
        # convert back to JSON
        json.dump(file_data, file, indent=4)

# python object to be appended
y = {
    "gcsUri": "gs://test/.PDF",
    "mimeType": "application/pdf"
}

write_json(y)
I would suggest something like this:
import pandas as pd
import json
from pathlib import Path

df_csv = pd.read_csv("your_data.csv")

json_file = Path("your_data.json")
json_data = json.loads(json_file.read_text())

documents = [
    {
        "gcsUri": cell,
        "mimeType": "application/pdf"
    }
    for cell in df_csv["column_name"]
]

json_data["inputDocuments"]["gcsDocuments"]["documents"] = documents
json_file.write_text(json.dumps(json_data))
Probably you should split this into separate functions, but it should communicate the general idea.
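For example, the split could look roughly like this (a sketch using the same hypothetical file and column names as above; the function names are just illustrative):
import json
from pathlib import Path
import pandas as pd

def build_documents(csv_path, column_name):
    # one document entry per row of the CSV column
    df = pd.read_csv(csv_path)
    return [{"gcsUri": uri, "mimeType": "application/pdf"} for uri in df[column_name]]

def update_documents(json_path, documents):
    # replace the documents list inside the existing JSON structure
    json_file = Path(json_path)
    data = json.loads(json_file.read_text())
    data["inputDocuments"]["gcsDocuments"]["documents"] = documents
    json_file.write_text(json.dumps(data, indent=4))

update_documents("your_data.json", build_documents("your_data.csv", "column_name"))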
Following Update json nodes in Python using jsonpath, I would like to know how one might update the JSON data given a certain context.
So, say we pick the exact same JSON example:
{
  "SchemeId": 10,
  "nominations": [
    {
      "nominationId": 1
    }
  ]
}
But this time, I would like to double the original value, so some lambda function is needed which takes the current node value into account.
No need for lambdas; for example, to double SchemeId, something like this should work:
import json
from jsonpath_ng import parse

data = json.loads("""the json string above""")
jsonpath_expr = parse('$.SchemeId')
val = jsonpath_expr.find(data)[0].value
jsonpath_expr.update(data, val * 2)
print(json.dumps(data, indent=2))
Output:
{
  "SchemeId": 20,
  "nominations": [
    {
      "nominationId": 1
    }
  ]
}
Here is an example with a lambda expression:
import json
from jsonpath_ng import parse

settings = '''{
    "choices": {
        "atm": {
            "cs": "Strom",
            "en": "Tree"
        },
        "bar": {
            "cs": "Dům",
            "en": "House"
        },
        "sea": {
            "cs": "Moře",
            "en": "Sea"
        }
    }
}'''

json_data = json.loads(settings)
pattern = parse('$.choices.*')

def magic(f: dict, to_lang='cs'):
    return f[to_lang]

pattern.update(json_data,
               lambda data_field, data, field: data.update({field: magic(data[field])}))

json_data
returns
{
    'choices': {
        'atm': 'Strom',
        'bar': 'Dům',
        'sea': 'Moře'
    }
}
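The same callable form of update also covers the doubling case from the question; a sketch, assuming the (value, container, field) callable signature used in the lambda above:
import json
from jsonpath_ng import parse

data = json.loads('{"SchemeId": 10, "nominations": [{"nominationId": 1}]}')
# the callable receives the current value, its containing dict and the field name
parse('$.SchemeId').update(data, lambda value, container, field: container.update({field: value * 2}))
print(json.dumps(data, indent=2))  # SchemeId is now 20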
I am attempting to parse a json response that looks like this:
{
    "links": {
        "next": "http://www.neowsapp.com/rest/v1/feed?start_date=2015-09-08&end_date=2015-09-09&detailed=false&api_key=xxx",
        "prev": "http://www.neowsapp.com/rest/v1/feed?start_date=2015-09-06&end_date=2015-09-07&detailed=false&api_key=xxx",
        "self": "http://www.neowsapp.com/rest/v1/feed?start_date=2015-09-07&end_date=2015-09-08&detailed=false&api_key=xxx"
    },
    "element_count": 22,
    "near_earth_objects": {
        "2015-09-08": [
            {
                "links": {
                    "self": "http://www.neowsapp.com/rest/v1/neo/3726710?api_key=xxx"
                },
                "id": "3726710",
                "neo_reference_id": "3726710",
                "name": "(2015 RC)",
                "nasa_jpl_url": "http://ssd.jpl.nasa.gov/sbdb.cgi?sstr=3726710",
                "absolute_magnitude_h": 24.3,
                "estimated_diameter": {
                    "kilometers": {
                        "estimated_diameter_min": 0.0366906138,
                        "estimated_diameter_max": 0.0820427065
                    },
                    "meters": {
                        "estimated_diameter_min": 36.6906137531,
                        "estimated_diameter_max": 82.0427064882
                    },
                    "miles": {
                        "estimated_diameter_min": 0.0227984834,
                        "estimated_diameter_max": 0.0509789586
                    },
                    "feet": {
                        "estimated_diameter_min": 120.3760332259,
                        "estimated_diameter_max": 269.1689931548
                    }
                },
                "is_potentially_hazardous_asteroid": false,
                "close_approach_data": [
                    {
                        "close_approach_date": "2015-09-08",
                        "close_approach_date_full": "2015-Sep-08 09:45",
                        "epoch_date_close_approach": 1441705500000,
                        "relative_velocity": {
                            "kilometers_per_second": "19.4850295284",
                            "kilometers_per_hour": "70146.106302123",
                            "miles_per_hour": "43586.0625520053"
                        },
                        "miss_distance": {
                            "astronomical": "0.0269230459",
                            "lunar": "10.4730648551",
                            "kilometers": "4027630.320552233",
                            "miles": "2502653.4316094954"
                        },
                        "orbiting_body": "Earth"
                    }
                ],
                "is_sentry_object": false
},
}
I am trying to figure out how to parse through this to get the "miss_distance" dictionary values. I am unable to wrap my head around it.
Here is what I have been able to do so far:
After I get a Response object from requests.get():
response = requests.get(url)
I convert the response object to a JSON object:
data = response.json()  # this returns a dictionary object
I try to parse the first level of the dictionary:
for i in data:
    if i == "near_earth_objects":
        dataset1 = data["near_earth_objects"]["2015-09-08"]
        # this returns the next object, which is of type list
Can someone please explain:
1. How do I decipher this response in the first place?
2. How can I move forward in parsing the response object and get to the miss_distance dictionary?
Any pointers/help is appreciated. Thank you.
Your data will have multiple dictionaries for each date, near-earth object, and close approach:
near_earth_objects = data['near_earth_objects']
for date in near_earth_objects:
    objects = near_earth_objects[date]
    for object in objects:
        close_approach_data = object['close_approach_data']
        for close_approach in close_approach_data:
            print(close_approach['miss_distance'])
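If you only need a single unit from each miss_distance dictionary, for example kilometers as a float, a small extension of the same loop (sketch):
near_earth_objects = data['near_earth_objects']
for date, objects in near_earth_objects.items():
    for obj in objects:
        for close_approach in obj['close_approach_data']:
            # miss distances arrive as strings in the response, so convert explicitly
            km = float(close_approach['miss_distance']['kilometers'])
            print(date, obj['name'], km)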
The code below gives you a table of (date, miss_distance) entries for every object on every date:
import json

raw_json = '''
{
    "near_earth_objects": {
        "2015-09-08": [
            {
                "close_approach_data": [
                    {
                        "miss_distance": {
                            "astronomical": "0.0269230459",
                            "lunar": "10.4730648551",
                            "kilometers": "4027630.320552233",
                            "miles": "2502653.4316094954"
                        },
                        "orbiting_body": "Earth"
                    }
                ]
            }
        ]
    }
}
'''

if __name__ == "__main__":
    parsed = json.loads(raw_json)

    # assuming this json includes more than one near_earth_object spread across dates
    near_objects = []
    for date, near_objs in parsed['near_earth_objects'].items():
        for obj in near_objs:
            for appr in obj['close_approach_data']:
                o = {
                    'date': date,
                    'miss_distances': appr['miss_distance']
                }
                near_objects.append(o)

    print(near_objects)
output:
[
    {
        'date': '2015-09-08',
        'miss_distances': {
            'astronomical': '0.0269230459',
            'lunar': '10.4730648551',
            'kilometers': '4027630.320552233',
            'miles': '2502653.4316094954'
        }
    }
]
I have a JSON file as follows:
{
    "desired": {
        "property1": {
            "port": "/dev/usbserial",
            "rx": {
                "watchdoginterval": 3600
            },
            "state": {
                "path": "/Users/user1"
            },
            "enabled": "true",
            "active": {
                "enabled": "true"
            }
        },
        "property2": {
            "signal_interrupt": "USR2",
            "signal_description_path": "/tmp/logger.log"
        },
        "property3": {
            "periodmins": 40
        },
    }
}
I am having issues trying to convert this into a string for use with AWS IoT. The function I am using is deviceShadowHandler.shadowUpdate(JSONPayload, customShadowCallback_Update, 5), where JSONPayload should be the JSON string.
I have tried:
with open('JSONfile.json', 'r') as f:
    dict = json.load(f)
JSONPayload = str(dict)
but I receive an "Invalid JSON file" error. An attempt to manually create a literal string from the JSON file gets messy, with complaints about "EOL while scanning string literal", etc. What is the best way to solve this? I am new to JSON and Python.
Trailing commas are not allowed in JSON.
{
    "desired": {
        "property1": {
            "port": "/dev/usbserial",
            "rx": {
                "watchdoginterval": 3600
            },
            "state": {
                "path": "/Users/user1"
            },
            "enabled": "true",
            "active": {
                "enabled": "true"
            }
        },
        "property2": {
            "signal_interrupt": "USR2",
            "signal_description_path": "/tmp/logger.log"
        },
        "property3": {
            "periodmins": 40
        } # <- no comma there
    }
}
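Once the trailing comma is removed, the file loads cleanly. Note also that str(dict) produces a Python repr with single quotes, not valid JSON; json.dumps is the safer way to build the payload string. A minimal sketch, reusing the handler and callback names from the question:
import json

with open('JSONfile.json') as f:
    payload_dict = json.load(f)

# json.dumps yields a valid JSON string, unlike str(payload_dict)
JSONPayload = json.dumps(payload_dict)

# deviceShadowHandler and customShadowCallback_Update come from the question's AWS IoT setup
deviceShadowHandler.shadowUpdate(JSONPayload, customShadowCallback_Update, 5)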
I have the following JSON file, car_models.json:
{
    "name": "John",
    "age": 30,
    "cars": [
        {
            "car_model": "Mustang",
            "car_brand": "Ford"
        },
        {
            "car_model": "cx-5",
            "car_brand": "Mazda"
        }
    ]
}
I have another JSON file, data_change.json, which contains details about the JSONPath expressions and their values:
{
    "testcase_ID": "test_1A",
    "description": "Some description",
    "request_change_data": [
        {
            "element_path": "$.cars.[0].car_model",
            "element_value": "focus"
        }
    ]
}
I want to read the data_change.json content, use the element_path from it to parse car_models.json, and update the corresponding value to the element_value from data_change.json. That is, I want to use the JSONPath $.cars[0].car_model to locate car_model in car_models.json and change its value from Mustang to focus, so my updated car_models.json should be the following:
{
    "name": "John",
    "age": 30,
    "cars": [
        {
            "car_model": "focus",
            "car_brand": "Ford"
        },
        {
            "car_model": "cx-5",
            "car_brand": "Mazda"
        }
    ]
}
How can I do this in python?
Guessing that the expected answer needs to have "focus" and not "Ford":
The following should give you this:
import json
import re

with open('car_models.json') as f:
    cars_model = json.load(f)

with open('data_change.json') as f:
    data_change = json.load(f)

for elements in data_change['request_change_data']:
    element_path = elements['element_path']
    # regex to get the index (as a string) between the square brackets
    position_match = re.match(r"^.*\[(.*)\].*$", element_path)
    position = int(position_match.group(1))
    print(position)
    # split on "." to get the field name to change (the last path segment)
    thing_to_change = element_path.split(".")[-1]
    print(thing_to_change)
    value = elements['element_value']
    print(value)
    cars_model['cars'][position][thing_to_change] = value

print(cars_model)
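Alternatively, since element_path is already a JSONPath expression, jsonpath_ng (used in the earlier answers) can apply the change directly; a sketch, assuming the extra dot before the bracket needs normalizing for the parser:
import json
from jsonpath_ng import parse

with open('car_models.json') as f:
    car_models = json.load(f)

with open('data_change.json') as f:
    data_change = json.load(f)

for change in data_change['request_change_data']:
    # normalize "$.cars.[0].car_model" to "$.cars[0].car_model"
    path = change['element_path'].replace('.[', '[')
    parse(path).update(car_models, change['element_value'])

# write the updated structure back to car_models.json
with open('car_models.json', 'w') as f:
    json.dump(car_models, f, indent=4)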