Update Key Value In Python In JSON File - python

How do I change a value in a JSON file with Python? I want to search for "class": "DepictionScreenshotsView" and replace it with "class": ""
JSON File:
{
"minVersion": "0.1",
"class": "DepictionTabView",
"tintColor": "#2cb1be",
"headerImage": "",
"tabs": [
{
"tabname": "Details",
"class": "DepictionStackView",
"tintColor": "#2cb1be",
"views": [
{
"class": "DepictionSubheaderView",
"useBoldText": true,
"useBottomMargin": false,
"title": "Description"
},
{
"class": "DepictionMarkdownView",
"markdown": "Some dummy text...",
"useRawFormat": true
},
{
"class": "DepictionSeparatorView"
},
{
"class": "DepictionSubheaderView",
"useBoldText": true,
"useBottomMargin": false,
"title": "Screenshots"
},
{
"class": "DepictionScreenshotsView",
"itemCornerRadius": 6,
"itemSize": "{160, 284.44444444444}",
"screenshots": [
{
"accessibilityText": "Screenshot",
"url": "http://example.com/image.png"
}
]
},
{
"class": "DepictionSeparatorView"
},
{
"class": "DepictionSubheaderView",
"useBoldText": true,
"useBottomMargin": false,
"title": "Information"
},
{
"class": "DepictionTableTextView",
"title": "Author",
"text": "User"
},
{
"class": "DepictionSpacerView",
"spacing": 16
},
{
"class": "DepictionStackView",
"views": [
{
"class": "DepictionTableButtonView",
"title": "Contact",
"action": "http://example.com/",
"openExternal": true
}
]
},
{
"class": "DepictionSpacerView",
"spacing": 16
}
]
},
{
"tabname": "History",
"class": "DepictionStackView",
"views": [
{
"class": "DepictionSubheaderView",
"useBoldText": true,
"useBottomMargin": false,
"title": ""
},
{
"class": "DepictionMarkdownView",
"markdown": "<ul>\n<li>Initial release.<\/li>\n<\/ul>",
"useRawFormat": true
}
]
}
]
}

You can read a JSON file in Python as follows:
import json
with open('your_file.json') as f:
    data = json.load(f)
Then you access and change the value (for your case, the DepictionScreenshotsView entry is the fifth view of the first tab):
data['tabs'][0]['views'][4]['class'] = ""
After having changed the data, you can save it:
with open('your_file.json', 'w') as outfile:
    json.dump(data, outfile)
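If the target "class" value can appear at any depth rather than only at that one index path, a small recursive walk handles it generically. This is a minimal sketch (the file name is just the placeholder used above):
import json

def blank_class(node, target="DepictionScreenshotsView"):
    # Walk nested dicts and lists, blanking any "class" key whose value matches target
    if isinstance(node, dict):
        if node.get("class") == target:
            node["class"] = ""
        for value in node.values():
            blank_class(value, target)
    elif isinstance(node, list):
        for item in node:
            blank_class(item, target)

with open('your_file.json') as f:
    data = json.load(f)
blank_class(data)
with open('your_file.json', 'w') as outfile:
    json.dump(data, outfile, indent=2)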

Related

json.decoder.JSONDecodeError - while converting JSON to CSV output

While trying to convert the JSON output below to CSV, I am getting an error.
Here is the JSON output:
{
"data": [
{
"id": "-1000100591151294842",
"type": "fres",
"attributes": {
"operationState": "In Service",
"deploymentState": "discovered",
"displayData": {
"operationState": "Up",
"adminState": "Enabled",
"displayTopologySource": "Protocol,Derived",
"displayPhotonicSpectrumData": [
{
"frequency": "194.950000",
"wavelength": "1537.79",
"channel": "CH-20"
}
],
"displayDeploymentState": "Discovered",
"displayName": "J-BBEG-CHLC-P109"
},
"utilizationData": {
"totalCapacity": "100.0",
"usedCapacity": "100.0",
"utilizationPercent": "100",
"capacityUnits": "Gbps"
},
"resourceState": "discovered",
"serviceClass": "OTU",
"linkLabel": "BBEG-ROADM-0101:5-4-1,CHLC-ROADM-0401:7-35-1",
"lastUpdatedAdminStateTimeStamp": "2021-05-03T00:29:24.444Z",
"lastUpdatedOperationalStateTimeStamp": "2022-12-08T22:42:21.567Z",
"userLabel": "J-BBEG-CHLC-P109",
"mgmtName": "",
"nativeName": "",
"awarenessTime": "2022-12-08T22:42:22.123Z",
"layerRate": "OTU4",
"layerRateQualifier": "OTU4",
"supportedByLayerRatePackageList": [
{
"layerRate": "OTSi",
"layerRateQualifier": "100G"
}
],
"networkRole": "FREAP",
"directionality": "bidirectional",
"topologySources": [
"adjacency",
"stitched"
],
"adminState": "In Service",
"photonicSpectrumPackageList": [
{
"frequency": "194.950000",
"width": "37.5"
}
],
"active": true,
"additionalAttributes": {
"isActual": "true",
"hasLowerTopology": "true"
},
"reliability": "auto",
"resilienceLevel": "unprotected"
},
"relationships": {
"freDiscovered": {
"data": {
"type": "freDiscovered",
"id": "-1000100591151294842"
}
},
"supportedByServices": {
"data": [
{
"type": "fres",
"id": "6765278351459212874"
}
]
},
"endPoints": {
"data": [
{
"type": "endPoints",
"id": "-1000100591151294842:1"
},
{
"type": "endPoints",
"id": "-1000100591151294842:2"
}
]
},
"partitionFres": {
"data": [
{
"type": "fres",
"id": "7147507956181395827"
}
]
}
}
},
{
"id": "-1013895107051577774",
"type": "fres",
"attributes": {
"operationState": "In Service",
"deploymentState": "discovered",
"displayData": {
"operationState": "Up",
"adminState": "Enabled",
"displayTopologySource": "Protocol,Derived",
"displayPhotonicSpectrumData": [
{
"frequency": "191.600000",
"wavelength": "1564.68",
"channel": "CH-87"
}
],
"displayDeploymentState": "Discovered",
"displayName": "J-KFF9-PNTH-P101"
},
"utilizationData": {
"totalCapacity": "100.0",
"usedCapacity": "90.0",
"utilizationPercent": "90",
"capacityUnits": "Gbps"
},
"resourceState": "discovered",
"serviceClass": "OTU",
"tags": [
"J-KFF9-PNTH-P101"
],
"linkLabel": "KFF9-ROADM-0301:1-1-1,PNTH-ROADM-0101:1-1-1",
"lastUpdatedAdminStateTimeStamp": "2021-09-12T20:22:59.334Z",
"lastUpdatedOperationalStateTimeStamp": "2022-10-12T14:20:44.779Z",
"userLabel": "J-KFF9-PNTH-P101",
"mgmtName": "",
"nativeName": "",
"awarenessTime": "2022-10-12T14:20:45.417Z",
"layerRate": "OTU4",
"layerRateQualifier": "OTU4",
"supportedByLayerRatePackageList": [
{
"layerRate": "OTSi",
"layerRateQualifier": "100G"
}
],
"networkRole": "FREAP",
"directionality": "bidirectional",
"topologySources": [
"adjacency",
"stitched"
],
"adminState": "In Service",
"photonicSpectrumPackageList": [
{
"frequency": "191.600000",
"width": "37.5"
}
],
"active": true,
"additionalAttributes": {
"isActual": "true",
"hasLowerTopology": "true"
},
"reliability": "auto",
"resilienceLevel": "unprotected"
},
"relationships": {
"freDiscovered": {
"data": {
"type": "freDiscovered",
"id": "-1013895107051577774"
}
},
"supportedByServices": {
"data": [
{
"type": "fres",
"id": "6055685088078365419"
}
]
},
"endPoints": {
"data": [
{
"type": "endPoints",
"id": "-1013895107051577774:1"
},
{
"type": "endPoints",
"id": "-1013895107051577774:2"
}
]
},
"partitionFres": {
"data": [
{
"type": "fres",
"id": "-6727082893715936342"
}
]
}
}
}
] }
I am getting the below error and am not sure what is missing.
Here is the Python script I used. I have been trying different variations, but no luck; I get different errors in all the other instances.
from pathlib import Path
import json

filename = Path('fre.json')
data = []
with open(filename, 'r') as json_file:
    data_str = json_file.read()
    data_str = data_str.split('[', 1)[-1]
    data_str = data_str.rsplit(']', 1)[0]
    data_str = data_str.split('][')
    for jsonStr in data_str:
        jsonStr = '[' + jsonStr + ']'
        temp_data = json.loads(jsonStr)
        for each in temp_data:
            data.append(each)
What is wrong?
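For what it's worth, the pasted output is already a single valid JSON document, so there should be no need to split the raw text on brackets; json.load can parse it directly and the records live under the top-level "data" key. A minimal sketch of that approach (the selected columns are only an example of how to flatten the nested attributes):
import csv
import json
from pathlib import Path

with open(Path('fre.json')) as json_file:
    payload = json.load(json_file)   # one document, not several concatenated arrays

rows = []
for item in payload['data']:
    attrs = item['attributes']
    rows.append({
        'id': item['id'],
        'userLabel': attrs.get('userLabel'),
        'operationState': attrs.get('operationState'),
        'serviceClass': attrs.get('serviceClass'),
        'utilizationPercent': attrs.get('utilizationData', {}).get('utilizationPercent'),
    })

with open('fre.csv', 'w', newline='') as f_output:
    writer = csv.DictWriter(f_output, fieldnames=list(rows[0].keys()))
    writer.writeheader()
    writer.writerows(rows)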

Is there a way to add curly brackets around a list of dictionaries already existing within a JSON file?

I currently have two JSONs that I want to merge into one singular JSON; additionally, I want to add in a slight change.
Firstly, these are the two JSONs in question.
An intents JSON:
[
{
"ID": "G1",
"intent": "password_reset",
"examples": [
{
"text": "I forgot my password"
},
{
"text": "I can't log in"
},
{
"text": "I can't access the site"
},
{
"text": "My log in is failing"
},
{
"text": "I need to reset my password"
}
]
},
{
"ID": "G2",
"intent": "account_closure",
"examples": [
{
"text": "I want to close my account"
},
{
"text": "I want to terminate my account"
}
]
},
{
"ID": "G3",
"intent": "account_creation",
"examples": [
{
"text": "I want to open an account"
},
{
"text": "Create account"
}
]
},
{
"ID": "G4",
"intent": "complaint",
"examples": [
{
"text": "A member of staff was being rude"
},
{
"text": "I have a complaint"
}
]
}
]
and an entities JSON:
[
{
"ID": "K1",
"entity": "account_type",
"values": [
{
"type": "synonyms",
"value": "business",
"synonyms": [
"corporate"
]
},
{
"type": "synonyms",
"value": "personal",
"synonyms": [
"vanguard",
"student"
]
}
]
},
{
"ID": "K2",
"entity": "beverage",
"values": [
{
"type": "synonyms",
"value": "hot",
"synonyms": [
"heated",
"warm"
]
},
{
"type": "synonyms",
"value": "cold",
"synonyms": [
"ice",
"freezing"
]
}
]
}
]
The expected outcome is to create a JSON file that mimics this structure:
{
"intents": [
{
"intent": "password_reset",
"examples": [
{
"text": "I forgot my password"
},
{
"text": "I want to reset my password"
}
],
"description": "Reset a user password"
}
],
"entities": [
{
"entity": "account_type",
"values": [
{
"type": "synonyms",
"value": "business",
"synonyms": [
"company",
"corporate",
"enterprise"
]
},
{
"type": "synonyms",
"value": "personal",
"synonyms": []
}
],
"fuzzy_match": true
}
],
"metadata": {
"api_version": {
"major_version": "v2",
"minor_version": "2018-11-08"
}
},
"dialog_nodes": [
{
"type": "standard",
"title": "anything_else",
"output": {
"generic": [
{
"values": [
{
"text": "I didn't understand. You can try rephrasing."
},
{
"text": "Can you reword your statement? I'm not understanding."
},
{
"text": "I didn't get your meaning."
}
],
"response_type": "text",
"selection_policy": "sequential"
}
]
},
"conditions": "anything_else",
"dialog_node": "Anything else",
"previous_sibling": "node_4_1655399659061",
"disambiguation_opt_out": true
},
{
"type": "event_handler",
"output": {
"generic": [
{
"title": "What type of account do you hold with us?",
"options": [
{
"label": "Personal",
"value": {
"input": {
"text": "personal"
}
}
},
{
"label": "Business",
"value": {
"input": {
"text": "business"
}
}
}
],
"response_type": "option"
}
]
},
"parent": "slot_9_1655398217028",
"event_name": "focus",
"dialog_node": "handler_6_1655398217052",
"previous_sibling": "handler_7_1655398217052"
},
{
"type": "event_handler",
"output": {},
"parent": "slot_9_1655398217028",
"context": {
"account_type": "#account_type"
},
"conditions": "#account_type",
"event_name": "input",
"dialog_node": "handler_7_1655398217052"
},
{
"type": "standard",
"title": "business_account",
"output": {
"generic": [
{
"values": [
{
"text": "We have notified your corporate security team, they will be in touch to reset your password."
}
],
"response_type": "text",
"selection_policy": "sequential"
}
]
},
"parent": "node_3_1655397279884",
"next_step": {
"behavior": "jump_to",
"selector": "body",
"dialog_node": "node_4_1655399659061"
},
"conditions": "#account_type:business",
"dialog_node": "node_1_1655399028379",
"previous_sibling": "node_3_1655399027429"
},
{
"type": "standard",
"title": "intent_collection",
"output": {
"generic": [
{
"values": [
{
"text": "Thank you for confirming that you want to reset your password."
}
],
"response_type": "text",
"selection_policy": "sequential"
}
]
},
"next_step": {
"behavior": "jump_to",
"selector": "body",
"dialog_node": "node_3_1655397279884"
},
"conditions": "#password_reset",
"dialog_node": "node_3_1655396920143",
"previous_sibling": "Welcome"
},
{
"type": "frame",
"title": "account_type_confirmation",
"output": {
"generic": [
{
"values": [
{
"text": "Thank you"
}
],
"response_type": "text",
"selection_policy": "sequential"
}
]
},
"parent": "node_3_1655396920143",
"context": {},
"next_step": {
"behavior": "skip_user_input"
},
"conditions": "#password_reset",
"dialog_node": "node_3_1655397279884"
},
{
"type": "standard",
"title": "personal_account",
"output": {
"generic": [
{
"values": [
{
"text": "We have sent you an email with a password reset link."
}
],
"response_type": "text",
"selection_policy": "sequential"
}
]
},
"parent": "node_3_1655397279884",
"next_step": {
"behavior": "jump_to",
"selector": "body",
"dialog_node": "node_4_1655399659061"
},
"conditions": "#account_type:personal",
"dialog_node": "node_3_1655399027429"
},
{
"type": "standard",
"title": "reset_confirmation",
"output": {
"generic": [
{
"values": [
{
"text": "Do you need assistance with anything else today?"
}
],
"response_type": "text",
"selection_policy": "sequential"
}
]
},
"digress_in": "does_not_return",
"dialog_node": "node_4_1655399659061",
"previous_sibling": "node_3_1655396920143"
},
{
"type": "slot",
"output": {},
"parent": "node_3_1655397279884",
"variable": "$account_type",
"dialog_node": "slot_9_1655398217028",
"previous_sibling": "node_1_1655399028379"
},
{
"type": "standard",
"title": "welcome",
"output": {
"generic": [
{
"values": [
{
"text": "Hello. How can I help you?"
}
],
"response_type": "text",
"selection_policy": "sequential"
}
]
},
"conditions": "welcome",
"dialog_node": "Welcome"
}
],
"counterexamples": [],
"system_settings": {
"off_topic": {
"enabled": true
},
"disambiguation": {
"prompt": "Did you mean:",
"enabled": true,
"randomize": true,
"max_suggestions": 5,
"suggestion_text_policy": "title",
"none_of_the_above_prompt": "None of the above"
},
"human_agent_assist": {
"prompt": "Did you mean:"
},
"intent_classification": {
"training_backend_version": "v2"
},
"spelling_auto_correct": true
},
"learning_opt_out": false,
"name": "Reset Password",
"language": "en",
"description": "Basic Password Reset Request"
}
So what I am missing in my original files, is essentially:
"intents":
and for the entities file:
"entities"
at the start of each list of dictionaries.
Additionally, I would need to wrap the whole thing in curly braces to comply with json formatting.
As seen, the final goal is not just appending these two to one another; the file technically continues with some other JSON code that I have yet to write and deal with.
My question now is as follows: by what method can I either add in these words and the braces to the individual files and then combine them into a singular JSON, or alternatively, by what method can I read in these files and combine them with the changes all in one go?
The new output file closing on a curly brace after the entities list of dicts is an acceptable outcome for me at this time, so that I can continue to make changes and hopefully learn from this how to make such changes in the future when I get there.
TIA
JSON is just a string format: you load it into the language's native structures (in Python, lists and dicts), do what you need, then dump it back. So you don't "add strings" or "add brackets"; you modify the structure.
import json

file = 'intents.txt'
intents = json.load(open(file))    # load a list
file = 'entities.txt'
entities = json.load(open(file))   # load a list

# create a dict
content = {
    "intents": intents,
    "entities": entities
}

json.dump(content, open('combined.json', 'w'))  # write the merged structure to a new file
If you're reading all the JSON in as a string, you can just prepend '{"intents":' to the start and append a closing "}".
myJson = "your json string"
myWrappedJson = '{"intents":' + myJson + "}"
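Applying that same string-wrapping idea to both files and then validating the result might look like this rough sketch (the file names are placeholders):
import json

with open('intents.json') as f:
    intents_str = f.read()
with open('entities.json') as f:
    entities_str = f.read()

combined_str = '{"intents": ' + intents_str + ', "entities": ' + entities_str + '}'
json.loads(combined_str)   # raises a ValueError if the concatenation is not valid JSON

with open('combined.json', 'w') as f:
    f.write(combined_str)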

Compare keys value on two list of dict

I'm trying to compare the contents of the dictionaries in 2 lists of dicts. I have data like this:
data1 = [
{
"name": "MoreDependentsInd",
"description": "Form 1040-SP Indicator",
"data_type": "CheckboxType",
"xpath": "",
},
{
"name": "DependentFirstNm",
"description": "Form 1040-SR Indicator",
"data_type": "PersonNameControlType",
"xpath": "",
},
]
data2 = [
{
"id": 29,
"kind": "IN",
"container": 1,
"content": "null",
"uid": "IRS 1040-DependentDetail",
"title": "DependentDetail",
"display_name": "null",
"description": "null",
"extra_info": {
"kind": "null",
"include": {
"fields": [
{
"kind": "PersonFirstNameType",
"name": "DependentFirstNm",
"annotation": {
"documentation": {
"description": "Dependent First Name",
"line_number": "(1)",
}
},
"restriction": "null",
},
{
"kind": "PersonLastNameType",
"name": "DependentLastNm",
"annotation": {
"documentation": {
"description": "Dependent Last Name",
"line_number": "(1)",
}
},
"restriction": "null",
},
{
"kind": "PersonNameControlType",
"name": "DependentNameControlTxt",
"annotation": {
"documentation": {
"description": "Dependent Name Control",
"line_number": "(1)",
}
},
"restriction": "null",
},
{
"kind": "null",
"name": "IdentityProtectionPIN",
"annotation": {
"documentation": {
"description": "Dependent/Qualifying Child Identity Protection PIN",
"line_number": "(2)",
}
},
"restriction": {"rule": "null"},
},
{
"kind": "null",
"name": "DependentRelationshipCd",
"annotation": {
"documentation": {
"description": "Dependent Relationship Code",
"line_number": "(3)",
}
},
"restriction": {
"rule": {
"kind": "TextType",
"enumeration": [
"SON",
"DAUGHTER",
"STEPCHILD",
"FOSTER CHILD",
"BROTHER",
"SISTER",
"STEPBROTHER",
"STEPSISTER",
"HALF BROTHER",
"HALF SISTER",
"GRANDCHILD",
"NIECE",
"NEPHEW",
"PARENT",
"GRANDPARENT",
"AUNT",
"UNCLE",
"OTHER",
"NONE",
],
}
},
},
{
"kind": "PersonFirstNameType",
"name": "DependentFirstNm",
"annotation": {
"documentation": {
"description": "Dependent First Name",
"line_number": "(1)",
}
},
"restriction": "null",
},
{
"kind": "PersonLastNameType",
"name": "DependentLastNm",
"annotation": {
"documentation": {
"description": "Dependent Last Name",
"line_number": "(1)",
}
},
"restriction": "null",
},
{
"kind": "PersonNameControlType",
"name": "DependentNameControlTxt",
"annotation": {
"documentation": {
"description": "Dependent Name Control",
"line_number": "(1)",
}
},
"restriction": "null",
},
{
"kind": "null",
"name": "IdentityProtectionPIN",
"annotation": {
"documentation": {
"description": "Dependent/Qualifying Child Identity Protection PIN",
"line_number": "(2)",
}
},
"restriction": {"rule": "null"},
},
{
"kind": "null",
"name": "DependentRelationshipCd",
"annotation": {
"documentation": {
"description": "Dependent Relationship Code",
"line_number": "(3)",
}
},
"restriction": {
"rule": {
"kind": "TextType",
"enumeration": [
"SON",
"DAUGHTER",
"STEPCHILD",
"FOSTER CHILD",
"BROTHER",
"SISTER",
"STEPBROTHER",
"STEPSISTER",
"HALF BROTHER",
"HALF SISTER",
"GRANDCHILD",
"NIECE",
"NEPHEW",
"PARENT",
"GRANDPARENT",
"AUNT",
"UNCLE",
"OTHER",
"NONE",
],
}
},
},
{
"kind": "PersonFirstNameType",
"name": "DependentFirstNm",
"annotation": {
"documentation": {
"description": "Dependent First Name",
"line_number": "(1)",
}
},
"restriction": "null",
},
{
"kind": "PersonLastNameType",
"name": "DependentLastNm",
"annotation": {
"documentation": {
"description": "Dependent Last Name",
"line_number": "(1)",
}
},
"restriction": "null",
},
{
"kind": "PersonNameControlType",
"name": "DependentNameControlTxt",
"annotation": {
"documentation": {
"description": "Dependent Name Control",
"line_number": "(1)",
}
},
"restriction": "null",
},
{
"kind": "null",
"name": "IdentityProtectionPIN",
"annotation": {
"documentation": {
"description": "Dependent/Qualifying Child Identity Protection PIN",
"line_number": "(2)",
}
},
"restriction": {"rule": "null"},
},
{
"kind": "null",
"name": "DependentRelationshipCd",
"annotation": {
"documentation": {
"description": "Dependent Relationship Code",
"line_number": "(3)",
}
},
"restriction": {
"rule": {
"kind": "TextType",
"enumeration": [
"SON",
"DAUGHTER",
"STEPCHILD",
"FOSTER CHILD",
"BROTHER",
"SISTER",
"STEPBROTHER",
"STEPSISTER",
"HALF BROTHER",
"HALF SISTER",
"GRANDCHILD",
"NIECE",
"NEPHEW",
"PARENT",
"GRANDPARENT",
"AUNT",
"UNCLE",
"OTHER",
"NONE",
],
}
},
},
],
"reference": "null",
},
"annotation": "null",
"max_occurs": "100",
"min_occurs": "0",
"restriction": "null",
},
"instruction": "null",
"created_at": "2022-04-26T09:53:36.426118Z",
"updated_at": "2022-04-26T09:53:36.426203Z",
},
{
"id": 30,
"kind": "IN",
"container": 1,
"content": "null",
"uid": "IRS 1040-MoreDependentsInd",
"title": "MoreDependentsInd",
"display_name": "null",
"description": "null",
"extra_info": {
"kind": "CheckboxType",
"include": "null",
"annotation": {
"documentation": {
"description": "More Dependents Indicator",
"line_number": "null",
}
},
"max_occurs": "null",
"min_occurs": "0",
"restriction": "null",
},
"instruction": "null",
"created_at": "2022-04-26T09:53:36.427552Z",
"updated_at": "2022-04-26T09:53:36.427647Z",
},
]
for i in data1:
    for j in data2:
        if i["name"] == j["title"]:
            print("success")
The result is a success, but my code has a deficiency: if it processes a lot of data, the performance will be slow. How do I fix that? And what if a value, such as the data type, sits under a key whose location we don't know?
For example, I take the data_type value from data1 and compare it with CheckboxType:
"extra_info": {
"kind": "CheckboxType",
"include": "null",
"annotation": {
"documentation": {
"description": "More Dependents Indicator",
"line_number": "null",
}
},
which comes from data2 at index 1 (data2[1]), but the value CheckboxType also exists under another key in data2 at another index.
Thanks!
You can do the test in O(n) instead of O(n^2) by building sets of name and title (which is O(n)) and then taking their intersection (which is also O(n) now that they're sets):
>>> {i["name"] for i in data1} & {i["title"] for i in data2}
{'MoreDependentsInd'}
Non-empty sets are truthy so you can use this in a conditional if you don't care about what the intersection is:
>>> if {i["name"] for i in data1} & {i["title"] for i in data2}:
...     print("success")
...
success
If you want to look at all the string values without having to know that name and title are the intersecting keys, use another comprehension to put all the top-level string values into the two sets:
>>> {v for i in data1 for v in i.values() if isinstance(v, str)} & {v for i in data2 for v in i.values() if isinstance(v, str)}
{'MoreDependentsInd'}
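If the matching value can be buried at an arbitrary depth (as CheckboxType is inside extra_info), one option is to collect every string value recursively before intersecting, so the key names no longer matter. A minimal sketch of that idea:
def all_string_values(node):
    # Yield every string value found anywhere inside nested dicts and lists
    if isinstance(node, dict):
        for value in node.values():
            yield from all_string_values(value)
    elif isinstance(node, list):
        for item in node:
            yield from all_string_values(item)
    elif isinstance(node, str):
        yield node

common = set(all_string_values(data1)) & set(all_string_values(data2))
# e.g. contains 'MoreDependentsInd', 'DependentFirstNm' and 'CheckboxType' for the sample data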

Remove Non-Updated Text/Object In JSON File With Python

This script updates the JSON file with the user's input but doesn't remove the placeholder text Screenshot URL X (where X is a number from 1 to 5). I want to remove the placeholder entries that haven't been updated by the script with user input. It was working before but now it isn't, and I can't figure out why. Any help will be welcomed! Thanks!
Code:
import json

# Load the data
file_name = 'path/to/json/file'
with open(file_name) as fh:
    full_data = json.load(fh)

# Dig into the data to find the screenshots
screen_shots = full_data['tabs'][0]['views'][1]['screenshots']

# Loop over each screen shot, updating each one
print("Press return/enter to enter another url or press it again with nothing entered to stop asking and continue the script.")
for number, screen_shot in enumerate(screen_shots):
    new_url = input("Screenshot URL: ").strip()
    if new_url:
        # Updating the data here will also update the 'full_data' object
        # as we are just referencing a part of it, not making copies
        screen_shot.update({"url": new_url, "fullSizeURL": new_url})
    else:
        break

# Remove all entries which we did not update
screen_shots = screen_shots[:number]

# Save the data
with open(file_name, 'w') as fh:
    json.dump(full_data, fh, indent=4)
JSON File:
{
"minVersion": "0.1",
"headerImage": "Header Image URL",
"tintColor": "",
"tabs": [
{
"tabname": "Details",
"views": [
{
"title": "Package Name",
"useBoldText": true,
"useBottomMargin": false,
"class": "DepictionSubheaderView"
},
{
"itemCornerRadius": 6,
"itemSize": "{160, 275.41333333333336}",
"screenshots": [
{
"accessibilityText": "Screenshot",
"url": "Screenshot URL 1",
"fullSizeURL": "Screenshot URL 1"
},
{
"accessibilityText": "Screenshot",
"url": "Screenshot URL 2",
"fullSizeURL": "Screenshot URL 2"
},
{
"accessibilityText": "Screenshot",
"url": "Screenshot URL 3",
"fullSizeURL": "Screenshot URL 3"
},
{
"accessibilityText": "Screenshot",
"url": "Screenshot URL 4",
"fullSizeURL": "Screenshot URL 4"
},
{
"accessibilityText": "Screenshot",
"url": "Screenshot URL 5",
"fullSizeURL": "Screenshot URL 5"
}
],
"class": "DepictionScreenshotsView"
},
{
"markdown": "This is a description.",
"useSpacing": true,
"class": "DepictionMarkdownView"
},
{
"class": "DepictionSeparatorView"
},
{
"title": "Known Issues",
"class": "DepictionHeaderView"
},
{
"markdown": "None",
"useSpacing": true,
"class": "DepictionMarkdownView"
},
{
"class": "DepictionSeparatorView"
},
{
"title": "Latest Version",
"class": "DepictionHeaderView"
},
{
"title": "Version",
"text": "1.2",
"class": "DepictionTableTextView"
},
{
"title": "Released",
"text": "1/1/11",
"class": "DepictionTableTextView"
},
{
"title": "Price",
"text": "Free",
"class": "DepictionTableTextView"
},
{
"title": "Developer",
"text": "Dev",
"class": "DepictionTableTextView"
},
{
"title": "Contact Support",
"action": "",
"class": "DepictionTableButtonView"
},
{
"spacing": 16,
"class": "DepictionSpacerView"
},
{
"spacing": 20,
"class": "DepictionSpacerView"
}
],
"class": "DepictionStackView"
},
{
"tabname": "Changelog",
"views": [
{
"title": "1.2",
"useBoldText": true,
"useBottomMargin": true,
"class": "DepictionSubheaderView"
},
{
"markdown": "\t\n\u2022 Initial Release",
"useSpacing": false,
"class": "DepictionMarkdownView"
},
{
"markdown": "<small style=\"color: #999; margin-top: -8px;\">Released 1/1/11</small>",
"useRawFormat": true,
"class": "DepictionMarkdownView"
}
],
"class": "DepictionStackView"
}
],
"class": "DepictionTabView"
}
Does this work?
import json

file_name = "test.json"
with open(file_name) as fh:
    full_data = json.load(fh)

# Dig into the data to find the screenshots
screen_shots = full_data['tabs'][0]['views'][1]['screenshots']

# Loop over each screen shot, updating each one
print("Press return/enter to enter another url or press it again with nothing entered to stop asking and continue the script.")
for number, screen_shot in enumerate(screen_shots):
    new_url = input("Screenshot URL: ").strip()
    if new_url:
        # Updating the data here will also update the 'full_data' object
        # as we are just referencing a part of it, not making copies
        screen_shot.update({"url": new_url, "fullSizeURL": new_url})
    else:
        break

# Remove all entries which we did not update
screen_shots = screen_shots[:number]
full_data['tabs'][0]['views'][1]['screenshots'] = screen_shots  # this line removes the placeholder entries that were not updated by the user

# Save the data
with open(file_name, 'w') as fh:
    json.dump(full_data, fh, indent=4)
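A minor in-place variation, for what it's worth: deleting the leftover entries inside the loop keeps full_data pointing at the same list object, so no reassignment line is needed afterwards. Sketch:
for number, screen_shot in enumerate(screen_shots):
    new_url = input("Screenshot URL: ").strip()
    if new_url:
        screen_shot.update({"url": new_url, "fullSizeURL": new_url})
    else:
        del screen_shots[number:]   # drop everything from the first blank entry onwards, in place
        break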

Convert nested JSON to CSV in Python 2.7

I have seen a lot of threads but have been unable to find a solution for my case. I want to convert a nested JSON to CSV in Python 2.7. The sample JSON file is below:
sample.json # My JSON file that mainly contains a firewall rule
"rulebase": [
{
"from": 1,
"name": "test-policy",
"rulebase": [
{
"action": "6c488338-8eec-4103-ad21-cd461ac2c473",
"action-settings": {},
"comments": "FYI",
"content": [
"97aeb369-9aea-11d5-bd16-0090272ccb30"
],
"content-direction": "any",
"content-negate": false,
"custom-fields": {
"field-1": "",
"field-2": "",
"field-3": ""
},
"destination": [
"97aeb369-9aea-11d5-bd16-0090272ccb30"
],
"destination-negate": false,
"domain": {
"domain-type": "domain",
"name": "SMC User",
"uid": "41e821a0-3720-11e3-aa6e-0800200c9fde"
},
"enabled": true,
"hits": {
"first-date": {
"iso-8601": "2016-09-04T22:21-0500",
"posix": 1473045718000
},
"last-date": {
"iso-8601": "2018-03-19T03:37-0500",
"posix": 1521448660000
},
"level": "low",
"percentage": "0%",
"value": 36737474
},
"install-on": [
"6c488338-8eec-4103-ad21-cd461ac2c476"
],
"meta-info": {
"creation-time": {
"iso-8601": "2016-09-15T12:42-0500",
"posix": 1473961370382
},
"creator": "System",
"last-modifier": "admin",
"last-modify-time": {
"iso-8601": "2018-08-30T18:36-0500",
"posix": 1535672186192
},
"lock": "unlocked",
"validation-state": "ok"
},
"rule-number": 1,
"service": [
"ef245528-9a3d-11d6-9eaa-3e5a6fdd6a6a",
"dff4f7ba-9a3d-11d6-91c1-3e5a6fdd5151",
"24bee257-6b37-49bb-99aa-557d993a0e48",
"97aeb45c-9aea-11d5-bd16-0090272ccb30",
"97aeb471-9aea-11d5-bd16-0090272ccb30"
],
"service-negate": false,
"source": [
"697bb7e0-0dfe-4070-a21a-68858daae98c",
"349fb05c-99b2-4fb2-aea6-7b447d0e661c"
],
"source-negate": true,
"time": [
"97aeb369-9aea-11d5-bd16-0090272ccb30"
],
"track": {
"accounting": false,
"alert": "none",
"per-connection": true,
"per-session": false,
"type": "598ead32-aa42-4615-90ed-f51a5928d41d"
},
"type": "access-rule",
"uid": "2da21174-0af8-4b5b-b02e-2957a24d70e1",
"vpn": [
"97aeb369-9aea-11d5-bd16-0090272ccb30"
]
},
{
"action": "6c488338-8eec-4103-ad21-cd461ac2c472",
"action-settings": {
"enable-identity-captive-portal": false
},
"comments": "",
"content": [
"97aeb369-9aea-11d5-bd16-0090272ccb30"
],
"content-direction": "any",
"content-negate": false,
"custom-fields": {
"field-1": "",
"field-2": "",
"field-3": ""
},
"destination": [
"b17d4573-ad1a-4126-ae6d-c874ea919cda",
"5b78417c-64ed-4566-9c76-e4e1af25a9ae",
"acb8d280-2ec4-46b1-be9f-c676fa255fb5"
],
"destination-negate": false,
"domain": {
"domain-type": "domain",
"name": "SMC User",
"uid": "41e821a0-3720-11e3-aa6e-0800200c9fde"
},
"enabled": true,
"hits": {
"level": "zero",
"percentage": "0%",
"value": 0
},
"install-on": [
"6c488338-8eec-4103-ad21-cd461ac2c476"
],
"meta-info": {
"creation-time": {
"iso-8601": "2018-07-25T16:27-0500",
"posix": 1532554044090
},
"creator": "admin",
"last-modifier": "admin",
"last-modify-time": {
"iso-8601": "2018-08-31T16:00-0500",
"posix": 1535749228997
},
"lock": "unlocked",
"validation-state": "ok"
},
"name": "tom#gmail.com",
"rule-number": 2,
"service": [
"18ec9eaa-1657-4240-ab97-5f234623336b"
],
"service-negate": false,
"source": [
"293ef5ba-5235-464e-9247-bda26229a998",
"b503873f-0c5f-4798-b87a-dd6ed4561b40"
],
"source-negate": false,
"time": [
"97aeb369-9aea-11d5-bd16-0090272ccb30"
],
"track": {
"accounting": false,
"alert": "none",
"per-connection": true,
"per-session": false,
"type": "598ead32-aa42-4615-90ed-f51a5928d41d"
},
"type": "access-rule",
"uid": "fcc5a2c8-3a78-4cc5-9fd3-e7bd59eb36ba",
"vpn": [
"97aeb369-9aea-11d5-bd16-0090272ccb30"
]
},
{
"action": "6c488338-8eec-4103-ad21-cd461ac2c472",
"action-settings": {
"enable-identity-captive-portal": false
},
"comments": "FYI",
"content": [
"97aeb369-9aea-11d5-bd16-0090272ccb30"
],
"content-direction": "any",
"content-negate": false,
"custom-fields": {
"field-1": "",
"field-2": "",
"field-3": ""
},
"destination": [
"b17d4573-ad1a-4126-ae6d-c874ea919cda",
"5b78417c-64ed-4566-9c76-e4e1af25a9ae",
"acb8d280-2ec4-46b1-be9f-c676fa255fb5"
],
"destination-negate": false,
"domain": {
"domain-type": "domain",
"name": "SMC User",
"uid": "41e821a0-3720-11e3-aa6e-0800200c9fde"
},
"enabled": true,
"hits": {
"first-date": {
"iso-8601": "2018-03-14T14:55-0500",
"posix": 1521057347000
},
"last-date": {
"iso-8601": "2018-03-19T03:58-0500",
"posix": 1521449932000
},
"level": "low",
"percentage": "0%",
"value": 11801
},
"install-on": [
"6c488338-8eec-4103-ad21-cd461ac2c476"
],
"meta-info": {
"creation-time": {
"iso-8601": "2018-03-14T09:47-0500",
"posix": 1521038846894
},
"creator": "System",
"last-modifier": "admin",
"last-modify-time": {
"iso-8601": "2018-08-31T16:17-0500",
"posix": 1535750234317
},
"lock": "unlocked",
"validation-state": "ok"
},
"name": "tom1#gmail.com",
}
From the above JSON file, my requirement is basically to write keys such as {uid, name, rule-number, comments, destination, source, hits.last-date} with their values to CSV.
With the code below I was able to generate the CSV, but it seems to only write the header, nothing else.
import json
import csv

def get_leaves(item, key=None):
    if isinstance(item, dict):
        leaves = []
        for i in item.keys():
            leaves.extend(get_leaves(item[i], i))
        return leaves
    elif isinstance(item, list):
        leaves = []
        for i in item:
            leaves.extend(get_leaves(i, key))
        return leaves
    else:
        return [(key, item)]

with open('sample.json') as f_input, open('output.csv', 'wb') as f_output:
    csv_output = csv.writer(f_output)
    write_header = True
    for entry in json.load(f_input):
        leaf_entries = sorted(get_leaves(entry))
        if write_header:
            csv_output.writerow([k for k, v in leaf_entries])
            write_header = False
        csv_output.writerow([v for k, v in leaf_entries])
Please guide me as I am very much new to Python scripting.
You're pretty much there. You're just calling csv_output.writerow() on the list you created with [v for k, v in leaf_entries]. You should instead call csv_output.writerows().
Information on these calls is available here:
https://docs.python.org/3/library/csv.html#writer-objects
Just figured it out. The code below is working properly and generating valid CSV data from my complex JSON file.
# Generate CSV from JSON
import csv
import json

fw_access_layers_data = open('show-access-layers.json', 'r')
fw_access_layers_parsed = json.loads(fw_access_layers_data.read())
access_layers = fw_access_layers_parsed['access-layers']

fw_access_layers_csv = open('show-access-layers.csv', 'w')
csvwriter = csv.writer(fw_access_layers_csv)

count = 0
for access_layer in access_layers:
    if count == 0:
        header = access_layer.keys()
        csvwriter.writerow(header)
        count += 1
    csvwriter.writerow(access_layer.values())

fw_access_layers_csv.close()
Thanks for your help mates.
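If the goal is specifically the per-rule fields listed above (uid, name, rule-number, comments, destination, source, hits.last-date), a DictWriter over the inner rulebase list is another option. This is only a sketch against the sample structure shown, and it assumes the file parses as one complete JSON object:
import csv
import json

with open('sample.json') as f_input:
    policy = json.load(f_input)

fields = ['uid', 'name', 'rule-number', 'comments', 'destination', 'source', 'hits.last-date']

with open('rules.csv', 'wb') as f_output:   # 'wb' for the csv module on Python 2.7
    writer = csv.DictWriter(f_output, fieldnames=fields)
    writer.writeheader()
    for layer in policy['rulebase']:
        for rule in layer.get('rulebase', []):
            writer.writerow({
                'uid': rule.get('uid', ''),
                'name': rule.get('name', ''),
                'rule-number': rule.get('rule-number', ''),
                'comments': rule.get('comments', ''),
                'destination': ';'.join(rule.get('destination', [])),
                'source': ';'.join(rule.get('source', [])),
                'hits.last-date': rule.get('hits', {}).get('last-date', {}).get('iso-8601', ''),
            })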
