Python: for loop stops at first row when reading from csv

With this code, I can get it to run through the first row of a CSV and post the content. Without the for loop it also works great, and with a simple print statement I am able to print out all of the rows in the CSV. Where I'm getting stuck is how to get this to loop through my CSV (2300 rows) and replace two inline variables. I've tried a couple of iterations of this, moving statements around, etc.; this is my latest attempt.
from __future__ import print_function
import arcrest
import json
import csv

if __name__ == "__main__":
    username = "uid"
    password = "pwd"
    portalId = "id"
    url = "http://www.arcgis.com/"
    thumbnail_url = ""
    with open('TILES.csv') as csvfile:
        inputFile = csv.DictReader(csvfile)
        x = 0  # counter to display file count
        for row in inputFile:
            if x == 0:
                map_json = {
                    "operationalLayers": [
                        {
                            "templateUrl": "https://{subDomain}.tiles.mapbox.com/v4/abc.GRSM_"+row['ID']+"_pink/{level}/{col}/{row}.png?access_token=pk.secret",
                            "id": "GRSM_SPECIES_OBSERVATIONS_MAXENT_5733",
                            "type": "WebTiledLayer",
                            "layerType": "WebTiledLayer",
                            "title": row['Species']+" Prediction",
                            "copyright": "GRSM",
                            "fullExtent": {
                                "xmin": -20037508.342787,
                                "ymin": -20037508.34278,
                                "xmax": 20037508.34278,
                                "ymax": 20037508.342787,
                                "spatialReference": {
                                    "wkid": 102100
                                }
                            },
                            "subDomains": [
                                "a",
                                "b",
                                "c",
                                "d"
                            ],
                            "visibility": True,
                            "opacity": 1
                        }
                    ],
                    "baseMap": {
                        "baseMapLayers": [
                            {
                                "id": "defaultBasemap",
                                "layerType": "ArcGISTiledMapServiceLayer",
                                "opacity": 1,
                                "visibility": True,
                                "url": "http://services.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer"
                            }
                        ],
                        "title": "Topographic"
                    },
                    "spatialReference": {
                        "wkid": 102100,
                        "latestWkid": 3857
                    },
                    "version": "2.0"
                }
                securityHandler = arcrest.AGOLTokenSecurityHandler(username,
                                                                   password)
                # Create the administration connection
                admin = arcrest.manageorg.Administration(url, securityHandler)
                # Access the content properties to add the item
                content = admin.content
                # Get the user
                user = content.users.user()
                # Provide the item parameters
                itemParams = arcrest.manageorg.ItemParameter()
                itemParams.title = "GRSM_"+row['Species']
                itemParams.thumbnailurl = ""
                itemParams.type = "Web Map"
                itemParams.snippet = "Maxent Output: "+row['Species']
                itemParams.licenseInfo = "License"
                itemParams.accessInformation = "Credits"
                itemParams.tags = "Maxent"+row['Species']
                itemParams.description = "This map depicts the tiled output of a Maxent model depicting the probability of occurrence of "+row['Species']+". An in-line legend is not available for this map. "
                itemParams.extent = "-84.1076,35.2814,-82.9795, 35.8366"
                # Add the Web Map
                print(user.addItem(itemParameters=itemParams,
                                   overwrite=True,
                                   text=json.dumps(row)))
            x = x + 1
Here's the csv:
Species,ID
Abacion_magnum,0000166
Abaeis_nicippe,0000169
Abagrotis_alternata,0000172
Abies_fraseri,0000214
Ablabesmyia_mallochi,0000223
Abrostola_ovalis,0000232
Acalypha_rhomboidea,0000253
Acanthostigma_filiforme,0000296
Acanthostigma_minutum,0000297
Acanthostigma_multiseptatum,0000298
Acentrella_ampla,0000314
Acer_negundo,0000330
Acer_pensylvanicum,0000333
Acer_rubrum_v_rubrum,0000337
Acer_rubrum_v_trilobum,0000338
Acer_saccharum,0000341
Acer_spicatum,0000343

I think your indentation is wrong; the only things inside your for loop are the if and the JSON:
if x == 0:
    map_json = {
        "operationalLayers": [
            {
                "templateUrl": "https://{subDomain}.tiles.mapbox.com/v4/abc.GRSM_"+row['ID']+"_pink/{level}/{col}/{row}.png?access_token=pk.secret",
                "id": "GRSM_SPECIES_OBSERVATIONS_MAXENT_5733",
                "type": "WebTiledLayer",
                "layerType": "WebTiledLayer",
                "title": row['Species']+" Prediction",
                "copyright": "GRSM",
                "fullExtent": {
                    "xmin": -20037508.342787,
                    "ymin": -20037508.34278,
                    "xmax": 20037508.34278,
                    "ymax": 20037508.342787,
                    "spatialReference": {
                        "wkid": 102100
                    }
                },
                "subDomains": [
                    "a",
                    "b",
                    "c",
                    "d"
                ],
                "visibility": True,
                "opacity": 1
            }
        ],
        "baseMap": {
            "baseMapLayers": [
                {
                    "id": "defaultBasemap",
                    "layerType": "ArcGISTiledMapServiceLayer",
                    "opacity": 1,
                    "visibility": True,
                    "url": "http://services.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer"
                }
            ],
            "title": "Topographic"
        },
        "spatialReference": {
            "wkid": 102100,
            "latestWkid": 3857
        },
        "version": "2.0"
    }

The reason you are only getting one row in your result is that the code is contained in an if statement whose condition is x == 0. You set x to 0 outside the for loop and increment it at the end of each iteration, so after the first row x is no longer 0 and the if condition is false.
Try removing the if statement completely and the increment line at the end.
Just use:
for row in inputFile:
    # your code here
This will allow you to loop through the whole csv file.
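For reference, here is a minimal sketch of the corrected loop (my own, not from the answers): the connection setup is moved outside the loop, the counter and the if are gone, and a hypothetical helper build_map_json wraps the big dict literal from the question. It assumes the arcrest calls behave exactly as in the original script; note that the question passed text=json.dumps(row), while this sketch passes the built map JSON instead.
import csv
import json
import arcrest

def build_map_json(row):
    # Hypothetical helper: returns the dict literal from the question,
    # substituting row['ID'] and row['Species'] exactly as before.
    return {
        "operationalLayers": [{
            "templateUrl": ("https://{subDomain}.tiles.mapbox.com/v4/abc.GRSM_"
                            + row['ID'] + "_pink/{level}/{col}/{row}.png?access_token=pk.secret"),
            "title": row['Species'] + " Prediction",
            # ... remaining keys unchanged from the question ...
        }],
        "version": "2.0",
    }

if __name__ == "__main__":
    # Connect once, before the loop
    securityHandler = arcrest.AGOLTokenSecurityHandler("uid", "pwd")
    admin = arcrest.manageorg.Administration("http://www.arcgis.com/", securityHandler)
    user = admin.content.users.user()

    with open('TILES.csv') as csvfile:
        for row in csv.DictReader(csvfile):   # no counter, no if: every row is processed
            map_json = build_map_json(row)
            itemParams = arcrest.manageorg.ItemParameter()
            itemParams.title = "GRSM_" + row['Species']
            itemParams.type = "Web Map"
            itemParams.snippet = "Maxent Output: " + row['Species']
            print(user.addItem(itemParameters=itemParams,
                               overwrite=True,
                               text=json.dumps(map_json)))  # the question used json.dumps(row)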

Related

Python, How to denormalise CSV file to Nested Json

I currently have a CSV file with the header:
productCode | code | dataFields.0.category | dataFields.0.name | dataFields.0.code | ... with dataFields[n] up to 9.
When I convert the CSV to JSON I get:
{
    "Example": [
        {
            "exCode": "example_code",
            "name": "ex",
            "code": "ex_2",
            "dataFields.0.category": "EXAMPLE",
            "dataFields.0.name": "exampl",
            "dataFields.0.code": "exampl",
            "dataFields.0.unit": "v",
            "dataFields.1.category": "EXAMPLE",
            "dataFields.1.name": "exampl2",
            "dataFields.1.code": "exampl2",
            "dataFields.1.unit": "e",
            "dataFields.2.category": "EXAMPLE2",
            "dataFields.2.name": null,
            "dataFields.2.code": null,
            "dataFields.2.unit": "e"
        }
    ]
}
However, I'm trying to convert the CSV to look like:
{
    "Example": [
        {
            "exCode": "example_code",
            "name": "exampl",
            "code": "exampl",
            "dataFields": [
                {
                    "category": "EXAMPLE",
                    "name": "exampl",
                    "code": "exampl",
                    "unit": "v"
                },
                {
                    "category": "EXAMPLE",
                    "name": "exampl2",
                    "code": "exampl2",
                    "unit": "e"
                }
            ]
        }
    ]
}
I have been writing this project in Python without recursion, looking at most two levels deep into the nested JSON, to remove the normalised fields and add denormalised (nested) fields in their place. My main problem is that "dataFields" gets overwritten instead of accumulating multiple elements.
This is what I have so far:
def denormalize_json(json_list):
    for children in json_list:
        for inner_children in json_list[children]:
            # print("Arr: ", inner_children)
            for innest_child in inner_children:
                split_norm = innest_child.split('.')
                if len(split_norm) == 2:  # If it is only a singular nested field
                    # Add correct field
                    changes_arr.append([children, inner_children, split_norm[0], split_norm[1], innest_child])
                elif len(split_norm) == 3:  # If it is normalized with more than one field
                    changes_arr.append([children, inner_children, split_norm[0], split_norm[2], innest_child])
    print(changes_arr)
    # Make changes to json_list
    for i in range(len(changes_arr)):
        changes = changes_arr[i]
        # change inner children to correct fields
        # add correct fields
        try:
            inner_children = changes[1]
            inner_children[changes[2]].append(changes[3].append(inner_children[changes[4]]))
            del(inner_children[changes[4]])
            json_list[changes[0]] = inner_children
        except Exception as e:
            print(e)
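Since no answer is shown for this question, here is one possible approach; a minimal sketch (my own, not from the thread) that groups the dataFields.N.* keys of each flat record into a nested dataFields list, assuming the input has the {"Example": [record, ...]} shape from the first sample above:
import json
import re

def denormalize_record(record):
    # Group keys like 'dataFields.0.category' into a 'dataFields' list of dicts.
    plain = {}
    grouped = {}  # index -> partial dict
    for key, value in record.items():
        match = re.match(r'dataFields\.(\d+)\.(\w+)', key)
        if match:
            idx, field = int(match.group(1)), match.group(2)
            grouped.setdefault(idx, {})[field] = value
        else:
            plain[key] = value
    # Emit the nested list in index order.
    plain['dataFields'] = [grouped[i] for i in sorted(grouped)]
    return plain

# Example usage with the structure shown in the question:
data = {"Example": [{"exCode": "example_code", "name": "ex", "code": "ex_2",
                     "dataFields.0.category": "EXAMPLE", "dataFields.0.name": "exampl",
                     "dataFields.0.code": "exampl", "dataFields.0.unit": "v"}]}
data["Example"] = [denormalize_record(r) for r in data["Example"]]
print(json.dumps(data, indent=2))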

How to write a python code to get all the data from a particular date?

I have a JSON file which has a list of ids and dates. How do I write a Python program to print all the ids for a particular month from the JSON file?
Below is the sample json data
{
    "entities": [
        {
            "Fields": [
                {
                    "Name": "version",
                    "values": [
                        {
                            "value": "Cycle 1"
                        }
                    ]
                },
                {
                    "Name": "subject",
                    "values": [
                        {
                            "value": "1008"
                        }
                    ]
                },
                {
                    "Name": "project",
                    "values": [
                        {}
                    ]
                },
                {
                    "Name": "linkage",
                    "values": [
                        {
                            "value": "N"
                        }
                    ]
                },
                {
                    "Name": "cycle-id",
                    "values": []
                },
                {
                    "Name": "creation-time",
                    "values": [
                        {
                            "value": "2016-07-12"
                        }
                    ]
                },
                {
                    "Name": "id",
                    "values": [
                        {
                            "value": "1"
                        }
                    ]
                }
            ]
        }
    ]
}
I have just tried to load the JSON file with the code below.
import json

f = open('defects-export-0-100.json')
data = json.load(f)
print(data)

# month = str("MM")
month = '09'
defect_items = []
defectIDs = []

for item in data["entities"]:
    for container in item["Fields"]:
        if container["Name"] == "creation-time":
            if container["values"][0]["value"].split("-")[1] == month:
                defect_items.append(item)

for item in defect_items:
    for container in item["Fields"]:
        if container["Name"] == "id":
            defectIDs.append(container["values"][0]["value"])
My desired output: all the IDs whose creation date falls in the chosen month.
The biggest issue is how you're referencing keys in a dictionary. You can get the value at a particular key with:
x = {"key": value}
x["key"]
# value
I've made some assumptions about your data set, but this code works with the sample you gave.
import json

with open("data.txt", "r") as f:
    data = json.load(f)

# month = str("MM")
month = "07"
defect_items = []
defectIDs = []

# Loop through each entity
for item in data["entities"]:
    # Loop through each field
    for container in item["Fields"]:
        # Find the field with the name "creation-time"
        if container["Name"] == "creation-time":
            # Check if the value matches with the desired date
            # Assuming there can only be one value
            if container["values"][0]["value"].split("-")[1] == month:
                defect_items.append(item)

# Loop through the defective items
for item in defect_items:
    # Loop through the fields
    for container in item["Fields"]:
        # Find the field with the name "id"
        if container["Name"] == "id":
            # Grab the value
            # Assuming there can only be one value
            defectIDs.append(container["values"][0]["value"])
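As a small usage note (my addition, not part of the original answer), once the loops finish defectIDs holds the result and can be printed or reused directly:
print(defectIDs)  # for the sample above with month = "07" this prints ['1']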
Once the data is loaded, you can interact with it as you would any Python object. Get all the items with:
items = data['entities']
For the code below to work, create a variable month and set it to a two-digit month string (e.g. month='01' for January) so it exactly matches the month format used in the data.
Then, run the following loop to collect the IDs:
ids = []
for item in items:
    id = None
    time = False
    for container in item['Fields']:
        if container['Name'] == 'creation-time':
            if container['values'] and container['values'][0]['value'].split('-')[1] == month:
                time = True
        if container['Name'] == 'id':
            if container['values']:
                id = container['values'][0]['value']
    if time and id:
        ids.append(id)

Convert multiple strings stored in a variable into a single list in Python

I hope everyone is doing well.
I need a little help: I need to get all the strings from a variable and store them in a single list in Python.
For example -
I have a JSON file from which I am getting ids, and all of the ids end up in a variable called id; when I run print(id) I get the output below:
17298626-991c-e490-bae6-47079c6e2202
17298496-19bd-2f89-7b5f-881921abc632
17298698-3e17-7a9b-b337-aacfd9483b1b
172986ac-d91d-c4ea-2e50-d53700480dd0
172986d0-18aa-6f51-9c62-6cb087ad31e5
172986f4-80f0-5c21-3aee-12f22a5f4322
17298712-a4ac-7b36-08e9-8512fa8322dd
17298747-8cc6-d9d0-8d05-50adf228c029
1729875c-050f-9a99-4850-bb0e6ad35fb0
1729875f-0d50-dc94-5515-b4891c40d81c
17298761-c26b-3ce5-e77e-db412c38a5b4
172987c8-2b5d-0d94-c365-e8407b0a8860
1729881a-e583-2b54-3a52-d092020d9c1d
1729881c-64a2-67cf-d561-6e5e38ed14cb
172987ec-7a20-7eb6-3ebe-a9fb621bb566
17298813-7ac4-258b-d6f9-aaf43f9147b1
17298813-f1ef-d28a-0817-5f3b86c3cf23
17298828-b62b-9ee6-248b-521b0663226e
17298825-7449-2fcb-378e-13671cb4688a
I want all these values to be stored in a single list.
Can someone please help me out with this?
Below is the code I am using:
import json

with open('requests.json') as f:
    data = json.load(f)
    print(type(data))
    for i in data:
        if 'traceId' in i:
            id = i['traceId']
            newid = id.split()
            # print(type(newid))
            print(newid)
And below is what my JSON file looks like:
[
    {
        "id": "376287298-hjd8-jfjb-khkf-6479280283e9",
        "submittedTime": 1591692502558,
        "traceId": "17298626-991c-e490-bae6-47079c6e2202",
        "userName": "ABC",
        "onlyChanged": true,
        "description": "Not Required",
        "startTime": 1591694487929,
        "result": "NONE",
        "state": "EXECUTING",
        "paused": false,
        "application": {
            "id": "16b22a09-a840-f4d9-f42a-64fd73fece57",
            "name": "XYZ"
        },
        "applicationProcess": {
            "id": "dihihdosfj9279278yrie8ue",
            "name": "Deploy",
            "version": 12
        },
        "environment": {
            "id": "fkjdshkjdshglkjdshgldshldsh03r937837",
            "name": "DEV"
        },
        "snapshot": {
            "id": "djnglkfdglki98478yhgjh48yr844h",
            "name": "DEV_snapshot"
        }
    },
    {
        "id": "17298495-f060-3e9d-7097-1f86d5160789",
        "submittedTime": 1591692844597,
        "traceId": "17298496-19bd-2f89-7b5f-881921abc632",
        "userName": "UYT",
        "onlyChanged": true,
        "startTime": 1591692845543,
        "result": "NONE",
        "state": "EXECUTING",
        "paused": false,
        "application": {
            "id": "osfodsho883793hgjbv98r3098w",
            "name": "QA"
        },
        "applicationProcess": {
            "id": "owjfoew028r2uoieroiehojehfoef",
            "name": "EDC",
            "version": 5
        },
        "environment": {
            "id": "16cf69c5-4194-e557-707d-0663afdbceba",
            "name": "DTESTU"
        }
    }
]
This is where I am trying to get the traceId from.
You could use the simple split method like the following:
ids = '''17298626-991c-e490-bae6-47079c6e2202 17298496-19bd-2f89-7b5f-881921abc632 17298698-3e17-7a9b-b337-aacfd9483b1b 172986ac-d91d-c4ea-2e50-d53700480dd0 172986d0-18aa-6f51-9c62-6cb087ad31e5 172986f4-80f0-5c21-3aee-12f22a5f4322 17298712-a4ac-7b36-08e9-8512fa8322dd 17298747-8cc6-d9d0-8d05-50adf228c029 1729875c-050f-9a99-4850-bb0e6ad35fb0 1729875f-0d50-dc94-5515-b4891c40d81c 17298761-c26b-3ce5-e77e-db412c38a5b4 172987c8-2b5d-0d94-c365-e8407b0a8860 1729881a-e583-2b54-3a52-d092020d9c1d 1729881c-64a2-67cf-d561-6e5e38ed14cb 172987ec-7a20-7eb6-3ebe-a9fb621bb566 17298813-7ac4-258b-d6f9-aaf43f9147b1 17298813-f1ef-d28a-0817-5f3b86c3cf23 17298828-b62b-9ee6-248b-521b0663226e 17298825-7449-2fcb-378e-13671cb4688a'''
l = ids.split(" ")
print(l)
This will give the following result. I assumed that the separator is a single space; adjust it as needed:
['17298626-991c-e490-bae6-47079c6e2202', '17298496-19bd-2f89-7b5f-881921abc632', '17298698-3e17-7a9b-b337-aacfd9483b1b', '172986ac-d91d-c4ea-2e50-d53700480dd0', '172986d0-18aa-6f51-9c62-6cb087ad31e5', '172986f4-80f0-5c21-3aee-12f22a5f4322', '17298712-a4ac-7b36-08e9-8512fa8322dd', '17298747-8cc6-d9d0-8d05-50adf228c029', '1729875c-050f-9a99-4850-bb0e6ad35fb0', '1729875f-0d50-dc94-5515-b4891c40d81c', '17298761-c26b-3ce5-e77e-db412c38a5b4', '172987c8-2b5d-0d94-c365-e8407b0a8860', '1729881a-e583-2b54-3a52-d092020d9c1d', '1729881c-64a2-67cf-d561-6e5e38ed14cb', '172987ec-7a20-7eb6-3ebe-a9fb621bb566', '17298813-7ac4-258b-d6f9-aaf43f9147b1', '17298813-f1ef-d28a-0817-5f3b86c3cf23', '17298828-b62b-9ee6-248b-521b0663226e', '17298825-7449-2fcb-378e-13671cb4688a']
Edit
You get a list of lists because on each iteration you read only one id, so what you need to do is initialise an empty list and append each id to it, like this:
l = []
for i in data:
    if 'traceId' in i:
        id = i['traceId']
        l.append(id)
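A compact alternative (my addition, not from the original answer), assuming data is the parsed list from requests.json as in the question:
trace_ids = [item['traceId'] for item in data if 'traceId' in item]
print(trace_ids)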
You can append the ids variable to a list like so:
# list declaration
l1 = []
# this must be inside your loop
l1.append(ids)
I'm assuming you get the id as a str value. Using id.split() will return all of the ids in one single Python list, as each id is separated by whitespace in your example.
id = """17298626-991c-e490-bae6-47079c6e2202 17298496-19bd-2f89-7b5f-881921abc632
17298698-3e17-7a9b-b337-aacfd9483b1b 172986ac-d91d-c4ea-2e50-d53700480dd0
172986d0-18aa-6f51-9c62-6cb087ad31e5 172986f4-80f0-5c21-3aee-12f22a5f4322
17298712-a4ac-7b36-08e9-8512fa8322dd 17298747-8cc6-d9d0-8d05-50adf228c029
1729875c-050f-9a99-4850-bb0e6ad35fb0 1729875f-0d50-dc94-5515-b4891c40d81c
17298761-c26b-3ce5-e77e-db412c38a5b4 172987c8-2b5d-0d94-c365-e8407b0a8860
1729881a-e583-2b54-3a52-d092020d9c1d 1729881c-64a2-67cf-d561-6e5e38ed14cb
172987ec-7a20-7eb6-3ebe-a9fb621bb566 17298813-7ac4-258b-d6f9-aaf43f9147b1
17298813-f1ef-d28a-0817-5f3b86c3cf23 17298828-b62b-9ee6-248b-521b0663226e
17298825-7449-2fcb-378e-13671cb4688a"""
id_list = id.split()
print(id_list)
Output:
['17298626-991c-e490-bae6-47079c6e2202', '17298496-19bd-2f89-7b5f-881921abc632',
'17298698-3e17-7a9b-b337-aacfd9483b1b', '172986ac-d91d-c4ea-2e50-d53700480dd0',
'172986d0-18aa-6f51-9c62-6cb087ad31e5', '172986f4-80f0-5c21-3aee-12f22a5f4322',
'17298712-a4ac-7b36-08e9-8512fa8322dd', '17298747-8cc6-d9d0-8d05-50adf228c029',
'1729875c-050f-9a99-4850-bb0e6ad35fb0', '1729875f-0d50-dc94-5515-b4891c40d81c',
'17298761-c26b-3ce5-e77e-db412c38a5b4', '172987c8-2b5d-0d94-c365-e8407b0a8860',
'1729881a-e583-2b54-3a52-d092020d9c1d', '1729881c-64a2-67cf-d561-6e5e38ed14cb',
'172987ec-7a20-7eb6-3ebe-a9fb621bb566', '17298813-7ac4-258b-d6f9-aaf43f9147b1',
'17298813-f1ef-d28a-0817-5f3b86c3cf23', '17298828-b62b-9ee6-248b-521b0663226e',
'17298825-7449-2fcb-378e-13671cb4688a']
split() with no arguments splits on any whitespace (spaces and newlines), which is why it works here. You can use the sep argument to split on a different separator if needed.
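For example (my addition), passing a separator:
print("a,b,c".split(","))   # ['a', 'b', 'c']
print("a b\nc".split())     # no argument: any whitespace -> ['a', 'b', 'c']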

Foreach loop in Python to extract value from array in json response

I've got this json response:
{
    "properties": {
        "basic": {
            "bandwidth_class": "",
            "failure_pool": "",
            "max_connection_attempts": 0,
            "max_idle_connections_pernode": 50,
            "max_timed_out_connection_attempts": 2,
            "monitors": [
                "Simple HTTP"
            ],
            "node_close_with_rst": false,
            "node_connection_attempts": 3,
            "node_delete_behavior": "immediate",
            "node_drain_to_delete_timeout": 0,
            "nodes_table": [
                {
                    "node": "abc1.prod.local:80",
                    "priority": 1,
                    "state": "active",
                    "weight": 1
                },
                {
                    "node": "def1.prod.local:80",
                    "priority": 1,
                    "state": "disabled",
                    "weight": 1
                },
                {
                    "node": "ghi1.prod.local:80",
                    "priority": 1,
                    "state": "disabled",
                    "weight": 1
                },
                {
                    "node": "jkl1.prod.local:80",
                    "priority": 1,
                    "state": "active",
                    "weight": 1
                }
            ],
            "note": "",
            "passive_monitoring": true,
            "persistence_class": "",
            "transparent": false
        }
    }
}
And this powershell script:
$nodesArray = "abc1.prod.local:80", "jkl1.prod.local:80"
foreach($node in $nodesArray)
{
    $nodes_match_and_enabled = $GetNodesResponse.properties.basic.nodes_table | Where { $_.node -eq $node -and $_.state -eq "active" }
    if($nodes_match_and_enabled)
    {
        Write-Output "$node exists in the pool and active"
    }
    else
    {
        Write-Output "$node is either not active or the name mismatches"
        $global:invalidNodeArray.Add($node)
    }
}
In my PowerShell script I loop over the array to check that the two nodes actually match by value and that their state is active. It works as I expect.
However, I am writing the exact same logic in Python (I am a beginner) and am not sure how to approach it. Any idea what the script would look like in Python?
First, filter all active nodes, then compare with node list:
import json

# 'text' is assumed to hold the raw JSON response shown above
data = json.loads(text)

active_nodes = {
    n['node']
    for n in data['properties']['basic']['nodes_table']
    if n['state'] == 'active'
}

nodes = {"abc1.prod.local:80", "jkl1.prod.local:80"}

for node in nodes:
    if node in active_nodes:
        print('{} exists in the pool and active'.format(node))
    else:
        print('{} is either not active or the name mismatches'.format(node))

invalid_nodes = nodes - active_nodes
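As a follow-up usage note (my addition), invalid_nodes can then be reported in one go, mirroring $global:invalidNodeArray in the PowerShell version:
if invalid_nodes:
    print('Invalid nodes: {}'.format(', '.join(sorted(invalid_nodes))))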
Should work in Python 2 or 3, I think:
#!/usr/bin/env python

import sys
import json

res = ""
for line in sys.stdin:
    res += line.rstrip()

res_obj = json.loads(res)

nodes = ['abc1.prod.local:80', 'jkl1.prod.local:80']
invalid_nodes = []

for node in nodes:
    try:
        found = False
        test_node_objs = res_obj['properties']['basic']['nodes_table']
        for test_node_obj in test_node_objs:
            test_node = test_node_obj['node']
            # Match on the name and require the node to be active
            if node == test_node and test_node_obj['state'] == 'active':
                found = True
                break
        if found:
            sys.stdout.write("%s exists in the pool and active\n" % (node))
        else:
            sys.stdout.write("%s is either not active or the name mismatches\n" % (node))
            invalid_nodes.append(node)
    except KeyError as ke:
        sys.stderr.write("malformed response? check input...\n")
        pass
Example usage:
$ ./parse_response.py < response.json
Here's an implementation:
jsonObj = json.loads(jsonSrc)

expectedNodes = {"abc1.prod.local:80", "jkl1.prod.local:80"}

for node in expectedNodes:
    node_table = jsonObj['properties']['basic']['nodes_table']
    node_match = list(filter(lambda t_node: node == t_node['node'], node_table))
    is_node_matches_and_active = len(node_match) > 0 and node_match[0]['state'] == "active"
    if is_node_matches_and_active:
        print('node {} exists and is active'.format(node))
    else:
        print('node {} not found or not active'.format(node))
Output :
node jkl1.prod.local:80 exists and is active
node abc1.prod.local:80 exists and is active

Nested dictionary from data in a text file

I am new to Python and I am trying to create a dictionary, built from data in a text file, that is written out to a JSON file. The text file is this one:
557e155fc5f0 557e155fc5f0 1 557e155fc602 1
557e155fc610 557e155fc610 2
557e155fc620 557e155fc620 1 557e155fc626 1
557e155fc630 557e155fc630 1 557e155fc636 1
557e155fc640 557e155fc640 1
557e155fc670 557e155fc670 1 557e155fc698 1
557e155fc6a0 557e155fc6a0 1 557e155fc6d8 1
And the desired output for the first two lines would be
{ "functions": [
{
"address": "557e155fc5f0",
"blocks": [
"557e155fc5f0": "calls":{1}
"557e155fc602": "calls":{1}
]
},
{
"address": " 557e155fc610",
"blocks": [
" 557e155fc610": "calls":{2}
]
},
I have written a script to begin with, but I don't know how to continue.
import json

filename = 'calls2.out'  # here the name of the output file
funs = {}
bbls = {}

with open(filename) as fh:                  # open file
    for line in fh:                         # walk line by line
        if line.strip():                    # non-empty line?
            rtn, bbl = line.split(None, 1)  # None means 'all whitespace', the default
            for j in range(len(bbl)):
                funs[rtn] = bbl.split()

print(json.dumps(funs, indent=2, sort_keys=True))

# json = json.dumps(fun, indent=2, sort_keys=True)  # to save it into a file
# f = open("fout.json", "w")
# f.write(json)
# f.close()
This script gives me this output:
"557e155fc5f0": [
"557e155fc5f0",
"1",
"557e155fc602",
"1"
],
"557e155fc610": [
"557e155fc610",
"2"
],
"557e155fc620": [
"557e155fc620",
"1",
"557e155fc626",
"1"
],
funs[rtn] = bbl.split()
Here you add "557e155fc5f0", "1" as the value for the rtn key, because bbl is "557e155fc5f0 1" at this point, but you want to add it as a dictionary.
temp_dict = {bbl.split()[0]: bbl.split()[1]}
funs[rtn] = temp_dict
This will give you the following JSON:
{
    "557e155fc6a0": {
        "557e155fc6a0": "1"
    }
}
If you need "calls" as a key in the JSON, you'd need to extend it a bit:
temp_dict = {bbl.split()[0]: {"calls": bbl.split()[1]}}
funs[rtn] = temp_dict
Gives you this:
{
    "557e155fc6a0": {
        "557e155fc6a0": {
            "calls": "1"
        }
    }
}
Also, your example JSON is malformed; I assume you want something like this:
{
    "functions": {
        "address": "557e155fc5f0",
        "blocks": {
            "557e155fc5f0": {
                "calls": 1
            },
            "557e155fc602": {
                "calls": 1
            }
        }
    },
    "address": " 557e155fc610",
    "blocks": {
        "557e155fc610": {
            "calls": 2
        }
    }
}
I'd try an Online JSON Editor for testing/creating examples.
Hope it helps!
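If the goal is the list-of-functions shape sketched in the question ("functions" as a list, each entry with an "address" and a "blocks" mapping of call counts), here is a minimal sketch along those lines; my addition, building on the temp_dict idea above and assuming each input line is "function_addr block_addr count [block_addr count ...]":
import json

filename = 'calls2.out'
functions = []

with open(filename) as fh:
    for line in fh:
        if not line.strip():
            continue
        parts = line.split()
        address, rest = parts[0], parts[1:]
        # rest alternates block address and call count: addr1 n1 addr2 n2 ...
        blocks = {rest[i]: {"calls": int(rest[i + 1])} for i in range(0, len(rest), 2)}
        functions.append({"address": address, "blocks": blocks})

with open("fout.json", "w") as f:
    json.dump({"functions": functions}, f, indent=2)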
