Iterate Python data structure - python

I'm having problems getting my head around this Python data structure:
data = {'nmap': {'command_line': u'ls',
'scaninfo': {u'tcp': {'method': u'connect',
'services': u'80,443'}},
'scanstats': {'downhosts': u'0',
'elapsed': u'1.18',
'timestr': u'Wed Mar 19 21:37:54 2014',
'totalhosts': u'1',
'uphosts': u'1'}},
'scan': {u'url': {'addresses': {u'ipv6': u'2001:470:0:63::2'},
'hostname': u'abc.net',
'status': {'reason': u'syn-ack',
'state': u'up'},
u'tcp': {80: {'conf': u'3',
'cpe': '',
'extrainfo': '',
'name': u'http',
'product': '',
'reason': u'syn-ack',
'state': u'open',
'version': ''},
443: {'conf': u'3',
'cpe': '',
'extrainfo': '',
'name': u'https',
'product': '',
'reason': u'syn-ack',
'script': {
u'ssl-cert': u'place holder'},
'state': u'open',
'version': ''}},
'vendor': {}
}
}
}
Basically I need to iterate over the 'tcp' key values and extract the contents of the 'script' item if it exists.
This is what I've tried:
items = data["scan"]
for item in items['url']['tcp']:
if t["script"] is not None:
print t
However I can't seem to get it to work.

This will find any dictionary items with the key 'script' anywhere in the data structure:
def find_key(data, search_key, out=None):
    """Recursively collect every value stored under *search_key*.

    Walks nested dictionaries AND lists/tuples, so keys buried inside
    list elements (common in JSON-style data) are found as well — the
    original version silently skipped anything inside a list.

    Returns a list of all matching values (empty list if none found).
    """
    if out is None:
        # fresh accumulator per top-level call (avoid mutable default)
        out = []
    if isinstance(data, dict):
        if search_key in data:
            out.append(data[search_key])
        # recurse into every value, including the matched one, so
        # nested occurrences under the same key are also collected
        for value in data.values():
            find_key(value, search_key, out)
    elif isinstance(data, (list, tuple)):
        for element in data:
            find_key(element, search_key, out)
    return out
For your data, I get:
>>> find_key(data, 'script')
[{'ssl-cert': 'place holder'}]
To find the ports, too, modify slightly:
tcp_dicts = find_key(data, 'tcp') # find all values for key 'tcp'
ports = [] # list to hold ports
for d in tcp_dicts: # iterate through values for key 'tcp'
if all(isinstance(port, int) for port in d): # ensure all are port numbers
for port in d:
ports.append((port,
d[port].get('script'))) # extract number and script
Now you get something like:
[(80, None), (443, {'ssl-cert': 'place holder'})]

data['scan']['url']['tcp'] is a dictionary, so when you just iterate over it, you will get the keys but not the values. If you want to iterate over the values, you have to do so:
for t in data['scan']['url']['tcp'].values():
if 'script' in t and t['script'] is not None:
print(t)
If you need the key as well, iterate over the items instead:
for k, t in data['scan']['url']['tcp'].items():
if 'script' in t and t['script'] is not None:
print(k, t)
You also need to change your test to check 'script' in t first, otherwise accessing t['script'] will raise a key error.

Don't you mean if item["script"]?
Really though if the key has a chance to not exist, use the get method provided by dict.
So try instead
items = data["scan"]
# Iterating a dict directly yields its KEYS (the port numbers 80, 443),
# which are ints and have no .get() — so iterate the VALUES, which are
# the per-port info dicts.
for item in items['url']['tcp'].values():
    # .get() returns None when 'script' is absent instead of raising KeyError
    script = item.get('script')
    if script:
        print(script)

Related

How can I remove nested keys and create a new dict and link both with an ID?

I have a problem. I have a dict my_Dict. This is somewhat nested. However, I would like to 'clean up' the dict my_Dict, by this I mean that I would like to separate all nested ones and also generate a unique ID so that I can later find the corresponding object again.
For example, I have detail: {...}; this nested part should later become an independent dict my_Detail_Dict, and in addition, detail should receive a unique ID within my_Dict. Unfortunately, the list that I return is empty. How can I remove my nested keys and give them an ID?
my_Dict = {
'_key': '1',
'group': 'test',
'data': {},
'type': '',
'code': '007',
'conType': '1',
'flag': None,
'createdAt': '2021',
'currency': 'EUR',
'detail': {
'selector': {
'number': '12312',
'isTrue': True,
'requirements': [{
'type': 'customer',
'requirement': '1'}]
}
}
}
def nested_dict(my_Dict):
my_new_dict_list = []
for key in my_Dict.keys():
#print(f"Looking for {key}")
if isinstance(my_Dict[key], dict):
print(f"{key} is nested")
# Add id to nested stuff
my_Dict[key]["__id"] = 1
my_nested_Dict = my_Dict[key]
# Delete all nested from the key
del my_Dict[key]
# Add id to key, but not the nested stuff
my_Dict[key] = 1
my_new_dict_list.append(my_Dict[key])
my_new_dict_list.append(my_Dict)
return my_new_dict_list
nested_dict(my_Dict)
[OUT] []
# What I want
[my_Dict, my_Details_Dict, my_Data_Dict]
What I have
{'_key': '1',
'group': 'test',
'data': {},
'type': '',
'code': '007',
'conType': '1',
'flag': None,
'createdAt': '2021',
'currency': 'EUR',
'detail': {'selector': {'number': '12312',
'isTrue': True,
'requirements': [{'type': 'customer', 'requirement': '1'}]}}}
What I want
my_Dict = {'_key': '1',
'group': 'test',
'data': 18,
'type': '',
'code': '007',
'conType': '1',
'flag': None,
'createdAt': '2021',
'currency': 'EUR',
'detail': 22}
my_Data_Dict = {'__id': 18}
my_Detail_Dict = {'selector': {'number': '12312',
'isTrue': True,
'requirements': [{'type': 'customer', 'requirement': '1'}]}, '__id': 22}
The following code snippet will solve what you are trying to do:
my_Dict = {
'_key': '1',
'group': 'test',
'data': {},
'type': '',
'code': '007',
'conType': '1',
'flag': None,
'createdAt': '2021',
'currency': 'EUR',
'detail': {
'selector': {
'number': '12312',
'isTrue': True,
'requirements': [{
'type': 'customer',
'requirement': '1'}]
}
}
}
def nested_dict(my_Dict):
    """Pull every nested dict out of *my_Dict* and tag it with an ID.

    Each value of *my_Dict* that is itself a dict gets a sequential
    ``__id`` (starting at 1), is collected into the returned mapping
    under its original key, and is replaced inside *my_Dict* by that ID.
    *my_Dict* is modified in place.
    """
    extracted = {}
    next_id = 0
    for name, value in list(my_Dict.items()):
        # Only dictionary values are considered "nested"
        if not isinstance(value, dict):
            continue
        next_id += 1
        # Tag the nested dict with its ID, keep it, and leave the ID behind
        value["__id"] = next_id
        extracted[name] = value
        my_Dict[name] = next_id
    return extracted
result = nested_dict(my_Dict)
print(my_Dict)
# Iterating through dictionary to get all nested dictionaries
for item in result.items():
print(item)
If I understand you correctly, you wish to automatically make each nested dictionary it's own variable, and remove it from the main dictionary.
Finding the nested dictionaries and removing them from the main dictionary is not so difficult. However, automatically assigning them to a variable is not recommended for various reasons. Instead, what I would do is store all these dictionaries in a list, and then assign them manually to a variable.
# Prepare a list to store data in.
# NOTE(review): relies on `my_Dict` (defined above) being in scope.
inidividual_dicts = []
id_index = 1
for key in my_Dict.keys():
    # For each key, we get the current value
    value = my_Dict[key]
    # Determine if the current value is a dictionary. If so, then it's a nested dict
    if isinstance(value, dict):
        print(key + " is a nested dict")
        # Get the nested dictionary, and replace it with the ID
        dict_value = my_Dict[key]
        my_Dict[key] = id_index
        # Add the id to previously nested dictionary
        dict_value['__id'] = id_index
        id_index = id_index + 1  # increase for next nested dic
        inidividual_dicts.append(dict_value)  # store it as a new dictionary
# Manually write out variables names, and assign the nested dictionaries to it.
# NOTE(review): this unpack assumes exactly two nested dicts. With the example
# my_Dict, 'data' is encountered before 'detail', so the FIRST element is the
# data dict — the variable names below look swapped; verify before relying on them.
[my_Details_Dict, my_Data_Dict] = inidividual_dicts

How to flatten nested dict formatted '_source' column of csv, into dataframe

I have a csv with 500+ rows where one column "_source" is stored as JSON. I want to extract that into a pandas dataframe. I need each key to be its own column. #I have a 1 mb Json file of online social media data that I need to convert the dictionary and key values into their own separate columns. The social media data is from Facebook,Twitter/web crawled... etc. There are approximately 528 separate rows of posts/tweets/text with each having many dictionaries inside dictionaries. I am attaching a few steps from my Jupyter notebook below to give a more complete understanding. need to turn all key value pairs for dictionaries inside dictionaries into columns inside a dataframe
Thank you so much this will be a huge help!!!
I have tried changing it to a dataframe by doing this
source = pd.DataFrame.from_dict(source, orient='columns')
And it returns something like this... I thought it might unpack the dictionary but it did not.
#source.head()
#_source
#0 {'sub_organization_id': 'default', 'uid': 'aba...
#1 {'sub_organization_id': 'default', 'uid': 'ab0...
#2 {'sub_organization_id': 'default', 'uid': 'ac0...
below is the shape
#source.shape (528, 1)
below is what the an actual "_source" row looks like stretched out. There are many dictionaries and key:value pairs where each key needs to be its own column. Thanks! The actual links have been altered/scrambled for privacy reasons.
{'sub_organization_id': 'default',
'uid': 'ac0fafe9ba98327f2d0c72ddc365ffb76336czsa13280b',
'project_veid': 'default',
'campaign_id': 'default',
'organization_id': 'default',
'meta': {'rule_matcher': [{'atribs': {'website': 'github.com/res',
'source': 'Explicit',
'version': '1.1',
'type': 'crawl'},
'results': [{'rule_type': 'hashtag',
'rule_tag': 'Far',
'description': None,
'project_veid': 'A7180EA-7078-0C7F-ED5D-86AD7',
'campaign_id': '2A6DA0C-365BB-67DD-B05830920',
'value': '#Far',
'organization_id': None,
'sub_organization_id': None,
'appid': 'ray',
'project_id': 'CDE2F42-5B87-C594-C900E578C',
'rule_id': '1838',
'node_id': None,
'metadata': {'campaign_title': 'AF',
'project_title': 'AF '}}]}],
'render': [{'attribs': {'website': 'github.com/res',
'version': '1.0',
'type': 'Page Render'},
'results': [{'render_status': 'success',
'path': 'https://east.amanaws.com/rays-ime-store/renders/b/b/70f7dffb8b276f2977f8a13415f82c.jpeg',
'image_hash': 'bb7674b8ea3fc05bfd027a19815f82c',
'url': 'https://discooprdapp.com/',
'load_time': 32}]}]},
'norm_attribs': {'website': 'github.com/res',
'version': '1.1',
'type': 'crawl'},
'project_id': 'default',
'system_timestamp': '2019-02-22T19:04:53.569623',
'doc': {'appid': 'subtter',
'links': [],
'response_url': 'https://discooprdapp.com',
'url': 'https://discooprdapp.com/',
'status_code': 200,
'status_msg': 'OK',
'encoding': 'utf-8',
'attrs': {'uid': '2ab8f2651cb32261b911c990a8b'},
'timestamp': '2019-02-22T19:04:53.963',
'crawlid': '7fd95-785-4dd259-fcc-8752f'},
'type': 'crawl',
'norm': {'body': '\n',
'domain': 'discordapp.com',
'author': 'crawl',
'url': 'https://discooprdapp.com',
'timestamp': '2019-02-22T19:04:53.961283+00:00',
'id': '7fc5-685-4dd9-cc-8762f'}}
before you post make sure the actual code works for the data attached. Thanks!
The below code I tried but it did not work there was a syntax error that I could not figure out.
pd.io.json.json_normalize(source_data.[_source].apply(json.loads))
pd.io.json.json_normalize(source_data.[_source].apply(json.loads))
^
SyntaxError: invalid syntax
Whoever can help me with this will be a saint!
I had to do something like that a while back. Basically I used a function that completely flattened out the json to identify the keys that would be turned into the columns, then iterated through the json to reconstruct a row and append each row into a "results" dataframe. So with the data you provided, it created 52 column row and looking through it, looks like it included all the keys into it's own column. Anything nested, for example: 'meta': {'rule_matcher':[{'atribs': {'website': ...]} should then have a column name meta.rule_matcher.atribs.website where the '.' denotes those nested keys
data_source = {'sub_organization_id': 'default',
'uid': 'ac0fafe9ba98327f2d0c72ddc365ffb76336czsa13280b',
'project_veid': 'default',
'campaign_id': 'default',
'organization_id': 'default',
'meta': {'rule_matcher': [{'atribs': {'website': 'github.com/res',
'source': 'Explicit',
'version': '1.1',
'type': 'crawl'},
'results': [{'rule_type': 'hashtag',
'rule_tag': 'Far',
'description': None,
'project_veid': 'A7180EA-7078-0C7F-ED5D-86AD7',
'campaign_id': '2A6DA0C-365BB-67DD-B05830920',
'value': '#Far',
'organization_id': None,
'sub_organization_id': None,
'appid': 'ray',
'project_id': 'CDE2F42-5B87-C594-C900E578C',
'rule_id': '1838',
'node_id': None,
'metadata': {'campaign_title': 'AF',
'project_title': 'AF '}}]}],
'render': [{'attribs': {'website': 'github.com/res',
'version': '1.0',
'type': 'Page Render'},
'results': [{'render_status': 'success',
'path': 'https://east.amanaws.com/rays-ime-store/renders/b/b/70f7dffb8b276f2977f8a13415f82c.jpeg',
'image_hash': 'bb7674b8ea3fc05bfd027a19815f82c',
'url': 'https://discooprdapp.com/',
'load_time': 32}]}]},
'norm_attribs': {'website': 'github.com/res',
'version': '1.1',
'type': 'crawl'},
'project_id': 'default',
'system_timestamp': '2019-02-22T19:04:53.569623',
'doc': {'appid': 'subtter',
'links': [],
'response_url': 'https://discooprdapp.com',
'url': 'https://discooprdapp.com/',
'status_code': 200,
'status_msg': 'OK',
'encoding': 'utf-8',
'attrs': {'uid': '2ab8f2651cb32261b911c990a8b'},
'timestamp': '2019-02-22T19:04:53.963',
'crawlid': '7fd95-785-4dd259-fcc-8752f'},
'type': 'crawl',
'norm': {'body': '\n',
'domain': 'discordapp.com',
'author': 'crawl',
'url': 'https://discooprdapp.com',
'timestamp': '2019-02-22T19:04:53.961283+00:00',
'id': '7fc5-685-4dd9-cc-8762f'}}
Code:
def flatten_json(y):
    """Flatten nested dicts/lists into one flat dict.

    Path components are joined with '_'; list elements contribute their
    index, e.g. {'a': [{'b': 1}]} -> {'a_0_b': 1}.  A bare scalar input
    maps to the key '' (empty string).
    """
    flat = {}

    def _walk(node, prefix=''):
        if isinstance(node, dict):
            for key, value in node.items():
                _walk(value, prefix + key + '_')
        elif isinstance(node, list):
            for idx, value in enumerate(node):
                _walk(value, prefix + str(idx) + '_')
        else:
            # leaf: drop the trailing '_' from the accumulated path
            flat[prefix[:-1]] = node

    _walk(y)
    return flat
# Flatten the nested structure into one dict whose keys encode the full
# path, e.g. 'meta_rule_matcher_0_atribs_website'.
# NOTE(review): relies on `data_source` and `flatten_json` defined above.
flat = flatten_json(data_source)
import pandas as pd
import re
results = pd.DataFrame()
special_cols = []  # keys with no '_<digits>_' segment: scalar, non-repeated fields
columns_list = list(flat.keys())
for item in columns_list:
    try:
        # The first '_<digits>_' segment in the flattened key is used as the row index.
        row_idx = re.findall(r'\_(\d+)\_', item )[0]
    except:
        # NOTE(review): bare except — in practice it only guards the IndexError
        # from [0] when the key has no numeric segment; narrowing to
        # `except IndexError` would be safer.
        special_cols.append(item)
        continue
    # Column name = everything after the first numeric segment, with any
    # remaining '_<digits>_' segments collapsed to '.'.
    column = re.findall(r'\_\d+\_(.*)', item )[0]
    column = re.sub(r'\_\d+\_', '.', column)
    row_idx = int(row_idx)
    value = flat[item]
    # .loc creates the row/column on first assignment
    results.loc[row_idx, column] = value
# Broadcast the non-indexed scalar fields onto every row.
for item in special_cols:
    results[item] = flat[item]
Output:
print (results.to_string())
atribs_website atribs_source atribs_version atribs_type results.rule_type results.rule_tag results.description results.project_veid results.campaign_id results.value results.organization_id results.sub_organization_id results.appid results.project_id results.rule_id results.node_id results.metadata_campaign_title results.metadata_project_title attribs_website attribs_version attribs_type results.render_status results.path results.image_hash results.url results.load_time sub_organization_id uid project_veid campaign_id organization_id norm_attribs_website norm_attribs_version norm_attribs_type project_id system_timestamp doc_appid doc_response_url doc_url doc_status_code doc_status_msg doc_encoding doc_attrs_uid doc_timestamp doc_crawlid type norm_body norm_domain norm_author norm_url norm_timestamp norm_id
0 github.com/res Explicit 1.1 crawl hashtag Far NaN A7180EA-7078-0C7F-ED5D-86AD7 2A6DA0C-365BB-67DD-B05830920 #Far NaN NaN ray CDE2F42-5B87-C594-C900E578C 1838 NaN AF AF github.com/res 1.0 Page Render success https://east.amanaws.com/rays-ime-store/render... bb7674b8ea3fc05bfd027a19815f82c https://discooprdapp.com/ 32.0 default ac0fafe9ba98327f2d0c72ddc365ffb76336czsa13280b default default default github.com/res 1.1 crawl default 2019-02-22T19:04:53.569623 subtter https://discooprdapp.com https://discooprdapp.com/ 200 OK utf-8 2ab8f2651cb32261b911c990a8b 2019-02-22T19:04:53.963 7fd95-785-4dd259-fcc-8752f crawl \n discordapp.com crawl https://discooprdapp.com 2019-02-22T19:04:53.961283+00:00 7fc5-685-4dd9-cc-8762f

Replace all occurrences of a string in JSON object regardless of key

I have a JSON object in Python created through requests built in .json() function.
Here is a simplified sample of what I'm doing:
data = session.get(url)
obj = data.json()
s3object = s3.Object(s3_bucket, output_file)
s3object.put(Body=(bytes(json.dumps(obj).encode('UTF-8'))))
Example obj:
{'id': 'fab779b7-2586-4895-9f3b-c9518f34e028', 'project_id': 'a1a73e68-9943-4584-9d59-cc84a0d3e92b', 'created_at': '2017-10-23 02:57:03 -0700', 'sections': [{'section_name': '', 'items': [{'id': 'ffadc652-dd36-4b9f-817c-6539a4b462ab', 'created_at': '2017-10-23 03:36:13 -0700', 'updated_at': '2017-10-23 03:38:32 -0700', 'created_by': 'paul', 'question_text': 'Drawing Ref(s)', 'spec_ref': '', 'display_number': null, 'response': '', 'comment': 'see attached mh309', 'position': 1, 'is_conforming': 'N/A', 'display_type': 'text'}]}]}
I need to replace any occurrence of the string "N/A" with "Not Applicable" anywhere it appears regardless of its key or location before I upload the JSON to S3. I cannot use local disk writes hence the reason this is done this way.
Is this possible?
My original plan was to turn it to a string and just replace before turning back, is this inefficient?
Thanks,
As mentioned in the comments, obj is a dict. One way to replace N/A with Not Applicable regardless of location is to convert it to a string, use string.replace and convert it back to dict for further processing
import json
#Original dict with N/A
obj = {'id': 'fab779b7-2586-4895-9f3b-c9518f34e028', 'project_id': 'a1a73e68-9943-4584-9d59-cc84a0d3e92b', 'created_at': '2017-10-23 02:57:03 -0700', 'sections': [{'section_name': '', 'items': [{'id': 'ffadc652-dd36-4b9f-817c-6539a4b462ab', 'created_at': '2017-10-23 03:36:13 -0700', 'updated_at': '2017-10-23 03:38:32 -0700', 'created_by': 'paul', 'question_text': 'Drawing Ref(s)', 'spec_ref': '', 'display_number': None, 'response': '', 'comment': 'see attached mh309', 'position': 1, 'is_conforming': 'N/A', 'display_type': 'text'}]}]}
#Convert to string and replace
obj_str = json.dumps(obj).replace('N/A', 'Not Applicable')
#Get obj back with replacement
obj = json.loads(obj_str)
Although #Devesh Kumar Singh's answer works with the sample json data in your question, converting the whole thing to a string, and then doing a wholesale bulk replace of the substring seems possibly error-prone because potentially it might change it in portions other than only in the values associated with dictionary keys.
To avoid that I would suggest using the following, which is more selective even though it takes a few more lines of code:
import json
def replace_NA(obj):
    """Return a copy of *obj* with 'N/A' -> 'Not Applicable' in dict string values.

    Round-trips *obj* through JSON so only genuinely serializable data is
    accepted; the object_hook rewrites each decoded dict's string values,
    leaving keys and non-string values untouched.
    """
    def _fix_strings(mapping):
        for key, value in mapping.items():
            if isinstance(value, str):
                mapping[key] = value.replace('N/A', 'Not Applicable')
        return mapping

    return json.loads(json.dumps(obj), object_hook=_fix_strings)
obj = {'id': 'fab779b7-2586-4895-9f3b-c9518f34e028', 'project_id': 'a1a73e68-9943-4584-9d59-cc84a0d3e92b', 'created_at': '2017-10-23 02:57:03 -0700', 'sections': [{'section_name': '', 'items': [{'id': 'ffadc652-dd36-4b9f-817c-6539a4b462ab', 'created_at': '2017-10-23 03:36:13 -0700', 'updated_at': '2017-10-23 03:38:32 -0700', 'created_by': 'paul', 'question_text': 'Drawing Ref(s)', 'spec_ref': '', 'display_number': None, 'response': '', 'comment': 'see attached mh309', 'position': 1, 'is_conforming': 'N/A', 'display_type': 'text'}]}]}
obj = replace_NA(obj)
I guess the object you've pasted here must be of dict type; you can check it with isinstance(json_object, dict). With that assumption you can do it as:
# Replace 'N/A' values at the TOP LEVEL of the dict only — nested
# dicts/lists are not visited by this approach.
keys = json_object.keys()
for i in keys:
    if json_object[i] == "N/A":
        # fixed: the question asks for "Not Applicable" (was "Not Available")
        json_object[i] = "Not Applicable"
Hope it helps!

Iterate through a list of key and value pairs and get specific key and value using Python

I have a list like this.
data = [{
'category': 'software',
'code': 110,
'actual': '["5.1.4"]',
'opened': '2018-10-16T09:18:12Z',
'component_type': 'update',
'event': 'new update available',
'current_severity': 'info',
'details': '',
'expected': None,
'id': 10088862,
'component_name': 'Purity//FA'
},
{
'category': 'software',
'code': 67,
'actual': None,
'opened': '2018-10-18T01:14:45Z',
'component_type': 'host',
'event': 'misconfiguration',
'current_severity': 'critical',
'details': '',
'expected': None,
'id': 10088898,
'component_name': 'pudc-vm-001'
},
{
'category': 'array',
'code': 42,
'actual': None,
'opened': '2018-11-22T22:27:29Z',
'component_type': 'hardware',
'event': 'failure',
'current_severity': 'warning',
'details': '' ,
'expected': None,
'id': 10089121,
'component_name': 'ct1.eth15'
}]
I want to iterate over this and get only category, component_type, event and current_severity.
I tried a for loop but it says "too many values to unpack", obviously.
for k, v, b, n in data:
print(k, v, b, n) //do something
i essentially want a list that is filtered to have only category, component_type, event and current_severity. So that i can use the same for loop to get out my four key value pairs.
Or if there is a better way to do it? Please help me out.
Note: The stanzas in the list is not fixed, it keeps changing, it might have more than three stanzas.
You have a list of dictionaries, simple way to iterate over this is
category = [x['category'] for x in data]
Which prints the values of category key
['software', 'software', 'array']
Do the same for component_type, event and current_severity and you're good to go
If you know that every dict inside your current list of dicts should have at least the keys you're trying to extract their data, then you can use dict[key], however for safety, i prefer using dict.get(key, default value) like this example:
# Build a trimmed-down copy of `data`: one dict per stanza, keeping only
# the four keys of interest.  dict.get() yields None (instead of raising
# KeyError) when a key is missing from a stanza.
out = [
    {
        'category': elm.get('category'),
        'component_type': elm.get('component_type'),
        'event': elm.get('event'),
        'current_severity': elm.get('current_severity')
    } for elm in data
]
print(out)
Output:
[{'category': 'software',
'component_type': 'update',
'current_severity': 'info',
'event': 'new update available'},
{'category': 'software',
'component_type': 'host',
'current_severity': 'critical',
'event': 'misconfiguration'},
{'category': 'array',
'component_type': 'hardware',
'current_severity': 'warning',
'event': 'failure'}]
For more informations about when we should use dict.get() instead of dict[key], see this answer
with this you get a new list with only the keys you are interested on:
# One trimmed dict per stanza.  Unlike .get(), direct indexing raises
# KeyError if a stanza lacks one of these keys.
new_list = [{
    'category': stanza['category'],
    'component_type': stanza['component_type'],
    'event': stanza['event'],
    # fixed: the question asks for four keys; 'current_severity' was missing
    'current_severity': stanza['current_severity'],
} for stanza in data]

how to use nested dictionary in python?

I am trying to write some code with the Hunter.io API to automate some of my b2b email scraping. It's been a long time since I've written any code and I could use some input. I have a CSV file of Urls, and I want to call a function on each URL that outputs a dictionary like this:
`{'domain': 'fromthebachrow.com', 'webmail': False, 'pattern': '{f}{last}', 'organization': None, 'emails': [{'value': 'fbach#fromthebachrow.com', 'type': 'personal', 'confidence': 91, 'sources': [{'domain': 'fromthebachrow.com', 'uri': 'http://fromthebachrow.com/contact', 'extracted_on': '2017-07-01'}], 'first_name': None, 'last_name': None, 'position': None, 'linkedin': None, 'twitter': None, 'phone_number': None}]}`
for each URL I call my function on. I want my code to return just the email address for each key labeled 'value'.
Value is a key that is contained in a list that itself is an element of the directory my function outputs. I am able to access the output dictionary to grab the list that is keyed to 'emails', but I don't know how to access the dictionary contained in the list. I want my code to return the value in that dictionary that is keyed with 'value', and I want it to do so for all of my urls.
from pyhunyrt import PyHunter
import csv
file=open('urls.csv')
reader=cvs.reader (file)
urls=list(reader)
hunter=PyHunter('API Key')
for item in urls:
output=hunter.domain_search(item)
output['emails'`
which returns a list that looks like this for each item:
[{
'value': 'fbach#fromthebachrow.com',
'type': 'personal',
'confidence': 91,
'sources': [{
'domain': 'fromthebachrow.com',
'uri': 'http://fromthebachrow.com/contact',
'extracted_on': '2017-07-01'
}],
'first_name': None,
'last_name': None,
'position': None,
'linkedin': None,
'twitter': None,
'phone_number': None
}]
How do I grab the first dictionary in that list and then access the email paired with 'value' so that my output is just an email address for each url I input initially?
To grab the first dict (or any item) in a list, use list[0], then to grab a value of a key value use ["value"]. To combine it, you should use list[0]["value"]

Categories