I have a dictionary in a file and want to print the name values from it.
di = {'elk': [{'url_1': 'localhost:8080/api/running',
'url_2': 'localhost:8080/api/',
'name': 'cat',
'method': 'GET'}],
'a': [{'url_1': 'localhost:8080/api/running',
'url_2': 'localhost:8080/api/',
'name': 'mouse',
'method': 'GET'}]}
# Read the file
with open('g.txt', 'r') as fh:
    fh_n = fh.read()
# Save the names into a list
test = []
for k, v in di.items():
    test.append(v[0]['name'])
test
['cat', 'mouse']
import ast

with open('g.txt', 'r') as fh:
    fh_n = fh.read()
# First split off the "di =" prefix, then convert the remaining string into a dictionary
data = ast.literal_eval(fh_n.split("=")[1].strip())
# Alternatively, remove the "di =" prefix from the text file itself and call
# ast.literal_eval(fh_n) directly
name = [i[0]['name'] for i in data.values()]
print(name)
O/P:
['cat', 'mouse']
OR
Convert the text file data into a JSON file instead:
g.json file
[{
"di": {
"elk": [
{
"url_1": "localhost:8080/api/running",
"url_2": "localhost:8080/api/",
"name": "cat",
"method": "GET"
}
],
"a": [
{
"url_1": "localhost:8080/api/running",
"url_2": "localhost:8080/api/",
"name": "mouse",
"method": "GET"
}
]
}
}
]
.py file
import json
with open('g.json') as fh:
    data = json.load(fh)
name = [i[0]['name'] for i in data[0]['di'].values()]
print(name)
O/P:
['cat', 'mouse']
You can use json to get your result:
di = {'elk': [{'url_1': 'localhost:8080/api/running',
'url_2': 'localhost:8080/api/',
'name': 'cat',
'method': 'GET'}],
'a': [{'url_1': 'localhost:8080/api/running',
'url_2': 'localhost:8080/api/',
'name': 'mouse',
'method': 'GET'}]}
import json
file = open('g.json', 'w')
json.dump(di, file) # Saving di into g.json file
file.close()
file_open = open('g.json', 'r+')
my_di = json.load(file_open) # Loading the saved g.json file
file_open.close()
print(type(my_di))
print(my_di)
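To tie this back to the question, you can then pull the name values out of the reloaded dictionary (a small follow-up sketch reusing my_di from above):
names = [v[0]['name'] for v in my_di.values()]
print(names)  # ['cat', 'mouse']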
I hope this helps.
So, I am trying to extract specific data and write it to a file. This JSON response has odd brackets around the information I want, which need to be stripped off, and I'm not really sure how to get to the desired output.
Maybe it's better to do it in an xls document? The end goal is to compare this list against another to find which hosts are missing.
It's a very lengthy response, so I just grabbed a snippet.
The JSON response
[
{
"adapter_list_length": 3,
"adapters": [
"adapter1",
"adapter2",
"adapter3"
],
"id": "",
"labels": [
"",
""
],
"specific_data.data.hostname": [
"HOSTNAME1"
],
"specific_data.data.last_seen": "",
"specific_data.data.network_interfaces.ips": [
"123.45.67.89"
],
"specific_data.data.os.type": [
""
]
},
{
"adapter_list_length": 3,
"adapters": [
"adapter1",
"adapter2",
"adapter3"
],
"id": "",
"labels": [
"",
""
],
"specific_data.data.hostname": [
"HOSTNAME2"
My test writer:
import csv

names = [item['specific_data.data.hostname'] for item in data]
with open('namelist.csv', mode='w') as csv_file:
    csv_writer = csv.writer(csv_file, delimiter='\n', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    csv_writer.writerow(names)
Current output:
['HOSTNAME1']
['HOSTNAME2']
Desired Output:
Hostnames: IPaddress:
HOSTNAME1 123.45.67.89
HOSTNAME2 123.456.78.9
.... ....
... ....
You can do it this way:
import csv
data = [{'adapter_list_length': 3, 'adapters': ['adapter1', 'adapter2', 'adapter3'],
'id': '', 'labels': ['', ''], 'specific_data.data.hostname': ['HOSTNAME1'],
'specific_data.data.last_seen': '', 'specific_data.data.network_interfaces.ips':
['123.45.67.89'], 'specific_data.data.os.type': ['']}, {'adapter_list_length': 3,
'adapters': ['adapter1', 'adapter2', 'adapter3'], 'id': '', 'labels': ['', ''],
'specific_data.data.hostname': ['HOSTNAME2'],'specific_data.data.last_seen': '',
'specific_data.data.network_interfaces.ips': ['123.45.67.80'],
'specific_data.data.os.type': ['']}]
names = [item['specific_data.data.hostname'][0] for item in data]
ips = [item['specific_data.data.network_interfaces.ips'][0] for item in data]
dets = list(zip(names,ips))
print('Hostnames:', '\t', 'IPaddress:')
for i, j in dets:
    print(i, '\t', j)

fields = ['Hostnames:', 'IPaddress:']
rows = [list(x) for x in dets]
filename = "dumb.csv"
with open(filename, 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow(fields)
    csvwriter.writerows(rows)
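Alternatively, csv.DictWriter can write the header and the rows in one pass (a sketch reusing the dets list built above; the column labels are the ones from the desired output):
import csv

with open('dumb.csv', 'w', newline='') as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=['Hostnames:', 'IPaddress:'])
    writer.writeheader()  # first row: the column labels
    for name, ip in dets:
        writer.writerow({'Hostnames:': name, 'IPaddress:': ip})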
I have file1.txt with the following contents:
[
{
"SERIAL": "124584",
"X": "30024.1",
},
{
"SERIAL": "114025",
"X": "14006.2",
}
]
I have file2.txt with the following contents:
[
{
"SERIAL": "344588",
"X": "48024.1",
},
{
"SERIAL": "255488",
"X": "56006.2",
}
]
I want to combine the 2 files into a single file output.txt that looks like this:
[
{
"SERIAL": "124584",
"X": "30024.1",
},
{
"SERIAL": "114025",
"X": "14006.2",
},
{
"SERIAL": "344588",
"X": "48024.1",
},
{
"SERIAL": "255488",
"X": "56006.2",
},
]
The tricky part is the [] at the end of each individual file.
I am using Python v3.7.
First, to be JSON compliant, you should remove all the trailing commas (ref: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Trailing_commas).
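If you would rather not edit the files by hand, a small regex pass can strip the trailing commas before parsing (a rough sketch; it assumes no string values contain a comma followed only by whitespace and a closing bracket):
import re

def strip_trailing_commas(text):
    # Drop any comma that sits directly before a closing ] or }
    return re.sub(r',(\s*[\]}])', r'\1', text)

You could then feed the cleaned string to json.loads instead of using json.load in the code below.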
Then you can use the following code:
import json
with open("file1.txt") as f1:
d1 = json.load(f1)
with open("file2.txt") as f2:
d2 = json.load(f2)
d3 = d1 + d2
with open("output.txt", "w") as out:
json.dump(d3, out)
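If you want output.txt to keep the pretty-printed layout shown in the question, pass indent=4 to json.dump.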
Here is a solution that reads the content from each file and then appends them.
from ast import literal_eval

with open("/home/umesh/Documents/text1.txt", "r") as data:
    first_file_data = data.read()
with open("/home/umesh/Documents/text2.txt", "r") as data:
    second_file_data = data.read()
first_file_data = literal_eval(first_file_data)
second_file_data = literal_eval(second_file_data)
for item in second_file_data:
    first_file_data.append(item)
print(first_file_data)
OUTPUT
[{'SERIAL': '124584', 'X': '30024.1'}, {'SERIAL': '114025', 'X': '14006.2'}, {'SERIAL': '344588', 'X': '48024.1'}, {'SERIAL': '255488', 'X': '56006.2'}]
This solves your problem:
import ast
import json
with open('file1.txt') as f:
    data = ast.literal_eval(f.read())
with open('file2.txt') as f:
    data2 = ast.literal_eval(f.read())
data.extend(data2)
print(data)
with open('outputfile', 'w') as fout:  # write to a file
    json.dump(data, fout)
OUTPUT:
[{'SERIAL': '124584', 'X': '30024.1'}, {'SERIAL': '114025', 'X': '14006.2'}, {'SERIAL': '344588', 'X': '48024.1'}, {'SERIAL': '255488', 'X': '56006.2'}]
Since the contents of both files are lists, you can concatenate them as follows:
file1 = [{'SERIAL': '124584', 'X': '30024.1'}, {'SERIAL': '114025', 'X': '14006.2'}]
file2 = [{'SERIAL': '344588', 'X': '48024.1'}, {'SERIAL': '255488', 'X': '56006.2'}]
totals = file1 + file2
Result
[{'SERIAL': '124584', 'X': '30024.1'},
{'SERIAL': '114025', 'X': '14006.2'},
{'SERIAL': '344588', 'X': '48024.1'},
{'SERIAL': '255488', 'X': '56006.2'}]
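To finish the job from the question, the combined list can then be written to output.txt (a short sketch using json.dump):
import json

with open('output.txt', 'w') as out:
    json.dump(totals, out, indent=4)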
I have a JSON file
[
{
"api_key": "123123112313121321",
"collaborators_count": 1,
"created_at": "",
"custom_event_fields_used": 0,
"discarded_app_versions": [],
"discarded_errors": [],
"errors_url": "https://api.bugsnag.com/projects/1231231231312/errors",
"events_url": "https://api.bugsnag.com/projects/1231231231213/events",
"global_grouping": [],
"html_url": "https://app.bugsnag.com/lol/kek/",
"id": "34234243224224",
"ignore_old_browsers": true,
"ignored_browser_versions": {},
"is_full_view": true,
"language": "javascript",
"location_grouping": [],
"name": "asdasdaasd",
"open_error_count": 3,
"release_stages": [
"production"
],
"resolve_on_deploy": false,
"slug": "wqeqweqwwqweq",
"type": "js",
"updated_at": "2020-04-06T15:22:10.480Z",
"url": "https://api.bugsnag.com/projects/12312312213123",
"url_whitelist": null
}
]
What I need is to remove all lines apart from "id:" and "name:" and preserve the JSON structure. Can anybody advise a Python or bash script to handle this?
With jq:
$ jq 'map({id: .id, name: .name})' input.json
[
{
"id": "34234243224224",
"name": "asdasdaasd"
}
]
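As an aside, jq's object construction has a shorthand for copying a key under the same name, so map({id, name}) gives the same result.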
Using Python, you could first deserialize the JSON file (a JSON array of objects) with json.load, then keep just the keys you want with a list comprehension:
from json import load
keys = ["name", "id"]
with open("test.json") as json_file:
data = load(json_file)
filtered_json = [{k: obj.get(k) for k in keys} for obj in data]
print(filtered_json)
Output:
[{'name': 'asdasdaasd', 'id': '34234243224224'}]
If we want to serialize this python list to another output file, we can use json.dump:
from json import load
from json import dump
keys = ["name", "id"]
with open("test.json") as json_file, open("output.json", mode="w") as json_output:
data = load(json_file)
filtered_json = [{k: obj.get(k) for k in keys} for obj in data]
dump(filtered_json, json_output, indent=4, sort_keys=True)
output.json
[
{
"id": "34234243224224",
"name": "asdasdaasd"
}
]
You can try this:
import json
with open('<input filename>', 'r') as f:
    data = json.load(f)
new_data = []
for item in data:
    new_item = {key: value for key, value in item.items() if key == "id" or key == "name"}
    new_data.append(new_item)
with open('<output filename>', 'w') as f:
    json.dump(new_data, f)
Convert your JSON into a Pandas DataFrame:
import pandas as pd

df = pd.read_json('<your json file>')
res = df.drop(['url_whitelist', 'api_key'], axis=1)
print(res.to_json())
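As a side note, since the question only needs id and name, selecting those columns directly may be simpler than dropping the rest one by one (a sketch, assuming the same df as above):
res = df[['id', 'name']]
print(res.to_json(orient='records'))  # [{"id": "...", "name": "..."}]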
I have a JSON File which contains some data as below:
{
'count': 2,
'next': '?page=2',
'previous': None,
'results': [
{
'category': 'Triggers',
'id': '783_23058',
'name': 'Covid-19'
},
{
'category': 'Sources',
'id': '426_917746',
'name': 'Covid19Conversations'
}
]
}
I am able to extract the first 'id' and 'name' values as below
Doc_details = dict()
for item in companies:
    doc_id = companies['results'][0]['id']
    doc_name = companies['results'][0]['name']
    Doc_details[doc_name] = doc_id
for key, value in Doc_details.items():
    print(key, value)
Output:
Covid-19 783_23058
I am new to Python. Can someone help me with:
1. Looping through it and extracting all the key, value pairs
2. Saving the results to an Excel file
If you already have the object, you can iterate through companies['results'] using a list comprehension and map the objects to (key, value) pairs.
companies = {
'count': 2,
'next': '?page=2',
'previous': None,
'results': [{
'category': 'Triggers',
'id': '783_23058',
'name': 'Covid-19'
}, {
'category': 'Sources',
'id': '426_917746',
'name': 'Covid19Conversations'
}]
}
pairs = list(map(lambda x: [ x['id'], x['name'] ], companies['results']))
csv = '\n'.join('\t'.join(val for val in pair) for pair in pairs)
print(csv)
Result
783_23058 Covid-19
426_917746 Covid19Conversations
Writing to a file
Convert the list of pairs to a CSV file. See: Writing a Python list of lists to a csv file.
import csv
import csv

# Open in text mode with newline='' (Python 3); binary mode 'wb' would fail here
with open('pairs.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerows(pairs)
If you only want the name, id pairs, you can just do:
for result in companies['results']:
    print(result['name'], result['id'])
# =>
# Covid-19 783_23058
# Covid19Conversations 426_917746
IIUC: you can use the built-in json package to parse the JSON file as a Python dict, and then use the pandas library to write the Excel file.
Try this:
import json
import pandas as pd
from pandas import ExcelWriter
with open("json_file.json", "r") as file:
info = json.load(file) # info contains all key-value pairs
# save to excel
writer = ExcelWriter('excel_file.xlsx')
pd.DataFrame(info["results"]).to_excel(writer, index=False)
writer.save()
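As a side note, for a single sheet you can skip ExcelWriter entirely and pass the path straight to to_excel (assuming an Excel engine such as openpyxl is installed):
pd.DataFrame(info["results"]).to_excel('excel_file.xlsx', index=False)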
Update:
The only issue I have now is that running the command to add a user creates a complete duplicate entry.
Question:
json.dump() simply appends the entry to the end of the JSON file; I want it to overwrite the entire file with the new, updated entry.
Setup: (Create blank "Banks" Field)
with open(DATA_FILENAME, mode='w', encoding='utf-8') as f:
    data = {"banks": []}
    json.dump(data, f)
Set User: (Create a User Key inside "Banks")
member = ctx.message.author
entry = {'name': member.name, 'id': member.id, 'balance': 0}
with open(DATA_FILENAME, 'r+') as outfile:
    data = json.load(outfile)
    data['banks'].append(entry)
    json.dump(data, outfile, indent=4)
Output of first use:
{"banks": []}{
"banks": [
{
"name": "ViperZ-14",
"id": 367151547575959562,
"balance": 0
}
]
}
What I need:
{
"banks": [
{
"name": "ViperZ-14",
"id": 367151547575959562,
"balance": 0
}
]
}
file_path = '/home/vishnudev/Downloads/new.json'

import json

def load(file, mode, data=[]):
    with open(file, mode) as f:
        if mode == 'r':
            return json.load(f)
        elif mode == 'w':
            json.dump(data, f)

def get_data_func():
    return {
        'name': 'vishnu',
        'data': 'dev'
    }

d = load(file_path, 'r')
print(d)
d.append(get_data_func())
load(file_path, 'w', d)
d = load(file_path, 'r')
print(d)
Output:
On running the above twice I get
[{'name': 'vishnu', 'data': 'dev'}]
[{'name': 'vishnu', 'data': 'dev'}, {'name': 'vishnu', 'data': 'dev'}]
I have found that the solution was to simply seek to the beginning of the file. json.dump() does overwrite, but it only overwrites what is in its way; seeking to the top of the file before dumping makes the new entry replace the old document.
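A minimal sketch of that fix applied to the set-user snippet above; the truncate() call is my own addition, as a guard in case the new JSON is shorter than what was previously in the file:
with open(DATA_FILENAME, 'r+') as outfile:
    data = json.load(outfile)
    data['banks'].append(entry)
    outfile.seek(0)  # rewind so json.dump overwrites from the top of the file
    json.dump(data, outfile, indent=4)
    outfile.truncate()  # drop any leftover bytes from the old, longer content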