Update:
The only issue I have now is that running the command to add a user creates a completely duplicated key.
Question:
json.dump() simply appends the data to the end of the JSON file; I want it to overwrite the entire file with the new, updated entry.
Setup: (Create blank "Banks" Field)
with open(DATA_FILENAME, mode='w', encoding='utf-8') as f:
    data = {"banks": []}
    json.dump(data, f)
Set User: (Create a User Key inside "Banks")
member = ctx.message.author
entry = {'name': member.name, 'id': member.id, 'balance': 0}
with open(DATA_FILENAME, 'r+') as outfile:
    data = json.load(outfile)
    data['banks'].append(entry)
    json.dump(data, outfile, indent=4)
Output of first use:
{"banks": []}{
"banks": [
{
"name": "ViperZ-14",
"id": 367151547575959562,
"balance": 0
}
]
}
What I need:
{
    "banks": [
        {
            "name": "ViperZ-14",
            "id": 367151547575959562,
            "balance": 0
        }
    ]
}
file_path = '/home/vishnudev/Downloads/new.json'
import json
def load(file, mode, data=[]):
    with open(file, mode) as f:
        if mode == 'r':
            return json.load(f)
        elif mode == 'w':
            json.dump(data, f)

def get_data_func():
    return {
        'name': 'vishnu',
        'data': 'dev'
    }
d = load(file_path, 'r')
print(d)
d.append(get_data_func())
load(file_path, 'w', d)
d = load(file_path, 'r')
print(d)
Output:
On running the above twice I get
[{'name': 'vishnu', 'data': 'dev'}]
[{'name': 'vishnu', 'data': 'dev'}, {'name': 'vishnu', 'data': 'dev'}]
I have found that the solution is to simply seek to the beginning of the file. json.dump() does overwrite, but it only overwrites what is in its way: after json.load() the cursor sits at the end of the file, so the dump gets appended there. Seeking back to the top of the file before dumping makes the new data overwrite the old content instead.
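A minimal sketch of that approach, reusing DATA_FILENAME and entry from the snippets above (both assumed to be defined as shown there), and adding a truncate() call in case the new JSON is shorter than what was already in the file:

import json

with open(DATA_FILENAME, 'r+', encoding='utf-8') as outfile:
    data = json.load(outfile)           # after this, the cursor sits at the end of the file
    data['banks'].append(entry)
    outfile.seek(0)                     # move the cursor back to the start
    json.dump(data, outfile, indent=4)  # overwrite from the top
    outfile.truncate()                  # drop any leftover bytes from the old content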
Related
I have a .json file structured as follows:
"[{\"dataset\": \"x0\", \"test\": \"Test 3 \", \"results\": {\"TP\": 0, \"FP\": 0, \"FN\": 0, \"TN\": 17536}, \"dir\": \"/Users//Test_3\"}]"
When I try to read it with the following code:
with open(dir, 'r+') as f:
    data = json.load(f)
    print(data[0])
I get [ as output, which means it is reading the json object as a string.
I do not understand whether the problem is how I'm saving it. Since I populate it in a loop, the code that creates this object is the following:
json_obj = []
for i in range(len(dictionary)):
    dataset, test, dir = retrieve_data()
    tp, fp, tn, fn = calculate_score()
    json_obj.append({'dataset': dataset,
                     'test': test,
                     'results': {'TP': tp, 'FP': fp, 'FN': fn, 'TN': tn},
                     'dir': dir})

json_dump = json.dumps(json_obj)
with open(save_folder, 'w') as outfile:
    json.dump(json_dump, outfile)
The structure I tried to create is the following one:
{
    "dataset": "1",
    "test": "trial1",
    "results": {
        "TP": 5,
        "FP": 3,
        "FN": 2,
        "TN": 5
    },
    "dir": dir
}
How can I read it correctly to make it parsable?
You are converting json_obj to a string and then dumping the string to a file. Dump json_obj directly to the file:
# json_dump = json.dumps(json_obj)
with open(save_folder, 'w') as outfile:
    json.dump(json_obj, outfile)
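If a file has already been written with the double-encoded string, one way to recover it (a sketch that assumes the path is still in the dir variable, as in the question) is to decode twice, since the first load only returns the inner string:

import json

with open(dir, 'r') as f:
    data = json.loads(json.load(f))  # json.load returns the string, json.loads parses it

print(data[0]['dataset'])  # now prints "x0" instead of "["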
import os, json, time

path = "C:\\Users\\Marius\\Desktop\\homework"  # define the path
with os.scandir(path) as listOfEntries:
    for item in listOfEntries:
        data = {}
        if item.is_file():
            filename_ext = os.path.splitext(item)
            size = os.path.getsize(item)
            creation = time.ctime(os.path.getctime(item))
            extension = os.path.splitext(os.path.basename(item))[1]
            if filename_ext not in data:
                data[item.path] = {'name': item.name, 'path': path, 'extension': extension, 'creation': creation, 'size': size}
        print(data)
        j_data = json.dumps(data, indent=4)
        with open('files.json', 'w') as f:
            json.dump(data, f, indent=4)
I cannot figure it out. Any help would be appreciated.
I tried to write a Python script to generate the results I need, but I need them written to an output JSON file.
I want the results to be written to files.json, something like the following, but with all the files from that specific folder:
{
    "C:\\Users\\Marius\\Desktop\\homework\\test.py": {
        "name": "test.py",
        "path": "C:\\Users\\Marius\\Desktop\\homework",
        "extension": ".py",
        "creation": "Sat Dec 26 08:39:59 2020",
        "size": 733
    }
}
Solved with:
import os, json, time

path = "C:\\Users\\Marius\\Desktop\\homework"
data = {}
with os.scandir(path) as listOfEntries:
    for item in listOfEntries:
        filename_ext = os.path.splitext(item)
        size = os.path.getsize(item)
        creation = time.ctime(os.path.getctime(item))
        extension = os.path.splitext(os.path.basename(item))[1]
        if item.is_file():
            if filename_ext not in data:
                data[item.path] = {'name': item.name, 'path': path, 'extension': extension, 'creation': creation, 'size': size}

print(data)
j_data = json.dumps(data, indent=4)
with open('files.json', 'w') as f:
    json.dump(data, f, indent=4)
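An equivalent sketch using pathlib instead of os.scandir, shown only as an alternative; it assumes the same folder and the same files.json output name:

import json, time
from pathlib import Path

path = Path("C:/Users/Marius/Desktop/homework")
data = {
    str(p): {
        'name': p.name,
        'path': str(p.parent),
        'extension': p.suffix,
        'creation': time.ctime(p.stat().st_ctime),
        'size': p.stat().st_size,
    }
    for p in path.iterdir() if p.is_file()  # one entry per regular file in the folder
}

with open('files.json', 'w') as f:
    json.dump(data, f, indent=4)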
I have a concatenated CSV file that I am attempting to output in JSON format. How should I go about implementing the logic so that a CSV row only gets converted to a JSON object when all of its fields have a value?
import glob, os
import pandas as pd
import json
import csv

with open('some.csv', 'r', newline='') as csvfile, \
     open('output.json', 'w') as jsonfile:
    for row in csv.DictReader(csvfile):
        restructured = {
            'STATION_CODE': row['STORE_CODE'],
            'id': row['ARTICLE_ID'],
            'name': row['ITEM_NAME'],
            'data':
            {
                # fieldname: value for (fieldname, value) in row.items()
                'STORE_CODE': row['STORE_CODE'],
                'ARTICLE_ID': row['ARTICLE_ID'],
                'ITEM_NAME': row['ITEM_NAME'],
                'BARCODE': row['BARCODE'],
                'SALE_PRICE': row['SALE_PRICE'],
                'LIST_PRICE': row['LIST_PRICE'],
                'UNIT_PRICE': row['UNIT_PRICE'],
            }
        }
        json.dump(restructured, jsonfile, indent=4)
        jsonfile.write('\n')
Currently this writes every row from the CSV file into the JSON output, including rows with empty fields, which is unintended behavior. Any input on how to correct this?
First, loop through all the rows of the CSV and add each one to a list. If any field in a row is empty, that row is ignored. Once all the rows are in the list, write it out to the JSON file.
import json
import csv

csvjsonarr = []
with open('some.csv', 'r', newline='') as csvfile:
    for row in csv.DictReader(csvfile):
        hasemptyvalues = False
        for rowidx in row:
            if row[rowidx] == "":
                hasemptyvalues = True
                break
        if hasemptyvalues:
            continue
        restructured = {
            'STATION_CODE': row['STORE_CODE'],
            'id': row['ARTICLE_ID'],
            'name': row['ITEM_NAME'],
            'data': {
                'STORE_CODE': row['STORE_CODE'],
                'ARTICLE_ID': row['ARTICLE_ID'],
                'ITEM_NAME': row['ITEM_NAME'],
                'BARCODE': row['BARCODE'],
                'SALE_PRICE': row['SALE_PRICE'],
                'LIST_PRICE': row['LIST_PRICE'],
                'UNIT_PRICE': row['UNIT_PRICE'],
            }
        }
        csvjsonarr.append(restructured)

if len(csvjsonarr) > 0:
    with open('output.json', 'w') as jsonfile:
        json.dump(csvjsonarr, jsonfile, indent=4)
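The same per-row filter can be written more compactly with all(), since csv.DictReader yields strings and an empty string is falsy; this is just a shorter form of the hasemptyvalues loop above:

for row in csv.DictReader(csvfile):
    if not all(row.values()):  # any blank field makes all() return False, so skip the row
        continue
    # ... build restructured and append it as before ...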
Code (including the JSON file):
Python (supposed to append a new user and balance):
import json

with open('users_balance.json', 'r') as file:
    data = json.load(file)['user_list']

data['user_list']
data.append({"user": "sdfsd", "balance": 40323420})

with open('users_balance.json', 'w') as file:
    json.dump(data, file, indent=2)
JSON (the object the code is appending to):
{
  "user_list": [
    {
      "user": "<#!672986823185661955>",
      "balance": 400
    },
    {
      "user": "<#!737747404048171043>",
      "balance": 500
    }
  ],
}
Error (traceback raised after executing the code):
data = json.load(file)['user_list']
KeyError: 'user_list'
The solution is to load the whole object, append to the nested user_list, and write the whole object back:
import json

with open('users_balance.json', 'r') as file:
    data = json.load(file)

data['user_list'].append({"user": "sdfsd", "balance": 40323420})

with open('users_balance.json', 'w') as file:
    json.dump(data, file, indent=2)
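A quick way to confirm the fix (assuming the same users_balance.json) is to read the file back after the append; the top-level object is still a dict with a user_list key, so repeated runs keep working:

import json

with open('users_balance.json', 'r') as file:
    data = json.load(file)

print(data['user_list'][-1])  # the newly appended entry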
I have a dictionary in a file and want to print the name values from the file.
di = {'elk': [{'url_1': 'localhost:8080/api/running',
               'url_2': 'localhost:8080/api/',
               'name': 'cat',
               'method': 'GET'}],
      'a': [{'url_1': 'localhost:8080/api/running',
             'url_2': 'localhost:8080/api/',
             'name': 'mouse',
             'method': 'GET'}]}
# Read the file
import os

with open('g.txt', 'r') as fh:
    fh_n = fh.read()

# Save the names into a list
test = []
for k, v in di.items():
    test.append(v[0]['name'])

test
['cat', 'mouse']
import ast

with open('g.txt', 'r') as fh:
    fh_n = fh.read()

# First split off the "di =" prefix, then convert the remaining string into a dictionary
data = ast.literal_eval(fh_n.split("=")[1].strip())
# or, if the "di =" prefix is removed from the text file:
# data = ast.literal_eval(fh_n)

name = [i[0]['name'] for i in data.values()]
print(name)
O/P:
['cat', 'mouse']
OR
convert the text file data into a JSON file:
g.json file
[{
    "di": {
        "elk": [
            {
                "url_1": "localhost:8080/api/running",
                "url_2": "localhost:8080/api/",
                "name": "cat",
                "method": "GET"
            }
        ],
        "a": [
            {
                "url_1": "localhost:8080/api/running",
                "url_2": "localhost:8080/api/",
                "name": "mouse",
                "method": "GET"
            }
        ]
    }
}]
.py file
import json

with open('g.json') as fh:
    data = json.load(fh)

name = [i[0]['name'] for i in data[0]['di'].values()]
print(name)
O/P:
['cat', 'mouse']
You can use json to get your result:
di = {'elk': [{'url_1': 'localhost:8080/api/running',
               'url_2': 'localhost:8080/api/',
               'name': 'cat',
               'method': 'GET'}],
      'a': [{'url_1': 'localhost:8080/api/running',
             'url_2': 'localhost:8080/api/',
             'name': 'mouse',
             'method': 'GET'}]}
import json
file = open('g.json', 'w')
json.dump(di, file) # Saving di into g.json file
file.close()
file_open = open('g.json', 'r+')
my_di = json.load(file_open) # Loading the saved g.json file
file_open.close()
print(type(my_di))
print(my_di)
I hope it may help you.
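To tie this back to the goal of printing the name values, the reloaded my_di can be queried the same way as in the earlier answers (a small usage sketch):

names = [v[0]['name'] for v in my_di.values()]
print(names)  # ['cat', 'mouse']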