Writing to a JSON file and updating said file - Python

I have the following code that will write to a JSON file:
import json
def write_data_to_table(word, hash):
    data = {word: hash}
    with open("rainbow_table\\rainbow.json", "a+") as table:
        table.write(json.dumps(data))
What I want to do is open the JSON file, add another line to it, and close it. How can I do this without messing with the file?
As of right now when I run the code I get the following:
write_data_to_table("test1", "0123456789")
write_data_to_table("test2", "00123456789")
write_data_to_table("test3", "000123456789")
#<= {"test1": "0123456789"}{"test2": "00123456789"}{"test3": "000123456789"}
How can I update the file without completely screwing with it?
My expected output would probably be something along the lines of:
{
"test1": "0123456789",
"test2": "00123456789",
"test3": "000123456789",
}

You can read the JSON data with:
parsed_json = json.loads(json_string)
You can then manipulate it as a regular dictionary and add data with:
parsed_json.update({'test4': '0000123456789'})
Then you can write the data back to a file using:
with open('data.txt', 'w') as outfile:
    json.dump(parsed_json, outfile)
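Applied to the function from the question, a minimal read-update-rewrite sketch (assuming rainbow.json either does not exist yet or already holds a single JSON object) could look like this:
import json
import os

def write_data_to_table(word, hash):
    path = "rainbow_table\\rainbow.json"
    # load the existing table if the file is already there, otherwise start fresh
    if os.path.exists(path):
        with open(path) as table:
            data = json.load(table)
    else:
        data = {}
    data[word] = hash
    # rewrite the whole file so it stays a single valid JSON object
    with open(path, "w") as table:
        json.dump(data, table, indent=4)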

If you are sure the closing "}" is the last byte in the file, you can patch the file in place. Note that in Python 3 this needs the file opened in binary mode, since text-mode files don't allow end-relative seeks:
>>> import json
>>> with open('test.json', 'w') as f:
...     json.dump({"foo": "bar"}, f)   # create the file
...
>>> f = open('test.json', 'rb+')
>>> f.read()
b'{"foo": "bar"}'
>>> f.seek(-1, 2)   # position just before the closing "}"
13
>>> f.write(b',\n' + json.dumps({"spam": "bacon"})[1:].encode())
18
>>> f.seek(0)
0
>>> print(f.read().decode())
{"foo": "bar",
"spam": "bacon"}
Since your data is not hierarchical, you might instead consider a flat format such as TSV.
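As a rough sketch of that idea (the .tsv file name and column order here are just assumptions), each word/hash pair becomes one tab-separated line, which makes appending trivial:
import csv

def append_to_table(word, hash):
    # "a" mode appends one tab-separated row per call
    with open("rainbow_table\\rainbow.tsv", "a", newline="") as table:
        csv.writer(table, delimiter="\t").writerow([word, hash])

def load_table():
    with open("rainbow_table\\rainbow.tsv", newline="") as table:
        return dict(csv.reader(table, delimiter="\t"))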

Related

How to delete everything inside an object in a json file but keep the object?

I want to delete everything inside the object "names" in the JSON file example below but keep the object itself; in simple words, I want to clear the object.
{
    "names": [
        {
            "player": "Player_Name",
            "TB:": "12389",
            "BW:": "596",
            "SW:": "28",
            "CQ:": "20"
        }
    ]
}
I tried this code:
with open('players.json', 'w') as w:
    with open('players.json', 'r') as r:
        for line in r:
            element = json.loads(line.strip())
            if 'names' in element:
                del element['names']
            w.write(json.dumps(element))
but it just clears the whole JSON file.
Sorry for my bad English.
The problem is that you open the same file twice, for reading and for writing, at the same time; opening it in 'w' mode truncates it immediately. Also, a JSON document cannot be parsed line by line, only as a whole.
import json

# 1. read
with open('players.json', 'r') as r:
    data = json.load(r)

# 2. modify
# (you might want to check that data is a dict first)
data['names'] = []

# 3. write
with open('players.json', 'w') as w:
    json.dump(data, w)
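If the script runs successfully, players.json then contains the emptied list while keeping the key itself:
{"names": []}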

How can I replace \" with '

I have the following content:
{
"z":"[{\"ItemId\":\"1234\",\"a\":\"1234\",\"b\":\"4567\",\"c\":\"d\"}]"
}
This is part of the JSON response I get from a certain API. I need to replace the \"s with 's. Unfortunately, that's where I got stuck!
Most of the answers I found simply replace the \ with "" or " ", so they did not help me. My questions are the following:
How can I replace the \" with ':
in a file where I copy-pasted the content?
if I receive this as a response to a certain API call?
I tried the following to replace the content in a file but I am clearly only replacing the "s with ':
with open(file, "r") as f:
    content = f.read()

new_content = content.replace("\"", "'")

with open(file, "w") as new_file:
    new_file.write(new_content)
If what you're trying to do is transform each value from a JSON string to a Python repr() string, while keeping the wrapper format as JSON, that might look like:
with open(filename, "r") as old_file:
    old_content = json.load(old_file)

new_content = {k: repr(json.loads(v)) for k, v in old_content.items()}

with open(filename, "w") as new_file:
    json.dump(new_content, new_file)
If your old file contains:
{"z":"[{\"ItemId\":\"1234\",\"a\":\"1234\",\"b\":\"4567\",\"c\":\"d\"}]"}
...the new file will contain:
{"z": "[{'ItemId': '1234', 'a': '1234', 'b': '4567', 'c': 'd'}]"}
Note that in this new file, the inner fields are now in Python format, not JSON format; they can no longer be parsed by JSON parsers. Usually, I would suggest doing something different instead, as in:
with open(filename, "r") as old_file:
    old_content = json.load(old_file)

new_content = {k: json.loads(v) for k, v in old_content.items()}

with open(filename, "w") as new_file:
    json.dump(new_content, new_file)
...which would yield an output file with:
{"z": [{"ItemId": "1234", "a": "1234", "b": "4567", "c": "d"}]}
...which is both easy-to-read and easy to process with standard JSON-centric tools (jq, etc).
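For example, once the file has that second form, a single json.load is enough to reach the nested fields (a small sketch reusing the filename placeholder from above):
import json

with open(filename) as f:
    data = json.load(f)

print(data["z"][0]["ItemId"])  # -> '1234', no second parsing step needed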
Using the json module, you can dump the data and then load it back as follows:
import json

data = {
    "z": "[{\"ItemId\":\"1234\",\"a\":\"1234\",\"b\":\"4567\",\"c\":\"d\"}]"
}
g = json.dumps(data)
c = json.loads(g)
print(c)
print(str(c).replace("\"", "'"))
Output:
{'z': '[{"ItemId":"1234","a":"1234","b":"4567","c":"d"}]'}
{'z': '[{'ItemId':'1234','a':'1234','b':'4567','c':'d'}]'}
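If the data arrives as an API response rather than a file, the same parsing idea applies. A minimal sketch, assuming the requests library and a hypothetical endpoint URL:
import json
import requests

resp = requests.get("https://example.com/api")  # hypothetical endpoint
payload = resp.json()              # outer JSON -> Python dict
items = json.loads(payload["z"])   # inner JSON string -> list of dicts
print(items[0]["ItemId"])          # -> '1234'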

How to write human-readable data to a JSON file

When I export data from Python to a JSON file, it contains characters like:
{"-": "text", "menu": {"-": "node", "id": 2244676, "prev": "[2/40] \u0d2a\u0d4d\u0d30\u0d2f\u0d4b\u0d1c\u0d15 \u0d15\u0d4d\u0d30\u0d3f\u0d2f
I used
with open('messages.json', 'w') as outfile:
    json.dump(all_messages, outfile, cls=DateTimeEncoder)
in Python. How do I convert it to normal Unicode text?
If you want the output JSON to be human-readable, use UTF-8 encoding and the ensure_ascii=False parameter:
with open('messages.json', 'w', encoding='utf8') as outfile:
    json.dump(all_messages, outfile, cls=DateTimeEncoder, ensure_ascii=False)
If you just want to read the data back in again, json.load will convert it back to Unicode:
with open('messages.json', encoding='utf8') as infile:
    data = json.load(infile)
Examples with simple strings:
>>> s = '[2/40] പ്രയോജക ക്രിയ'
>>> print(json.dumps(s))
"[2/40] \u0d2a\u0d4d\u0d30\u0d2f\u0d4b\u0d1c\u0d15 \u0d15\u0d4d\u0d30\u0d3f\u0d2f"
>>> print(json.dumps(s,ensure_ascii=False))
"[2/40] പ്രയോജക ക്രിയ"
>>> out = json.dumps(s)
>>> out
'"[2/40] \\u0d2a\\u0d4d\\u0d30\\u0d2f\\u0d4b\\u0d1c\\u0d15 \\u0d15\\u0d4d\\u0d30\\u0d3f\\u0d2f"'
>>> json.loads(out)
'[2/40] പ്രയോജക ക്രിയ'
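If "human-readable" should also mean nicely formatted, ensure_ascii=False can be combined with indent (a small sketch reusing the names from the question):
with open('messages.json', 'w', encoding='utf8') as outfile:
    json.dump(all_messages, outfile, cls=DateTimeEncoder,
              ensure_ascii=False, indent=2)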

Python file input (write mode) issue with JSON

I'm learning Python and I'm following official documentation from:
Section: 7.2.2. Saving structured data with json for Python 3
I'm testing the json.dump() function to dump my Python dict into a file object:
>>> response = {"success": True, "data": ["test", "array", "response"]}
>>> response
{'success': True, 'data': ['test', 'array', 'response']}
>>> import json
>>> json.dumps(response)
'{"success": true, "data": ["test", "array", "response"]}'
>>> f = open('testfile.txt', 'w', encoding='UTF-8')
>>> f
<_io.TextIOWrapper name='testfile.txt' mode='w' encoding='UTF-8'>
>>> json.dump(response, f)
The file testfile.txt already exists in my working directory, and even if it didn't, the statement f = open('testfile.txt', 'w', encoding='UTF-8') would have re-created it, truncated.
The json.dumps(response) call converts my response dict into a valid JSON string, so that's fine.
The problem is when I use json.dump(response, f): it is supposed to write to my testfile.txt, but the file stays truncated (empty).
I've managed to work around it like this:
>>> f = open('testfile.txt', 'w', encoding='UTF-8')
>>> f.write(json.dumps(response));
56
>>>
After which the contents of my testfile.txt become as expected:
{"success": true, "data": ["test", "array", "response"]}
Even, this approach works too:
>>> json.dump(response, open('testfile.txt', 'w', encoding='UTF-8'))
Why does this approach fail?
>>> f = open('testfile.txt', 'w', encoding='UTF-8')
>>> json.dump(response, f)
Note that I don't get any errors from the console; just a truncated file.
It looks like you aren't closing the file (or exiting the interactive prompt) before checking it, so the output is still sitting in the write buffer. Close the file to flush it:
f.close()
It will close if you exit the interactive prompt as well.
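A common way to avoid the problem entirely is to let a with block close, and therefore flush, the file for you:
import json

response = {"success": True, "data": ["test", "array", "response"]}
with open('testfile.txt', 'w', encoding='UTF-8') as f:
    json.dump(response, f)  # flushed and closed automatically when the block exits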

Python read JSON file and modify

Hi, I am trying to take the data from a JSON file, insert an id, and then perform a REST POST.
My file data.json has:
{
'name':'myname'
}
and I would like to add an id so that the json data looks like:
{
'id': 134,
'name': 'myname'
}
So I tried:
import json
f = open("data.json","r")
data = f.read()
jsonObj = json.loads(data)
I can't get the JSON-formatted file to load.
What should I do so that I can convert the JSON file into a JSON object and add another id value?
Set item using data['id'] = ....
import json

with open('data.json', 'r+') as f:
    data = json.load(f)
    data['id'] = 134  # <--- add `id` value.
    f.seek(0)  # <--- should reset file position to the beginning.
    json.dump(data, f, indent=4)
    f.truncate()  # remove remaining part
falsetru's solution is nice, but has a little bug if the final f.truncate() is omitted:
Suppose the original 'id' was longer than the new one. When we then dump the new 'id' (134, only 3 characters), the string written from position 0 in the file is shorter than the original content, so extra characters (such as '}') from the original content are left at the end of the file.
I solved that by replacing the original file.
import json
import os

filename = 'data.json'

with open(filename, 'r') as f:
    data = json.load(f)
    data['id'] = 134  # <--- add `id` value.

os.remove(filename)
with open(filename, 'w') as f:
    json.dump(data, f, indent=4)
I would like to present a modified version of Vadim's solution. It helps to deal with asynchronous requests that write to or modify the JSON file. I know it wasn't part of the original question, but it might be helpful for others.
In the case of asynchronous file modification, os.remove(filename) will raise FileNotFoundError if requests come in frequently. To overcome this problem, you can create a temporary file with the modified content and then rename it, atomically replacing the old version. This solution works fine both for synchronous and asynchronous cases.
import os, json, uuid

filename = 'data.json'

with open(filename, 'r') as f:
    data = json.load(f)
    data['id'] = 134  # <--- add `id` value.
    # add, remove, modify content

# create randomly named temporary file to avoid
# interference with other thread/asynchronous request
tempfile = os.path.join(os.path.dirname(filename), str(uuid.uuid4()))
with open(tempfile, 'w') as f:
    json.dump(data, f, indent=4)

# rename temporary file replacing old file
os.rename(tempfile, filename)
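A small refinement worth considering: os.replace behaves like os.rename but also overwrites an existing destination on Windows, where os.rename would raise an error, so it is often the safer cross-platform choice for the final swap:
os.replace(tempfile, filename)  # atomic replacement; unlike os.rename, it overwrites on Windows too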
There are really quite a number of ways to do this, and all of the above are in one way or another valid approaches... Let me add a straightforward proposition. So, assuming your current JSON file looks like this:
{
"name":"myname"
}
And you want to bring in this new json content (adding key "id")
{
"id": "134",
"name": "myname"
}
My approach has always been to keep the code extremely readable with easily traceable logic. So first, we read the entire existing json file into memory, assuming you are very well aware of your json's existing key(s).
import json

# first, get the absolute path to json file
PATH_TO_JSON = 'data.json'  # assuming same directory (but you can work your magic here with os.)

# read existing json to memory. you do this to preserve whatever existing data.
with open(PATH_TO_JSON, 'r') as jsonfile:
    json_content = json.load(jsonfile)  # this is now in memory! you can use it outside 'open'
Next, we use the 'with open()' syntax again, this time with the 'w' option. 'w' is a write mode which lets us edit and write new information to the file. Here's the catch that works for us: any existing JSON file with the same target name will be overwritten automatically.
So what we can do now is simply write the new data to the same filename:
# add the id key-value pair (remember that it already has the "name" key-value pair)
json_content["id"] = "134"

with open(PATH_TO_JSON, 'w') as jsonfile:
    json.dump(json_content, jsonfile, indent=4)  # you decide the indentation level
And there you go!
data.json should be good to go for a good old POST request.
Try this script:
import json

with open("data.json") as f:
    data = json.load(f)
data["id"] = 134
json.dump(data, open("data.json", "w"), indent=4)
the result is:
{
"name":"mynamme",
"id":134
}
Only the arrangement (key order) is different. You can solve that by converting the data dict to a list, arranging it as you wish, converting it back, and saving the file, like this:
index_add = 0
with open("data.json") as f:
    data = json.load(f)
data_li = [[k, v] for k, v in data.items()]
data_li.insert(index_add, ["id", 134])
data = {data_li[i][0]: data_li[i][1] for i in range(0, len(data_li))}
json.dump(data, open("data.json", "w"), indent=4)
the result is:
{
"id":134,
"name":"myname"
}
You can add an if condition in order not to duplicate the key, but just change its value, like this:
index_add = 0
n_k = "id"
n_v = 134
with open("data.json") as f:
    data = json.load(f)
if n_k in data:
    data[n_k] = n_v
else:
    data_li = [[k, v] for k, v in data.items()]
    data_li.insert(index_add, [n_k, n_v])
    data = {data_li[i][0]: data_li[i][1] for i in range(0, len(data_li))}
json.dump(data, open("data.json", "w"), indent=4)
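As a side note, on Python 3.7+ (where dicts preserve insertion order) the same reordering can be written more compactly with dict unpacking; a small sketch of that variant:
import json

with open("data.json") as f:
    data = json.load(f)

data.pop("id", None)        # drop any existing "id" so the new value wins
data = {"id": 134, **data}  # re-insert it at the front
with open("data.json", "w") as f:
    json.dump(data, f, indent=4)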
This implementation should suffice:
with open(jsonfile, 'r') as file:
    data = json.load(file)
data['id'] = value  # `jsonfile` and `value` are placeholders for your file path and new value
with open(jsonfile, 'w') as file:
    json.dump(data, file)
A context manager is used for opening the jsonfile. data holds the updated object, which is then dumped into the overwritten jsonfile in 'w' mode.
Not exactly your problem, but this might help some people solving this issue with keys.
I had a list of files in a folder and needed to make JSON out of it with keys.
After many hours of trying, the solution is simple.
Solution:
import os

async def return_file_names():
    dir_list = os.listdir("./tmp/")
    json_dict = {"responseObj": [{"Key": dir_list.index(value), "Value": value} for value in dir_list]}
    print(json_dict)
    return json_dict
The response looks like this:
{
    "responseObj": [
        {
            "Key": 0,
            "Value": "bottom_mask.GBS"
        },
        {
            "Key": 1,
            "Value": "bottom_copper.GBL"
        },
        {
            "Key": 2,
            "Value": "copper.GTL"
        },
        {
            "Key": 3,
            "Value": "soldermask.GTS"
        },
        {
            "Key": 4,
            "Value": "ncdrill.DRD"
        },
        {
            "Key": 5,
            "Value": "silkscreen.GTO"
        }
    ]
}
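A small aside on the list comprehension above: dir_list.index(value) rescans the list for every entry, which is quadratic overall, while enumerate gives each index directly. A sketch of that variant:
json_dict = {
    "responseObj": [{"Key": i, "Value": name} for i, name in enumerate(dir_list)]
}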
