Appending date and time stamp while using json.dump? - python

Hi, I am currently dumping the image matrix output per frame in the following manner:
with open(os.path.join('logs', 'frame_{0}_keypoints.json'.format(str(frame_number).zfill(12))).format(frame_dict), 'w') as outfile:
    json.dump(my_dict, outfile)
With this I get one JSON file per frame, named like frame_000000000001_keypoints. Is there a way to append a timestamp to these logs, for example 20190607T220005 (year, month, day, then hours, minutes and seconds)?

My answer assumes you want to add a timestamp to my_dict itself.
import json
import time

class TimeStampAdderEncoder(json.JSONEncoder):
    def encode(self, obj):
        # add the current Unix timestamp before encoding
        obj['timestamp'] = time.time()
        return json.JSONEncoder().encode(obj)

my_dict = {'key': 3}
my_dict_as_str = json.dumps(my_dict, cls=TimeStampAdderEncoder)
print(my_dict_as_str)
Output:
{"key": 3, "timestamp": 1561542873.109698}

If you want the output .json file name to use the format frame_20190607T220005_keypoints, you can use:
import datetime
import os

now = datetime.datetime.now()
now_isoFormat = now.isoformat()  # e.g. 2019-06-26T09:20:30.943730
# zero-pad each field so the stamp matches the desired YYYYMMDDTHHMMSS layout
now_custom_isoFormat = "{YEAR}{MONTH:02d}{DAY:02d}T{HOUR:02d}{MINUTES:02d}{SECONDS:02d}".format(
    YEAR=now.year,
    MONTH=now.month,
    DAY=now.day,
    HOUR=now.hour,
    MINUTES=now.minute,
    SECONDS=now.second)  # e.g. 20190626T092030
with open(os.path.join('logs', 'frame_{0}_keypoints.json'.format(now_custom_isoFormat)), 'w') as outfile:
    json.dump(my_dict, outfile)
If you wish to keep the frame number as well:
with open(os.path.join('logs', 'frame_{date}_keypoints_{frame}.json'
                       .format(date=now_custom_isoFormat,
                               frame=str(frame_number).zfill(12))), 'w') as outfile:
    json.dump(my_dict, outfile)
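For what it's worth, datetime.strftime can build the same zero-padded stamp in a single call; a minimal sketch:
import datetime

now_custom_isoFormat = datetime.datetime.now().strftime("%Y%m%dT%H%M%S")  # e.g. 20190607T220005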

Related

create and append data in json format to json file - python

How can I create an empty JSON file and append each record to it in the following format?
[
{"name":"alan","job":"clerk"},
{"name":"bob","job":"engineer"}
]
Code
import json

with open("test.json", mode='w', encoding='utf-8') as f:
    json.dump([], f)

test_data = ['{"name":"alan","job":"clerk"}', '{"name":"bob","job":"engineer"}']
for i in test_data:
    with open("test.json", mode='w', encoding='utf-8') as fileobj:
        json.dump(i, fileobj)
How can this be done efficiently?
You can't modify the JSON content in the file like that. You'll need to modify the data structure in memory and then completely rewrite the JSON file. You might be able to just read the data from the JSON file at startup and write it back at shutdown.
import json

def store_my_data(data, filename='test.json'):
    """ write data to json file """
    with open(filename, mode='w', encoding='utf-8') as f:
        json.dump(data, f)

def load_my_data(filename='test.json'):
    """ load data from json file """
    with open(filename, mode='r', encoding='utf-8') as f:
        return json.load(f)

raise Exception  # skipping some steps here

test_data = [
    {"name": "alan", "job": "clerk"},
    {"name": "bob", "job": "engineer"}
]
item_one = test_data[0]
item_two = test_data[1]

# You already know how to store data in a json file.
store_my_data(test_data)

# Suppose you don't have any data at the start.
current_data = []
store_my_data(current_data)

# Later, you want to add to the data.
# You will have to change your data in memory,
# then completely rewrite the file.
current_data.append(item_one)
current_data.append(item_two)
store_my_data(current_data)
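If a reusable "append one record" step is what's wanted, the two helpers above can be combined; a minimal sketch reusing store_my_data/load_my_data from this answer:
def append_my_data(item, filename='test.json'):
    """ load the existing list, append one item, then rewrite the file """
    data = load_my_data(filename)
    data.append(item)
    store_my_data(data, filename)

append_my_data({"name": "carol", "job": "analyst"})  # example record, for illustration only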

Parse JSON to CSV + additional columns

I'm attempting to parse a JSON file with the following syntax into CSV:
{"code":2000,"message":"SUCCESS","data":
{"1":
{"id":1,
"name":"first_name",
"icon":"url.png",
"attribute1":"value",
"attribute2":"value" ...},
"2":
{"id":2,
"name":"first_name",
"icon":"url.png",
"attribute1":"value",
"attribute2":"value" ...},
"3":
{"id":3,
"name":"first_name",
"icon":"url.png",
"attribute1":"value",
"attribute2":"value" ...}, and so forth
}}}
I have found similar questions (e.g. here and here) and I am working with the following method:
import requests
import json
import csv
import os

jsonfile = "/path/to.json"
csvfile = "/path/to.csv"

with open(jsonfile) as json_file:
    data = json.load(json_file)

data_file = open(csvfile, 'w')
csvwriter = csv.writer(data_file)
csvwriter.writerow(data["data"].keys())
for row in data:
    csvwriter.writerow(row["data"].values())
data_file.close()
but I am missing something.
I get this error when I try to run:
TypeError: string indices must be integers
and my csv output is:
1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,96
At the end of the day, I am trying to convert the following PowerShell function to Python. It converted the JSON to CSV and added 3 additional custom columns at the end:
$json = wget $lvl | ConvertFrom-Json
$json.data | %{$_.psobject.properties.value} `
| select-object *,#{Name='Custom1';Expression={$m}},#{Name='Level';Expression={$l}},#{Name='Custom2';Expression={$a}},#{Name='Custom3';Expression={$r}} `
| Export-CSV -path $outfile
The output looks like:
"id","name","icon","attribute1","attribute2",..."Custom1","Custom2","Custom3"
"1","first_name","url.png","value","value",..."a","b","c"
"2","first_name","url.png","value","value",..."a","b","c"
"3","first_name","url.png","value","value",..."a","b","c"
As suggested by martineau in a now-deleted answer, my key name was incorrect.
I ended up with this:
import json
import csv

jsonfile = "/path/to.json"
csvfile = "/path/to.csv"

with open(jsonfile) as json_file:
    data = json.load(json_file)

data_file = open(csvfile, 'w')
csvwriter = csv.writer(data_file)

# get sample keys from the first entry
header = data["data"]["1"].keys()

# add the new fields to the header
keys = list(header)
keys.append("field2")
keys.append("field3")

# write the header row
csvwriter.writerow(keys)

# write one row per entry, plus the extra values
total = data["data"]
for row in total:
    rowdefault = data["data"][str(row)].values()
    rowdata = list(rowdefault)
    rowdata.append("value1")
    rowdata.append("value2")
    csvwriter.writerow(rowdata)

data_file.close()
Here, I'm grabbing each row by its key via str(row).
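An alternative that mirrors the PowerShell version more closely is csv.DictWriter, which keeps the header and the appended custom columns aligned by name; a minimal sketch, with the custom column names and values as placeholder assumptions:
import json
import csv

with open("/path/to.json") as json_file:
    data = json.load(json_file)

rows = list(data["data"].values())
# original keys plus the three custom columns (names and values here are placeholders)
fieldnames = list(rows[0].keys()) + ["Custom1", "Custom2", "Custom3"]

with open("/path/to.csv", "w", newline="") as data_file:
    writer = csv.DictWriter(data_file, fieldnames=fieldnames)
    writer.writeheader()
    for row in rows:
        writer.writerow({**row, "Custom1": "a", "Custom2": "b", "Custom3": "c"})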

how to remove a back slash from a JSON file

I want to create a json file like this:
{"946705035":4,"946706692":4 ...}
I am taking a column that only contains Unix timestamps and grouping and counting them.
result = data['Last_Modified_Date_unixtimestamp_no_time'].value_counts()
In [21]: result.head()
Out[21]:
1508284800 131
1508716800 106
1508371200 101
1508457600 99
1508630400 96
Name: Last_Modified_Date_unixtimestamp_no_time, dtype: int64
transform to a dict
result = result.to_dict()
result
'''
{1507161600: 1,
1507852800: 1,
1508198400: 64,
1508284800: 131,
...
1535155200: 1,
1535241600: 1}
'''
import json
result = json.dumps(result)
with open('result.json', 'w') as fp:
    json.dump(result, fp, indent=4)
result
This is the data structure that I expected:
{"946705035":4,"946706692":4}
You're dumping the JSON twice, which causes the quotes to be escaped on the second dump. After the first json.dumps, result is just a string, so the second call dumps a string rather than a dict. Remove the first json.dumps:
import json
# result = json.dumps(result)
with open('result.json', 'w') as fp:
    json.dump(result, fp, indent=4)
Or remove the second dump:
import json
result = json.dumps(result)
with open('result.json', 'w') as fp:
    # json.dump(result, fp, indent=4)
    print(result, file=fp)
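To see where the backslashes come from, here is a minimal demonstration of dumping a dict once versus dumping the already-serialized string a second time:
import json

d = {"946705035": 4, "946706692": 4}
print(json.dumps(d))              # {"946705035": 4, "946706692": 4}
print(json.dumps(json.dumps(d)))  # "{\"946705035\": 4, \"946706692\": 4}"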
data_json = df.to_json(orient='records')
parsed = json.loads(data_json)
with open('my_data.json', 'w') as f:
    json.dump(parsed, f, indent=4)
First convert the DataFrame to a dict:
response = df.to_dict(orient="records")
Then encode the response to JSON (jsonable_encoder here is FastAPI's fastapi.encoders.jsonable_encoder):
json_compatible_data = jsonable_encoder(response)
This should work well.
The simplest way to solve the above problem is to combine json.dumps() and json.loads():
import json
result = json.dumps(result)
with open('result.json', 'w') as fp:
    json.dump(json.loads(result), fp)

How to create a dictionary based on a text file?

I'm writing a simple Python game and have a text file in the following format, where the name on the left is the player's name and the number on the right is the player's score:
Name 134
Next Name 304958
Etc....
Question: How can I read a text file in that format and create a dictionary from the values on each line, and then update the file with the latest dictionary entries once the player exits the program?
I already have some code commented out that I've started but have been unable to implement and get working. Any help is appreciated.
Here is my code:
# with open('scores.txt', 'r') as file:
#     scores = {}
#     for line in file:
#         line = line.split()
#         # do stuff

# with open("scores.txt", "w") as f:  # Save dictionary in file
#     # do stuff
To load that format:
with open('scores.txt', 'r') as infile:
    scores = {}
    for line in infile:
        name, _, score = line.rpartition(' ')
        scores[name] = int(score)
To save that format:
with open('scores.txt', 'w') as outfile:
    for name, score in scores.items():
        outfile.write('%s %s\n' % (name, score))
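To cover the "update the file when the player exits" part, one option is to wrap the two snippets above in small helpers and register the save step with atexit; a minimal sketch (the load_scores/save_scores names are just illustrative wrappers):
import atexit

def load_scores(filename='scores.txt'):
    scores = {}
    with open(filename) as infile:
        for line in infile:
            name, _, score = line.rpartition(' ')
            scores[name] = int(score)
    return scores

def save_scores(scores, filename='scores.txt'):
    with open(filename, 'w') as outfile:
        for name, score in scores.items():
            outfile.write('%s %s\n' % (name, score))

scores = load_scores()
atexit.register(save_scores, scores)  # rewritten automatically when the program exits normally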
penne12 is correct, though. You could save a few lines of code by using the json library to store JSON instead of this particular text format.
Here's an example that uses JSON as suggested in the comments:
import json

def load_game_data():
    data = None
    with open('savegame.json', 'r') as savefile:
        data = json.load(savefile)
    return data

def save_game_data(data):
    with open('savegame.json', 'w') as savefile:
        json.dump(data, savefile)

# Store the game data as a dictionary:
data = {'player_name': 'wolfram', 'hp': 8}
save_game_data(data)
data = load_game_data()
print(data)  # prints {'player_name': 'wolfram', 'hp': 8}
print(data['player_name'])
print(data['hp'])
The data gets saved to disk as JSON and is loaded back as a dictionary, which is easy to use. You'll need to add error handling, of course; this is just intended as a simple illustration.

Python read JSON file and modify

Hi, I am trying to take the data from a JSON file, insert an id, and then perform a REST POST.
My file data.json has:
{
'name':'myname'
}
and I would like to add an id so that the json data looks like:
{
'id': 134,
'name': 'myname'
}
So I tried:
import json
f = open("data.json","r")
data = f.read()
jsonObj = json.loads(data)
I can't get the JSON file to load.
What should I do so that I can convert the JSON file into a JSON object and add another id value?
Set item using data['id'] = ....
import json

with open('data.json', 'r+') as f:
    data = json.load(f)
    data['id'] = 134  # <--- add `id` value.
    f.seek(0)         # <--- reset the file position to the beginning.
    json.dump(data, f, indent=4)
    f.truncate()      # remove the remaining part
falsetru's solution is nice, but has a little bug:
Suppose the original 'id' was longer than 5 characters. When we then dump the new 'id' (134, only 3 characters), the string written from position 0 in the file is shorter than the original content, so extra characters (such as '}') from the original content are left in the file.
I solved that by replacing the original file.
import json
import os

filename = 'data.json'
with open(filename, 'r') as f:
    data = json.load(f)
    data['id'] = 134  # <--- add `id` value.

os.remove(filename)
with open(filename, 'w') as f:
    json.dump(data, f, indent=4)
I would like to present a modified version of Vadim's solution. It helps to deal with asynchronous requests that write/modify the JSON file. I know it wasn't part of the original question, but it might be helpful for others.
In the case of asynchronous file modification, os.remove(filename) will raise FileNotFoundError if requests come in frequently. To overcome this, you can create a temporary file with the modified content and then rename it, replacing the old version in one step. This solution works fine for both synchronous and asynchronous cases.
import os, json, uuid

filename = 'data.json'
with open(filename, 'r') as f:
    data = json.load(f)
    data['id'] = 134  # <--- add `id` value.
    # add, remove, modify content

# create a randomly named temporary file to avoid
# interference with other threads/asynchronous requests
tempfile = os.path.join(os.path.dirname(filename), str(uuid.uuid4()))
with open(tempfile, 'w') as f:
    json.dump(data, f, indent=4)

# rename the temporary file, replacing the old file
os.rename(tempfile, filename)
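One caveat worth noting: os.rename raises an error on Windows if the destination already exists, whereas os.replace (Python 3.3+) overwrites it on all platforms. A minimal variant of the same temp-file approach:
import json
import os
import uuid

filename = 'data.json'
with open(filename, 'r') as f:
    data = json.load(f)
data['id'] = 134

tmp = os.path.join(os.path.dirname(filename) or '.', str(uuid.uuid4()))
with open(tmp, 'w') as f:
    json.dump(data, f, indent=4)
os.replace(tmp, filename)  # unlike os.rename, this overwrites an existing file on Windows too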
There are really quite a number of ways to do this, and all of the above are valid approaches in one way or another... Let me add a straightforward proposal. So assuming your current existing JSON file looks like this:
{
    "name": "myname"
}
And you want to bring in this new JSON content (adding the key "id"):
{
    "id": "134",
    "name": "myname"
}
My approach has always been to keep the code extremely readable with easily traceable logic. So first, we read the entire existing json file into memory, assuming you are very well aware of your json's existing key(s).
import json

# first, get the absolute path to the json file
PATH_TO_JSON = 'data.json'  # assuming same directory (but you can work your magic here with os.)
# read the existing json into memory, to preserve whatever data is already there
with open(PATH_TO_JSON, 'r') as jsonfile:
    json_content = json.load(jsonfile)  # this is now in memory! you can use it outside 'open'
Next, we use the 'with open()' syntax again, this time with the 'w' option. 'w' is a write mode that lets us edit and write new information to the file. Here is the catch that works for us: any existing JSON with the same target file name will be erased automatically.
So what we can do now is simply write to the same file name with the new data:
# add the id key-value pair (remember that it already has the "name" key-value pair)
json_content["id"] = "134"

with open(PATH_TO_JSON, 'w') as jsonfile:
    json.dump(json_content, jsonfile, indent=4)  # you decide the indentation level
And there you go!
data.json should be good to go for a good old POST request.
Try this script:
import json

with open("data.json") as f:
    data = json.load(f)
data["id"] = 134
json.dump(data, open("data.json", "w"), indent=4)
The result is:
{
    "name": "myname",
    "id": 134
}
Only the arrangement is different. You can solve that by converting data to a list, arranging it as you wish, then rebuilding the dict and saving the file, like this:
import json

index_add = 0
with open("data.json") as f:
    data = json.load(f)
data_li = [[k, v] for k, v in data.items()]
data_li.insert(index_add, ["id", 134])
data = {data_li[i][0]: data_li[i][1] for i in range(0, len(data_li))}
json.dump(data, open("data.json", "w"), indent=4)
The result is:
{
    "id": 134,
    "name": "myname"
}
You can add an if condition so that the key is not repeated but simply updated, like this:
index_add = 0
n_k = "id"
n_v = 134
with open("data.json") as f:
    data = json.load(f)
if n_k in data:
    data[n_k] = n_v
else:
    data_li = [[k, v] for k, v in data.items()]
    data_li.insert(index_add, [n_k, n_v])
    data = {data_li[i][0]: data_li[i][1] for i in range(0, len(data_li))}
json.dump(data, open("data.json", "w"), indent=4)
This implementation should suffice:
import json

with open(jsonfile, 'r') as file:
    data = json.load(file)
data['id'] = 134  # the key/value pair you want to add
with open(jsonfile, 'w') as file:
    json.dump(data, file)
A context manager is used for opening jsonfile.
data holds the updated object, which is dumped into the overwritten jsonfile in 'w' mode.
Not exactly a solution to your question, but it might help people dealing with this issue with keys.
I have a list of files in a folder, and I need to make JSON out of it with keys.
After many hours of trying, the solution turned out to be simple.
Solution:
import os

async def return_file_names():
    dir_list = os.listdir("./tmp/")
    json_dict = {"responseObj": [{"Key": dir_list.index(value), "Value": value} for value in dir_list]}
    print(json_dict)
    return json_dict
The response looks like this:
{
    "responseObj": [
        {
            "Key": 0,
            "Value": "bottom_mask.GBS"
        },
        {
            "Key": 1,
            "Value": "bottom_copper.GBL"
        },
        {
            "Key": 2,
            "Value": "copper.GTL"
        },
        {
            "Key": 3,
            "Value": "soldermask.GTS"
        },
        {
            "Key": 4,
            "Value": "ncdrill.DRD"
        },
        {
            "Key": 5,
            "Value": "silkscreen.GTO"
        }
    ]
}
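As an aside, enumerate avoids the repeated dir_list.index lookups and produces the same key/value pairs; a minimal sketch:
import os

dir_list = os.listdir("./tmp/")
json_dict = {"responseObj": [{"Key": i, "Value": name} for i, name in enumerate(dir_list)]}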
