Modifying JSON key values in Python

I am trying to load a JSON file, change specific key values, then save the updated entries to a new file. This JSON file has many entries with the same format. This is my furthest attempt before coming here; however, it does not save the new values.
What am I missing?
#!/usr/bin/python
import simplejson as json
import names

in_file = open('Names.json', 'r')
out_file = open('Names_new.json', 'w')

data_file = in_file.read()
data = json.loads(data_file)

for x in data:
    nickname = x['nickname']
    newname = names.get_first_name()
    nickname = newname

out_file.write(json.dumps(data))
out_file.close()

The problem is that you never changed x['nickname'] when you wanted to assign newname to it; you only modified the local variable nickname.
Try assigning to x['nickname'] directly:
for x in data:
    x['nickname'] = names.get_first_name()

You are just dumping the old JSON data into the new file without modifying its contents.
Instead, change the entries with newname before dumping:
#!/usr/bin/python
import simplejson as json
import names

in_file = open('Names.json', 'r')
out_file = open('Names_new.json', 'w')

data_file = in_file.read()
data = json.loads(data_file)

for x in data:
    newname = names.get_first_name()
    x['nickname'] = newname

out_file.write(json.dumps(data))
out_file.close()
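As a side note, the same fix can be written with context managers so both files are closed automatically. A minimal sketch of that (using json.load/json.dump instead of reading and dumping strings by hand):

#!/usr/bin/python
import simplejson as json
import names

# read and parse the input file in one step
with open('Names.json', 'r') as in_file:
    data = json.load(in_file)

# replace every nickname in place
for x in data:
    x['nickname'] = names.get_first_name()

# write the modified structure out; the file is closed automatically
with open('Names_new.json', 'w') as out_file:
    json.dump(data, out_file)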

Related

How to use elements in list by order

My goal is to change multiple csv files in a folder into JSON.
First, I needed to list my csv files
for file in os.listdir("C:/Users/folder_to_csv"):
    filename = os.fsdecode(file)
    if filename.endswith(".csv"):
        # check if csv files are listed correctly
        print(os.path.join("C:/Users/folder_to_csv", filename))
With this, I was able to call csv files in that folder.
Result:
C:/Users/folder_to_csv\file_1.csv
C:/Users/folder_to_csv\file_2.csv
C:/Users/folder_to_csv\file_3.csv
Then, I wanted to convert all of the csv files in 'csvlist' to jsonObj; however, for some reason, my code only uses the first file (C:/Users/folder_to_csv\file_1.csv).
This is what I have tried so far:
import json
import csv
import requests
import threading
import os

for file in os.listdir("C:/Users/folder_to_csv"):
    filename = os.fsdecode(file)
    if filename.endswith(".csv"):
        csvlist = os.path.join("C:/Users/folder_to_csv", filename)

data = {}

def main():
    # loop csv list so my code can read all csv files
    length = len(csvlist)
    for i in range(length):
        i += 1
        path = csvlist
        # switch csv to json
        with open(path, mode='r') as f:
            reader = csv.DictReader(f)
            processdata = [row for row in reader]
            dlist = processdata
            jsonObj = json.dumps(dlist)
        print(jsonObj)

main()
In the initial loop, you keep redefining the csvlist variable. I suppose you want it to be a list? Then just create an initial empty list and append to it instead of redefining it:
csvlist = []
...
csvlist.append(os.path.join("C:/Users/folder_to_csv", filename))
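Putting the two pieces together, a minimal sketch of the whole conversion (keeping the original folder path, and simply printing each JSON string the way the question's code does) might look like:

import csv
import json
import os

folder = "C:/Users/folder_to_csv"

# collect every csv path instead of overwriting a single variable
csvlist = []
for file in os.listdir(folder):
    filename = os.fsdecode(file)
    if filename.endswith(".csv"):
        csvlist.append(os.path.join(folder, filename))

def main():
    # loop over the collected paths so every file gets converted
    for path in csvlist:
        with open(path, mode='r') as f:
            reader = csv.DictReader(f)
            dlist = [row for row in reader]
        jsonObj = json.dumps(dlist)
        print(jsonObj)

main()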

python change the value of a specific line

I have 1000 json files, and I need to change the value of a specific line, using a numeric sequence, in all of the files.
An example: the specific line is
"name": "carl 00",
I need it to be like the following:
File 1
"name": "carl 1",
File 2
"name": "carl 2",
File 3
"name": "carl 3",
What is the right script to achieve the above using Python?
This should do the trick, but you're not very clear about how the data is stored in the actual json file, so I listed two different approaches. The first parses the json file into a Python dict, manipulates the data, turns it back into a string, and saves it. The second is what I think you mean by "line": split the file's text into a list of lines, change the line you want, rejoin the full string, and save it.
This also assumes your json files are in the same folder as the Python script.
import os
import json

my_files = [name1, name2, name3, ...]  # ['file_name.json', ...]
folder_path = os.path.dirname(__file__)

for i, name in enumerate(my_files, start=1):  # start=1 so the first file gets "carl 1"
    path = f'{folder_path}/{name}'
    with open(path, 'r') as f:
        json_text = f.read()

    # if you know the key(s) in the json file...
    json_dict = json.loads(json_text)
    json_dict['name'] = json_dict['name'].replace('00', str(i))
    new_json_str = json.dumps(json_dict)

    # if you know the line number in the file...
    line_list = json_text.split('\n')
    line_list[line_number - 1] = line_list[line_number - 1].replace('00', str(i))
    new_json_str = '\n'.join(line_list)

    with open(path, 'w') as f:
        f.write(new_json_str)
Based on your edit, this is what you want:
import os
import json

my_files = [f'{i}.json' for i in range(1, 1001)]
folder_path = os.path.dirname(__file__)  # put this .py file in the same folder as the json files

for i, name in enumerate(my_files, start=1):  # start=1 so the first file gets "carl 1"
    path = f'{folder_path}/{name}'
    with open(path, 'r') as f:
        json_text = f.read()

    json_dict = json.loads(json_text)
    json_dict['name'] = f'carl {i}'
    # include these lines if you want "symbol" and "subtitle" changed
    json_dict['symbol'] = f'carl {i}'
    json_dict['subtitle'] = f'carl {i}'
    new_json_str = json.dumps(json_dict)

    with open(path, 'w') as f:
        f.write(new_json_str)
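If the files don't follow the numeric naming assumed above, a glob-based variant is possible. This is only a sketch: it assumes every *.json file sitting next to the script should be renumbered, that each contains a top-level "name" key, and that lexicographic order is acceptable.

import json
from pathlib import Path

folder = Path(__file__).parent  # assumes this .py file sits next to the json files

# sorted() keeps the numbering deterministic across runs
for i, path in enumerate(sorted(folder.glob('*.json')), start=1):
    json_dict = json.loads(path.read_text())
    json_dict['name'] = f'carl {i}'
    path.write_text(json.dumps(json_dict, indent=2))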
Without knowing more, the loop below will accomplish the post's requirements:
name = 'carl'
for i in range(1, 1001):
    print(f'name: {name} {i}')

Python - changing content of .txt files from folder and saving in new folder

I need to change some keywords in multiple .txt files, using a dictionary structure for this, and then save the changed files in a new location. I wrote the code attached below, but when I run it, it keeps running forever, and when I break it there is only one empty file created.
import os
import os.path
from pathlib import Path

dir_path = Path("C:\\Users\\myuser\\Documents\\scripts_new")

# loading pairs of words from txt file into dictionary
myfile = open("C:\\Users\\myuser\\Desktop\\Python\\dictionary.txt")
data_dict = {}
for line in myfile:
    k, v = line.strip().split(':')
    data_dict[k.strip()] = v.strip()
myfile.close()

# Get the list of all files and directories
path_dir = "C:\\Users\\myuser\\Documents\\scripts"

# iterate over files in that directory
for filename in os.listdir(path_dir):
    f = os.path.join(path_dir, filename)
    name = os.path.join(filename)
    text_file = open(f)
    # read whole file to a string
    sample_string = text_file.read()
    # Iterate over all key-value pairs in dictionary
    for key, value in data_dict.items():
        # Replace key character with value character in string
        sample_string = sample_string.replace(key, value)
    with open(os.path.join(dir_path, name), "w") as file1:
        toFile = input(sample_string)
        file1.write(toFile)
I have found a solution with a slightly different approach. Maybe this code will be useful for someone:
import os

# loading pairs of words from txt file into dictionary
myfile = open("C:\\Users\\user\\Desktop\\Python\\dictionary.txt")
data_dict = {}
for line in myfile:
    k, v = line.strip().split(':')
    data_dict[k.strip()] = v.strip()
myfile.close()

sourcepath = os.listdir("C:\\Users\\user\\Documents\\scripts\\")
for file in sourcepath:
    input_file = "C:\\Users\\user\\Documents\\scripts\\" + file
    print('Conversion is ongoing for: ' + input_file)
    with open(input_file, 'r') as input_file:
        filedata = input_file.read()
    destination_path = "C:\\Users\\user\\Documents\\scripts_new\\" + file
    # Iterate over all key-value pairs in dictionary
    for key, value in data_dict.items():
        filedata = filedata.replace(key, value)
    with open(destination_path, 'w') as file:
        file.write(filedata)
Hmmm... I think your problem might actually be the use of this line:
toFile = input(sample_string)
as that'll halt the program awaiting user input.
Anyway, it could probably do with a little organisation into functions. Even this below is a bit... meh.
import os
import os.path
from pathlib import Path

dir_path = Path("C:\\Users\\myuser\\Documents\\scripts_new")

# -----------------------------------------------------------
def load_file(fileIn):
    # loading pairs of words from txt file into dictionary
    with open(fileIn) as myfile:
        data_dict = {}
        for line in myfile:
            k, v = line.strip().split(':')
            data_dict[k.strip()] = v.strip()
    return data_dict

# -----------------------------------------------------------
def work_all_files(starting_dir, moved_dir, data_dict):
    # Iterate over files within the dir - note non recursive
    for filename in os.listdir(starting_dir):
        f = os.path.join(starting_dir, filename)
        with open(f, 'r') as f1:
            # read whole file to a string
            sample_string = f1.read()
        new_string = replace_strings(sample_string, data_dict)
        with open(os.path.join(moved_dir, filename), "w") as file1:
            file1.write(new_string)

# -----------------------------------------------------------
def replace_strings(sample_string, data_dict):
    # Iterate over all key-value pairs in dictionary
    # and if they exist in sample_string, replace them
    for key, value in data_dict.items():
        # Replace key character with value character in string
        sample_string = sample_string.replace(key, value)
    return sample_string

# -----------------------------------------------------------
if __name__ == "__main__":
    # Get the dict-val pairings first
    data_dict = load_file("C:\\Users\\myuser\\Desktop\\Python\\dictionary.txt")
    # Then run over all the files within dir
    work_all_files("C:\\Users\\myuser\\Documents\\scripts", "C:\\Users\\myuser\\Documents\\new_scripts", data_dict)
We could have housed all this in a class and then transported a few variables around using the instance (i.e. "self") - that would have been cleaner. But the first step is learning to break things into functions.
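For illustration only, a rough sketch of that class-based layout (the Replacer name is hypothetical, and it just reuses the functions defined above) might look like:

class Replacer:
    def __init__(self, dict_path):
        # load the key:value pairs once and keep them on the instance
        self.data_dict = load_file(dict_path)

    def run(self, starting_dir, moved_dir):
        # the stored dictionary travels via self instead of being passed around
        work_all_files(starting_dir, moved_dir, self.data_dict)

# usage:
# Replacer("C:\\Users\\myuser\\Desktop\\Python\\dictionary.txt").run(
#     "C:\\Users\\myuser\\Documents\\scripts",
#     "C:\\Users\\myuser\\Documents\\new_scripts")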

How to open multiple Json files and save them in multiple variables

I have some Json files. The naming convention of the files is dataset_ML-Model_params.json; for example, House_Lasso_params.json, House_RF_params.json, Bike_Lasso_params.json, and Bike_RF_params.json.
All of these files contain tuning hyperparameters in dict format. I can open one file using the code below:
filename = f"{args.dataset}_Lasso_params.json"
outfile = HT_OUT / filename
with open(outfile, "r") as file:
d_loaded = json.load(file)
Passing the value to the model.
Lasso(**d_loaded, precompute=True)
Again, for another file:
filename = f"{args.dataset}_RF_params.json"
outfile = HT_OUT / filename
with open(outfile, "r") as file:
    rf_loaded = json.load(file)

RF(**rf_loaded)
Here, args.dataset contains the dataset name. Could you tell me how I can load these 2 files and save them in different variables, so that later I can pass the variables to the models? Like:
# After opening and saving the json file in different variable
Lasso(**lasso_params, precompute=True)
RF(**rf_params)
Make a list of all the models:
MODEL_NAMES = ["Lasso", "RF"]
Make another dictionary to save the params for each model:
models_params = {}
for model_name in MODEL_NAMES:
    filename = f"{args.dataset}_{model_name}_params.json"
    outfile = HT_OUT / filename
    with open(outfile, "r") as file:
        d_loaded = json.load(file)
    models_params[model_name] = d_loaded
Later, use the get(key) to access your expected params.
Lasso(**(models_params.get('Lasso')), precompute=True)
RF(**(models_params.get('RF')))
You can also check the params
print(Lasso(**(models_params.get('Lasso')), precompute=True).get_params())
You could use another dict that is going to contain the params you need.
For example,
model_params = {'lasso_params': smth_here, 'rf_params': smth_here}
So then you can get the needed values with:
**model_params['lasso_params']
To get all the files matching that naming pattern (dataset_ML-Model_params.json) you could use the library called glob:
from glob import glob

glob('*_params.json')  # returns e.g. ['House_Lasso_params.json', 'House_RF_params.json', ...]
And then just read them one by one.
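For example, a minimal sketch of that (assuming the same {dataset}_{Model}_params.json naming and that args.dataset is already available) could be:

from glob import glob
import json

params_by_model = {}
for path in glob(f'{args.dataset}_*_params.json'):
    # e.g. 'House_Lasso_params.json' -> 'Lasso'
    model_name = path.split('_')[1]
    with open(path) as f:
        params_by_model[model_name] = json.load(f)

# Lasso(**params_by_model['Lasso'], precompute=True)
# RF(**params_by_model['RF'])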

Python looping through files and saving content to dict

I have the following code:
import os
import json
import ipaddress

iplist = []
ipiflist = []
mydict = {}

for filename in os.listdir('data/'):
    with open(os.path.join('data/', filename), 'r') as f:
        data = json.load(f)
        mydict.update(data)
        print(mydict)
In the data directory there are several JSON files that I open in this loop.
I update the dict on every iteration, and for this reason I get the following output:
{'ipif_1001': '10.10.160.129', 'ipif_1002': '10.10.160.142', 'ipif_1003': '10.10.160.169', 'ipif_1004': '10.10.160.173', 'ipif_3334': '10.10.160.194', 'IpIf3337': '10.10.160.126'}
{'ipif_1001': '10.10.160.129', 'ipif_1002': '10.10.160.142', 'ipif_1003': '10.10.160.170', 'ipif_1004': '10.10.160.174', 'ipif_3334': '10.10.160.194', 'IpIf3337': '10.10.160.126', 'ipif_1005': '10.10.160.178', 'ipif_1006': '10.10.160.182'}
{'ipif_1001': '10.10.160.129', 'ipif_1002': '10.10.160.142', 'ipif_1003': '10.10.160.170', 'ipif_1004': '10.10.160.174', 'ipif_3334': '10.10.160.194', 'IpIf3337': '10.10.160.126', 'ipif_1005': '10.10.160.178', 'ipif_1006': '10.10.160.182', 'IpIf1001': '10.10.160.138', 'IpIf1002': '10.10.160.141', 'IpIf1003': '10.10.160.153', 'IpIf1006': '10.10.160.181', 'IpIf_CPEDCN': '10.10.160.241', 'IpIf_DCNMgt': '10.10.191.253', 'ipif1164': '10.10.160.166', 'IpIf1010': '10.10.170.1'}
I only need the fully merged output from the last iteration. How can I access only that?
Thanks for your help
The for loop in Python has an else clause, which is only executed when the loop completes without hitting a break. There you can print your last result:
for filename in os.listdir('data/'):
    with open(os.path.join('data/', filename), 'r') as f:
        data = json.load(f)
        mydict.update(data)
else:
    print(mydict)
import os
import json
import ipaddress

iplist = []
ipiflist = []
mydict = {}

file_list = os.listdir('data/')  # named file_list so the built-in list isn't shadowed
for filename in os.listdir('data/'):
    with open(os.path.join('data/', filename), 'r') as f:
        data = json.load(f)
        mydict.update(data)
        # check whether the current filename is the last one in the directory
        if filename == file_list[-1]:
            print(mydict)
Try it like this
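Alternatively, since only the fully merged dict is needed, just printing once after the loop ends gives the same result without any filename check. A minimal sketch of that:

import os
import json

mydict = {}
for filename in os.listdir('data/'):
    with open(os.path.join('data/', filename), 'r') as f:
        mydict.update(json.load(f))

print(mydict)  # printed once, after every file has been merged in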
