I have a concatenated CSV file that I am attempting to output into JSON format. How should I go about implementing the logic so that a CSV row only gets converted to a JSON object when all of its fields have a value?
import glob, os
import pandas as pd
import json
import csv

# Convert each CSV row into the restructured JSON layout and append it to
# the output file (one indented JSON document per row).
with open('some.csv', 'r', newline='') as csvfile, \
        open('output.json', 'w') as jsonfile:
    for record in csv.DictReader(csvfile):
        # Promote a few fields to the top level and nest the raw columns
        # under 'data'.
        restructured = {
            'STATION_CODE': record['STORE_CODE'],
            'id': record['ARTICLE_ID'],
            'name': record['ITEM_NAME'],
            'data': {
                'STORE_CODE': record['STORE_CODE'],
                'ARTICLE_ID': record['ARTICLE_ID'],
                'ITEM_NAME': record['ITEM_NAME'],
                'BARCODE': record['BARCODE'],
                'SALE_PRICE': record['SALE_PRICE'],
                'LIST_PRICE': record['LIST_PRICE'],
                'UNIT_PRICE': record['UNIT_PRICE'],
            },
        }
        json.dump(restructured, jsonfile, indent=4)
        jsonfile.write('\n')
Currently this writes every row from the CSV file into the JSON output, which is unintended behavior. Any input on how to correct this?
First I loop through all the rows of the CSV and add them to a JSON array. If any value in a row is empty, that row is skipped. Once I have all the rows in the JSON array, I output it to the JSON file.
import json
import csv

# Collect only the CSV rows in which every field has a value; rows with
# any empty field are skipped entirely.
csvjsonarr = []
with open('some.csv', 'r', newline='') as csvfile:
    for row in csv.DictReader(csvfile):
        # Skip the row as soon as any column value is the empty string.
        if any(value == "" for value in row.values()):
            continue
        restructured = {
            'STATION_CODE': row['STORE_CODE'],
            'id': row['ARTICLE_ID'],
            'name': row['ITEM_NAME'],
            'data': {
                'STORE_CODE': row['STORE_CODE'],
                'ARTICLE_ID': row['ARTICLE_ID'],
                'ITEM_NAME': row['ITEM_NAME'],
                'BARCODE': row['BARCODE'],
                'SALE_PRICE': row['SALE_PRICE'],
                'LIST_PRICE': row['LIST_PRICE'],
                'UNIT_PRICE': row['UNIT_PRICE'],
            },
        }
        csvjsonarr.append(restructured)

# Only create the output file when at least one complete row was found.
if csvjsonarr:
    with open('output.json', 'w') as jsonfile:
        json.dump(csvjsonarr, jsonfile, indent=4)
Related
I need column1 to be used as a KEY and its value to be a dict of column2 (as key) and column3 (as value), so that the output becomes a nested dictionary.
For example:
I have a csv file as shown below:
customer1,subkey1,val1
customer2,subkey2,val2
customer2,subkey3,val3
customer2,subkey4,val4
customer3,subkey5,val5
customer3,subkey6,val6
expecting output to be:
{
customer1: {
subkey1:val1
},
customer2: {
subkey2:val2,
subkey3:val3,
subkey4:val4
},
customer3: {
subkey5:val5,
subkey6:val6
}
}
I have tried to convert below sample code as per my requirement, but no luck:
import csv
import json
def csv_to_json(csvFilePath, jsonFilePath):
    """Read every row of *csvFilePath* and write them as a JSON array.

    Each CSV row becomes one JSON object whose keys are the header
    fields, and the whole list is written to *jsonFilePath*.
    """
    # DictReader maps each data row onto the header names.
    with open(csvFilePath, encoding='utf-8') as csvf:
        jsonArray = [dict(row) for row in csv.DictReader(csvf)]
    # Serialize straight to the file: json.dump avoids building the
    # intermediate string that dumps() + write() produced.
    with open(jsonFilePath, 'w', encoding='utf-8') as jsonf:
        json.dump(jsonArray, jsonf, indent=4)
# Demo paths; raw strings keep Windows-style backslashes intact.
csvFilePath = r'data.csv'
jsonFilePath = r'data.json'
csv_to_json(csvFilePath, jsonFilePath)
Try this
It worked for me. I used csv.reader for this and changed the variable into a dict object.
import csv
import json
def csv_to_json(csvFilePath, jsonFilePath):
    """Fold a 3-column CSV into a nested dict and write it as JSON.

    Column 1 becomes the top-level key; columns 2 and 3 become a
    key/value pair inside that entry.
    """
    jsonDict = {}
    # Plain reader: each row arrives as a [col1, col2, col3] list.
    with open(csvFilePath, encoding='utf-8') as csvf:
        for record in csv.reader(csvf, delimiter=','):
            # Create the entry's sub-dict on first sight, then extend it.
            jsonDict.setdefault(record[0], {})[record[1]] = record[2]
    # Write the nested dict to the JSON file.
    with open(jsonFilePath, 'w', encoding='utf-8') as jsonf:
        jsonf.write(json.dumps(jsonDict, indent=4))
# Demo paths; raw strings keep Windows-style backslashes intact.
csvFilePath = r'data.csv'
jsonFilePath = r'data.json'
csv_to_json(csvFilePath, jsonFilePath)
output
{
"customer1": {
"subkey1": "val1"
},
"customer2": {
"subkey2": "val2",
"subkey3": "val3",
"subkey4": "val4"
},
"customer3": {
"subkey5": "val5",
"subkey6": "val6"
}
}
import collections
import csv

# Group the 3-column CSV into {col1: {col2: col3}}; defaultdict(dict)
# gives each first-seen key an empty inner dict automatically.
with open('file.csv') as f:
    grouped = collections.defaultdict(dict)
    for record in csv.reader(f, delimiter=','):
        grouped[record[0]][record[1]] = record[2]
    # Convert back to a plain dict for a cleaner printout.
    print(dict(grouped))
# Output
{
customer1: {
subkey1:val1
},
customer2: {
subkey2:val2,
subkey3:val3,
subkey4:val4
},
customer3: {
subkey5:val5,
subkey6:val6
}
}
In this case, DictReader doesn't really help you because it gives each row as a dict in the form:
{"column1": "customer1", "column2": "subkey1", "column3": "val1"}
So it will actually be simpler to use a regular reader and parse manually. You just need to expand existing customers, so it will be helpful to use a defaultdict:
import csv
import json
from collections import defaultdict
def csv_to_json(csvFilePath, jsonFilePath):
    """Fold a 3-column CSV into {col1: {col2: col3}} and dump it as JSON."""
    # defaultdict(dict) creates each top-level entry on first access, so
    # existing customers are simply extended.
    jsonDict = defaultdict(dict)
    with open(csvFilePath, encoding='utf-8') as csvf:
        for record in csv.reader(csvf):
            jsonDict[record[0]][record[1]] = record[2]
    # dump() writes directly to the file handle (no intermediate string).
    with open(jsonFilePath, 'w', encoding='utf-8') as jsonf:
        json.dump(jsonDict, jsonf, indent=4)
Note that the json file can be written simpler by using dump instead of dumps.
I've this two Json files
zone.json
{"0":{"id":1,"name":"XYZ"}}
region.json
{"0":{"id":1,"name":"ABC"},"1":{"id":2,"name":"DEF"}}
I need to use the data from these JSON files as values to create a dictionary with a manually entered key.
{"zone": {"0":{"id":1,"name":"XYZ"}}, "region": {"0":{"id":1,"name":"ABC"},"1":{"id":2,"name":"DEF"}}}
Can anyone please explain how to create this dictionary in Python by using the names of the files as keys, or suggest any other approach?
Use json module to parse the data. You can split the filename by . and use the first part as a key:
import json

file1 = 'zone.txt'
file2 = 'region.txt'

# Build the merged dict: each file's name (the part before the first dot)
# becomes the key, and its parsed JSON content becomes the value.
with open(file1, 'r') as f1, open(file2, 'r') as f2:
    out = {
        file1.split('.')[0]: json.load(f1),
        file2.split('.')[0]: json.load(f2),
    }

print(out)
Prints:
{'zone': {'0': {'id': 1, 'name': 'XYZ'}}, 'region': {'0': {'id': 1, 'name': 'ABC'}, '1': {'id': 2, 'name': 'DEF'}}}
Edit (to save the file):
# Persist the combined dict built above as JSON.
with open('output.txt', 'w') as f_out:
    json.dump(out, f_out)
Alternative using pathlib:
from pathlib import Path
import json

zonepath = Path("zone.json")
regionpath = Path("region.json")

# Path.read_text() loads each file in one call; json.loads parses it.
zonedict = json.loads(zonepath.read_text())
regiondict = json.loads(regionpath.read_text())

# Path.stem is the file name without its extension -- it serves directly
# as the dictionary key ('zone', 'region').
result = {zonepath.stem: zonedict, regionpath.stem: regiondict}
I have a JSON File which contains some data as below:
{
'count': 2,
'next': '?page=2',
'previous': None,
'results': [
{
'category': 'Triggers',
'id': '783_23058',
'name': 'Covid-19'
},
{
'category': 'Sources',
'id': '426_917746',
'name': 'Covid19Conversations'
}
]
}
I am able to extract the first 'id' and 'name' values as below
Doc_details = dict()
# NOTE(review): the loop variable `item` is never used and the index is
# hard-coded to [0], so every iteration re-reads only the first entry of
# 'results' -- this is why only one pair is extracted.
for item in companies:
    doc_id = companies['results'][0]['id']
    doc_name = companies['results'][0]['name']
    Doc_details[doc_name] = doc_id

# Print every collected name -> id pair.
for key, value in Doc_details.items():
    print(key,value)
Output:
Covid-19 783_23058
I am new to python. Can someone help me with:
Loop through it and extract all the key,value pairs
Save the results to an excel file.
If you already have the object, you can iterate through companies['results'] using list comprehension and map the objects to (key, value) pairs.
# Sample payload matching the API response described in the question.
companies = {
    'count': 2,
    'next': '?page=2',
    'previous': None,
    'results': [{
        'category': 'Triggers',
        'id': '783_23058',
        'name': 'Covid-19'
    }, {
        'category': 'Sources',
        'id': '426_917746',
        'name': 'Covid19Conversations'
    }]
}

# Collect [id, name] for every entry under 'results'.
pairs = [[entry['id'], entry['name']] for entry in companies['results']]

# Render the pairs as tab-separated lines, one row per pair.
csv = '\n'.join('\t'.join(pair) for pair in pairs)
print(csv)
Result
783_23058 Covid-19
426_917746 Covid19Conversations
Writing to a file
Convert the list of pairs to a CSV file. See: Writing a Python list of lists to a csv file.
import csv

# Open in text mode with newline='' (the csv module's documented
# requirement on Python 3); the original mode 'wb' makes csv.writer raise
# TypeError because it writes str, not bytes.
with open('pairs.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerows(pairs)
If you only want the name, id pairs, you can just do:
# Iterate the results list directly when only name/id are needed.
for result in companies['results']:
    print(result['name'], result['id'])
# =>
# Covid-19 783_23058
# Covid19Conversations 426_917746
IIUC: You can use the built-in json package to parse the JSON file into a Python dict, and then use the pandas library to write the Excel file.
Try this:
import json
import pandas as pd
from pandas import ExcelWriter

# Parse the JSON file into a plain dict.
with open("json_file.json", "r") as file:
    info = json.load(file)  # info contains all key-value pairs

# Save the objects under "results" to Excel.  Using the writer as a
# context manager finalizes and closes the workbook on exit; the old
# writer.save() call was removed in pandas 2.0.
with ExcelWriter('excel_file.xlsx') as writer:
    pd.DataFrame(info["results"]).to_excel(writer, index=False)
Update:
The only issue I have now is that running the command to add a user creates a complete duplicate of the JSON document.
Question:
json.dump() simply adds the entry to the end of the json, I want it to overwrite the entire file with the new updated entry
Setup: (Create blank "Banks" Field)
# Initialise the data file with an empty "banks" list; mode 'w' replaces
# any existing content.
with open(DATA_FILENAME, mode='w', encoding='utf-8') as f:
    data = {"banks": []}
    json.dump(data, f)
Set User: (Create a User Key inside "Banks")
member = ctx.message.author
entry = {'name': member.name, 'id': member.id, 'balance': 0}

with open(DATA_FILENAME, 'r+') as outfile:
    data = json.load(outfile)
    data['banks'].append(entry)
    # json.load left the file cursor at the end, so dumping now would
    # append a second document after the old one.  Rewind to the start so
    # the new JSON overwrites the old, then truncate any leftover bytes
    # in case the new document is shorter.
    outfile.seek(0)
    json.dump(data, outfile, indent=4)
    outfile.truncate()
Output of first use:
{"banks": []}{
"banks": [
{
"name": "ViperZ-14",
"id": 367151547575959562,
"balance": 0
}
]
}
What I need:
{
"banks": [
{
"name": "ViperZ-14",
"id": 367151547575959562,
"balance": 0
}
]
}
# Path of the JSON store used by the demo below.
file_path = '/home/vishnudev/Downloads/new.json'
import json
def load(file, mode, data=None):
    """Read ('r') or write ('w') JSON at *file*.

    In read mode the parsed object is returned; in write mode *data* is
    serialized to the file.  *data* defaults to an empty list -- a None
    sentinel is used instead of a literal [] to avoid the shared
    mutable-default-argument pitfall, preserving the old behavior.
    """
    if data is None:
        data = []
    with open(file, mode) as f:
        if mode == 'r':
            return json.load(f)
        elif mode == 'w':
            json.dump(data, f)
def get_data_func():
    """Return a fixed sample record to append to the store."""
    return dict(name='vishnu', data='dev')
# Read the current list, append one sample record, write the whole list
# back (full overwrite in 'w' mode), then re-read to show the file grew
# by exactly one entry.
d = load(file_path, 'r')
print(d)
d.append(get_data_func())
load(file_path, 'w', d)
d = load(file_path, 'r')
print(d)
Output:
On running the above twice I get
[{'name': 'vishnu', 'data': 'dev'}]
[{'name': 'vishnu', 'data': 'dev'}, {'name': 'vishnu', 'data': 'dev'}]
I have found that the solution was to simply seek to the beginning of the document. json.dump() does overwrite, but it only overwrites what is in its way; seeking/placing the cursor at the top of the document makes it overwrite the entire document with the new entry.
I have written a code to convert my csvfile which is '|' delimited file to get specific json format.
Csv file format:
comment|address|city|country
crowded|others|others|US
pretty good|others|others|US ....
I have tried other code as well, but since I'm new to Python I'm stuck. If somebody could help me correct the mistake I'm making, it would be helpful.
import csv
import json
from collections import OrderedDict
csv_file = 'test.csv'
json_file = csv_file + '.json'


def main(input_file):
    """Convert the pipe-delimited CSV *input_file* to a JSON array.

    Each row becomes one object keyed by the header fields; the result
    is written to the module-level *json_file* path.
    """
    csv_rows = []
    with open(input_file, 'r') as csvfile:
        # The file is '|' separated, so the delimiter must be given
        # explicitly: with the default ',' each whole line is parsed as a
        # single field, producing the garbled output in the question.
        reader = csv.DictReader(csvfile, delimiter='|')
        for row in reader:
            csv_rows.append(dict(row))
    with open(json_file, 'w') as f:
        json.dump(csv_rows, f, sort_keys=True, indent=4, ensure_ascii=False)
        f.write('\n')


if __name__ == "__main__":
    main(csv_file)
I want in json format as below
{
"reviewer": {
"city": "",
"country": ""
"address": "Orlando, Florida"
},
But I'm getting output like this:
[
{
"COMMENT|\"ADDRESS\"|\"CITY\"|"COUNTRY":"Crowded"|"Others"|"Others"|
},
{
"COMMENT|\"ADDRESS\"|\"CITY\"|"COUNTRY":"pretty good"|"Others"|"Others"|
},
You're missing the separator parameter. Instead of:
reader = csv.DictReader(csvfile)
Use:
reader = csv.DictReader(csvfile, delimiter='|')