I want to convert data from an Excel spreadsheet to a JSON file. Here is the code I currently have:
Data (Excel spreadsheet screenshot)
Code
import xlrd
from collections import OrderedDict
import json

wb = xlrd.open_workbook('./file1.xlsx')
sh = wb.sheet_by_index(0)
data_list = []
for rownum in range(1, sh.nrows):
    data = OrderedDict()
    row_values = sh.row_values(rownum)
    data['name'] = row_values[0]
    data['description'] = row_values[1]
    data_list.append(data)
data_list = {'columns': data_list}
j = json.dumps(data_list)
with open('seq1.json', 'w') as f:
    f.write(j)
Output
{"columns": [{"name": "FILEID", "description": "FILETYPE"}]}
Expected output
{
    "columns": [
        {
            "name": "fileid",
            "description": "FILEID"
        },
        {
            "name": "filetype",
            "description": "FILETYPE"
        },
        {
            "name": "stusab",
            "description": "STUSAB"
        },
        {
            "name": "chariter",
            "description": "CHARITER"
        },
        {
            "name": "sequence",
            "description": "SEQUENCE"
        },
        {
            "name": "logrecno",
            "description": "LOGRECNO"
        }
    ],
The "name" column should be displaying the first row while the "description" column should be displaying the second row.
What modification can I do in my function to get the output I am looking for?
You need to iterate over columns, not rows
import xlrd
from collections import OrderedDict
import json

wb = xlrd.open_workbook('./file1.xlsx')
sh = wb.sheet_by_index(0)
data_list = []
data = OrderedDict()
for colnum in range(0, sh.ncols):
    data['name'] = sh.row_values(0)[colnum].lower()  # first row holds the names
    data['description'] = sh.row_values(1)[colnum]   # second row holds the descriptions
    data_list.append(data.copy())
data_list = {'columns': data_list}
j = json.dumps(data_list)
with open('seq1.json', 'w') as f:
    f.write(j)
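A side note: xlrd 2.x dropped support for .xlsx files, so a recent xlrd may refuse to open the workbook. If that happens, openpyxl can do the same job; a rough sketch (assuming, as above, that the first row holds the names and the second the descriptions):

from openpyxl import load_workbook
import json

wb = load_workbook('./file1.xlsx', read_only=True)
sh = wb.worksheets[0]
rows = list(sh.iter_rows(values_only=True))  # row 0 = names, row 1 = descriptions

data_list = [
    {'name': str(name).lower(), 'description': str(desc)}
    for name, desc in zip(rows[0], rows[1])
]

with open('seq1.json', 'w') as f:
    json.dump({'columns': data_list}, f, indent=4)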
You should give excel2json a try:
import excel2json
excel2json.convert_from_file('file.xlsx')
You can use pandas
import pandas as pd
df = pd.read_excel('./file1.xlsx')
with open('seq1.json', 'w') as f:
    f.write(df.to_json())
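Note that df.to_json() on its own produces a column-oriented dump, not the {"columns": [...]} structure shown above. If you need that exact shape, a sketch along these lines should work (assuming row 0 holds the names and row 1 the descriptions):

import pandas as pd
import json

# Read without a header row so both rows come back as data
df = pd.read_excel('./file1.xlsx', header=None)

columns = [
    {'name': str(name).lower(), 'description': str(desc)}
    for name, desc in zip(df.iloc[0], df.iloc[1])
]

with open('seq1.json', 'w') as f:
    json.dump({'columns': columns}, f, indent=4)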
Related
I'm trying to convert CSV data to JSON. I've googled around but haven't found the right way to modify my code to get the desired output.
This is my code in Python:
import csv
import json

def csv_to_json(csvFilePath, jsonFilePath):
    jsonArray = []

    # reading csv (encoding is important)
    with open(csvFilePath, encoding='utf-8') as csvf:
        # csv library function
        csvReader = csv.DictReader(csvf)
        # convert each csv row into a python dictionary
        for column in csvReader:
            # add this python dictionary to the json array
            jsonArray.append(column)

    # conversion
    with open(jsonFilePath, 'w', encoding='utf-8') as jsonf:
        jsonString = json.dumps(jsonArray, indent=4)
        jsonf.write(jsonString)

csvFilePath = 'example.csv'
jsonFilePath = 'output.json'
csv_to_json(csvFilePath, jsonFilePath)
and this is my CSV file format:

Area,Employee
IT,Carl
IT,Walter
Financial Resources,Jennifer
My actual JSON Output:
[
    {
        "Area": "IT",
        "Employee": "Carl"
    },
    {
        "Area": "IT",
        "Employee": "Walter"
    },
    {
        "Area": "Financial Resources",
        "Employee": "Jennifer"
    }
]
My desired JSON Output:
[
    {
        "Area": "IT",
        "Employee": ["Carl", "Walter"]
    },
    {
        "Area": "Financial Resources",
        "Employee": ["Jennifer"]
    }
]
Thank you in advance!
Something like this should work.
import csv
import json

def csv_to_json(csvFilePath, jsonFilePath):
    areas = {}
    with open(csvFilePath, encoding='utf-8') as csvf:
        csvReader = csv.DictReader(csvf)
        for row in csvReader:
            area, employee = row["Area"], row["Employee"]  # split values
            if area in areas:  # collect all employees for the same area
                areas[area].append(employee)
            else:
                areas[area] = [employee]

    # convert dictionary to desired output format
    jsonArray = [{"Area": k, "Employee": v} for k, v in areas.items()]
    with open(jsonFilePath, 'w', encoding='utf-8') as jsonf:
        jsonString = json.dumps(jsonArray, indent=4)
        jsonf.write(jsonString)
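As a small variation, collections.defaultdict(list) removes the need for the if/else check; a sketch:

import csv
import json
from collections import defaultdict

def csv_to_json(csvFilePath, jsonFilePath):
    areas = defaultdict(list)  # missing keys start out as empty lists
    with open(csvFilePath, encoding='utf-8') as csvf:
        for row in csv.DictReader(csvf):
            areas[row["Area"]].append(row["Employee"])

    jsonArray = [{"Area": k, "Employee": v} for k, v in areas.items()]
    with open(jsonFilePath, 'w', encoding='utf-8') as jsonf:
        jsonf.write(json.dumps(jsonArray, indent=4))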
I am trying to convert my CSV email list to JSON format to mass email via an API. This is my code so far, but I am having trouble with the output. Nothing is output in my VS Code editor.
import csv
import json

def make_json(csvFilePath, jsonFilePath):
    data = {}
    with open(csvFilePath, encoding='utf-8') as csvf:
        csvReader = csv.DictReader(csvf)
        for rows in csvReader:
            key = rows['No']
            data[key] = rows

    with open(jsonFilePath, 'w', encoding='utf-8') as jsonf:
        jsonf.write(json.dumps(data, indent=4))

csvFilePath = r'/data/csv-leads.csv'
jsonFilePath = r'Names.json'
make_json(csvFilePath, jsonFilePath)
Here is my desired JSON format:
{
    "EmailAddress": "hello#youngstowncoffeeseattle.com",
    "Name": "Youngstown Coffee",
    "ConsentToTrack": "Yes"
},
Here's my CSV list:
No,EmailAddress,ConsentToTrack
Zylberschtein's Delicatessen & Bakery,catering#zylberschtein.com,Yes
Youngstown Coffee,hello#youngstowncoffeeseattle.com,Yes
It looks like you could use a csv.DictReader to make this easier.
If I have data.csv that looks like this:
Name,EmailAddress,ConsentToTrack
Zylberschtein's Delicatessen,catering#zylberschtein.com,yes
Youngstown Coffee,hello#youngstowncoffeeseattle.com,yes
I can convert it into JSON like this:
>>> import csv
>>> import json
>>> fd = open('data.csv')
>>> reader = csv.DictReader(fd)
>>> print(json.dumps(list(reader), indent=2))
[
  {
    "Name": "Zylberschtein's Delicatessen",
    "EmailAddress": "catering#zylberschtein.com",
    "ConsentToTrack": "yes"
  },
  {
    "Name": "Youngstown Coffee",
    "EmailAddress": "hello#youngstowncoffeeseattle.com",
    "ConsentToTrack": "yes"
  }
]
Here I've assumed the headers in the CSV can be used verbatim. I'll update this with an example if you need to modify key names (e.g. convert "No" to "Name").
If you need to rename a column, it might look more like this:
import csv
import json

with open('data.csv') as fd:
    reader = csv.DictReader(fd)
    data = []
    for row in reader:
        row['Name'] = row.pop('No')
        data.append(row)

print(json.dumps(data, indent=2))
Given this input:
No,EmailAddress,ConsentToTrack
Zylberschtein's Delicatessen,catering#zylberschtein.com,yes
Youngstown Coffee,hello#youngstowncoffeeseattle.com,yes
This will output:
[
  {
    "EmailAddress": "catering#zylberschtein.com",
    "ConsentToTrack": "yes",
    "Name": "Zylberschtein's Delicatessen"
  },
  {
    "EmailAddress": "hello#youngstowncoffeeseattle.com",
    "ConsentToTrack": "yes",
    "Name": "Youngstown Coffee"
  }
]
and to print it in my editor, is it simply print(json.dumps(list(reader), indent=2))?
I'm not really familiar with your editor; print is how you generate console output in Python.
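If you want the result written to a file (like the Names.json in your code) rather than printed, json.dump can write it directly; a minimal sketch:

import csv
import json

with open('data.csv') as fd, open('Names.json', 'w') as out:
    # dump the rows straight into the output file as a JSON array
    json.dump(list(csv.DictReader(fd)), out, indent=2)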
I have a CSV like this:
id,company_name,country,country_id
1,batstop,usa, xx
2,biorice,italy, yy
1,batstop,italy, yy
3,legstart,canada, zz
I want an array of dictionaries to import into Firebase. I need to group the different country information for the same company into a nested list of dictionaries. This is the desired output:
[ {'id':'1', 'agency_name':'batstop', 'countries': [{'country':'usa','country_id':'xx'}, {'country':'italy','country_id':'yy'}]},
  {'id':'2', 'agency_name':'biorice', 'countries': [{'country':'italy','country_id':'yy'}]},
  {'id':'3', 'agency_name':'legstart', 'countries': [{'country':'canada','country_id':'zz'}]} ]
Recently I had a similar task, and the groupby function from itertools together with the itemgetter function from operator, both from the Python standard library, helped me a lot. Here's the code for your CSV; note how important it is to define the primary keys of your dataset.
import csv
import json
from operator import itemgetter
from itertools import groupby

primary_keys = ['id', 'company_name']

# Start extraction
with open('input.csv', 'r') as file:
    # Read data from csv
    reader = csv.DictReader(file)
    # Sort data according to the primary keys
    reader = sorted(reader, key=itemgetter(*primary_keys))

# Create a list of tuples:
# each tuple contains a dict of the group's primary keys and values, and a list of the group's ordered dicts
groups = [(dict(zip(primary_keys, _[0])), list(_[1])) for _ in groupby(reader, key=itemgetter(*primary_keys))]

# Create formatted dicts to be converted into firebase objects
group_dicts = []
for group in groups:
    group_dict = {
        "id": group[0]['id'],
        "agency_name": group[0]['company_name'],
        "countries": [
            dict(country=_['country'], country_id=_['country_id']) for _ in group[1]
        ],
    }
    group_dicts.append(group_dict)

print("\n".join([json.dumps(_, indent=2) for _ in group_dicts]))
Here's the output:
{
  "id": "1",
  "agency_name": "batstop",
  "countries": [
    {
      "country": "usa",
      "country_id": " xx"
    },
    {
      "country": "italy",
      "country_id": " yy"
    }
  ]
}
{
  "id": "2",
  "agency_name": "biorice",
  "countries": [
    {
      "country": "italy",
      "country_id": " yy"
    }
  ]
}
{
  "id": "3",
  "agency_name": "legstart",
  "countries": [
    {
      "country": "canada",
      "country_id": " zz"
    }
  ]
}
No external libraries are needed.
Hope it suits you well!
You can try this. You may have to change a few parts to get it working with your CSV, but hopefully it's enough to get you started:
csv = [
    "1,batstop,usa, xx",
    "2,biorice,italy, yy",
    "1,batstop,italy, yy",
    "3,legstart,canada, zz"
]

output = {}  # dictionary useful to avoid searching the list for existing ids

# Parse each row
for line in csv:
    cols = line.split(',')
    id = int(cols[0])
    agency_name = cols[1]
    country = cols[2]
    country_id = cols[3]

    if id in output:
        output[id]['countries'].append({'country': country,
                                        'country_id': country_id})
    else:
        output[id] = {'id': id,
                      'agency_name': agency_name,
                      'countries': [{'country': country,
                                     'country_id': country_id}]
                      }

# Put into a list
json_output = []
for key in output.keys():
    json_output.append(output[key])

# Check output
for row in json_output:
    print(row)
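If you then need the grouped records as a JSON file for the Firebase import, json.dump can serialize the json_output list directly; a minimal sketch (the filename is just an example):

import json

# Write the grouped records as a JSON array (example filename)
with open('companies.json', 'w') as f:
    json.dump(json_output, f, indent=2)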
So I have a JSON template and I am reading from a CSV to update some of the values of the JSON properties. I then put all the JSON objects in an array to write to a file, but in my file, all the JSON elements have the same value.
The issue is that the old values are being overwritten somehow. How should I fix that?
import pandas as pd
import json

def main():
    df = pd.read_csv("Daily_EXRATE.csv")
    df = df.loc[df['Field1'] == '04']

    opdb = {
        "sell_rate": 1.2676,
        "type": "currency_exchange",
        "version": "1"
    }

    opdbarray = []
    for index, rowsr in df.iterrows():
        data = {}
        data = rowsr.to_json()
        data = json.loads(data)
        opdb["sell_rate"] = data["Field11"]
        opdbarray.append(opdb)
        print(json.dumps(opdb, indent=4))

    # now write output to a file
    jsonDataFile = open("ccData_1.json", "w")
    jsonDataFile.write(json.dumps(opdbarray, indent=4, sort_keys=True))
    jsonDataFile.close()
The output entries are all the same:
[
    {
        "sell_rate": "2.1058000000",
        "type": "currency_exchange",
        "version": "1"
    },
    {
        "sell_rate": "2.1058000000",
        "type": "currency_exchange",
        "version": "1"
    },
    {
        "sell_rate": "2.1058000000",
        "type": "currency_exchange",
        "version": "1"
    },
You're appending the same opdb dictionary to opdbarray each time through the loop, just replacing its sell_rate element. You need to create a new dictionary each time.
import pandas as pd
import json

def main():
    df = pd.read_csv("Daily_EXRATE.csv")
    df = df.loc[df['Field1'] == '04']

    opdbarray = []
    for index, rowsr in df.iterrows():
        data = rowsr.to_json()
        data = json.loads(data)
        opdb = {
            "type": "currency_exchange",
            "version": "1",
            "sell_rate": data["Field11"]
        }
        opdbarray.append(opdb)
        print(json.dumps(opdb, indent=4))

    # now write output to a file
    jsonDataFile = open("ccData_1.json", "w")
    jsonDataFile.write(json.dumps(opdbarray, indent=4, sort_keys=True))
    jsonDataFile.close()
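Alternatively, if you want to keep a reusable template dict like in the question, copy it on each iteration instead of rebuilding it; a sketch of that approach:

import copy
import json
import pandas as pd

df = pd.read_csv("Daily_EXRATE.csv")
df = df.loc[df['Field1'] == '04']

template = {
    "sell_rate": 1.2676,
    "type": "currency_exchange",
    "version": "1"
}

opdbarray = []
for index, rowsr in df.iterrows():
    data = json.loads(rowsr.to_json())
    opdb = copy.deepcopy(template)   # fresh dict each time, so earlier entries are not overwritten
    opdb["sell_rate"] = data["Field11"]
    opdbarray.append(opdb)

with open("ccData_1.json", "w") as jsonDataFile:
    jsonDataFile.write(json.dumps(opdbarray, indent=4, sort_keys=True))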
I am trying to parse through JSON and write the results to a CSV file. The "name" values are supposed to be the column headers and the "value" values are what needs to be stored. The CSV writer does not separate the strings with commas: eventIdlistingsvenueperformer. When I try something like header = col['name']+',' I get eventId","listings","venue","performer, and that isn't read as a CSV file. My questions are: am I going about this the right way, and how can I separate the strings with commas?
"results": [
{
"columns": [
{
"name": "eventId",
"value": "XXXX",
"defaultHidden": false
},
{
"name": "listings",
"value": "8",
"defaultHidden": false
},
{
"name": "venue",
"value": "Nationwide Arena",
"defaultHidden": false
}]
This is my code:

json_decode = json.loads(data)
report_result = json_decode['results']

with open('testReport2.csv', 'w') as result_data:
    csvwriter = csv.writer(result_data, delimiter=',')
    count = 0
    for res in report_result:
        deeper = res['columns']
        for col in deeper:
            if count == 0:
                header = col['name']
                csvwriter.writerow([header,])
        count += 1
    for written in report_result:
        deeper = res['columns']
        for col in deeper:
            csvwriter.writerow([trouble,])
result_data.close()
Try the code below:
json_decode = json.loads(data)
report_result = json_decode['results']

new_dict = {}
for result in report_result:
    columns = result["columns"]
    for value in columns:
        new_dict[value['name']] = value['value']

with open('testReport2.csv', 'w') as result_data:
    csvwriter = csv.DictWriter(result_data, delimiter=',', fieldnames=new_dict.keys())
    csvwriter.writeheader()
    csvwriter.writerow(new_dict)
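Note that if results contains more than one entry, the dictionary above only keeps the values of the last one. A sketch that writes one CSV row per result (assuming every result has the same column names, and reusing the data string from the question):

import csv
import json

json_decode = json.loads(data)  # 'data' is the JSON string from the question

# One dict per result, e.g. {'eventId': 'XXXX', 'listings': '8', 'venue': 'Nationwide Arena'}
rows = [
    {col['name']: col['value'] for col in result['columns']}
    for result in json_decode['results']
]

with open('testReport2.csv', 'w', newline='') as result_data:
    writer = csv.DictWriter(result_data, fieldnames=rows[0].keys())
    writer.writeheader()
    writer.writerows(rows)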
Try this:
json_decode = json.loads(data)
report_result = json_decode['results']

with open('testReport2.csv', 'w') as result_data:
    csvwriter = csv.writer(result_data, delimiter=',')
    header = list(report_result[0]['columns'][0].keys())
    csvwriter.writerow(header)
    for written in report_result:
        for row in written['columns']:
            deeper = row.values()
            csvwriter.writerow(deeper)
result_data.close()