Good Afternoon,
I am still pretty new to Python but have found it particularly addictive; there are definitely some "quirks" to Python, though, that have been a pain to get through. I am currently trying to take a JSON file and flatten it out into a table. There are a ton of posts, specifically here on Stack Overflow, on converting it into a flat dict, but that doesn't let me convert it into a table. This has been way harder than I expected.
I am currently getting the following error, which to me suggests that something is wrong with my key-generation portion.
Code:
import json
import os
import csv
import copy
from pandas.io.json._normalize import nested_to_record
#Basic Variables
scriptDirectory = os.path.dirname(os.path.realpath(__file__))
def getKeys(dictionary: dict, result: list = None, parentKey='', sep='.', skipParent=False) -> list:
    """Recursively collect the flattened key paths of a nested dict.

    Nested keys are joined with *sep* (e.g. "parent.child"). When
    *skipParent* is true, the top-level key is dropped from the generated
    paths. Empty leaf keys ("") are ignored. Returns a list of unique
    paths, in first-seen order.

    :param dictionary: the (possibly nested) dict to scan
    :param result: accumulator shared across recursive calls
    :param parentKey: prefix built up so far
    :param sep: separator between path segments
    :param skipParent: drop the top-level key from generated paths
    """
    # Use None + explicit check instead of a mutable default argument,
    # which would be shared across calls.
    if result is None:
        result = []
    # Loop through all keys and record each unique flattened path.
    for key in dictionary.keys():
        newKey = (parentKey + sep + key if parentKey else str(key))
        if isinstance(dictionary[key], dict):
            # Recurse into nested dicts; reset the prefix when skipParent is set.
            result = getKeys(dictionary[key], result=result,
                             parentKey=('' if skipParent else str(newKey)))
        else:
            if key != "" and newKey not in result:
                result.append(newKey)
    return result
def convertKey(data: str, languageDict: dict):
    """Translate *data* through the language dictionary.

    Returns the mapped value when the key exists, otherwise returns
    *data* itself unchanged.
    """
    return languageDict.get(data, data)
#Read the JSON files.
#Library File: the nested inventory data to be flattened.
with open(os.path.join(scriptDirectory,'inventoryItem.json'), "r",encoding='utf-8') as read_file:
    lib = json.load(read_file)
#English Dictionary: maps locale ids to display strings.
with open(os.path.join(scriptDirectory,'en.json'), "r",encoding='utf-8') as read_file:
    en = json.load(read_file)
# Emit one CSV per inventory section (coin, consumable, scroll, ...).
for key in lib['inventoryItem'].keys():
    print(key)
    # Template containing every flattened column name of this section, so
    # each row carries the full set of fields even when a record omits some.
    dictTemplate = dict.fromkeys(getKeys(dictionary=lib['inventoryItem'][key], skipParent=True), None)
    print(dictTemplate)
    firstItem = 0
    try:
        with open(os.path.join(scriptDirectory,'export',f"{key}.csv"),"w", newline='', encoding='utf-8') as csvfile:
            for item in lib['inventoryItem'][key]:
                # Private copy of the template for this record.
                entry = copy.deepcopy(dictTemplate)
                entry.update(nested_to_record(lib['inventoryItem'][key][item], sep='.'))
                if key == 'coin':
                    # Resolve locale ids to English display text.
                    entry['name'] = convertKey(data=f"LIB_COIN_NAME_{entry['id']}", languageDict=en)
                    entry['description'] = convertKey(data=f"LIB_COIN_DESC_{entry['id']}", languageDict=en)
                    entry['obtainNavigatorData.not_enough_message'] = convertKey(data=entry['obtainNavigatorData.not_enough_message'], languageDict=en)
                    entry['obtainNavigatorData.not_enough_title'] = convertKey(data=entry['obtainNavigatorData.not_enough_title'], languageDict=en)
                    entry['obtainNavigatorData.button_label'] = convertKey(data=entry['obtainNavigatorData.button_label'], languageDict=en)
                elif key == 'consumable':
                    pass
                elif key == 'scroll':
                    # Drop cost columns that are not wanted in the export.
                    del entry["fragmentMergeCost"]
                    del entry["fragmentSellCost"]
                    del entry["fragmentBuyCost"]
                    del entry["buyCost"]
                if firstItem == 0:
                    # First row of the file: create the writer and header row.
                    firstItem += 1
                    writer = csv.DictWriter(csvfile, fieldnames=entry.keys(), delimiter=';')
                    writer.writeheader()
                writer.writerow(entry)
    except IOError:
        print("I/O error")
Data set that's giving me trouble:
"consumable": {
"96": {
"id": 96,
"rewardType": "",
"rewardAmount": 0,
"effectDescription": {
"": ""
},
"buyCost": null,
"sellCost": null,
"buySpecialCost": null,
"assetAtlas": 4,
"assetTexture": "social_vk",
"iconAssetTexture": "",
"color": 1,
"hidden": 0,
"descLocaleId": "PLAY_AT_HOME_TICKET",
"obtainNavigatorData": null
},
Desired Output.
This is from a section called "coins", but the example data is from "consumables". Certain sections work fine, but others cause issues because a nested value uses an empty-string key ("": "").
Finally figured out the solution. For anyone who wants to properly convert a JSON file to a table, this is the only solution I could come up with :)
import json
import os
import csv
import copy
def processJSON(initialDict: dict, createTemplate: bool = False, existingKeys: dict = None, parentKey: str = None, sep: str = '.', skipParent: bool = False) -> dict:
    """Flatten a nested dict into a single-level ``{path: value}`` dict.

    Nested keys are joined with *sep*; *skipParent* drops the top-level
    key from the generated paths. With *createTemplate* every value is the
    empty string (useful as a CSV header template); otherwise each leaf
    value is stringified. First-seen keys win on collision.

    :param initialDict: the (possibly nested) dict to flatten
    :param createTemplate: emit empty-string values instead of data
    :param existingKeys: accumulator shared by recursive calls
    :param parentKey: prefix built up so far
    :param sep: separator between path segments
    :param skipParent: drop the top-level key from generated paths
    """
    outPut = (existingKeys if existingKeys else {})
    # Loop through all keys, recursing into nested dicts and recording leaves.
    for key in initialDict.keys():
        keyTitle = str(parentKey + sep + key if parentKey else key)
        if isinstance(initialDict[key], dict):
            # Only the top-level prefix is skipped; deeper levels keep theirs.
            parentTitle = ('' if skipParent else keyTitle)
            outPut.update(processJSON(initialDict=initialDict[key], existingKeys=outPut,
                                      createTemplate=createTemplate, parentKey=parentTitle, sep=sep))
        elif keyTitle not in outPut:
            outPut[keyTitle] = str('' if createTemplate else initialDict[key])
    return dict(outPut)
def convertKey(data: str, languageDict: dict):
    """Look up *data* in the language dictionary.

    Falls back to returning *data* itself when no translation exists.
    """
    if data in languageDict:
        return languageDict[data]
    return data
#Basic Variables
scriptDirectory = os.path.dirname(os.path.realpath(__file__))
#Read the JSON files.
#Library File: the nested inventory data to be flattened.
with open(os.path.join(scriptDirectory,'inventoryItem.json'), "r",encoding='utf-8') as read_file:
    lib = json.load(read_file)
#English Dictionary: maps locale ids to display strings.
with open(os.path.join(scriptDirectory,'en.json'), "r",encoding='utf-8') as read_file:
    en = json.load(read_file)
# Emit one CSV per inventory section.
for key in lib['inventoryItem'].keys():
    firstItem = True
    # Header template containing every flattened column of this section.
    header = processJSON(initialDict=lib['inventoryItem'][key], createTemplate=True, skipParent=True)
    try:
        with open(os.path.join(scriptDirectory,'export',f"{key}.csv"),"w", newline='', encoding='utf-8') as csvfile:
            for item in lib['inventoryItem'][key]:
                # Each row starts from an independent copy of the template.
                row = dict(copy.deepcopy(header))
                row.update(processJSON(initialDict=lib['inventoryItem'][key][item]))
                # Resolve locale ids to English display text, per section.
                if key == 'coin':
                    row['name'] = convertKey(data=f"LIB_COIN_NAME_{row['id']}", languageDict=en)
                    row['description'] = convertKey(data=f"LIB_COIN_DESC_{row['id']}", languageDict=en)
                    row['obtainNavigatorData.not_enough_message'] = convertKey(data=row['obtainNavigatorData.not_enough_message'], languageDict=en)
                    row['obtainNavigatorData.not_enough_title'] = convertKey(data=row['obtainNavigatorData.not_enough_title'], languageDict=en)
                    row['obtainNavigatorData.button_label'] = convertKey(data=row['obtainNavigatorData.button_label'], languageDict=en)
                elif key == 'consumable':
                    row['name'] = convertKey(data=f"LIB_CONSUMABLE_NAME_{row['id']}", languageDict=en)
                    row['description'] = convertKey(data=f"LIB_CONSUMABLE_DESC_{row['descLocaleId']}", languageDict=en)
                    row['obtainNavigatorData.button_label'] = convertKey(data=row['obtainNavigatorData.button_label'], languageDict=en)
                    row['obtainNavigatorData.not_enough_message'] = convertKey(data=row['obtainNavigatorData.not_enough_message'], languageDict=en)
                    row['obtainNavigatorData.not_enough_title'] = convertKey(data=row['obtainNavigatorData.not_enough_title'], languageDict=en)
                elif key == 'gear':
                    row['name'] = convertKey(data=f"LIB_GEAR_NAME_{row['id']}", languageDict=en)
                elif key == 'petGear':
                    row['name'] = convertKey(data=f"LIB_PET_GEAR_NAME_{row['id']}", languageDict=en)
                elif key == 'pseudo':
                    row['name'] = convertKey(data=f"LIB_PSEUDO_{row['constName']}", languageDict=en)
                    row['description'] = convertKey(data=f"LIB_PSEUDO_DESC_{row['id']}", languageDict=en)
                elif key == 'scroll':
                    row['name'] = convertKey(data=f"LIB_SCROLL_NAME_{row['id']}", languageDict=en)
                    # Drop cost columns that are not wanted in the export.
                    del row["fragmentMergeCost"]
                    del row["fragmentSellCost"]
                    del row["fragmentBuyCost"]
                    del row["buyCost"]
                else:
                    print(key)
                if firstItem:
                    # First row of the file: create the writer and header row.
                    firstItem = False
                    writer = csv.DictWriter(csvfile, fieldnames=row.keys(), delimiter=',')
                    writer.writeheader()
                writer.writerow(row)
    except IOError:
        print("I/O error")
Related
I work on a project on python.
I want to return a list of name from a text file.
I start with one name I know.
My text file is like :
ALPHA;n10;Output
ALPHA;n11;Input
ALPHA;n12;Input
BETA;n10;Input
BETA;n14;Input
CHARLIE;n10;Input
CHARLIE;n13;Output
DELTA;n13;Output
DELTA;n12;Input
Let's say I start from the name ALPHA and I know it's an Output.
So I have to search the number link to this name which is n10.
I want to return all the name of the number n10 which are in Input.
So at the end I want the list ["BETA", "CHARLIE"]
For the moment I code the following function :
file = "path of the texte file"
name = "ALPHA"
liste_new_name = []
def search_new_name(liste):
    """Append to *liste* every name wired as Input to the node that the
    global *name* drives as Output, then return the list.

    Reads the global semicolon-delimited *file* ("NAME;node;direction").
    Fixes in this version: the original had a SyntaxError (missing ')'),
    read only the first line of the file via readline(), and never looked
    past that line for either the node number or the matching names.
    """
    with open(file, "r") as file_txt:
        lines = file_txt.readlines()
    # First pass: find the node number that `name` drives as an Output.
    num = None
    for contenu in lines:
        if contenu.split(";")[0] == name and ";Output" in contenu:
            num = contenu.split(";")[1]
            break
    if num is None:
        print("No num found")
        return
    # Second pass: collect every name that takes that node as an Input.
    for contenu in lines:
        if contenu.split(";")[1] == num and ";Input" in contenu:
            liste.append(contenu.split(";")[0])
    if liste:
        print(liste)
        return liste
    print("No new name found")
search_new_name(liste_new_name)
My problem is that I get "No num found", but as the example shows, I should get a list.
I would parse the file into a dictionary. This will make searching much easier and will allow you to do multiple searches without having to re-read the file:
def parse_file(path):
    """Parse a semicolon-delimited file into a lookup dictionary.

    Each line has the form "NAME;node;direction". The result maps each
    name to {"Input": [nodes...], "Output": [nodes...]}. Malformed lines
    are skipped; lines with an unknown direction are reported.
    """
    data = {}
    with open(path, 'r') as in_file:
        for line in in_file:
            try:
                name, n, direction = line.strip().split(';')
                # Create the per-name buckets on first sight, then file the node.
                data.setdefault(name, {"Input": [], "Output": []})[direction].append(n)
            except KeyError:
                # Direction was neither "Input" nor "Output".
                print(f"Error with: {line}")
            except ValueError:
                # Wrong number of ';'-separated fields; ignore the line.
                pass
    return data
This will return a dictionary like:
{
'ALPHA': {'Input': ['n11', 'n12'], 'Output': ['n10']},
'BETA': {'Input': ['n10', 'n14'], 'Output': []},
'CHARLIE': {'Input': ['n10'], 'Output': ['n13']},
'DELTA': {'Input': ['n12'], 'Output': ['n13']}
}
With that searches can be done with a simple list comprehension:
def search_new_name(name, data):
    """Return the names whose Input list shares a node with *name*'s Outputs.

    *data* is the dict built by parse_file. Returns None when *name* is
    unknown; otherwise a list of matching names in dictionary order.
    """
    if name not in data:
        return None
    targets = set(data[name]["Output"])
    # A name matches when any of its Input nodes is driven by *name*.
    return [other for other in data if targets.intersection(data[other]["Input"])]
Sample usage:
data = parse_file(r"C:\foo\bar.txt")
print(search_new_name("ALPHA", data))
Output:
['BETA', 'CHARLIE']
You will have to read all the lines; creating a dictionary with the 'number' and 'type' combination as the key will solve the problem.
file = "path of the texte file"
name = "ALPHA"
liste_new_name = []
def search_new_name(name):
    """Return the names that read (Input) the node which *name* writes (Output).

    Builds a map keyed on "num_type" from the global semicolon-delimited
    *file*, then looks up the Input entry for the node *name* outputs.
    Returns [] when nothing matches.

    Fixes in this version: ``name_map[key] = name_map[key].append(...)``
    stored None (list.append returns None); lines kept their trailing
    newline so ``l_type == "Output"`` never matched; the file handle was
    never closed.
    """
    name_map = {}  ## dict to save all the info: "num_type" -> [names]
    search_key = False
    with open(file, "r") as file_txt:
        all_lines = file_txt.readlines()
    for contenu in all_lines:
        # strip() drops the trailing newline so l_type compares equal.
        [l_name, l_num, l_type] = contenu.strip().split(";")
        key = l_num + "_" + l_type  ## use num and type combination as a key
        if l_name == name and l_type == "Output":
            search_key = l_num + "_" + l_type
        if key in name_map:
            name_map[key].append(l_name)
        else:
            name_map[key] = [l_name]
    if search_key is False:
        print("Num not found")
        return []
    search_num = search_key.split('_')[0]
    if search_num + '_Input' in name_map:
        return name_map[search_num + '_Input']
    ## return empty list if no input found
    return []
search_new_name(name)
I try to continue with my idea with two functions like that :
file = "path of the text file"
name = "ALPHA"
new_l_name = []
num = []
def search_num(num):
    """Append to *num* every node number that the global *name* drives as
    an Output, then return the list.

    Reads the global semicolon-delimited *file*. Fixes in this version:
    ``fichier_txt`` was an undefined name (NameError), a ')' was missing
    (SyntaxError), the first line of the file was silently skipped, and
    the function returned on the first match instead of scanning all lines.
    """
    with open(file, "r") as file_txt:
        for contenu in file_txt:
            if contenu.split(";")[0] == name and ";Output" in contenu:
                num.append(contenu.split(";")[1])
    if not num:
        print("No num found")
    return num
search_num(num)
def search_new_name(liste):
    """Append to *liste* every name wired as Input to the node found by
    search_num (global ``num[0]``), then return the list.

    Fixes in this version: the original returned inside the loop, so only
    the first matching name was collected (["BETA"] instead of
    ["BETA", "CHARLIE"]); the first line of the file was skipped by the
    readline()-before-loop pattern; the file handle was never closed.
    """
    with open(file, "r") as file_txt:
        for contenu in file_txt:
            if contenu.split(";")[1] == num[0] and ";Input" in contenu:
                liste.append(contenu.split(";")[0])
    if liste:
        print("the list of new name : {}".format(liste))
        return liste
    print("No new name found")
search_new_name(new_l_name)
Finally, the num we search for is returned, but the list of new names contains only the first new name found in the text file, not the others. It returns ["BETA"] and not ["BETA", "CHARLIE"] as we want.
If someone have an idea.
Thanks.
import codecs
import csv

# Tag each VAERS record with a SERIOUS column: "Y" when any of the
# outcome flags below is "Y", else "N".
# Fixes in this version: both files are now closed via context managers,
# and the output is opened with newline='' as the csv module requires
# (otherwise blank rows appear on Windows).
with codecs.open("2019VAERSData.csv", "r", "latin1") as infile, \
        open("2019-vaers-serious.csv", "w", newline="") as outfile:
    data = csv.reader(infile)
    writer = csv.writer(outfile)
    keys = next(data)  # header row
    # Column indexes of the outcome flags that mark a record as serious.
    serious_keys = [keys.index(key) for key in ["DISABLE", "DIED", "ER_VISIT", "HOSPITAL"]]
    keys += ["SERIOUS"]
    writer.writerow(keys)
    for row in data:
        is_serious = any(row[key] == "Y" for key in serious_keys)
        row += ["Y" if is_serious else "N"]
        writer.writerow(row)
I need help sorting my key-value pair. My output is in this url http://pastebin.com/ckKAtP5y.
However, what I've been trying to do is.
{
"courses": [
{
"professors": [
{
"first_name": "Zvezdelina",
"last_name": "Stankova",
"professor_url": "http://www.ratemyprofessors.com/ShowRatings.jsp?tid=375269",
"helpfullness": 4.3,
"clarity": 4.3,
"overall_rating": 4.3
}],
"course_name": "CHEM 1",
"course_mentioned_times": 37
},
{
"professors": [
{
"first_name": "Alan",
"last_name": "Shabel",
"professor_url": "http://www.ratemyprofessors.com/ShowRatings.jsp?tid=1309831",
"helpfullness": 3.9,
"clarity": 3.5,
"overall_rating": 3.7
}],
"course_name": "CHEMISTRY 231",
"course_mentioned_times": 50
}
]
So what I want to do is I want to compare 'CHEM' and 'CHEMISTRY' in "course_name" and just get me the most 'course_mentioned_times' and remove the other one. In this case I'd want CHEMISTRY 231 because it's mentioned 50 times.
Here's what I've been helped with so far.
if __name__ == "__main__":
import json
#'output_info.json is http://pastebin.com/ckKAtP5y
with open('output_info.json') as data_file:
data = json.load(data_file)
# NOTE(review): this is an alias, not a copy -- temp_data and data are the
# same object, so every comparison below compares the list with itself.
temp_data = data
greater = []
len1 = len(data['courses'])
len2 = len1
# Compare every ordered pair of courses (i, j), skipping i == j.
for i in range(0,len1):
for j in range(0, len2):
if i==j:
continue
# Two courses "match" when the first three characters of their names agree.
if data['courses'][i]['course_name'][0] == temp_data['courses'][j]['course_name'][0]:
if data['courses'][i]['course_name'][1] == temp_data['courses'][j]['course_name'][1]:
if data['courses'][i]['course_name'][2] == temp_data['courses'][j]['course_name'][2]:
# Record the index of the more-mentioned course of the pair.
if data['courses'][i]['course_mentioned_times']> temp_data['courses'][j]['course_mentioned_times']:
greater.append(i)
else:
greater.append(j)
# Deduplicate the "winner" indices.
final = []
for i in greater:
if i not in final:
final.append(i)
list_order = []
for i in range(0,len(data['courses'])):
list_order.append(i)
# Indices that never "won" a comparison are the ones to remove.
new_final = []
for i in list_order:
if i not in final:
new_final.append(i)
# NOTE(review): pop() shrinks the list while these indices were computed
# against the original length; the i-1 adjustment only accounts for a single
# earlier removal, so later indices can exceed the shrunken list -- this is
# the reported "IndexError: pop index out of range".
for i in new_final:
if i!=new_final[0]:
i=i-1
data['courses'].pop(i)
# Writing the new json data back to data.json file.
with open('data.json', 'w') as f:
json.dump(data, f)
This code gives me an IndexError
data['courses'].pop(i)
IndexError: pop index out of range
After a lot of back and forth in question's comments:
#coding:utf-8
# NOTE(review): Python 2 script (print statements); it groups courses by the
# first three characters of their name and reports the most-mentioned course
# of each group.
import json
filename = 'data.json'
with open(filename, 'r') as f:
data = json.load(f)
courses = data.get('courses', None)
if courses:
# The three-letter prefixes (e.g. "CHE") act as group keys.
keys = sorted(set([course.get('course_name', None).strip().split()[0][0:3] for course in courses]))
results = {'courses': {}}
for key in keys:
results['courses'][key] = []
temp = {}
# Collect every course whose name starts with this prefix.
for course in courses:
course_name = course.get('course_name', None)
professors = course.get('professors', None)
if course_name.strip().split()[0][0:3] == key:
course_mentioned_times = course.get('course_mentioned_times')
temp[course_name] = {'course_mentioned_times':course_mentioned_times, 'professors': professors}
results['courses'][key] = temp
else:
raise Exception('No courses could be found on {}'.format(filename))
def get_most_mentioned(name):
# Return all courses of the prefix group tied for the highest
# course_mentioned_times value.
name = name[0:3]
data = results.get('courses', None).get(name)
max_mentioned_times = max(map(lambda m: data.get(m, None).get('course_mentioned_times'), data.keys()))
most_mentioned = []
for course_name, values in data.items():
course_mentioned_times = values.get('course_mentioned_times', None)
if course_mentioned_times == max_mentioned_times:
most_mentioned.append({'course_name': course_name, 'course_mentioned_times': course_mentioned_times, \
'professors': values.get('professors')})
return most_mentioned
# Report the winner(s) of each prefix group.
print "Course with most mentioned times:"
print "---------------------------------"
for key in keys:
print "[*] For Key '{}':".format(key)
for item in get_most_mentioned(key):
course_name = item.get('course_name', None)
print " Course Name: {}".format(course_name)
print " Mentioned Times: {}\n".format(item.get('course_mentioned_times'))
print " Professors:\n"
for i, professor in enumerate(item.get('professors', None), start=1):
print " {}) Full name: {} {}".format(i, professor.get('first_name'), professor.get('last_name'))
print " URL: {}".format(professor.get('professor_url'))
print " Helpfullness: {}".format(professor.get('helpfullness'))
print " Clarity: {}".format(professor.get('clarity'))
print " Overall_rating: {}".format(professor.get('overall_rating'))
print ""
print ""
import json
import collections

# Pick the most-mentioned course of each three-letter name prefix.
with open('output_info.json') as data_file:
    data = json.load(data_file)
courses = data['courses']
# Group courses by the first three letters of the first word of their name.
courses_by_prefix = collections.defaultdict(list)
for course in courses:
    prefix = course['course_name'].split(' ', 2)[0].upper()[:3]
    courses_by_prefix[prefix].append(course)
# Keep the single most-mentioned course per prefix group.
results = []
for prefix, courselist in courses_by_prefix.items():
    mosttimes = max(courselist, key=lambda c: c['course_mentioned_times'])
    results.append(mosttimes)
print(results)
The client includes 3 rows at the bottom that contain totals for me to reconcile against in my program. The only problem is that my program exhausts the input file with readlines() before it can do anything else. Is there a way to keep the file from being exhausted during my get_recon_total function call?
#!/usr/bin/env python
# pre_process.py
import csv
import sys
def main():
# Entry point: pre-process the donation CSV given as argv[1] into argv[2].
infile = sys.argv[1]
outfile = sys.argv[2]
with open(infile, 'rbU') as in_obj:
# Create reader object, get fieldnames for later on
reader, fieldnames = open_reader(in_obj)
# NOTE(review): get_recon_totals calls readlines() on the same file
# object the DictReader wraps, which exhausts it -- the subject of the
# question above.
nav_tot_cnt, nav_rec_cnt, nav_erec_cnt = get_recon_totals(in_obj)
print nav_tot_cnt, nav_rec_cnt, nav_erec_cnt
# This switches the dictionary to a sorted list... necessary??
reader_list = sorted(reader, key=lambda key: (key['PEOPLE_ID'],
key['DON_DATE']))
# Create a list to contain section header information
header_list = create_header_list(reader_list)
# Create dictionary that contains header list as the key,
# then all rows that match as a list of dictionaries.
master_dict = map_data(header_list, reader_list)
# Write data to processed file, create recon counts to compare
# to footer record
tot_cnt, rec_cnt, erec_cnt = write_data(master_dict, outfile, fieldnames)
print tot_cnt, rec_cnt, erec_cnt
def open_reader(file_obj):
    '''
    Wrap *file_obj* in a csv.DictReader.

    The first line of the file supplies the fieldnames; every following
    row is yielded as a dict keyed by them. Returns the reader together
    with its fieldnames (reused later by the DictWriter).
    '''
    dict_reader = csv.DictReader(file_obj, delimiter=',')
    return dict_reader, dict_reader.fieldnames
def create_header_list(in_obj):
    '''
    Return the distinct (PEOPLE_ID, DON_DATE) pairs found in the rows,
    preserving first-seen order.
    '''
    seen = []
    for row in in_obj:
        pair = (row['PEOPLE_ID'], row['DON_DATE'])
        if pair not in seen:
            seen.append(pair)
    return seen
def map_data(header_list, data_obj):
    '''
    Group rows by their (PEOPLE_ID, DON_DATE) section header.

    For each header in *header_list*, collects the matching rows from
    *data_obj* and keys them under a tuple made of the header extended
    with the first matching row's amount fields (plus the combined YTD
    total when both YTD values parse as floats).
    '''
    master_dict = {}
    for element in header_list:
        # All rows belonging to this (PEOPLE_ID, DON_DATE) section.
        section = [row for row in data_obj
                   if (row['PEOPLE_ID'], row['DON_DATE']) == element]
        first = section[0]
        extras = [first['DEDUCT_AMT'],
                  first['ND_AMT'],
                  first['DEDUCT_YTD'],
                  first['NONDEDUCT_YTD']]
        try:
            # Combined YTD total; skipped when either value is non-numeric.
            extras.append(float(first['DEDUCT_YTD']) + float(first['NONDEDUCT_YTD']))
        except ValueError:
            pass
        master_dict[tuple(list(element) + extras)] = section
    return master_dict
def write_data(in_obj, outfile, in_fieldnames):
# Write the grouped sections to *outfile*: each section header row is
# prefixed with ' -01- ', each detail row with ' -02- '. Returns
# (total row count, section count, e-mail receipt count).
# NOTE(review): Python 2 code ('wb' mode, dict.iteritems); raw
# writer_outfile.write() calls are interleaved with csv writer calls, so
# the prefixes and the csv rows share lines by construction.
with open(outfile, 'wb') as writer_outfile:
writer = csv.writer(writer_outfile, delimiter=',')
dict_writer = csv.DictWriter(writer_outfile,
fieldnames=in_fieldnames,
extrasaction='ignore')
tot_cnt = 0
rec_cnt = 0
email_cnt = 0
for k, v in in_obj.iteritems():
# Section header line.
writer_outfile.write(' -01- ')
writer.writerow(k)
rec_cnt += 1
for i, e in enumerate(v):
# Count rows carrying either e-mail interest code.
if v[i]['INT_CODE_EX0006'] != '' or v[i]['INT_CODE_EX0028'] != '':
email_cnt += 1
writer_outfile.write(' -02- ')
dict_writer.writerow(e)
tot_cnt += 1
return tot_cnt, rec_cnt, email_cnt
def get_recon_totals(in_obj):
# Scan the raw file object for the three trailing 'T,...' footer rows and
# return (total amount, receipt count, e-receipt count) as strings.
# NOTE(review): Python 2 print statement below; readlines() consumes the
# whole file object, leaving nothing for the csv reader that shares it --
# this is the exhaustion problem described in the question.
print in_obj
client_tot_cnt = 0
client_rec_cnt = 0
client_erec_cnt = 0
for line in in_obj.readlines():
line = line.split(',')
if line[0] == 'T' and line[1] == 'Total Amount':
print 'Total Amount found.'
client_tot_cnt = line[2]
elif line[0] == 'T' and line[1] == 'Receipt Count':
print 'Receipt Count found.'
client_rec_cnt = line[2]
elif line[0] == 'T' and line[1] == 'Email Receipt Count':
print 'E-Receipt Count Found.'
client_erec_cnt = line[2]
return client_tot_cnt, client_rec_cnt, client_erec_cnt
if __name__ == '__main__':
main()
If your file is not very large, you can convert reader generator to a list of dcitonary , by calling list() on reader and then use it in your code instead of trying to read from the file directly.
Example -
def main():
infile = sys.argv[1]
outfile = sys.argv[2]
with open(infile, 'rbU') as in_obj:
# Create reader object, get fieldnames for later on
reader, fieldnames = open_reader(in_obj)
reader_list = list(reader)
nav_tot_cnt, nav_rec_cnt, nav_erec_cnt = get_recon_totals(reader_list)
print nav_tot_cnt, nav_rec_cnt, nav_erec_cnt
# This switches the dictionary to a sorted list... necessary??
reader_list = sorted(reader_list, key=lambda key: (key['PEOPLE_ID'],
key['DON_DATE']))
.
.
def get_recon_totals(reader_list):
print in_obj
client_tot_cnt = 0
client_rec_cnt = 0
client_erec_cnt = 0
for line in reader_list: #line here is a dict
if line[<fieldname for first column>] == 'T' and line[<fieldname for secondcolumn>] == 'Total Amount':
print 'Total Amount found.'
client_tot_cnt = line[<fieldname for third column>]
.
. #continued like above
.
return client_tot_cnt, client_rec_cnt, client_erec_cnt
I'm reading the data from one file named SPD_file. Matching the data with another file named Custom. And all the records which are matching in both the files will be written into the third file.
But it seems that something is wrong: the code matches the records and prints them to the console, but when I write to another file, nothing ends up in the new file other than the header.
# Read the SPD workbook, match its records against the Custom CSV, and
# write the matches to a new xlsx report. NOTE(review): Python 2 code
# (print statements, dict.has_key).
workbook = xlrd.open_workbook(SPD_file)
worksheets = workbook.sheet_names()
mapping_records = {}
for worksheet_name in worksheets:
worksheet = workbook.sheet_by_name(worksheet_name)
mapping_record = MappingRecord()
if worksheet_name == "CD":
# NOTE(review): mapping_record is reassigned on every iteration, so
# after this loop it holds only the LAST row of the sheet.
for curr_row in range(0,worksheet.nrows):
mapping_record = worksheet.row(curr_row)
print worksheet_name
print mapping_record[0].value
for curr_row in mapping_record:
#print "In Loop...."
spd_record = MappingRecord()
spd_record.id = "00002269"
spd_record.erocode = None
spd_record.scno = None
# NOTE(review): the key is always mapping_record[8] (the last row's cell),
# so every iteration overwrites the same dictionary entry.
mapping_records[mapping_record[8]] = spd_record
print "Read SPD File....."
custom_file_name = "Custom_" + today.strftime('%Y-%m-%d') + ".csv"
custom_file = ops_home + path + "\\" + custom_file_name
# NOTE(review): this handle is never closed; a with-block would be safer.
custom = open(custom_file, 'rb')
reader = csv.reader(custom, delimiter=',', quotechar='"')
for line in reader:
# NOTE(review): this lookup uses mapping_record[8], not a value from
# `line` -- likely the reason matches print to the console but nothing
# new reaches the output file; confirm the intended key against `line`.
if mapping_records.has_key(mapping_record[8]):
spd_record = mapping_records[mapping_record[8]]
if line[7] == "ERO Code":
spd_record.erocode = line[8]
elif line[7] == "Service Number":
spd_record.scno = line[8]
#create a new file.
New_file = ops_home + '\\Reports\\SPD_new_' + today.strftime('%d%m%Y') + '.xlsx'
workbook = xlsxwriter.Workbook(New_file)
# Add a bold format to use to highlight cells.
bold = workbook.add_format({'bold': 1})
money = workbook.add_format({'num_format': '#,##0.00'})
worksheetCd = workbook.add_worksheet("CD")
cdHeader = ("Merchant ID", "EroCode", "Service Number")
cd_row = 0
cd_col = 0
# Write the header row in bold.
for columnHeader in cdHeader:
worksheetCd.write(cd_row, cd_col, columnHeader,bold)
cd_col += 1
# Write one row per matched record paying by card.
for ctx in mapping_records:
spd_record = mapping_records[ctx]
if spd_record.payment_mode == "CRD":
cd_row += 1
cd_col = 0
cdRow = (spd_record.id, spd_record.erocode, spd_record.scno)
for columnData in cdRow:
# Money formatting for specific column positions.
if cd_col == 5 or cd_col == 19 or cd_col ==20 or cd_col ==21:
worksheetCd.write_number(cd_row, cd_col, columnData, money)
else:
worksheetCd.write(cd_row, cd_col, columnData)
cd_col += 1
workbook.close()