Python: Trying to print fields of config.ini to csv file

I have a config.ini file in which the user supplies the fields they want to show as headers in a CSV file. I have tried a few different methods but cannot seem to get what I need printed.
The INI file has a [FIELDINFO] section holding the field names, and this is my code:
import csv
from dataclasses import fields
from distutils.command.config import config
import pandas as pd
from jira import JIRA
from atlassian import Confluence
from configparser import ConfigParser

csvFilename = "csvHeaders.csv"  # File name

## Config file
config_file = ConfigParser()  # create/get configparser object
config_file.read("config.ini")

# Get the fields from config file
#fieldlist = []
fieldlist = config_file["FIELDINFO"]
#print(fieldlist['fields'])
#print(fieldlist)  # 1ST PRINT STATEMENT -- This prints <Section: FIELDINFO>

#with open('config.ini', 'w') as conf:
#    config_object.write(conf)

for i in fieldlist:
    #print(fieldlist[i])
    #csvHeaders = fieldlist['fields']
    csvHeaders = fieldlist[i]
    #print(csvHeaders)  # 2ND PRINT STATEMENT -- Prints list of fields in config.ini

with open(csvFilename, 'w') as f:
    csvwriter = csv.writer(f)
    #csvwriter.writerow(fieldlist[i])
    #csvwriter.writerow(csvHeaders)
    #csvwriter.writerow(fieldlist['fields'])
    csvwriter.writerow(fieldlist)
    #for i in (fieldlist):
    #    csvwriter.writerow(fieldlist[i])
Currently this prints out field1,field2,field3 and so on, rather than the field names. When I change the writerow statement to fieldlist[i], it prints the field names but with commas between each character. What am I doing wrong? Thank you.

You can try this code.
import csv
from configparser import ConfigParser

csvFilename = "csvHeaders.csv"  # File name

# Config file
config_file = ConfigParser()  # create/get configparser object
config_file.read("config.ini")

# Get the list
fieldlist = config_file["FIELDINFO"]

# Create headers
csvHeaders = []

# Copy the field names into an array
for i in fieldlist:
    csvHeaders.append(fieldlist[i])

with open(csvFilename, 'w', newline='') as f:
    csvwriter = csv.DictWriter(f, fieldnames=csvHeaders)
    # Write header
    csvwriter.writeheader()
    # Write a sample row
    csvwriter.writerow({csvHeaders[0]: '1',
                        csvHeaders[1]: '2',
                        csvHeaders[2]: '3',
                        csvHeaders[3]: '4',
                        csvHeaders[4]: '5',
                        csvHeaders[5]: '6',
                        csvHeaders[6]: '7'})
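If the number of fields in config.ini isn't always seven, a minimal sketch that builds the sample row dynamically rather than hard-coding each index (the numeric sample values are just placeholders):

import csv
from configparser import ConfigParser

config_file = ConfigParser()
config_file.read("config.ini")
csvHeaders = list(config_file["FIELDINFO"].values())

with open("csvHeaders.csv", 'w', newline='') as f:
    csvwriter = csv.DictWriter(f, fieldnames=csvHeaders)
    csvwriter.writeheader()
    # enumerate supplies '1', '2', ... for however many fields there are
    csvwriter.writerow({header: str(n) for n, header in enumerate(csvHeaders, start=1)})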
Refer to the csv module documentation.

To create a CSV header from the field values in config.ini, you can do the following:
import csv
from configparser import ConfigParser

csvFilename = "csvHeaders.csv"  # File name

## Config file
config_file = ConfigParser()  # create/get configparser object
config_file.read("config.ini")

fieldlist = config_file["FIELDINFO"]

with open(csvFilename, 'w', newline='') as f:
    csvwriter = csv.writer(f)
    csvwriter.writerow(fieldlist.values())
This would create a normal CSV file header as:
Issue Key,Status,Created,Updated,Priority,Summary,Type
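For reference, that header corresponds to a config.ini along these lines (the exact key names are an assumption; only the values matter for the header):

[FIELDINFO]
field1 = Issue Key
field2 = Status
field3 = Created
field4 = Updated
field5 = Priority
field6 = Summary
field7 = Type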
Note: A CSV file by default has commas separating each column (which is why they are called Comma-Separated Values files). You can change the delimiter being used, but the default should normally be kept.
If one of your fields in the INI file happens to contain a comma, the csv library will automatically wrap the entry in quotes for you, which is needed for a program reading the file back in to work correctly.
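As for why the original code misbehaved: iterating a ConfigParser section yields its keys, so csvwriter.writerow(fieldlist) writes field1,field2,... rather than the values; and writerow expects an iterable of fields, so passing it a single string writes one character per column. A minimal demonstration of the latter:

import csv
import io

buf = io.StringIO()
csv.writer(buf).writerow("Status")  # a str is an iterable of its characters
print(buf.getvalue())               # S,t,a,t,u,s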


Import CSV file (convert to JSON format) into Deta Base (database)

I am trying to convert a CSV file into JSON format and import it into a Deta Base (database). Here is the main.py code I run from a terminal. No error shows up, but the Deta Base is empty, not even created. Any suggestion on what is wrong with my Python script?
import csv
from deta import Deta

# Initialize with a Project Key
deta = Deta("HERE-IS-DETA-ID")

# Path to CSV file
csvFilePath = r"data.csv"

# This is how to connect to or create a database
db = deta.Base("simple_db_testing")

# Create and use DB
someone = deta.Base("somewhere")

# Define conversion from CSV to JSON
def csv_to_json(csvFilePath):
    jsonArray = []

    # Read csv file
    with open(csvFilePath, encoding='utf-8') as csvf:
        # Load csv file data using csv library's dictionary reader
        csvReader = csv.DictReader(csvf)
        print(csvReader)

        # Convert each csv row into python dict
        for row in csvReader:
            # Add this python dict to json array
            jsonArray.append(row)
            print(jsonArray)

    # Insert jsonArray into DB named "someone"
    for each in jsonArray:
        someone.put(each)
You can simplify this by writing each row to the database as it is read, instead of building the intermediate list first:

import csv
from deta import Deta

# Initialize with a Project Key
deta = Deta("HERE-IS-DETA-ID")

# Path to CSV file
csvFilePath = r"data.csv"

# This is how to connect to or create a database
db = deta.Base("simple_db_testing")

# Create and use DB
someone = deta.Base("somewhere")

# Define conversion from CSV to JSON
def csv_to_json(csvFilePath):
    # Read csv file
    with open(csvFilePath, encoding='utf-8') as csvf:
        # Load csv file data using csv library's dictionary reader
        csvReader = csv.DictReader(csvf)

        # Convert each csv row into python dict
        for row in csvReader:
            # Insert object into DB named "someone"
            someone.put(row)
The problem was a missing function call at the end of the script:
import csv
from deta import Deta

# Initialize with a Project Key
deta = Deta("HERE-IS-DETA-ID")

# Path to CSV file
csvFilePath = r"data2.csv"

# Create and use DB - this database is being used
someone = deta.Base("somewhere2")

# Define conversion from CSV to JSON
def csv_to_json(csvFilePath):
    # Read csv file
    with open(csvFilePath, encoding='utf-8') as csvf:
        # Load csv file data using csv library's dictionary reader
        csvReader = csv.DictReader(csvf)

        # Convert each csv row into python dict
        for row in csvReader:
            # Insert object into DB named "someone"
            someone.put(row)
            # Print rows inserted into DB
            print(row)

# Need to call the function
csv_to_json(csvFilePath)
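A common refinement is to guard that call so the module can also be imported without immediately running the conversion; a minimal sketch:

if __name__ == "__main__":
    csv_to_json(csvFilePath)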

Python converts multiple JSON files in a folder directory to CSV

I have a lot of JSON files in a folder and I want to convert them to CSV format.
Should I use import glob? I am a novice - how can I modify my code?
#-*-coding:utf-8-*-
import csv
import json
import sys
import codecs

def trans(path):
    jsonData = codecs.open('C:/Users/jeri/Desktop/1', '*.json', 'r', 'utf-8')
    # csvfile = open(path+'.csv', 'w')
    # csvfile = open(path+'.csv', 'wb')
    csvfile = open('C:/Users/jeri/Desktop/1.csv', 'w', encoding='utf-8', newline='')
    writer = csv.writer(csvfile, delimiter=',')
    flag = True
    for line in jsonData:
        dic = json.loads(line)
        if flag:
            keys = list(dic.keys())
            print(keys)
            flag = False
        writer.writerow(list(dic.values()))
    jsonData.close()
    csvfile.close()

if __name__ == '__main__':
    path = str(sys.argv[0])
    print(path)
    trans(path)
Yes, using glob would be a good way to iterate through the .json files in your folder! But glob doesn't have anything to do with the reading/writing of files. After importing glob, you can use it like this:

for curr_file in glob.glob("*.json"):
    # Process each file here
I see that you've used the json module in your code snippet. I'd say the better way to go about it is to use pandas:

df = pd.read_json("file_name.json")

I say this because with the pandas library you can simply convert from .json to .csv using:

df.to_csv('file_name.csv')
Combining the three together, it would look like this (deriving each output name from the input name, so the files don't all overwrite the same 'file_name.csv'):

import glob
import pandas as pd

for curr_file in glob.glob("*.json"):
    # Process each file: data.json -> data.csv
    df = pd.read_json(curr_file)
    df.to_csv(curr_file[:-5] + '.csv')
Also, note that if your JSON has nested objects, it can't be directly converted to CSV; you'll have to settle the organization of the data prior to the conversion.
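If you do hit nested objects, a sketch using pandas.json_normalize (assuming a reasonably recent pandas, and that each file holds a single object or a list of objects):

import glob
import json

import pandas as pd

for curr_file in glob.glob("*.json"):
    with open(curr_file, encoding="utf-8") as f:
        data = json.load(f)
    # json_normalize flattens nested objects into dotted column names
    df = pd.json_normalize(data)
    df.to_csv(curr_file[:-5] + ".csv", index=False)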

Read a file from a folder and extract a specific key from the file and save as in CSV file

I'm new to Python, and the task I am performing is to extract specific key values from a list of .iris files (which contain nested dictionaries) in a specific directory.
I want to extract the specific values, save them as a new .csv file, and repeat this for all the other files.
Below is a sample .iris file, from which I should extract only these keys ('uid', 'enabled', 'login', 'name').
{"streamType":"user",
"uid":17182,
"enabled":true,
"login":"xyz",
"name":"abcdef",
"comment":"",
"authSms":"",
"email":"",
"phone":"",
"location":"",
"extraLdapOu":"",
"mand":997,
"global":{
"userAccount":"View",
"uid":"",
"retention":"No",
"enabled":"",
"messages":"Change"},
"grants":[{"mand":997,"role":1051,"passOnToSubMand":true}],
I am converting the .iris files to .json and reading the files one by one, but unfortunately I am not getting the exact output as desired.
Please, could anyone help me?
My code (added from comments):
import os
import csv

path = ''
os.chdir(path)

# Read iris file
def read_iris_file(file_path):
    with open(file_path, 'r') as f:
        print(f.read())

# Iterate through all files
for file in os.listdir():
    # Check whether file is in iris format or not
    if file.endswith(".iris"):
        file_path = f"{path}\{file}"
        # Call read iris file function
        print(read_iris_file(file_path))
Your files contain data in JSON format, so we can use the built-in json module to parse it. To iterate over files with a certain extension you can use Path.glob() with the pattern "*.iris". Then we can use csv.DictWriter() and pass "ignore" to the extrasaction argument, which makes DictWriter ignore keys we don't need and write only those passed to the fieldnames argument.
Code:
import csv
import json
from pathlib import Path

path = Path(r"path/to/folder")
keys = "uid", "enabled", "login", "name"

with open(path / "result.csv", "w", newline="") as out_f:
    writer = csv.DictWriter(out_f, fieldnames=keys, extrasaction='ignore')
    writer.writeheader()
    for file in path.glob("*.iris"):
        with open(file) as inp_f:
            data = json.load(inp_f)
        writer.writerow(data)
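With the sample file above, this writes one row per .iris file. Note that json parses JSON booleans into Python's True/False, so the enabled column will contain True rather than true.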
Try the below (the key point here is parsing the whole file up front and then writing the selected fields by hand). Note that ast.literal_eval only accepts Python literals, and the sample above contains JSON's lowercase true, so json is the safer parser for this data:

import json

fields = ('uid', 'enabled', 'login', 'name')

with open('my.iris') as f1:
    data = json.loads(f1.read())

with open('my.csv', 'w') as f2:
    f2.write(','.join(fields) + '\n')
    # str() is needed because uid is an int and enabled is a bool
    f2.write(','.join(str(data[f]) for f in fields) + '\n')
my.csv
uid,enabled,login,name
17182,True,xyz,abcdef

nested JSON to CSV using python script

I'm new to Python and I've got a large JSON file that I need to convert to CSV - below is a sample:
{ "status": "success","Name": "Theresa May","Location": "87654321","AccountCategory": "Business","AccountType": "Current","TicketNo": "12345-12","AvailableBal": "12775.0400","BookBa": "123475.0400","TotalCredit": "1234567","TotalDebit": "0","Usage": "5","Period": "May 11 2014 to Jul 11 2014","Currency": "GBP","Applicants": "Angel","Signatories": [{"Name": "Not Available","BVB":"Not Available"}],"Details": [{"PTransactionDate":"24-Jul-14","PValueDate":"24-Jul-13","PNarration":"Cash Deposit","PCredit":"0.0000","PDebit":"40003.0000","PBalance":"40003.0000"},{"PTransactionDate":"24-Jul-14","PValueDate":"23-Jul-14","PTest":"Cash Deposit","PCredit":"0.0000","PDebit":"40003.0000","PBalance":"40003.0000"},{"PTransactionDate":"25-Jul-14","PValueDate":"22-Jul-14","PTest":"Cash Deposit","PCredit":"0.0000","PDebit":"40003.0000","PBalance":"40003.0000"},{"PTransactionDate":"25-Jul-14","PValueDate":"21-Jul-14","PTest":"Cash Deposit","PCredit":"0.0000","PDebit":"40003.0000","PBalance":"40003.0000"},{"PTransactionDate":"25-Jul-14","PValueDate":"20-Jul-14","PTest":"Cash Deposit","PCredit":"0.0000","PDebit":"40003.0000","PBalance":"40003.0000"}]}
I need this to show up with name, status, location, accountcategory, accounttype, availablebal, totalcredit, totaldebit, etc. as columns,
with pcredit, pdebit, pbalance, ptransactiondate, pvaluedate and ptest taking new values on each row, as the JSON file shows.
I've managed to put the script below together from looking online, but it's giving me an empty CSV file at the end. What have I done wrong? I have used online JSON-to-CSV converters and they work; however, as these are sensitive files, I'm hoping to write/manage my own script so I can see exactly how it works. Please see below for my Python script - can I have some advice on what to change? Thanks.
import csv
import json

infile = open("BankStatementJSON1.json", "r")
outfile = open("testing.csv", "w")
writer = csv.writer(outfile)
for row in json.loads(infile.read()):
    writer.writerow(row)

import csv, json, sys

# if you are not using utf-8 files, remove the next line
sys.setdefaultencoding("UTF-8")  # set the encode to utf8

# check if you pass the input file and output file
if sys.argv[1] is not None and sys.argv[2] is not None:
    fileInput = sys.argv[1]
    fileOutput = sys.argv[2]
    inputFile = open("BankStatementJSON1.json", "r")  # open json file
    outputFile = open("testing2.csv", "w")  # load csv file
    data = json.load("BankStatementJSON1.json")  # load json content
    inputFile.close()  # close the input file
    output = csv.writer("testing.csv")  # create a csv.writer
    output.writerow(data[0].keys())  # header row
    for row in data:
        output.writerow(row.values())  # values row
This works for the JSON example you posted. The issue is that you have nested dicts, so you can't directly create sub-headers and sub-rows for pcredit, pdebit, pbalance, ptransactiondate, pvaluedate and ptest as you want.
You can use csv.DictWriter:
import csv
import json

with open("BankStatementJSON1.json", "r") as inputFile:  # open json file
    data = json.loads(inputFile.read())  # load json content

with open("testing.csv", "w") as outputFile:  # open csv file
    output = csv.DictWriter(outputFile, data.keys())  # create a writer
    output.writeheader()
    output.writerow(data)
The with blocks also make sure the output file is closed at the end for you.
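If you do want one row per Details entry, with the top-level fields repeated on every row (as described in the question), here is a sketch assuming the structure shown in the sample JSON above:

import csv
import json

with open("BankStatementJSON1.json") as f:
    data = json.load(f)

# Keep only the scalar top-level fields; "Signatories" and "Details" are lists
top = {k: v for k, v in data.items() if not isinstance(v, (list, dict))}
details = data.get("Details", [])

# Columns: top-level fields first, then every key seen in any Details entry
fieldnames = list(top) + sorted({k for d in details for k in d})

with open("testing.csv", "w", newline="") as outputFile:
    writer = csv.DictWriter(outputFile, fieldnames=fieldnames, restval="")
    writer.writeheader()
    for d in details:
        writer.writerow({**top, **d})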

Merge txt files in a folder and replacing characters in python

I have a doubt about how to continue the code: I need to take all the files from a folder and merge them into one file with a different text format.
Example:
The Input files are of text format like this:
"{'nr': '3173391045', 'data': '27/12/2017'}"
"{'nr': '2173391295', 'data': '05/01/2017'}"
"{'nr': '5173351035', 'data': '07/03/2017'}"
The Output files must be lines like this:
"3173391045","27/09/2017"
"2173391295","05/01/2017"
"5173351035","07/03/2017"
This is my working code; it merges the files and strips out the blank lines:
import glob2
import datetime

filenames = glob2.glob("*.txt")

with open(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f") + ".SAI", 'w') as file:
    for filename in filenames:
        with open(filename, "r") as f:
            file.write(f.read())
I'm trying something with .replace but it is not working - I get syntax errors or blank files:

filedata = filedata.replace("{", "") for line in filedata
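(That line is a syntax error: a for clause can't trail an assignment like that. The list-comprehension form would be filedata = [line.replace("{", "") for line in filedata], though as the answers below show, actually parsing each line is safer than string surgery.)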
If your input files had contained valid JSON strings, the correct way would have been to parse the lines as JSON and write them back as CSV. As the inner strings are enclosed in single quotes (') they are rejected by the json module of the Python standard library, so my advice is to use a regex to parse them. The code could become:
import glob2
import datetime
import csv
import re

# the regex to parse the line
rx = re.compile(r".*'nr'\s*:\s*'(\d+)'.*'data'\s*:\s*'([/\d]+)'")

filenames = glob2.glob("*.txt")

with open(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f") + ".SAI", 'w') as file:
    wr = csv.writer(file, quoting=csv.QUOTE_ALL)
    for filename in filenames:
        with open(filename, "r") as f:
            for line in f:  # process line by line
                m = rx.match(line)
                wr.writerow(m.groups())
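Note that m will be None for any line that doesn't match the pattern (a stray blank line, for example), so guarding the writerow call with if m: makes the loop more robust.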
With a few tweaks, the input data can be coerced into a form suitable for JSON parsing:
from datetime import datetime
import json
import glob2
import csv

with open(datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f") + ".SAI", 'w', newline='') as f_output:
    csv_output = csv.writer(f_output, quoting=csv.QUOTE_ALL)
    for filename in glob2.glob('*.txt'):
        with open(filename) as f_input:
            for row in f_input:
                # Strip the outer quotes and swap ' for " so json can parse it
                row_dict = json.loads(row.strip('"\n').replace("'", '"'))
                csv_output.writerow([row_dict['nr'], row_dict['data']])
Giving you:
"3173391045","27/12/2017"
"2173391295","05/01/2017"
"5173351035","07/03/2017"
Note, in Python 3.x the output file should be opened with newline=''. Without this, extra blank lines can appear in the output file.
Using regex/replaces to parse those strings is dangerous: you could always stumble on data containing the delimiter, a comma, etc.
And in this case, even though json cannot read those lines as-is, ast.literal_eval can parse the single-quoted dicts; the only preparation needed is stripping the outer double quotes from each line:

import ast
import csv
import glob2

filenames = glob2.glob("*.txt")

with open("output.csv", "w", newline="") as fw:
    cw = csv.writer(fw)
    for filename in filenames:
        with open(filename) as f:
            for line in f:
                # Each line is a double-quoted Python dict literal
                d = ast.literal_eval(line.strip().strip('"'))
                cw.writerow([d['nr'], d['data']])
