I have code that worked well until the payload changed:
import csv
import json

field_types = [
    ('subject', str),
    ('type', str)
]

# read the raw CSV file
output = []
with open('file.csv', 'r', encoding='utf-8-sig') as f:
    for row in csv.DictReader(f):
        row.update((key, conversion(row[key]))
                   for key, conversion in field_types)
        output.append(row)

# store the records as JSON
with open('file.json', 'w') as outfile:
    json.dump(output, outfile, sort_keys=True, indent=4)
The outcome was saved nicely:
[
    {
        "subject": "1",
        "type": "2"
    },
    {
        "subject": "1",
        "type": "3"
    }
]
The current requirement is that it should be saved like this, i.e. (I guess) with "subject" as a nested object inside each record. Have you had this situation? How can I achieve it?
[
    {
        "subject": {
            "id": "1"
        },
        "type": "2"
    },
    {
        "subject": {
            "id": "1"
        },
        "type": "3"
    }
]
This should do the job:
def str2dict(s):
    return dict(id=s)

field_types = [
    ('subject', str2dict),
    ('type', str)
]
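With that conversion in place, each row's "subject" value is wrapped in an {"id": ...} object before json.dump writes it. A quick check on a hypothetical row (not taken from the original CSV):
row = {'subject': '1', 'type': '2'}
row.update((key, conversion(row[key])) for key, conversion in field_types)
print(row)   # {'subject': {'id': '1'}, 'type': '2'}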
I am generating a JSON-like structure and I need to write it to a file (any file format), but I am getting this error.
I have tried writing it as JSON and as a string, without any luck.
import csv

score = []
test = ''

with open("score_test.txt") as f:
    spam = csv.reader(f, delimiter='|')
    for line in spam:
        score.append(line)
open = """{
"info": {
"_postman_id": "",
"name": "GD-API-Testing-Update",
"schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json"
},
"name": "Score_Tests",
"item": [
{
"name": "Score_Ok",
"item": ["""
for num, row in enumerate(score):
    test += """
{
"name": "Score_Ok_%d",
"event": [
{
"listen": "test",
"script": {
"exec": [
"pm.test(\"Status test of API-GD.\", function () {",
" pm.response.to.have.status(200);",
"});",
"",
"pm.test(\"Score test.\", function () {",
" pm.response.to.not.be.error;",
" const responseJson = pm.response.json();",
" pm.expect(responseJson.gd_result).to.eql("%s");",
"});"
],
"type": "text/javascript"
}
}
],
"request": {
"method": "GET",
"header": [
{
"key": "X-Access-Key-Id",
"value": "{{X-Access-Key-Id}}",
"type": "text"
},
{
"key": "X-Secret-Access-Key",
"value": "{{X-Secret-Access-Key}}",
"type": "text"
},
{
"key": "Accept-Version",
"value": "{{Accept-Version}}",
"type": "text"
}
],
"url": {
"raw": "{{URL-API-GD}}/api/model/:model_id/mdn/:mdn",
"host": [
"{{URL-API-GD}}"
],
"path": [
"api",
"model",
":model_id",
"mdn",
":mdn"
],
"variable": [
{
"key": "model_id",
"value": "{{Test-Model}}"
},
{
"key": "mdn",
"value": "%s"
}
]
}
},
"response": []
}""" % (num, row[1], row[0])
    if num != len(score) - 1:
        test += ","
close = """ ]
}
]
}"""
sample = open + test + close
with open("sample.txt", "a") as f:
    f.write(sample)
This is the error:
Traceback (most recent call last):
File "/Users/user/Documents/training/TestForPostman/testGenerator.py", line 111, in <module>
with open("sample.txt", "a") as f:
TypeError: 'str' object is not callable
The for loop appends the generated string I need, and the structure is closed at the end.
I already checked the JSON structure and it is OK.
You are using open as a variable in your code (sample = open + test + close), which shadows the built-in open() function and causes this issue. Avoid using built-in names as variables.
Rename the open variable to something else and it will work.
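A self-contained sketch of the problem and the fix (the template text here is only a placeholder, and "header" is an arbitrary replacement name):
open = "{... collection header ...}"    # shadows the built-in open()
try:
    with open("sample.txt", "a") as f:  # TypeError: 'str' object is not callable
        pass
except TypeError as err:
    print(err)

del open                                # or simply rename the variable in the script
header = "{... collection header ...}"  # arbitrary replacement name
sample = header + " ... generated tests ... " + "]}"
with open("sample.txt", "a") as f:      # the built-in open() is usable again
    f.write(sample)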
I want to append a column name together with the value of its data type to a JSON file. I can't figure out how to get the data type of the value based on the name, and I'm not sure how to append this correctly inside the for loop for data['type'].
Excel Spreadsheet
Code
import xlrd
from collections import OrderedDict
import json

wb = xlrd.open_workbook('./file1.xlsx')
sh = wb.sheet_by_index(0)

data_list = []
data = OrderedDict()
for colnum in range(0, sh.ncols):
    data['name'] = sh.row_values(0)[colnum]
    data['description'] = sh.row_values(1)[colnum]
    data_list.append(data.copy())

data_list = {'columns': data_list}

j = json.dumps(data_list)
with open('seq1.json', 'w') as f:
    f.write(j)
Output
{
    "columns": [
        {
            "name": "FILEID",
            "description": "FILEID"
        },
        {
            "name": "FILETYPE",
            "description": "FILETYPE"
        },
}
Expected output
{
    "columns": [
        {
            "name": "fileid",
            "description": "FILEID",
            "type": "keyword"
        },
        {
            "name": "filetype",
            "description": "FILETYPE",
            "type": "keyword"
        },
}
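For the "type" field, a minimal sketch follows. It assumes the header is row 0, the descriptions are row 1, and the data starts at row 2, and it maps xlrd cell types to type names (the TYPE_NAMES mapping is a hypothetical choice; text cells become "keyword" as in the expected output above, so adjust it as needed):
import json
from collections import OrderedDict
import xlrd

# hypothetical mapping from xlrd cell types to the type names wanted in the JSON
TYPE_NAMES = {
    xlrd.XL_CELL_TEXT: 'keyword',
    xlrd.XL_CELL_NUMBER: 'number',
    xlrd.XL_CELL_DATE: 'date',
    xlrd.XL_CELL_BOOLEAN: 'boolean',
}

wb = xlrd.open_workbook('./file1.xlsx')
sh = wb.sheet_by_index(0)

data_list = []
for colnum in range(sh.ncols):
    data = OrderedDict()
    data['name'] = str(sh.row_values(0)[colnum]).lower()   # lowercased header, as in the expected output
    data['description'] = sh.row_values(1)[colnum]
    data['type'] = TYPE_NAMES.get(sh.cell_type(2, colnum), 'keyword')  # type of the first data cell
    data_list.append(data)

with open('seq1.json', 'w') as f:
    json.dump({'columns': data_list}, f, indent=4)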
I want to print a user that I select from a JSON list in Python, but I can only print all the users. How do you print a specific user? At the moment I have this, which prints all the users in an ugly format:
import json

with open('Admin_sample.json') as f:
    admin_json = json.load(f)

print(admin_json['staff'])
The JSON file looks like this
{
"staff": [
{
"id": "DA7153",
"name": [
"Fran\u00c3\u00a7ois",
"Ullman"
],
"department": {
"name": "Admin"
},
"server_admin": "true"
},
{
"id": "DA7356",
"name": [
"Bob",
"Johnson"
],
"department": {
"name": "Admin"
},
"server_admin": "false"
},
],
"assets": [
{
"asset_name": "ENGAGED SLOTH",
"asset_type": "File",
"owner": "DA8333",
"details": {
"security": {
"cia": [
"HIGH",
"INTERMEDIATE",
"LOW"
],
"data_categories": {
"Personal": "true",
"Personal Sensitive": "true",
"Customer Sensitive": "true"
}
},
"retention": 2
},
"file_type": "Document",
"server": {
"server_name": "ISOLATED UGUISU",
"ip": [
10,
234,
148,
52
]
}
},
{
"asset_name": "ISOLATED VIPER",
"asset_type": "File",
"owner": "DA8262",
"details": {
"security": {
"cia": [
"LOW",
"HIGH",
"LOW"
],
"data_categories": {
"Personal": "false",
"Personal Sensitive": "false",
"Customer Sensitive": "true"
}
},
"retention": 2
},
},
]
I just can't work it out. Any help would be appreciated.
Thanks.
You need to index into the staff list, e.g.:
print(admin_json['staff'][0])
I suggest reading up a bit on dictionaries in Python. Dictionary values can be set to any object: in this case, the value of the staff key is set to a list of dicts. Here's an example that will loop through all the staff members and print their names:
staff_list = admin_json['staff']
for person in staff_list:
    name_parts = person['name']
    full_name = ' '.join(name_parts)  # combine name parts into a string
    print(full_name)
Try something like this:
import json

def findStaffWithId(allStaff, id):
    for staff in allStaff:
        if staff["id"] == id:
            return staff
    return {}  # no staff found

with open('Admin_sample.json') as f:
    admin_json = json.load(f)

print(findStaffWithId(admin_json['staff'], "DA7356"))
You can list all the users' names with:
users = [user["name"] for user in admin_json['staff']]
You have two lists in this JSON file. When you parse it, you reach a list. For example, to get the first staff member's id:
print(admin_json['staff'][0]['id'])
This will print:
DA7153
When you use json.load, it simply converts the JSON file into a Python dictionary. For further info:
https://docs.python.org/3/tutorial/datastructures.html#dictionaries
I want to create a JSON file from a CSV file using a generic Python script.
I found the hone package on GitHub (csv to json), but some of the functionality is missing from that code.
I want to write a generic CSV-to-JSON conversion template.
[
    {
        "birth": {
            "day": "7",
            "month": "May",
            "year": "1985"
        },
        "name": "Bob",
        "reference": "TRUE",
        "reference name": "Smith"
    }
]
Only the type of JSON shown above is handled.
[
    {
        "Type": "AwsEc2Instance",
        "Id": "i-cafebabe",
        "Partition": "aws",
        "Region": "us-west-2",
        "Tags": {
            "billingCode": "Lotus-1-2-3",
            "needsPatching": "true"
        },
        "Details": {
            "AwsEc2Instance": {
                "Type": "i3.xlarge",
                "ImageId": "ami-abcd1234",
                "IpV4Addresses": [ "54.194.252.215", "192.168.1.88" ],
                "IpV6Addresses": [ "2001:db812341a2b::123" ],
                "KeyName": "my_keypair",
                "VpcId": "vpc-11112222",
                "SubnetId": "subnet-56f5f633",
                "LaunchedAt": "2018-05-08T16:46:19.000Z"
            }
        }
    }
]
I want to handle nested arrays [] and objects {}, as in the example above.
I have done something like this before; the code below can be modified, as I have not seen your dataset.
import pandas as pd
import json

dataframe = pd.read_excel('dataframefilepath', encoding='utf-8', header=0)

# df_to_Save is a second DataFrame (built elsewhere) with the columns
# 'Selected_Prediction' and 'Value_to_Save'

'''Adding to a list to finally save it as JSON'''
df = []
for (columnName, columnData) in dataframe.iteritems():
    if dataframe.columns.get_loc(columnName) > 0:
        for indata, rwdata in dataframe.iterrows():
            for insav, rwsave in df_to_Save.iterrows():
                if rwdata.Selected_Prediction == rwsave.Selected_Prediction:
                    df_to_Save.loc[insav, 'Value_to_Save'] = rwdata[dataframe.columns.get_loc(columnName)]
        df.append(df_to_Save.set_index('Selected_Prediction').T.to_dict('records'))

'''Saving in JSON format'''
path_to_save = '\\your path'
with open(path_to_save, 'w') as json_file:
    json.dump(df, json_file)
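For the nested output specifically, a more generic sketch may help. It assumes (since the CSV layout isn't shown) that nested fields are encoded in the CSV header as dot-separated names such as birth.day or Details.AwsEc2Instance.Type, and it rebuilds the nesting from those names; handling list values such as IpV4Addresses would need an extra rule for bracketed indices in the key parsing. The file names file.csv and file.json are placeholders.
import csv
import json

def insert_nested(record, dotted_key, value):
    # create one nested dict level per dot-separated part of the header
    parts = dotted_key.split('.')
    node = record
    for part in parts[:-1]:
        node = node.setdefault(part, {})
    node[parts[-1]] = value

rows = []
with open('file.csv', newline='', encoding='utf-8-sig') as f:   # hypothetical input file
    for flat_row in csv.DictReader(f):
        record = {}
        for key, value in flat_row.items():
            insert_nested(record, key, value)
        rows.append(record)

with open('file.json', 'w') as out:                              # hypothetical output file
    json.dump(rows, out, indent=4)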
I have a rather weird requirement now. I have the JSON below, and I somehow have to convert it into a flat CSV.
[
{
"authorizationQualifier": "SDA",
"authorizationInformation": " ",
"securityQualifier": "ASD",
"securityInformation": " ",
"senderQualifier": "ASDAD",
"senderId": "FADA ",
"receiverQualifier": "ADSAS",
"receiverId": "ADAD ",
"date": "140101",
"time": "0730",
"standardsId": null,
"version": "00501",
"interchangeControlNumber": "123456789",
"acknowledgmentRequested": "0",
"testIndicator": "T",
"functionalGroups": [
{
"functionalIdentifierCode": "ADSAD",
"applicationSenderCode": "ASDAD",
"applicationReceiverCode": "ADSADS",
"date": "20140101",
"time": "07294900",
"groupControlNumber": "123456789",
"responsibleAgencyCode": "X",
"version": "005010X221A1",
"transactions": [
{
"name": "ASDADAD",
"transactionSetIdentifierCode": "adADS",
"transactionSetControlNumber": "123456789",
"implementationConventionReference": null,
"segments": [
{
"BPR03": "ad",
"BPR14": "QWQWDQ",
"BPR02": "1.57",
"BPR13": "23223",
"BPR01": "sad",
"BPR12": "56",
"BPR10": "32424",
"BPR09": "12313",
"BPR08": "DA",
"BPR07": "123456789",
"BPR06": "12313",
"BPR05": "ASDADSAD",
"BPR16": "21313",
"BPR04": "SDADSAS",
"BPR15": "11212",
"id": "aDSASD"
},
{
"TRN02": "2424",
"TRN03": "35435345",
"TRN01": "3435345",
"id": "FSDF"
},
{
"REF02": "fdsffs",
"REF01": "sfsfs",
"id": "fsfdsfd"
},
{
"DTM02": "2432424",
"id": "sfsfd",
"DTM01": "234243"
}
],
"loops": [
{
"id": "24324234234",
"segments": [
{
"N101": "sfsfsdf",
"N102": "sfsf",
"id": "dgfdgf"
},
{
"N301": "sfdssfdsfsf",
"N302": "effdssf",
"id": "fdssf"
},
{
"N401": "sdffssf",
"id": "sfds",
"N402": "sfdsf",
"N403": "23424"
},
{
"PER06": "Wsfsfdsfsf",
"PER05": "sfsf",
"PER04": "23424",
"PER03": "fdfbvcb",
"PER02": "Pedsdsf",
"PER01": "sfsfsf",
"id": "fdsdf"
}
]
},
{
"id": "2342",
"segments": [
{
"N101": "sdfsfds",
"N102": "vcbvcb",
"N103": "dsfsdfs",
"N104": "343443",
"id": "fdgfdg"
},
{
"N401": "dfsgdfg",
"id": "dfgdgdf",
"N402": "dgdgdg",
"N403": "234244"
},
{
"REF02": "23423342",
"REF01": "fsdfs",
"id": "sfdsfds"
}
]
}
]
}
]
}
]
}
]
The column header name corresponding to a deeper key-value pair may take a nested form, like functionalGroups[0].transactions[0].segments[0].BPR15.
I am able to do this in Java using this GitHub project (where you can find the output format I desire in the explanation) in one line:
flatJson = JSONFlattener.parseJson(new File("files/simple.json"), "UTF-8");
The output was:
date,securityQualifier,testIndicator,functionalGroups[1].functionalIdentifierCode,functionalGroups[1].date,functionalGroups[1].applicationReceiverCode, ...
140101,00,T,HP,20140101,ETIN,...
But I want to do this in Python. I tried as suggested in this answer:
import json
from pandas import json_normalize

with open('data.json') as data_file:
    data = json.load(data_file)
df = json_normalize(data, record_prefix=True)
with open('temp2.csv', "w", newline='\n') as csv_file:
    csv_file.write(df.to_csv())
However, for the functionalGroups column, it dumps the nested JSON as a single cell value.
I also tried as suggested in this answer:
import json
import pandas

with open('data.json') as f:  # this ensures opening and closing the file
    a = json.loads(f.read())
df = pandas.DataFrame(a)
print(df.transpose())
But this also seems to do the same:
0
acknowledgmentRequested 0
authorizationInformation
authorizationQualifier SDA
date 140101
functionalGroups [{'functionalIdentifierCode': 'ADSAD', 'applic...
interchangeControlNumber 123456789
receiverId ADAD
receiverQualifier ADSAS
securityInformation
securityQualifier ASD
senderId FADA
senderQualifier ASDAD
standardsId None
testIndicator T
time 0730
version 00501
Is it possible to do what I desire in Python?
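It is. Below is a minimal sketch of a recursive flattener that produces headers in the bracketed style mentioned above (functionalGroups[0].transactions[0].segments[0].BPR15, zero-based rather than the Java library's one-based indices), reusing the data.json and temp2.csv file names from the attempts:
import csv
import json

def flatten(obj, prefix=''):
    # recursively flatten nested dicts/lists into one flat dict with
    # keys like functionalGroups[0].transactions[0].segments[0].BPR15
    flat = {}
    if isinstance(obj, dict):
        for key, value in obj.items():
            new_key = f"{prefix}.{key}" if prefix else key
            flat.update(flatten(value, new_key))
    elif isinstance(obj, list):
        for index, value in enumerate(obj):
            flat.update(flatten(value, f"{prefix}[{index}]"))
    else:
        flat[prefix] = obj
    return flat

with open('data.json') as f:
    records = json.load(f)                                     # the file holds a list of objects

rows = [flatten(record) for record in records]
fieldnames = sorted({key for row in rows for key in row})      # stable, sorted header

with open('temp2.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=fieldnames)
    writer.writeheader()
    writer.writerows(rows)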