I have a JSON file called myclass.json that looks like this: [{"ID": "12345", "Name":"John"}, {"ID":"45321", "Name":"Max"}...]. I used json.load to read the "ID" and "Name" values.
I have another .txt file with the content below. File name is list.txt:
Student,12345,Age 14
Student,45321,Age 15
.
.
.
I'm trying to create a Python script that compares the two files line by line and replaces the student ID with the student's name in the list.txt file, so the new file would be:
Student,John,Age 14
Student,Max,Age 15
.
.
Any ideas?
My code so far:
import json
with open('/myclass.json') as f:
    data = json.load(f)

for key in data:
    x = key['Name']
    z = key['ID']

with open('/myclass.json', 'r') as file1:
    with open('/list.txt', 'r+') as file2:
        for line in file2:
            x = z
try this:
import json
import csv

with open('myclass.json') as f:
    data = json.load(f)

with open('list.txt', 'r') as f:
    reader = csv.reader(f)
    rows = list(reader)

def get_name(id_):
    for item in data:
        if item['ID'] == id_:
            return item["Name"]

with open('list.txt', 'w') as f:
    writer = csv.writer(f)
    for row in rows:
        name = get_name(id_=row[1])
        if name:
            row[1] = name
    writer.writerows(rows)
Keep in mind that this script does not technically replace the items in the list.txt file one by one; instead it reads the entire file in, then overwrites list.txt entirely and reconstructs it from scratch. I suggest making a backup of list.txt, or giving the new txt file a different name, in case the program crashes on some unexpected input.
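For reference, here is a minimal sketch of that safer variant, writing to a separate output file instead of overwriting list.txt; the file name list_updated.txt and the dictionary lookup are my own additions for illustration:

import csv
import json

with open('myclass.json') as f:
    data = json.load(f)

# Build an ID -> Name lookup once instead of scanning the JSON list per row
names_by_id = {item['ID']: item['Name'] for item in data}

with open('list.txt', 'r', newline='') as src, open('list_updated.txt', 'w', newline='') as dst:
    writer = csv.writer(dst)
    for row in csv.reader(src):
        # Replace the ID column with the name when a match exists
        row[1] = names_by_id.get(row[1], row[1])
        writer.writerow(row)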
One option is to open each file individually in the appropriate mode, appending the lines with matched ID values from the two files to a list:
import json

with open('myclass.json', 'r') as f_in:
    data = json.load(f_in)

j = 0
lis = []
with open('list.txt', 'r') as f_in:
    for line in f_in:
        if data[j]['ID'] == line.split(',')[1]:
            s = line.replace(line.split(',')[1], data[j]['Name'])
            lis.append(s)
        j += 1

with open('list.txt', 'w') as f_out:
    for i in lis:
        f_out.write(i)
I am trying to get the unique values from a csv file. Here's an example of the file:
12,life,car,good,exellent
10,gift,truck,great,great
11,time,car,great,perfect
The desired output in the new file is this:
12,10,11
life,gift,time
car,truck
good,great
exellent,great,perfect
Here is my code:
def attribute_values(in_file, out_file):
    fname = open(in_file)
    fout = open(out_file, 'w')
    # get the header line
    header = fname.readline()
    # get the attribute names
    attrs = header.strip().split(',')
    # get the distinct values for each attribute
    values = []
    for i in range(len(attrs)):
        values.append(set())
    # read the data
    for line in fname:
        cols = line.strip().split(',')
        for i in range(len(attrs)):
            values[i].add(cols[i])
    # write the distinct values to the file
    for i in range(len(attrs)):
        fout.write(attrs[i] + ',' + ','.join(list(values[i])) + '\n')
    fout.close()
    fname.close()
The code currently outputs this:
12,10
life,gift
car,truck
good,great
exellent,great
12,10,11
life,gift,time
car,car,truck
good,great
exellent,great,perfect
How can I fix this?
You could try to use zip to iterate over the columns of the input file, and then eliminate the duplicates:
import csv

def attribute_values(in_file, out_file):
    with open(in_file, "r") as fin, open(out_file, "w") as fout:
        for column in zip(*csv.reader(fin)):
            items, row = set(), []
            for item in column:
                if item not in items:
                    items.add(item)
                    row.append(item)
            fout.write(",".join(row) + "\n")
Result for the example file:
12,10,11
life,gift,time
car,truck
good,great
exellent,great,perfect
My program takes a CSV file as input and writes it out as a JSON file. On the final line, I use print to output the contents of the JSON file to the screen. However, it does not print out the JSON file's contents, and I don't understand why.
Here is my code that I have so far:
import csv
import json
def jsonformat(infile, outfile):
    contents = {}
    csvfile = open(infile, 'r')
    reader = csvfile.read()
    for m in reader:
        key = m['No']
        contents[key] = m
    jsonfile = open(outfile, 'w')
    jsonfile.write(json.dumps(contents))
    csvfile.close()
    jsonfile.close()
    return jsonfile
infile = 'orders.csv'
outfile = 'orders.json'
output = jsonformat(infile,outfile)
print(output)
Your function returns the jsonfile variable, which is a file.
Try adding this:
jsonfile.close()
with open(outfile, 'r') as file:
    return file.read()
Your function returns a file handle to the file jsonfile that you then print. Instead, return the contents that you wrote to that file. Since you opened the file in w mode, any previous contents are removed before writing the new contents, so the contents of your file are going to be whatever you just wrote to it.
In your function, do:
def jsonformat(infile, outfile):
    ...
    # Instead of this:
    # jsonfile.write(json.dumps(contents))
    # do this:
    json_contents = json.dumps(contents, indent=4)  # indent=4 to pretty-print
    jsonfile.write(json_contents)
    ...
    return json_contents
Aside from that, you aren't reading the CSV file the correct way. If your file has a header, you can use csv.DictReader to read each row as a dictionary. Then, you'll be able to use for m in reader: key = m['No']. Change reader = csvfile.read() to reader = csv.DictReader(csvfile)
As of now, reader is a string that contains all the contents of your file. for m in reader makes m each character in this string, and you cannot access the "No" key on a character.
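For illustration, a minimal sketch of the function rewritten with csv.DictReader, assuming the CSV has a header row that includes a No column:

import csv
import json

def jsonformat(infile, outfile):
    contents = {}
    with open(infile, 'r', newline='') as csvfile:
        reader = csv.DictReader(csvfile)  # each row becomes a dict keyed by the header fields
        for m in reader:
            contents[m['No']] = m
    json_contents = json.dumps(contents, indent=4)
    with open(outfile, 'w') as jsonfile:
        jsonfile.write(json_contents)
    return json_contents  # return the JSON text so print(output) shows it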
a_file = open("sample.json", "r")
a_json = json.load(a_file)
pretty_json = json.dumps(a_json, indent=4)
a_file.close()
print(pretty_json)
Use this sample to print the contents of your JSON file. Have a good day.
I am very new to Python.
I have a list of stock names in a CSV file. I extract the names and append each one to a website domain to create URLs. I am trying to write the URLs I created into another CSV, but it only writes the last one from the list. I want it to write all of the URLs into the CSV.
with open('names.csv', 'r') as datafile:
    for line in datafile:
        domain = f'https://ceo.ca/{line}'
        urls_link = (domain.strip())
        print(urls_link)

y = open("url.csv", "w")
y.writelines(urls_link)
y.close()
names.csv: https://i.stack.imgur.com/WrrLw.png
url.csv: https://i.stack.imgur.com/BYEgN.png
I would want the url csv look like this:
https://i.stack.imgur.com/y4xre.png
I apologise if I worded some things horribly.
You can use the csv module in Python.
Try using this code:
from csv import writer, reader

in_FILE = "names.csv"
out_FILE = 'url.csv'

urls = list()
with open(in_FILE, 'r') as infile:
    read = reader(infile, delimiter=",")
    for domain_row in read:
        for domain in domain_row:
            url = f'https://ceo.ca/{domain.strip()}'
            urls.append(url)

# newline='' keeps csv.writer from inserting blank rows between entries on Windows
with open(out_FILE, 'w', newline='') as outfile:
    write = writer(outfile)
    for url in urls:
        write.writerow([url])
I am working on a program and trying to achieve the following functionality:
Add a new student
Remove a student based on ID
Here is my code:
from csv import writer
import csv

def add(file_name, list_of_elem):
    # Open file in append mode
    with open(file_name, 'a+', newline='') as write_obj:
        # Create a writer object from csv module
        csv_writer = writer(write_obj)
        # Add contents of list as last row in the csv file
        csv_writer.writerow(list_of_elem)

def remove():
    id = input("Enter ID : ")
    with open('students.csv', 'rb') as inp, open('students.csv', 'wb') as out:
        writer = csv.writer(out)
        for row in csv.reader(inp):
            if row[0] != id:
                writer.writerow(row)

# List of strings
row_contents = [11, 'mayur', 'Java', 'Tokyo', 'Morning']
# Append a list as new line to an old csv file
add('students.csv', row_contents)
remove()
The add function works properly, but when I tried the remove function it removed all existing entries. Could anyone please help me?
First I will show the code, and below I will leave some comments about the changes.
from csv import writer
import csv

def add(file_name, list_of_elem):
    # Open file in append mode
    with open(file_name, 'a+', newline='') as write_obj:
        # Create a writer object from csv module
        csv_writer = writer(write_obj)
        # Add contents of list as last row in the csv file
        csv_writer.writerow(list_of_elem)

def remove():
    idt = input("Enter ID : ")
    with open('students.csv', 'r') as inp:
        newrows = []
        data = csv.reader(inp)
        for row in data:
            if row[0] != idt:
                newrows.append(row)
    # newline='' keeps csv.writer from inserting blank rows on Windows
    with open('students.csv', 'w', newline='') as out:
        csv_writer = writer(out)
        for row in newrows:
            csv_writer.writerow(row)

def display():
    with open('students.csv', 'r') as f:
        data = csv.reader(f)
        for row in data:
            print(row)

# List of strings
row_contents = [10, 'mayur', 'Java', 'Tokyo', 'Morning']
add('students.csv', row_contents)
row_contents = [11, 'mayur', 'Java', 'Tokyo', 'Morning']
add('students.csv', row_contents)
row_contents = [12, 'mayur', 'Java', 'Tokyo', 'Morning']
add('students.csv', row_contents)

# Append a list as new line to an old csv file
display()
remove()
If your file is a CSV, you should open it as a text file, not a binary one.
I changed the name of the variable id to idt because id is a built-in function that returns the identity of an object, and it's not good practice to shadow built-in names.
To remove only the rows with a specific idt, you should read the whole file, store the rows in a list, remove the ones you want to delete, and only after that write the result back.
You should use a temporary file instead of opening and writing to the same file simultaneously. Check out this answer: https://stackoverflow.com/a/17646958/14039323
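As a rough sketch of that idea, assuming the students.csv layout from the question (the tempfile and os.replace usage here is my own illustration, not code from the linked answer):

import csv
import os
import tempfile

def remove(idt, filename='students.csv'):
    # Write the filtered rows to a temporary file in the same directory,
    # then swap it into place so the original is never half-written.
    directory = os.path.dirname(os.path.abspath(filename))
    with open(filename, 'r', newline='') as inp, \
            tempfile.NamedTemporaryFile('w', newline='', delete=False, dir=directory) as tmp:
        writer = csv.writer(tmp)
        for row in csv.reader(inp):
            if row and row[0] != idt:
                writer.writerow(row)
    os.replace(tmp.name, filename)  # atomically replace the original file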
I have 200 CSV files in my folder.
What I am trying to do is read the first row of each file and write it into a new CSV.
At the top, I want to write [file, field1, field2, ... fieldn],
where n is the maximum number of fields.
import csv
import glob

list=[]
hel=[]
files = glob.glob('C:/dataset/*.csv')
with open('test.csv', 'w', newline='') as testfile:
    csv_writer = csv.writer(testfile)
    for file in files:
        with open(file, 'r') as infile:
            file = file[file.rfind('\\')+1:]
            file = file.strip('.csv')
            reader = csv.reader(infile)
            headers = next(reader)
            hel.append((len(headers)))
            max(hel)
            lst = [file] + headers
            csv_writer.writerow(lst)
It turned out that the maximum number of fields across the 200 files is 255.
So at the top of the new CSV file, I want to write file, field1, field2 ... field255.
How can I do this?
import csv
import glob

list=[]
hel=[]
files = glob.glob('C:/dataset/*.csv')
with open('test.csv', 'w', newline='') as testfile:
    csv_writer = csv.writer(testfile)
    for file in files:
        with open(file, 'r') as infile:
            file = file[file.rfind('\\')+1:]
            file = file.strip('.csv')
            reader = csv.reader(infile)
            headers = next(reader)
            hel.append((len(headers)))
            b = ['field{}'.format(i) for i in range(1, max(hel)+1)]
            lst = [file] + headers
            csv_writer.writerow(lst)
Now b is a list that looks like this: ['field1', 'field2', ... 'field255'].
I need to insert 'file' before 'field1' and write that row at the top of the new CSV file. Writing code after csv_writer.writerow(lst) gives me a CSV file with 'field1', 'field2', ... on every other line. How can I fix this problem?
You first need to read all your input files to determine that the maximum number of fields is 255. Then you need to construct the list of field names to write into the output file (just once, not in a loop):
['field{}'.format(i) for i in range(1, 256)]
You can pass that list to the csv module to write it.
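A rough two-pass sketch along those lines; the function name collect_first_rows and its variables are just for illustration:

import csv
import glob
import os

def collect_first_rows(pattern, out_path):
    files = glob.glob(pattern)
    rows = []
    max_fields = 0
    # First pass: read the first row of every file and track the widest one
    for path in files:
        with open(path, 'r', newline='') as infile:
            headers = next(csv.reader(infile), [])
        name = os.path.splitext(os.path.basename(path))[0]
        rows.append([name] + headers)
        max_fields = max(max_fields, len(headers))
    # Second pass: write the header row once, then every collected row
    with open(out_path, 'w', newline='') as testfile:
        writer = csv.writer(testfile)
        writer.writerow(['file'] + ['field{}'.format(i) for i in range(1, max_fields + 1)])
        writer.writerows(rows)

collect_first_rows('C:/dataset/*.csv', 'test.csv')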
Read the field count and first line from each file before writing the file.
import glob
from itertools import chain
import os
from os.path import splitext, basename

def first_line(filepath):
    with open(filepath) as f:
        return next(f)

def write_test_file(dest_file_path, source_path_name):
    source_paths = glob.glob(source_path_name)
    first_lines = list(map(first_line, source_paths))
    max_count = max(l.count(",") for l in first_lines)
    field_names = map("field{}".format, range(1, max_count + 2))
    header = ",".join(chain(["file"], field_names)) + os.linesep
    file_names = (splitext(basename(p))[0] for p in source_paths)
    content = chain([header], map(",".join, zip(file_names, first_lines)))
    with open(dest_file_path, 'w') as testfile:
        testfile.write("".join(content))

write_test_file('test.csv', 'C:/dataset/*.csv')