I want to add a new column and new values to it. I'm just using normal file handling to do it (just adding a delimiter). I did try using the csv module, but the resulting csv file had one letter per cell after running the code.
#import csv
#import sys
#csv.field_size_limit(sys.maxsize)

inp = open("city2", "r")
inp2 = open("op", "r")
oup = open("op_mod.csv", "a+")
#alldata = []
count = 0

for line in inp2:
    check = 0
    if count == 0:
        count = count + 1
        colline = line + "\t" + "cities"
        oup.write(colline)
        continue
    for city in inp:
        if city in line:
            print(city, line)
            linemod = line + "\t" + city  # adding new value to an existing row
            #alldata.append(linemod)
            oup.write(linemod)  # writing the new value
            check = 1
            break
    if check == 0:
        check = 1
        #oup.write(line)
        #alldata.append(line)
    inp.close()
    inp = open("city2", "r")

#writer.writerows(alldata)
inp.close()
inp2.close()
oup.close()
Expected result:
existing fields/values ... new field/value
Actual result:
existing fields/values ... new line
new field/value ... next line
There is a carriage return (newline) at the end of line; you can remove it using line.rstrip(), similar to this answer:
Deleting carriage returns caused by line reading
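As a minimal sketch of how that fix could look applied to the code above (same file names as in the question; reading the cities into a list first is an assumption made here to avoid re-opening "city2" on every pass):

with open("city2") as inp, open("op") as inp2, open("op_mod.csv", "w") as oup:
    cities = [c.rstrip("\n") for c in inp]   # strip trailing newlines once
    for i, line in enumerate(inp2):
        if i == 0:
            # header row: drop the newline, append the new column name, re-add the newline
            oup.write(line.rstrip("\n") + "\t" + "cities" + "\n")
            continue
        for city in cities:
            if city in line:
                oup.write(line.rstrip("\n") + "\t" + city + "\n")
                break
        else:
            oup.write(line)   # no city matched: keep the row unchanged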
I want to delete a line of text from a .txt file given an integer corresponding to the txt file's line number. For example, given the integer 2, delete line 2 of the text file.
I'm sort of lost on what to put into my program.
f = open('text1.txt', 'r+')
g = open('text2.txt', "w")
line_num = 0
search_phrase = "Test"

for line in f.readlines():
    line_num += 1
    if line.find(search_phrase) >= 0:
        print("text found a line", line_num)
        decision = input("enter letter corresponding to a decision: (d = delete lines, s = save to new txt) \n")
        if decision == 'd':
            pass  # delete the current line
        if decision == 's':
            pass  # save the current line to a new file
Any help is appreciated! Thanks :)
This way:
with open('text1.txt', 'r') as f, open('text2.txt', "w") as g:
    to_delete = [2, 4]
    for line_number, line in enumerate(f.readlines(), 1):
        if line_number not in to_delete:
            g.write(line)
        else:
            print(f'line {line_number}, "{line.rstrip()}" deleted')
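If the line number should come from the user, as the question asks, a rough sketch of the same idea (file names kept from the question; input validation omitted):

to_delete = int(input("Enter the number of the line to delete: "))

with open('text1.txt', 'r') as f, open('text2.txt', 'w') as g:
    for line_number, line in enumerate(f, 1):
        if line_number != to_delete:
            g.write(line)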
Here it goes.
f = open('data/test.txt', 'r')
text = f.readlines()  # all lines are read into a list and you can access them as a list
f.close()
deleted_line = text.pop(1)  # the line is removed from the list and stored in the variable
print(text)
print(deleted_line)
f = open('data/test.txt', 'w')  # re-open for writing; this replaces the old contents with the new data
f.writelines(text)
f.close()
I am writing code in Python to remove all the text after a specific word, but lines are missing in the output. I have a text file in Unicode which has three lines:
my name is test1
my name is
my name is test 2
What I want is to remove the text after the word "test", so I would get the output below:
my name is test
my name is
my name is test
I have written code that does the task, but it also removes the second line, "my name is".
My code is below:
txt = ""
with open(r"test.txt", 'r') as fp:
for line in fp.readlines():
splitStr = "test"
index = line.find(splitStr)
if index > 0:
txt += line[:index + len(splitStr)] + "\n"
with open(r"test.txt", "w") as fp:
fp.write(txt)
It looks like when the keyword is not found, the index becomes -1.
So you are skipping the lines without the keyword.
I would modify your if by adding the condition as follows:
txt = ""
with open(r"test.txt", 'r') as fp:
for line in fp.readlines():
splitStr = "test"
index = line.find(splitStr)
if index > 0:
txt += line[:index + len(splitStr)] + "\n"
elif index < 0:
txt += line
with open(r"test.txt", "w") as fp:
fp.write(txt)
No need to add \n because the line already contains it.
Your code does not append the line if splitStr is not found.
txt = ""
with open(r"test.txt", 'r') as fp:
for line in fp.readlines():
splitStr = "test"
index = line.find(splitStr)
if index != -1:
txt += line[:index + len(splitStr)] + "\n"
else:
txt += line
with open(r"test.txt", "w") as fp:
fp.write(txt)
In my solution I simulate the input file via io.StringIO. Compared to your code, my solution removes the else branch and uses only one += operator. Also, splitStr is set only once rather than on each iteration. This makes the code clearer and reduces possible sources of error.
import io

# simulates a file for this example
the_file = io.StringIO("""my name is test1
my name is
my name is test 2""")

txt = ""
splitStr = "test"

with the_file as fp:
    # each line
    for line in fp.readlines():
        # cut something?
        if splitStr in line:
            # find index
            index = line.find(splitStr)
            # cut after 'splitStr' and add newline
            line = line[:index + len(splitStr)] + "\n"
        # append line to output
        txt += line

print(txt)
When handling files in Python 3, it is recommended to use pathlib, like this:
import pathlib

file_path = pathlib.Path("test.txt")

# read from the file
with file_path.open('r') as fp:
    ...  # do something

# write back to the file
with file_path.open('w') as fp:
    ...  # do something
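Applied to this question, a short sketch of the whole task using pathlib's read_text/write_text (just one possible way to wire it up, reusing the splitStr logic from above):

import pathlib

file_path = pathlib.Path("test.txt")
splitStr = "test"

lines = []
for line in file_path.read_text().splitlines():
    index = line.find(splitStr)
    if index != -1:
        line = line[:index + len(splitStr)]  # keep the keyword, drop the rest
    lines.append(line)

file_path.write_text("\n".join(lines) + "\n")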
Suggestion:
for line in fp.readlines():
    i = line.find('test')
    if i != -1:
        line = line[:i]
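Note that this cuts the line before 'test'; if the keyword itself should be kept, as in the expected output above, str.partition is another option (a small sketch, not from the original answer):

for line in fp.readlines():
    before, sep, _after = line.partition('test')  # sep is '' if 'test' is not found
    if sep:
        line = before + sep  # keep everything up to and including 'test'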
I am trying to read all the lines in a specific file and print the number of each line as an index.
What I am trying to do is delete a line when the user inputs its number.
As it is now, it prints all the lines with their numbers, but when I enter the number of the line to be deleted, it is not deleted.
This is the code of the delete function:
def deleteorders():
    index = 0
    fh = open('orders.txt', 'r')
    lines = fh.readlines()
    for line in lines:
        lines = fh.readlines()
        index = index + 1
        print(str(index) + ' ' + line)
    try:
        indexinp = int(input('Enter the number of the order to be deleted, or "B" to go back: '))
        if indexinp == 'B':
            return
        else:
            del line[indexinp]
            print(line)
            fh = open('orders.txt', 'w')
            fh.writelines(line)
            fh.close()
    except:
        print('The entered number is not in the range')
    return
This should work (you'll need to add the error handling back in):
lines = list(enumerate(open('orders.txt'), 1))  # materialize so the list can be iterated twice; number from 1
for i, line in lines:
    print(i, line, end='')
i = int(input(">"))
open('orders.txt', 'w').write(''.join(v for k, v in lines if k != i))
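A sketch of what it could look like with the error handling added back in (file name and prompt kept from the question; the exact checks are an assumption):

lines = list(enumerate(open('orders.txt'), 1))
for i, line in lines:
    print(i, line, end='')

choice = input('Enter the number of the order to be deleted, or "B" to go back: ')
if choice != 'B':
    try:
        n = int(choice)
        if not 1 <= n <= len(lines):
            raise ValueError
    except ValueError:
        print('The entered number is not in the range')
    else:
        open('orders.txt', 'w').write(''.join(v for k, v in lines if k != n))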
I am trying to parse/process some information from a text file using Python. This file contains names, employee numbers and other data. I do not know the names or employee numbers ahead of time. I do know that after the names there is the text: "Per End" and before the employee number there is the text: "File:". I can find these items using the .find() method. But, how do I ask Python to look at the information that comes before or after "Per End" and "File:"? In this specific case the output should be the name and employee number.
The text looks like this:
SMITH, John
Per End: 12/10/2016
File:
002013
Dept:
000400
Rate:10384 60
My code is thus:
file = open("Register.txt", "rt")
lines = file.readlines()
file.close()
countPer = 0
for line in lines:
line = line.strip()
print (line)
if line.find('Per End') != -1:
countPer += 1
print ("Per End #'s: ", countPer)
file = open("Register.txt", "rt")
lines = file.readlines()
file.close()
for indx, line in enumerate(lines):
line = line.strip()
print (line)
if line.find('Per End') != -1:
print lines[indx-1].strip()
if line.find('File:') != -1:
print lines[indx+1].strip()
enumerate(lines) gives access to the indices as well as the lines, so you can access the previous and next lines.
Here is my stdout from running it directly in the Python shell:
>>> file = open("r.txt", "rt")
>>> lines = file.readlines()
>>> file.close()
>>> lines
['SMITH, John\n', 'Per End: 12/10/2016\n', 'File:\n', '002013\n', 'Dept:\n', '000400\n', 'Rate:10384 60\n']
>>> for indx, line in enumerate(lines):
... line = line.strip()
... if line.find('Per End') != -1:
... print lines[indx-1].strip()
... if line.find('File:') != -1:
... print lines[indx+1].strip()
SMITH, John
002013
Here is how I would do it.
First, some test data.
test = """SMITH, John\n
Per End: 12/10/2016\n
File:\n
002013\n
Dept:\n
000400\n
Rate:10384 60\n"""
text = [line for line in test.splitlines(keepends=False) if line != ""]
Now for the real answer.
count_per, count_num = 0, 0
Using enumerate on an iterable gives you an index automagically.
for idx, line in enumerate(text):
    # Just test whether what you're looking for is in the `str`
    if 'Per End' in line:
        print(text[idx - 1])  # access the full set of lines with idx
        count_per += 1
    if 'File:' in line:
        print(text[idx + 1])
        count_num += 1

print("Per Ends = {}".format(count_per))
print("Files = {}".format(count_num))
yields for me:
SMITH, John
002013
Per Ends = 1
Files = 1
I am doing text processing and using the readline() function as follows:
ifd = open(...)
for line in ifd:
    while (condition):
        # do something...
        line = ifd.readline()
        condition = ....
        # Here, when the condition becomes false, I need to rewind the pointer so that the 'for' loop reads the same line again.
ifd.seek() followed by readline() is giving me a '\n' character. How do I rewind the pointer so that the whole line is read again?
>>> ifd.seek(-1,1)
>>> line = ifd.readline()
>>> line
'\n'
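A minimal sketch of the usual pattern for re-reading a whole line: record the offset with tell() before the readline(), then seek() back to it (the file name here is just a placeholder):

with open('data.txt') as ifd:
    pos = ifd.tell()        # remember where this line starts
    line = ifd.readline()   # read the whole line
    ifd.seek(pos)           # rewind to the start of that line
    assert ifd.readline() == line   # the same line is read again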
Here is my code
labtestnames = sorted(tmp)

# Now read each line in the inFile and write into outFile
ifd = open(inFile, "r")
ofd = open(outFile, "w")

# read the header
header = ifd.readline()  # Do nothing with this line. Skip

# Write header into the output file
nl = "mrn\tspecimen_id\tlab_number\tlogin_dt\tfluid"
offset = len(nl.split("\t"))
nl = nl + "\t" + "\t".join(labtestnames)
ofd.write(nl + "\n")

lenFields = len(nl.split("\t"))
print "Reading the input file and converting into modified file for further processing (correlation analysis etc..)"

prevTup = (0, 0, 0)
rowComplete = 0
k = 0
for line in ifd:
    k = k + 1
    if (k == 200): break
    items = line.rstrip("\n").split("\t")
    if (items[0] == ''):
        continue
    newline = list('' for i in range(lenFields))
    newline[0], newline[1], newline[3], newline[2], newline[4] = items[0], items[1], items[3], items[2], items[4]
    ltests = []
    ltvals = []
    while (cmp(prevTup, (items[0], items[1], items[3])) == 0):  # If the same mrn, lab_number and specimen_id then fill the same row, else create a new row.
        ltests.append(items[6])
        ltvals.append(items[7])
        pos = ifd.tell()
        line = ifd.readline()
        prevTup = (items[0], items[1], items[3])
        items = line.rstrip("\n").split("\t")
        rowComplete = 1
    if (rowComplete == 1):  # If the row is completed, prepare newline and write into outfile
        indices = [labtestnames.index(x) for x in ltests]
        j = 0
        ifd.seek(pos)
        for i in indices:
            newline[i + offset] = ltvals[j]
            j = j + 1
    if (rowComplete == 0):
        currTup = (items[0], items[1], items[3])
        ltests = items[6]
        ltvals = items[7]
        pos = ifd.tell()
        line = ifd.readline()
        items = line.rstrip("\n").split("\t")
        newTup = (items[0], items[1], items[3])
        if (cmp(currTup, newTup) == 0):
            prevTup = currTup
            ifd.seek(pos)
            continue
        else:
            indices = labtestnames.index(ltests)
            newline[indices + offset] = ltvals
    ofd.write(newline + "\n")
The problem can be handled more simply using itertools.groupby. groupby can cluster all the contiguous lines that deal with the same mrn, specimen_id, and lab_num.
The code that does this is
for key, group in IT.groupby(reader, key = mykey):
where reader iterates over the lines of the input file, and mykey is defined by
def mykey(row):
    return (row['mrn'], row['specimen_id'], row['lab_num'])
Each row from reader is passed to mykey, and all rows with the same key are clustered together in the same group.
While we're at it, we might as well use the csv module to read each line into a dict (which I call row). This frees us from having to deal with low-level string manipulation like line.rstrip("\n").split("\t") and instead of referring to columns by index numbers (e.g. row[3]) we can write code that speaks in higher-level terms such as row['lab_num'].
import itertools as IT
import csv

inFile = 'curious.dat'
outFile = 'curious.out'

def mykey(row):
    return (row['mrn'], row['specimen_id'], row['lab_num'])

fieldnames = 'mrn specimen_id date lab_num Bilirubin Lipase Calcium Magnesium Phosphate'.split()

with open(inFile, 'rb') as ifd:
    reader = csv.DictReader(ifd, delimiter='\t')
    with open(outFile, 'wb') as ofd:
        writer = csv.DictWriter(
            ofd, fieldnames, delimiter='\t', lineterminator='\n')
        writer.writeheader()
        for key, group in IT.groupby(reader, key=mykey):
            new = {}
            row = next(group)
            for key in ('mrn', 'specimen_id', 'date', 'lab_num'):
                new[key] = row[key]
            new[row['labtest']] = row['result_val']
            for row in group:
                new[row['labtest']] = row['result_val']
            writer.writerow(new)
yields
mrn specimen_id date lab_num Bilirubin Lipase Calcium Magnesium Phosphate
4419529 1614487 26.2675 5802791G 0.1
3319529 1614487 26.2675 5802791G 0.3 153 8.1 2.1 4
5713871 682571 56.0779 9732266E 4.1
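As a side note, the contiguity requirement of groupby is easy to see in a tiny standalone example (purely illustrative data):

import itertools as IT

rows = [('a', 1), ('a', 2), ('b', 3), ('a', 4)]
for key, group in IT.groupby(rows, key=lambda r: r[0]):
    print(key, list(group))
# a [('a', 1), ('a', 2)]
# b [('b', 3)]
# a [('a', 4)]   <- only *contiguous* items with the same key are clustered together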
This seems to be a perfect use case for yield expressions. Consider the following example that prints lines from a file, repeating some of them at random:
def buflines(fp):
    r = None
    while True:
        r = yield r or next(fp)
        if r:
            yield None

from random import randint

with open('filename') as fp:
    buf = buflines(fp)
    for line in buf:
        print(line)
        if randint(1, 100) > 80:
            print('ONCE AGAIN::')
            buf.send(line)
Basically, if you want to process an item once again, you send it back to the generator. On the next iteration you will be reading the same item once again.