I am trying to read all the lines in a specific file and print each line with its line number as an index.
What I want to do is delete a line when the user enters that line's number.
As it stands, the code prints every line with its number, but when I enter the number of the line to be deleted, the line is not deleted.
This is the code of the delete function:
def deleteorders ():
    index = 0
    fh = open ('orders.txt', 'r')
    lines = fh.readlines()
    for line in lines:
        lines = fh.readlines()
        index = index+1
        print (str(index) + ' ' + line)
    try:
        indexinp = int(input('Enter the number of the order to be deleted, or "B" to go back: '))
        if indexinp == 'B':
            return
        else:
            del line[indexinp]
            print (line)
            fh = open ('orders.txt', 'w')
            fh.writelines(line)
            fh.close()
    except:
        print ('The entered number is not in the range')
    return
This should work (you'll need to add the error handling back in). Note that the lines are materialized into a list first, so they are still available when the file is rewritten:
lines = list(enumerate(open('orders.txt')))
for i, line in lines:
    print(i, line.rstrip())
i = int(input(">"))
open('orders.txt', 'w').writelines(v for k, v in lines if k != i)
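For completeness, here is a hedged sketch that also keeps the "B" option and the range check from the question; delete_order is a hypothetical name, and it assumes Python 3 and the same orders.txt file:
def delete_order(path='orders.txt'):
    # Read the whole file once so we can both display and rewrite it.
    with open(path) as fh:
        lines = fh.readlines()
    for i, line in enumerate(lines, start=1):
        print(str(i) + ' ' + line.rstrip())
    choice = input('Enter the number of the order to be deleted, or "B" to go back: ')
    if choice.strip().upper() == 'B':
        return
    try:
        index = int(choice)
    except ValueError:
        print('The entered number is not in the range')
        return
    if not 1 <= index <= len(lines):
        print('The entered number is not in the range')
        return
    removed = lines.pop(index - 1)   # displayed numbers start at 1
    print('Deleted: ' + removed.rstrip())
    with open(path, 'w') as fh:
        fh.writelines(lines)
Reading the choice as a string before converting it avoids the crash that int(input(...)) causes when the user types "B".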
I am having an issue getting the train function to work correctly in Python. I cannot modify the def function. I am at the point where I need the second file to read lines one at a time for PosList, and I need to match the value of movieWordCount[z] in OpenPos. If the word is there, I want to increment column 2 of that line (the columns are separated by a space) by one. If it is not, I need the else branch to append it to the end of the file. It does not work: it does not append the missing values, and I am not sure it even finds the values that are there. I have been stuck on this for two days.
Here is my code segment I am working with:
with open("PosList") as OpenPos:
lines = OpenPos.readlines()
print lines
if movieWordCount[z] in lines:
print "found"
#Now use tokenize to split it apart by space and set to new array for me to call column2
else:
print "not found"
lines.append(movieWordCount[z] + " 1" + "\n")
Here is my full code:
#!/usr/bin/python
#Import Counter
import collections
from collections import Counter
#Was already here but pickle is used for data input and export
import math, os, pickle, re
class Bayes_Classifier:
    def __init__(self, trainDirectory = "movie_reviews/"):
        #If file listing exists skip to train
        if os.path.isfile('iFileList'):
            print "file found"
            self.train()
            #self.classify()
        #If file listing does not exist skip to train
        if not os.path.isfile('iFileList'):
            print "no file"
            newfile = 'iFileList'
            tempList = set()
            subDir = './movie_reviews'
            for filenames in os.listdir(subDir):
                my_sub_path = os.path.join(os.sep,subDir,filenames)
                tempList.add(filenames)
                self.save("filenames", "try3")
            f = []
            for fFileObj in os.walk("movie_reviews/"):
                f.extend(fFileObj)
                break
            pickle.dump(f, open( "save.p", "wb" ))
            self.save(f, "try4")
            with open(newfile, 'wb') as fi:
                pickle.dump(tempList, fi)
            #print tempList
            self.train()
            #self.classify()
    def train(self):
        '''Trains the Naive Bayes Sentiment Classifier.'''
        print "File ready for training"
        #Open iFileList to use as input for opening movie files
        x = 0
        OpenIFileList = open('iFileList','r')
        print "iFileList now Open"
        #Loop through the file
        for line in OpenIFileList:
            #print "Ready to read lines"
            #print "reading line " + line
            if x > 4:
                if x % 2 == 0:
                    #print line
                    s = line
                    if '-' in s:
                        comp = s.split("'")
                        #print comp[2]
                        print comp[1] #This is what you need for the movie file
                        compValue1 = comp[1]
                        #Determine Positive/Negative.
                        #compType is the variable I am storing it to.
                        compType = compValue1.split("-",2)[1]
                        #print compType #Prints that middle value like 5 or 1
                        # This will do the work based on the value.
                        if compType == '5':
                            # print "you have a five" #Confirms the loop I am in.
                            #If file does not exist create it
                            if not os.path.exists('PosList'):
                                print "no file"
                                file('PosList', 'w').close()
                            #Open file that needs to be reviewed for word count
                            compValue2 = "movie_reviews/" + compValue1
                            print compValue2 #Prints the directory and file path
                            OpenMovieList = open(compValue2,'r')
                            for commentLine in OpenMovieList:
                                commentPositive = commentLine.split(" ")
                                commentPositiveCounter = Counter(commentPositive)
                                #print commentPositiveCounter # " Comment Pos goes here"
                                #if commentLine != '' or commentLine != ' ':
                                #Get first word, second word, ....
                                if commentLine and (not commentLine.isspace()):
                                    movieWordCount = self.tokenize(commentLine)
                                    y = len(movieWordCount) #determines length of string
                                    print y
                                    z = 0
                                    #print movieWordCount[0] # Shows the zero position in the file.
                                    while z < y:
                                        print "position " + str(z) + " word is " + movieWordCount[z] # Shows the word we are at and position id
                                        with open("PosList") as OpenPos:
                                            lines = OpenPos.readlines()
                                            print lines
                                            if movieWordCount[z] in lines:
                                                print "found"
                                            else:
                                                print "not found"
                                                lines.append(movieWordCount)
                                        z = z + 1
                            #Close the files
                            OpenMovieList.close()
                            OpenPos.close()
            x += 1
            #for line2 in OpenIFileList.readlines():
            #for line in open('myfile','r').readlines():
            #do_something(line)
        #Save results
        #Close the File List
        OpenIFileList.close()
    def loadFile(self, sFilename):
        '''Given a file name, return the contents of the file as a string.'''
        f = open(sFilename, "r")
        sTxt = f.read()
        f.close()
        return sTxt
    def save(self, dObj, sFilename):
        '''Given an object and a file name, write the object to the file using pickle.'''
        f = open(sFilename, "w")
        p = pickle.Pickler(f)
        p.dump(dObj)
        f.close()
    def load(self, sFilename):
        '''Given a file name, load and return the object stored in the file.'''
        f = open(sFilename, "r")
        u = pickle.Unpickler(f)
        dObj = u.load()
        f.close()
        return dObj
    def tokenize(self, sText):
        '''Given a string of text sText, returns a list of the individual tokens that
        occur in that string (in order).'''
        lTokens = []
        sToken = ""
        for c in sText:
            if re.match("[a-zA-Z0-9]", str(c)) != None or c == "\'" or c == "_" or c == '-':
                sToken += c
            else:
                if sToken != "":
                    lTokens.append(sToken)
                    sToken = ""
                if c.strip() != "":
                    lTokens.append(str(c.strip()))
        if sToken != "":
            lTokens.append(sToken)
        return lTokens
To open a file for writing, you can use
with open('PosList', 'w') as Open_Pos:
As you are using the with form, you do not need to close the file; Python will do that for you at the end of the with-block.
So, assuming that the way you add data to the lines variable is correct, you can remove the superfluous OpenPos.close() call (the with-block already closes that file) and append two lines to your code:
with open("PosList") as OpenPos:
    lines = OpenPos.readlines()
print lines
if movieWordCount[z] in lines:
    print "found"
else:
    print "not found"
    lines.append(movieWordCount)
with open("PosList", "w") as OpenPos:
    OpenPos.writelines(lines)
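Since the question also wants to increment column 2 when the word is already in PosList, a minimal sketch of that read-modify-write step might look like this; update_pos_list is a hypothetical helper, and it assumes each PosList line has the form "word count":
def update_pos_list(word, path="PosList"):
    # Read the current word counts into a dict: {word: count}.
    counts = {}
    with open(path) as f:
        for line in f:
            parts = line.split()
            if len(parts) == 2:
                counts[parts[0]] = int(parts[1])
    # Increment the word if it is already there, otherwise start it at 1.
    counts[word] = counts.get(word, 0) + 1
    # Write everything back out in "word count" form.
    with open(path, "w") as f:
        for w, c in counts.items():
            f.write(w + " " + str(c) + "\n")
You would call it once per token, e.g. update_pos_list(movieWordCount[z]), inside the existing while loop.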
I am trying to parse some information from a text file using Python. The file contains names, employee numbers and other data. I do not know the names or employee numbers ahead of time. I do know that the text "Per End" comes after each name and the text "File:" comes before each employee number. I can find these markers using the .find() method, but how do I ask Python to look at the information that comes before or after "Per End" and "File:"? In this specific case the output should be the name and the employee number.
The text looks like this:
SMITH, John
Per End: 12/10/2016
File:
002013
Dept:
000400
Rate:10384 60
My code is thus:
file = open("Register.txt", "rt")
lines = file.readlines()
file.close()
countPer = 0
for line in lines:
line = line.strip()
print (line)
if line.find('Per End') != -1:
countPer += 1
print ("Per End #'s: ", countPer)
file = open("Register.txt", "rt")
lines = file.readlines()
file.close()
for indx, line in enumerate(lines):
line = line.strip()
print (line)
if line.find('Per End') != -1:
print lines[indx-1].strip()
if line.find('File:') != -1:
print lines[indx+1].strip()
enumerate(lines) gives you access to the indices as well as the lines themselves, so you can reach the previous and next lines too.
Here is my stdout, run directly in the Python shell:
>>> file = open("r.txt", "rt")
>>> lines = file.readlines()
>>> file.close()
>>> lines
['SMITH, John\n', 'Per End: 12/10/2016\n', 'File:\n', '002013\n', 'Dept:\n', '000400\n', 'Rate:10384 60\n']
>>> for indx, line in enumerate(lines):
...     line = line.strip()
...     if line.find('Per End') != -1:
...         print lines[indx-1].strip()
...     if line.find('File:') != -1:
...         print lines[indx+1].strip()
SMITH, John
002013
Here is how I would do it.
First, some test data.
test = """SMITH, John\n
Per End: 12/10/2016\n
File:\n
002013\n
Dept:\n
000400\n
Rate:10384 60\n"""
text = [line for line in test.splitlines(keepends=False) if line != ""]
Now for the real answer.
count_per, count_num = 0, 0
Using enumerate on an iterable gives you an index automagically.
for idx, line in enumerate(text):
    # Just test whether what you're looking for is in the `str`
    if 'Per End' in line:
        print(text[idx - 1])  # access the full set of lines with idx
        count_per += 1
    if 'File:' in line:
        print(text[idx + 1])
        count_num += 1
print("Per Ends = {}".format(count_per))
print("Files = {}".format(count_num))
yields for me:
SMITH, John
002013
Per Ends = 1
Files = 1
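If you want to keep each name together with its employee number instead of just printing them, the same enumerate pattern can collect pairs; this is only a sketch, employees is a made-up name, and it assumes every "File:" line follows a "Per End" line as in the sample:
employees = []  # will hold (name, employee_number) tuples
for idx, line in enumerate(text):
    if 'Per End' in line:
        name = text[idx - 1].strip()
    if 'File:' in line:
        number = text[idx + 1].strip()
        employees.append((name, number))
print(employees)  # e.g. [('SMITH, John', '002013')]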
As far as I know this may work for a single condition, but it clears my file while replacing the matched string in it.
#!/usr/bin/python
with open('vpc.xml','r+') as rp: #open a file in read and write mode
    p = rp.readlines()
    print p #printing what content was read into p
    for line in p: #loop over every line in the file
        if "<mac address=" in line: #if the line matches, replace the string specified in the if condition
            print line
            r.write(line.replace("<mac address=","<mac address1="))
        elif "<source bridge=" in line:
            print line
            r.write(line.replace("<source bridge=","<source bridge1="))
        elif "<target dev" in line:
            print line
            r.write(line.replace("<target dev","<target dev1"))
        else :
            print 'no changes'
            continue
#!/usr/bin/python
o = input("enter how many vpcs")
y = input("enter how many eth interfaces")
for k in range(1,o):
    h = 'vpc'+str(k)+'.xml'
    print h
    #j = input("enter how many eth interfaces")
    ri = open(h,'r')
    line = ri.read()
    ri.close()
    for i in range(0,y):
        for s in range(i+1):
            dev_val = {'network':'bridge','<man':'<manneth0','<man':'vpc'+str(i)+_eth+str(i),}
            for key,val in dev_val.items():
                line = line.replace(key,val)
                print line
    with open(h,'w') as v:
        v.write(line)
        v.close()
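A common way to avoid clearing the file is to read the whole file into memory first, do the replacements there, and only then write the result back (or to a copy). A minimal sketch under that assumption, reusing the vpc.xml name from the question:
# Read everything first, so we never write to a file we are still reading.
with open('vpc.xml', 'r') as f:
    content = f.read()
replacements = {
    '<mac address=': '<mac address1=',
    '<source bridge=': '<source bridge1=',
    '<target dev': '<target dev1',
}
for old, new in replacements.items():
    content = content.replace(old, new)
# Write the modified text back; use a different filename here if you want to keep the original.
with open('vpc.xml', 'w') as f:
    f.write(content)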
Here is what I am trying to do:
I am trying to solve an issue with line wrapping in a text file.
I want to open a txt file, read a line, and if the line contains the text I am looking for, check whether the next line also contains it. If the next line does not, append that line to the first line.
import re
stuff = open("my file")
for line in stuff:
    if re.search("From ", line):
        first = line
        print first
        if re.search('From ', handle.next()):
            continue
        else: first = first + handle.next()
    else: continue
I have looked at quite a few things and cannot seem to find an answer. Please help!
I would try something like this, though it breaks for three consecutive "From " lines and is not elegant at all.
lines = open("file", 'r').readlines()
lines2 = open("file2", 'w')
counter_list=[]
last_from = 0
for counter, line in enumerate(lines):
if "From " in line and counter != last_from +1:
last_from = counter
current_count = counter
if current_count+1 == counter:
if "From " in line:
counter_list.append(current_count+1)
for counter, line in enumerate(lines):
if counter in counter_list:
lines2.write(line)
else:
lines2.write(line, '\n')
Then you can check lines2 to see whether that helped.
You could also reverse the order of the lines and then check the previous line instead of the next one; that would solve your problem in one loop.
Thank you Martjin for helping me reset my mind frame! This is what I came up with:
handle = open("my file")
first = ""
second = ""
sent = ""
for line in handle:
line = line.rstrip()
if len(first) > 0:
if line.startswith("From "):
if len(sent) > 0:
print sent
else: continue
first = line
second = ""
else:
second = second + line
else:
if line.startswith("From "):
first = line
sent = first + second
It is probably crude, but it definitely got the job done!
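For comparison, a more compact sketch of the same idea, assuming every line that does not start with "From " is a wrapped continuation of the previous "From " line (joined is a made-up name):
joined = []
with open("my file") as handle:
    for line in handle:
        line = line.rstrip()
        if line.startswith("From ") or not joined:
            joined.append(line)       # start a new logical line
        else:
            joined[-1] += line        # wrapped text: glue it onto the previous line
for item in joined:
    print(item)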
def match_text(raw_data_file, concentration):
    file = open(raw_data_file, 'r')
    lines = ""
    print("Testing")
    for num, line in enumerate(file.readlines(), 0):
        w = ' WITH A CONCENTRATION IN ' + concentration
        if re.search(w, line):
            for i in range(0, 6):
                lines += linecache.getline(raw_data_file, num+1)
    try:
        write(lines, "lines.txt")
        print("Lines Data Created...")
    except:
        print("Could not print Line Data")
    else:
        print("Didn't Work")
I am trying to open a .txt file and search for a specific string.
If you are simply trying to write all of the lines that hold your string to a file, this will do.
def match_text(raw_data_file, concentration):
    look_for = ' WITH A CONCENTRATION IN ' + concentration
    with open(raw_data_file) as fin, open('lines.txt', 'w') as fout:
        fout.writelines(line for line in fin if look_for in line)
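Usage would then look something like this; raw_data.txt and 'WATER' are just made-up example values:
match_text('raw_data.txt', 'WATER')
# lines.txt now holds every line containing ' WITH A CONCENTRATION IN WATER'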
Fixed my own issue. The following works to find a specific line and get the lines following the matched line.
def match_text(raw_data_file, match_this_text):
    w = match_this_text
    lines = ""
    with open(raw_data_file, 'r') as inF:
        for line in inF:
            if w in line:
                lines += line  # add the matched text to the lines string
                for i in range(0, however_many_lines_after_matched_text):
                    lines += next(inF)
    # do something with 'lines', which is the final multiline text
This will return multiple lines plus the matched string that the user wants. I apologize if the question was confusing.