I have a text file that I want to parse with the condition that, if I find the match phrase in a line, I have to jump to the next line to fetch the value (unfortunately, that's how the report logs are generated). I have created _dict to check my key and fetch my values from the next line.
import re

f1 = open('Testlog.txt', 'r')
Lines = f1.readlines()
numlines = len(Lines)
f1.close()
f1 = open('Testlog.txt', 'r')
f2 = open('writetoFile', 'r+')
f3 = open('Results.txt', 'w')
new_line = "Test Name SubTest passed failed status "
f3.write(new_line)
f3.write("\n")
i = 0
while i < numlines:
    line = f1.readline()
    if "Test Name" in line:
        f2.write(line)
        i = i + 1
        line = f1.readline()
        if "true" in line:
            f2.write(line)
            line = line.strip('\n ')
            #print line
            data = re.split(r"\s{2,}", line)
            Test_Name = data[4]
            SubTest = data[6]
            passed = data[7]
            failed = data[8]
            status = data[9]
            result = Test_Name + " " + SubTest + " " + passed + " " + failed + " " + status
            print result
            f3.write(result)
            f3.write("\n")
    i = i + 1
I was wondering if there is a better way to do this.
What is your method for parsing the line? Can you post sample code? That will help.
To answer your second question, you could make a dictionary in which each key refers to a list; then you can use a for loop to iterate through each of the values (or whatever you need):
foo = { 1 : ['a','b','c'] }
for value in foo[1]:
    print(value)
which prints a, b and c, each on its own line.
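For the original log-parsing loop, a leaner shape is also possible: iterate over the file and call next() to jump to the line after a match. This is only a sketch built on assumptions taken from the question (the "Test Name" marker, a "true" line right after it, and the same whitespace-split field positions); the writetoFile intermediate step is left out:
import re
with open('Testlog.txt') as f1, open('Results.txt', 'w') as f3:
    f3.write("Test Name SubTest passed failed status\n")
    lines = iter(f1)
    for line in lines:
        if "Test Name" in line:
            value_line = next(lines, "")        # jump to the line after the match
            if "true" in value_line:
                data = re.split(r"\s{2,}", value_line.strip())
                if len(data) > 9:               # guard against short lines
                    # field positions assumed from the original script
                    fields = [data[4], data[6], data[7], data[8], data[9]]
                    f3.write(" ".join(fields) + "\n")
This drops the manual line counter and the extra readlines() pass that was only there to count lines.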
def print_(self, trv):
    rec = " "
    for line in trv.get_children():
        rec += " "
        for value in trv.item(line)['values']:
            rec += str(value) + " "
    textfile = open('Reportnew.txt', 'w')
    textfile.write(rec)
    textfile.close()
I can barely understand your question because it's not formatted properly.
But if you want to append new lines, use this:
with open('Reportnew.txt', 'a') as textfile: #notice the 'a' instead of 'w'
    textfile.write(rec) #can write "\n" + rec OR rec + "\n" if rec doesn't have \n in it already
Or this:
with open('Reportnew.txt', 'a') as textfile: #notice the 'a' instead of 'w'
    textfile.write("\n")
    textfile.write(rec)
Are you looking for something like this:
def print_(self, trv):
    rec = ""
    for line in trv.get_children():
        rec += " ".join(map(str, trv.item(line)['values'])) + "\n"
    with open("Reportnew.txt", "w") as file:
        file.write(rec)
I used " ".join(...) to join all of the items adding spaces in between. I also added "\n" at the end of each record.
I am having an issue getting the train function to work correctly in Python. I cannot modify the def function. I am at the point where I need the second file to be read one line at a time for PosList, and I need to match the value of movieWordCount[z] in OpenPos. If the word is there, I need to increment column 2 of that line (segmented by a space) by one. If it is not, I need the else branch to append it to the end of the file. It does not work: it does not append the values if they are missing, and I am not sure whether it will find the value if it is there. I have been stuck getting this to work for two days.
Here is the code segment I am working with:
with open("PosList") as OpenPos:
lines = OpenPos.readlines()
print lines
if movieWordCount[z] in lines:
print "found"
#Now use tokenize to split it apart by space and set to new array for me to call column2
else:
print "not found"
lines.append(movieWordCount[z] + " 1" + "\n")
Here is my full code:
#!/usr/bin/python
#Import Counter
import collections
from collections import Counter
#Was already here but pickle is used for data input and export
import math, os, pickle, re

class Bayes_Classifier:
    def __init__(self, trainDirectory = "movie_reviews/"):
        #If file listing exists skip to train
        if os.path.isfile('iFileList'):
            print "file found"
            self.train()
            #self.classify()
        #If file listing does not exist skip to train
        if not os.path.isfile('iFileList'):
            print "no file"
            newfile = 'iFileList'
            tempList = set()
            subDir = './movie_reviews'
            for filenames in os.listdir(subDir):
                my_sub_path = os.path.join(os.sep, subDir, filenames)
                tempList.add(filenames)
                self.save("filenames", "try3")
            f = []
            for fFileObj in os.walk("movie_reviews/"):
                f.extend(fFileObj)
                break
            pickle.dump(f, open("save.p", "wb"))
            self.save(f, "try4")
            with open(newfile, 'wb') as fi:
                pickle.dump(tempList, fi)
            #print tempList
            self.train()
            #self.classify()

    def train(self):
        '''Trains the Naive Bayes Sentiment Classifier.'''
        print "File ready for training"
        #Open iFileList to use as input for opening movie files
        x = 0
        OpenIFileList = open('iFileList', 'r')
        print "iFileList now Open"
        #Loop through the file
        for line in OpenIFileList:
            #print "Ready to read lines"
            #print "reading line " + line
            if x > 4:
                if x % 2 == 0:
                    #print line
                    s = line
                    if '-' in s:
                        comp = s.split("'")
                        #print comp[2]
                        print comp[1] #This is what you need for the movie file
                        compValue1 = comp[1]
                        #Determine Positive/Negative.
                        #compType is the variable I am storing it to.
                        compType = compValue1.split("-", 2)[1]
                        #print compType #Prints that middle value like 5 or 1
                        #This will do the work based on the value.
                        if compType == '5':
                            #print "you have a five" #Confirms the loop I am in.
                            #If file does not exist create it
                            if not os.path.exists('PosList'):
                                print "no file"
                                file('PosList', 'w').close()
                            #Open file that needs to be reviewed for word count
                            compValue2 = "movie_reviews/" + compValue1
                            print compValue2 #Prints the directory and file path
                            OpenMovieList = open(compValue2, 'r')
                            for commentLine in OpenMovieList:
                                commentPositive = commentLine.split(" ")
                                commentPositiveCounter = Counter(commentPositive)
                                #print commentPositiveCounter # "Comment Pos goes here"
                                #if commentLine != '' or commentLine != ' ':
                                #Get first word, second word, ....
                                if commentLine and (not commentLine.isspace()):
                                    movieWordCount = self.tokenize(commentLine)
                                    y = len(movieWordCount) #determines length of string
                                    print y
                                    z = 0
                                    #print movieWordCount[0] # Shows the zero position in the file.
                                    while z < y:
                                        print "position " + str(z) + " word is " + movieWordCount[z] # Shows the word we are at and position id
                                        with open("PosList") as OpenPos:
                                            lines = OpenPos.readlines()
                                            print lines
                                            if movieWordCount[z] in lines:
                                                print "found"
                                            else:
                                                print "not found"
                                                lines.append(movieWordCount)
                                        z = z + 1
                            #Close the files
                            OpenMovieList.close()
                            OpenPos.close()
            x += 1
            #for line2 in OpenIFileList.readlines():
            #for line in open('myfile','r').readlines():
            #    do_something(line)
        #Save results
        #Close the File List
        OpenIFileList.close()

    def loadFile(self, sFilename):
        '''Given a file name, return the contents of the file as a string.'''
        f = open(sFilename, "r")
        sTxt = f.read()
        f.close()
        return sTxt

    def save(self, dObj, sFilename):
        '''Given an object and a file name, write the object to the file using pickle.'''
        f = open(sFilename, "w")
        p = pickle.Pickler(f)
        p.dump(dObj)
        f.close()

    def load(self, sFilename):
        '''Given a file name, load and return the object stored in the file.'''
        f = open(sFilename, "r")
        u = pickle.Unpickler(f)
        dObj = u.load()
        f.close()
        return dObj

    def tokenize(self, sText):
        '''Given a string of text sText, returns a list of the individual tokens that
        occur in that string (in order).'''
        lTokens = []
        sToken = ""
        for c in sText:
            if re.match("[a-zA-Z0-9]", str(c)) != None or c == "\'" or c == "_" or c == '-':
                sToken += c
            else:
                if sToken != "":
                    lTokens.append(sToken)
                    sToken = ""
                if c.strip() != "":
                    lTokens.append(str(c.strip()))
        if sToken != "":
            lTokens.append(sToken)
        return lTokens
To open a file for writing, you can use:
with open('PosList', 'w') as Open_Pos:
As you are using the with form, you do not need to close the file; Python will do that for you at the end of the with-block.
So assuming that the way you add data to the lines variable is correct, you could remove the superfluous code OpenMovieList.close() and OpenPos.close(), and append 2 lines to your code:
with open("PosList") as OpenPos:
lines = OpenPos.readlines()
print lines
if movieWordCount[z] in lines:
print "found"
else:
print "not found"
lines.append(movieWordCount)
with open("PosList", "w") as OpenPos:
OpenPos.write(lines)
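Going one step further, the question also wants column 2 incremented when the word is already present. A minimal sketch of that idea (not the original code; the helper name update_pos_count and the single-space "word count" layout of PosList are assumptions here) reads the file into a dict, bumps the count, and rewrites it:
import os
def update_pos_count(word, path="PosList"):
    # Load the existing "word count" pairs, if the file exists.
    counts = {}
    if os.path.exists(path):
        with open(path) as f:
            for line in f:
                parts = line.split()
                if len(parts) == 2:
                    counts[parts[0]] = int(parts[1])
    # Increment the count for this word, or start it at 1 if it was missing.
    counts[word] = counts.get(word, 0) + 1
    # Rewrite the whole file with the updated counts.
    with open(path, "w") as f:
        for w in counts:
            f.write(w + " " + str(counts[w]) + "\n")
Inside the original while z < y loop, this would replace the readlines/append block with a single update_pos_count(movieWordCount[z]) call.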
The code below reads the data.txt file and prints the records in it.
text_file = open("data.txt", "r")
lines = text_file.readlines()
print (lines)
print (lines)
text_file.close()
def print_all_records(records):
print("Date" + "\t\t" + "Branch" + "\t\t" + "Daily Sale" + "\t\t" + "Transactions")
for record in records:
parts = record.split(",")
print(parts[0] + "\t" + parts[1] + "\t" + "$" + parts[2] + "\t\t" + parts[3])
Example of the information in the data.txt file:
1-2-2014,Frankton,42305.67,23
12-4-2014,Glenview,21922.22,17
10-2-2015,Glenview,63277.9,32
How do I make it so that I can query the records by date? For example, if a user inputs the date 1-2-2014, it would search the data.txt file to find out whether that date exists and then print that line of the record. If it doesn't find anything, it asks the user to try again, repeatedly, until it finds a date that matches a record.
I'm assuming that you use Python 3.
def print_entries(date):
"""Prints all the entries that match with date"""
with open('a.txt', 'r') as f:
flag = False
content = f.readlines()
content = [line.strip('\n').split(',') for line in content]
for row in content:
if row[0] == date:
flag = True
print(*row, sep='\t')
if not flag:
print('Try again')
return flag
while not print_entries(input("Enter date :")):
pass
If you're using Python 2, replace print(*row, sep = '\t') with print('\t'.join(row)).
Running the program -
Enter date :12-4-2014
12-4-2014 Glenview 21922.22 17
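The same lookup could also be sketched with the csv module, which handles the comma splitting for you; the file name data.txt and the date-in-the-first-column layout are taken from the question, everything else is just an illustration:
import csv
def print_entries(date, path='data.txt'):
    """Print every record whose first column matches date; return True if any matched."""
    found = False
    with open(path, newline='') as f:
        for row in csv.reader(f):
            if row and row[0] == date:
                found = True
                print('\t'.join(row))
    if not found:
        print('Try again')
    return found
The while loop from the answer above works unchanged with this version.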
I am a little new to Python and I am trying to write this script to cancel print jobs over 1 MB (the line where it checks the size is set to 1 MB just to make sure it is working). For some reason my last else statement keeps saying it has invalid syntax. I checked whether all parentheses were closed and I could not find an unmatched pair. Could someone please tell me why it says it is invalid? Also, can you take a look at my line 24 (fullname = ...grep...) to make sure the syntax on that is correct?
#! /usr/bin/python
import os
infile = open ('test.pl', 'r')
outfile = open('print.reportpython', 'w+')
newfile = infile.readlines()
newfile.pop(0)
count = 0
firstline = newfile[0]
splitline = firstline.split()
currentuser = splitline[1]
currentuser = str(currentuser)
for line in newfile:
    newline = line.split()
    names = newline[1]
    size = int(newline[2])
    names = str(names)
    print names
    if names is currentuser:
        if size >= 1:
            os.popen ("cancel lab01-10292")
            fullname = os.popen("cat /etc/passwd |grep " + newline[1] + "cut -d':' -f5")
    count += 1
    print count
    else:
        print outfile.write ("(" + currentuser + ")")
        print outfile.write (" ")
        count = 0
        currentuser = names
You do:
if foo:
    bar
baz
else:
    bomb
Which is wrong. All the lines between the if and its corresponding else must be indented deeper than the if and else, like this:
if foo:
    bar
    baz
else:
    bomb
The else is at the same indentation as the previous line, but the statement on the previous line doesn't have an else clause. Fix your indentation.
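Applied to the loop in the question, one plausible corrected layout looks like the following. This is only a sketch of the idea, not necessarily the asker's exact intent: the else is assumed to belong to the if names == currentuser: test, == replaces is for the string comparison, and a missing | is assumed before cut in the shell command:
#!/usr/bin/python
import os
infile = open('test.pl', 'r')
outfile = open('print.reportpython', 'w+')
newfile = infile.readlines()
newfile.pop(0)
count = 0
currentuser = str(newfile[0].split()[1])
for line in newfile:
    newline = line.split()
    names = str(newline[1])
    size = int(newline[2])
    print names
    if names == currentuser:
        if size >= 1:
            os.popen("cancel lab01-10292")
            # look up the full name; the pipe before cut is assumed to be what was intended
            fullname = os.popen("cat /etc/passwd | grep " + newline[1] + " | cut -d':' -f5").read()
        count += 1
        print count
    else:
        outfile.write("(" + currentuser + ")")
        outfile.write(" ")
        count = 0
        currentuser = names
infile.close()
outfile.close()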
I am using Python 2.7.9. I'm working on a program that is supposed to produce the following output in a .csv file per loop:
URL,number
Here's the main loop of the code I'm using:
csvlist = open(listfile,'w')
f = open(list, "r")
def hasQuality(item):
    for quality in qualities:
        if quality in item:
            return True
    return False

for line in f:
    line = line.split('\n')
    line = line[0]
    # print line
    itemname = urllib.unquote(line).decode('utf8')
    # print itemhash
    if hasQuality(itemname):
        try:
            looptime = time.time()
            url = baseUrl + line
            results = json.loads(urlopen(url).read())
            # status = results.status_code
            content = results
            if 'median_price' in content:
                medianstr = str(content['median_price']).replace('$','')
                medianstr = medianstr.replace('.','')
                median = float(medianstr)
                volume = content['volume']
                print url+'\n'+itemname
                print 'Median: $'+medianstr
                print 'Volume: '+str(volume)
                if (median > minprice) and (volume > minvol):
                    csvlist.write(line + ',' + medianstr + '\n')
                    print '+ADDED TO LIST'
            else:
                print 'No median price given for '+itemname+'.\nGiving up on item.'
            print "Finished loop in " + str(round(time.time() - looptime,3)) + " seconds."
        except ValueError:
            print "we blacklisted fool?? cause we skippin beats"
    else:
        print itemname+'is a commodity.\nGiving up on item.'
csvlist.close()
f.close()
print "Finished script in " + str(round(time.time() - runtime, 3)) + " seconds."
It should be generating a list that looks like this:
AWP%20%7C%20Asiimov%20%28Field-Tested%29,3911
M4A1-S%20%7C%20Hyper%20Beast%20%28Field-Tested%29,4202
But it's actually generating a list that looks like this:
AWP%20%7C%20Asiimov%20%28Field-Tested%29
,3911
M4A1-S%20%7C%20Hyper%20Beast%20%28Field-Tested%29
,4202
Whenever it is run on a Windows machine, I have no issue. Whenever I run it on my EC2 instance, however, it adds that extra newline. Any ideas why? Running commands on the file like
awk 'NR%2{printf $0" ";next;}1' output.csv
do not do anything. I have transferred it to my Windows machine and it still reads the same. However, when I paste the output into Steam's chat client it concatenates it in the way that I want.
Thanks in advance!
This is where the problem occurs
code:
csvlist.write(line + ',' + medianstr + '\n')
This can be fixed if you strip the whitespace from the line.
Modified code:
csvlist.write(line.strip() + ',' + medianstr + '\n')
Problem:
The problem is due to the fact that you are reading raw lines from the input file. A raw line keeps the trailing \n that marks the end of every line except possibly the last, which simply ends with its final character.
For more details, just add print(repr(line)) before the write and look at the output.
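A quick way to see this: repr() shows trailing newline or carriage-return characters that a plain print hides. A tiny illustration (the literal below is just an example value, not the actual data):
line = "AWP%20%7C%20Asiimov%20%28Field-Tested%29\n"
print(repr(line))          # 'AWP%20%7C%20Asiimov%20%28Field-Tested%29\n'  -- the trailing \n is visible
print(repr(line.strip()))  # 'AWP%20%7C%20Asiimov%20%28Field-Tested%29'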