Implement the function isWhiteLine(), which takes a string and returns True if the string contains only whitespace (space and tab) characters. The program should read a file given as a command-line argument and print only the non-blank lines to standard output.
import sys

def isWhiteLine(x):
    return x.isspace()

file_name = sys.argv[1]
f = open(file_name, "r")
for i in f:
    if not isWhiteLine(i):
        print(i.strip())
f.close()
The code below should do what you're looking for.
import sys

def print_non_empty_lines(file_name):
    with open(file_name, 'r') as f:
        for line in f:
            line = line.strip()
            if line:
                print(line)

if __name__ == "__main__":
    if len(sys.argv) > 1:
        print_non_empty_lines(sys.argv[1])
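One detail worth knowing: str.isspace() returns False for an empty string:

>>> " \t\n".isspace()
True
>>> "".isspace()
False

That doesn't bite here, since iterating a file never yields empty strings, but it matters if you split the text on '\n' instead; stripping the line and testing its truthiness, as the answer above does, avoids the issue entirely.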
I have a file with one column. How do I delete repeated lines in a file?
On Unix/Linux, use the uniq command, as per David Locke's answer, or sort, as per William Pursell's comment.
If you need a Python script:
lines_seen = set()  # holds lines already seen
outfile = open(outfilename, "w")
for line in open(infilename, "r"):
    if line not in lines_seen:  # not a duplicate
        outfile.write(line)
        lines_seen.add(line)
outfile.close()
Update: The sort/uniq combination will remove duplicates but return a file with the lines sorted, which may or may not be what you want. The Python script above won't reorder lines, but just drop duplicates. Of course, to get the script above to sort as well, just leave out the outfile.write(line) and instead, immediately after the loop, do outfile.writelines(sorted(lines_seen)).
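That sorted variant would look like this (a sketch, reusing the names from the script above):

lines_seen = set()
with open(infilename, "r") as infile:
    for line in infile:
        lines_seen.add(line)
with open(outfilename, "w") as outfile:
    outfile.writelines(sorted(lines_seen))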
If you're on *nix, try running the following command:
sort <file name> | uniq
uniqlines = set(open('/tmp/foo').readlines())
This will give you the set of unique lines.
Writing that back to some file would be as easy as:
bar = open('/tmp/bar', 'w')
bar.writelines(uniqlines)
bar.close()
You can do:
import os
os.system("awk '!x[$0]++' /path/to/file > /path/to/rem-dups")
Here you are using a shell command from within Python :)
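If you'd rather avoid os.system, here is a sketch of the same awk call via the subprocess module (the file paths are placeholders):

import subprocess

# run awk and redirect its stdout into the output file
with open('/path/to/rem-dups', 'w') as out:
    subprocess.run(['awk', '!x[$0]++', '/path/to/file'], stdout=out, check=True)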
There is also another way:
with open('/tmp/result.txt') as result:
    uniqlines = set(result.readlines())
with open('/tmp/rmdup.txt', 'w') as rmdup:
    rmdup.writelines(uniqlines)
Read all your lines into a list, make a set of the lines, and you are done.
for example,
>>> x = ["line1","line2","line3","line2","line1"]
>>> list(set(x))
['line3', 'line2', 'line1']
>>>
If you need to preserve the ordering of lines - as a set is an unordered collection - try this:
y = []
for l in x:
    if l not in y:
        y.append(l)
and write the content back to the file.
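On Python 3.7+, where dicts are guaranteed to preserve insertion order, an equivalent order-preserving one-liner is:

y = list(dict.fromkeys(x))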
It's a rehash of what's already been said here - here's what I use.
import optparse

def removeDups(inputfile, outputfile):
    lines = open(inputfile, 'r').readlines()
    lines_set = set(lines)
    out = open(outputfile, 'w')
    for line in lines_set:
        out.write(line)
    out.close()

def main():
    parser = optparse.OptionParser('usage %prog ' +
                                   '-i <inputfile> -o <outputfile>')
    parser.add_option('-i', dest='inputfile', type='string',
                      help='specify your input file')
    parser.add_option('-o', dest='outputfile', type='string',
                      help='specify your output file')
    (options, args) = parser.parse_args()
    inputfile = options.inputfile
    outputfile = options.outputfile
    if (inputfile is None) or (outputfile is None):
        print(parser.usage)
        exit(1)
    else:
        removeDups(inputfile, outputfile)

if __name__ == '__main__':
    main()
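Note that optparse has been deprecated since Python 2.7; a sketch of the same command-line handling with argparse (reusing removeDups from above):

import argparse

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', dest='inputfile', required=True,
                        help='specify your input file')
    parser.add_argument('-o', dest='outputfile', required=True,
                        help='specify your output file')
    args = parser.parse_args()
    removeDups(args.inputfile, args.outputfile)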
Python one-liner:
python3 -c "import sys; lines = sys.stdin.readlines(); print(''.join(sorted(set(lines))))" < InputFile > OutputFile
Adding to David Locke's answer, on *nix systems you can run
sort -u messy_file.txt > clean_file.txt
which will create clean_file.txt with duplicates removed, sorted alphabetically.
Here's a script I created to remove duplicate emails from text files. Hope this helps!
# function to remove duplicate emails
def remove_duplicate():
    # read emails.txt as one long string
    with open('emails.txt', 'r') as f:
        emails = f.read()
    # .split() breaks the string on whitespace, returning a list
    emails = emails.split()
    # empty list to store non-duplicate e-mails
    clean_list = []
    # append each email to the clean list only the first time it is seen
    for email in emails:
        if email not in clean_list:
            clean_list.append(email)
    return clean_list

# write the de-duplicated emails to no_duplicate_emails.txt
with open('no_duplicate_emails.txt', 'w') as no_duplicate_emails:
    for email in remove_duplicate():
        # .strip(',') removes stray commas around each address
        email = email.strip(',')
        no_duplicate_emails.write(f"E-mail: {email}\n")
If anyone is looking for a solution that uses hashing and is a little more flashy, this is what I currently use:
import os

def remove_duplicate_lines(input_path, output_path):
    if os.path.isfile(output_path):
        raise OSError('File at {} (output file location) exists.'.format(output_path))
    with open(input_path, 'r') as input_file, open(output_path, 'w') as output_file:
        seen_lines = set()

        def add_line(line):
            seen_lines.add(line)
            return line

        output_file.writelines(add_line(line) for line in input_file
                               if line not in seen_lines)
To edit the file in place:
lines_seen = set()  # holds lines already seen
with open("file.txt", "r+") as f:
    d = f.readlines()
    f.seek(0)
    for i in d:
        if i not in lines_seen:
            f.write(i)
            lines_seen.add(i)
    f.truncate()
Readable and Concise
with open('sample.txt') as fl:
    content = fl.read().split('\n')

content = {line for line in content if line != ''}
content = '\n'.join(content)

with open('sample.txt', 'w') as fl:
    fl.writelines(content)
Here is my solution
if __name__ == '__main__':
    f = open('temp.txt', 'w+')
    flag = False
    with open('file.txt') as fp:
        for line in fp:
            # scan everything written to the temp file so far
            for temp in f:
                if temp == line:
                    flag = True
                    print('Found Match')
                    break
            if not flag:
                f.write(line)
            else:
                flag = False
            f.seek(0)  # rewind so the next scan starts from the top
    f.close()
To filter out non-alphabetic lines and remove duplicate values from the file:
grep -E '^[a-zA-Z]+$' <filename> | sort -u > outfile.txt
Here is my solution
d = input("your file:")  # write your file name here
file1 = open(d, mode="r")
open('file2.txt', mode='w').close()  # create/truncate the output file

file1row = file1.readline()
while file1row != "":
    file2 = open('file2.txt', mode='a')
    file2read = open('file2.txt', mode='r')
    file2r = file2read.read().strip()
    if file1row.strip() not in file2r:
        file2.write(file1row)
    file1row = file1.readline()
    file2read.close()
    file2.close()
file1.close()
def read_files(file_allmail, file_blacklist):
    with open("all_email_large.txt", 'r') as f:
        allmail = f.read().splitlines()
    with open("bkacklist_large.txt", 'r') as f1:
        blacklist = f1.read().splitlines()
    return allmail, blacklist
Instead of pre-selecting the documents, I would like to be able to pass them as command-line parameters. In other words, I want to be able to choose which documents Python should read into lists.
You probably want argparse: https://docs.python.org/3/howto/argparse.html
Changed slightly, but from the docs:
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--file_allmail", help="doc to open")
parser.add_argument("--file_blacklist", help="doc to open")
args = parser.parse_args()

def read_files(file_allmail, file_blacklist):
    with open(file_allmail, 'r') as f:
        allmail = f.read().splitlines()
    with open(file_blacklist, 'r') as f1:
        blacklist = f1.read().splitlines()
    return allmail, blacklist

if __name__ == '__main__':
    allmail, blacklist = read_files(args.file_allmail, args.file_blacklist)
    print(allmail)
    print(blacklist)
You call it with:
python your_python.py --file_allmail all_email_large.txt --file_blacklist bkacklist_large.txt
If you alter your function, then you can simply call read_files('filename1.txt','filename2.txt') at any point for any text files.
def read_files(file_allmail, file_blacklist):
    with open(file_allmail, 'r') as f:
        allmail = f.read().splitlines()
    with open(file_blacklist, 'r') as f1:
        blacklist = f1.read().splitlines()
    return allmail, blacklist
I have a long script that is full of lines like this
[UBInt8.parse, b"\x01", 0x01, None],
[UBInt8.build, 0x01, b"\x01", None],
I need to use regular expressions to turn them into
assert UBInt8.parse(b"\x01") == 0x01
assert UBInt8.build(0x01) == b"\x01"
Lists are always of length 4. The 1st element is a method, the 2nd is its argument, the 3rd is the return value, and the 4th is always None. I already used regex to solve a similar problem (someone provided the parser) but I need help writing the formatting string:
See Removing six.b from multiple files. This is the code I used before; the formatting expression needs to be rewritten, and I don't speak regex. :(
import re
import os

indir = 'files'
for root, dirs, files in os.walk(indir):
    for f in files:
        fname = os.path.join(root, f)
        with open(fname) as f:
            txt = f.read()
        txt = re.sub(r'six\.(b\("[^"]*"\))', r'\1', txt)
        with open(fname, 'w') as f:
            f.write(txt)
        print(fname)
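For the four-element lists shown above, a substitution along these lines might work - a sketch that assumes the arguments never contain commas (the same assumption the manual parser below makes):

import re

line = '    [UBInt8.parse, b"\\x01", 0x01, None],'
# groups: method, argument, expected return value; the trailing None is dropped
pattern = r'\[([\w.]+),\s*(.+?),\s*(.+?),\s*None\],?'
print(re.sub(pattern, r'assert \1(\2) == \3', line))
# prints (indentation preserved): assert UBInt8.parse(b"\x01") == 0x01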
Here is the manual parsing that I came up with. No regex.
#!/usr/bin/python3
import sys

def processfile(fname):
    print(fname + '... ')
    with open(fname, 'rt') as f:
        txt = f.readlines()
    with open(fname + "-trans", 'wt') as f:
        for line in txt:
            items = list(map(str.strip, line.strip().strip(",[]").split(",")))
            if len(items) == 4:
                if items[1] == "None":
                    items[1] = ""
                if items[3] == "None":
                    o = "assert {0}({1}) == {2}".format(*items)
                else:
                    if items[1] == "":
                        o = "assert raises({0}) == {3}".format(*items)
                    else:
                        o = "assert raises({0}, {1}) == {3}".format(*items)
                f.write("    " + o + "\n")
            else:
                f.write(line)

processfile(sys.argv[1])
I have a file named mcelog.conf and I am reading this file in my code. The contents of the file are:
no-syslog = yes # (or no to disable)
logfile = /tmp/logfile
The program reads the mcelog.conf file and checks for the no-syslog tag; if no-syslog = yes, the program has to check for the logfile tag and read its value. Can anyone let me know how I can get the value /tmp/logfile?
with open('/etc/mcelog/mcelog.conf', 'r+') as fp:
    for line in fp:
        if re.search("no-syslog =", line) and re.search("= no", line):
            memoryErrors = readLogFile("/var/log/messages")
            mcelogPathFound = True
            break
        elif re.search("no-syslog =", line) and re.search("= yes", line):
            continue
        elif re.search("logfile =", line):
            memoryErrors = readLogFile(line)  # Here I want to pass the value "/tmp/logfile" but currently "logfile = /tmp/logfile" is getting passed
            mcelogPathFound = True
            break
You can just split the line to get the value you want:
line.split(' = ')[1]
However, you might want to look at the documentation for the configparser module.
Change the code to:
with open('/etc/mcelog/mcelog.conf', 'r+') as fp:
    for line in fp:
        if re.search("no-syslog =", line) and re.search("= no", line):
            memoryErrors = readLogFile("/var/log/messages")
            mcelogPathFound = True
            break
        elif re.search("no-syslog =", line) and re.search("= yes", line):
            continue
        elif re.search("logfile =", line):
            memoryErrors = readLogFile(line.split("=")[1].strip())  # passes just "/tmp/logfile"
            mcelogPathFound = True
            break
This is because you want to read only part of the line rather than the whole thing, so I have split it on the "=" sign and then stripped the result to remove any surrounding whitespace.
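For example:

>>> "logfile = /tmp/logfile".split("=")[1].strip()
'/tmp/logfile'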
I liked the suggestion of the configparser module, so here is an example of that (Python 3).
For the given input, it will output reading /tmp/logfile
import configparser, itertools

# inline_comment_prefixes is needed so the "# (or no to disable)" comment is stripped
config = configparser.ConfigParser(inline_comment_prefixes=('#',))
filename = "/tmp/mcelog.conf"

def readLogFile(filename):
    if filename:
        print("reading", filename)
    else:
        raise ValueError("unable to read file")

# mcelog.conf has no section header, so prepend a fake [global] section
section = 'global'
with open(filename) as fp:
    config.read_file(itertools.chain(['[{}]'.format(section)], fp), source=filename)

logfile = None
no_syslog = config[section]['no-syslog']
if no_syslog == 'no':
    logfile = "/var/log/messages"
elif no_syslog == 'yes':
    logfile = config[section]['logfile']

if logfile:
    mcelogPathFound = True
    memoryErrors = readLogFile(logfile)
#!/usr/bin/env python
import sys
import binascii
import string

sample = "foo.apples"
data_file = open("file1.py", "r")
dat_file = open("file2.txt", "w")
for line in data_file:
    if sample in line:
        dat_file.writelines(line)
dat_file.close()
When I do this I am able to find the string foo.apples. The problem is that foo.apples is present on various lines in the Python file. I want only those lines that are inside a particular function; I need the lines within this def:
Example:
def start():
    foo.apples(a,b)
    foo.apples(c,d)
    # ... and so on
The following program finds defs and appends lines containing the sample string to the output file while the indentation remains within the def.
import re

sample = 'foo.apples'
data_file = open("file1.py", "r")
out_file = open("file2.txt", "w")
within_def = False
def_indent = 0

for line in data_file:
    def_match = re.match(r'(\s*)def\s+start\s*\(', line)  # EDIT: fixed regex
    if def_match and not within_def:
        within_def = True
        def_indent = len(def_match.group(1))
    elif within_def and re.match(r'\s{%s}\S' % def_indent, line):
        within_def = False
    if within_def and sample in line:
        out_file.writelines(line)

out_file.close()
data_file.close()
Tested working on an example file1.py.
One, slightly off the beaten path approach to this would be to use the getsource method of the inspect module. Consider the following (theoretical) test1.py file:
class foo(object):
    apples = 'granny_smith'

    @classmethod
    def new_apples(cls):
        cls.apples = 'macintosh'

def start():
    """This is a pretty meaningless python function.
    Attempts to run it will definitely result in an exception being thrown"""
    print(foo.apples)
    foo.apples = 3
    [x for x in range(10)]
    import bar as foo
Now you want to know about the start code:
import inspect
import test1  # assume it is somewhere that can be imported

print(inspect.getsource(test1.start))
Ok, now we have only the source of that function. We can now parse through that:
for line in inspect.getsource(test1.start).splitlines():
    if 'foo.apples' in line:
        print(line)
There are some advantages here - Python does all the work of parsing out the function blocks when it imports the file. The downside, though, is that the file actually needs to be imported. Depending on where your files are coming from, this could introduce a HUGE security hole in your program - you'll be running (potentially) untrusted code.
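If importing untrusted code is a concern, the ast module can locate the function body without executing anything. A minimal sketch, assuming Python 3.8+ for ast.get_source_segment:

import ast

source = open('file1.py').read()
tree = ast.parse(source)
for node in ast.walk(tree):
    # find the def named 'start' and scan only its source segment
    if isinstance(node, ast.FunctionDef) and node.name == 'start':
        for line in ast.get_source_segment(source, node).splitlines():
            if 'foo.apples' in line:
                print(line)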
Here's a very non-Pythonic way, untested, but it should work.
sample = "foo.apples"
infile = open("file1.py", "r")
outfile = open("file2.txt", "w")

in_function = False
for line in infile.readlines():
    if in_function:
        if line[0] in (" ", "\t"):
            if sample in line:
                outfile.write(line)
        else:
            in_function = False
    elif line.strip() == "def start():":
        in_function = True

infile.close()
outfile.close()
I would suggest making a function out of this, which takes the word, the input file, and the function we're supposed to search in as its parameters. It would then return a list of all the lines that contain the text.
def findFromFile(file, word, function):
    in_function = False
    matches = []
    infile = open(file, "r")
    for line in infile.readlines():
        if in_function:
            if line[0] in (" ", "\t"):
                if word in line:
                    matches.append(line)
            else:
                in_function = False
        elif line.strip() == "def %s():" % function:
            in_function = True
    infile.close()
    return matches
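Usage might then look like this (the file name, search word, and function name are just the examples from the question):

matches = findFromFile("file1.py", "foo.apples", "start")
print("".join(matches))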