How to find a phrase in a large text file in Python? - python

I am trying to write an algorithm to find a phrase with words on different lines in a big text file using Python.
The file contents are as follows
fkerghiohgeoihhgergerig ooetbjoptj
enbotobjeob hi how
are you lerjgoegjepogjejgpgrg]
ekrngeigoieghetghehtigehtgiethg
ieogetigheihietipgietigeitgegitie
.......
The algorithm should search for the phrase "hi how are you" and return True in this case.
Since, the file can be huge, all file contents cannot be read at once

You can read the file one character at a time and change line feeds to spaces. Then its just a question of running down the list of wanted characters.
def find_words(text, fileobj):
i = 0
while True:
c = fileobj.read(1)
if not c:
break
if c == "\n": # python combines \r\n
c = " "
if c != text[i]:
i = 0
if c == text[i]:
i += 1
if i == len(text):
return True
return False
If you want to be a little more liberal about whitespace and case sensitivity, you could remove all whitespace and lower case everything before the compare.
import re
import itertools
from string import whitespace
def find_words(text, fileobj):
chars = list(itertools.chain.from_iterable(re.split(r"\s+", text.lower())))
i = 0
while True:
c = fileobj.read(1)
if not c:
break
c = c.lower()
if c in whitespace:
continue
if c != chars[i]:
i = 0
if c == chars[i]:
i += 1
if i == len(chars):
return True
return False

Here is one way to solve the problem:
import re
def find_phrase():
phrase = "hi how are you"
words = dict(zip(phrase.split(), [False]*len(phrase.split())))
with open("data.txt", "r") as f:
for line in f:
for word in words:
if re.search( r"\b" + word + r"\b", line):
words[word] = True
if all(words.values()):
return True
return False
EDIT:
def find_phrase():
phrase = "hi how are you"
with open("data.txt", "r") as f:
for line in f:
if phrase in line:
return True
return False

If it is "pretty large" file, then access the lines sequentially and don't read the whole file into memory:
with open('largeFile', 'r') as inF:
for line in inF:
if 'myString' in line:
# do_something
break
Edit:
Since the words of the string can be on consecutive lines you would want to use a counter to keep a track of words iterated. For example,
counter = 0
words_list = ["hi","hello","how"]
with open('largeFile', 'r') as inF:
for line in inF:
# print( words_list[counter] ,line)
if words_list[counter] in line and len(line.split()) == 1 :
counter +=1
else:
counter = 0
if counter == len(words_list):
print ("here")
break;
Text File
fkerghiohgeoihhgergerig ooetbjoptj enbotobjeob
hi
hello
how
goegjepogjejgpgrg] ekrngeigoieghetghehtigehtgiethg ieoge
It gives the output here since the consecutive words are found

Related

Count and store in a file

counts the occurrences of letter a in the first 200 characters in the file characters.txt
the result should get stored inside a new folder with a txt file
Example:
characters.txt: abcdefghijklmnopqerstuvwxzy
so there is 1 occurrence of g
then "1" should be stored in foulder/file.txt
file = open(filename, "r")
text = file.read()
count = 0
for char in text:
if char == letter:
count += 1
os.mkdir("g")
f = open("res.txt", mode = "w")
f.write(count)
f.close
Your code works, but in the samples provided you dont call it.
I made a local version without your file code.
def letterFrequency(letter):
count = 0
for char in 'abcdefghijklmnopqerstuvwxzy':
if char == letter:
count += 1
return count
print(letterFrequency('g'))
If you only want to search the first 200 character of a file you should use a while loop. Also you will need to account for rows with less than 200 characters.
I modified your given example and added some improvements.
The code below is a minimal working example:
import os
file = open("./Desktop/text.txt", "r")
text = file.read()
count = 0
letter = "g"
if len(text) < 200:
text = text[0:199]
for char in text:
if char == letter:
count += 1
try:
os.mkdir("./Desktop/DIR")
except FileExistsError:
print("Dir already exists")
f = open("./Desktop/DIR/res.txt", "w")
f.write(str(count))

List index out of bounds when reading CSV File

I am trying to simply process some twitter data in which I want to count the most frequent words produced in the dataset.
However, I keep getting the following error on Line 45:
IndexError Traceback (most recent call last) <ipython-input 346-f03e745247f4> in <module>()
43 for line in f:
44 parts = re.split("^\d+\s", line)
45 tweet = re.split("\s(Status)", parts[-1])[10]
46 tweet = tweet.replace("\\n"," ")
47 terms_all = [term for term in process_tweet(tweet)]
IndexError: list index out of range
I have added my full code for review, can someone please advise.
import codecs
import re
from collections import Counter
from nltk.corpus import stopwords
word_counter = Counter()
def punctuation_symbols():
return [".", "", "$","%","&",";",":","-","&","?"]
def is_rt_marker(word):
if word == "b\"rt" or word == "b'rt" or word == "rt":
return True
return False
def strip_quotes(word):
if word.endswith(""):
word = word[0:-1]
if word.startswith(""):
word = word[1:]
return word
def process_tweet(tweet):
keep = []
for word in tweet.split(" "):
word = word.lower()
word = strip_quotes(word)
if len(word) == 0:
continue
if word.startswith("https"):
continue
if word in stopwords.words('english'):
continue
if word in punctuation_symbols():
continue
if is_rt_marker(word):
continue
keep.append(word)
return keep
with codecs.open("C:\\Users\\XXXXX\\Desktop\\USA_TWEETS-out.csv", "r", encoding="utf-8") as f:
n = 0
for line in f:
parts = re.split("^\d+\s", line)
tweet = re.split("\s(Status)", parts[1])[0]
tweet = tweet.replace("\\n"," ")
terms_all = [term for term in process_tweet(tweet)]
word_counter.update(terms_all)
n += 1
if n == 50:
break
print(word_counter.most_common(10))
parts = re.split("^\d+\s", line)
tweet = re.split("\s(Status)", parts[1])[0]
These are likely the problematic lines.
You assume that parts did split and has more than 1 element. Splitting can fail to find the split-by string in line, so parts becomes equal to [line]. Then parts[1] crashes.
Add a check before the second line. Print the line value to better understand what happens.

How do I count the number of lines that are full-line comments in python?

I'm trying to create a function that accepts a file as input and prints the number of lines that are full-line comments (i.e. the line begins with #followed by some comments).
For example a file that contains say the following lines should print the result 2:
abc
#some random comment
cde
fgh
#another random comment
So far I tried along the lines of but just not picking up the hash symbol:
infile = open("code.py", "r")
line = infile.readline()
def countHashedLines(filename) :
while line != "" :
hashes = '#'
value = line
print(value) #here you will get all
#if(value == hashes): tried this but just wasn't working
# print("hi")
for line in value:
line = line.split('#', 1)[1]
line = line.rstrip()
print(value)
line = infile.readline()
return()
Thanks in advance,
Jemma
I re-worded a few statements for ease of use (subjective) but this will give you the desired output.
def countHashedLines(lines):
tally = 0
for line in lines:
if line.startswith('#'): tally += 1
return tally
infile = open('code.py', 'r')
all_lines = infile.readlines()
num_hash_nums = countHashedLines(all_lines) # <- 2
infile.close()
...or if you want a compact and clean version of the function...
def countHashedLines(lines):
return len([line for line in lines if line.startswith('#')])
I would pass the file through standard input
import sys
count = 0
for line in sys.stdin: """ Note: you could also open the file and iterate through it"""
if line[0] == '#': """ Every time a line begins with # """
count += 1 """ Increment """
print(count)
Here is another solution that uses regular expressions and will detect comments that have white space in front.
import re
def countFullLineComments(infile) :
count = 0
p = re.compile(r"^\s*#.*$")
for line in infile.readlines():
m = p.match(line)
if m:
count += 1
print(m.group(0))
return count
infile = open("code.py", "r")
print(countFullLineComments(infile))

How to search for string within another string?

I am trying to create a simple word search program.
I have successfully opened an external file that contains the grid of the word search. I also have successfully opened a file that contains the words that are to be searched for. I have stored every line of the grid in a list and every word from the file in a list called words[].
I am attempting to search for the words in each line of the grid. My code currently does not search for the word in each line of the grid.
gridlines_horizontal = []
gridlines_vertical = []
words = []
not_found = []
found_words = {}
def puzzle(fname) :
print ""
for line in f :
gridlines_horizontal.append(line)
for line in gridlines_horizontal :
print line,
for item in zip(*(gridlines_horizontal[::-1])):
gridlines_vertical.append(item)
Here I am trying to get each word in words[] one at a time and see if the word is in any of the lines of the word search grid. If the word is present in any of the lines I am then trying to print the word. The code currently does not do this.
def horizontal_search(word,gridlines_horizontal) :
x = 0
for line in gridlines_horizontal :
if words[0] in line or words[0] in line[::-1]:
found_words.update({words[0]:" "})
print words[0]
else :
not_found.append(words)
x = x + 1
def vertical_search(word,gridlines_vertical):
x = 0
for line in gridlines_vertical:
if words[x] in line or words[x] in line[::-1]:
print words[0]
found_words.update({words[x]:" "})
else:
not_found.append(words[x])
x = x + 1
while True:
try:
fname = input("Enter a filename between double quotation marks: ")
with open(fname) as f:
puzzle(fname)
break
except IOError as e :
print""
print("Problem opening file...")
print ""
while True:
try:
fname2 = input("Enter a filename for your words between double quotation marks: ")
with open(fname2) as f:
for line in f:
words.append(line)
""" line in words:
line = lin """
break
except IOError as e :
print("")
print("Problem opening file...")
There are a couple mistakes in your code:
- You aren't being consistent in using words[x], in your code you would want to replace every words[0] with words[x] BUT
- this isn't necessary because you can use nested 'for' loops.
So for horizontal search:
def horizontal_search(words,gridlines_horizontal):
for word in words:
for line in gridlines_horizontal:
if word in line or word in line[::-1]:
found_words.update({word : " "})
print(word)
break
else:
not_found.append(word)
Did you look at find?
a = 'this is a string'
b = 'string'
if (a.find(b) > -1):
print 'found substring in string'
else:
print 'substring not found in string'
Live demo of above code
EDIT:
I am not sure if its a typo, but you are passing word as parameter instead of words
def horizontal_search(word,gridlines_horizontal) :
x = 0 ^----------------------------------
for line in gridlines_horizontal : |
if words[0] in line or words[0] in line[::-1]: |
^-- see here <------------not matching here -----
Similar issue with def vertical_search(words,gridlines_vertical) :

Replace four letter word in python

I am trying to write a program that opens a text document and replaces all four letter words with **. I have been messing around with this program for multiple hours now. I can not seem to get anywhere. I was hoping someone would be able to help me out with this one. Here is what I have so far. Help is greatly appreciated!
def censor():
filename = input("Enter name of file: ")
file = open(filename, 'r')
file1 = open(filename, 'w')
for element in file:
words = element.split()
if len(words) == 4:
file1 = element.replace(words, "xxxx")
alist.append(bob)
print (file)
file.close()
here is revised verison, i don't know if this is much better
def censor():
filename = input("Enter name of file: ")
file = open(filename, 'r')
file1 = open(filename, 'w')
i = 0
for element in file:
words = element.split()
for i in range(len(words)):
if len(words[i]) == 4:
file1 = element.replace(i, "xxxx")
i = i+1
file.close()
for element in file:
words = element.split()
for word in words:
if len(word) == 4:
etc etc
Here's why:
say the first line in your file is 'hello, my name is john'
then for the first iteration of the loop: element = 'hello, my name is john'
and words = ['hello,','my','name','is','john']
You need to check what is inside each word thus for word in words
Also it might be worth noting that in your current method you do not pay any attention to punctuation. Note the first word in words above...
To get rid of punctuation rather say:
import string
blah blah blah ...
for word in words:
cleaned_word = word.strip(string.punctuation)
if len(cleaned_word) == 4:
etc etc
Here is a hint: len(words) returns the number of words on the current line, not the length of any particular word. You need to add code that would look at every word on your line and decide whether it needs to be replaced.
Also, if the file is more complicated than a simple list of words (for example, if it contains punctuation characters that need to be preserved), it might be worth using a regular expression to do the job.
It can be something like this:
def censor():
filename = input("Enter name of file: ")
with open(filename, 'r') as f:
lines = f.readlines()
newLines = []
for line in lines:
words = line.split()
for i, word in enumerate(words):
if len(word) == 4:
words[i] == '**'
newLines.append(' '.join(words))
with open(filename, 'w') as f:
for line in newLines:
f.write(line + '\n')
def censor(filename):
"""Takes a file and writes it into file censored.txt with every 4-letterword replaced by xxxx"""
infile = open(filename)
content = infile.read()
infile.close()
outfile = open('censored.txt', 'w')
table = content.maketrans('.,;:!?', ' ')
noPunc = content.translate(table) #replace all punctuation marks with blanks, so they won't tie two words together
wordList = noPunc.split(' ')
for word in wordList:
if '\n' in word:
count = word.count('\n')
wordLen = len(word)-count
else:
wordLen = len(word)
if wordLen == 4:
censoredWord = word.replace(word, 'xxxx ')
outfile.write(censoredWord)
else:
outfile.write(word + ' ')
outfile.close()

Categories