I am trying to write a job that takes in a text file, counts the number of syllables in each word, and ultimately returns the top 10 words with the most syllables. I'm able to get all of the word/syllable pairs sorted in descending order; however, I am struggling to figure out how to return only the top 10 words. Here's my code so far:
from mrjob.job import MRJob
from mrjob.step import MRStep
import re

WORD_RE = re.compile(r"[\w']+")

class MRMostUsedWordSyllables(MRJob):

    def steps(self):
        return [
            MRStep(mapper=self.word_splitter_mapper,
                   reducer=self.sorting_word_syllables),
            MRStep(reducer=self.reducer_word_sorted),
            MRStep(reducer=self.get_top_10_reducer)
        ]

    def word_splitter_mapper(self, _, line):
        # for word in line.split():
        for word in WORD_RE.findall(line):
            yield (word.lower(), None)

    def sorting_word_syllables(self, word, count):
        count = 0
        vowels = 'aeiouy'
        word = word.lower().strip()
        if word in vowels:
            count += 1
        for index in range(1, len(word)):
            if word[index] in vowels and word[index - 1] not in vowels:
                count += 1
        if word.endswith('e'):
            count -= 1
        if word.endswith('le'):
            count += 1
        if count == 0:
            count += 1
        yield None, (int(count), word)

    def reducer_word_sorted(self, _, syllables_counts):
        for count, word in sorted(syllables_counts, reverse=True):
            yield (int(count), word)

    def get_top_10_reducer(self, count, word):
        self.aList = []
        for value in list(range(count)):
            self.aList.append(value)
        self.bList = []
        for i in range(10):
            self.bList.append(max(self.aList))
            self.aList.remove(max(self.aList))
        for i in range(10):
            yield self.bList[i]

if __name__ == '__main__':
    import time
    start = time.time()
    MRMostUsedWordSyllables.run()
    end = time.time()
    print(end - start)
I know my issue is with the "get_top_10_reducer" function. I keep getting ValueError: max() arg is an empty sequence.
According to the error, one of your reducers has returned 0 for the count. Do you have an empty line in your input, for example? You should filter this data out as early as possible.
Overall, I think you need to remove reducer_word_sorted. There is no guarantee this returns sorted data. Instead, I think it regroups all data based on the numeric count key, then emits in a non-deterministic order to the next step.
That being said, your top-10 reducer never uses the value of the word parameter, which should actually be an iterable itself, grouped by each count key emitted by the previous reducer.
With reducer_word_sorted removed, sorting_word_syllables yields None for its key. This is fine, because you then have all the split words in one giant list; so define a regular function:
def get_syllable_count_pair(word):
    return (syllables(word), word, )
Use that within the reducer:
def get_top_10_reducer(self, count, word):
    assert count is None  # added as a guard
    with_counts = [get_syllable_count_pair(w) for w in word]
    # Sort the words by the syllable count
    sorted_counts = sorted(with_counts, reverse=True, key=lambda x: x[0])
    # Slice off the first ten
    for t in sorted_counts[:10]:
        yield t
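Putting that advice together, a minimal sketch of the whole job might look like the following. The count_syllables helper just inlines the question's heuristic, and the class and method names are placeholders:

from mrjob.job import MRJob
from mrjob.step import MRStep
import re

WORD_RE = re.compile(r"[\w']+")

def count_syllables(word):
    # The question's vowel-group heuristic, unchanged.
    vowels = 'aeiouy'
    count = 0
    word = word.lower().strip()
    if word in vowels:
        count += 1
    for index in range(1, len(word)):
        if word[index] in vowels and word[index - 1] not in vowels:
            count += 1
    if word.endswith('e'):
        count -= 1
    if word.endswith('le'):
        count += 1
    if count == 0:
        count += 1
    return count

class MRTopTenSyllables(MRJob):

    def steps(self):
        return [MRStep(mapper=self.word_splitter_mapper,
                       reducer=self.get_top_10_reducer)]

    def word_splitter_mapper(self, _, line):
        for word in WORD_RE.findall(line):
            yield None, word.lower()

    def get_top_10_reducer(self, count, words):
        # Every word shares the None key, so they all reach a single reducer.
        with_counts = [(count_syllables(w), w) for w in words]
        for t in sorted(with_counts, reverse=True)[:10]:
            yield t

if __name__ == '__main__':
    MRTopTenSyllables.run()

Note that this skips the dedup that the original first reducer performed as a side effect of grouping by word, so a repeated word can occupy more than one of the ten slots.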
I am trying to find the highest-scoring word in a string, where each letter is worth its rank in the alphabet, and to return that score. For example, for the string s = 'abcd a', I intend to return 10 [a=1 + b=2 + c=3 + d=4]. But I am getting 7 as output. When I debugged the code, I noticed that in the while loop my code skips index 2 and jumps directly to index 3. Where am I going wrong? Below is my code.
class Solution(object):
    def highest_scoring_word(self, s):
        # Dictionary of English letters
        dt = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6,
              'g': 7, 'h': 8, 'i': 9, 'j': 10, 'k': 11, 'l': 12,
              'm': 13, 'n': 14, 'o': 15, 'p': 16, 'q': 17,
              'r': 18, 's': 19, 't': 20, 'u': 21, 'v': 22,
              'w': 23, 'x': 24, 'y': 25, 'z': 26}
        value_sum = 0
        max_value = value_sum
        for i in range(0, len(s)):
            if s.upper():
                s = s.lower()
            words = s.split()
            # convert the string in char array
            to_char_array = list(words[i])
            j = 0
            while j < len(to_char_array):
                if to_char_array[j] in dt.keys():
                    value_sum = max(dt.get(to_char_array[j]), value_sum + dt.get(to_char_array[j]))
                    max_value = max(value_sum, max_value)
                else:
                    pass
                j += j + 1
            return max_value

if __name__ == '__main__':
    p = 'abcd a'
    print(Solution().highest_scoring_word(p))
I have created a dictionary where I store all the letters of the English alphabet and their values. I split the string into words using split(), convert each individual word into a character array, traverse it to look up each character in the dictionary, and add its value to the running total. I am expecting to get the correct value for each word and, finally, the greatest value.
As you are using a class and methods, make use of them:
from string import ascii_lowercase as dt

class Solution(object):
    def __init__(self, data):
        self.scores = {}
        self.words = data.lower().strip().split()

    def get_scoring(self):
        # for each word calculate the score
        for word in self.words:
            score = 0
            # for each character in the word, find its index in 'a..z' and add it to score
            # same as in your dt implementation (just using the index instead of absolute values)
            for c in word:
                score += dt.find(c) + 1
            self.scores[word] = score
        print(self.scores)
        # filter the dictionary by its greatest value in order to get the word with max score:
        return max(self.scores.keys(), key=lambda k: self.scores[k])

if __name__ == '__main__':
    p = 'abcd fg11'
    maxWord = Solution(p).get_scoring()
    print(maxWord)
Out:
{'abcd': 10, 'fg11': 13}
fg11
Try using this:
class Solution(object):
    def highest_scoring_word(self, s):
        # Dictionary of English letters
        dt = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6,
              'g': 7, 'h': 8, 'i': 9, 'j': 10, 'k': 11, 'l': 12,
              'm': 13, 'n': 14, 'o': 15, 'p': 16, 'q': 17,
              'r': 18, 's': 19, 't': 20, 'u': 21, 'v': 22,
              'w': 23, 'x': 24, 'y': 25, 'z': 26}
        value_sum1 = 0
        max_value1 = value_sum1
        value_sum2 = 0
        max_value2 = value_sum2
        for i in range(0, len(s)):
            if s.upper():
                s = s.lower()
            words = s.split()
            if len(words) > 1:
                # convert the string in char array
                to_char_array = list(words[0])
                j = 0
                while j < len(to_char_array):
                    if to_char_array[j] in dt.keys():
                        value_sum1 = max(dt.get(to_char_array[j]), value_sum1 + dt.get(to_char_array[j]))
                        max_value1 = max(value_sum1, max_value1)
                    else:
                        pass
                    j = j + 1
                to_char_array = list(words[1])
                j = 0
                while j < len(to_char_array):
                    if to_char_array[j] in dt.keys():
                        value_sum2 = max(dt.get(to_char_array[j]), value_sum2 + dt.get(to_char_array[j]))
                        max_value2 = max(value_sum2, max_value2)
                    else:
                        pass
                    j = j + 1
                if max_value2 > max_value1:
                    return max_value2
                elif max_value1 > max_value2:
                    return max_value1
                else:
                    return 'Both words have equal score'
            else:
                # convert the string in char array
                to_char_array = list(words[i])
                j = 0
                while j < len(to_char_array):
                    if to_char_array[j] in dt.keys():
                        value_sum1 = max(dt.get(to_char_array[j]), value_sum1 + dt.get(to_char_array[j]))
                        max_value1 = max(value_sum1, max_value1)
                    else:
                        pass
                    j = j + 1
                return max_value1

if __name__ == '__main__':
    p = 'abcd fg'
    print(Solution().highest_scoring_word(p))
It may be of interest that the code can be greatly simplified by using features available in Python:
the_sum = sum(ord(c)-96 for c in s.lower() if c.isalpha())
To break this down: for c in s.lower() gets the lower-case characters one by one; the function ord() gives the numerical code point, with 'a' being 97, so we subtract 96 to get 1. Then we check whether the character is a letter and accept it if so. Finally, sum() adds up all the numbers. You could break up this one-liner and check how the separate parts work.
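Applied to the actual problem (the highest-scoring word rather than the sum over the whole string), the same idea gives a compact per-word version; this is just a sketch, and the function names are mine:

def highest_scoring_word(s):
    def score(word):
        # 'a' -> 1 ... 'z' -> 26; anything that isn't a letter scores 0
        return sum(ord(c) - 96 for c in word.lower() if c.isalpha())
    return max(score(word) for word in s.split())

print(highest_scoring_word('abcd a'))  # 10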
Background
I'm working on the HackerRank problem Word Order. The task is to
Read the following input from stdin
4
bcdef
abcdefg
bcde
bcdef
Produce the output that reflects:
Number of unique words (printed on the first line)
Count of occurrences for each unique word (printed on the second line)
Example:
3 # Number of unique words
2 1 1 # count of occurring words, 'bcdef' appears twice = 2
Problem
I've coded two solutions; the second one passes the initial tests but fails by exceeding the time limit. The first one would also work, but I was unnecessarily sorting the output (and it would hit the time limit too).
Notes
In the first solution I was unnecessarily sorting values; this is fixed in the second solution
I'm keen to make better (proper) use of standard Python data structures and list/dictionary comprehensions. I would be particularly keen to receive a solution that doesn't import any additional modules, with the exception of import os if needed.
Code
import os

def word_order(words):
    # Output no of distinct words
    distinct_words = set(words)
    n_distinct_words = len(distinct_words)
    print(str(n_distinct_words))
    # Count occurrences of each word
    occurrences = []
    for distinct_word in distinct_words:
        n_word_appearances = 0
        for word in words:
            if word == distinct_word:
                n_word_appearances += 1
        occurrences.append(n_word_appearances)
    occurrences.sort(reverse=True)
    print(*occurrences, sep=' ')
    # for o in occurrences:
    #     print(o, end=' ')

def word_order_two(words):
    '''
    Run through all words and only count multiple occurrences, do the maths
    to calculate unique words, etc. Attempt to construct a dictionary to make
    the operation more memory efficient.
    '''
    # Construct a count of word occurrences
    dictionary_words = {word: words.count(word) for word in words}
    # Unique words are equivalent to dictionary keys
    unique_words = len(dictionary_words)
    # Obtain sorted dictionary values
    # sorted_values = sorted(dictionary_words.values(), reverse=True)
    result_values = " ".join(str(value) for value in dictionary_words.values())
    # Output results
    print(str(unique_words))
    print(result_values)
    return 0

if __name__ == '__main__':
    q = int(input().strip())
    inputs = []
    for q_itr in range(q):
        s = input()
        inputs.append(s)
    # word_order(words=inputs)
    word_order_two(words=inputs)
Those nested loops are very bad performance-wise (they make your algorithm quadratic) and quite unnecessary. You can get all counts in a single iteration. You could use a plain dict or the dedicated collections.Counter:
from collections import Counter

def word_order(words):
    c = Counter(words)
    print(len(c))
    print(" ".join(str(v) for _, v in c.most_common()))
The "manual" implementation that shows the workings of the Counter and its methods:
def word_order(words):
    c = {}
    for word in words:
        c[word] = c.get(word, 0) + 1
    print(len(c))
    print(" ".join(str(v) for v in sorted(c.values(), reverse=True)))
    # print(" ".join(map(str, sorted(c.values(), reverse=True))))
Without any imports, you could count unique elements by
len(set(words))
and count their occurrences by
def counter(words):
    count = dict()
    for word in words:
        if word in count:
            count[word] += 1
        else:
            count[word] = 1
    return count.values()
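Tying those together to produce the required output, here is a sketch using the counter helper above; dicts preserve insertion order in Python 3.7+, so the counts come out in first-appearance order:

def word_order(words):
    counts = counter(words)
    print(len(counts))
    print(*counts, sep=' ')

word_order(['bcdef', 'abcdefg', 'bcde', 'bcdef'])
# 3
# 2 1 1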
You can use Counter and then print the output like below:
>>> from collections import Counter
>>> def counter_words(words):
...     cnt = Counter(words)
...     print(len(cnt))
...     print(*[str(v) for k, v in cnt.items()], sep=' ')
>>> inputs = ['bcdef' , 'abcdefg' , 'bcde' , 'bcdef']
>>> counter_words(inputs)
3
2 1 1
I am trying to implement the MapReduce pairs pattern in Python. I need to check whether a word is in a text file, then find the word next to it, and yield a pair of both words. I keep running into either:
neighbors = words[words.index(w) + 1]
ValueError: substring not found
or
ValueError: ("the") is not in list
File: cwork_trials.py
from mrjob.job import MRJob

class MRCountest(MRJob):
    # Word count
    def mapper(self, _, document):
        # Assume document is a list of words.
        #words = []
        words = document.strip()
        w = "the"
        neighbors = words.index(w)
        for word in words:
            #searchword = "the"
            #wor.append(str(word))
            #neighbors = words[words.index(w) + 1]
            yield (w, 1)

    def reducer(self, w, values):
        yield (w, sum(values))

if __name__ == '__main__':
    MRCountest.run()
Edit:
I'm trying to use the pairs pattern to search a document for every instance of a specific word and then find the word next to it each time, yielding a pair for each instance, i.e. finding instances of "the" and the word next to it: [the], [book], [the], [cat], etc.
from mrjob.job import MRJob

class MRCountest(MRJob):
    # Word count
    def mapper(self, _, document):
        # Assume document is a list of words.
        #words = []
        words = document.split(" ")
        want = "the"
        for w, want in enumerate(words, 1):
            if (w + 1) < len(words):
                neighbors = words[w + 1]
                pair = (want, neighbors)
                for u in neighbors:
                    if want is "the":
                        #pair = (want, neighbors)
                        yield (pair), 1
        #neighbors = words.index(w)
        #for word in words:
        #searchword = "the"
        #wor.append(str(word))
        #neighbors = words[words.index(w) + 1]
        #yield(w,1)

    #def reducer(self, w, values):
    #    yield(w,sum(values))

if __name__ == '__main__':
    MRCountest.run()
As it stands, I get every word pair yielded, with multiples of the same pairing.
When you use words.index("the") you will only get the first instance of "the" in your list or string, and as you have found, you will get an error if "the" isn't present.
Also, you mention that you are trying to produce pairs, but you only yield a single word.
I think what you are trying to do is something more like this:
def get_word_pairs(words):
    for i, word in enumerate(words):
        if (i + 1) < len(words):
            yield (word, words[i + 1]), 1
        if (i - 1) >= 0:
            yield (word, words[i - 1]), 1
assuming you are interested in neighbours in both directions. (If not, you only need the first yield.)
Lastly, since you use document.strip(), I suspect that document is in fact a string and not a list. If that's the case, you can use words = document.split(" ") to get the word list, assuming you don't have any punctuation.
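Wired back into the MRJob from the question, the mapper/reducer pair might look like this. This is a sketch that only tracks the right-hand neighbour of "the", and the class name is a placeholder:

from mrjob.job import MRJob

class MRThePairs(MRJob):

    def mapper(self, _, document):
        words = document.split(" ")
        for i, word in enumerate(words[:-1]):
            if word == "the":
                # Pair the target word with the word that follows it.
                yield (word, words[i + 1]), 1

    def reducer(self, pair, counts):
        yield pair, sum(counts)

if __name__ == '__main__':
    MRThePairs.run()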
I've done some digging and most solutions use arrays, but our class isn't that far along; we're supposed to use mostly for loops to return the most repeated letter in a function.
Here is my code so far, but all I could get it to do was return the count of the first letter.
def most_repeated_letters(word_1):
    x = 0
    z = 0
    for letter in word_1:
        y = word_1.count(letter[0:])
        if y > z:
            z = y
            x += 1
        return z

print most_repeated_letters('jackaby')
Make use of collections.Counter:
from collections import Counter
c = Counter('jackaby').most_common(1)
print(c)
# [('a', 2)]
There are a few problems with your code:
you calculate the count of the most common letter, but not the letter itself
you return inside the loop and thus after the very first letter
also, you never use x, and the slicing of letter is unnecessary
Some suggestions to better spot those errors yourself:
use more meaningful variable names
use more than two spaces for indentation
Fixing those, your code might look something like this:
def most_repeated_letters(word_1):
    most_common_count = 0
    most_common_letter = None
    for letter in word_1:
        count = word_1.count(letter)
        if count > most_common_count:
            most_common_count = count
            most_common_letter = letter
    return most_common_letter
Once you are comfortable with Python's basic language features, you should have a closer look at the builtin functions. In fact, your entire function can be reduced to a single line using max, with word_1.count as the key function for comparison.
def most_repeated_letters(word_1):
    return max(word_1, key=word_1.count)
But while this is very short, it is not very efficient, as the count function is called for each letter in the word, giving the function quadratic complexity O(n²). Instead, you can use a dict to store counts of individual letters and increase those counts in a single pass over the word in O(n).
def most_repeated_letters(word_1):
    counts = {}
    for letter in word_1:
        if letter not in counts:
            counts[letter] = 1
        else:
            counts[letter] += 1
    return max(counts, key=counts.get)
And this is basically the same as what collections.Counter would do, as already described in another answer.
If you don't want to use Counter:
def most_repeated_letters(word_1):
    lettersCount = {}
    for ch in word_1:
        if ch not in lettersCount:
            lettersCount[ch] = 1
        else:
            lettersCount[ch] += 1
    return max(lettersCount, key=lettersCount.get)

print(most_repeated_letters('jackabybb'))
Here is code that works when multiple letters are tied for the most repeats:
def most_repeated_letters(word_1):
    d = {}
    for letter in word_1:
        if not d.get(letter):
            d[letter] = 0
        d[letter] = d.get(letter) + 1
    ret = {}
    for k, v in d.iteritems():
        if d[k] == max(d.values()):
            ret[k] = v
    return ret

most_repeated_letters('jackaby')
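Note that d.iteritems() only exists in Python 2; on Python 3, use d.items() instead. Pulling max(d.values()) out of the loop into a variable would also avoid rescanning the dict on every iteration.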
If you don't want to use the collections module:
def mostRepeatedLetter(text):
    counter = {}
    for letter in text:
        if letter in counter:
            counter[letter] += 1
        else:
            counter[letter] = 1
    best = {'letter': None, 'quantity': 0}
    for key, value in counter.items():
        if value > best['quantity']:
            best['letter'], best['quantity'] = key, value
    return best
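For example, mostRepeatedLetter('jackaby') returns {'letter': 'a', 'quantity': 2}.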
This code produces n-grams and counts the number of times each n-gram appears.
I have a csv file in which every row has a column containing a string of words.
When the code finds, say, a 4-gram like 'this is my puppy', it counts every occurrence within the same row.
My intention is that an n-gram should be counted once per row: once for the row it first occurs in, a second time for another row, and so on.
e.g.
row  Word
1    this is my puppy what this is my puppy
2    this is my puppy
So this code counts 'this is my puppy' as occurring 3 times, yet I want it to be 2 times.
This is the Python code:
import collections
import re
import sys
import time

def tokenize(string):
    """Convert string to lowercase and split into words (ignoring
    punctuation), returning list of words.
    """
    return re.findall(r'\w+', string.lower())

def count_ngrams(lines, min_length=4, max_length=5):
    """Iterate through given lines iterator (file object or list of
    lines) and return n-gram frequencies. The return value is a dict
    mapping the length of the n-gram to a collections.Counter
    object of n-gram tuple and number of times that n-gram occurred.
    Returned dict includes n-grams of length min_length to max_length.
    """
    lengths = range(min_length, max_length + 1)
    ngrams = {length: collections.Counter() for length in lengths}
    queue = collections.deque(maxlen=max_length)

    # Helper function to add n-grams at start of current queue to dict
    def add_queue():
        current = tuple(queue)
        for length in lengths:
            if len(current) >= length:
                ngrams[length][current[:length]] += 1

    # Loop through all lines and words and add n-grams to dict
    for line in lines:
        for word in tokenize(line):
            queue.append(word)
            if len(queue) >= max_length:
                add_queue()

    # Make sure we get the n-grams at the tail end of the queue
    while len(queue) > min_length:
        queue.popleft()
        add_queue()

    return ngrams

def print_most_frequent(ngrams, num=10):
    """Print num most common n-grams of each length in n-grams dict."""
    for n in sorted(ngrams):
        print('----- {} most common {}-grams -----'.format(num, n))
        for gram, count in ngrams[n].most_common(num):
            print('{0}: {1}'.format(' '.join(gram), count))
        print('')

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('Usage: python ngrams.py filename')
        sys.exit(1)
    start_time = time.time()
    with open("PWorm.csv") as f:
        ngrams = count_ngrams(f)
    print_most_frequent(ngrams)
    elapsed_time = time.time() - start_time
    print('Took {:.03f} seconds'.format(elapsed_time))
Your help will be highly appreciated.
Thank you
Instead of populating your ngrams dict semi-manually, you can use a defaultdict.
To prevent the same n-gram in a line from being counted twice, you'll have to build an n-gram dict per line and then combine it with the overall n-gram dict:
def count_ngrams(lines, min_length=4, max_length=5):
    """Iterate through given lines iterator (file object or list of
    lines) and return n-gram frequencies. The return value is a dict
    mapping the length of the n-gram to a collections.Counter
    object of n-gram tuple and number of times that n-gram occurred.
    Returned dict includes n-grams of length min_length to max_length.
    """
    lengths = range(min_length, max_length + 1)
    ngrams = collections.defaultdict(collections.Counter)
    queue = collections.deque(maxlen=max_length)

    # Helper function to add n-grams at start of current queue to dict
    def add_queue(ngrams_line):
        current = tuple(queue)
        for length in lengths:
            if len(current) >= length:
                ngrams_line[length][current[:length]] = 1  # instead of += 1

    # to combine the 2 defaultdict(Counter)
    def combine_ngrams(ngrams, ngrams_line):
        for k, v in ngrams_line.items():
            ngrams[k] += v
        return ngrams

    # Loop through all lines and words and add n-grams to dict
    for line in lines:
        ngrams_line = collections.defaultdict(collections.Counter)
        for word in tokenize(line):
            queue.append(word)
            if len(queue) >= max_length:
                add_queue(ngrams_line)
        ngrams = combine_ngrams(ngrams, ngrams_line)

    # Make sure we get the n-grams at the tail end of the queue
    ngrams_line = collections.defaultdict(collections.Counter)
    while len(queue) > min_length:
        queue.popleft()
        add_queue(ngrams_line)
    ngrams = combine_ngrams(ngrams, ngrams_line)

    return ngrams
I don't 100% understand the part after while len(queue) > min_length:, or why the queue doesn't get reset every line, so you might have to adjust my answer a bit.
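An alternative that sidesteps the queue entirely: collect each line's n-grams into a set, so duplicates within a line collapse, then add each one once to the global counters. This is a sketch that reuses the question's tokenize, and unlike the queue-based code it assumes n-grams should not span line boundaries:

import collections

def count_ngrams_per_line(lines, min_length=4, max_length=5):
    lengths = range(min_length, max_length + 1)
    ngrams = {n: collections.Counter() for n in lengths}
    for line in lines:
        words = tokenize(line)
        seen = set()
        for n in lengths:
            for i in range(len(words) - n + 1):
                seen.add(tuple(words[i:i + n]))
        # Each distinct n-gram counts once per line it appears in.
        for gram in seen:
            ngrams[len(gram)][gram] += 1
    return ngrams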