Finding keyword frequency in C code using Python, excluding comments

I want to create a program that counts the frequency of the keywords used in a piece of C code, excluding any that appear in comments or inside a printf call.
def counting(f, word):
    counter = 0
    for w in f.split():
        if word == w:
            counter += 1
    return counter

key = open('c_keywords.txt')
keyw = key.read().split()
file = open('a1.cpp').read()
for key in keyw:
    x = counting(file, key)
    if x != 0:
        print(key, ":", x)
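Neither answer below strips comments before counting, so here is a minimal sketch of that preprocessing step (count_keywords is just an illustrative helper name; the filenames are the ones from the question). It assumes regex-level stripping of comments and printf calls is acceptable; a real C tokenizer would be more robust:

import re
from collections import Counter

def count_keywords(source, keywords):
    # Regex-based preprocessing: drop /* ... */ block comments, // line comments
    # and printf(...) calls before counting. This is an approximation, not a real
    # C parser; string literals containing "//" or "*/" will confuse it.
    source = re.sub(r'/\*.*?\*/', ' ', source, flags=re.DOTALL)
    source = re.sub(r'//[^\n]*', ' ', source)
    source = re.sub(r'printf\s*\([^)]*\)', ' ', source)
    tokens = re.findall(r'[A-Za-z_]\w*', source)
    counts = Counter(tokens)
    return {k: counts[k] for k in keywords if counts[k]}

keywords = open('c_keywords.txt').read().split()
print(count_keywords(open('a1.cpp').read(), keywords))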

Here is an example of how to do it with a text file; you can edit text.txt and use your C code file instead:
with open('text.txt', 'r') as doc:
    print('opened txt')
    for words in doc:
        wordlist = words.split()
        for numbers in range(len(wordlist)):
            for inner_numbers in range(len(wordlist)):
                if inner_numbers != numbers:
                    if wordlist[numbers] == wordlist[inner_numbers]:
                        print('word: %s == %s' % (wordlist[numbers], wordlist[inner_numbers]))

Use:
f = open('keywords_c.txt')
count = 0
words = []
for x in f:
    w = x.split()
    for a in w:
        words.append(a)
print(words)

cpp = open('Simple_c.cpp')
program = []
for y in cpp:
    if y.startswith('printf'):
        continue
    elif y.startswith('//'):
        continue
    else:
        w = y.split()
        for b in w:
            if any(b in s for s in words):
                count += 1
print(count)
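Note that y.startswith('printf') only skips lines where printf is the very first character (an indented printf call still gets counted), and any(b in s for s in words) is a substring test, so a token such as in would match the keyword int. Comparing whole tokens against the keyword list for equality, and stripping block comments as in the sketch after the question, should get closer to what was asked.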

Related

Using Python, how to print output string as -> aaa3bb2c1ddddd5 when Input string is aaabbcddddd
I want to concatenate the actual character value with the number of times that character is repeated in the string.
def mycode(myString):
    lenstr = len(myString)
    print('length of string is ' + str(lenstr))
    for ele in myString:
        count = 0
        for character in myString:
            if character == ele:
                count = count + 1
        totalstr = ele + str(count)
    return totalstr
If the string is always sorted and grouped together like that, then you can use a collections.Counter to do it.
from collections import Counter
inp = "aaabbcddddd"
counter = Counter(inp)
out = "".join(k * v + str(v) for k,v in counter.items())
Or in one line:
print(''.join(k * v + str(v) for k,v in Counter(inp).items()))
Output:
aaa3bb2c1ddddd5
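One caveat with the Counter approach: it discards positions, so it only reproduces the expected output when equal characters are already grouped together; ungrouped input gets merged into a single run per character. A quick check:

from collections import Counter
print(''.join(k * v + str(v) for k, v in Counter("aabaa").items()))  # aaaa4b1, not aa2b1aa2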
Or you can do it manually:
inp = "aaabbcddddd"
last = inp[0]
out = inp[0]
count = 1
for i in inp[1:]:
    if i == last:
        count += 1
    else:
        out += str(count)
        count = 1
        last = i
    out += i
out += str(count)
print(out)
Here is a one-line solution using a regex replacement with a callback:
import re

inp = "aaabbcddddd"
output = re.sub(r'((\w)\2*)', lambda m: m.group(1) + str(len(m.group(1))), inp)
print(output)  # aaa3bb2c1ddddd5
Another one-liner:
import itertools
test = 'aaabbcddddd'
out = ''.join(f"{(g := ''.join(ig))}{len(g)}" for _, ig in itertools.groupby(test))
assert out == 'aaa3bb2c1ddddd5'
def char_counter_string(string):
    prev_char = None
    char_counter = 0
    output = ''
    for char_index in range(len(string) + 1):
        if char_index == len(string):
            output += str(char_counter)
            break
        if string[char_index] != prev_char and prev_char is not None:
            output += str(char_counter)
            char_counter = 0
        output += string[char_index]
        char_counter += 1
        prev_char = string[char_index]
    return output

if __name__ == '__main__':
    print(char_counter_string('aaabbcddddd'))
You can also do it like this; the time complexity is O(n):
input_string = "aaabbcddddd"
res = ""
count = 1
for i in range(1, len(input_string)):
    if input_string[i] == input_string[i-1]:
        count += 1
    else:
        res += input_string[i-1] * count + str(count)
        count = 1
res += input_string[-1] * count + str(count)
print(res)  # aaa3bb2c1ddddd5
Here's another way, ...
Full disclosure: ... as long as the run of characters is 10 or less, it will work. I.e., if there are 11 of anything in a row, this won't work (the count will be wrong).
It's just a function wrapping a reduce.
from functools import reduce

def char_rep_count(in_string):
    return reduce(
        lambda acc, inp:
            (acc[:-1] + inp + str(int(acc[-1]) + 1))
            if (inp == acc[-2])
            else (acc + inp + "1"),
        in_string[1:],
        in_string[0] + "1"
    )
And here's some sample output:
print(char_rep_count("aaabbcdddd"))
aaa3bb2c1dddd4
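The 10-run limit comes from the accumulator format: the lambda reads only the last character of acc (acc[-1]) as the running count, so once the count needs two digits both the comparison against acc[-2] and the increment go wrong. Tracing it by hand, a run of 11 should come out as:

print(char_rep_count("a" * 11))  # 'aaaaaaaaaa10a1' rather than the expected 'aaaaaaaaaaa11'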
I think this fulfils the brief and is also very fast:
s = 'aaabbcddddd'

def mycode(myString):
    if myString:
        count = 1
        rs = [prev := myString[0]]
        for c in myString[1:]:
            if c != prev:
                rs.append(f'{count}')
                count = 1
            else:
                count += 1
            rs.append(prev := c)
        rs.append(f'{count}')
        return ''.join(rs)
    return myString
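For completeness, calling it on the sample string should reproduce the expected output:

print(mycode(s))  # aaa3bb2c1ddddd5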

How to add a while True loop to count how many times a word was said in the text?

I'm trying to use a dictionary and some loops to figure out how many times a word was written, and if the user types in "quit" then the program should stop. This is what I have so far:
import string

text = open('text.txt', 'r')
val = dict()
for i in text:
    i = i.strip().lower().split(" ")
    print(i)
This is one approach to the problem:
with open('text.txt', 'r') as file:
    data = file.read()

data = data.replace('\n', ' ')
data = data.split(' ')

while True:
    counter = 0
    search_term = input('Word: ')
    if search_term == 'quit':
        break
    for word in data:
        if word.lower() == search_term.lower() or word.lower() == search_term.lower() + '.':
            counter += 1
    if counter == 0:
        print('None found!')
    else:
        print(f'Number of "{search_term}": {counter}')
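A variation on the same idea, in case it helps: building a collections.Counter once (with basic punctuation stripped) turns each lookup in the loop into a dictionary access instead of a scan over every word. This is only a sketch and assumes the same text.txt file:

import string
from collections import Counter

with open('text.txt', 'r') as file:
    # Lowercase everything and strip punctuation so "word." and "Word" match "word".
    words = file.read().lower().translate(
        str.maketrans('', '', string.punctuation)).split()
counts = Counter(words)

while True:
    search_term = input('Word: ').lower()
    if search_term == 'quit':
        break
    print(f'Number of "{search_term}": {counts[search_term]}')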

Search Strings in a List with Loop Return Order

I'm very new to Python and I have a question.
I have a List that looks like this:
List = ["B-Guild","I-Guild","I-Guild","L-Guild","B-Gene","L-Gene","U-Car"]
All of the words from a B- prefix through the following L- prefix belong together, and I want to write a function that shows that.
def combine(x):
    foo = []
    regexp_B = ("B-" + r'.*')
    regexp_I = ("I-" + r'.*')
    regexp_L = ("L-" + r'.*')
    regexp_U = ("U-" + r'.*')
    for i in range(0, len(x), 1):
        if re.match(regexp_B, x[i]):
            print("Found B")
            foo.append[i+x[i]]
            if re.match(regexp_I, x[i+1]):
                print("Found I")
                foo.append[i+1+x[i+1]]
                if re.match(regexp_I, x[i+1]):
                    print("Found I")
                    foo.append[i+1+x[i+1]]
                else:
                    print("Found L")
                    foo.append[i+1+x[i+1]]
            else:
                print("Found L")
                foo.append[i1+x[i1]]
        elif re.match(regexp_L, x[i]):
            print("L")
            foo.append[i1+x[i1]]
        elif re.match(regexp_U, x[i]):
            print("Found U")
            foo.append[i1+x[i1]]
    return foo

List_New = combine(List)
Desired Output:
foo = ["0B-Guild","0I-Guild","0I-Guild","OL-Guild","1B-Gene","1L-Gene","2U-Car"]
Edit:
The output follows this logic: every time a "B-" prefix appears, the words that follow belong to one "theme" until an "L-" prefix appears. These words must share the same number prefix so they can be grouped for further processing. "U-" prefixes don't follow that logic and just need their own number to distinguish them from the other words. Think of it as a counter that groups these words into clusters.
def combine(some_list):
    current_group = 0  # starts with 0
    g_size = 0         # current group size
    for elem in some_list:
        g_size += 1
        if elem.startswith('U-') and g_size > 1:
            g_size = 1
            current_group += 1
        yield '{}{}'.format(current_group, elem)
        if elem.startswith(('L-', 'U-')):  # each L- or U- also finishes a group
            g_size = 0
            current_group += 1
>>> List = ["B-Guild","I-Guild","I-Guild","L-Guild","B-Gene","L-Gene","U-Car"]
>>> print(list(combine(List)))
>>> List = ["B-Guild","I-Guild","I-Guild","L-Guild","B-Guild","L-Guild","U-Guild"]
>>> print(list(combine(List)))
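Tracing the generator by hand, those two calls should print the following (the first matching the desired output above):

['0B-Guild', '0I-Guild', '0I-Guild', '0L-Guild', '1B-Gene', '1L-Gene', '2U-Car']
['0B-Guild', '0I-Guild', '0I-Guild', '0L-Guild', '1B-Guild', '1L-Guild', '2U-Guild']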

Create sentences with a Markov chain in Python

I have Python code that uses Markov chains to generate sentences, but for the code to work I have to define two starting words; I want the first words to be chosen randomly.
This is the code:
import random

def getLines(filename):
    return [line[0:-1] for line in open(filename).readlines()]

def getWords(lines):
    words = []
    for line in lines:
        words.extend(line.split())
    return words

def createProbabilityHash(words):
    numWords = len(words)
    wordCount = {}
    for word in words:
        if wordCount.has_key(word):
            wordCount[word] += 1
        else:
            wordCount[word] = 1
    for word in wordCount.keys():
        wordCount[word] /= 1.0 * numWords
    return wordCount

def getRandomWord(wordCount):
    randomValue = random.random()
    cumulative = 0.0
    choosenWord = ""
    print wordCount
    for word in wordCount:
        probability = wordCount[word]
        if probability > cumulative:
            cumulative = probability
            choosenWord = word
    return choosenWord

words = getWords(getLines("frases.txt"))
wordMap = {}
previous = (words[0], words[1])
for word in words[2:]:
    if wordMap.has_key(previous):
        wordMap[previous].append(word)
    else:
        wordMap[previous] = [word]
    previous = (previous[1], word)

for word in wordMap.keys():
    probabilityHash = createProbabilityHash(wordMap[word])
    wordMap[word] = probabilityHash

palavras = ['hello', 'apple', 'something', 'yeah', 'nope', 'lalala']
previous = (".", "A")  # Starting words
numWords = 10  # The number of words to print
print previous[0], previous[1],
for i in range(numWords):
    word = getRandomWord(wordMap[previous])
    print word,
    if word.endswith(","):
        print "\n"
    if word.endswith("."):
        break
    previous = (previous[1], word)
This will choose a random starting pair from the keys of your dictionary (previous is a tuple, so it has to be rebound rather than assigned into):
import random
previous = random.choice(wordMap.keys())
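If this is ever run under Python 3, note that dict.keys() can't be passed to random.choice directly there; a sketch of the same idea that works on both versions, assuming wordMap is built as above:

import random

# Each key of wordMap is a (word, word) pair, so a random key is a valid starting state.
previous = random.choice(list(wordMap))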

Parsing data from a live website in Python: enumerate problem

The following script is supposed to fetch a specific line number from a live website and parse it. It works for about 30 iterations of the loop, but then enumerate(f) seems to stop working correctly: the i in the for loop stops around line 130 instead of 200-something. Could this be caused by the website I'm fetching data from, or by something else? Thanks!
import sgmllib

class MyParser(sgmllib.SGMLParser):
    "A simple parser class."

    def parse(self, s):
        "Parse the given string 's'."
        self.feed(s)
        self.close()

    def __init__(self, verbose=0):
        "Initialise an object, passing 'verbose' to the superclass."
        sgmllib.SGMLParser.__init__(self, verbose)
        self.divs = []
        self.descriptions = []
        self.inside_div_element = 0

    def start_div(self, attributes):
        "Process a hyperlink and its 'attributes'."
        for name, value in attributes:
            if name == "id":
                self.divs.append(value)
                self.inside_div_element = 1

    def end_div(self):
        "Record the end of a hyperlink."
        self.inside_div_element = 0

    def handle_data(self, data):
        "Handle the textual 'data'."
        if self.inside_div_element:
            self.descriptions.append(data)

    def get_div(self):
        "Return the list of hyperlinks."
        return self.divs

    def get_descriptions(self, check):
        "Return a list of descriptions."
        if check == 1:
            self.descriptions.pop(0)
        return self.descriptions

    def rm_descriptions(self):
        "Remove all descriptions."
        self.descriptions.pop()
import urllib
import linecache
import sgmllib

tempLine = ""
tempStr = " "
tempStr2 = ""
myparser = MyParser()
count = 0
user = ['']
oldUser = ['none']
oldoldUser = [' ']
array = [" ", 0]
index = 0
found = 0
k = 0
j = 0
posIndex = 0
a = 0
firstCheck = 0
fCheck = 0

while a < 1000:
    print a
    f = urllib.urlopen("SITE")
    a = a + 1
    for i, line in enumerate(f):
        if i == 187:
            print i
            tempLine = line
            print line
            myparser.parse(line)
            if fCheck == 1:
                result = oldUser[0] is oldUser[1]
                u1 = oldUser[0]
                u2 = oldUser[1]
                tempStr = oldUser[1]
                if u1 == u2:
                    result = 1
            else:
                result = user is oldUser
            fCheck = 1
            user = myparser.get_descriptions(firstCheck)
            tempStr = user[0]
            firstCheck = 1
            if result:
                array[index+1] = array[index+1] + 0
            else:
                j = 0
                for z in array:
                    k = j + 2
                    tempStr2 = user[0]
                    if k < len(array) and tempStr2 == array[k]:
                        array[j+3] = array[j+3] + 1
                        index = j + 2
                        found = 1
                        break
                    j = j + 1
                if found == 0:
                    array.append(tempStr)
                    array.append(0)
            oldUser = user
            found = 0
            print array
        elif i > 200:
            print "HERE"
            break
    print array
    f.close()
Perhaps the number of lines on that web page are fewer than you think? What does this give you?:
print max(i for i, _ in enumerate(urllib.urlopen("SITE")))
Aside: Your indentation is stuffed after the while a < 1000: line. Excessive empty lines and one-letter names don't assist the understanding of your code.
enumerate is not broken. Instead of such speculation, inspect your data. Suggestion: replace
for i, line in enumerate(f):
by
lines = list(f)
print "=== a=%d linecount=%d === % (a, len(lines))
for i, line in enumerate(lines):
print " a=%d i=%d line=%r" % (a, i, line)
Examine the output carefully.
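As an aside for anyone reading this today: sgmllib was removed in Python 3, so a rough equivalent of the div-collecting parser above would use html.parser instead. A minimal sketch (not the original poster's code; "SITE" stays a placeholder URL as in the question):

from html.parser import HTMLParser
from urllib.request import urlopen

class DivParser(HTMLParser):
    """Collect the id of every <div> and the text found inside div elements."""

    def __init__(self):
        super().__init__()
        self.divs = []
        self.descriptions = []
        self.inside_div = 0

    def handle_starttag(self, tag, attrs):
        if tag == "div":
            self.inside_div += 1
            for name, value in attrs:
                if name == "id":
                    self.divs.append(value)

    def handle_endtag(self, tag):
        if tag == "div" and self.inside_div:
            self.inside_div -= 1

    def handle_data(self, data):
        if self.inside_div and data.strip():
            self.descriptions.append(data.strip())

# Usage sketch:
# parser = DivParser()
# parser.feed(urlopen("SITE").read().decode("utf-8", errors="replace"))
# print(parser.divs, parser.descriptions)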
