Remove Stop Words Python

So I am reading in a CSV file and then getting the words from it. I am trying to remove stop words. Here is my code:
import pandas as pd
from nltk.corpus import stopwords as sw

def loadCsv(fileName):
    df = pd.read_csv(fileName, error_bad_lines=False)
    df.dropna(inplace=True)
    return df

def getWords(dataframe):
    words = []
    for tweet in dataframe['SentimentText'].tolist():
        for word in tweet.split():
            word = word.lower()
            words.append(word)
    return set(words)  # create a set from the words list

def removeStopWords(words):
    for word in words:  # iterate over word_list
        if word in sw.words('english'):
            words.remove(word)  # remove word from filtered_word_list if it is a stopword
    return set(words)

df = loadCsv("train.csv")
words = getWords(df)
words = removeStopWords(words)
On this line
if word in sw.words('english'):
I get the following error.
exception: no description
Further down the line I am going to try to remove punctuation; any pointers for that would be great too.
Any help is much appreciated.
EDIT
def removeStopWords(words):
    filtered_word_list = words  # make a copy of the words
    for word in words:  # iterate over words
        if word in sw.words('english'):
            filtered_word_list.remove(word)  # remove word from filtered_word_list if it is a stopword
    return set(filtered_word_list)

Here is a simplified version of the problem, without Pandas. I believe the issue with the original code is that it modifies the set words while iterating over it. Using a conditional list comprehension instead, we test each word, build a new list, and finally convert it to a set, as in the original code.
from nltk.corpus import stopwords as sw

def removeStopWords(words):
    return set([w for w in words if not w in sw.words('english')])

sentence = 'this is a very common english sentence with a finite set of words from my imagination'
words = set(sentence.split())
print(removeStopWords(words))
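As an aside (not part of the original answer): sw.words('english') returns a plain list, and the comprehension above re-reads it for every word. Building a set once up front makes the membership test much faster on larger inputs; a possible variant:
from nltk.corpus import stopwords as sw

def removeStopWords(words):
    stop = set(sw.words('english'))  # build the stopword set once
    return {w for w in words if w not in stop}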

Change the removeStopWords function to the following:
def getFilteredStopWords(words):
    list_stopWords = list(set(sw.words('english')))
    filtered_words = [w for w in words if w not in list_stopWords]  # drop w if it is a stopword
    return filtered_words
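For example, calling the function above on a small, hypothetical set of words:
from nltk.corpus import stopwords as sw

words = {'this', 'is', 'an', 'example', 'sentence'}
print(getFilteredStopWords(words))  # e.g. ['example', 'sentence'] (order may vary)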

from nltk.corpus import stopwords

def remove_stopwords(sentence):
    list_stop_words = set(stopwords.words('english'))
    words = sentence.split(' ')
    filtered_words = [w for w in words if w not in list_stop_words]
    sentence_list = ' '.join(w for w in filtered_words)
    return sentence_list
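A quick usage example for the function above (output is approximate; the exact words removed depend on the NLTK stopword list):
sentence = 'this is a very common english sentence with a finite set of words'
print(remove_stopwords(sentence))  # roughly: 'common english sentence finite set words'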


Why does 'if not in (x, y)' not work at all in Python

I want to select words only if each word in my column's rows is not in the stop words and not in string.punctuation.
This is my data after tokenizing and removing the stopwords; I also want to remove the punctuation at the same time I remove the stopwords. See in row two: after "usf" there is a comma. I thought of if word not in (stopwords, string.punctuation), since that looked like it would mean not in stopwords and not in string.punctuation (I saw the idea elsewhere), but it fails to remove both the stopwords and the punctuation. How can I fix this?
data['text'].head(5)
Out[38]:
0 ['ve, searching, right, words, thank, breather...
1 [free, entry, 2, wkly, comp, win, fa, cup, fin...
2 [nah, n't, think, goes, usf, ,, lives, around,...
3 [even, brother, like, speak, ., treat, like, a...
4 [date, sunday, !, !]
Name: text, dtype: object
import pandas as pd
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import string

data = pd.read_csv(r"D:/python projects/read_files/SMSSpamCollection.tsv",
                   sep='\t', header=None)
data.columns = ['label', 'text']
stopwords = set(stopwords.words('english'))

def process(df):
    data = word_tokenize(df.lower())
    data = [word for word in data if word not in (stopwords, string.punctuation)]
    return data

data['text'] = data['text'].apply(process)
If you still want to do it in a single if statement, you could convert string.punctuation to a set and combine it with stopwords using a set union. This is what it would look like:
data = [word for word in data if word not in (stopwords|set(string.punctuation))]
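As a side note (my addition, not from the original answer): building the combined set once, outside the list comprehension, avoids recomputing the union for every token; stops_and_punct is just an illustrative name:
stops_and_punct = stopwords | set(string.punctuation)  # stopwords is the set defined in the question
data = [word for word in data if word not in stops_and_punct]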
You need to change
data = [word for word in data if word not in (stopwords,string.punctuation)]
to
data = [word for word in data if word not in stopwords and word not in string.punctuation]
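A minimal sketch of the corrected function with that change applied, keeping the question's setup (process still receives the raw text of each row):
def process(text):
    tokens = word_tokenize(text.lower())
    # keep a token only if it is neither a stopword nor a punctuation character
    return [word for word in tokens
            if word not in stopwords and word not in string.punctuation]

data['text'] = data['text'].apply(process)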
In the process function you must convert the stopwords to a pandas.core.series.Series and use concat. The function would be:
def process(df):
    stop_list = pd.concat([pd.Series(list(stopwords)), pd.Series(list(string.punctuation))])
    data = word_tokenize(df.lower())
    data = [word for word in data if word not in stop_list.values]
    return data

Stopwords coming up in most influential words

I am running some NLP code, trying to find the most influential (positively or negatively) words in a survey. My problem is that, while I successfully add some extra stopwords to the NLTK stopwords list, they keep coming up as influential words later on.
So I have a dataframe; the first column contains scores and the second column contains comments.
I add extra stopwords:
stopwords = stopwords.words('english')
extra = ['Cat', 'Dog']
stopwords.extend(extra)
I check that they are added, using the len method before and after.
I create this function to remove punctuation and stopwords from my comments:
def text_process(comment):
    nopunc = [char for char in comment if char not in string.punctuation]
    nopunc = ''.join(nopunc)
    return [word for word in nopunc.split() if word.lower() not in stopwords]
I run the model (not going to include the whole code since it doesn't make a difference):
corpus = df['Comment']
y = df['Label']
vectorizer = CountVectorizer(analyzer=text_process)
x = vectorizer.fit_transform(corpus)
...
And then to get the most influential words:
feature_to_coef = {word: coef for word, coef in zip(vectorizer.get_feature_names(), nb.coef_[0])}
for best_positive in sorted(
        feature_to_coef.items(),
        key=lambda x: x[1],
        reverse=True)[:20]:
    print(best_positive)
But, Cat and Dog are in the results.
What am I doing wrong, any ideas?
Thank you very much!
It looks like it is because you have the capitalized words 'Cat' and 'Dog'.
In your text_process function you have if word.lower() not in stopwords, which only works if the stopwords themselves are lower case.
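A minimal sketch of one way to fix it, assuming the setup from the question: lowercase the extra words before extending the list.
stopwords = stopwords.words('english')
extra = ['Cat', 'Dog']
stopwords.extend(w.lower() for w in extra)  # stored as 'cat', 'dog', matching word.lower()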

How to add more stopwords in nltk list?

I have the following code. I have to add more words to the NLTK stopword list. After I run this, it does not add the words to the list.
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
import string

stop = set(stopwords.words('english'))
new_words = open("stopwords_en.txt", "r")
new_stopwords = stop.union(new_words)
exclude = set(string.punctuation)
lemma = WordNetLemmatizer()

def clean(doc):
    stop_free = " ".join([i for i in doc.lower().split() if i not in new_stopwords])
    punc_free = ''.join(ch for ch in stop_free if ch not in exclude)
    normalized = " ".join(lemma.lemmatize(word) for word in punc_free.split())
    return normalized

doc_clean = [clean(doc).split() for doc in emails_body_text]
Don't do things blindly. Read in your new list of stopwords, inspect it to see that it's right, then add it to the other stopword list. Start with the code suggested by @greg_data, but you'll need to strip newlines and maybe do other things; who knows what your stopwords file looks like?
This might do it, for example:
new_words = open("stopwords_en.txt", "r").read().split()
new_stopwords = stop.union(new_words)
PS. Don't keep splitting and joining your document; tokenize once and work with the list of tokens.
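For example, a rough sketch of clean() that tokenizes once and then works on the token list (same names as in the question; new_stopwords, exclude and lemma are assumed to be defined as above):
def clean(doc):
    tokens = doc.lower().split()  # tokenize once
    tokens = [t for t in tokens if t not in new_stopwords]  # drop stopwords
    tokens = [''.join(ch for ch in t if ch not in exclude) for t in tokens]  # strip punctuation
    return [lemma.lemmatize(t) for t in tokens if t]  # lemmatize the non-empty tokens

doc_clean = [clean(doc) for doc in emails_body_text]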

Stemming words with NLTK (python)

I am new to Python text processing. I am trying to stem words in a text document, which has around 5000 rows.
I have written the script below:
from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords  # import the stop word list
from nltk.stem.snowball import SnowballStemmer

stemmer = SnowballStemmer('english')

def Description_to_words(raw_Description):
    # 1. Remove HTML
    Description_text = BeautifulSoup(raw_Description).get_text()
    # 2. Remove non-letters
    letters_only = re.sub("[^a-zA-Z]", " ", Description_text)
    # 3. Convert to lower case, split into individual words
    words = letters_only.lower().split()
    stops = set(stopwords.words("english"))
    # 4. Remove stop words
    meaningful_words = [w for w in words if not w in stops]
    # 5. Stem words
    words = ([stemmer.stem(w) for w in words])
    # 6. Join the words back into one string separated by spaces,
    #    and return the result.
    return " ".join(meaningful_words)

clean_Description = Description_to_words(train["Description"][15])
But when I test it, the words are not stemmed. Can anyone help me figure out what the issue is? Am I doing something wrong in the "Description_to_words" function?
Also, when I execute the stem command separately, as below, it works:
>>> from nltk.tokenize import sent_tokenize, word_tokenize
>>> words = word_tokenize("MOBILE APP - Unable to add reading")
>>>
>>> for w in words:
...     print(stemmer.stem(w))
...
mobil
app
-
unabl
to
add
read
Here's each step of your function, fixed.
Remove HTML.
Description_text = BeautifulSoup(raw_Description).get_text()
Remove non-letters, but don't remove whitespaces just yet. You can also simplify your regex a bit.
letters_only = re.sub(r"[^\w\s]", " ", Description_text)
Convert to lower case, split into individual words: I recommend using word_tokenize again, here.
from nltk.tokenize import word_tokenize
words = word_tokenize(letters_only.lower())
Remove stop words.
stops = set(stopwords.words("english"))
meaningful_words = [w for w in words if not w in stops]
Stem words. Here is another issue. Stem meaningful_words, not words.
return ' '.join(stemmer.stem(w) for w in meaningful_words)
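Putting those steps together, the whole function might look roughly like this (a sketch; it assumes the BeautifulSoup, re and NLTK imports from the question are available):
from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import word_tokenize

stemmer = SnowballStemmer('english')
stops = set(stopwords.words("english"))

def Description_to_words(raw_Description):
    # 1. Remove HTML
    Description_text = BeautifulSoup(raw_Description, 'html.parser').get_text()
    # 2. Remove non-letters, keeping whitespace for now
    letters_only = re.sub(r"[^\w\s]", " ", Description_text)
    # 3. Lower-case and tokenize
    words = word_tokenize(letters_only.lower())
    # 4. Remove stop words
    meaningful_words = [w for w in words if w not in stops]
    # 5. Stem the remaining words and join them back into one string
    return " ".join(stemmer.stem(w) for w in meaningful_words)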

Python: Auto-correct

I have two files, check.txt and orig.txt. I want to check every word in check.txt and see if it matches any word in orig.txt. If it does match, the code should replace that word with its first match; otherwise it should leave the word as it is. But somehow it's not working as required. Kindly help.
check.txt looks like this:
ukrain
troop
force
and orig.txt looks like:
ukraine cnn should stop pretending & announce: we will not report news while it reflects bad on obama #bostonglobe #crowleycnn #hardball
rt #cbcnews: breaking: .#vice journalist #simonostrovsky, held in #ukraine now free and safe http://t.co/sgxbedktlu http://t.co/jduzlg6jou
russia 'outraged' at deadly shootout in east #ukraine - moscow:... http://t.co/nqim7uk7zg
#groundtroops #russianpresidentvladimirputin
http://pastebin.com/XJeDhY3G
f = open('check.txt', 'r')
orig = open('orig.txt', 'r')
new = open('newfile.txt', 'w')

for word in f:
    for line in orig:
        for word2 in line.split(" "):
            word2 = word2.lower()
            if word in word2:
                word = word2
            else:
                print('not found')
    new.write(word)
There are two problems with your code:
when you loop over the words in f, each word will still have a newline character at the end, so your "in" check does not work
you want to iterate orig for each of the words from f, but files are iterators, being exhausted after the first word from f
You can fix those by doing word = word.strip() and orig = list(orig), or you can try something like this:
# get all stemmed words
stemmed = [line.strip() for line in f]
# set of lowercased original words
original = set(word.lower() for line in orig for word in line.split())
# map stemmed words to unstemmed words
unstemmed = {word: None for word in stemmed}
# find original words for word stems in map
for stem in unstemmed:
    for word in original:
        if stem in word:
            unstemmed[stem] = word
print(unstemmed)
Or shorter (without that final double loop), using difflib, as suggested in the comments:
unstemmed = {word: difflib.get_close_matches(word, original, 1) for word in stemmed}
Also, remember to close your files, or use the with keyword to close them automatically.
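For instance, a short sketch combining the with statement and the difflib suggestion (same file names as in the question):
import difflib

with open('check.txt') as f, open('orig.txt') as orig:
    stemmed = [line.strip() for line in f]
    original = set(word.lower() for line in orig for word in line.split())

# map each stem to its closest original word (an empty list if nothing is close enough)
unstemmed = {word: difflib.get_close_matches(word, original, 1) for word in stemmed}
print(unstemmed)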
