I would like to extract noun-adjective pairs for Aspect-Based Sentiment Analysis using spaCy's dependency parser on my pandas DataFrame. I was trying this code on the Amazon Fine Food Reviews dataset from Kaggle: Named Entity Recognition in aspect-opinion extraction using dependency rule matching
However, something seems to be wrong with the way I feed my pandas DataFrame to spaCy, and my results are not what I would expect. Could someone help me debug this, please? Thanks a lot.
!python -m spacy download en_core_web_lg
import nltk
nltk.download('vader_lexicon')
import spacy
nlp = spacy.load("en_core_web_lg")
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()
def find_sentiment(doc):
    # find roots of all entities in the text
    for i in df['Text'].tolist():
        doc = nlp(i)
        ner_heads = {ent.root.idx: ent for ent in doc.ents}
        rule3_pairs = []
        for token in doc:
            children = token.children
            A = "999999"
            M = "999999"
            add_neg_pfx = False
            for child in children:
                if (child.dep_ == "nsubj" and not child.is_stop):  # nsubj is nominal subject
                    if child.idx in ner_heads:
                        A = ner_heads[child.idx].text
                    else:
                        A = child.text
                if (child.dep_ == "acomp" and not child.is_stop):  # acomp is adjectival complement
                    M = child.text
                # example - 'this could have been better' -> (this, not better)
                if (child.dep_ == "aux" and child.tag_ == "MD"):  # MD is modal auxiliary
                    neg_prefix = "not"
                    add_neg_pfx = True
                if (child.dep_ == "neg"):  # neg is negation
                    neg_prefix = child.text
                    add_neg_pfx = True
            if (add_neg_pfx and M != "999999"):
                M = neg_prefix + " " + M
            if (A != "999999" and M != "999999"):
                rule3_pairs.append((A, M, sid.polarity_scores(M)['compound']))
    return rule3_pairs
df['three_tuples'] = df['Text'].apply(find_sentiment)
df.head()
My result is coming out like this, which clearly means something is wrong with my loop:
If you call apply on df['Text'], you are essentially looping over every value in that column and passing each value to the function.
Here, however, your function itself iterates over the same DataFrame column that you are applying the function to, while also overwriting the value that is passed to it early in the function.
So I would start by rewriting the function as follows and see if it produces the intended results. I can't say for sure, as you didn't post any sample data, but this should at least move the ball forward:
def find_sentiment(text):
    doc = nlp(text)
    ner_heads = {ent.root.idx: ent for ent in doc.ents}
    rule3_pairs = []
    for token in doc:
        children = token.children
        A = "999999"
        M = "999999"
        add_neg_pfx = False
        for child in children:
            if (child.dep_ == "nsubj" and not child.is_stop):  # nsubj is nominal subject
                if child.idx in ner_heads:
                    A = ner_heads[child.idx].text
                else:
                    A = child.text
            if (child.dep_ == "acomp" and not child.is_stop):  # acomp is adjectival complement
                M = child.text
            # example - 'this could have been better' -> (this, not better)
            if (child.dep_ == "aux" and child.tag_ == "MD"):  # MD is modal auxiliary
                neg_prefix = "not"
                add_neg_pfx = True
            if (child.dep_ == "neg"):  # neg is negation
                neg_prefix = child.text
                add_neg_pfx = True
        if (add_neg_pfx and M != "999999"):
            M = neg_prefix + " " + M
        if (A != "999999" and M != "999999"):
            rule3_pairs.append((A, M, sid.polarity_scores(M)['compound']))
    return rule3_pairs
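If it helps to sanity-check the rewrite, you could run it on a couple of made-up reviews first (hypothetical sample rows, since the real data wasn't posted):
import pandas as pd

sample = pd.DataFrame({"Text": [
    "The pasta was delicious.",
    "The delivery could have been faster.",
]})
sample["three_tuples"] = sample["Text"].apply(find_sentiment)
print(sample["three_tuples"].tolist())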
Related
I'm having a hard time computing the BLEU score for my seq2seq model for the task of question generation. My questions are the following:
If I use sentence-BLEU to find the score between each reference and the output, and then divide the total of these sentence-BLEU scores by the length of the test data, will it be the same as corpus BLEU?
And is the corpus BLEU implemented in the code below the same as NLTK's corpus BLEU?
import ntpath
import sys
import codecs
import os
import math
import operator
import functools


def fetch_data(cand, ref):
    references = []
    if '.eng' in ref:
        reference_file = codecs.open(ref, 'r', 'utf-8')
        references.append(reference_file.readlines())
    else:
        for root, dirs, files in os.walk(ref):
            for f in files:
                reference_file = codecs.open(os.path.join(root, f), 'r', 'utf-8')
                references.append(reference_file.readlines())
    candidate_file = codecs.open(cand, 'r', 'utf-8')
    candidate = candidate_file.readlines()
    return candidate, references


def count_ngram(candidate, references, n):
    clipped_count = 0
    count = 0
    r = 0
    c = 0
    for si in range(len(candidate)):
        # Calculate precision for each sentence
        ref_counts = []
        ref_lengths = []
        # Build dictionary of ngram counts
        for reference in references:
            ref_sentence = reference[si]
            ngram_d = {}
            words = ref_sentence.strip().split()
            ref_lengths.append(len(words))
            limits = len(words) - n + 1
            # loop through the sentence considering the ngram length
            for i in range(limits):
                ngram = ' '.join(words[i:i+n]).lower()
                if ngram in ngram_d.keys():
                    ngram_d[ngram] += 1
                else:
                    ngram_d[ngram] = 1
            ref_counts.append(ngram_d)
        # candidate
        cand_sentence = candidate[si]
        cand_dict = {}
        words = cand_sentence.strip().split()
        limits = len(words) - n + 1
        for i in range(0, limits):
            ngram = ' '.join(words[i:i + n]).lower()
            if ngram in cand_dict:
                cand_dict[ngram] += 1
            else:
                cand_dict[ngram] = 1
        clipped_count += clip_count(cand_dict, ref_counts)
        count += limits
        r += best_length_match(ref_lengths, len(words))
        c += len(words)
    if clipped_count == 0:
        pr = 0
    else:
        pr = float(clipped_count) / count
    bp = brevity_penalty(c, r)
    return pr, bp


def clip_count(cand_d, ref_ds):
    """Count the clip count for each ngram considering all references"""
    count = 0
    for m in cand_d.keys():
        m_w = cand_d[m]
        m_max = 0
        for ref in ref_ds:
            if m in ref:
                m_max = max(m_max, ref[m])
        m_w = min(m_w, m_max)
        count += m_w
    return count


def best_length_match(ref_l, cand_l):
    """Find the closest length of reference to that of candidate"""
    least_diff = abs(cand_l - ref_l[0])
    best = ref_l[0]
    for ref in ref_l:
        if abs(cand_l - ref) < least_diff:
            least_diff = abs(cand_l - ref)
            best = ref
    return best


def brevity_penalty(c, r):
    if c > r:
        bp = 1
    else:
        bp = math.exp(1 - (float(r) / c))
    return bp


def geometric_mean(precisions):
    return (functools.reduce(operator.mul, precisions)) ** (1.0 / len(precisions))


def BLEU(candidate, references):
    precisions = []
    for i in range(4):
        pr, bp = count_ngram(candidate, references, i + 1)
        precisions.append(pr)
    bleu = geometric_mean(precisions) * bp
    return bleu


if __name__ == "__main__":
    candidate, references = fetch_data(sys.argv[1], sys.argv[2])
    bleu = BLEU(candidate, references)
    print(bleu)
I'm not sure about the implementation you show, but for implementations that strictly follow the original paper, such as NLTK's, it would not be the same: https://github.com/nltk/nltk/blob/develop/nltk/translate/bleu_score.py#L123.
Using sentence-BLEU basically means calling corpus-BLEU with a one-sentence corpus, but the other way around doesn't work. The scores should not be drastically different, but they do differ because of macro-averaging vs. micro-averaging.
I have used BLEU for seq2seq evaluation before, just used sentence-BLEU, and it worked just fine.
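To see the difference concretely, here is a small sketch with NLTK (the tokenized sentences are toy data I made up; smoothing is only there to avoid zero n-gram counts on such short sentences):
from nltk.translate.bleu_score import sentence_bleu, corpus_bleu, SmoothingFunction

# one list of references per candidate, all pre-tokenized
references = [[["the", "cat", "is", "on", "the", "mat"]],
              [["there", "is", "a", "cat", "on", "the", "mat"]]]
candidates = [["the", "cat", "sat", "on", "the", "mat"],
              ["there", "is", "a", "cat", "on", "mat"]]

smooth = SmoothingFunction().method1
avg_sentence = sum(
    sentence_bleu(refs, cand, smoothing_function=smooth)
    for refs, cand in zip(references, candidates)
) / len(candidates)
corpus = corpus_bleu(references, candidates, smoothing_function=smooth)
print(avg_sentence, corpus)   # usually close, but not identical (macro vs. micro average)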
Sorry for my poor English.
I'm working with LDA2Vec by cemoody. In the preprocessing stage, the vocab is built using int64, but I have a problem with negative numbers that it can't deal with, and I get this error:
ERROR "Negative indices reserved for special tokens"
And when I used uint64, as in:
dat = doc.to_array([attr, LIKE_EMAIL, LIKE_URL]).astype('uint64')
print(dat)
if len(dat) > 0:
    msg = "Negative indices reserved for special tokens"
    assert dat.min() >= 0, msg
    # Replace email and URL tokens
    # select the indices of tokens that are URLs or Emails
    idx = (dat[:, 1] > 0) | (dat[:, 2] > 0)
    dat = dat.astype('uint64')
    dat[idx] = skip
    length = min(len(dat), max_length)
    data[row, :length] = dat[:length, 0].ravel()
uniques = np.unique(data)
vocab = {v: nlp.vocab[v].lower_ for v in uniques if v != skip}
vocab[skip] = '<SKIP>'
return data, vocab
The result for text = ["cold", "warm"] is: {3117178197819627377: 'cold', 4469020372817945905: 'warm', 18446744073709551614: ''}
But when I try to get the strings back, the keys are mismatched:
keys = np.array(loose_counts)[:, 0]
Result: [1.84467441e+19 3.11717820e+18 4.46902037e+18]
... therefore, when I try to get the string for each key, it returns OoV.
Can anyone help me?
I solved this problem by using a different process for tokenization:
voc = Vocab()
if nlp is None:
    nlp = Arabic()
data = np.zeros((len(texts), max_length), dtype='int32')
data[:] = skip
for row, doc in enumerate(nlp.pipe(texts, **kwargs)):
    s = str(doc)
    dat = voc.word2index(s.split(), train=True)
    if len(dat) > 0:
        dat = np.array(dat).astype('int32')
        # print(dat)
        msg = "Negative indices reserved for special tokens"
        assert dat.min() >= 0, msg
        length = min(len(dat), max_length)
        data[row, :length] = dat[:length]
uniques = np.unique(data)
vocab = {v: voc.index2word(v) for v in uniques if v != skip}
vocab[skip] = '<SKIP>'
return data, vocab
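As a side note on the key mismatch in the original attempt: the printed keys are in scientific notation, which suggests they were converted to float64 at some point, and a float64 cannot represent every 64-bit hash exactly, so the later lookup misses. A minimal sketch of that effect, using one of the hashes printed above:
import numpy as np

key = np.uint64(4469020372817945905)   # hash of 'warm' from the output above
as_float = np.float64(key)             # what the keys array seems to hold
print(int(as_float) == int(key))       # False: the round-trip through float64 changes the key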
For instance, we have a document such as this -
Table Of Content
Introduction
<text: A>
1.1 Background
<text: B>
1.2 Problem statement
<text: C>
Approach
<text: D>
2.1.1 Outline of the algorithm
<text: E>
I need to pattern-match a "string" in all of the texts in the document. For example, my search string could be "REQ-", which could match "REQ-1", "REQ-2", up to "REQ-10".
Suppose "REQ-1" was located in text C and "REQ-2" in text E; then the output I am looking for is
("REQ-1", "1.2"), ("REQ-2", "2.1.1"), etc.
Essentially, it should match the search string, identify all matches, and, for each match, return a 2-tuple of the matched string and the "section id" of the section containing the match.
def get_creds():
    credentials = service_account.Credentials.from_service_account_file(
        "cred_new.json", scopes=SCOPES
    )
    return credentials


def search_paragraph_element(element, search_str):
    text_run = element.get('textRun')
    if not text_run:
        return False
    res = text_run.get('content').find(search_str)
    if res != -1:
        return True
    return False


def search_structural_elements(elements, search_str):
    text = ''
    hd_1 = 0
    hd_2 = 0
    hd_3 = 0
    for value in elements:
        if 'paragraph' in value:
            if value['paragraph']['paragraphStyle']['namedStyleType'] == 'HEADING_1':
                hd_1 = hd_1 + 1
                hd_2 = 0
                hd_3 = 0
            elif value['paragraph']['paragraphStyle']['namedStyleType'] == 'HEADING_2':
                hd_2 = hd_2 + 1
                hd_3 = 0
            elif value['paragraph']['paragraphStyle']['namedStyleType'] == 'HEADING_3':
                hd_3 = hd_3 + 1
            elements = value.get('paragraph').get('elements')
            for elem in elements:
                res = search_paragraph_element(elem, search_str)
                if res is True:
                    return str(hd_1) + '.' + str(hd_2) + '.' + str(hd_3)
    return text


def main():
    """Uses the Docs API to print out the text of a document."""
    credentials = get_creds()
    service = build("docs", "v1", credentials=credentials).documents()
    properties = service.get(documentId=REQ_DOCUMENT_ID).execute()
    doc_content = properties.get('body').get('content')
    print(search_structural_elements(doc_content, "MySearchString"))


if __name__ == '__main__':
    main()
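A minimal sketch of the kind of collection logic I am after (hypothetical; it walks the same structure as search_structural_elements above and assumes the matched tokens look like "REQ-" followed by digits):
import re

def find_all_matches(elements, search_str):
    """Collect (matched_string, section_id) tuples instead of stopping at the first hit."""
    matches = []
    hd_1 = hd_2 = hd_3 = 0
    pattern = re.compile(re.escape(search_str) + r'\d+')   # e.g. "REQ-" followed by digits
    for value in elements:
        if 'paragraph' not in value:
            continue
        style = value['paragraph']['paragraphStyle'].get('namedStyleType', '')
        if style == 'HEADING_1':
            hd_1, hd_2, hd_3 = hd_1 + 1, 0, 0
        elif style == 'HEADING_2':
            hd_2, hd_3 = hd_2 + 1, 0
        elif style == 'HEADING_3':
            hd_3 += 1
        section_id = '{}.{}.{}'.format(hd_1, hd_2, hd_3)   # same numbering scheme as above
        for elem in value['paragraph'].get('elements', []):
            text_run = elem.get('textRun')
            if not text_run:
                continue
            for m in pattern.finditer(text_run.get('content', '')):
                matches.append((m.group(), section_id))
    return matches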
I am trying to implement Okapi BM25 in Python. While I have seen some tutorials on how to do it, it seems I am stuck in the process.
I have a collection of documents (with columns 'id' and 'text') and queries (with columns 'id' and 'text'). I have done the pre-processing steps and I have my documents and queries as lists:
documents = list(train_docs['text']) #put the documents text to list
queries = list(train_queries_all['text']) #put the queries text to list
Then for BM25 I do this:
pip install rank_bm25
#calculate BM25
from rank_bm25 import BM25Okapi
bm25 = BM25Okapi(documents)
#compute the score
bm_score = BM25Okapi.get_scores(documents, query=queries)
But it wouldn't work.
Then I tried to do this:
import math
import numpy as np
from multiprocessing import Pool, cpu_count

nd = len(documents)  # corpus_size = 3612 (I am not sure if this is necessary)


class BM25:
    def __init__(self, documents, tokenizer=None):
        self.corpus_size = len(documents)
        self.avgdl = 0
        self.doc_freqs = []
        self.idf = {}
        self.doc_len = []
        self.tokenizer = tokenizer

        if tokenizer:
            documents = self._tokenize_corpus(documents)

        nd = self._initialize(documents)
        self._calc_idf(nd)

    def _initialize(self, documents):
        nd = {}  # word -> number of documents with word
        num_doc = 0
        for document in documents:
            self.doc_len.append(len(document))
            num_doc += len(document)

            frequencies = {}
            for word in document:
                if word not in frequencies:
                    frequencies[word] = 0
                frequencies[word] += 1
            self.doc_freqs.append(frequencies)

            for word, freq in frequencies.items():
                if word not in nd:
                    nd[word] = 0
                nd[word] += 1

        self.avgdl = num_doc / self.corpus_size
        return nd

    def _tokenize_corpus(self, documents):
        pool = Pool(cpu_count())
        tokenized_corpus = pool.map(self.tokenizer, documents)
        return tokenized_corpus

    def _calc_idf(self, nd):
        raise NotImplementedError()

    def get_scores(self, queries):
        raise NotImplementedError()

    def get_top_n(self, queries, documents, n=5):
        assert self.corpus_size == len(documents), "The documents given don't match the index corpus!"
        scores = self.get_scores(queries)
        top_n = np.argsort(scores)[::-1][:n]
        return [documents[i] for i in top_n]


class BM25T(BM25):
    def __init__(self, documents, k1=1.5, b=0.75, delta=1):
        # Algorithm specific parameters
        self.k1 = k1
        self.b = b
        self.delta = delta
        super().__init__(documents)

    def _calc_idf(self, nd):
        for word, freq in nd.items():
            idf = math.log((self.corpus_size + 1) / freq)
            self.idf[word] = idf

    def get_scores(self, queries):
        score = np.zeros(self.corpus_size)
        doc_len = np.array(self.doc_len)
        for q in queries:
            q_freq = np.array([(doc.get(q) or 0) for doc in self.doc_freqs])
            score += (self.idf.get(q) or 0) * (self.delta + (q_freq * (self.k1 + 1)) /
                                               (self.k1 * (1 - self.b + self.b * doc_len / self.avgdl) + q_freq))
        return score
and then I try to get the scores:
score = BM25.get_scores(self=documents, queries)
But I get this message:
score = BM25.get_scores(self=documents, queries)
SyntaxError: positional argument follows keyword argument
Does anyone have an idea why I get this error? Thank you in advance.
1) Tokenize the corpus (or pass a tokenizing function to the class).
2) Pass only the queries to the get_scores function.
Read the official example:
from rank_bm25 import BM25Okapi

corpus = [
    "Hello there good man!",
    "It is quite windy in London",
    "How is the weather today?"
]
tokenized_corpus = [doc.split(" ") for doc in corpus]

bm25 = BM25Okapi(tokenized_corpus)

query = "windy London"
tokenized_query = query.split(" ")

doc_scores = bm25.get_scores(tokenized_query)
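Applied to your own data, that would look roughly like this (assuming documents and queries are the lists of raw strings from your question; the whitespace split is just a stand-in for your real tokenization):
tokenized_docs = [doc.split() for doc in documents]
bm25 = BM25Okapi(tokenized_docs)

# one score array (length = number of documents) per query
all_scores = [bm25.get_scores(query.split()) for query in queries]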
I suggest you use fastbm25, which is faster than other BM25 implementations.
pip install fastbm25
Usage:
from fastbm25 import fastbm25

corpus = [
    "How are you !",
    "Hello Jack! Nice to meet you!",
    "I am from China, I like math."
]
tokenized_corpus = [doc.lower().split(" ") for doc in corpus]
model = fastbm25(tokenized_corpus)
query = "where are you from".lower().split()
result = model.top_k_sentence(query, k=1)
print(result)
You can learn more from https://github.com/zhusleep/fastbm25
I have a column of data (easily imported from Google Docs thanks to gspread) that I'd like to intelligently align. I ingest entries into a dictionary. Input can include an email address, a Twitter handle, or a blog URL. For example:
mike.j#gmail.com
#mikej45
j.mike#world.eu
_http://tumblr.com/mikej45
Right now, the "dumb" version is:
def NomineeCount(spreadsheet):
    worksheet = spreadsheet.sheet1
    nominees = worksheet.col_values(6)  # F = 6
    unique_nominees = {}
    for c in nominees:
        pattern = re.compile(r'\s+')
        c = re.sub(pattern, '', c)
        if unique_nominees.has_key(c) == True:  # If we already have the name
            unique_nominees[c] += 1
        else:
            unique_nominees[c] = 1

    # Print out the alphabetical list of nominees with leading vote count
    for w in sorted(unique_nominees.keys()):
        print string.rjust(str(unique_nominees[w]), 2) + " " + w

    return nominees
What's an efficient(-ish) way to add in some smarts during the if process?
You can try collections.defaultdict, which removes the need to check whether the key already exists:
from collections import defaultdict

unique_nominees = defaultdict(lambda: 0)  # missing keys default to 0
unique_nominees[c] += 1
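Plugged into the loop above, the counting part would then collapse to something like this (a sketch; re is assumed to be imported as in the original function):
from collections import defaultdict
import re

unique_nominees = defaultdict(lambda: 0)
for c in nominees:
    c = re.sub(r'\s+', '', c)    # strip whitespace, as before
    unique_nominees[c] += 1      # no membership check needed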