Why am I getting a garbage tokenizer? - python

from tokenizers import Tokenizer, models, normalizers, pre_tokenizers, decoders, trainers
import pandas as pd  # needed for pd.read_sql below; `db` is an existing SQL connection

tokenizer = Tokenizer(models.Unigram())
tokenizer.normalizer = normalizers.NFKC()
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel()
tokenizer.decoder = decoders.ByteLevel()

trainer = trainers.UnigramTrainer(
    vocab_size=30000,
    initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
    special_tokens=["<PAD>", "<BOS>", "<EOS>", '<s>', '</s>', '<unk>', '<mask>'],
    min_frequency=2
)

def batch_iterator(batch_size=10, size=5000):
    for i in range(100):
        query = f"select note_text from db.note where id > {i * size} limit 50;"
        df = pd.read_sql(sql=query, con=db)
        for x in range(0, size, batch_size):
            yield list(df['note_text'].loc[0:5000])[x:x + batch_size]

tokenizer.train_from_iterator(batch_iterator(), trainer=trainer, length=100*5000)
A single note may look something like this:
!~!~!~!~!~!~!~!~!~!~!~!~!~!~Discussing settlement with Amy.!~!~
The output looks as follows:
out = tokenizer.encode('There should be an inspection come Monday 1/2/2022!')
['ĠThe', 'r', 'e', 'Ġsh', 'ould', 'Ġbe', 'Ġan', 'Ġinspect', 'ion', 'Ġ', 'com', 'e', 'Ġ', 'M', 'ond', 'a', 'y', 'Ġ', '1', '/', '2', '/', '20', '2', '2', '!']
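One thing worth noting: the Ġ characters are not corruption; they are how the ByteLevel pre-tokenizer marks a leading space. The heavy fragmentation, though, suggests the !~ filler runs in the notes are crowding the Unigram vocabulary during training. A minimal sketch of one possible cleanup, assuming the filler is always the literal !~ sequence (the wrapper name is hypothetical):

import re

FILLER = re.compile(r'(?:!~)+')  # matches the '!~' runs seen in the sample note

def clean_batch_iterator(batch_size=10, size=5000):
    # Wrap the original batch_iterator and strip filler before training.
    for batch in batch_iterator(batch_size=batch_size, size=size):
        yield [FILLER.sub(' ', note).strip() for note in batch]

tokenizer.train_from_iterator(clean_batch_iterator(), trainer=trainer, length=100*5000)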

Related

Removing accents from keyword strings

This is word-processing code for a chatbot; it removes some articles and prepositions to make the input easier for the bot to match.
import json
from random import choice
class ChatterMessage:
    def __init__(self, raw):
        self.raw = str(raw).lower()
        self.processed_str = self.reduce()
        self.responses = self.get_responses()
        self.data = self.process_response()
        self.response = choice(self.data['response'])

    def remove_unwanted_chars(self, string):
        list_of_chars = ['?', ".", ",", "!", "#", "[", "]", "{", "}", "#", "$", "%", "*", "&", "(", ")", "-", "_", "+", "="]
        new_str = ""
        for char in string:
            if char not in list_of_chars:
                new_str += str(char)
        return new_str

    def get_responses(self, response_file="info.json"):
        with open(response_file, 'r') as file:
            return json.loads(file.read())

    def reduce(self):
        stopwords = ['de', 'a', 'o', 'que', 'e', 'é', 'do', 'da', 'em', 'um', 'para', 'com', 'não', 'uma', 'os', 'no', 'se', 'na', 'por', 'mais', 'as', 'dos', 'como', 'mas', 'ao', 'ele', 'das', 'à', 'seu', 'sua', 'ou', 'quando', 'muito', 'nos', 'já', 'eu', 'também', 'só', 'pelo', 'pela', 'até', 'isso', 'ela', 'entre', 'depois', 'sem', 'mesmo', 'aos', 'seus', 'quem', 'nas', 'me', 'esse', 'eles', 'você', 'essa', 'num', 'nem', 'suas', 'meu', 'às', 'minha', 'numa', 'pelos', 'elas', 'qual', 'nós', 'lhe', 'deles', 'essas', 'esses', 'pelas', 'este', 'dele', 'tu', 'te', 'vocês', 'vos', 'lhes', 'meus', 'minhas', 'teu', 'tua', 'teus', 'tuas', 'nosso', 'nossa', 'nossos', 'nossas', 'dela', 'delas', 'esta', 'estes', 'estas', 'aquele', 'aquela', 'aqueles', 'aquelas', 'isto', 'aquilo', 'estou', 'está', 'estamos', 'estão', 'estive', 'esteve', 'estivemos', 'estiveram', 'estava', 'estávamos', 'estavam', 'estivera', 'estivéramos', 'esteja', 'estejamos', 'estejam', 'estivesse', 'estivéssemos', 'estivessem', 'estiver', 'estivermos', 'estiverem', 'hei', 'há', 'havemos', 'hão', 'houve', 'houvemos', 'houveram', 'houvera', 'houvéramos', 'haja', 'hajamos', 'hajam', 'houvesse', 'houvéssemos', 'houvessem', 'houver', 'houvermos', 'houverem', 'houverei', 'houverá', 'houveremos', 'houverão', 'houveria', 'houveríamos', 'houveriam', 'sou', 'somos', 'são', 'era', 'éramos', 'eram', 'fui', 'foi', 'fomos', 'foram', 'fora', 'fôramos', 'seja', 'sejamos', 'sejam', 'fosse', 'fôssemos', 'fossem', 'for', 'formos', 'forem', 'serei', 'será', 'seremos', 'serão', 'seria', 'seríamos', 'seriam', 'tenho', 'tem', 'temos', 'tém', 'tinha', 'tínhamos', 'tinham', 'tive', 'teve', 'tivemos', 'tiveram', 'tivera', 'tivéramos', 'tenha', 'tenhamos', 'tenham', 'tivesse', 'tivéssemos', 'tivessem', 'tiver', 'tivermos', 'tiverem', 'terei', 'terá', 'teremos', 'terão', 'teria', 'teríamos', 'teriam']
        custom_filter = []
        keywords_list = []
        strlist = self.raw.split(" ")
        for x in strlist:
            if x not in stopwords and x not in custom_filter:
                keywords_list.append(self.remove_unwanted_chars(x))
        return keywords_list

    def process_response(self):
        percentage = lambda x, y: (100 * y) / x
        total = sum(len(x['keywords']) for x in self.responses)
        most_acc = 0
        response_data = None
        acc = 0
        for value in self.responses:
            c = 0
            for x in value['keywords']:
                if str(x).lower() in self.processed_str:
                    c += 1
            if c > most_acc:
                most_acc = c
                acc = percentage(total, most_acc)
                print(acc)
                response_data = value
        if acc < 6:
            # wrapped in a list so choice() in __init__ returns the whole sentence
            return {"response": ["Sorry, I do not understand. Be more clear please"]}
        for x in self.processed_str:
            if x not in response_data['keywords']:
                response_data['keywords'].append(x)
        return response_data

if __name__ == '__main__':
    while True:
        k = input("Você: ")
        res = ChatterMessage(k).response
        print("Bot:", res)
How do I remove accents from the keyword strings to "make it easier" for the chatbot to read? I found this explanation: How to remove string accents using Python 3? But I don't know how to apply it to this code, and the bot always stops responding.
You could use the Python package unidecode, which replaces special characters with their ASCII equivalents.
from unidecode import unidecode
text = "Björn, Łukasz and Σωκράτης."
print(unidecode(text))
# ==> Bjorn, Lukasz and Sokrates.
You could apply this to both the input and keywords.
# In the function definition of reduce(), place this line of code after
# stopwords = ['de', 'a', 'o', ...]
stopwords = [unidecode(s) for s in stopwords]

# In "__main__": replace k = input("Você: ") with the following line of code.
k = unidecode(input("Você: "))
If it makes sense, you could also force the strings to be all lowercase. This will make your string comparisons even more robust.
k = unidecode(input("Você: ").lower())
Because you requested the entire code:
import json
from random import choice
from unidecode import unidecode
class ChatterMessage:
    def __init__(self, raw):
        self.raw = str(raw).lower()
        self.processed_str = self.reduce()
        self.responses = self.get_responses()
        self.data = self.process_response()
        self.response = choice(self.data['response'])

    def remove_unwanted_chars(self, string):
        list_of_chars = ['?', ".", ",", "!", "#", "[", "]", "{", "}", "#", "$", "%", "*", "&", "(", ")", "-", "_", "+", "="]
        new_str = ""
        for char in string:
            if char not in list_of_chars:
                new_str += str(char)
        return new_str

    def get_responses(self, response_file="info.json"):
        with open(response_file, 'r') as file:
            return json.loads(file.read())

    def reduce(self):
        stopwords = ['de', 'a', 'o', 'que', 'e', 'é', 'do', 'da', 'em', 'um', 'para', 'com', 'não', 'uma', 'os', 'no', 'se', 'na', 'por', 'mais', 'as', 'dos', 'como', 'mas', 'ao', 'ele', 'das', 'à', 'seu', 'sua', 'ou', 'quando', 'muito', 'nos', 'já', 'eu', 'também', 'só', 'pelo', 'pela', 'até', 'isso', 'ela', 'entre', 'depois', 'sem', 'mesmo', 'aos', 'seus', 'quem', 'nas', 'me', 'esse', 'eles', 'você', 'essa', 'num', 'nem', 'suas', 'meu', 'às', 'minha', 'numa', 'pelos', 'elas', 'qual', 'nós', 'lhe', 'deles', 'essas', 'esses', 'pelas', 'este', 'dele', 'tu', 'te', 'vocês', 'vos', 'lhes', 'meus', 'minhas', 'teu', 'tua', 'teus', 'tuas', 'nosso', 'nossa', 'nossos', 'nossas', 'dela', 'delas', 'esta', 'estes', 'estas', 'aquele', 'aquela', 'aqueles', 'aquelas', 'isto', 'aquilo', 'estou', 'está', 'estamos', 'estão', 'estive', 'esteve', 'estivemos', 'estiveram', 'estava', 'estávamos', 'estavam', 'estivera', 'estivéramos', 'esteja', 'estejamos', 'estejam', 'estivesse', 'estivéssemos', 'estivessem', 'estiver', 'estivermos', 'estiverem', 'hei', 'há', 'havemos', 'hão', 'houve', 'houvemos', 'houveram', 'houvera', 'houvéramos', 'haja', 'hajamos', 'hajam', 'houvesse', 'houvéssemos', 'houvessem', 'houver', 'houvermos', 'houverem', 'houverei', 'houverá', 'houveremos', 'houverão', 'houveria', 'houveríamos', 'houveriam', 'sou', 'somos', 'são', 'era', 'éramos', 'eram', 'fui', 'foi', 'fomos', 'foram', 'fora', 'fôramos', 'seja', 'sejamos', 'sejam', 'fosse', 'fôssemos', 'fossem', 'for', 'formos', 'forem', 'serei', 'será', 'seremos', 'serão', 'seria', 'seríamos', 'seriam', 'tenho', 'tem', 'temos', 'tém', 'tinha', 'tínhamos', 'tinham', 'tive', 'teve', 'tivemos', 'tiveram', 'tivera', 'tivéramos', 'tenha', 'tenhamos', 'tenham', 'tivesse', 'tivéssemos', 'tivessem', 'tiver', 'tivermos', 'tiverem', 'terei', 'terá', 'teremos', 'terão', 'teria', 'teríamos', 'teriam']
        stopwords = [unidecode(s) for s in stopwords]
        custom_filter = []
        keywords_list = []
        strlist = self.raw.split(" ")
        for x in strlist:
            if x not in stopwords and x not in custom_filter:
                keywords_list.append(self.remove_unwanted_chars(x))
        return keywords_list

    def process_response(self):
        percentage = lambda x, y: (100 * y) / x
        total = sum(len(x['keywords']) for x in self.responses)
        most_acc = 0
        response_data = None
        acc = 0
        for value in self.responses:
            c = 0
            for x in value['keywords']:
                if str(x).lower() in self.processed_str:
                    c += 1
            if c > most_acc:
                most_acc = c
                acc = percentage(total, most_acc)
                print(acc)
                response_data = value
        if acc < 6:
            # wrapped in a list so choice() in __init__ returns the whole sentence
            return {"response": ["Sorry, I do not understand. Be more clear please"]}
        for x in self.processed_str:
            if x not in response_data['keywords']:
                response_data['keywords'].append(x)
        return response_data

if __name__ == '__main__':
    while True:
        k = unidecode(input("Você: "))
        res = ChatterMessage(k).response
        print("Bot:", res)

Python only encryption/obfuscation

I'm looking for simple password-based obfuscation/security of strings.
I've pretty much gone over every example in Simple way to encode a string according to a password? and none of them work with my Python 3.7.
I got an error with ord(), so I updated the code, but even after that it's still broken. For example:
import base64
from itertools import cycle

def encode_zip_cycle(key, clear):
    enc = [chr((ord(clear_char) + ord(key_char)) % 256)
           for clear_char, key_char in zip(clear, cycle(key))]
    return base64.urlsafe_b64encode("".join(enc).encode())

def decode_zip_cycle(key, enc):
    enc = base64.urlsafe_b64decode(enc)
    dec = [chr((256 + enc_char - ord(key_char)) % 256)
           for enc_char, key_char in zip(enc, cycle(key))]
    print(dec)
    return "".join(dec)

text = "ATTACKATONCEfor Live 2154125-21-512^!££613-123!"
s = "1235348udgfjff"

print("Text : " + text)
print("Shift : " + str(s))
print("Cipher: ", encode_zip_cycle(s, text))  # , type(encode(s, text)))
print("Original text: ", decode_zip_cycle(s, encode_zip_cycle(s, text)))
Gives me
Text : ATTACKATONCEfor Live 2154125-21-512^!££613-123!
Shift : 1235348udgfjff
Cipher: b'csKGwod2dn95w4nCs8K1wqnCr8OMw5XCo1J_wp7CqcKZWMKVwoTCmcKXwp_CmsKXY2dgZ2RhbcKmwpbDhcKHDQnCnGJlYGZlZ1k='
['A', '\x90', 'S', '\x8d', 'T', 'B', '>', '\n', '\x15', '\\', '#', 'X', 'M', '\\', '\x84', '\x90', 'v', '\x8d', '|', '\x8f', 'T', 'N', '1', '[', '=', 'è', '\x19', '\\', 'm', '\x90', 'v', '\x8d', 'f', '$', '\x8a', ' ', '^', '\x1d', '\\', '/', '\\', '1', '\x91', 'm', '\x8f', 'e', '\x8f', 'c', '+', 'ò', 'ü', '\x00', 'þ', '÷', '\x07', '\\', 'u', '\x90', 'c', '\x8e', 'R', '\x8e', 'O', '\x98', '¥', '[', '6', 'ø', 'ÿ', 'ú', '5', '3', '4', '$']
Original text: ASTB>
\#XM\v|TN1[=è\mvf$ ^\/\1mec+òü þ÷\ucRO¥[6øÿú534$
In encode_zip_cycle you encode the scrambled string to UTF-8 bytes before the second encoding into base64, but you never reverse that step in decode_zip_cycle.
This is the correct decode_zip_cycle function:
def decode_zip_cycle(key, enc):
    enc = base64.urlsafe_b64decode(enc).decode()
    dec = [chr((256 + ord(enc_char) - ord(key_char)) % 256)
           for enc_char, key_char in zip(enc, cycle(key))]
    print(dec)
    return "".join(dec)

Combining three lists in Python with sorting

How can I efficiently and cleanly combine 3 lists in the way shown below?
sex = ['M', 'M', 'F', 'F', 'M', 'F', 'M', 'F', 'F', 'F']
actresses = ['Natalie Portman', 'Anne Hathaway', 'Talia Shire', 'Diane Keaton', 'Keira Knightley', 'Uma Thurman']
actors = ['Morgan Freeman', 'Leonardo DiCaprio', 'Robert De Niro', 'Brad Pitt']
Result:
[('M', 'Morgan Freeman'),
('M', 'Leonardo DiCaprio'),
('F', 'Natalie Portman'),
('F', 'Anne Hathaway'),
('M', 'Robert De Niro'),
('F', 'Talia Shire'),
('M', 'Brad Pitt'),
('F', 'Diane Keaton'),
('F', 'Keira Knightley'),
('F', 'Uma Thurman')]
My solution:
sex = ['M', 'M', 'F', 'F', 'M', 'F', 'M', 'F', 'F', 'F']
actresses = ['Natalie Portman', 'Anne Hathaway', 'Talia Shire', 'Diane Keaton', 'Keira Knightley', 'Uma Thurman']
actors = ['Morgan Freeman', 'Leonardo DiCaprio', 'Robert De Niro', 'Brad Pitt']

result = []
for s in sex:
    if s == 'F':
        result.append((s, actresses.pop(0)))
    elif s == 'M':
        result.append((s, actors.pop(0)))

print(f'result = {result}')
What is the best approach for long lists (e.g. 1 million items)?
You can place references to the lists in a dictionary and use a list comprehension:
In [8]: sexes = ['M', 'M', 'F', 'F', 'M', 'F', 'M', 'F', 'F', 'F']
   ...: actresses = ['Natalie Portman', 'Anne Hathaway', 'Talia Shire', 'Diane Keaton', 'Keira Knightley', 'Uma Thurman']
   ...: actors = ['Morgan Freeman', 'Leonardo DiCaprio', 'Robert De Niro', 'Brad Pitt']
   ...:
   ...: mf = {'M': iter(actors), 'F': iter(actresses)}
   ...: [(sex, next(mf[sex])) for sex in sexes]
Out[8]:
[('M', 'Morgan Freeman'),
('M', 'Leonardo DiCaprio'),
('F', 'Natalie Portman'),
('F', 'Anne Hathaway'),
('M', 'Robert De Niro'),
('F', 'Talia Shire'),
('M', 'Brad Pitt'),
('F', 'Diane Keaton'),
('F', 'Keira Knightley'),
('F', 'Uma Thurman')]
If your lists are longish and you are going to consume one (sex, person) pair at a time, you can use a generator expression in place of the list comprehension:
pairs = ((sex, next(mf[sex])) for sex in sexes)
for sex, person in pairs:
    ...
Or, possibly even simpler:
for sex in sexes:
    person = next(mf[sex])
    ...
If your lists were stored on disk, you can use the same pattern introduced above, but with generator expressions over the files in place of lists:
mf = {'M': (line.strip() for line in open('male_performers.txt')),
      'F': (line.strip() for line in open('female_performers.txt'))}
sexes = (line.strip() for line in open('sexes.txt'))
for sex in sexes:
    performer = next(mf[sex])
You are popping from the start of the list, which has O(n) time complexity. What you could do instead is keep an index for both the actors and actresses lists and increment them in the loop.
sex = ['M', 'M', 'F', 'F', 'M', 'F', 'M', 'F', 'F', 'F']
actresses = ['Natalie Portman', 'Anne Hathaway', 'Talia Shire', 'Diane Keaton', 'Keira Knightley', 'Uma Thurman']
actors = ['Morgan Freeman', 'Leonardo DiCaprio', 'Robert De Niro', 'Brad Pitt']

result = []
actors_i = 0
actresses_i = 0
for s in sex:
    if s == 'F':
        result.append((s, actresses[actresses_i]))
        actresses_i += 1
    elif s == 'M':
        result.append((s, actors[actors_i]))
        actors_i += 1

print(f'result = {result}')
After this point, I don't think there are any improvements left other than making your code more readable: you have to visit every item in the sex list, and every operation inside the loop costs O(1), so the overall complexity is O(n).
Given that all actors are labeled 'M' and all actresses 'F', you could use pandas to group the information, which should be faster than looping through large lists.
Here is an example:
import pandas as pd
actresses = ['Natalie Portman', 'Anne Hathaway', 'Talia Shire', 'Diane Keaton', 'Keira Knightley', 'Uma Thurman', ]
actors = ['Morgan Freeman', 'Leonardo DiCaprio', 'Robert De Niro', 'Brad Pitt']
df_actresses = pd.DataFrame(actresses, columns=['name'])
df_actors = pd.DataFrame(actors, columns=['name'])
df_actresses['sex'] = 'F'
df_actors['sex'] = 'M'
df = pd.concat([df_actresses, df_actors], axis=0)
# if you really need it to be a list
result = df.values.tolist()
Thank you for all the answers. Yes, using pop(0) was a very bad idea in this case. I compared all the solutions on 1 million pseudo-items; the results were very good except for the pop(0) version.
Results:
combine_with_pop Items = 1000000. Average time: 45.49504270553589 secs
combine_without_pop Items = 1000000. Average time: 0.33301634788513185 secs
combine_dict Items = 1000000. Average time: 0.21431212425231932 secs
combine_generator Items = 1000000. Average time: 0.2770370960235596 secs
combine_frames Items = 1000000. Average time: 0.06862187385559082 secs
Test:
import pandas as pd
import string
import random
import time
import inspect
from statistics import mean
result_size = 1000000
g_number_of_repetitions = 5
def init():
    # Generate sexes
    population = ('M', 'F')
    male_weight = 0.48
    weights = (male_weight, 1 - male_weight)  # was (0.4, 1 - male_weight); male_weight was clearly intended
    actresses = []
    actors = []
    sexes = random.choices(population, weights, k=result_size)
    male_amount = sexes.count('M')
    female_amount = result_size - male_amount
    # Generate pseudo 'actresses' and 'actors'
    act_len = 20
    for a in range(female_amount):
        actresses.append(''.join(random.choices(string.ascii_lowercase, k=act_len)))
    for a in range(male_amount):
        actors.append(''.join(random.choices(string.ascii_lowercase, k=act_len)))
    return sexes, actresses, actors

def combine_with_pop(number_of_repetitions, sexes, random_actresses, random_actors):
    time_measurements = []
    for i in range(number_of_repetitions):
        actors = random_actors[:]
        actresses = random_actresses[:]
        result = []
        t0 = time.time()
        for s in sexes:
            if s == 'F':
                result.append((s, actresses.pop(0)))
            elif s == 'M':
                result.append((s, actors.pop(0)))
        time_one_round = time.time() - t0
        time_measurements.append(time_one_round)
    print(
        f'{inspect.currentframe().f_code.co_name.ljust(20)} '
        f'Items = {result_size}. Average time: {str(mean(time_measurements))} secs')

def combine_without_pop(number_of_repetitions, sexes, random_actresses, random_actors):
    time_measurements = []
    for i in range(number_of_repetitions):
        actors = random_actors[:]
        actresses = random_actresses[:]
        result = []
        actors_i = 0
        actresses_i = 0
        t0 = time.time()
        for s in sexes:
            if s == 'F':
                result.append((s, actresses[actresses_i]))
                actresses_i += 1
            elif s == 'M':
                result.append((s, actors[actors_i]))
                actors_i += 1
        time_one_round = time.time() - t0
        time_measurements.append(time_one_round)
    print(
        f'{inspect.currentframe().f_code.co_name.ljust(20)} '
        f'Items = {result_size}. Average time: {str(mean(time_measurements))} secs')

def combine_dict(number_of_repetitions, sexes, random_actresses, random_actors):
    time_measurements = []
    for i in range(number_of_repetitions):
        actors = random_actors[:]
        actresses = random_actresses[:]
        result = []
        t0 = time.time()
        mf = {'M': iter(actors), 'F': iter(actresses)}
        result = [(sex, next(mf[sex])) for sex in sexes]
        time_one_round = time.time() - t0
        time_measurements.append(time_one_round)
    print(
        f'{inspect.currentframe().f_code.co_name.ljust(20)} '
        f'Items = {result_size}. Average time: {str(mean(time_measurements))} secs')

def combine_generator(number_of_repetitions, sexes, random_actresses, random_actors):
    time_measurements = []
    for i in range(number_of_repetitions):
        actors = random_actors[:]
        actresses = random_actresses[:]
        result = []
        t0 = time.time()
        mf = {'M': iter(actors), 'F': iter(actresses)}
        for sex in sexes:
            person = next(mf[sex])
            result.append((sex, person))
        time_one_round = time.time() - t0
        time_measurements.append(time_one_round)
    print(
        f'{inspect.currentframe().f_code.co_name.ljust(20)} '
        f'Items = {result_size}. Average time: {str(mean(time_measurements))} secs')

def combine_frames(number_of_repetitions, sexes, random_actresses, random_actors):
    time_measurements = []
    for i in range(number_of_repetitions):
        actors = random_actors[:]
        actresses = random_actresses[:]
        result = []
        df_actresses = pd.DataFrame(actresses, columns=['name'])
        df_actors = pd.DataFrame(actors, columns=['name'])
        t0 = time.time()
        df_actresses['sex'] = 'F'
        df_actors['sex'] = 'M'
        df = pd.concat([df_actresses, df_actors], axis=0)
        # if you really need it to be a list
        # result = df.values.tolist()
        time_one_round = time.time() - t0
        time_measurements.append(time_one_round)
    print(
        f'{inspect.currentframe().f_code.co_name.ljust(20)} '
        f'Items = {result_size}. Average time: {str(mean(time_measurements))} secs')
g_sexes, g_actresses, g_actors = init()
combine_with_pop(g_number_of_repetitions, g_sexes, g_actresses, g_actors)
combine_without_pop(g_number_of_repetitions, g_sexes, g_actresses, g_actors)
combine_dict(g_number_of_repetitions, g_sexes, g_actresses, g_actors)
combine_generator(g_number_of_repetitions, g_sexes, g_actresses, g_actors)
combine_frames(g_number_of_repetitions, g_sexes, g_actresses, g_actors)

Scrape several instances of a webpage the fastest way possible

So, after a lot of attempts, searching and research, I give up.
I have a webpage where every employee's name, phone, mail and user id can be queried. A request to the server needs at least 4 characters, drawn from the 26 ASCII letters plus the digits 0-9. I was able to do it with Selenium in Python, but it would take 20 days to go through - see the code below.
from selenium import webdriver
import csv

alphanum = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
            'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'x', 'y', 'z', '1',
            '2', '3', '4', '5', '6', '7', '8', '9', '0']

driver = webdriver.Firefox()
driver.get('http://brnet.intra.corpintra.net/Quem/pessoas2/Default.asp')

list_base = dict()
i = 0
data_str = []
found = False

for first_chr in alphanum:
    for second_chr in alphanum:
        for third_chr in alphanum:
            text = first_chr + second_chr + third_chr
            driver.find_element_by_name('nome').clear()
            element_name = driver.find_element_by_name('nome')
            element_name.send_keys(text)
            element_search = driver.find_element_by_name('B1')
            element_search.click()
            if driver.find_elements_by_class_name('dados'):
                for table_data in driver.find_elements_by_class_name('dados'):
                    cells_table = table_data.find_elements_by_tag_name('td')
                    for cell_data in cells_table:
                        data_str.append(cell_data.text.strip())
            if list_base:
                for key, value in list_base.items():
                    for data in data_str:
                        if data in value:
                            found = False
                        else:
                            found = True
            else:
                found = False
            if found is False:
                list_base[i] = data_str
                i = i + 1
            data_str = []
            found = False
            driver.back()

w = csv.writer(open("output.csv", "w"))
for key, value in list_base.items():
    w.writerow([key, value])
driver.quit()
Is there a way to reduce the time?
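If the page accepts a plain form POST, you can skip the browser entirely and fan requests out over a thread pool, which is usually orders of magnitude faster than driving Selenium. A rough sketch, assuming the endpoint takes the same 'nome' and 'B1' fields the Selenium code fills in (the POST target, the 'B1' value, and the response parsing are assumptions you would need to verify against the actual page):

import itertools
import string
from concurrent.futures import ThreadPoolExecutor

import requests

# Assumed: Default.asp accepts the search form as a direct POST.
URL = 'http://brnet.intra.corpintra.net/Quem/pessoas2/Default.asp'
alphanum = string.ascii_lowercase + string.digits

def fetch(text):
    # 'nome' and 'B1' are the field names from the Selenium code;
    # the 'B1' value here is a hypothetical placeholder.
    resp = requests.post(URL, data={'nome': text, 'B1': 'B1'}, timeout=30)
    return text, resp.text

with ThreadPoolExecutor(max_workers=20) as pool:
    queries = (''.join(chars) for chars in itertools.product(alphanum, repeat=3))
    for text, html in pool.map(fetch, queries):
        ...  # parse the 'dados' tables out of html, e.g. with BeautifulSoup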

Is the cc recipients in a received email a Python list? (Google App Engine)

I am trying to pull the cc'ed email addresses from a received email. I am working on the development server.
The tutorial says that "cc contains a list of the cc recipients," but it seems that message.cc returns a string. I am just using the code I copied from the cookbook:
class ReceiveEmail(InboundMailHandler):
    def receive(self, message):
        logging.info("Received email from %s" % message.sender)
        plaintext = message.bodies(content_type='text/plain')
        for text in plaintext:
            txtmsg = ""
            txtmsg = text[1].decode()
            logging.info("Body is %s" % txtmsg)
        logging.info("CC email is %s" % message.cc)
So if I have 1 cc, the log shows:
CC email is cc12#example.com
If there is more than one:
CC email is cc12#example.com, cc13#example.com
To get the first email "cc12#example.com", I tried:
logging.info("CC email is %s" % message.cc[0])
but this gives:
CC email is c
so the result is treated as a string.
When I try
logging.info("CC email is %s" % list(message.cc)
I get
['c', 'c', '1', '2', '#', 'e', 'x', 'a', 'm', 'p', 'l', 'e', '.', 'c', 'o', 'm', ',', ' ', 'c', 'c', '1', '3', '#', 'e', 'x', 'a', 'm', 'p', 'l', 'e', '.', 'c', 'o', 'm', ',', ' ', 'c', 'c', '1', '4', '#', 'e', 'x', 'a', 'm', 'p', 'l', 'e', '.', 'c', 'o', 'm']
Again, it appears that message.cc returns string not list.
Do I need to use regex to get the emails? Any suggestions about what I am doing wrong? Thanks!
Try:
cc_list = message.cc.split(',')
The Message Fields documentation says:
cc
A recipient's email address (a string) or a list of email addresses to appear on the Cc: line in the message header.
cc is a string, so message.cc.split(", ")[0] gives the "cc12#example.com" you want.
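If there may be several addresses, splitting on ',' and stripping whitespace tolerates both ', ' and ',' separators:

cc_addresses = [addr.strip() for addr in message.cc.split(',')]
first_cc = cc_addresses[0]  # "cc12#example.com" in the example above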
