The code I've been building hit this error today, and I can't figure out what's wrong or why it's happening.
The error:
Traceback (most recent call last):
File "D:/Drive/Outros/Python/Projects/Simple_Dict_bot.py", line 63, in loop
last_msg()
File "D:/Drive/Outros/Python/Projects/Simple_Dict_bot.py", line 35, in last_msg
loop()
File "D:/Drive/Outros/Python/Projects/Simple_Dict_bot.py", line 63, in loop
last_msg()
File "D:/Drive/Outros/Python/Projects/Simple_Dict_bot.py", line 35, in last_msg
loop()
File "D:/Drive/Outros/Python/Projects/Simple_Dict_bot.py", line 61, in loop
open_chatroom()
File "D:/Drive/Outros/Python/Projects/Simple_Dict_bot.py", line 21, in open_chatroom
WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CLASS_NAME, '_1ZMSM')))
File "D:\py\lib\site-packages\selenium\webdriver\support\wait.py", line 71, in until
value = method(self._driver)
File "D:\py\lib\site-packages\selenium\webdriver\support\expected_conditions.py", line 64, in __call__
return _find_element(driver, self.locator)
File "D:\py\lib\site-packages\selenium\webdriver\support\expected_conditions.py", line 411, in _find_element
return driver.find_element(*by)
File "D:\py\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 978, in find_element
'value': value})['value']
File "D:\py\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 319, in execute
response = self.command_executor.execute(driver_command, params)
File "D:\py\lib\site-packages\selenium\webdriver\remote\remote_connection.py", line 374, in execute
return self._request(command_info[0], url, body=data)
File "D:\py\lib\site-packages\selenium\webdriver\remote\remote_connection.py", line 397, in _request
resp = self._conn.request(method, url, body=body, headers=headers)
File "D:\py\lib\site-packages\urllib3\request.py", line 80, in request
method, url, fields=fields, headers=headers, **urlopen_kw
File "D:\py\lib\site-packages\urllib3\request.py", line 171, in request_encode_body
return self.urlopen(method, url, **extra_kw)
File "D:\py\lib\site-packages\urllib3\poolmanager.py", line 330, in urlopen
response = conn.urlopen(method, u.request_uri, **kw)
File "D:\py\lib\site-packages\urllib3\connectionpool.py", line 672, in urlopen
chunked=chunked,
File "D:\py\lib\site-packages\urllib3\connectionpool.py", line 421, in _make_request
six.raise_from(e, None)
File "<string>", line 3, in raise_from
File "D:\py\lib\site-packages\urllib3\connectionpool.py", line 416, in _make_request
httplib_response = conn.getresponse()
File "D:\py\lib\http\client.py", line 1321, in getresponse
response.begin()
File "D:\py\lib\http\client.py", line 320, in begin
self.headers = self.msg = parse_headers(self.fp)
File "D:\py\lib\http\client.py", line 214, in parse_headers
return email.parser.Parser(_class=_class).parsestr(hstring)
File "D:\py\lib\email\parser.py", line 68, in parsestr
return self.parse(StringIO(text), headersonly=headersonly)
File "D:\py\lib\email\parser.py", line 57, in parse
feedparser.feed(data)
File "D:\py\lib\email\feedparser.py", line 176, in feed
self._call_parse()
File "D:\py\lib\email\feedparser.py", line 180, in _call_parse
self._parse()
File "D:\py\lib\email\feedparser.py", line 295, in _parsegen
if self._cur.get_content_maintype() == 'message':
File "D:\py\lib\email\message.py", line 594, in get_content_maintype
ctype = self.get_content_type()
File "D:\py\lib\email\message.py", line 578, in get_content_type
value = self.get('content-type', missing)
File "D:\py\lib\email\message.py", line 471, in get
return self.policy.header_fetch_parse(k, v)
File "D:\py\lib\email\_policybase.py", line 316, in header_fetch_parse
return self._sanitize_header(name, value)
File "D:\py\lib\email\_policybase.py", line 287, in _sanitize_header
if _has_surrogates(value):
File "D:\py\lib\email\utils.py", line 57, in _has_surrogates
s.encode()
RecursionError: maximum recursion depth exceeded while calling a Python object
Process finished with exit code 1
This will get the last unread message.
def last_msg():
    try:
        post = driver.find_elements_by_class_name("_12pGw")
        ultimo = len(post) - 1
        texto = post[ultimo].find_element_by_css_selector(
            "span.selectable-text").text
        return texto
    except Exception:
        loop()
This creates a dictionary from a pandas DataFrame and replies to the user with the matching answer.
def conversation():
    df = pd.read_excel(r'D:\Drive\Outros\Python\Project\Dict.xlsx', error_bad_lines=False, encoding='utf-8-sig')
    d = df.set_index('msg')['reply'].to_dict()
    try:
        input_field = driver.find_element_by_class_name("_3u328")
        try:
            x = next(v for k, v in d.items() if last_msg() in k)
        except StopIteration:
            x = 'Não entendi, este comando é invalido'
        input_field.send_keys(x)
        time.sleep(1)
        driver.find_element_by_class_name("_3M-N-").click()
        try:
            driver.find_element_by_class_name("_2zCfw").send_keys('Lonely bot')
            driver.find_element_by_xpath("//span[@title = '{}']".format('Lonely bot')).click()
            driver.find_element_by_class_name("_2heX1").click()
            WebDriverWait(driver, 600).until(EC.invisibility_of_element_located((By.NAME, "status-time")))
        except TimeoutException:
            loop()
    except NoSuchElementException:
        loop()
Here I defined a loop to keep the bot online.
def loop():
    try:
        open_chatroom()
        time.sleep(1)
        last_msg()
        conversation()
        driver.refresh()
    except TimeoutException:
        loop()
This is the first read and reply.
while True:
    try:
        open_chatroom()
        time.sleep(1)
        last_msg()
        conversation()
        driver.refresh()
    except TimeoutException:
        loop()
I've never experienced this before. How can I change my code so my loop doesn't break with this error?
In your exception handlers, the function loop ends up calling itself. Every TimeoutException creates a new stack frame, and those frames are never released, eventually causing a RecursionError.
Looking at the last few items in the traceback, it seems that loop and last_msg are calling each other repeatedly, so there is a recursion that involves two routines instead of just one function calling itself. There's also a similar possible cycle through the functions loop and conversation.
The goal is to keep the chatbot running all the time, even if you hit an error of some kind, but the problem arises when loop gets called again inside the exception handlers. That starts another copy of loop inside last_msg while the first copy of loop is still running. So last_msg calls loop, which in turn calls last_msg again, and none of the calls ever finish; they just pile up until you run out of stack space.
The way to solve this is to just return from the function where you catch the exception, and to replace the loop function with a while True: loop (just like the last code block in the original question).
Catching the exceptions prevents them from stopping the while loop. If something does fail, the while loop will keep trying again forever, but it does so inside one function call rather than piling up new recursive calls.
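A minimal sketch of that shape, reusing the names from the question (the selectors and waits are unchanged assumptions from the original code):

def last_msg():
    try:
        post = driver.find_elements_by_class_name("_12pGw")
        return post[-1].find_element_by_css_selector("span.selectable-text").text
    except Exception:
        return None  # give up on this pass; the outer while loop retries

while True:
    try:
        open_chatroom()
        time.sleep(1)
        last_msg()
        conversation()
        driver.refresh()
    except TimeoutException:
        continue  # no recursive loop() call; just start the next iteration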
Related
So basically this all stems from a previous question I had, so I'll post that question and my edit in their entirety below:
So I have a script I've been working with for a few days trying to get a list of emails from a csv I have, but now I've run into this roadblock. Here is the code:
import sys
try:
    import urllib.request as urllib2
except ImportError:
    import urllib2
import re
import csv

list1 = []
list2 = []
list3 = []

def addList():
    with open('file.csv', 'rt') as f:
        reader = csv.reader(f)
        for row in reader:
            for s in row:
                list2.append(s)

def getAddress(url):
    http = "http://"
    https = "https://"
    if http in url:
        return url
    elif https in url:
        return url
    else:
        url = "http://" + url
        return url

def parseAddress(url):
    global list3
    try:
        website = urllib2.urlopen(getAddress(url))
        html = website.read()
        addys = re.findall('''[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?''', html, flags=re.IGNORECASE)
        global list1
        list1.append(addys)
    except urllib2.HTTPError as err:
        print ("Cannot retrieve URL: HTTP Error Code: "), err.code
        list3.append(url)
    except urllib2.URLError as err:
        print ("Cannot retrive URL: ") + err.reason[1]
        list3.append(url)

def execute():
    global list2
    addList()
    totalNum = len(list2)
    atNum = 1
    for s in list2:
        parseAddress(s)
        print ("Processing ") + str(atNum) + (" out of ") + str(totalNum)
        atNum = atNum + 1
    print ("Completed. Emails parsed: ") + str(len(list1)) + "."

### MAIN
def main():
    global list2
    execute()
    global list1
    myFile = open("finishedFile.csv", "w+")
    wr = csv.writer(myFile, quoting=csv.QUOTE_ALL)
    for s in list1:
        wr.writerow(s)
    myFile.close

    global list3
    failFile = open("failedSites.csv", "w+")
    write = csv.writer(failFile, quoting=csv.QUOTE_ALL)
    for j in list3:
        write.writerow(j)
    failFile.close

main()
and when I run it I get this error:
Traceback (most recent call last):
File "pagescanner.py", line 85, in <module>
main()
File "pagescanner.py", line 71, in main
execute()
File "pagescanner.py", line 60, in execute
parseAddress(s)
File "pagescanner.py", line 42, in parseAddress
addys = re.findall('''[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?''', html, flags=re.IGNORECASE)
File "/usr/lib/python3.5/re.py", line 213, in findall
return _compile(pattern, flags).findall(string)
TypeError: cannot use a string pattern on a bytes-like object
So I've figured out that I need to decode the HTML bytes into a string before running the regex, and Tyler's answer below helped me do so, but now I'm getting this error:
Traceback (most recent call last):
File "/usr/lib/python3.5/urllib/request.py", line 1254, in do_open
h.request(req.get_method(), req.selector, req.data, headers)
File "/usr/lib/python3.5/http/client.py", line 1107, in request
self._send_request(method, url, body, headers)
File "/usr/lib/python3.5/http/client.py", line 1152, in _send_request
self.endheaders(body)
File "/usr/lib/python3.5/http/client.py", line 1103, in endheaders
self._send_output(message_body)
File "/usr/lib/python3.5/http/client.py", line 934, in _send_output
self.send(msg)
File "/usr/lib/python3.5/http/client.py", line 877, in send
self.connect()
File "/usr/lib/python3.5/http/client.py", line 849, in connect
(self.host,self.port), self.timeout, self.source_address)
File "/usr/lib/python3.5/socket.py", line 712, in create_connection
raise err
File "/usr/lib/python3.5/socket.py", line 703, in create_connection
sock.connect(sa)
OSError: [Errno 22] Invalid argument
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "pagescanner.py", line 39, in parseAddress
website = urllib2.urlopen(getAddress(url))
File "/usr/lib/python3.5/urllib/request.py", line 163, in urlopen
return opener.open(url, data, timeout)
File "/usr/lib/python3.5/urllib/request.py", line 466, in open
response = self._open(req, data)
File "/usr/lib/python3.5/urllib/request.py", line 484, in _open
'_open', req)
File "/usr/lib/python3.5/urllib/request.py", line 444, in _call_chain
result = func(*args)
File "/usr/lib/python3.5/urllib/request.py", line 1282, in http_open
return self.do_open(http.client.HTTPConnection, req)
File "/usr/lib/python3.5/urllib/request.py", line 1256, in do_open
raise URLError(err)
urllib.error.URLError: <urlopen error [Errno 22] Invalid argument>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "pagescanner.py", line 85, in <module>
main()
File "pagescanner.py", line 71, in main
execute()
File "pagescanner.py", line 60, in execute
parseAddress(s)
File "pagescanner.py", line 51, in parseAddress
print ("Cannot retrive URL: ") + err.reason[1]
TypeError: 'OSError' object is not subscriptable
Does this mean that one of the urls from the list isn't a valid url? I thought I had finally removed all of the bad urls from my csv file, but I may need to take another look.
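A hedged sketch of the two likely fixes, assuming UTF-8 pages: decode the response bytes before running the regex (re can't apply a str pattern to bytes), and print err.reason without indexing it, since in Python 3 it is an exception object rather than a tuple. EMAIL_RE stands in for the long regex above:

def parseAddress(url):
    try:
        website = urllib2.urlopen(getAddress(url))
        # read() returns bytes in Python 3; decode before matching a str pattern
        html = website.read().decode('utf-8', errors='ignore')
        addys = re.findall(EMAIL_RE, html, flags=re.IGNORECASE)
        list1.append(addys)
    except urllib2.HTTPError as err:
        print("Cannot retrieve URL: HTTP Error Code:", err.code)
        list3.append(url)
    except urllib2.URLError as err:
        # err.reason is an OSError here, not a tuple, so don't index it
        print("Cannot retrieve URL:", err.reason)
        list3.append(url)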
I have a cron job which calls a vendor API to fetch the companies list. Once the data is fetched, we store it in Cloud Datastore as shown in the code below. For the last two days, when I trigger the cron job I have started seeing the errors below. When I debug the code locally I don't see this error.
company_list = cron.rest_client.load(config, "companies", '')

if not company_list:
    logging.info("Company list is empty")
    return "Ok"

for row in company_list:
    company_repository.save(row, original_data_source, actual_data_source)
Repository code
def save(dto, org_ds, act_dp):
    try:
        key = 'FIN/%s' % (dto['ticker'])
        company = CompanyInfo(id=key)
        company.stock_code = key
        company.ticker = dto['ticker']
        company.name = dto['name']
        company.original_data_source = org_ds
        company.actual_data_provider = act_dp
        company.put()
        return company
    except Exception:
        logging.exception("company_repository: error occurred saving the company record")
        raise
Error
DeadlineExceededError: The overall deadline for responding to the HTTP request was exceeded.
Exception details
Traceback (most recent call last):
File "/base/data/home/runtimes/python27_experiment/python27_lib/versions/1/google/appengine/runtime/wsgi.py", line 267, in Handle
result = handler(dict(self._environ), self._StartResponse)
File "/base/data/home/apps/p~svasti-173418/internal-api:20170808t160537.403249868819304873/lib/flask/app.py", line 1836, in __call__
return self.wsgi_app(environ, start_response)
File "/base/data/home/apps/p~svasti-173418/internal-api:20170808t160537.403249868819304873/lib/flask/app.py", line 1817, in wsgi_app
response = self.full_dispatch_request()
File "/base/data/home/apps/p~svasti-173418/internal-api:20170808t160537.403249868819304873/lib/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
rv = self.dispatch_request()
File "/base/data/home/apps/p~svasti-173418/internal-api:20170808t160537.403249868819304873/lib/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/base/data/home/apps/p~svasti-173418/internal-api:20170808t160537.403249868819304873/internal/cron/company_list.py", line 21, in run
company_repository.save(row,original_data_source, actual_data_source)
File "/base/data/home/apps/p~svasti-173418/internal-api:20170808t160537.403249868819304873/internal/repository/company_repository.py", line 13, in save
company.put()
File "/base/data/home/runtimes/python27_experiment/python27_lib/versions/1/google/appengine/ext/ndb/model.py", line 3458, in _put
return self._put_async(**ctx_options).get_result()
File "/base/data/home/runtimes/python27_experiment/python27_lib/versions/1/google/appengine/ext/ndb/tasklets.py", line 383, in get_result
self.check_success()
File "/base/data/home/runtimes/python27_experiment/python27_lib/versions/1/google/appengine/ext/ndb/tasklets.py", line 378, in check_success
self.wait()
File "/base/data/home/runtimes/python27_experiment/python27_lib/versions/1/google/appengine/ext/ndb/tasklets.py", line 362, in wait
if not ev.run1():
File "/base/data/home/runtimes/python27_experiment/python27_lib/versions/1/google/appengine/ext/ndb/eventloop.py", line 268, in run1
delay = self.run0()
File "/base/data/home/runtimes/python27_experiment/python27_lib/versions/1/google/appengine/ext/ndb/eventloop.py", line 248, in run0
_logging_debug('rpc: %s.%s', rpc.service, rpc.method)
File "/base/data/home/runtimes/python27_experiment/python27_lib/versions/1/google/appengine/api/apiproxy_stub_map.py", line 453, in service
@property
DeadlineExceededError: The overall deadline for responding to the HTTP request was exceeded.
Has your company list been getting bigger?
How many entities are you trying to put?
Try saving them as a batch instead of sequentially in a loop. Remove company.put() from def save(dto, org_ds, act_dp): so it just returns the unsaved entity, and use ndb.put_multi() afterwards instead.
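A sketch of what save() then becomes, based on the repository code from the question (it only builds and returns the entity):

def save(dto, org_ds, act_dp):
    key = 'FIN/%s' % (dto['ticker'])
    company = CompanyInfo(id=key)
    company.stock_code = key
    company.ticker = dto['ticker']
    company.name = dto['name']
    company.original_data_source = org_ds
    company.actual_data_provider = act_dp
    return company  # no company.put() here; the caller batches the writes

The cron handler then collects the entities and puts them in batches: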
company_list = cron.rest_client.load(config, "companies", '')

if not company_list:
    logging.info("Company list is empty")
    return "Ok"

company_objs = []
for row in company_list:
    company_objs.append(company_repository.save(row, original_data_source, actual_data_source))
    # put 500 at a time
    if len(company_objs) >= 500:
        ndb.put_multi(company_objs)
        company_objs = []

# put any remainders
if len(company_objs) > 0:
    ndb.put_multi(company_objs)
My answer is based on the one Alex gave, but runs async.
I've replaced put_multi() with put_multi_async().
By replacing the call to put_multi() with a call to its async equivalent put_multi_async(), the application can do other things right away instead of blocking on put_multi().
I also added the @ndb.toplevel decorator.
This decorator tells the handler not to exit until its asynchronous requests have finished.
If your data grows bigger, you may want to look deeper into the deferred library. It can be used to respawn a task every X batches with the rest of your unprocessed data; see the sketch after the code below.
@ndb.toplevel
def fetch_companies_list():
    company_list = cron.rest_client.load(config, "companies", '')

    if not company_list:
        logging.info("Company list is empty")
        return "Ok"

    company_objs = []
    for row in company_list:
        company_objs.append(company_repository.save(row, original_data_source, actual_data_source))
        # put 500 at a time
        if len(company_objs) >= 500:
            ndb.put_multi_async(company_objs)
            company_objs = []

    # put any remainders
    if len(company_objs) > 0:
        ndb.put_multi_async(company_objs)
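And a rough sketch of the deferred idea, in case the list outgrows a single request. deferred.defer() pickles the function and its arguments and runs them in a separate task with its own deadline; company_repository, original_data_source and actual_data_source are assumed to be module-level names as in the question:

from google.appengine.ext import deferred, ndb

BATCH_SIZE = 500

def process_batch(rows):
    # Runs in its own request, so the cron handler's deadline no longer applies.
    company_objs = [company_repository.save(row, original_data_source, actual_data_source)
                    for row in rows]
    ndb.put_multi(company_objs)

def fetch_companies_list():
    company_list = cron.rest_client.load(config, "companies", '')
    if not company_list:
        logging.info("Company list is empty")
        return "Ok"
    # Fan the work out in batches instead of doing it all in this request.
    for i in range(0, len(company_list), BATCH_SIZE):
        deferred.defer(process_batch, company_list[i:i + BATCH_SIZE])
    return "Ok"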
I am currently developing a game. This game stores data in an SQLite database. I'm using dataset to manage the database, so I don't have to worry about SQL queries. I have a method that accesses the database to update player info:
def updatePlayerInfo(channel, info):  # Context at https://github.com/DuckHunt-discord/DuckHunt-Discord/blob/master/database.py#L33
    table = getChannelTable(channel)
    table.upsert(info, ["id_"])
    # An UPSERT is a smart combination of insert and update.
    # If rows with matching keys exist they will be updated, otherwise a new row is inserted in the table.
This function works fine for almost everything. Only one thing creates an error: using munAP_ as a column name (it stores only integer timestamps)!
Other columns work the same way but aren't affected by this bug at all.
The exception raised is the following:
Ignoring exception in on_message
Traceback (most recent call last):
File "/usr/local/lib/python3.4/dist-packages/discord/client.py", line 245, in _run_event
yield from getattr(self, event)(*args, **kwargs)
File "./main.py", line 1022, in on_message
if database.getStat(message.channel, message.author, "chargeurs",
File "/home/cloudbot/discord-bot/database.py", line 45, in setStat
updatePlayerInfo(channel, dict_)
File "/home/cloudbot/discord-bot/database.py", line 35, in updatePlayerInfo
table.upsert(info, ["id_"])
File "/usr/local/lib/python3.4/dist-packages/dataset/persistence/table.py", line 185, in upsert
row_count = self.update(row, keys, ensure=ensure, types=types)
File "/usr/local/lib/python3.4/dist-packages/dataset/persistence/table.py", line 154, in update
rp = self.database.executable.execute(stmt)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/engine/base.py", line 1991, in execute
return connection.execute(statement, *multiparams, **params)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/engine/base.py", line 914, in execute
return meth(self, multiparams, params)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/elements.py", line 323, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/engine/base.py", line 1003, in _execute_clauseelement
inline=len(distilled_params) > 1)
File "<string>", line 1, in <lambda>
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/elements.py", line 494, in compile
return self._compiler(dialect, bind=bind, **kw)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/elements.py", line 500, in _compiler
return dialect.statement_compiler(dialect, self, **kw)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/compiler.py", line 395, in __init__
Compiled.__init__(self, dialect, statement, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/compiler.py", line 190, in __init__
self.string = self.process(self.statement, **compile_kwargs)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/compiler.py", line 213, in process
return obj._compiler_dispatch(self, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/visitors.py", line 81, in _compiler_dispatch
return meth(self, **kw)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/compiler.py", line 1958, in visit_update
crud_params = crud._get_crud_params(self, update_stmt, **kw)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/crud.py", line 109, in _get_crud_params
(", ".join("%s" % c for c in check))
sqlalchemy.exc.CompileError: Unconsumed column names: munAP_
https://github.com/DuckHunt-discord/DuckHunt-Discord/issues/8
I already tried changing the column name (it was munAP before), but that changed nothing!
What else can I try? I suspect the problem is in my code, but maybe it's dataset's fault?
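One hedged thing to try: "Unconsumed column names" from SQLAlchemy usually means the UPDATE statement referenced a column the table doesn't have yet, so you could create the column explicitly before the upsert. A sketch, where the Integer type is an assumption based on the integer timestamps:

from sqlalchemy import Integer

def updatePlayerInfo(channel, info):
    table = getChannelTable(channel)
    # Make sure every key in info exists as a column before upserting.
    for column in info:
        if column not in table.columns:
            table.create_column(column, Integer)  # type is an assumption
    table.upsert(info, ["id_"])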
Currently using Python 3.5.2 and Celery 3.1.23
@celery.task(bind=True)
def test(self, player, team, region):
    print('test', player, team, region)

def update_player_accounts(self):
    try:
        teams = client.get_pro_teams()
        for team, team_data in teams.items():
            for player, player_data in team_data['members'].items():
                print(player, team, team_data['region'])
                self.test.delay(
                    player,
                    team,
                    team_data['region'],
                )
I'm iterating through a nested dictionary of strings, teams. My understanding of this code is that I'm creating Celery tasks in the nested for loop to print my parameters. The above code throws a maximum recursion depth exceeded error. However, it works fine if I just remove one of the parameters in test(), and it doesn't matter which one I remove.
Also, the code works if I concatenate an empty string to an argument, like so:

self.test.delay(
    player + '',
    team,
    team_data['region'],
)

Why does this happen? I'm only passing strings to the Celery task, which should be straightforward.
Edit 1:
Supplying an example of teams:
{
    'Last Kings': {
        'members': {
            'Primoo': {
                'url': '/professional/resume/primoo/',
                'lane': 'jng'
            },
            'Badmilk': {
                'url': '/professional/resume/badmilk/',
                'lane': 'sup'
            },
            'Nipphu': {
                'url': '/professional/resume/nipphu/',
                'lane': 'top'
            },
            'SryNotSry': {
                'url': None,
                'lane': 'adc'
            },
            'Rakyz': {
                'url': '/professional/resume/rakyz/',
                'lane': 'mid'
            }
        },
        'region': 'las'
    },
    'Dire Wolves': {
        ....
    }
}
Edit 2:
Traceback (most recent call last):
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\serialization.py", line 55, in _reraise_errors
yield
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\serialization.py", line 164, in dumps
payload = encoder(data)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\serialization.py", line 356, in pickle_dumps
return dumper(obj, protocol=pickle_protocol)
RecursionError: maximum recursion depth exceeded
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\Jung\Documents\Projects\hq\services\player_service.py", line 84, in update_player_accounts
team_data['region']
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\celery\app\task.py", line 453, in delay
return self.apply_async(args, kwargs)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\celery\app\task.py", line 560, in apply_async
**dict(self._get_exec_options(), **options)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\celery\app\base.py", line 354, in send_task
reply_to=reply_to or self.oid, **options
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\celery\app\amqp.py", line 305, in publish_task
**kwargs
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\messaging.py", line 165, in publish
compression, headers)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\messaging.py", line 241, in _prepare
body) = dumps(body, serializer=serializer)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\serialization.py", line 164, in dumps
payload = encoder(data)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\contextlib.py", line 77, in __exit__
self.gen.throw(type, value, traceback)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\serialization.py", line 59, in _reraise_errors
reraise(wrapper, wrapper(exc), sys.exc_info()[2])
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\five.py", line 131, in reraise
raise value.with_traceback(tb)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\serialization.py", line 55, in _reraise_errors
yield
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\serialization.py", line 164, in dumps
payload = encoder(data)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\serialization.py", line 356, in pickle_dumps
return dumper(obj, protocol=pickle_protocol)
kombu.exceptions.EncodeError: maximum recursion depth exceeded
I've decided to post the entire stack trace as it is not that long.
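A hedged guess based on the player + '' observation above: if those keys are not plain str objects (for example, a string subclass from an HTML parser that keeps a reference to its parent document), pickling one can recurse through the whole object graph. Coercing to plain strings before .delay() would sidestep that:

self.test.delay(
    str(player),              # force plain str; a string subclass can drag
    str(team),                # its parent object graph into the pickle
    str(team_data['region']),
)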
I'm making a POST request to an API; the code looks like this:
@gen.coroutine
def call():
    ...
    response = yield AsyncHTTPClient().fetch(
        HTTPRequest(
            url='https://api.mywebsite.com/v1/users',
            headers=headers,
            method='POST',
            body=json.dumps(body),
            validate_cert=False
        )
    )
    print response, response.body

if __name__ == "__main__":
    tornado.ioloop.IOLoop.current().run_sync(call)
The server responds the first time with 201 Created and the second time with 200 OK.
But with this code I get the error below on the first request; the second request works.
Traceback (most recent call last):
File "t.py", line 49, in <module>
tornado.ioloop.IOLoop.current().run_sync(call)
File "/usr/lib/python2.7/dist-packages/tornado/ioloop.py", line 389, in run_sync
return future_cell[0].result()
File "/usr/lib/python2.7/dist-packages/tornado/concurrent.py", line 129, in result
raise_exc_info(self.__exc_info)
File "/usr/lib/python2.7/dist-packages/tornado/stack_context.py", line 302, in wrapped
ret = fn(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/tornado/gen.py", line 574, in inner
self.set_result(key, result)
File "/usr/lib/python2.7/dist-packages/tornado/gen.py", line 500, in set_result
self.run()
File "/usr/lib/python2.7/dist-packages/tornado/gen.py", line 529, in run
yielded = self.gen.throw(*exc_info)
File "t.py", line 43, in call
validate_cert=False
File "/usr/lib/python2.7/dist-packages/tornado/gen.py", line 520, in run
next = self.yield_point.get_result()
File "/usr/lib/python2.7/dist-packages/tornado/gen.py", line 409, in get_result
return self.runner.pop_result(self.key).result()
File "/usr/lib/python2.7/dist-packages/tornado/concurrent.py", line 131, in result
return super(TracebackFuture, self).result(timeout=timeout)
File "/usr/lib/python2.7/dist-packages/concurrent/futures/_base.py", line 401, in result
return self.__get_result()
File "/usr/lib/python2.7/dist-packages/concurrent/futures/_base.py", line 360, in __get_result
raise self._exception
AssertionError
It looks like you are building the synchronous HTTPRequest object yourself inside the coroutine; try passing the URL and options directly to the async fetch() instead:
import json

from tornado import gen
from tornado.httpclient import AsyncHTTPClient

@gen.coroutine
def call():
    # body and headers as defined in the question
    http_client = AsyncHTTPClient()
    try:
        response = yield http_client.fetch(
            'https://api.mywebsite.com/v1/users',
            method='POST',
            body=json.dumps(body),
            headers=headers,
            request_timeout=5,
        )
        print(response, response.body)
    except Exception as e:
        print('get_request error: {0}'.format(e))
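For completeness, the entry point from the question stays the same (assuming the same IOLoop setup):

if __name__ == "__main__":
    tornado.ioloop.IOLoop.current().run_sync(call)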