This error occurs with pynsq 0.4.2.
STACKTRACE
2013-07-30 15:03:43,205 ERROR [ip-10-114-195-89:4150:nsq_msg_handler] failed to handle_message()
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/nsq/Reader.py", line 367, in _data_callback
self._handle_message(conn, message)
File "/usr/local/lib/python2.7/dist-packages/nsq/Reader.py", line 291, in _handle_message
return message.requeue()
File "/usr/local/lib/python2.7/dist-packages/nsq/nsq.py", line 47, in requeue
self.respond(REQ, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/nsq/Reader.py", line 211, in _message_responder
self._requeue(conn, message, time_ms=kwargs.get('time_ms', -1))
File "/usr/local/lib/python2.7/dist-packages/nsq/Reader.py", line 222, in _requeue
return self.finish(conn, message)
AttributeError: 'Reader' object has no attribute 'finish'
CODE
def connect_nsq(self):
    r = nsq.Reader(message_handler=self.nsq_msg_handler,
                   lookupd_http_addresses=["127.0.0.1:4161"],
                   topic="test_topic", channel="test_channel",
                   max_in_flight=500)
    nsq.run()

# callback
def nsq_msg_handler(self, message):
    try:
        before_ts = get_utc_now_ts()
        json_data = json.loads(message.body)
        my_data = json_data["key1"]
        my_data = json_data["key2"]
        my_data = json_data["key3"]
        after_ts = get_utc_now_ts()
        delta = after_ts - before_ts
        logger.debug("took %f seconds for json_data _id: %s" % (delta, json_data["_id"]))
    except Exception as reason:
        print reason, traceback.format_exc()
        return False
    return True
This issue was fixed in the latest release of pynsq, 0.5.0.
Here's the bug report: https://github.com/bitly/pynsq/issues/44
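If you are still on 0.4.2, upgrading (assuming a pip install) picks up the fix:
pip install --upgrade pynsq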
Related
While scanning a bunch of websites with the function below, I received an error (see below). Is there an except clause I should add to handle this error, or is something wrong with the try/except in my function?
function:
import pandas as pd
import requests
from bs4 import BeautifulSoup
import re
import io
import requests.exceptions
import time
import asyncio
from concurrent.futures import ProcessPoolExecutor, as_completed

df = pd.read_csv('myScan.csv')
urls = df.T.values.tolist()[2]
results = {}
status = {}

async def scrape(url):
    try:
        r = requests.get(url, timeout=(3, 6))
        r.raise_for_status()
        soup = BeautifulSoup(r.content, 'html.parser')
        if soup.body:
            data = {
                "coming soon": soup.body.findAll(text=re.compile("coming soon", re.I)),
                "Opening Soon": soup.body.findAll(text=re.compile("Opening Soon", re.I)),
                "Under Construction": soup.body.findAll(text=re.compile("Under Construction", re.I)),
                "Currently Unavailable": soup.body.findAll(text=re.compile("Currently Unavailable", re.I)),
                "button_2": soup.findAll(text=re.compile('button_2.js'))}
            results[url] = data
    except (requests.exceptions.ConnectionError, requests.exceptions.Timeout, requests.exceptions.MissingSchema):
        status[url] = "Connection Error"
    except requests.exceptions.HTTPError:
        status[url] = "Http Error"
    except requests.exceptions.TooManyRedirects:
        status[url] = "Redirects"
    except requests.exceptions.RequestException as err:
        status[url] = "Fatal Error: " + err + url  # concatenating the exception object is what raises the TypeError in the trace below
    else:
        status[url] = "OK"

async def main():
    await asyncio.wait([scrape(url) for url in urls])

loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()

comingList = []
openingList = []
underList = []
button_2 = []
statusList = []

for url in urls:
    if not results.get(url):
        statusList.append(status.get(url))
        comingList.append("-")
        openingList.append("-")
        underList.append("-")
        button_2.append("-")
    else:
        statusList.append(status.get(url))
        comingList.append("x" if len(results[url].get("coming soon")) > 0 else "-")
        openingList.append("x" if len(results[url].get("Opening Soon")) > 0 else "-")
        underList.append("x" if len(results[url].get("Under Construction")) > 0 else "-")
        button_2.append("x" if len(results[url].get("button_2")) > 0 else "-")

df["comingSoon"] = pd.DataFrame(comingList, columns=['comingSoon'])
df["openingSoon"] = pd.DataFrame(openingList, columns=['openingSoon'])
df["underConstruction"] = pd.DataFrame(underList, columns=['underConstruction'])
df["button_2"] = pd.DataFrame(button_2, columns=['button_2'])
df['status'] = pd.DataFrame(statusList, columns=['Status'])
df.to_csv('myScanCompleted.csv', index=False)
Error:
Task exception was never retrieved
future: <Task finished name='Task-43943' coro=<scrape() done, defined at crawler.py:69> exception=TypeError('can only concatenate str (not "ChunkedEncodingError") to str')>
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/urllib3/response.py", line 697, in _update_chunk_length
self.chunk_left = int(line, 16)
ValueError: invalid literal for int() with base 16: b''
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/urllib3/response.py", line 438, in _error_catcher
yield
File "/usr/local/lib/python3.9/site-packages/urllib3/response.py", line 764, in read_chunked
self._update_chunk_length()
File "/usr/local/lib/python3.9/site-packages/urllib3/response.py", line 701, in _update_chunk_length
raise InvalidChunkLength(self, line)
urllib3.exceptions.InvalidChunkLength: InvalidChunkLength(got length b'', 0 bytes read)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/requests/models.py", line 753, in generate
for chunk in self.raw.stream(chunk_size, decode_content=True):
File "/usr/local/lib/python3.9/site-packages/urllib3/response.py", line 572, in stream
for line in self.read_chunked(amt, decode_content=decode_content):
File "/usr/local/lib/python3.9/site-packages/urllib3/response.py", line 793, in read_chunked
self._original_response.close()
File "/usr/local/Cellar/python#3.9/3.9.0_5/Frameworks/Python.framework/Versions/3.9/lib/python3.9/contextlib.py", line 135, in __exit__
self.gen.throw(type, value, traceback)
File "/usr/local/lib/python3.9/site-packages/urllib3/response.py", line 455, in _error_catcher
raise ProtocolError("Connection broken: %r" % e, e)
urllib3.exceptions.ProtocolError: ("Connection broken: InvalidChunkLength(got length b'', 0 bytes read)", InvalidChunkLength(got length b'', 0 bytes read))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "crawler.py", line 71, in scrape
r = requests.get(url, timeout=(3, 6))
File "/usr/local/lib/python3.9/site-packages/requests/api.py", line 76, in get
return request('get', url, params=params, **kwargs)
File "/usr/local/lib/python3.9/site-packages/requests/api.py", line 61, in request
return session.request(method=method, url=url, **kwargs)
File "/usr/local/lib/python3.9/site-packages/requests/sessions.py", line 542, in request
resp = self.send(prep, **send_kwargs)
File "/usr/local/lib/python3.9/site-packages/requests/sessions.py", line 697, in send
r.content
File "/usr/local/lib/python3.9/site-packages/requests/models.py", line 831, in content
self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
File "/usr/local/lib/python3.9/site-packages/requests/models.py", line 756, in generate
raise ChunkedEncodingError(e)
requests.exceptions.ChunkedEncodingError: ("Connection broken: InvalidChunkLength(got length b'', 0 bytes read)", InvalidChunkLength(got length b'', 0 bytes read))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "crawler.py", line 89, in scrape
status[url] = "Fatal Error: " + err + url
TypeError: can only concatenate str (not "ChunkedEncodingError") to str
I encountered the same error. I can't speak to why, but switching the parser from html.parser to lxml fixed it for me.
Might be useful: difference between parsers
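For reference, the only change needed in scrape() above is the second argument to BeautifulSoup; lxml is a third-party package, so install it first (e.g. pip install lxml):
soup = BeautifulSoup(r.content, 'lxml')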
My task (it checks the payment status in Sberbank; if there is no capture, it retries the check):
from ....celeryconf import app
from . import client as sberbank
from ...models import Payment, Transaction

@app.task(bind=True, default_retry_delay=60, time_limit=1200)
def check_status_sberbank_task(self, order_id, connection_params):
    sberbank_client = sberbank.Client(auth=(connection_params['login'], connection_params['password']),
                                      sandbox=connection_params['sandbox_mode'])
    response = sberbank_client.payment.get_status(order_id=order_id)
    txn = Transaction.objects.get(token=order_id)
    if response['actionCode'] == 0:
        txn.is_success = True
        txn.save()
        payment = Payment.objects.get(pk=txn.payment_id)
        payment.charge_status = 'fully-charged'
        payment.captured_amount = payment.total
        payment.save()
        return 'Success pay on Sberbank for ' + str(order_id)
    else:
        self.retry(countdown=60)
In the log file I have:
ERROR celery.app.trace Task saleor.payment.gateways.sberbank.tasks.check_status_sberbank_task[bb384815-4a5b-49d7-bc29-114707f072b1] raised unexpected: RuntimeError('Never call result.get() within a task!\nSee http://docs.celeryq.org/en/latest/userguide/tasks.html#task-synchronous-subtasks\n',) [PID:26869:Thread-825]
Traceback (most recent call last):
  File "/home/korolev/saleor/lib/python3.6/site-packages/celery/app/trace.py", line 385, in trace_task
    R = retval = fun(*args, **kwargs)
  File "/home/korolev/saleor/saleor/payment/gateways/sberbank/tasks.py", line 26, in check_status_sberbank_task
    self.retry(countdown=60)
  File "/home/korolev/saleor/lib/python3.6/site-packages/celery/app/task.py", line 715, in retry
    S.apply().get()
  File "/home/korolev/saleor/lib/python3.6/site-packages/celery/result.py", line 1015, in get
    assert_will_not_block()
  File "/home/korolev/saleor/lib/python3.6/site-packages/celery/result.py", line 41, in assert_will_not_block
    raise RuntimeError(E_WOULDBLOCK)
RuntimeError: Never call result.get() within a task! See http://docs.celeryq.org/en/latest/userguide/tasks.html#task-synchronous-subtasks
How do I fix this error?
I set up a try/except in my code, but it appears my exception was not correct, because it did not catch the error.
I am using an exception from a module, and perhaps I didn't import it correctly? Here is my code:
import logging
import fhirclient.models.bundle as b
from fhirclient.server import FHIRUnauthorizedException

logging.disable(logging.WARNING)

def get_all_resources(resource, struct, smart):
    '''Perform a search on a resource type and get all resource entries from all returned bundles.\n
    This function takes all paginated bundles into consideration.'''
    if smart.ready == False:
        smart.reauthorize
    search = resource.where(struct)
    bundle = search.perform(smart.server)
    resources = [entry.resource for entry in bundle.entry or []]
    next_url = _get_next_url(bundle.link)
    while next_url != None:
        try:
            json_dict = smart.server.request_json(next_url)
        except FHIRUnauthorizedException:
            smart.reauthorize
            continue
        bundle = b.Bundle(json_dict)
        resources += [entry.resource for entry in bundle.entry or []]
        next_url = _get_next_url(bundle.link)
    return resources
Now when I ran the code, I got the following error:
Traceback (most recent call last):
File "code.py", line 79, in <module>
main()
File "code.py", line 42, in main
reports = get_all_resources(dr.DiagnosticReport, search, smart)
File "somepath/fhir_tools/resource.py", line 23, in get_all_resources
json_dict = smart.server.request_json(next_url)
File "/usr/local/lib/python3.6/dist-packages/fhirclient/server.py", line 153, in request_json
res = self._get(path, headers, nosign)
File "/usr/local/lib/python3.6/dist-packages/fhirclient/server.py", line 181, in _get
self.raise_for_status(res)
File "/usr/local/lib/python3.6/dist-packages/fhirclient/server.py", line 256, in raise_for_status
raise FHIRUnauthorizedException(response)
server.FHIRUnauthorizedException: <Response [401]>
Shouldn't my exception catch this?
I am trying to run the first example, Get Campaigns, through the Python SDK (the Google Ads API, not AdWords).
I got an error that did not describe how to solve the problem: google.api_core.exceptions.DeadlineExceeded: 504 Deadline Exceeded
"""This example illustrates how to get all campaigns.
To add campaigns, run add_campaigns.py.
"""
from __future__ import absolute_import
import argparse
import six
import sys
import google.ads.google_ads.client
_DEFAULT_PAGE_SIZE = 10
def main(client, customer_id, page_size):
ga_service = client.get_service('GoogleAdsService', version='v1')
query = ('SELECT campaign.id, campaign.name FROM campaign '
'ORDER BY campaign.id')
results = ga_service.search(customer_id, query=query, page_size=page_size)
try:
for row in results:
print('Campaign with ID %d and name "%s" was found.'
% (row.campaign.id.value, row.campaign.name.value))
except google.ads.google_ads.errors.GoogleAdsException as ex:
print('Request with ID "%s" failed with status "%s" and includes the '
'following errors:' % (ex.request_id, ex.error.code().name))
for error in ex.failure.errors:
print('\tError with message "%s".' % error.message)
if error.location:
for field_path_element in error.location.field_path_elements:
print('\t\tOn field: %s' % field_path_element.field_name)
sys.exit(1)
if __name__ == '__main__':
google_ads_client = (google.ads.google_ads.client.GoogleAdsClient
.load_from_storage("google-ads.yaml"))
main(google_ads_client, "customer_id", _DEFAULT_PAGE_SIZE)
and got the error:
Traceback (most recent call last):
File "c:/PythonProjects/google_ads_api_test/google-ads-python-master/google-ads-python-master/examples/basic_operations/get_campaigns.py", line 68, in <module>
main(google_ads_client, "customer_id", _DEFAULT_PAGE_SIZE)
File "c:/PythonProjects/google_ads_api_test/google-ads-python-master/google-ads-python-master/examples/basic_operations/get_campaigns.py", line 40, in main
for row in results:
File "C:\Anaconda3\lib\site-packages\google\api_core\page_iterator.py", line 204, in _items_iter
for page in self._page_iter(increment=False):
File "C:\Anaconda3\lib\site-packages\google\api_core\page_iterator.py", line 235, in _page_iter
page = self._next_page()
File "C:\Anaconda3\lib\site-packages\google\api_core\page_iterator.py", line 526, in _next_page
response = self._method(self._request)
File "C:\Anaconda3\lib\site-packages\google\api_core\gapic_v1\method.py", line 143, in __call__
return wrapped_func(*args, **kwargs)
File "C:\Anaconda3\lib\site-packages\google\api_core\retry.py", line 270, in retry_wrapped_func
on_error=on_error,
File "C:\Anaconda3\lib\site-packages\google\api_core\retry.py", line 179, in retry_target
return target()
File "C:\Anaconda3\lib\site-packages\google\api_core\timeout.py", line 214, in func_with_timeout
return func(*args, **kwargs)
File "C:\Anaconda3\lib\site-packages\google\api_core\grpc_helpers.py", line 59, in error_remapped_callable
six.raise_from(exceptions.from_grpc_error(exc), exc)
File "<string>", line 3, in raise_from
google.api_core.exceptions.DeadlineExceeded: 504 Deadline Exceeded
Is something wrong with the call?
I believe you simply need to increase your timeout value in your configuration.
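A minimal sketch of that, assuming your release of the generated client accepts the standard google.api_core per-call timeout keyword on search() (this kwarg and its default vary between google-ads-python releases):
results = ga_service.search(customer_id, query=query, page_size=page_size,
                            timeout=300)  # assumed kwarg; per-attempt deadline in seconds, tune as needed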
I keep getting this error in my program and I have no idea how to fix it. Here's where the error occurs.
def check_file():
    for read in range(6):
        length = len(linecache.getline('status.py', read))
        f = linecache.getline('status.py', read)[0:length-1]
        print(f)
        if read >= 6:
            print("5")
        else:
            file[read-1][1] = f
    return file

def change_state(change_till, state):  # changes tills' status
    global file
    data = ''
    status = check_file()
    status[change_till-1][1] = state
    file = open('status.py', 'w')
    file.write(status[0][1]+'\n'+status[1][1]+'\n'+status[2][1]+'\n'+status[3][1]+'\n'+status[4][1])
    file.close()
    return state
I run the code and this error comes up:
Traceback (most recent call last):
File "C:\Users\rhysj\OneDrive\Documents\Rhys Morgan Jones\My Projects\Coding\Python\Months\December 2016\aldi\till.py", line 151, in <module>
start()
File "C:\Users\rhysj\OneDrive\Documents\Rhys Morgan Jones\My Projects\Coding\Python\Months\December 2016\aldi\till.py", line 144, in start
state = check_status(this_till-1)
File "C:\Users\rhysj\OneDrive\Documents\Rhys Morgan Jones\My Projects\Coding\Python\Months\December 2016\aldi\till.py", line 58, in check_status
status = check_file()
File "C:\Users\rhysj\OneDrive\Documents\Rhys Morgan Jones\My Projects\Coding\Python\Months\December 2016\aldi\till.py", line 21, in check_file
file[read-1][1] = f
TypeError: '_io.TextIOWrapper' object is not subscriptable.
Does anyone know why this happens and how to fix it?