H2OAutoML throws libgomp exception in train step - python

I run H2O on a docker image using Python 3.6.3 and H2O 3.26.0.3.
import h2o
from h2o.automl import H2OAutoML
h2o.init()
In this step, initialization is successful and it prints the following information.
H2O cluster uptime: 01 secs
H2O cluster timezone: Europe/Istanbul
H2O data parsing timezone: UTC
H2O cluster version: 3.26.0.3
H2O cluster version age: 9 days
H2O cluster name: H2O_from_python_96273_8m5wyj
H2O cluster total nodes: 1
H2O cluster free memory: 26.67 Gb
H2O cluster total cores: 72
H2O cluster allowed cores: 72
H2O cluster status: accepting new members, healthy
H2O connection url: http://127.0.0.1:54321
H2O connection proxy: None
H2O internal security: False
H2O API Extensions: Amazon S3, XGBoost, Algos, AutoML, Core V3, Core V4
Python version: 3.6.3 final
Now, I will run AutoML but it is problematic.
hf = h2o.H2OFrame(x_train)
aml = H2OAutoML(max_runtime_secs=600)
aml.train(x = list(df.columns[:-1]), y = df.columns[-1], training_frame = hf)
I have the following error
ConnectionResetError Traceback (most recent call
last) ~/.local/lib/python3.6/site-packages/urllib3/connectionpool.py
in urlopen(self, method, url, body, headers, retries, redirect,
assert_same_host, timeout, pool_timeout, release_conn, chunked,
body_pos, **response_kw)
599 body=body, headers=headers,
--> 600 chunked=chunked)
601
~/.local/lib/python3.6/site-packages/urllib3/connectionpool.py in
_make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
383 # otherwise it looks like a programming error was the cause.
--> 384 six.raise_from(e, None)
385 except (SocketTimeout, BaseSSLError, SocketError) as e:
~/.local/lib/python3.6/site-packages/urllib3/packages/six.py in
raise_from(value, from_value)
~/.local/lib/python3.6/site-packages/urllib3/connectionpool.py in
_make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
379 try:
--> 380 httplib_response = conn.getresponse()
381 except Exception as e:
/opt/rh/rh-python36/root/usr/lib64/python3.6/http/client.py in
getresponse(self) 1330 try:
-> 1331 response.begin() 1332 except ConnectionError:
/opt/rh/rh-python36/root/usr/lib64/python3.6/http/client.py in
begin(self)
296 while True:
--> 297 version, status, reason = self._read_status()
298 if status != CONTINUE:
/opt/rh/rh-python36/root/usr/lib64/python3.6/http/client.py in
_read_status(self)
257 def _read_status(self):
--> 258 line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
259 if len(line) > _MAXLINE:
/opt/rh/rh-python36/root/usr/lib64/python3.6/socket.py in
readinto(self, b)
585 try:
--> 586 return self._sock.recv_into(b)
587 except timeout:
ConnectionResetError: [Errno 104] Connection reset by peer
During handling of the above exception, another exception occurred:
ProtocolError Traceback (most recent call
last) ~/.local/lib/python3.6/site-packages/requests/adapters.py in
send(self, request, stream, timeout, verify, cert, proxies)
448 retries=self.max_retries,
--> 449 timeout=timeout
450 )
~/.local/lib/python3.6/site-packages/urllib3/connectionpool.py in
urlopen(self, method, url, body, headers, retries, redirect,
assert_same_host, timeout, pool_timeout, release_conn, chunked,
body_pos, **response_kw)
637 retries = retries.increment(method, url, error=e, _pool=self,
--> 638 _stacktrace=sys.exc_info()[2])
639 retries.sleep()
~/.local/lib/python3.6/site-packages/urllib3/util/retry.py in
increment(self, method, url, response, error, _pool, _stacktrace)
367 if read is False or not self._is_method_retryable(method):
--> 368 raise six.reraise(type(error), error, _stacktrace)
369 elif read is not None:
~/.local/lib/python3.6/site-packages/urllib3/packages/six.py in
reraise(tp, value, tb)
684 if value.traceback is not tb:
--> 685 raise value.with_traceback(tb)
686 raise value
~/.local/lib/python3.6/site-packages/urllib3/connectionpool.py in
urlopen(self, method, url, body, headers, retries, redirect,
assert_same_host, timeout, pool_timeout, release_conn, chunked,
body_pos, **response_kw)
599 body=body, headers=headers,
--> 600 chunked=chunked)
601
~/.local/lib/python3.6/site-packages/urllib3/connectionpool.py in
_make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
383 # otherwise it looks like a programming error was the cause.
--> 384 six.raise_from(e, None)
385 except (SocketTimeout, BaseSSLError, SocketError) as e:
~/.local/lib/python3.6/site-packages/urllib3/packages/six.py in
raise_from(value, from_value)
~/.local/lib/python3.6/site-packages/urllib3/connectionpool.py in
_make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
379 try:
--> 380 httplib_response = conn.getresponse()
381 except Exception as e:
/opt/rh/rh-python36/root/usr/lib64/python3.6/http/client.py in
getresponse(self) 1330 try:
-> 1331 response.begin() 1332 except ConnectionError:
/opt/rh/rh-python36/root/usr/lib64/python3.6/http/client.py in
begin(self)
296 while True:
--> 297 version, status, reason = self._read_status()
298 if status != CONTINUE:
/opt/rh/rh-python36/root/usr/lib64/python3.6/http/client.py in
_read_status(self)
257 def _read_status(self):
--> 258 line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
259 if len(line) > _MAXLINE:
/opt/rh/rh-python36/root/usr/lib64/python3.6/socket.py in
readinto(self, b)
585 try:
--> 586 return self._sock.recv_into(b)
587 except timeout:
ProtocolError: ('Connection aborted.', ConnectionResetError(104,
'Connection reset by peer'))
During handling of the above exception, another exception occurred:
ConnectionError Traceback (most recent call
last) ~/.local/lib/python3.6/site-packages/h2o/backend/connection.py
in request(self, endpoint, data, json, filename, save_to)
404 headers=headers, timeout=self._timeout, stream=stream,
--> 405 auth=self._auth, verify=self._verify_ssl_cert, proxies=self._proxies)
406 self._log_end_transaction(start_time, resp)
~/.local/lib/python3.6/site-packages/requests/api.py in
request(method, url, **kwargs)
59 with sessions.Session() as session:
---> 60 return session.request(method=method, url=url, **kwargs)
61
~/.local/lib/python3.6/site-packages/requests/sessions.py in
request(self, method, url, params, data, headers, cookies, files,
auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert,
json)
532 send_kwargs.update(settings)
--> 533 resp = self.send(prep, **send_kwargs)
534
~/.local/lib/python3.6/site-packages/requests/sessions.py in
send(self, request, **kwargs)
645 # Send the request
--> 646 r = adapter.send(request, **kwargs)
647
~/.local/lib/python3.6/site-packages/requests/adapters.py in
send(self, request, stream, timeout, verify, cert, proxies)
497 except (ProtocolError, socket.error) as err:
--> 498 raise ConnectionError(err, request=request)
499
ConnectionError: ('Connection aborted.', ConnectionResetError(104,
'Connection reset by peer'))
During handling of the above exception, another exception occurred:
H2OConnectionError Traceback (most recent call
last) in
----> 1 aml.train(x = list(df.columns[:-1]), y = df.columns[-1], training_frame = hf)
~/.local/lib/python3.6/site-packages/h2o/automl/autoh2o.py in
train(self, x, y, training_frame, fold_column, weights_column,
validation_frame, leaderboard_frame, blending_frame)
443 poll_updates = ft.partial(self._poll_training_updates, verbosity=self._verbosity, state={})
444 try:
--> 445 self._job.poll(poll_updates=poll_updates)
446 finally:
447 poll_updates(self._job, 1)
~/.local/lib/python3.6/site-packages/h2o/job.py in poll(self,
poll_updates)
55 pb = ProgressBar(title=self._job_type + " progress", hidden=hidden)
56 if poll_updates:
---> 57 pb.execute(self._refresh_job_status, print_verbose_info=ft.partial(poll_updates, self))
58 else:
59 pb.execute(self._refresh_job_status)
~/.local/lib/python3.6/site-packages/h2o/utils/progressbar.py in
execute(self, progress_fn, print_verbose_info)
169 # Query the progress level, but only if it's time already
170 if self._next_poll_time <= now:
--> 171 res = progress_fn() # may raise StopIteration
172 assert_is_type(res, (numeric, numeric), numeric)
173 if not isinstance(res, tuple):
~/.local/lib/python3.6/site-packages/h2o/job.py in
_refresh_job_status(self)
92 def _refresh_job_status(self):
93 if self._poll_count <= 0: raise StopIteration("")
---> 94 jobs = h2o.api("GET /3/Jobs/%s" % self.job_key)
95 self.job = jobs["jobs"][0] if "jobs" in jobs else jobs["job"][0]
96 self.status = self.job["status"]
~/.local/lib/python3.6/site-packages/h2o/h2o.py in api(endpoint, data,
json, filename, save_to)
102 # type checks are performed in H2OConnection class
103 _check_connection()
--> 104 return h2oconn.request(endpoint, data=data, json=json, filename=filename, save_to=save_to)
105
106
~/.local/lib/python3.6/site-packages/h2o/backend/connection.py in
request(self, endpoint, data, json, filename, save_to)
413 else:
414 self._log_end_exception(e)
--> 415 raise H2OConnectionError("Unexpected HTTP error: %s" % e)
416 except requests.exceptions.Timeout as e:
417 self._log_end_exception(e)
H2OConnectionError: Unexpected HTTP error: ('Connection aborted.',
ConnectionResetError(104, 'Connection reset by peer'))
I suspect that the proxy might be the cause of this exception. When I add the proxy information to the environment, the exception message becomes "HTTP 500 INKApi Error" instead.
import os
os.environ['http_proxy']= ...
os.environ['https_proxy']= ...
JVM stdout log file dumps the following exception.
[thread 140335217821440 also had an error][thread 140335320467200 also
had an error] [thread 140335207294720 also had an error]
[thread 140335316256512 also had an error]# A fatal error has been detected by the Java Runtime Environment:
[thread 140335202031360 also had an error]
SIGSEGV (0xb) at pc=0x00007fa3276cdb8d, pid=51986, tid=0x00007fa2575f5700
JRE version: OpenJDK Runtime Environment (8.0_212-b04) (build 1.8.0_212-b04)
Java VM: OpenJDK 64-Bit Server VM (25.212-b04 mixed mode linux-amd64 compressed oops)
Problematic frame:
[thread 140335231506176 also had an error] C [libc.so.6+0x39b8d][thread 140335341520640 also had an error]
JVM stderr log file contains interesting logs
libgomp: Thread creation failed: Resource temporarily unavailable
* Error in `/usr/bin/java': free(): corrupted unsorted chunks: 0x00007efe342f0240 *
libgomp: Thread creation failed: Resource temporarily unavailable
Funnily enough, it runs successfully when I run the same code on my local machine. I suspect it might be because of the Docker configuration.

I spent hours on this problem, yet I managed to resolve it right after posting this question. It was a typical case of rubber duck debugging.
It seems that the engine consumed all of the server's resources and exceeded its limits. That explains the "Thread creation failed: Resource temporarily unavailable" message.
Limiting memory and number of threads solves this problem.
h2o.init(ip="127.0.0.1",max_mem_size_GB = 40, nthreads = 2)

Related

API requests aborted

First of all, I can't expose my API keys due to privacy reasons, sorry about that. Let me know how I could better explain the situation.
On high level, here is my Python code:
# Request the count_out report for each zone, one POST per zone id.
for x in range(0, len(zone_ids)):
    # Pause between calls to stay well under the 60-requests-per-minute rate limit.
    time.sleep(2)
    response = requests.post(
        f'https://vemcount.app/api/v3/report?source=zones&data={zone_ids[x]}&data_output=count_out&period=date&form_date_from=2020-01-04&form_date_to=2020-01-04&period_step=30min&show_hours_from=00:00&show_hours_to=23:45',
        headers=headers,
    )
The problem is that this code sometimes runs successfully and sometimes fails. When it fails, some IDs/requests have actually gone through; it just stops in the middle of the for loop at some seemingly random request. I'm pretty sure it hasn't exceeded the API rate limit of 60 requests per minute, as I have embedded time.sleep(2) in my for loop. Here is the message:
---------------------------------------------------------------------------
RemoteDisconnected Traceback (most recent call last)
~\Anaconda3\envs\sa\lib\site-packages\urllib3\connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
698 # Make the request on the httplib connection object.
--> 699 httplib_response = self._make_request(
700 conn,
~\Anaconda3\envs\sa\lib\site-packages\urllib3\connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
444 # Otherwise it looks like a bug in the code.
--> 445 six.raise_from(e, None)
446 except (SocketTimeout, BaseSSLError, SocketError) as e:
~\Anaconda3\envs\sa\lib\site-packages\urllib3\packages\six.py in raise_from(value, from_value)
~\Anaconda3\envs\sa\lib\site-packages\urllib3\connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
439 try:
--> 440 httplib_response = conn.getresponse()
441 except BaseException as e:
~\Anaconda3\envs\sa\lib\http\client.py in getresponse(self)
1346 try:
-> 1347 response.begin()
1348 except ConnectionError:
~\Anaconda3\envs\sa\lib\http\client.py in begin(self)
306 while True:
--> 307 version, status, reason = self._read_status()
308 if status != CONTINUE:
~\Anaconda3\envs\sa\lib\http\client.py in _read_status(self)
275 # sending a valid response.
--> 276 raise RemoteDisconnected("Remote end closed connection without"
277 " response")
RemoteDisconnected: Remote end closed connection without response
During handling of the above exception, another exception occurred:
ProtocolError Traceback (most recent call last)
~\Anaconda3\envs\sa\lib\site-packages\requests\adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
438 if not chunked:
--> 439 resp = conn.urlopen(
440 method=request.method,
~\Anaconda3\envs\sa\lib\site-packages\urllib3\connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
754
--> 755 retries = retries.increment(
756 method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
~\Anaconda3\envs\sa\lib\site-packages\urllib3\util\retry.py in increment(self, method, url, response, error, _pool, _stacktrace)
530 if read is False or not self._is_method_retryable(method):
--> 531 raise six.reraise(type(error), error, _stacktrace)
532 elif read is not None:
~\Anaconda3\envs\sa\lib\site-packages\urllib3\packages\six.py in reraise(tp, value, tb)
733 if value.__traceback__ is not tb:
--> 734 raise value.with_traceback(tb)
735 raise value
~\Anaconda3\envs\sa\lib\site-packages\urllib3\connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
698 # Make the request on the httplib connection object.
--> 699 httplib_response = self._make_request(
700 conn,
~\Anaconda3\envs\sa\lib\site-packages\urllib3\connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
444 # Otherwise it looks like a bug in the code.
--> 445 six.raise_from(e, None)
446 except (SocketTimeout, BaseSSLError, SocketError) as e:
~\Anaconda3\envs\sa\lib\site-packages\urllib3\packages\six.py in raise_from(value, from_value)
~\Anaconda3\envs\sa\lib\site-packages\urllib3\connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
439 try:
--> 440 httplib_response = conn.getresponse()
441 except BaseException as e:
~\Anaconda3\envs\sa\lib\http\client.py in getresponse(self)
1346 try:
-> 1347 response.begin()
1348 except ConnectionError:
~\Anaconda3\envs\sa\lib\http\client.py in begin(self)
306 while True:
--> 307 version, status, reason = self._read_status()
308 if status != CONTINUE:
~\Anaconda3\envs\sa\lib\http\client.py in _read_status(self)
275 # sending a valid response.
--> 276 raise RemoteDisconnected("Remote end closed connection without"
277 " response")
ProtocolError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))
During handling of the above exception, another exception occurred:
ConnectionError Traceback (most recent call last)
c:\Users\Vzhao\repos\SocialMediaAnalytics\2_etl_scripts\Vemcount_Historical_Data.py in
74 s = requests.Session()
75 s.mount('https://', MyAdapter())
---> 76 response = s.post(f'https://vemcount.app/api/v3/report?source=zones&data={zone_ids[x]}&data_output=count_out&period=date&form_date_from=2020-01-04&form_date_to=2020-01-04&period_step=30min&show_hours_from=00:00&show_hours_to=23:45', headers=headers)
77 # response = requests.post(f'https://vemcount.app/api/v3/report?source=zones&data={zone_ids[x]}&data_output=count_out&period=date&form_date_from=2020-01-02&form_date_to=2020-01-02&period_step=30min&show_hours_from=00:00&show_hours_to=23:45', headers=headers)
78 vemcount = json.loads(response.text)
~\Anaconda3\envs\sa\lib\site-packages\requests\sessions.py in post(self, url, data, json, **kwargs)
588 """
589
--> 590 return self.request('POST', url, data=data, json=json, **kwargs)
591
592 def put(self, url, data=None, **kwargs):
~\Anaconda3\envs\sa\lib\site-packages\requests\sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
540 }
541 send_kwargs.update(settings)
--> 542 resp = self.send(prep, **send_kwargs)
543
544 return resp
~\Anaconda3\envs\sa\lib\site-packages\requests\sessions.py in send(self, request, **kwargs)
653
654 # Send the request
--> 655 r = adapter.send(request, **kwargs)
656
657 # Total elapsed time of the request (approximately)
~\Anaconda3\envs\sa\lib\site-packages\requests\adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
496
497 except (ProtocolError, socket.error) as err:
--> 498 raise ConnectionError(err, request=request)
499
500 except MaxRetryError as e:
ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))

WinError 10060] A connection attempt failed for Flickr API

I'm new to Python and am learning about REST APIs from online material, which provided an example that uses the Flickr API.
The code worked in the online video, however when I tried to run the same code on my own computer environment (Windows, Python IDE installed, browser is Chrome), it gave me a time-out error 10060.
I also checked against the Flickr documentation to make sure all the parameters input are correct.
Anyone knows why and how I can solve it? Thank you.
import json
flickr_key = 'xxxxxxxxx' #my own key was keyed in here
def get_flickr_data(tags_string):
    """Search Flickr photos by tags and return the decoded JSON response.

    tags_string must be a comma separated string to work correctly.
    """
    baseurl = "https://api.flickr.com/services/rest/"
    # Query parameters for a flickr.photos.search call.
    params_diction = {
        "api_key": flickr_key,  # from the above global variable
        "tags": tags_string,
        "tag_mode": "all",
        "method": "flickr.photos.search",
        "per_page": 5,
        "media": "photos",
        "format": "json",
        "nojsoncallback": 1,
    }
    flickr_resp = requests.get(baseurl, params=params_diction, timeout=1800)
    # Useful for debugging: print the url! Paste the result into the browser to check it out...
    print(flickr_resp.url)
    return flickr_resp.json()
result_river_mts = get_flickr_data("river,mountains")
# Some code to open up a few photos that are tagged with the mountains and river tags...
photos = result_river_mts['photos']['photo']
for photo in photos:
owner = photo['owner']
photo_id = photo['id']
url = 'https://www.flickr.com/photos/{}/{}'.format(owner, photo_id)
print(url)
# webbrowser.open(url)
The error is like this:
---------------------------------------------------------------------------
TimeoutError Traceback (most recent call last)
D:\Python\Python Install\lib\site-packages\urllib3\connection.py in _new_conn(self)
168 try:
--> 169 conn = connection.create_connection(
170 (self._dns_host, self.port), self.timeout, **extra_kw
D:\Python\Python Install\lib\site-packages\urllib3\util\connection.py in create_connection(address, timeout, source_address, socket_options)
95 if err is not None:
---> 96 raise err
97
D:\Python\Python Install\lib\site-packages\urllib3\util\connection.py in create_connection(address, timeout, source_address, socket_options)
85 sock.bind(source_address)
---> 86 sock.connect(sa)
87 return sock
TimeoutError: [WinError 10060]
During handling of the above exception, another exception occurred:
NewConnectionError Traceback (most recent call last)
D:\Python\Python Install\lib\site-packages\urllib3\connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
698 # Make the request on the httplib connection object.
--> 699 httplib_response = self._make_request(
700 conn,
D:\Python\Python Install\lib\site-packages\urllib3\connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
381 try:
--> 382 self._validate_conn(conn)
383 except (SocketTimeout, BaseSSLError) as e:
D:\Python\Python Install\lib\site-packages\urllib3\connectionpool.py in _validate_conn(self, conn)
1009 if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
-> 1010 conn.connect()
1011
D:\Python\Python Install\lib\site-packages\urllib3\connection.py in connect(self)
352 # Add certificate verification
--> 353 conn = self._new_conn()
354 hostname = self.host
D:\Python\Python Install\lib\site-packages\urllib3\connection.py in _new_conn(self)
180 except SocketError as e:
--> 181 raise NewConnectionError(
182 self, "Failed to establish a new connection: %s" % e
NewConnectionError: <urllib3.connection.HTTPSConnection object at 0x00000000056C9430>: Failed to establish a new connection: [WinError 10060]
During handling of the above exception, another exception occurred:
MaxRetryError Traceback (most recent call last)
D:\Python\Python Install\lib\site-packages\requests\adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
438 if not chunked:
--> 439 resp = conn.urlopen(
440 method=request.method,
D:\Python\Python Install\lib\site-packages\urllib3\connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
754
--> 755 retries = retries.increment(
756 method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
D:\Python\Python Install\lib\site-packages\urllib3\util\retry.py in increment(self, method, url, response, error, _pool, _stacktrace)
573 if new_retry.is_exhausted():
--> 574 raise MaxRetryError(_pool, url, error or ResponseError(cause))
575
MaxRetryError: HTTPSConnectionPool(host='api.flickr.com', port=443): Max retries exceeded with url: /services/rest/?api_key='xxxxxxx' (#here masked by myself) &tags=river%2Cmountains&tag_mode=all&method=flickr.photos.search&per_page=5&media=photos&format=json&nojsoncallback=1 (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x00000000056C9430>: Failed to establish a new connection: [WinError 10060] '))
During handling of the above exception, another exception occurred:
ConnectionError Traceback (most recent call last)
<ipython-input-40-01e73ed2c8b4> in <module>
25 return flickr_resp.json()
26
---> 27 result_river_mts = get_flickr_data("river,mountains")
28
29 # Some code to open up a few photos that are tagged with the mountains and river tags...
<ipython-input-40-01e73ed2c8b4> in get_flickr_data(tags_string)
20 params_diction["format"] = "json"
21 params_diction["nojsoncallback"] = 1
---> 22 flickr_resp = requests.get(baseurl, params = params_diction, timeout=1800)
23 # Useful for debugging: print the url! Uncomment the below line to do so.
24 print(flickr_resp.url) # Paste the result into the browser to check it out...
D:\Python\Python Install\lib\site-packages\requests\api.py in get(url, params, **kwargs)
74
75 kwargs.setdefault('allow_redirects', True)
---> 76 return request('get', url, params=params, **kwargs)
77
78
D:\Python\Python Install\lib\site-packages\requests\api.py in request(method, url, **kwargs)
59 # cases, and look like a memory leak in others.
60 with sessions.Session() as session:
---> 61 return session.request(method=method, url=url, **kwargs)
62
63
D:\Python\Python Install\lib\site-packages\requests\sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
540 }
541 send_kwargs.update(settings)
--> 542 resp = self.send(prep, **send_kwargs)
543
544 return resp
D:\Python\Python Install\lib\site-packages\requests\sessions.py in send(self, request, **kwargs)
653
654 # Send the request
--> 655 r = adapter.send(request, **kwargs)
656
657 # Total elapsed time of the request (approximately)
D:\Python\Python Install\lib\site-packages\requests\adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
514 raise SSLError(e, request=request)
515
--> 516 raise ConnectionError(e, request=request)
517
518 except ClosedPoolError as e:
ConnectionError: HTTPSConnectionPool(host='api.flickr.com', port=443): Max retries exceeded with url: /services/rest/?api_key=xxxxxxxx(#here masked by myself)&tags=river%2Cmountains&tag_mode=all&method=flickr.photos.search&per_page=5&media=photos&format=json&nojsoncallback=1 (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x00000000056C9430>: Failed to establish a new connection: [WinError 10060] '))

Web scraping challenge

I'm learning how to use BeautifulSoup on a random challenge bracket (as an exercise because I would like to start scraping challenge brackets).
import requests
from bs4 import BeautifulSoup as bs
import pandas as pd
# Fetch the bracket page and parse the raw response body as HTML.
r=requests.get('https://smashchile.challonge.com/ss1')
# NOTE(review): no parser argument given — bs4 will guess one and may warn;
# passing "html.parser" explicitly would make this deterministic.
webpage= bs(r.content)
But I got an error (see at the bottom).
This is my first time web scraping and I would like to know more about the legal restrictions about it.
Terms of services says the following: (Link)
Use any robot, spider, scraper, or other automated means to access this Website or services for any purpose without our express written permission; however, this provision shall not apply to the indexing or updating of search engines.
Thanks in advance ;)
RemoteDisconnected Traceback (most recent call last)
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/urllib3/connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
705 headers=headers,
--> 706 chunked=chunked,
707 )
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
444 # Otherwise it looks like a bug in the code.
--> 445 six.raise_from(e, None)
446 except (SocketTimeout, BaseSSLError, SocketError) as e:
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/urllib3/packages/six.py in raise_from(value, from_value)
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
439 try:
--> 440 httplib_response = conn.getresponse()
441 except BaseException as e:
/usr/lib/python3.7/http/client.py in getresponse(self)
1335 try:
-> 1336 response.begin()
1337 except ConnectionError:
/usr/lib/python3.7/http/client.py in begin(self)
305 while True:
--> 306 version, status, reason = self._read_status()
307 if status != CONTINUE:
/usr/lib/python3.7/http/client.py in _read_status(self)
274 # sending a valid response.
--> 275 raise RemoteDisconnected("Remote end closed connection without"
276 " response")
RemoteDisconnected: Remote end closed connection without response
During handling of the above exception, another exception occurred:
ProtocolError Traceback (most recent call last)
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/requests/adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
448 retries=self.max_retries,
--> 449 timeout=timeout
450 )
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/urllib3/connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
755 retries = retries.increment(
--> 756 method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
757 )
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/urllib3/util/retry.py in increment(self, method, url, response, error, _pool, _stacktrace)
530 if read is False or not self._is_method_retryable(method):
--> 531 raise six.reraise(type(error), error, _stacktrace)
532 elif read is not None:
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/urllib3/packages/six.py in reraise(tp, value, tb)
733 if value.__traceback__ is not tb:
--> 734 raise value.with_traceback(tb)
735 raise value
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/urllib3/connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
705 headers=headers,
--> 706 chunked=chunked,
707 )
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
444 # Otherwise it looks like a bug in the code.
--> 445 six.raise_from(e, None)
446 except (SocketTimeout, BaseSSLError, SocketError) as e:
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/urllib3/packages/six.py in raise_from(value, from_value)
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
439 try:
--> 440 httplib_response = conn.getresponse()
441 except BaseException as e:
/usr/lib/python3.7/http/client.py in getresponse(self)
1335 try:
-> 1336 response.begin()
1337 except ConnectionError:
/usr/lib/python3.7/http/client.py in begin(self)
305 while True:
--> 306 version, status, reason = self._read_status()
307 if status != CONTINUE:
/usr/lib/python3.7/http/client.py in _read_status(self)
274 # sending a valid response.
--> 275 raise RemoteDisconnected("Remote end closed connection without"
276 " response")
ProtocolError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))
During handling of the above exception, another exception occurred:
ConnectionError Traceback (most recent call last)
<ipython-input-1-49ffef2d4435> in <module>
3 import pandas as pd
4
----> 5 r=requests.get('https://smashchile.challonge.com/ss1')
6 webpage= bs(r.content)
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/requests/api.py in get(url, params, **kwargs)
74
75 kwargs.setdefault('allow_redirects', True)
---> 76 return request('get', url, params=params, **kwargs)
77
78
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/requests/api.py in request(method, url, **kwargs)
59 # cases, and look like a memory leak in others.
60 with sessions.Session() as session:
---> 61 return session.request(method=method, url=url, **kwargs)
62
63
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/requests/sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
540 }
541 send_kwargs.update(settings)
--> 542 resp = self.send(prep, **send_kwargs)
543
544 return resp
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/requests/sessions.py in send(self, request, **kwargs)
653
654 # Send the request
--> 655 r = adapter.send(request, **kwargs)
656
657 # Total elapsed time of the request (approximately)
~/Desktop/Programming/JupNbEnv/lib/python3.7/site-packages/requests/adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
496
497 except (ProtocolError, socket.error) as err:
--> 498 raise ConnectionError(err, request=request)
499
500 except MaxRetryError as e:
ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))
To get past that, what you need is headers. Otherwise, the server (correctly) decides you're a bot and refuses the connection.
For example:
import requests
from bs4 import BeautifulSoup
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36",
}
webpage = BeautifulSoup(
requests.get('https://smashchile.challonge.com/ss1', headers=headers).content,
"html.parser",
).select_one(".user-profile-block>.details>*.-nopad")
print(webpage.getText(strip=True))
Output:
KLG | DDC | Keen
A side note: the website you're scraping is mostly rendered dynamically with JavaScript, so you won't get much out of the raw content — BeautifulSoup simply won't see the dynamically loaded parts.

Remote end closed connection without response (Python 3- Bugzilla)

I am currently working on a project that involves python-bugzilla module.
When I try to collect some bug data, I get the following error :
RemoteDisconnected : Remote end closed connection without response
api = Bugzilla(url)
product = ...
request = api.build_query(product=product, include_fields=["id"])
data = api.query(request)
ids = np.array([bug.id for bug in data]).reshape(-1)
n = ids.shape[0]
q = 500 #size of bug package
if q < n :
m = n%q
k = (n+q-m)/q
else:
k = n
ids_splitted = np.array_split(ids, k)
bugs = []
for ids_ in ids_splitted:
bugs = bugs + api.getbugs(ids_)
The complete error is :
---------------------------------------------------------------------------
RemoteDisconnected Traceback (most recent call last)
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/urllib3/connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
599 body=body, headers=headers,
--> 600 chunked=chunked)
601
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
383 # otherwise it looks like a programming error was the cause.
--> 384 six.raise_from(e, None)
385 except (SocketTimeout, BaseSSLError, SocketError) as e:
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/urllib3/packages/six.py in raise_from(value, from_value)
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
379 try:
--> 380 httplib_response = conn.getresponse()
381 except Exception as e:
~/anaconda3/envs/DataScience/lib/python3.7/http/client.py in getresponse(self)
1320 try:
-> 1321 response.begin()
1322 except ConnectionError:
~/anaconda3/envs/DataScience/lib/python3.7/http/client.py in begin(self)
295 while True:
--> 296 version, status, reason = self._read_status()
297 if status != CONTINUE:
~/anaconda3/envs/DataScience/lib/python3.7/http/client.py in _read_status(self)
264 # sending a valid response.
--> 265 raise RemoteDisconnected("Remote end closed connection without"
266 " response")
RemoteDisconnected: Remote end closed connection without response
During handling of the above exception, another exception occurred:
ProtocolError Traceback (most recent call last)
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/requests/adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
448 retries=self.max_retries,
--> 449 timeout=timeout
450 )
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/urllib3/connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
637 retries = retries.increment(method, url, error=e, _pool=self,
--> 638 _stacktrace=sys.exc_info()[2])
639 retries.sleep()
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/urllib3/util/retry.py in increment(self, method, url, response, error, _pool, _stacktrace)
366 if read is False or not self._is_method_retryable(method):
--> 367 raise six.reraise(type(error), error, _stacktrace)
368 elif read is not None:
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/urllib3/packages/six.py in reraise(tp, value, tb)
684 if value.__traceback__ is not tb:
--> 685 raise value.with_traceback(tb)
686 raise value
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/urllib3/connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
599 body=body, headers=headers,
--> 600 chunked=chunked)
601
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
383 # otherwise it looks like a programming error was the cause.
--> 384 six.raise_from(e, None)
385 except (SocketTimeout, BaseSSLError, SocketError) as e:
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/urllib3/packages/six.py in raise_from(value, from_value)
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
379 try:
--> 380 httplib_response = conn.getresponse()
381 except Exception as e:
~/anaconda3/envs/DataScience/lib/python3.7/http/client.py in getresponse(self)
1320 try:
-> 1321 response.begin()
1322 except ConnectionError:
~/anaconda3/envs/DataScience/lib/python3.7/http/client.py in begin(self)
295 while True:
--> 296 version, status, reason = self._read_status()
297 if status != CONTINUE:
~/anaconda3/envs/DataScience/lib/python3.7/http/client.py in _read_status(self)
264 # sending a valid response.
--> 265 raise RemoteDisconnected("Remote end closed connection without"
266 " response")
ProtocolError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))
During handling of the above exception, another exception occurred:
ConnectionError Traceback (most recent call last)
<ipython-input-9-820dd90f4151> in <module>
1 t0 = time()
2 request = api.build_query(product=product, include_fields=["id"])
----> 3 data = api.query(request)
4 ids = np.array([bug.id for bug in data]).reshape(-1)
5 n = ids.shape[0]
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/bugzilla/base.py in query(self, query)
1263 """
1264 try:
-> 1265 r = self._proxy.Bug.search(query)
1266 except Fault as e:
1267
~/anaconda3/envs/DataScience/lib/python3.7/xmlrpc/client.py in __call__(self, *args)
1110 return _Method(self.__send, "%s.%s" % (self.__name, name))
1111 def __call__(self, *args):
-> 1112 return self.__send(self.__name, args)
1113
1114 ##
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/bugzilla/transport.py in _ServerProxy__request(self, methodname, params)
102 # pylint: disable=no-member
103 ret = super(_BugzillaServerProxy,
--> 104 self)._ServerProxy__request(methodname, params)
105 # pylint: enable=no-member
106
~/anaconda3/envs/DataScience/lib/python3.7/xmlrpc/client.py in __request(self, methodname, params)
1450 self.__handler,
1451 request,
-> 1452 verbose=self.__verbose
1453 )
1454
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/bugzilla/transport.py in request(self, host, handler, request_body, verbose)
199 request_body = request_body.replace(b'\r', b'
')
200
--> 201 return self._request_helper(url, request_body)
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/bugzilla/transport.py in _request_helper(self, url, request_body)
162 try:
163 response = self.session.post(
--> 164 url, data=request_body, **self.request_defaults)
165
166 # We expect utf-8 from the server
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/requests/sessions.py in post(self, url, data, json, **kwargs)
579 """
580
--> 581 return self.request('POST', url, data=data, json=json, **kwargs)
582
583 def put(self, url, data=None, **kwargs):
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/requests/sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
531 }
532 send_kwargs.update(settings)
--> 533 resp = self.send(prep, **send_kwargs)
534
535 return resp
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/requests/sessions.py in send(self, request, **kwargs)
644
645 # Send the request
--> 646 r = adapter.send(request, **kwargs)
647
648 # Total elapsed time of the request (approximately)
~/anaconda3/envs/DataScience/lib/python3.7/site-packages/requests/adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
496
497 except (ProtocolError, socket.error) as err:
--> 498 raise ConnectionError(err, request=request)
499
500 except MaxRetryError as e:
ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))
Can someone help me fix this? It is really strange, as this code worked fine about two weeks ago.
EDIT: I ran the code several times and noticed that it can either run perfectly or fail partway through. So I assume the problem is not in my code but in something else. Since I am not an expert, I do not know what could be the cause of this — if someone can explain it to me, that would be great.
EDIT2: If anyone runs into the same issue: it is caused by server-side rate limiting. Some servers limit how frequently you may call them. To work around it, sleep for, say, one second between requests — that should do the job, though it will increase the total runtime.

How to fix 'Connection aborted.' error in Python with BeautifulSoup

I had been running this code daily for weeks with no error. This morning, it ran the for loop over 100 times properly, then gave a connection issue. Each time I have tried to run it since, it will run anywhere from 5 to 130 times, but always gives the connection error before completing.
I am still getting status codes of 200. I've seen some posts referencing 'memory leak' issues in Python, but I'm not sure how to figure out if that's the problem here. It's also strange because it had been working fine until today.
I have similar code for other pages on the same site that still runs correctly all the way through.
Here is the code:
import requests
from bs4 import BeautifulSoup
updates = []
print(f'Getting {total_timebanks} timebank details... ')
for timebank in range(len(timebanks)):
url = f"http://community.timebanks.org/{timebanks['slug'][timebank]}"
res = requests.get(url, headers=headers)
soup = BeautifulSoup(res.content, 'lxml')
update = {}
update['members'] = soup.find('div', {'class': 'views-field-field-num-users-value'}).span.text.strip().replace(',', '')
updates.append(update)
time.sleep(1)
And here is the full error message:
---------------------------------------------------------------------------
RemoteDisconnected Traceback (most recent call last)
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/urllib3/connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
599 body=body, headers=headers,
--> 600 chunked=chunked)
601
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
383 # otherwise it looks like a programming error was the cause.
--> 384 six.raise_from(e, None)
385 except (SocketTimeout, BaseSSLError, SocketError) as e:
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/urllib3/packages/six.py in raise_from(value, from_value)
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
379 try:
--> 380 httplib_response = conn.getresponse()
381 except Exception as e:
/anaconda3/envs/DSI-6/lib/python3.6/http/client.py in getresponse(self)
1330 try:
-> 1331 response.begin()
1332 except ConnectionError:
/anaconda3/envs/DSI-6/lib/python3.6/http/client.py in begin(self)
296 while True:
--> 297 version, status, reason = self._read_status()
298 if status != CONTINUE:
/anaconda3/envs/DSI-6/lib/python3.6/http/client.py in _read_status(self)
265 # sending a valid response.
--> 266 raise RemoteDisconnected("Remote end closed connection without"
267 " response")
RemoteDisconnected: Remote end closed connection without response
During handling of the above exception, another exception occurred:
ProtocolError Traceback (most recent call last)
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/requests/adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
448 retries=self.max_retries,
--> 449 timeout=timeout
450 )
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/urllib3/connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
637 retries = retries.increment(method, url, error=e, _pool=self,
--> 638 _stacktrace=sys.exc_info()[2])
639 retries.sleep()
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/urllib3/util/retry.py in increment(self, method, url, response, error, _pool, _stacktrace)
366 if read is False or not self._is_method_retryable(method):
--> 367 raise six.reraise(type(error), error, _stacktrace)
368 elif read is not None:
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/urllib3/packages/six.py in reraise(tp, value, tb)
684 if value.__traceback__ is not tb:
--> 685 raise value.with_traceback(tb)
686 raise value
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/urllib3/connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
599 body=body, headers=headers,
--> 600 chunked=chunked)
601
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
383 # otherwise it looks like a programming error was the cause.
--> 384 six.raise_from(e, None)
385 except (SocketTimeout, BaseSSLError, SocketError) as e:
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/urllib3/packages/six.py in raise_from(value, from_value)
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
379 try:
--> 380 httplib_response = conn.getresponse()
381 except Exception as e:
/anaconda3/envs/DSI-6/lib/python3.6/http/client.py in getresponse(self)
1330 try:
-> 1331 response.begin()
1332 except ConnectionError:
/anaconda3/envs/DSI-6/lib/python3.6/http/client.py in begin(self)
296 while True:
--> 297 version, status, reason = self._read_status()
298 if status != CONTINUE:
/anaconda3/envs/DSI-6/lib/python3.6/http/client.py in _read_status(self)
265 # sending a valid response.
--> 266 raise RemoteDisconnected("Remote end closed connection without"
267 " response")
ProtocolError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response',))
During handling of the above exception, another exception occurred:
ConnectionError Traceback (most recent call last)
<ipython-input-17-31257fee2c23> in <module>
5 for timebank in range(len(timebanks)):
6 url = f"http://community.timebanks.org/{timebanks['slug'][timebank]}"
----> 7 res = requests.get(url, headers=headers)
8 soup = BeautifulSoup(res.content, 'lxml')
9
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/requests/api.py in get(url, params, **kwargs)
73
74 kwargs.setdefault('allow_redirects', True)
---> 75 return request('get', url, params=params, **kwargs)
76
77
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/requests/api.py in request(method, url, **kwargs)
58 # cases, and look like a memory leak in others.
59 with sessions.Session() as session:
---> 60 return session.request(method=method, url=url, **kwargs)
61
62
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/requests/sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
531 }
532 send_kwargs.update(settings)
--> 533 resp = self.send(prep, **send_kwargs)
534
535 return resp
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/requests/sessions.py in send(self, request, **kwargs)
644
645 # Send the request
--> 646 r = adapter.send(request, **kwargs)
647
648 # Total elapsed time of the request (approximately)
/anaconda3/envs/DSI-6/lib/python3.6/site-packages/requests/adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
496
497 except (ProtocolError, socket.error) as err:
--> 498 raise ConnectionError(err, request=request)
499
500 except MaxRetryError as e:
ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response',))
The problem seems to have resolved itself. With no changes to the code, it is back to running as expected this morning.
I don't have much insight as to why I had connection errors yesterday, but it does seem to have been an issue with the site, not the code.
Thanks for the responses! For reference, I had also tried increasing sleep timer to 30, but that did not resolve the problem yesterday.

Categories