Website access with Python: Internal error - python

In my program, I need to access a website html and extract some French postal codes.
When I do this in my browser it is working, but when I try to access it with Python I can't stop having an internal error HTTP 500
Do you think it is the website blocking access or is it my program problem?
Thanks
Below is my code:
import requests
import urllib
import re
link = "https://annuaire.118712.fr/magasin/mcdonald-s_1"
f = urllib.request.urlopen(link)
myfile = f.read()
for result in re.findall(r"postalCode\": \"(\d{5})", str(myfile)):
print(result)
And below is the error I get:
---------------------------------------------------------------------------
HTTPError Traceback (most recent call last)
<ipython-input-2-76fbd165889e> in <module>
5
6 link = "https://annuaire.118712.fr/magasin/mcdonald-s_1"
----> 7 f = urllib.request.urlopen(link)
8 myfile = f.read()
9 for result in re.findall(r"postalCode\": \"(\d{5})", str(myfile)):
~\Anaconda3\lib\urllib\request.py in urlopen(url, data, timeout, cafile, capath, cadefault, context)
220 else:
221 opener = _opener
--> 222 return opener.open(url, data, timeout)
223
224 def install_opener(opener):
~\Anaconda3\lib\urllib\request.py in open(self, fullurl, data, timeout)
529 for processor in self.process_response.get(protocol, []):
530 meth = getattr(processor, meth_name)
--> 531 response = meth(req, response)
532
533 return response
~\Anaconda3\lib\urllib\request.py in http_response(self, request, response)
638 # request was successfully received, understood, and accepted.
639 if not (200 <= code < 300):
--> 640 response = self.parent.error(
641 'http', request, response, code, msg, hdrs)
642
~\Anaconda3\lib\urllib\request.py in error(self, proto, *args)
561 http_err = 0
562 args = (dict, proto, meth_name) + args
--> 563 result = self._call_chain(*args)
564 if result:
565 return result
~\Anaconda3\lib\urllib\request.py in _call_chain(self, chain, kind, meth_name, *args)
500 for handler in handlers:
501 func = getattr(handler, meth_name)
--> 502 result = func(*args)
503 if result is not None:
504 return result
~\Anaconda3\lib\urllib\request.py in http_error_302(self, req, fp, code, msg, headers)
753 fp.close()
754
--> 755 return self.parent.open(new, timeout=req.timeout)
756
757 http_error_301 = http_error_303 = http_error_307 = http_error_302
~\Anaconda3\lib\urllib\request.py in open(self, fullurl, data, timeout)
529 for processor in self.process_response.get(protocol, []):
530 meth = getattr(processor, meth_name)
--> 531 response = meth(req, response)
532
533 return response
~\Anaconda3\lib\urllib\request.py in http_response(self, request, response)
638 # request was successfully received, understood, and accepted.
639 if not (200 <= code < 300):
--> 640 response = self.parent.error(
641 'http', request, response, code, msg, hdrs)
642
~\Anaconda3\lib\urllib\request.py in error(self, proto, *args)
567 if http_err:
568 args = (dict, 'default', 'http_error_default') + orig_args
--> 569 return self._call_chain(*args)
570
571 # XXX probably also want an abstract factory that knows when it makes
~\Anaconda3\lib\urllib\request.py in _call_chain(self, chain, kind, meth_name, *args)
500 for handler in handlers:
501 func = getattr(handler, meth_name)
--> 502 result = func(*args)
503 if result is not None:
504 return result
~\Anaconda3\lib\urllib\request.py in http_error_default(self, req, fp, code, msg, hdrs)
647 class HTTPDefaultErrorHandler(BaseHandler):
648 def http_error_default(self, req, fp, code, msg, hdrs):
--> 649 raise HTTPError(req.full_url, code, msg, hdrs, fp)
650
651 class HTTPRedirectHandler(BaseHandler):
HTTPError: HTTP Error 500: Internal Server Error

use requests library instead urllib is more friendly.
That code works fine:
import requests
import re
link = "https://annuaire.118712.fr/magasin/mcdonald-s_1"
response = requests.get(url=link)
if response.ok:
data = response.text
for result in re.findall(r"postalCode\": \"(\d{5})", data):
print(result)
else:
print("Http Error: "+str(response.status_code))
exit(1)
Results:
01170
01700
01300
01800
01210
01200
01170
01700
01330
01000
01440
01000
01000
01500
01100
01300
01300
01300
01170
01710

Related

HTTP Error when running code to fetch file from github

I am trying to fetch and create a dataset from a dataset on GitHub based on code from hands on ml. The problem is however, when I try and run this code I keep getting an HTTP Error 404 error message. Not sure what could be causing this.
Here is a detailed traceback of the error message I am receiving:
---------------------------------------------------------------------------
HTTPError Traceback (most recent call last)
<ipython-input-19-6ada1818e178> in <module>
----> 1 fetch_housing()
<ipython-input-15-981f9394002a> in fetch_housing(housing_url, housing_path)
5 tgz_path = os.path.join(housing_path,'housing.tgz')
6 print(tgz_path)
----> 7 urllib.request.urlretrieve(housing_url,tgz_path)
8 housing_tgz = tarfile.open(tgz_path)
9 housing_tgz.extractall(path=housing_path)
~/anaconda3/lib/python3.7/urllib/request.py in urlretrieve(url, filename, reporthook, data)
245 url_type, path = splittype(url)
246
--> 247 with contextlib.closing(urlopen(url, data)) as fp:
248 headers = fp.info()
249
~/anaconda3/lib/python3.7/urllib/request.py in urlopen(url, data, timeout, cafile, capath, cadefault, context)
220 else:
221 opener = _opener
--> 222 return opener.open(url, data, timeout)
223
224 def install_opener(opener):
~/anaconda3/lib/python3.7/urllib/request.py in open(self, fullurl, data, timeout)
529 for processor in self.process_response.get(protocol, []):
530 meth = getattr(processor, meth_name)
--> 531 response = meth(req, response)
532
533 return response
~/anaconda3/lib/python3.7/urllib/request.py in http_response(self, request, response)
639 if not (200 <= code < 300):
640 response = self.parent.error(
--> 641 'http', request, response, code, msg, hdrs)
642
643 return response
~/anaconda3/lib/python3.7/urllib/request.py in error(self, proto, *args)
567 if http_err:
568 args = (dict, 'default', 'http_error_default') + orig_args
--> 569 return self._call_chain(*args)
570
571 # XXX probably also want an abstract factory that knows when it makes
~/anaconda3/lib/python3.7/urllib/request.py in _call_chain(self, chain, kind, meth_name, *args)
501 for handler in handlers:
502 func = getattr(handler, meth_name)
--> 503 result = func(*args)
504 if result is not None:
505 return result
~/anaconda3/lib/python3.7/urllib/request.py in http_error_default(self, req, fp, code, msg, hdrs)
647 class HTTPDefaultErrorHandler(BaseHandler):
648 def http_error_default(self, req, fp, code, msg, hdrs):
--> 649 raise HTTPError(req.full_url, code, msg, hdrs, fp)
650
651 class HTTPRedirectHandler(BaseHandler):
HTTPError: HTTP Error 404: Not Found
This is the code I am using
download_root = 'https://raw.githubusercontent.com/ageron/hanson-ml/master/'
housing_path = os.path.join('datasets','housing')
housing_url = download_root+'datasets/housing/housing.tgz'
def fetch_housing(housing_url = housing_url,housing_path=housing_path):
if not os.path.isdir(housing_path):
os.makedirs(housing_path)
tgz_path = os.path.join(housing_path,'housing.tgz')
urllib.request.urlretrieve(housing_url,tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()

Giving error while accessing data through urllib

I am working with solr and python.I use urllib library to get http data.
I write code as:
from urllib2 import *
connection = urlopen('http://localhost:8983/solr/data/select?indent=on&q=sender_name:*AXI*&wt=json')
It was working fine but when I apply more query filter string as follows as:
from urllib2 import *
connection = urlopen('http://localhost:8983/solr/data/select?indent=on&q=sender_name:*AXI* AND message:*Avbl*&wt=json')
I got error as:
HTTPError Traceback (most recent call last)
<ipython-input-22-6dad7f9847f1> in <module>()
----> 1 connection = urlopen('http://localhost:8983/solr/data/select?indent=on&q=sender_name:*AXI* AND message:*Avbl*&wt=json')
/usr/lib/python2.7/urllib2.pyc in urlopen(url, data, timeout, cafile, capath, cadefault, context)
152 else:
153 opener = _opener
--> 154 return opener.open(url, data, timeout)
155
156 def install_opener(opener):
/usr/lib/python2.7/urllib2.pyc in open(self, fullurl, data, timeout)
433 for processor in self.process_response.get(protocol, []):
434 meth = getattr(processor, meth_name)
--> 435 response = meth(req, response)
436
437 return response
/usr/lib/python2.7/urllib2.pyc in http_response(self, request, response)
546 if not (200 <= code < 300):
547 response = self.parent.error(
--> 548 'http', request, response, code, msg, hdrs)
549
550 return response
/usr/lib/python2.7/urllib2.pyc in error(self, proto, *args)
471 if http_err:
472 args = (dict, 'default', 'http_error_default') + orig_args
--> 473 return self._call_chain(*args)
474
475 # XXX probably also want an abstract factory that knows when it makes
/usr/lib/python2.7/urllib2.pyc in _call_chain(self, chain, kind, meth_name, *args)
405 func = getattr(handler, meth_name)
406
--> 407 result = func(*args)
408 if result is not None:
409 return result
/usr/lib/python2.7/urllib2.pyc in http_error_default(self, req, fp, code, msg, hdrs)
554 class HTTPDefaultErrorHandler(BaseHandler):
555 def http_error_default(self, req, fp, code, msg, hdrs):
--> 556 raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
557
558 class HTTPRedirectHandler(BaseHandler):
HTTPError: HTTP Error 400: Unknown Version
How can this error be resolved.
An URL does not contain space characters. You can replace space characters with "%20" when it is a query.

Python: HTTPError: HTTP Error 403: Bad Behavior

I am trying to read a web page to extract contents from it. Please find below the code.
url = "http://www.sanjamar.com/product-categories/bar/bar-tools/"
html = urlopen(url).read()
soup = BeautifulSoup(html)
print(soup)
The last time I used with a different website, it worked. This time its throwing the following error.
HTTPError Traceback (most recent call last)
<ipython-input-83-ccdefd422a61> in <module>()
1 url = "http://www.sanjamar.com/product-categories/bar/bar-tools/"
----> 2 html = urlopen(url).read()
3 soup = BeautifulSoup(html)
4 print(soup)
C:\Users\Santosh\Anaconda3\lib\urllib\request.py in urlopen(url, data,
timeout, cafile, capath, cadefault, context)
221 else:
222 opener = _opener
--> 223 return opener.open(url, data, timeout)
224
225 def install_opener(opener):
C:\Users\Santosh\Anaconda3\lib\urllib\request.py in open(self, fullurl,
data, timeout)
530 for processor in self.process_response.get(protocol, []):
531 meth = getattr(processor, meth_name)
--> 532 response = meth(req, response)
533
534 return response
C:\Users\Santosh\Anaconda3\lib\urllib\request.py in http_response(self,
request, response)
640 if not (200 <= code < 300):
641 response = self.parent.error(
--> 642 'http', request, response, code, msg, hdrs)
643
644 return response
C:\Users\Santosh\Anaconda3\lib\urllib\request.py in error(self, proto, *
args)
568 if http_err:
569 args = (dict, 'default', 'http_error_default') +
orig_args
--> 570 return self._call_chain(*args)
571
572 # XXX probably also want an abstract factory that knows when it
makes
C:\Users\Santosh\Anaconda3\lib\urllib\request.py in _call_chain(self,
chain,
kind, meth_name, *args)
502 for handler in handlers:
503 func = getattr(handler, meth_name)
--> 504 result = func(*args)
505 if result is not None:
506 return result
C:\Users\Santosh\Anaconda3\lib\urllib\request.py in http_error_default(self,
req, fp, code, msg, hdrs)
648 class HTTPDefaultErrorHandler(BaseHandler):
649 def http_error_default(self, req, fp, code, msg, hdrs):
--> 650 raise HTTPError(req.full_url, code, msg, hdrs, fp)
651
652 class HTTPRedirectHandler(BaseHandler):
HTTPError: HTTP Error 403: Bad Behavior
I guess the issue is the website is blocking python. If not please let me know a solution.
Thanks

what have I done wrong parsing html with python urllib2 and beautifulsoup

Trying to scrape some links from google, and learn python
import urllib2
from bs4 import BeautifulSoup
response = urllib2.urlopen('http://www.google.com.au/search?q=python')
html = response.read()
print html
response.close()
What have I done wrong? I get the following error?
---------------------------------------------------------------------------
HTTPError Traceback (most recent call last)
<ipython-input-4-d990999e71f4> in <module>()
9
10 import urllib2
---> 11 response = urllib2.urlopen('http://www.google.com.au/search?q=python')
12 html = response.read()
13 print html
C:\Python27\lib\urllib2.pyc in urlopen(url, data, timeout)
124 if _opener is None:
125 _opener = build_opener()
--> 126 return _opener.open(url, data, timeout)
127
128 def install_opener(opener):
C:\Python27\lib\urllib2.pyc in open(self, fullurl, data, timeout)
395 for processor in self.process_response.get(protocol, []):
396 meth = getattr(processor, meth_name)
--> 397 response = meth(req, response)
398
399 return response
C:\Python27\lib\urllib2.pyc in http_response(self, request, response)
508 if not (200 <= code < 300):
509 response = self.parent.error(
--> 510 'http', request, response, code, msg, hdrs)
511
512 return response
C:\Python27\lib\urllib2.pyc in error(self, proto, *args)
433 if http_err:
434 args = (dict, 'default', 'http_error_default') + orig_args
--> 435 return self._call_chain(*args)
436
437 # XXX probably also want an abstract factory that knows when it makes
C:\Python27\lib\urllib2.pyc in _call_chain(self, chain, kind, meth_name, *args)
367 func = getattr(handler, meth_name)
368
--> 369 result = func(*args)
370 if result is not None:
371 return result
C:\Python27\lib\urllib2.pyc in http_error_default(self, req, fp, code, msg, hdrs)
516 class HTTPDefaultErrorHandler(BaseHandler):
517 def http_error_default(self, req, fp, code, msg, hdrs):
--> 518 raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
519
520 class HTTPRedirectHandler(BaseHandler):
HTTPError: HTTP Error 403: Forbidden
It looks like google does not allow that type of requests
Try:Requests or mechanize
You can easily manipulate your request headers (user agent, etc..)
Check, which is easier and more appropriate for you

503 error when trying to access Google Patents using python

Earlier today I was able to pull data from Google Patents using the code below
import urllib2
url = 'http://www.google.com/search?tbo=p&q=ininventor:"John-Mudd"&hl=en&tbm=pts&source=lnt&tbs=ptso:us'
req = urllib2.Request(url, headers={'User-Agent' : "foobar"})
response = urllib2.urlopen(req)
Now when I go to run it I get the following 503 error. I had only looped through this code maybe 30 times on it (i'm trying to get all the patents owned by a list of 30 people).
HTTPError Traceback (most recent call last)
<ipython-input-4-01f83e2c218f> in <module>()
----> 1 response = urllib2.urlopen(req)
C:\Python27\lib\urllib2.pyc in urlopen(url, data, timeout)
124 if _opener is None:
125 _opener = build_opener()
--> 126 return _opener.open(url, data, timeout)
127
128 def install_opener(opener):
C:\Python27\lib\urllib2.pyc in open(self, fullurl, data, timeout)
404 for processor in self.process_response.get(protocol, []):
405 meth = getattr(processor, meth_name)
--> 406 response = meth(req, response)
407
408 return response
C:\Python27\lib\urllib2.pyc in http_response(self, request, response)
517 if not (200 <= code < 300):
518 response = self.parent.error(
--> 519 'http', request, response, code, msg, hdrs)
520
521 return response
C:\Python27\lib\urllib2.pyc in error(self, proto, *args)
436 http_err = 0
437 args = (dict, proto, meth_name) + args
--> 438 result = self._call_chain(*args)
439 if result:
440 return result
C:\Python27\lib\urllib2.pyc in _call_chain(self, chain, kind, meth_name, *args)
376 func = getattr(handler, meth_name)
377
--> 378 result = func(*args)
379 if result is not None:
380 return result
C:\Python27\lib\urllib2.pyc in http_error_302(self, req, fp, code, msg, headers)
623 fp.close()
624
--> 625 return self.parent.open(new, timeout=req.timeout)
626
627 http_error_301 = http_error_303 = http_error_307 = http_error_302
C:\Python27\lib\urllib2.pyc in open(self, fullurl, data, timeout)
404 for processor in self.process_response.get(protocol, []):
405 meth = getattr(processor, meth_name)
--> 406 response = meth(req, response)
407
408 return response
C:\Python27\lib\urllib2.pyc in http_response(self, request, response)
517 if not (200 <= code < 300):
518 response = self.parent.error(
--> 519 'http', request, response, code, msg, hdrs)
520
521 return response
C:\Python27\lib\urllib2.pyc in error(self, proto, *args)
442 if http_err:
443 args = (dict, 'default', 'http_error_default') + orig_args
--> 444 return self._call_chain(*args)
445
446 # XXX probably also want an abstract factory that knows when it makes
C:\Python27\lib\urllib2.pyc in _call_chain(self, chain, kind, meth_name, *args)
376 func = getattr(handler, meth_name)
377
--> 378 result = func(*args)
379 if result is not None:
380 return result
C:\Python27\lib\urllib2.pyc in http_error_default(self, req, fp, code, msg, hdrs)
525 class HTTPDefaultErrorHandler(BaseHandler):
526 def http_error_default(self, req, fp, code, msg, hdrs):
--> 527 raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
528
529 class HTTPRedirectHandler(BaseHandler):
HTTPError: HTTP Error 503: Service Unavailable
Google's TOS bans automated queries, sadly enough. It almost certainly detected that you were "up to no good."
source: https://support.google.com/websearch/answer/86640?hl=en
Shot in the dark guess:
Did you look to see if there was a "Retry-After header" in the response. It's a real possibility with 503.
From RFC 2616:
14.37 Retry-After
The Retry-After response-header field can be used with a 503 (Service
Unavailable) response to indicate how long the service is expected to
be unavailable to the requesting client. This field MAY also be used
with any 3xx (Redirection) response to indicate the minimum time the
user-agent is asked wait before issuing the redirected request. The
value of this field can be either an HTTP-date or an integer number of
seconds (in decimal) after the time of the response.
Retry-After = "Retry-After" ":" ( HTTP-date | delta-seconds )
Two examples of its use are
Retry-After: Fri, 31 Dec 1999 23:59:59 GMT
Retry-After: 120
In the latter example, the delay is 2 minutes.

Categories