How to use the requests module to skip connection timeout urls - python

Hi, how can I use the requests module to go through a bunch of URLs, and if a URL in the list takes too long to load or hits a connection timeout, skip that particular URL and move on to the next one?
import requests
import time

def req():
    with open('demofile.txt', 'r') as http:
        for url in http.readlines():
            req = url.strip()
            print(req)
            page = requests.get("http://" + req, verify=False)
            if page.status_code == 400:
                break
            else:
                continue
            time.sleep(1)

You can catch the exception that is raised on a timeout and continue with the next request in the finally block:
import requests
import logging

timeout = 0.00001
try:
    response = requests.get(url="https://google.com", timeout=timeout)
except requests.exceptions.ConnectTimeout as e:
    logging.error("Time out!")
finally:
    # continue request here
    print("hello")
# output,
ERROR:root:Time out!
hello
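Applied to the loop in the question, a minimal sketch could look like this (the 5-second timeout is an arbitrary choice, and demofile.txt is assumed to hold one host per line as above):
import requests
import time

def req():
    with open('demofile.txt', 'r') as http:
        for url in http.readlines():
            target = "http://" + url.strip()
            try:
                page = requests.get(target, timeout=5, verify=False)
            except (requests.exceptions.ConnectTimeout,
                    requests.exceptions.ReadTimeout,
                    requests.exceptions.ConnectionError):
                # skip this URL and move on to the next one
                print("skipping", target)
                continue
            print(target, page.status_code)
            time.sleep(1)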

Related

Checking website response within x seconds

Good day. The problem I am facing is that I want to check whether my website is up or not. This is the sample pseudocode:
Check(website.com)
if checking_time > 10 seconds:
    print "No response received"
else:
    print "Site is up"
I already tried the code below, but it is not working:
try:
    response = urllib.urlopen("http://insurance.contactnumbersph.com").getcode()
    time.sleep(5)
    if response == "" or response == "403":
        print "No response"
    else:
        print "ok"
If the website is not up and running, you will get a connection refused error and no status code is actually returned. So you can catch the error in Python with simple try: and except: blocks.
import requests

URL = 'http://some-url-where-there-is-no-server'
try:
    resp = requests.get(URL)
except Exception as e:
    # handle here
    print(e)  # for example
You can also retry up to 10 times, once per second, and stop as soon as a request succeeds:
import requests
import time

URL = 'http://some-url'
canCheck = False
counts = 0
gotConnected = False
while counts < 10:
    try:
        resp = requests.get(URL)
        gotConnected = True
        break
    except Exception as e:
        counts += 1
        time.sleep(1)
The result will be available in the gotConnected flag, which you can use later to take the appropriate action.
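As a minimal follow-up sketch (the messages are just placeholders), you might do:
if gotConnected:
    print("Server responded with status", resp.status_code)
else:
    print("Could not reach the server after 10 attempts")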
Note that the timeout passed to urllib applies to the "wrong thing": each individual network operation (e.g. hostname resolution, socket connection, sending headers, reading a few bytes of the headers, reading a few more bytes of the response) gets this same timeout applied. Hence passing a "timeout" of 10 seconds could allow a large response to keep downloading for hours.
If you want to stick to built-in Python code, it would be nice to use a thread for this, but it doesn't seem to be possible to cancel running threads nicely. An async library like trio would allow better timeout and cancellation handling, but we can make do with the multiprocessing module instead:
from urllib.request import Request, urlopen
from multiprocessing import Process
from time import perf_counter

def _http_ping(url):
    req = Request(url, method='HEAD')
    print(f'trying {url!r}')
    start = perf_counter()
    res = urlopen(req)
    secs = perf_counter() - start
    print(f'response {url!r} of {res.status} after {secs*1000:.2f}ms')
    res.close()

def http_ping(url, timeout):
    proc = Process(target=_http_ping, args=(url,))
    try:
        proc.start()
        proc.join(timeout)
        success = not proc.is_alive()
    finally:
        proc.terminate()
        proc.join()
        proc.close()
    return success
You can use https://httpbin.org/ to test this, e.g.:
http_ping('https://httpbin.org/delay/2', 1)
should print out a "trying" message, but not a "response" message. You can adjust the delay time and timeout to explore how this behaves.
Note that this spins up a new process for each request, but as long as you're doing fewer than a thousand pings a second it should be OK.

If request timeout skip url python requests

I would like the following script to try every URL in url_list, print "exist(url)" if it exists, print "don't(url)" if it doesn't, and skip to the next URL if the request times out, using the "requests" lib:
url_list = ['www.google.com', 'www.urlthatwilltimeout.com', 'www.urlthatdon\'t exist']

def exist():
    if request.status_code == 200:
        print "exist{0}".format(url)
    else:
        print "don't{0}".format(url)

a = 0
while (a < 2):
    url = url_list[a]
    try:
        request = requests.get(url, timeout=10)
    except request.timeout:  # any option that is similar?
        print "timed out"
        continue
    validate()
    a += 1
Based on this SO answer, below is code which will limit the total time taken by a GET request, as well as discern other exceptions that may happen.
Note that in requests 2.4.0 and later you may specify a connection timeout and a read timeout by using the syntax:
requests.get(..., timeout=(...conn timeout..., ...read timeout...))
The read timeout, however, only specifies the timeout between individual read calls, not a timeout for the entire request.
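For illustration, a quick sketch of the tuple form (the 3.05-second connect timeout and 10-second read timeout are arbitrary values, not taken from the question):
import requests

try:
    # wait at most ~3 s to establish the connection and 10 s between reads
    response = requests.get("https://www.google.com", timeout=(3.05, 10))
    print(response.status_code)
except requests.exceptions.Timeout:
    print("timed out")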
Code:
import requests
import eventlet
eventlet.monkey_patch()

url_list = ['http://localhost:3000/delay/0',
            'http://localhost:3000/delay/20',
            'http://localhost:3333/',  # no server listening
            'http://www.google.com'
            ]

for url in url_list:
    try:
        with eventlet.timeout.Timeout(1):
            response = requests.get(url)
        print "OK -", url
    except requests.exceptions.ReadTimeout:
        print "READ TIMED OUT -", url
    except requests.exceptions.ConnectionError:
        print "CONNECT ERROR -", url
    except eventlet.timeout.Timeout, e:
        print "TOTAL TIMEOUT -", url
    except requests.exceptions.RequestException, e:
        print "OTHER REQUESTS EXCEPTION -", url, e
And here is an express server you can use to test it:
var express = require('express');
var sleep = require('sleep')
var app = express();

app.get('/delay/:secs', function(req, res) {
    var secs = parseInt( req.params.secs )
    sleep.sleep(secs)
    res.send('Done sleeping for ' + secs + ' seconds')
});

app.listen(3000, function () {
    console.log('Example app listening on port 3000!');
});

python request error handling

I am writing a small Python app which uses requests to get and post data to an HTML page.
Now the problem I am having is that if I can't reach the HTML page, the code stops with "max retries exceeded". I want to be able to do some things if I can't reach the server.
Is such a thing possible?
Here is sample code:
import requests

url = "http://127.0.0.1/"
req = requests.get(url)
if req.status_code == 304:
    pass  # do something
elif req.status_code == 404:
    pass  # do something else
    # etc etc
# code here if server can't be reached for whatever reason
You want to handle the exception requests.exceptions.ConnectionError, like so:
try:
    req = requests.get(url)
except requests.exceptions.ConnectionError as e:
    # Do stuff here
    pass
You may want to set a suitable timeout when catching ConnectionError:
url = "http://www.stackoverflow.com"
try:
req = requests.get(url, timeout=2) #2 seconds timeout
except requests.exceptions.ConnectionError as e:
# Couldn't connect
See this answer if you want to change the number of retries.
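If you do want to adjust the retries, a minimal sketch (the value 3 is an arbitrary choice) is to mount a custom HTTPAdapter on a Session:
import requests
from requests.adapters import HTTPAdapter

session = requests.Session()
adapter = HTTPAdapter(max_retries=3)  # retry failed connections up to 3 times
session.mount("http://", adapter)
session.mount("https://", adapter)

try:
    req = session.get("http://www.stackoverflow.com", timeout=2)
except requests.exceptions.ConnectionError as e:
    pass  # still unreachable after the retries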

Multithreading under url open process

I finished editing a script that checks whether a URL requires WWW basic authentication or not and prints the result for the user, as in this script:
#!/usr/bin/python
# Importing libraries
from urllib2 import urlopen, HTTPError
import socket
import urllib2
import threading
import time

# Setting up variables
url = open("oo.txt", 'r')
response = None
start = time.time()

# Executing commands
start = time.time()
for line in url:
    try:
        response = urlopen(line, timeout=1)
    except HTTPError as exc:
        # A 401 unauthorized will raise an exception
        response = exc
    except socket.timeout:
        print ("{0} | Request timed out !!".format(line))
    except urllib2.URLError:
        print ("{0} | Access error !!".format(line))

    auth = response and response.info().getheader('WWW-Authenticate')
    if auth and auth.lower().startswith('basic'):
        print "requires basic authentication"
    elif socket.timeout or urllib2.URLError:
        print "Yay"
    else:
        print "Not requires basic authentication"

print "Elapsed Time: %s" % (time.time() - start)
There are a couple of things I need your help with to edit the script.
I want the script to check 10 URLs at a time and write the results for all the URLs into a text file in one go. I read about multithreading and multiprocessing, but I didn't find an example matching my case that would simplify the code for me.
Also, I have a problem with the output when a timeout or a URL error appears: the script prints the result on two lines, like this:
http://www.test.test
| Access error !!
I want it on one line; why does it show on two?
Any help with these issues?
Thanks in advance.
The package concurrent.futures provides functionality that makes it very easy to use concurrency in Python. You define a function check_url that should be called for each URL. Then you can use the map function to apply that function to each URL in parallel and iterate over the return values. (The two-line output in your version comes from the newline still attached to each line read from the file; stripping it with rstrip('\n'), as load_urls does below, keeps each URL and its result on one line.)
#! /usr/bin/env python3

import concurrent.futures
import urllib.error
import urllib.request
import socket

def load_urls(pathname):
    with open(pathname, 'r') as f:
        return [line.rstrip('\n') for line in f]

class BasicAuth(Exception): pass

class CheckBasicAuthHandler(urllib.request.BaseHandler):
    def http_error_401(self, req, fp, code, msg, hdrs):
        if hdrs.get('WWW-Authenticate', '').lower().startswith('basic'):
            raise BasicAuth()
        return None

def check_url(url):
    try:
        opener = urllib.request.build_opener(CheckBasicAuthHandler())
        with opener.open(url, timeout=1) as u:
            return 'requires no authentication'
    except BasicAuth:
        return 'requires basic authentication'
    except socket.timeout:
        return 'request timed out'
    except urllib.error.URLError as e:
        return 'access error ({!r})'.format(e.reason)

if __name__ == '__main__':
    urls = load_urls('/tmp/urls.txt')
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        for url, result in zip(urls, executor.map(check_url, urls)):
            print('{}: {}'.format(url, result))

Checking if a website is up via Python

Using Python, how can I check if a website is up? From what I read, I need to check the "HTTP HEAD" and see status code "200 OK", but how do I do so?
Cheers
Related
How do you send a HEAD HTTP request in Python?
You could try to do this with getcode() from urllib
import urllib.request
print(urllib.request.urlopen("https://www.stackoverflow.com").getcode())
200
For Python 2, use
print urllib.urlopen("http://www.stackoverflow.com").getcode()
200
I think the easiest way to do it is by using the Requests module.
import requests

def url_ok(url):
    r = requests.head(url)
    return r.status_code == 200
You can use httplib
import httplib
conn = httplib.HTTPConnection("www.python.org")
conn.request("HEAD", "/")
r1 = conn.getresponse()
print r1.status, r1.reason
prints
200 OK
Of course, only if www.python.org is up.
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError

req = Request("http://stackoverflow.com")
try:
    response = urlopen(req)
except HTTPError as e:
    print('The server couldn\'t fulfill the request.')
    print('Error code: ', e.code)
except URLError as e:
    print('We failed to reach a server.')
    print('Reason: ', e.reason)
else:
    print('Website is working fine')
Works on Python 3
import httplib
import socket
import re

def is_website_online(host):
    """ This function checks to see if a host name has a DNS entry by checking
        for socket info. If the website gets something in return,
        we know it's available to DNS.
    """
    try:
        socket.gethostbyname(host)
    except socket.gaierror:
        return False
    else:
        return True

def is_page_available(host, path="/"):
    """ This function retrieves the status code of a website by requesting
        HEAD data from the host. This means that it only requests the headers.
        If the host cannot be reached or something else goes wrong, it returns
        False.
    """
    try:
        conn = httplib.HTTPConnection(host)
        conn.request("HEAD", path)
        if re.match("^[23]\d\d$", str(conn.getresponse().status)):
            return True
    except StandardError:
        return None
The HTTPConnection object from the httplib module in the standard library will probably do the trick for you. BTW, if you start doing anything advanced with HTTP in Python, be sure to check out httplib2; it's a great library.
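For example, a minimal httplib2 HEAD check might look like this (the 5-second timeout is an arbitrary choice):
import httplib2

h = httplib2.Http(timeout=5)
# request only the headers; resp.status holds the HTTP status code
resp, content = h.request("http://www.python.org", "HEAD")
print(resp.status == 200)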
If the server is down, then on Python 2.7 x86 Windows urllib has no timeout and the program goes into a deadlock. So use urllib2:
import urllib2
import socket

def check_url(url, timeout=5):
    try:
        return urllib2.urlopen(url, timeout=timeout).getcode() == 200
    except urllib2.URLError as e:
        return False
    except socket.timeout as e:
        print False

print check_url("http://google.fr")    # True
print check_url("http://notexist.kc")  # False
I use requests for this; it is easy and clean.
Instead of the print function you can define and call a new function (notify via email, etc.). The try-except block is essential, because if the host is unreachable it will raise a lot of exceptions, so you need to catch them all:
import requests

URL = "https://api.github.com"

try:
    response = requests.head(URL)
except Exception as e:
    print(f"NOT OK: {str(e)}")
else:
    if response.status_code == 200:
        print("OK")
    else:
        print(f"NOT OK: HTTP response code {response.status_code}")
You may use the requests library to find out whether the website is up, i.e. whether the status code is 200:
import requests
url = "https://www.google.com"
page = requests.get(url)
print (page.status_code)
>> 200
In my opinion, caisah's answer misses an important part of your question, namely dealing with the server being offline.
Still, using requests is my favorite option, albeit as such:
import requests

try:
    requests.get(url)
except requests.exceptions.ConnectionError:
    print(f"URL {url} not reachable")
If by up you simply mean "the server is serving", then you could use cURL, and if you get a response then it's up.
I can't give you specific advice because I'm not a Python programmer; however, here is a link to pycurl: http://pycurl.sourceforge.net/.
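As a rough sketch of that idea from Python (assuming the curl binary is installed; the 10-second limit is arbitrary):
import subprocess

def is_up(url):
    # -s silent, -I send a HEAD request, --max-time caps the whole transfer
    result = subprocess.run(["curl", "-sI", "--max-time", "10", url],
                            capture_output=True)
    return result.returncode == 0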
Hi, these functions can do a speed and uptime test for your web page:
from urllib.request import urlopen
from socket import socket
import time

def tcp_test(server_info):
    cpos = server_info.find(':')
    try:
        sock = socket()
        sock.connect((server_info[:cpos], int(server_info[cpos+1:])))
        sock.close()
        return True
    except Exception as e:
        return False

def http_test(server_info):
    try:
        # TODO: we can use this data later to find sub-URL up or down results
        startTime = time.time()
        data = urlopen(server_info).read()
        endTime = time.time()
        speed = endTime - startTime
        return {'status': 'up', 'speed': str(speed)}
    except Exception as e:
        return {'status': 'down', 'speed': str(-1)}

def server_test(test_type, server_info):
    if test_type.lower() == 'tcp':
        return tcp_test(server_info)
    elif test_type.lower() == 'http':
        return http_test(server_info)
Requests and httplib2 are great options:
# Using requests.
import requests

request = requests.get(value)
if request.status_code == 200:
    return True
return False

# Using httplib2.
import httplib2

try:
    http = httplib2.Http()
    response = http.request(value, 'HEAD')
    if int(response[0]['status']) == 200:
        return True
except:
    pass
return False
If using Ansible, you can use the fetch_url function:
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url

module = AnsibleModule(
    dict(),
    supports_check_mode=True)

try:
    response, info = fetch_url(module, url)
    if info['status'] == 200:
        return True
except Exception:
    pass
return False
my 2 cents
import urllib.request

def getResponseCode(url):
    conn = urllib.request.urlopen(url)
    return conn.getcode()

if getResponseCode(url) != 200:
    print('Wrong URL')
else:
    print('Good URL')
Here's my solution using PycURL and validators
import pycurl, validators

def url_exists(url):
    """
    Check if the given URL really exists
    :param url: str
    :return: bool
    """
    if validators.url(url):
        c = pycurl.Curl()
        c.setopt(pycurl.NOBODY, True)
        c.setopt(pycurl.FOLLOWLOCATION, False)
        c.setopt(pycurl.CONNECTTIMEOUT, 10)
        c.setopt(pycurl.TIMEOUT, 10)
        c.setopt(pycurl.COOKIEFILE, '')
        c.setopt(pycurl.URL, url)
        try:
            c.perform()
            response_code = c.getinfo(pycurl.RESPONSE_CODE)
            c.close()
            return True if response_code < 400 else False
        except pycurl.error as err:
            errno, errstr = err.args
            raise OSError('An error occurred: {}'.format(errstr))
    else:
        raise ValueError('"{}" is not a valid url'.format(url))
