How can I print the status code for this response? (e.g. 200, 401, etc.)
resp = http.urlopen('POST', 'https://api.bitbucket.org/2.0/repositories/6789oh', headers=headers, body=json.dumps(data))
print(str(resp.data))
I tried:
resp.code
resp.get_code()
etc., but none of them work for http.urlopen.
I would use urllib.request instead; try this and see if it helps. Note that urlopen does not take method, headers, or body keyword arguments, so you build a Request first. A successful response exposes the status code as resp.status, and urllib.error.HTTPError carries code and msg:
import urllib.request, json

req = urllib.request.Request(
    'https://api.bitbucket.org/2.0/repositories/6789oh',
    data=json.dumps(data).encode('utf-8'),
    headers=headers,
    method='POST')
try:
    resp = urllib.request.urlopen(req)
    print(resp.status)  # e.g. 200
except urllib.error.HTTPError as e:
    print(str(e.code) + "\n" + str(e.msg))
except urllib.error.URLError as e:
    print(str(e.reason))
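As an aside, if the http object in the original question is a urllib3 PoolManager (which matches the http.urlopen('POST', ...) call signature), the status code is already available on the response as resp.status. A minimal sketch, reusing the headers and data from the question:
import json
import urllib3

http = urllib3.PoolManager()
resp = http.urlopen('POST', 'https://api.bitbucket.org/2.0/repositories/6789oh',
                    headers=headers, body=json.dumps(data))
print(resp.status)  # e.g. 200 or 401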
I'm new to Python and I want this code to run only once and then stop, not every 30 seconds,
because I want to run multiple scripts like this with different access tokens every 5 seconds from the command line.
When I tried this code it never moved on to the second script, because it's a while True loop:
import requests
import time
api_url = "https://graph.facebook.com/v2.9/"
access_token = "access token"
graph_url = "site url"
post_data = { 'id':graph_url, 'scrape':True, 'access_token':access_token }
# Beware of rate limiting if trying to increase frequency.
refresh_rate = 30  # refresh rate in seconds

while True:
    try:
        resp = requests.post(api_url, data=post_data)
        if resp.status_code == 200:
            contents = resp.json()
            print(contents['title'])
        else:
            error = "Warning: Status Code {}\n{}\n".format(
                resp.status_code, resp.content)
            print(error)
            raise RuntimeWarning(error)
    except Exception as e:
        f = open("open_graph_refresher.log", "a")
        f.write("{} : {}".format(type(e), e))
        f.close()
        print(e)
    time.sleep(refresh_rate)
From what I understood, you're trying to execute the same piece of code for multiple access tokens. To make your job simple, store all your access tokens and graph URLs in lists and use the following code. It assumes that you know all your access tokens in advance.
import requests
import time
def scrape_facebook(api_url, access_token, graph_url):
    """Scrape the given graph URL using the given access token."""
    post_data = {'id': graph_url, 'scrape': True, 'access_token': access_token}
    try:
        resp = requests.post(api_url, data=post_data)
        if resp.status_code == 200:
            contents = resp.json()
            print(contents['title'])
        else:
            error = "Warning: Status Code {}\n{}\n".format(
                resp.status_code, resp.content)
            print(error)
            raise RuntimeWarning(error)
    except Exception as e:
        f = open(access_token + "_open_graph_refresher.log", "a")
        f.write("{} : {}".format(type(e), e))
        f.close()
        print(e)

access_token = ['a', 'b', 'c']
graph_url = ['sss', 'xxx', 'ppp']
api_url = "https://graph.facebook.com/v2.9/"

for n in range(len(graph_url)):
    scrape_facebook(api_url, access_token[n], graph_url[n])
    time.sleep(5)
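If each run really must execute only once from the command line (as the question describes), a minimal sketch is to take the token and URL as command-line arguments instead of hardcoding the lists; scrape_once.py is a hypothetical filename, and this assumes the scrape_facebook function above is defined in the same file:
import sys

if __name__ == '__main__':
    # usage: python scrape_once.py <access_token> <graph_url>
    token, url = sys.argv[1], sys.argv[2]
    scrape_facebook("https://graph.facebook.com/v2.9/", token, url)  # runs once, then exits
You can then invoke it every 5 seconds (or at any interval) from a shell loop or scheduler, one token per call.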
I am calling an API. While making requests I hit the maximum number of tries and get a connection error. I would like to edit the URL programmatically by incrementing the number in it. I know how to change the arguments programmatically, but I'm not sure how to change/increment an argument when I hit a connection error.
I am using Python with the requests library.
Code Snippet
Importing the libraries:
from requests.auth import HTTPBasicAuth
import requests
from requests.exceptions import ConnectionError
def make_request(data, id=None):
    url = "http://server001.net:8080/?id="
    result = {}
    if id:
        response = requests.get(url + id, auth=HTTPBasicAuth('uname', 'pass'))
        return response
    else:
        for line in data:
            try:
                response = requests.get(url + line, auth=HTTPBasicAuth('uname', 'pass'))
                result = html_parser2(response)
                if result:
                    write_csv(result)
                else:
                    pass
            except ConnectionError as e:
                print(e)
Expected output
url = "http://server001.net:8080/?id="
url_edited = "http://server002.net:8080/?id="
The URL should be edited only if I hit the maximum number of tries, i.e. I get an exception;
otherwise it should keep requesting the same URL.
One option is to wrap the try..except block in a while loop.
Besides, maybe you should put your first requests.get into a try..except block too.
Also, try to avoid multiple unrelated operations in one try..except block, i.e. execute write_csv only after a successful connection:
def make_request(data, id=None):
    url = 'http://server001.net:8080/?id={}'
    connection_failed = False
    response = None
    if id:
        try:
            response = requests.get(url.format(id), auth=HTTPBasicAuth('uname', 'pass'))
        except ConnectionError as e:
            print('id = {}, e: {}'.format(id, e))
    else:
        for line in data:
            while not connection_failed:
                try:
                    response = requests.get(url.format(line), auth=HTTPBasicAuth('uname', 'pass'))
                except ConnectionError as e:
                    connection_failed = True
                    print('line = {}, e: {}'.format(line, e))
                else:
                    result = html_parser2(response)
                    if result:
                        write_csv(result)
    return response
If you only want the first successful response per call instead of retrying until failure, drop the while loop and break after a successful write:
def make_request(data, id=None):
    url = 'http://server001.net:8080/?id={}'
    response = None
    if id:
        try:
            response = requests.get(url.format(id), auth=HTTPBasicAuth('uname', 'pass'))
        except ConnectionError as e:
            print('id = {}, e: {}'.format(id, e))
    else:
        for line in data:
            try:
                response = requests.get(url.format(line), auth=HTTPBasicAuth('uname', 'pass'))
            except ConnectionError as e:
                print('line = {}, e: {}'.format(line, e))
            else:
                result = html_parser2(response)
                if result:
                    write_csv(result)
                    break
    return response
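Note that neither variant actually edits the hostname. To get the server002 behaviour from the expected output, one option (a sketch; next_server is a hypothetical helper, and it assumes hostnames follow the serverNNN pattern from the question) is to rewrite the URL inside the except branch:
import re

def next_server(url):
    # bump the numeric suffix, e.g. server001 -> server002
    return re.sub(r'server(\d+)',
                  lambda m: 'server{:03d}'.format(int(m.group(1)) + 1),
                  url)

# inside the for loop:
try:
    response = requests.get(url.format(line), auth=HTTPBasicAuth('uname', 'pass'))
except ConnectionError:
    url = next_server(url)  # subsequent requests go to server002, server003, ...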
I wrote a hiscore checker for a game that I play. Basically you enter a list of usernames into a .txt file and it outputs the results in found.txt.
However, if the page responds with a 404 it throws an error instead of returning "0" as the output and continuing with the list.
Example of the script:
#!/usr/bin/python
import urllib2
def get_total(username):
    try:
        req = urllib2.Request('http://services.runescape.com/m=hiscore/index_lite.ws?player=' + username)
        res = urllib2.urlopen(req).read()
        parts = res.split(',')
        return parts[1]
    except urllib2.HTTPError, e:
        if e.code == 404:
            return "0"
    except:
        return "err"

filename = "check.txt"
accs = []
handler = open(filename)
for entry in handler.read().split('\n'):
    if "No Displayname" not in entry:
        accs.append(entry)
handler.close()

for account in accs:
    display_name = account.split(':')[len(account.split(':')) - 1]
    total = get_total(display_name)
    if "err" not in total:
        rStr = account + ' - ' + total
        handler = open('tried.txt', 'a')
        handler.write(rStr + '\n')
        handler.close()
        if total != "0" and total != "49":
            handler = open('found.txt', 'a')
            handler.write(rStr + '\n')
            handler.close()
            print rStr
    else:
        print "Error searching"
        accs.append(account)

print "Done"
The HTTPError exception handling that doesn't seem to be working:
except urllib2.HTTPError, e:
    if e.code == 404:
        return "0"
except:
    return "err"
Error response shown below.
Now, I understand the error shown doesn't seem to be related to a 404 response; however, this only occurs with users whose request returns a 404. Any other request works fine, so I can assume the issue is within the 404 response exception.
I believe the issue may lie in the fact that the 404 is a custom page which you get redirected to?
So the original page is "example.com/index.php" but the 404 is "example.com/error.php"?
Not sure how to fix it.
For testing purposes, the format to use is
ID:USER:DISPLAY
which is placed into check.txt
It seems that total can end up being None. In that case you can't check whether it contains 'err'. To fix the crash, try changing that line to:
if total is not None and "err" not in total:
To be more specific, get_total is returning None, which means that either
parts[1] is None, or
the except urllib2.HTTPError, e: branch is executed but e.code is not 404.
In the latter case None is returned because the exception is caught, but you're only dealing with the very specific 404 case and ignoring every other HTTP error code.
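Following that reasoning, a minimal sketch of a get_total that never returns None (keeping the question's Python 2 style) would be:
def get_total(username):
    try:
        req = urllib2.Request('http://services.runescape.com/m=hiscore/index_lite.ws?player=' + username)
        res = urllib2.urlopen(req).read()
        return res.split(',')[1]
    except urllib2.HTTPError, e:
        if e.code == 404:
            return "0"
        return "err"  # other HTTP errors no longer fall through to None
    except:
        return "err"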
So I have this code that creates an output in Excel.
What I want to do now is get the lid parameter in payload to loop through a list of other IDs.
This list is stored in a txt file.
Can anyone modify my code to show me how to do that, please?
The text file has values
1654,
3457,
4327,
1234
(these can also be hard-coded somewhere in the script if that is easier)
from __future__ import print_function
import sys
import csv
import collections
import itertools

try:
    import requests
    from requests import exceptions
    import base64
    import json
except ImportError as e:
    print("Import Error: %s" % e)

API_TOKEN = u''
b64token = base64.b64encode(bytes(API_TOKEN))
REST_BASE_URL = u'https://visdasa.dsds.com/rest/'

# API URL request examples (choose one)
REST_URL = u'rawdata/'
FULL_URL = REST_BASE_URL + REST_URL

def retrieve_data(api_url):
    try:
        # connect to the API and retrieve data
        bauth_header = {'Authorization': 'Basic ' + b64token.decode('UTF-8')}
        payload = {'start': '2014-08-01T00:00:01', 'stop': '2014-08-01T23:59:59', 'category': 'ots', 'lid': '9263'}
        response = requests.get(api_url, headers=bauth_header, params=payload)
        # check the api response
        if response.status_code == requests.codes.ok:
            # Convert from json data
            json_data = json.loads(response.text)
            Header_String = ("ID", "Site Name", "Network ID", "Network Label", "Company Branch ID", "Company Label", "Count", "timestamp", "ots_duration", "notsure1", "notsure2")
            for location_row in json_data["data"]["locations"]:
                Location_string = (location_row["id"], location_row["label"], location_row["site"]["network"]["id"], location_row["site"]["network"]["label"],
                                   location_row["site"]["id"], location_row["site"]["label"])
            try:
                with open('C:\\Users\\teddy\\Desktop\\party\\test.csv', 'w') as wFile:
                    writer = csv.writer(wFile, delimiter=',')
                    writer.writerow(Header_String)
                    for row in json_data["data"]["raw_data"]:
                        writer.writerow(row)
            except IOError as e:
                print("I/O error({0}): {1}".format(e.errno, e.strerror))
        else:
            json_data = json.loads(response.text)
            # If the api call was not successful, raise an error
            raise requests.RequestException("Error with the api. Status code : %i \n Json response: %s"
                                            % (response.status_code, json_data))
    except (requests.exceptions.ProxyError, requests.RequestException) as e:
        print(e)

def main():
    # retrieve_data(FULL_URL, PROXY_SETTINGS)
    retrieve_data(FULL_URL)
    sys.exit()

if __name__ == '__main__':
    main()
Why not just pass the lid value as a parameter to your retrieve_data function?
def retrieve_data(api_url):
would become
def retrieve_data(api_url, lid_value):
You would remove the hardcoded lid section of your payload, so the payload would look like this:
payload = {'start': '2014-08-01T00:00:01', 'stop': '2014-08-01T23:59:59', 'category': 'ots'}
Then on the next line you can add
payload['lid'] = lid_value
In your main function you could then loop through the values in the text file. Here is a simple loop with a list.
def main():
    lid_values = ['1654', '3457', '4327', '1234']
    for lid in lid_values:
        retrieve_data(FULL_URL, lid)
    sys.exit()
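Since the question keeps the IDs in a text file, here is a small sketch for building lid_values from that file instead of hardcoding it (it assumes the comma-separated layout shown in the question; ids.txt is a hypothetical filename):
def read_lid_values(path):
    # split the file contents on commas, dropping whitespace and empty entries
    with open(path) as f:
        return [token.strip() for token in f.read().split(',') if token.strip()]

def main():
    lid_values = read_lid_values('ids.txt')
    for lid in lid_values:
        retrieve_data(FULL_URL, lid)
    sys.exit()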
I'm trying to create a script which makes requests to random URLs from a txt file:
import urllib2
with open('urls.txt') as urls:
    for url in urls:
        try:
            r = urllib2.urlopen(url)
        except urllib2.URLError as e:
            r = e
        if r.code in (200, 401):
            print '[{}]: '.format(url), "Up!"
        elif r.code == 404:
            print '[{}]: '.format(url), "Not Found!"
But I want the URLs that return a 404 Not Found to be erased from the file. Each URL is on its own line, so basically I want to erase every URL that returns a 404. How can I do that?
You could write to a second file:
import urllib2

with open('urls.txt', 'r') as urls, open('urls2.txt', 'w') as urls2:
    for url in urls:
        url = url.strip()  # drop the trailing newline read from the file
        try:
            r = urllib2.urlopen(url)
        except urllib2.URLError as e:
            r = e
        if r.code in (200, 401):
            print '[{}]: '.format(url), "Up!"
            urls2.write(url + '\n')
        elif r.code == 404:
            print '[{}]: '.format(url), "Not Found!"
In order to delete lines from a file, you have to rewrite the entire contents of the file. The safest way to do that is to write out a new file in the same directory and then rename it over the old file. I'd modify your code like this:
import os
import sys
import tempfile
import urllib2
good_urls = set()

with open('urls.txt') as urls:
    for url in urls:
        url = url.strip()  # drop the trailing newline so it isn't written back twice
        try:
            r = urllib2.urlopen(url)
        except urllib2.URLError as e:
            r = e
        if r.code in (200, 401):
            sys.stdout.write('[{}]: Up!\n'.format(url))
            good_urls.add(url)
        elif r.code == 404:
            sys.stdout.write('[{}]: Not found!\n'.format(url))
        else:
            sys.stdout.write('[{}]: Unexpected response code {}\n'.format(url, r.code))

tmp = None
try:
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.txt', dir='.', delete=False)
    for url in sorted(good_urls):
        tmp.write(url + "\n")
    tmp.close()
    os.rename(tmp.name, 'urls.txt')
    tmp = None
finally:
    if tmp is not None:
        os.unlink(tmp.name)
You may want to add a good_urls.add(url) to the else clause in the first loop. If anyone knows a tidier way to do what I did with try-finally there at the end, I'd like to hear about it.
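One possibly tidier sketch of that final rewrite step is to let tempfile.mkstemp and os.fdopen manage the file handle, keeping the cleanup in a single except block (this is an alternative sketch, not the code above):
import os
import tempfile

def rewrite_urls(path, urls):
    # write to a sibling temp file, then rename it over the original
    fd, tmp_path = tempfile.mkstemp(suffix='.txt', dir=os.path.dirname(path) or '.')
    try:
        with os.fdopen(fd, 'w') as tmp:
            for url in sorted(urls):
                tmp.write(url + '\n')
        os.rename(tmp_path, path)
    except:
        os.unlink(tmp_path)
        raise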