Better way to write this Python function because of if conditions

import json
import sys

import requests


def do_request(url, token, json_data=None,
               mode="get", work_around_for_image_custom_list=False):
    """Uploads a file. """
    header_collection = {"X-Auth-Token": token}
    if json_data is not None:
        header_collection['Content-Type'] = 'application/json'
    try:
        if mode == "delete":
            # this looks ugly, but there is absolutely no way to
            # get requests to do DELETE when there is a blank JSON
            # included
            r = requests.delete(url, headers=header_collection, timeout=10)
        else:
            r = getattr(requests, mode)(url, data=json.dumps(json_data),
                                        headers=header_collection, timeout=10)
        if r.status_code == 200:
            # This looks ugly also, but has to be for a particular
            # function that calls it
            if work_around_for_image_custom_list:
                return r
            else:
                http_info = (json.dumps(r.json(), indent=2), r.status_code)
        else:
            http_info = (r.text, r.status_code)
        return http_info
    except requests.exceptions.ConnectionError:
        print "Connection Error! Http status Code {}".format(r.status_code)
        sys.exit()
    except (requests.exceptions.RequestException,
            requests.exceptions.HTTPError):
        print "Ambiguous Error! Http status Code {}".format(r.status_code)
        sys.exit()
Using Python 2.7 and the requests module, I have this function that I call from several other functions to make API calls. However, I have to make an exception for one particular function and return the request object... if work_around_for_image_custom_list is True. This seems like an ugly hack/workaround, and I am wondering how I could rewrite it to handle the work_around_for_image_custom_list case more cleanly. For instance, would it be better to make this a class and have each function create an object to use it? If so, how would I override if r.status_code == 200:?

Expanding on the comment I made:
def do_raw_request(url, token, json_data=None, mode="get"):
    """Uploads a file. """
    header_collection = {"X-Auth-Token": token}
    if json_data is not None:
        header_collection['Content-Type'] = 'application/json'
    try:
        if mode == "delete":
            # this looks ugly, but there is absolutely no way to
            # get requests to do DELETE when there is a blank JSON
            # included
            r = requests.delete(url, headers=header_collection, timeout=10)
        else:
            r = getattr(requests, mode)(url, data=json.dumps(json_data),
                                        headers=header_collection, timeout=10)
        if r.status_code == 200:
            return r, r.status_code
        return r.text, r.status_code
    except requests.exceptions.ConnectionError:
        print "Connection Error! Http status Code {}".format(r.status_code)
        sys.exit()
    except (requests.exceptions.RequestException,
            requests.exceptions.HTTPError):
        print "Ambiguous Error! Http status Code {}".format(r.status_code)
        sys.exit()
Then:
def do_request(url, token, json_data=None, mode="get"):
    res, code = do_raw_request(url, token, json_data, mode)
    if code == 200:
        # res is the response object here, not text
        return json.dumps(res.json(), indent=2), code
    return res, code
and now you call either do_raw_request or do_request as appropriate.
Note that I changed the return so it always returns a tuple; otherwise you would have to start checking types to know whether you have status text or a response object.
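As for the class question: here is a sketch of how that could look (ApiClient, handle_ok, and ImageCustomListClient are hypothetical names; the request logic mirrors the function above). The one caller that needs the raw response gets its own subclass instead of a flag:
import json

import requests


class ApiClient(object):
    """Sketch: shared request logic with overridable 200-handling."""

    def __init__(self, token):
        self.headers = {"X-Auth-Token": token}

    def handle_ok(self, r):
        # Default behaviour for a 200 response; subclasses may override this.
        return json.dumps(r.json(), indent=2), r.status_code

    def do_request(self, url, json_data=None, mode="get"):
        headers = dict(self.headers)
        if json_data is not None:
            headers['Content-Type'] = 'application/json'
        if mode == "delete":
            r = requests.delete(url, headers=headers, timeout=10)
        else:
            r = getattr(requests, mode)(url, data=json.dumps(json_data),
                                        headers=headers, timeout=10)
        if r.status_code == 200:
            return self.handle_ok(r)
        return r.text, r.status_code


class ImageCustomListClient(ApiClient):
    def handle_ok(self, r):
        # The one caller that needs the raw response object overrides here.
        return r
Error handling is left out for brevity; it would wrap the requests calls exactly as in the functions above.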

Related

github v3 api delete public key return error {'message': 'Not Found', 'documentation_url': 'https://developer.github.com/v3'}

I'm using the Python 3 requests module to access the GitHub v3 API (DELETE /user/keys/:key_id) to delete a public ssh key, but it returns the error
{'message': 'Not Found', 'documentation_url': 'https://developer.github.com/v3'}.
I'm using a Python 3 virtual environment and the requests module.
I checked the URL and method of the API and found no problem. In addition, I added access_token='My token' after the URL parameter, but it had no effect. I tried the command
curl -H "Authorization: token 93ca7d685602dca9d32e8788ddffafc8e7385003" https://api.github.com/users/codertocat -I to find the scope of the token,
and I checked that the key_id is correct as well.
def __init__(self):
    self.accessToken = '93ca7d685602dca9d32e8788ddffafc8e7385003'
    self.rootUrl = 'https://api.github.com'
    self.headers = {"Authorization": "token %s" % self.accessToken}

def baseGet(self, url, me='get', data=None):
    try:
        response = ''
        if me == 'get':
            response = requests.get(url)
        if me == 'post':
            response = requests.get(url, data)
        if me == 'delete':
            response = requests.delete(url)
        else:
            print('no support')
        try:
            data = response.json()
        except:
            data = response.content
        return data
    except Exception as e:
        print('error by', e)
        return False

def del_user_public_key(self, key_id):
    # del_user_public_key
    userkey = self.rootUrl + '/users/keys/%d?access_token=%s' % (key_id, self.accessToken)
    print(userkey)
    return self.baseGet(userkey, me='delete')
I expect the result to be Status: 204 No Content, with the public key deleted on GitHub.
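For comparison, here is a minimal sketch of the documented call (note that the documented path is /user/keys/:key_id rather than /users/keys/..., and that the Authorization header actually has to be passed to requests; the token and key_id below are placeholders):
import requests

token = 'my-token'   # placeholder
key_id = 123         # placeholder

resp = requests.delete(
    'https://api.github.com/user/keys/%d' % key_id,
    headers={'Authorization': 'token %s' % token},
)
print(resp.status_code)  # 204 on success; the response has no body
Also worth noting: in the code above, the me == 'post' branch calls requests.get, and self.headers is built but never passed to any request, so every request goes out unauthenticated.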

Change a while true python script to run only once

I'm new to Python and I want this code to run only once and then stop, not every 30 seconds,
because I want to run multiple scripts like this with different access tokens every 5 seconds from the command line.
When I tried this code, it never jumps to the second one because of the while True:
import requests
import time

api_url = "https://graph.facebook.com/v2.9/"
access_token = "access token"
graph_url = "site url"
post_data = {'id': graph_url, 'scrape': True, 'access_token': access_token}
# Beware of rate limiting if trying to increase frequency.
refresh_rate = 30  # refresh rate in seconds

while True:
    try:
        resp = requests.post(api_url, data=post_data)
        if resp.status_code == 200:
            contents = resp.json()
            print(contents['title'])
        else:
            error = "Warning: Status Code {}\n{}\n".format(
                resp.status_code, resp.content)
            print(error)
            raise RuntimeWarning(error)
    except Exception as e:
        f = open("open_graph_refresher.log", "a")
        f.write("{} : {}".format(type(e), e))
        f.close()
        print(e)
    time.sleep(refresh_rate)
From what I understood, you're trying to execute the same piece of code for multiple access tokens. To keep things simple, put all your access tokens and URLs in lists and use the following code. It assumes that you know all your access tokens in advance.
import requests
import time

def scrape_facebook(api_url, access_token, graph_url):
    """ Scrapes the given access token"""
    post_data = {'id': graph_url, 'scrape': True, 'access_token': access_token}
    try:
        resp = requests.post(api_url, data=post_data)
        if resp.status_code == 200:
            contents = resp.json()
            print(contents['title'])
        else:
            error = "Warning: Status Code {}\n{}\n".format(
                resp.status_code, resp.content)
            print(error)
            raise RuntimeWarning(error)
    except Exception as e:
        f = open(access_token + "_" + "open_graph_refresher.log", "a")
        f.write("{} : {}".format(type(e), e))
        f.close()
        print(e)

access_token = ['a', 'b', 'c']
graph_url = ['sss', 'xxx', 'ppp']
api_url = "https://graph.facebook.com/v2.9/"

for n in range(len(graph_url)):
    scrape_facebook(api_url, access_token[n], graph_url[n])
    time.sleep(5)
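If each token should instead run as its own one-shot invocation from the command line, as the question describes, a hypothetical wrapper (scrape_once is a made-up name) could take the token and URL as arguments and simply not loop:
import sys

import requests

def scrape_once(api_url, access_token, graph_url):
    # Same request as above, executed exactly once: no while True.
    post_data = {'id': graph_url, 'scrape': True, 'access_token': access_token}
    resp = requests.post(api_url, data=post_data)
    if resp.status_code == 200:
        print(resp.json().get('title'))
    else:
        print("Warning: Status Code {}".format(resp.status_code))

if __name__ == '__main__':
    # e.g. python scrape_once.py <access_token> <graph_url>
    scrape_once("https://graph.facebook.com/v2.9/", sys.argv[1], sys.argv[2])
Spacing the invocations 5 seconds apart then belongs to whatever launches the script, not the script itself.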

Using aiohttp's MultipartWriter with StreamResponse

I am trying to create a data endpoint that streams either the entirety of a file or responds appropriately to range requests. Streaming the whole file seems understandable, but it's not clear to me how to deal with range requests. Particularly, I can't see how aiohttp.MultipartWriter can write to a StreamResponse.
Here's an abstracted form of my code, so far:
from aiohttp.web import Request, StreamResponse
from aiohttp.multipart import MultipartWriter

async def data_handler(req: Request) -> StreamResponse:
    is_range_request = "Range" in req.headers
    with open("my_big_file", "rb") as f:
        if is_range_request:
            status_code = 202
            content_type = "multipart/bytes"
        else:
            status_code = 200
            content_type = "application/octet-stream"
        resp = StreamResponse(status=status_code, headers={"Content-Type": content_type})
        resp.enable_chunked_encoding()
        resp.enable_compression()
        await resp.prepare(req)
        if is_range_request:
            # _parse_range_header :: str -> List[ByteRange]
            # ByteRange = Tuple[int, int] i.e., "from" and "to", inclusive
            ranges = _parse_range_header(req.headers["Range"])
            mpwriter = MultipartWriter("bytes")
            for r in ranges:
                range_from, range_to = r
                range_size = (range_to - range_from) + 1
                range_header = {"Content-Type": "application/octet-stream"}
                # FIXME Won't this block?
                f.seek(range_from)
                mpwriter.append(f.read(range_size), range_header)
            # TODO Write to response. How?...
        else:
            while True:
                data = f.read(8192)
                if not data:
                    await resp.drain()
                    break
                resp.write(data)
    return resp
This also doesn't return the response until it gets to the end, which doesn't seem correct to me: how does an upstream caller know what's going on before the response is returned, or is the asyncio machinery doing this for me automagically?
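One possibility for the TODO, sketched under the assumption of aiohttp 3.x (where StreamResponse.write() is a coroutine and MultipartWriter.write() streams each appended part to a writer with that interface), is to hand the prepared response to the multipart writer:
# Untested sketch: would replace the "TODO Write to response" line above.
# Assumes aiohttp 3.x APIs: MultipartWriter.write() and async StreamResponse.write().
await mpwriter.write(resp)
await resp.write_eof()
As a side note, the conventional reply to a satisfiable range request is 206 Partial Content with Content-Type multipart/byteranges (RFC 7233), rather than 202 with multipart/bytes.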

Catching exception and handling in except: if-statement

I have two possible errors that I could get in my use case: 'RSA key format is not supported' for an incorrect passphrase, and 'PEM encryption format not supported.' when a passphrase is required but none is given. These are both of type ValueError.
I'm trying to handle this with try-except:
from flask import Flask, url_for, request, json, jsonify
from Crypto.PublicKey import RSA

app = Flask(__name__)

@app.route('/key2pub', methods=['POST'])
def api_keypub():
    if request.headers['Content-Type'] == 'application/json':
        resp = None
        try:
            pubkey = RSA.importKey(request.json['key'], request.json['passphrase'])
        except ValueError as e:
            if e == 'RSA key format is not supported':
                global resp
                resp = jsonify({"error": "Incorrect passphrase", "raw": e})
            elif e == 'PEM encryption format not supported.':
                global resp
                resp = jsonify({"error": "Passphrase missing", "raw": e})
        return resp

if __name__ == '__main__':
    app.run(debug=True)
I'm getting the error:
...
ValueError: View function did not return a response
So it seems that resp is not getting set in the except if-statement.
I suspect I'm using try-except in the wrong manner; can someone show me the correct way to do this?
You write to a global resp while you actually have a local one. So the global resp will be set, but your local resp, which is set to None, will not be changed; therefore you will return None. If you remove the global resp declaration, it should work correctly. Maybe also set a default resp in case it's another error, which you would otherwise miss. So something like the following:
resp = None
try:
    pubkey = RSA.importKey(request.json['key'], request.json['passphrase'])
except ValueError as e:
    if str(e) == 'RSA key format is not supported':
        resp = jsonify({"error": "Incorrect passphrase", "raw": str(e)})
    elif str(e) == 'PEM encryption format not supported.':
        resp = jsonify({"error": "Passphrase missing", "raw": str(e)})
    else:
        resp = jsonify({"error": "Unexpected error", "raw": str(e)})
return resp
api_keypub is your innermost context here: resp is local to that context. Using global isn't going to help.
On top of that, you need to indent your if statement to be part of the except clause.

Sequential requests using python-requests

Right now I'm using Flask, and I'm having trouble trying to do more than one GET request using the Python requests module.
If I try to send a series of requests, the first one completes successfully, but the others throw a timeout exception.
Here is part of the view's code:
import requests

sess = requests.Session()

site_url = 'http://www.example.com/api/'
steps = ['first_step', 'second_step', 'third_step']
step_responses = dict()

for s in steps:
    try:
        req = sess.get(site_url + s, timeout=5)
    except requests.exceptions.Timeout:
        return jsonify({'result': False, 'error': 'timeout'})
    except requests.exceptions.ConnectionError:
        return jsonify({'result': False, 'error': 'connection_error'})
    else:
        step_responses[s] = True
If I extract this part into a standalone .py file, it completes successfully.
import requests

sess = requests.Session()

site_url = 'http://www.example.com/api/'
steps = ['first_step', 'second_step', 'third_step']
step_responses = dict()

for s in steps:
    try:
        req = sess.get(site_url + s, timeout=5)
    except requests.exceptions.Timeout:
        step_responses[s] = 'timeout'
    except requests.exceptions.ConnectionError:
        step_responses[s] = 'conn_error'
    else:
        step_responses[s] = 'ok'

print step_responses
Works for me. You may want to check the second and third steps:
import requests
from flask import jsonify

sess = requests.Session()

def module():
    site_url = 'http://stackoverflow.com/'
    steps = ['users', 'questions', 'tags']
    step_responses = dict()
    for s in steps:
        try:
            req = sess.get(site_url + s, timeout=5)
        except requests.exceptions.Timeout:
            return jsonify({'result': False, 'error': 'timeout'})
        except requests.exceptions.ConnectionError:
            return jsonify({'result': False, 'error': 'connection_error'})
        else:
            step_responses[s] = True
You might want to make sure that you read all the values from the req object. I think you need req.text and req.status_code, or req.content.
Check half-way down the page here: http://docs.python-requests.org/en/latest/api/#request-sessions where they discuss session parameters:
"class requests.adapters.HTTPAdapter(pool_connections=10, pool_maxsize=10, max_retries=0, pool_block=False)"
I'm not at all sure how to use connection pools and so forth, but the docs do say (http://docs.python-requests.org/en/latest/user/advanced/, look for Keep-Alive):
"Note that connections are only released back to the pool for reuse once all body data has been read; be sure to either set stream to False or read the content property of the Response object."
