Basic Auth is failing in AWS Lambda function - python

I'm calling a REST API with basic authentication in an AWS Lambda function. This is my code:
import json, os, base64
from urllib import request

def lambda_handler(event, context):
    retStatusCode = 0
    retBody = ""
    try:
        url = os.environ['URL']
        username = os.environ['USERNAME']
        password = os.environ['PASSWORD']
        requestURL = url + "/" + event['queueID'] + "/" + event['cli']
        #print ("QUEUEID IS: " + event['queueID'])
        #print ("CLI IS: " + event['cli'])
        #print ("URL IS: " + requestURL)
        req = request.Request(requestURL, method="POST")
        myStr = '%s:%s' % (username, password)
        myBytes = myStr.encode("utf-8")
        base64string = base64.b64encode(myBytes)
        req.add_header("Authorization", "Basic %s" % base64string)
        resp = request.urlopen(req)
        responseJSON = json.load(resp)
        retStatusCode = responseJSON["Result"]
        retBody = responseJSON["Message"]
    except Exception as e:
        retStatusCode = 500
        retBody = "An exception occurred: " + str(e)
    return {
        'statusCode': retStatusCode,
        'body': retBody
    }
However, I'm getting an "HTTP Error 401: Unauthorized" response. If I call the API method in Postman with the same credentials, it returns data successfully, so I figure it must be something to do with the format of the header I'm adding, but I just can't see what's wrong.

The problem is in this line:
req.add_header("Authorization", "Basic %s" % base64string)
From the documentation, the base64.b64encode method is designed to "return the encoded bytes".
If you execute this code in a REPL, you'll see that the resulting header looks wrong: it concatenates a string with a bytes object:
>>> "Basic %s" % base64string
"Basic b'aGVsbG86d29ybGQ='"
You can read more about Python's b'' (bytes literal) syntax in the Python documentation.
So you need to decode the bytes back into a UTF-8 string:
req.add_header("Authorization", "Basic %s" % base64string.decode('utf-8'))
The result now looks like a valid Authorization header:
>>> "Basic %s" % base64string.decode('utf-8')
'Basic aGVsbG86d29ybGQ='

Related

Python Server Logger

I need some help figuring out why this isn't working the way it should. The goal was to stand up a local Python server (similar to SimpleHTTPServer) but have it log requests/responses to a file, to get a better understanding of how the browser DOM behaves. I want to be able to browse to localhost and have it react like a web server would.
When I use this it appears to hang at localhost and never redirects to the 'sample.html' file. Any thoughts on why this might be happening?
Code:
import os, re, socket, thread, threading;

# Settings that you may want to change:
WEBSERVER_PORT = 28876;
LOGS_FOLDER = os.path.join(os.getcwd(), 'logs');
TEST_HTML = 'sample.html';
VERBOSE_OUTPUT = True;

# Settings that you may need to change if you add new file types:
DEFAULT_MIME_TYPE = 'application/octet-stream';
REGISTERED_MIME_TYPES = {
  'text/html': ['htm', 'html'],
  'text/javascript': ['js'],
  'image/svg+xml': ['svg'],
  'image/jpeg': ['jpg'],
};

def MimeType(path):
  last_dot = path.rfind('.');
  if last_dot != -1:
    ext = path[last_dot + 1:].lower();
    for mime_type, exts in REGISTERED_MIME_TYPES.items():
      if ext in exts:
        return mime_type;
  return DEFAULT_MIME_TYPE;
def GenerateRedirect(new_path):
  return (301, 'Moved permanently', \
      ['Location: http://localhost:%s%s' % (WEBSERVER_PORT, new_path)], \
      'text/html', '<a href="%s">%s</a>' % (new_path, new_path));
def Reply404(path, query, data):
  return 404, 'Not found', 'text/plain', 'The path %s was not found' % path;

def ReplyFile(path, query, data):
  path = os.path.join(os.getcwd(), path[1:].replace(os.altsep, os.sep));
  if os.path.isfile(path):
    try:
      data = open(path, 'rb').read();
    except:
      print ' Cannot open file %s' % path;
      return 500, 'Internal server error', 'text/plain', \
          'The path %s could not be read' % path;
    if VERBOSE_OUTPUT:
      print ' Reply = file %s' % path;
    return 200, 'OK', MimeType(path), data;
  print ' Cannot find file %s' % path;
  return 404, 'Not found', 'text/plain', 'The path %s was not found' % path;

def ReplyLog(path, query, data):
  path = os.path.join(LOGS_FOLDER, path[len('/logs/'):].replace('/', os.sep));
  try:
    open(path, 'ab').write(data + '\r\n');
  except:
    print ' Cannot write to log file %s' % path;
    return 500, 'Internal server error', 'text/plain', \
        'The path %s could not be read' % path;
  if VERBOSE_OUTPUT:
    print ' Wrote %s bytes to log file %s' % (len(data), path);
  return 200, 'OK', 'text/plain', 'Log successful';

# Replies contains entries in one of two forms:
#   "regexp": ("mimetype", "body"),
#   "regexp": function,
# The first form causes the server to reply with "200 OK" and the given body and
# mimetype. The second form requires "function" to accept the HTTP request path
# as an argument and return a tuple containing the HTTP return code, HTTP reason
# message, mimetype and body.
replies = [
  (r'^/$', GenerateRedirect('/' + TEST_HTML)),
  (r'^/logs/.*$', ReplyLog),
  (r'^.*$', ReplyFile),
];
def Main():
  if not os.path.isdir(LOGS_FOLDER):
    try:
      os.mkdir(LOGS_FOLDER);
    except:
      print 'Cannot create logs folder %s' % LOGS_FOLDER;
      return;
  server_socket = socket.socket();
  server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1);
  server_socket.bind(('', WEBSERVER_PORT));
  server_socket.listen(1);
  print 'Webserver running at http://localhost:%d/' % WEBSERVER_PORT;
  print;
  thread_counter = 1;
  while 1:
    client_socket, client_address = server_socket.accept();
    thread = threading.Thread(target = ConnectionThread, \
        args = (client_socket, client_address));
    thread.start();
    thread_counter += 1;
def ConnectionThread(client_socket, client_address):
  try:
    print '** Connection from %s:%s opened.' % client_address;
    while 1:
      try:
        request_headers, request_content = ReadRequest(client_socket);
        if request_headers is None:
          break;
        response = HandleRequest(request_headers, request_content);
        try:
          client_socket.send(response);
        except socket.error, e:
          raise ConnectionError('Cannot send response');
      except ConnectionError, e:
        print ' ConnectionError: %s' % e;
        try:
          client_socket.close();
        except:
          pass;
        break;
    print '** Connection from %s:%s closed' % client_address;
  except:
    thread.interrupt_main();
class ConnectionError(Exception):
  def __init__(self, value):
    self.value = value;
  def __str__(self):
    return self.value;

def GetContentLength(request_headers):
  for request_header in request_headers:
    if request_header.find(':') != -1:
      name, value = request_header.split(':', 1);
      if name.strip().lower() == 'content-length':
        try:
          return int(value);
        except ValueError:
          raise ConnectionError('Bad content-length value: %s' % value);
  return None;
def ReadRequest(client_socket):
  request = '';
  request_headers = None;
  request_headers_length = None;
  if VERBOSE_OUTPUT:
    print '>> Accepted request, reading headers...',;
  while request_headers is None:
    try:
      request += client_socket.recv(256);
    except socket.error, e:
      if VERBOSE_OUTPUT:
        print;
      raise ConnectionError('Connection dropped while reading request headers.');
    if len(request) == 0:
      if VERBOSE_OUTPUT:
        print;
      # raise ConnectionError('Connection closed.');
      return None, None;
    request_headers_length = request.find('\r\n\r\n');
    if request_headers_length != -1:
      request_headers = request[:request_headers_length].split('\r\n')[:-1];
      # One line breaks is part of the headers
      request_headers_length += 2;
      # The other is not part of the headers or the content:
  request_content_length = GetContentLength(request_headers);
  if request_content_length is None:
    if VERBOSE_OUTPUT:
      print '\r>> Accepted request, read %d bytes of headers and ' \
          'no content.' % (request_headers_length);
    return request_headers, None;
  else:
    request_content = request[request_headers_length + 2:];
    if VERBOSE_OUTPUT:
      print '\r>> Accepted request, read %d bytes of headers...' % \
          len(request),;
    while len(request_content) < request_content_length:
      if VERBOSE_OUTPUT:
        print '\r>> Accepted request, read %d bytes of headers and ' \
            '%d/%d bytes of content...' % (request_headers_length, \
            len(request_content), request_content_length),;
      read_size = request_content_length - len(request_content);
      try:
        request_content += client_socket.recv(read_size);
      except socket.error, e:
        if VERBOSE_OUTPUT:
          print;
        raise ConnectionError('Connection dropped while reading request content.');
    if VERBOSE_OUTPUT:
      print '\r>> Accepted request, read %d bytes of headers and ' \
          '%d bytes of content. %s' % (request_headers_length, \
          len(request_content), ' ' * len(str(request_content_length)));
  return request_headers, request_content;
def HandleRequest(request_headers, request_content):
  end_method = request_headers[0].find(' ');
  if end_method == -1:
    raise ConnectionError('Bad request header; no method recognized');
  method = request_headers[0][:end_method];
  end_path = request_headers[0].find(' ', end_method + 1);
  if end_path == -1:
    raise ConnectionError('Bad request header; no path recognized');
  path = request_headers[0][end_method + 1:end_path];
  query = None;
  start_query = path.find('?');
  if start_query != -1:
    query = path[start_query:];
    path = path[:start_query];
  if VERBOSE_OUTPUT:
    print ' method=%s, path=%s, query=%s, %s headers' % \
        (method, path, query, len(request_headers));
  code, reason, mime_type, body = 404, 'Not found', 'text/plain', 'Not found';
  response = None;
  for path_regexp, response in replies:
    if re.match(path_regexp, path):
      if type(response) != tuple:
        response = response(path, query, request_content);
      break;
  assert type(response) == tuple and len(response) in [2, 4, 5], \
      'Invalid response tuple %s' % repr(response);
  code, reason, headers, mime_type, body = 200, 'OK', [], 'text/plain', '';
  if len(response) == 2:
    mime_type, body = response;
  elif len(response) == 4:
    code, reason, mime_type, body = response;
  else:
    code, reason, headers, mime_type, body = response;
  response_lines = [
    'HTTP/1.1 %03d %s' % (code, reason),
    'Content-Type: %s' % mime_type,
    'Date: Sat Aug 28 1976 09:15:00 GMT',
    'Expires: Sat Aug 28 1976 09:15:00 GMT',
    'Cache-Control: no-cache, must-revalidate',
    'Pragma: no-cache',
    'Accept-Ranges: bytes',
    'Content-Length: %d' % len(body),
  ] + headers + [
    '',
    body
  ];
  response = '\r\n'.join(response_lines);
  if VERBOSE_OUTPUT:
    print '<< %s (%d bytes %s)' % \
        (response.split('\r\n')[0], len(response), mime_type);
  return response;

if __name__ == "__main__":
  Main();
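As a side note, once the server is up, the /logs/ handler is meant to be driven directly by the page under test; a minimal sketch of exercising it from a script (assuming the server is running on the port configured above; dom.log is an arbitrary file name):

import urllib2

# POST a line of text to the /logs/ handler; ReplyLog should append it to logs/dom.log
req = urllib2.Request('http://localhost:28876/logs/dom.log', data='hello from a test client')
print urllib2.urlopen(req).read()   # expected reply body: "Log successful"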
Any feedback would be greatly appreciated.
Thank you!

Python - Capture Individual value result of request

I am new to Python. I am trying to capture the individual values returned in the request result.
Here is part of my code to send the request:
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(url, data=json_payload)
auth = base64.encodestring('%s:%s' % (user, pwd)).replace('\n', '')
request.add_header('Authorization', 'Basic %s' % auth)
request.add_header('Content-Type', 'application/json')
request.add_header('Accept', 'application/json')
request.get_method = lambda: 'POST'
# Perform the request
result = opener.open(request).read()
print result
Printing result gives me the output below (messy formatting):
{"#odata.context":"https://outlook.office365.com/api/v1.0/$metadata#Me/Events(Start,End)/$entity","#odata.id":"https://outlook.office365.com
/api/v1.0/Users('user1#domain.com')/Events('AAMkADA1OWVjOTkxLTlmYmEtNDAwMS04YWU3LTNkNYDHE4YjU2OGI1ZABGBBBBBBD_fa49_h8OTJ5eGdjSTEF3BwBOcC
SV9aNzSoXurwI4R0IgBBBBBBBENAABOcCSV9aNzSoXurwI4R0IgAAHn0Cy0AAA=')","#odata.etag":"W/\"TnAklfWjc0qF7q8COEdDTHDAB5+uOdw==\"","Id":"AAMkADA1OWVjO
TkxLTlmYmEtNDAwMS04YWU3LTMHKNDE2YjU2OGI1ZABGAAAAAAD_fa49_h8OTJ5eGdjSTEF3BwBOcCSV9aNzSoXurwI4R0IgCCCCCCCENAABOcCSV9aNzSoXurwI4R0IgAAHn0Cy0AAA="
,"Start":"2016-08-13T15:00:00-07:00","End":"2016-08-13T16:00:00-07:00"}
Is there a way to load the result into json and capture the individual values for @odata.context, @odata.id, @odata.etag, Id, Start, and End?
I tried this, but no luck:
data = json.load(result)
print data["@odata.context"]
Have you tried using json.loads? json.load uses the .read() method under the hood, so to use it you would need a file-like object such as StringIO or an open file.
Like this:
import urllib2
import json
import base64
test_load = "foo=bar"
user = "test"
pwd = "test"
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request("http://scooterlabs.com/echo.json?%s" % test_load, data=b'data')
auth = base64.encodestring('%s:%s' % (user, pwd)).replace('\n', '')
request.add_header('Authorization', 'Basic %s' % auth)
request.add_header('Content-Type', 'application/json')
request.add_header('Accept', 'application/json')
request.get_method = lambda: 'POST'
result = opener.open(request).read()
jsons = json.loads(result)
print("FULL: %s" % result)
print(jsons[u'method'])
print(jsons[u'request'])
Edit:
It seems as if the comments already answered the question... sorry
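For completeness, the same idea applied to the Office 365 response in the question would look roughly like this (a sketch assuming result already holds the raw JSON string shown above):

import json

data = json.loads(result)        # loads() takes a string; load() expects a file-like object
print data["@odata.context"]
print data["@odata.id"]
print data["@odata.etag"]
print data["Id"]
print data["Start"]
print data["End"]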

Mailchimp API - Pagination not working for lists

I'm trying to make a generic Python script to retrieve an arbitrary number of lists through the Mailchimp API. However, the "pagination" feature isn't working: there are sixteen lists in my account, and whatever values I try for offset=n&count=n, I get only those 16 records. Here is the part of my code that fetches the lists:
#FETCH ALL REPORTS
baseurl = "https://" + dc + ".api.mailchimp.com/3.0/"
request = urllib2.Request(baseurl + "reports/")
base64string = base64.encodestring('%s:%s' % (username, key)).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
try:
    output = urllib2.urlopen(request).read()
    data = json.loads(output)
except:
    print "Error occurred. Make sure you entered the correct api key"
    exit()
createfile("allcampaigns.json", output)

psize, i = 10, 0
while(True):
    list_url = baseurl + "lists" + '?offset=' + str(psize * i) + '&count=' + str(psize)
    #list_url = baseurl + "lists+ '?offset=' + str(psize * i) + '&count=' + str(psize)
    print list_url
    request = urllib2.Request(list_url)
    base64string = base64.encodestring('%s:%s' % (username, key)).replace('\n', '')
    request.add_header("Authorization", "Basic %s" % base64string)
    output = urllib2.urlopen(request).read()
    createfile("alllists.json", output)
    #print output
    data.update(json.loads(output))
    cnt = len(data)
    print cnt, " lists retrieved."
    if cnt < psize: break  #cnt could also be zero if no records are returned
    i += 1
The issue turned out to be in my code only; there was nothing wrong with the Mailchimp API, of course. On the returned JSON object, I was counting len(data) instead of len(data["lists"]). Fixed it and it started working!
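For reference, the corrected paging loop could be wrapped up roughly like this (fetch_all_lists is a made-up helper name; the point is to count the "lists" array on each page rather than the whole response object):

import json
import urllib2
import base64

def fetch_all_lists(baseurl, username, key, psize=10):
    # Hypothetical helper: pages through /lists until a short page is returned.
    all_lists = []
    i = 0
    while True:
        url = baseurl + "lists?offset=%d&count=%d" % (psize * i, psize)
        request = urllib2.Request(url)
        auth = base64.encodestring('%s:%s' % (username, key)).replace('\n', '')
        request.add_header("Authorization", "Basic %s" % auth)
        page = json.loads(urllib2.urlopen(request).read())
        all_lists.extend(page["lists"])     # count the "lists" array, not the whole dict
        if len(page["lists"]) < psize:      # short (or empty) page => no more records
            break
        i += 1
    return all_lists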

Parse JSON output in Python using Requests and its sessions

Here I have a rate stream that outputs the following, and I'm looking to print only the "bid" price. Could someone help explain how I can parse the output correctly? It's driving me crazy!
example = 1.05653
I need the output without quotes or any other markup as well.
JSON
{
    "tick": {
        "instrument": "EUR_USD",
        "time": "2015-04-13T14:28:26.123314Z",
        "bid": 1.05653,
        "ask": 1.05669
    }
}
My code:
import requests
import json
from optparse import OptionParser
def connect_to_stream():
    """
    Environment           <Domain>
    fxTrade               stream-fxtrade.oanda.com
    fxTrade Practice      stream-fxpractice.oanda.com
    sandbox               stream-sandbox.oanda.com
    """
    # Replace the following variables with your personal ones
    domain = 'stream-fxpractice.oanda.com'
    access_token = 'xxxxx'
    account_id = 'xxxxxxxxx'
    instruments = "EUR_USD"
    try:
        s = requests.Session()
        url = "https://" + domain + "/v1/prices"
        headers = {'Authorization' : 'Bearer ' + access_token,
                   # 'X-Accept-Datetime-Format' : 'unix'
                  }
        params = {'instruments' : instruments, 'accountId' : account_id}
        req = requests.Request('GET', url, headers = headers, params = params)
        pre = req.prepare()
        resp = s.send(pre, stream = True, verify = False)
        return resp
    except Exception as e:
        s.close()
        print "Caught exception when connecting to stream\n" + str(e)
def demo(displayHeartbeat):
    response = connect_to_stream()
    if response.status_code != 200:
        print response.text
        return
    for line in response.iter_lines(1):
        if line:
            try:
                msg = json.loads(line)
            except Exception as e:
                print "Caught exception when converting message into json\n" + str(e)
                return
            if msg.has_key("instrument") or msg.has_key("tick"):
                print line
            if displayHeartbeat:
                print line
            else:
                if msg.has_key("instrument") or msg.has_key("tick"):
                    print line
def main():
    usage = "usage: %prog [options]"
    parser = OptionParser(usage)
    parser.add_option("-b", "--displayHeartBeat", dest = "verbose", action = "store_true",
                      help = "Display HeartBeat in streaming data")
    displayHeartbeat = False
    (options, args) = parser.parse_args()
    if len(args) > 1:
        parser.error("incorrect number of arguments")
    if options.verbose:
        displayHeartbeat = True
    demo(displayHeartbeat)

if __name__ == "__main__":
    main()
Sorry if this is an extremely basic question, but I'm not that familiar with Python.
Thanks in advance!
You are iterating over the stream line by line, attempting to parse each line as JSON. Each line alone is not proper JSON, so that's one problem.
I would just run a regex over each line you bring in, looking for the text "bid": followed by a decimal number, and return that number as a float. For example:
import re

for line in response.iter_lines(1):
    matches = re.findall(r'\"bid\"\:\s(\d*\.\d*)', line)
    if len(matches) > 0:
        print float(matches[0])
Try something along the lines of this:
def demo(displayHeartbeat):
    response = connect_to_stream()
    for line in response.iter_lines():
        if line.startswith(" \"bid\""):
            print "bid:" + line.split(":")[1]
This actually turned out to be pretty easy; I fixed it by replacing the "demo" function with this:
def demo(displayHeartbeat):
    response = connect_to_stream()
    if response.status_code != 200:
        print response.text
        return
    for line in response.iter_lines(1):
        if line:
            try:
                msg = json.loads(line)
            except Exception as e:
                print "Caught exception when converting message into json\n" + str(e)
                return
            if displayHeartbeat:
                print line
            else:
                if msg.has_key("instrument") or msg.has_key("tick"):
                    print msg["tick"]["ask"] - .001
                    instrument = msg["tick"]["instrument"]
                    time = msg["tick"]["time"]
                    bid = msg["tick"]["bid"]
                    ask = msg["tick"]["ask"]

Python request JSON with formatting

I'm trying to make a Bitbucket request from Python to return a list of my repos. It's responding, but the output includes "\n" characters, so I assume my encoding is wrong, but I don't know how to fix it.
How do I get my response as formatted JSON?
theurl = 'https://api.bitbucket.org/1.0/user/repositories/';
username = 'xxxxx';
password = 'xxxxx';
passman = urllib2.HTTPPasswordMgrWithDefaultRealm();
passman.add_password(None, theurl, username, password);
authhandler = urllib2.HTTPBasicAuthHandler(passman);
opener = urllib2.build_opener(authhandler);
urllib2.install_opener(opener);
pagehandle = urllib2.urlopen(theurl);
output = pagehandle.read();
responseH = output.decode('utf-8');
Why don't you try using python-bitbucket? Example below
from api import API
import datetime

api = API("username", "**password**")
repos = api.get_repositories()
for repo in repos:
    print "Name: %s" % repo.name
    print "Owner: %s" % repo.owner
    print "Website: %s" % repo.website
    print "Description: %s" % repo.description
    print "Created on: %s" % datetime.datetime.strftime(repo.created_on, "%c")
    print "Language: %s" % repo.language
    print "SCM: %s" % repo.scm
    for issue in repo.get_issues():
        # Yes, this works too!
        print "Issue title: %s" % issue.title
        print "Issue priority: %s" % issue.priority
        print "Issue content:\n%s\n\n" % issue.content
    for change in repo.get_changesets(limit=5):
        print "Revision/Node: %d:%s" % (change.revision, change.node)
        # Since change.timestamp is a datetime object, we can use formatting on it.
        print "Timestamp: %s" % datetime.datetime.strftime(change.timestamp, "%c")
        print "Commit message:\n%s" % change.message
        print "Affected files: %s" % len(change.files)
        for f in change.files:
            print f.filename
        print "\n"
