While trying to update a DNS record in Route 53 using boto, I get the following error:
Traceback (most recent call last):
File "testing.py", line 106, in <module>
updateDns(load_balancer_dns)
File "testing.py", line 102, in updateDns
change.commit()
File "/usr/lib/python2.6/site-packages/boto/route53/record.py", line 149, in commit
return self.connection.change_rrsets(self.hosted_zone_id, self.to_xml())
File "/usr/lib/python2.6/site-packages/boto/route53/connection.py", line 320, in change_rrsets
body)
boto.route53.exception.DNSServerError: DNSServerError: 505 HTTP Version Not Supported
The following is the function I use to update the DNS entry:
def updateDns(load_balancer_dns):
r53 = boto.route53.connection.Route53Connection(aws_access_key_id=<access_key>,aws_secret_access_key=<secret_key>)
zone_id = r53.get_hosted_zone_by_name(<domain_name>)
print zone_id
change = boto.route53.record.ResourceRecordSets(connection=r53,hosted_zone_id=zone_id)
change.add_change_record("UPSERT", boto.route53.record.Record(name=<name>, type="CNAME", resource_records=load_balancer_dns, ttl=300))
change.commit()
print "record changed"
return None
updateDns(load_balancer_dns)
Has anyone else run into this issue before?
Since this doesn't truly have an authoritative answer, here's a working script I have just cobbled together:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
from time import sleep
import boto
def main():
"""Entrypoint."""
r53 = boto.connect_route53()
zones = r53.get_all_hosted_zones()['ListHostedZonesResponse']['HostedZones']
private_zone = find_private_zone('sub.mydomain.com.', zones)
reverse_zone = find_private_zone('100.10.in-addr.arpa.', zones)
upsert_record(r53, private_zone, 'dangus.sub.mydomain.com.', '127.0.0.1', 'A', wait=True)
delete_record(r53, private_zone, 'dangus.sub.mydomain.com.', '127.0.0.1', 'A', wait=True)
def find_private_zone(name, zones):
for zone in zones:
if zone.get('Name') == name and zone.get('Config', {}).get('PrivateZone') in [True, 'true']:
return zone
return None
def find_record(r53, zone_id, name, record_type):
records = r53.get_all_rrsets(zone_id)
for record in records:
if record.name == name and record.type == record_type:
return record
return None
def upsert_record(r53, zone, name, record, record_type, ttl=60, wait=False):
print("Inserting record {}[{}] -> {}; TTL={}".format(name, record_type, record, ttl))
recordset = boto.route53.record.ResourceRecordSets(connection=r53, hosted_zone_id=zone.get('Id').split('/')[-1])
recordset.add_change_record('UPSERT', boto.route53.record.Record(
name=name,
type=record_type,
resource_records=[record],
ttl=ttl
))
changeset = recordset.commit()
change_id = changeset['ChangeResourceRecordSetsResponse']['ChangeInfo']['Id'].split('/')[-1]
while wait:
status = r53.get_change(change_id)['GetChangeResponse']['ChangeInfo']['Status']
if status == 'INSYNC':
break
sleep(10)
def delete_record(r53, zone, name, record, record_type, wait=False):
print("Deleting record {}[{}] -> {}".format(name, record_type, record))
zone_id = zone.get('Id').split('/')[-1]
record = find_record(r53, zone_id, name, record_type)
if not record:
print("No record exists.")
return
recordset = boto.route53.record.ResourceRecordSets(connection=r53, hosted_zone_id=zone.get('Id').split('/')[-1])
recordset.add_change_record('DELETE', record)
changeset = recordset.commit()
change_id = changeset['ChangeResourceRecordSetsResponse']['ChangeInfo']['Id'].split('/')[-1]
while wait:
status = r53.get_change(change_id)['GetChangeResponse']['ChangeInfo']['Status']
if status == 'INSYNC':
break
sleep(10)
if __name__ == "__main__":
main()
Unfortunately, none of the APIs that I have worked with have a good Route 53 implementation and you eventually have to use these dictionary lookups to look into the XML actually returned by the service.
Some gotchas:
Always use the FQDN which means every record should end with a dot.
You can't fetch just the hosted zone you're looking for; you need to list all of them and search for the one you want (i.e. a private zone matching the given name).
You have to parse out the ID from your hosted zone.
You have to parse out the ID from your change sets.
When deleting entries, you must first capture the current state of the record if it exists at all, and then send that value to Route 53 as part of your deletion change.
This makes the API really aggravating to use, but at least it solves RFC-1925 rule number 1: it works.
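For reference, here are the two ID-parsing gotchas in isolation; a minimal sketch assuming the dictionary shapes returned by boto's XML parsing, as used in the script above:
# Both IDs come back as path-like strings and must be stripped down.
zone_id = zone['Id'].split('/')[-1]  # '/hostedzone/Z123EXAMPLE' -> 'Z123EXAMPLE'
change_id = changeset['ChangeResourceRecordSetsResponse']['ChangeInfo']['Id'].split('/')[-1]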
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# src: https://stackoverflow.com/a/47166985/65706
# courtesy of: Naftuli Kay
#
from __future__ import absolute_import, print_function
from time import sleep
import argparse
import boto
import pprint
import sys
def set_vars():
usage_examples = '''
clear ; poetry run python process_dns.py --action upsert --dns-zone cgfinics.com --dns-name dev.cgfinics.com --dns-value 168.10.172.10 --record-type A
clear ; poetry run python process_dns.py --action upsert --dns-zone cgfinics.com --dns-name www.dev.cgfinics.com --dns-value www.dev.cgfinics.com --record-type CNAME
clear ; poetry run python process_dns.py --action delete --dns-zone cgfinics.com --dns-name dev.cgfinics.com
'''
    parser = argparse.ArgumentParser(description='A quick and dirty DNS upsert to AWS with boto\n\n' + usage_examples)
parser.add_argument('--action', required=True, nargs='?',
help="The action to perform - upsert or delete ")
parser.add_argument('--dns-zone', required=True, nargs='?',
help="The DNS zone to process ")
parser.add_argument('--dns-name', required=True, nargs='?',
help="The DNS name to process ")
parser.add_argument('--dns-value', required=False, nargs='?',
help="The DNS value to process ")
parser.add_argument('--record-type', required=True, nargs='?',
help="The DNS record type - could be A, CNAME ")
args = parser.parse_args()
return args
def main():
"""Entrypoint."""
args = set_vars()
action = args.action
r53 = boto.connect_route53()
zones = r53.get_all_hosted_zones()['ListHostedZonesResponse']['HostedZones']
dns_zone = args.dns_zone + '.' if not args.dns_zone.endswith('.') else args.dns_zone
    public_zone = find_public_zone(dns_zone, zones)
dns_name = args.dns_name + '.' if not args.dns_name.endswith('.') else args.dns_name
dns_value = args.dns_value
record_type = args.record_type
if action == "upsert":
upsert_record(r53, public_zone, dns_name, dns_value, record_type, wait=True)
sys.exit(0)
if action == "delete":
delete_record(r53, public_zone, dns_name, dns_value, record_type, wait=True)
sys.exit(0)
print("only the upser and delete actions are supported !!!")
sys.exit(1)
def find_public_zone(name, zones):
for zone in zones:
if zone.get('Name') == name and zone.get('Config', {}).get('PrivateZone') in [True, 'false']:
return zone
return None
def find_record(r53, zone_id, name, record_type):
records = r53.get_all_rrsets(zone_id)
for record in records:
if record.name == name and record.type == record_type:
return record
return None
def upsert_record(r53, zone, name, record, record_type, ttl=60, wait=False):
print("Inserting record {}[{}] -> {}; TTL={}".format(name, record_type, record, ttl))
recordset = boto.route53.record.ResourceRecordSets(connection=r53, hosted_zone_id=zone.get('Id').split('/')[-1])
recordset.add_change_record('UPSERT', boto.route53.record.Record(
name=name,
type=record_type,
resource_records=[record],
ttl=ttl
))
changeset = recordset.commit()
change_id = changeset['ChangeResourceRecordSetsResponse']['ChangeInfo']['Id'].split('/')[-1]
while wait:
status = r53.get_change(change_id)['GetChangeResponse']['ChangeInfo']['Status']
if status == 'INSYNC':
break
sleep(6)
def delete_record(r53, zone, name, record, record_type, wait=False):
print("Deleting record {}[{}] -> {}".format(name, record_type, record))
zone_id = zone.get('Id').split('/')[-1]
record = find_record(r53, zone_id, name, record_type)
if not record:
print("No record exists.")
return
recordset = boto.route53.record.ResourceRecordSets(connection=r53, hosted_zone_id=zone.get('Id').split('/')[-1])
recordset.add_change_record('DELETE', record)
changeset = recordset.commit()
change_id = changeset['ChangeResourceRecordSetsResponse']['ChangeInfo']['Id'].split('/')[-1]
while wait:
status = r53.get_change(change_id)['GetChangeResponse']['ChangeInfo']['Status']
if status == 'INSYNC':
break
sleep(10)
if __name__ == "__main__":
main()
I am having a similar issue to this, so as a "debug" exercise, I did a
print zone_id
I noticed that the object was a dict / JSON response, so I changed my code to
zone_id = self.r53.get_hosted_zone_by_name(self.domain).get("GetHostedZoneResponse").get("HostedZone").get("Id")
And this seemed to work for me - I am now getting a 403, but that at least should be easier to fix.
Disclaimer - new to Python, so not sure if this is actually the right way!
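For completeness, a minimal sketch of that extraction combined with the ID-stripping from the other answers (the domain is a placeholder; the key names follow the parsed response shown above, and the Id comes back as a path like /hostedzone/Z123...):
zone = r53.get_hosted_zone_by_name("example.com.")  # placeholder domain; note the trailing dot
zone_id = zone["GetHostedZoneResponse"]["HostedZone"]["Id"].split("/")[-1]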
Just for reference:
Get the change_id, and then check the status with the boto connection until it says "INSYNC".
e.g.
def updateDns(load_balancer_dns):
r53 = boto.route53.connection.Route53Connection(aws_access_key_id=<access_key>,aws_secret_access_key=<secret_key>)
zone_id = r53.get_hosted_zone_by_name(<domain_name>)
print zone_id
change = boto.route53.record.ResourceRecordSets(connection=r53,hosted_zone_id=zone_id)
change.add_change_record("UPSERT", boto.route53.record.Record(name=<name>, type="CNAME", resource_records=load_balancer_dns, ttl=300))
_changes = change.commit()
change_id = _changes["ChangeResourceRecordSetsResponse"]["ChangeInfo"]["Id"].split("/")[-1]
while True:
status = r53.get_change(change_id)["GetChangeResponse"]["ChangeInfo"]["Status"]
if status == "INSYNC": break
sleep(10)
Related
When I try to run my Python code in Lambda, passing the handler to the function module, I get the below error. Any suggestions on how I could resolve this?
The file test_client_visitor below is triggered to call client_visitor and send an email to the clients accordingly. When I run the Python file test_client_visitor locally, the email is triggered successfully, but in Lambda I face this issue.
file_name: test_client_visitor
import unittest

import jsonpickle

function = __import__('client_visitor')
handler = function.scan_clients
class TestFunction(unittest.TestCase):
def test_function(self):
file = open('event.json', 'rb')
try:
ba = bytearray(file.read())
event = jsonpickle.decode(ba)
print('## EVENT')
print(jsonpickle.encode(event))
context = {'requestid': '1234'}
result = handler(event, context)
print(result)
self.assertTrue(result, 'Emails could not be sent!')
        finally:
            file.close()
if __name__ == '__main__':
unittest.main()
file_name: client_visitor.py
import datetime
import boto3
from aws_ses import send_bulk_templated_email
# boto3.set_stream_logger('botocore', level='DEBUG')
from mongodb import get_mongo_db
def process_clients(clients, developers, clients_to_be_notified, days):
    if not clients:
        return
check_date = datetime.datetime.now() + datetime.timedelta(days)
for client in clients:
client_id_ = client['client_id']
if 'developer_id' in client:
developers[client_id_] = client['developer_id']
else:
if 'secrets' in client:
secrets = client['secrets']
for secret in secrets:
if 'not_on_or_after' in secret and secret['not_on_or_after'] < check_date.timestamp():
clients_to_be_notified.append({'client_id': client_id_,
'expiration_date': datetime.datetime.fromtimestamp(
secret['not_on_or_after']).strftime('%m/%d/%Y')})
print("adding client to notify List", client_id_, ":", client['sort'])
def notify_clients(clients_to_be_notified, developers):
developer_id_list = []
for client_secret in clients_to_be_notified:
developer_id_list.append(developers[client_secret['client_id']])
if developer_id_list:
db = get_mongo_db()
if db:
users = list(db.users.find({'guid': {'$in': developer_id_list}}, {'email', 'guid'}))
need_to_send_email = False
for user in users:
for client_secret in clients_to_be_notified:
if developers[client_secret['client_id']] == user['guid']:
client_secret['email'] = user['email']
need_to_send_email = True
break
if need_to_send_email:
return send_bulk_templated_email(clients_to_be_notified)
else:
return False
return True
def scan_clients(event, context):
local = False
if 'local' in event:
local = event['local'] == 'True'
if local:
dynamodb = boto3.resource('dynamodb', endpoint_url="http://localhost:8000")
else:
dynamodb = boto3.resource('dynamodb')
days = 30
if 'days' in event:
days = int(event['days'])
print(f"Scanning Clients with {days} or less to secret expiration")
table = dynamodb.Table('****')
scan_kwargs = {
'ProjectionExpression': 'client_id, sort, developer_id, secrets, approved'
}
test = False
if 'test' in event:
test = event['test'] == 'True'
done = False
start_key = None
developers = {}
clients_to_be_notified = []
if test:
developers['idm-portal1'] = '***'
clients_to_be_notified = [{'client_id': 'idm-portal1', 'expiration_date': '04/17/2021'}]
while not done:
if start_key:
scan_kwargs['ExclusiveStartKey'] = start_key
response = table.scan(**scan_kwargs)
process_clients(response.get('Items', []), developers, clients_to_be_notified, days)
start_key = response.get('LastEvaluatedKey', None)
done = start_key is None
print("total developers ", len(developers), " total clients_to_be_notified ", len(clients_to_be_notified))
return notify_clients(clients_to_be_notified, developers)
if __name__ == '__main__':
scan_clients(event={'days': 30, 'local': False, 'test': True}, context=None)
Response
{
"errorMessage": "Unable to import module 'test_client_visitor': No module named 'test_client_visitor'",
"errorType": "Runtime.ImportModuleError",
"stackTrace": []
}
Your file must be named test_client_visitor.py. The way Lambda runs the code is by trying to import the main file and call the handler function. See the AWS docs to set up a handler for Python.
The reason you didn't run into this issue locally is, I assume, because you are calling Python directly on the command line: python test_client_visitor.py. When you import a module in Python, the file has to end in the .py extension.
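To make the naming rule concrete, here is a hypothetical minimal layout; the handler string configured in Lambda is module.function, so a handler string of test_client_visitor.handler requires a file test_client_visitor.py at the root of the deployment zip:
# test_client_visitor.py -- must sit at the zip root; the Lambda handler
# string "test_client_visitor.handler" resolves to this module + function
def handler(event, context):
    return {"ok": True}  # placeholder body; only the naming matters here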
I was able to fix this issue with the right packaging of the contents into the zip, avoiding the creation of an extra folder level (which would keep the module off the zip root) with the below command.
Command:
cd folder; zip -r ../filename.zip *
Thank you, everyone, for your input.
I am trying to pull data from Bloomberg using the Python API. The API package comes with example code, and the programs that only require localhost work perfectly. However, the programs that use other authorization methods always fail with the error:
Connecting to port 8194 on localhost
TokenGenerationFailure = {
reason = {
source = "apitkns (apiauth) on ebbdbp-ob-053"
category = "NO_AUTH"
errorCode = 12
description = "User not in emrs userid=NA\mds firm=22691"
subcategory = "INVALID_USER"
}
}
Failed to get token
No authorization
I saw one other person having a similar problem, but instead of solving it he chose to just use localhost. I can't always use localhost, because I will have to assist and troubleshoot for other users, so I need a hint on how to overcome this error.
My question is: how can I set the userid to anything other than OS_LOGON (which automatically uses the login credentials of my account), so that I can use other users' names when needed? I tried replacing OS_LOGON with the user name, but it didn't work.
The full program I am trying to run is:
"""SnapshotRequestTemplateExample.py"""
from __future__ import print_function
from __future__ import absolute_import
import datetime
from optparse import OptionParser, OptionValueError
import blpapi
TOKEN_SUCCESS = blpapi.Name("TokenGenerationSuccess")
TOKEN_FAILURE = blpapi.Name("TokenGenerationFailure")
AUTHORIZATION_SUCCESS = blpapi.Name("AuthorizationSuccess")
TOKEN = blpapi.Name("token")
def authOptionCallback(_option, _opt, value, parser):
vals = value.split('=', 1)
if value == "user":
parser.values.auth = "AuthenticationType=OS_LOGON"
elif value == "none":
parser.values.auth = None
elif vals[0] == "app" and len(vals) == 2:
parser.values.auth = "AuthenticationMode=APPLICATION_ONLY;"\
"ApplicationAuthenticationType=APPNAME_AND_KEY;"\
"ApplicationName=" + vals[1]
elif vals[0] == "userapp" and len(vals) == 2:
parser.values.auth = "AuthenticationMode=USER_AND_APPLICATION;"\
"AuthenticationType=OS_LOGON;"\
"ApplicationAuthenticationType=APPNAME_AND_KEY;"\
"ApplicationName=" + vals[1]
elif vals[0] == "dir" and len(vals) == 2:
parser.values.auth = "AuthenticationType=DIRECTORY_SERVICE;"\
"DirSvcPropertyName=" + vals[1]
else:
raise OptionValueError("Invalid auth option '%s'" % value)
def parseCmdLine():
"""parse cli arguments"""
parser = OptionParser(description="Retrieve realtime data.")
parser.add_option("-a",
"--ip",
dest="hosts",
help="server name or IP (default: localhost)",
metavar="ipAddress",
action="append",
default=[])
parser.add_option("-p",
dest="port",
type="int",
help="server port (default: %default)",
metavar="tcpPort",
default=8194)
parser.add_option("--auth",
dest="auth",
help="authentication option: "
"user|none|app=<app>|userapp=<app>|dir=<property>"
" (default: %default)",
metavar="option",
action="callback",
callback=authOptionCallback,
type="string",
default="user")
(opts, _) = parser.parse_args()
if not opts.hosts:
opts.hosts = ["localhost"]
if not opts.topics:
opts.topics = ["/ticker/IBM US Equity"]
return opts
def authorize(authService, identity, session, cid):
"""authorize the session for identity via authService"""
tokenEventQueue = blpapi.EventQueue()
session.generateToken(eventQueue=tokenEventQueue)
# Process related response
ev = tokenEventQueue.nextEvent()
token = None
if ev.eventType() == blpapi.Event.TOKEN_STATUS or \
ev.eventType() == blpapi.Event.REQUEST_STATUS:
for msg in ev:
print(msg)
if msg.messageType() == TOKEN_SUCCESS:
token = msg.getElementAsString(TOKEN)
elif msg.messageType() == TOKEN_FAILURE:
break
if not token:
print("Failed to get token")
return False
# Create and fill the authorization request
authRequest = authService.createAuthorizationRequest()
authRequest.set(TOKEN, token)
# Send authorization request to "fill" the Identity
session.sendAuthorizationRequest(authRequest, identity, cid)
# Process related responses
startTime = datetime.datetime.today()
WAIT_TIME_SECONDS = 10
while True:
event = session.nextEvent(WAIT_TIME_SECONDS * 1000)
if event.eventType() == blpapi.Event.RESPONSE or \
event.eventType() == blpapi.Event.REQUEST_STATUS or \
event.eventType() == blpapi.Event.PARTIAL_RESPONSE:
for msg in event:
print(msg)
if msg.messageType() == AUTHORIZATION_SUCCESS:
return True
print("Authorization failed")
return False
endTime = datetime.datetime.today()
if endTime - startTime > datetime.timedelta(seconds=WAIT_TIME_SECONDS):
return False
def main():
"""main entry point"""
global options
options = parseCmdLine()
# Fill SessionOptions
sessionOptions = blpapi.SessionOptions()
for idx, host in enumerate(options.hosts):
sessionOptions.setServerAddress(host, options.port, idx)
sessionOptions.setAuthenticationOptions(options.auth)
sessionOptions.setAutoRestartOnDisconnection(True)
print("Connecting to port %d on %s" % (
options.port, ", ".join(options.hosts)))
session = blpapi.Session(sessionOptions)
if not session.start():
print("Failed to start session.")
return
subscriptionIdentity = None
if options.auth:
subscriptionIdentity = session.createIdentity()
isAuthorized = False
authServiceName = "//blp/apiauth"
if session.openService(authServiceName):
authService = session.getService(authServiceName)
isAuthorized = authorize(authService, subscriptionIdentity,
session, blpapi.CorrelationId("auth"))
if not isAuthorized:
print("No authorization")
return
else:
print("Not using authorization")
.
.
.
.
.
finally:
session.stop()
if __name__ == "__main__":
print("SnapshotRequestTemplateExample")
try:
main()
except KeyboardInterrupt:
print("Ctrl+C pressed. Stopping...")
This example is intended for Bloomberg's BPIPE product and as such includes the necessary authorization code. For this example, if you're connecting to the Desktop API (typically localhost:8194), you would want to pass an auth parameter of "none". Note that this example is for the mktdata snapshot functionality, which isn't supported by the Desktop API.
You state you're trying to troubleshoot on behalf of other users, presumably traders using BPIPE under their credentials. In this case you would need to create an Identity object to represent that user.
This would be done thusly:
# Create and fill the authorization request
authRequest = authService.createAuthorizationRequest()
authRequest.set("authId", STRING_CONTAINING_USERS_EMRS_LOGON)
authRequest.set("ipAddress", STRING_OF_IP_ADDRESS_WHERE_USER_IS_LOGGED_INTO_TERMINAL)
# Send authorization request to "fill" the Identity
session.sendAuthorizationRequest(authRequest, identity, cid)
Please be aware of potential licensing compliance issues when using this approach as this can have serious consequences. If in any doubt, approach your firm's market data team who will be able to ask their Bloomberg contacts.
Edit:
As asked in the comments, it's useful to elaborate on the other possible parameters for the AuthorizationRequest.
"uuid" + "ipAddress"; this would be the default method of authenticating users for Server API. On BPIPE this would require Bloomberg to explicitly enable it for you. The UUID is the unique integer identifier assigned to each Bloomberg Anywhere user. You can look this up in the terminal by running IAM
"emrsId" + "ipAddress"; "emrsId" is a deprecated alias for "authId". This shouldn't be used anymore.
"authId" + "ipAddress"; "authId" is the String defined in EMRS (the BPIPE Entitlements Management and Reporting System) or SAPE (the Server API's equivalent of EMRS) that represents each user. This would typically be that user's OS login details (e.g. DOMAIN/USERID) or Active Directory property (e.g. mail -> blah#blah.blah)
"authId" + "ipAddress" + "application"; "application" is the application name defined on EMRS/SAPE. This will check to see whether the user defined in authId is enabled for the named application on EMRS. Using one of these user+app style Identity objects in requests should record usage against both the user and application in the EMRS usage reports.
"token"; this is the preferred approach. Using the session.generateToken functionality (which can be seen in the original question's code snippet) will result in an alphanumeric string. You'd pass this as the only parameter into the Authorization request. Note that the token generation system is virtualization-aware; if it detects it's running in Citrix or a remote desktop it will report the IP address of the display machine (or one hop towards where the user actually is).
I use PyCharm to write a Python 3 web app project using the Tornado web framework.
The listing service has been built already. I need to build the remaining two components: the user service and the public API layer. The implementation of the listing service can serve as a good starting point to learn more about how to structure a web application using the Tornado web framework.
I am required to use Tornado's built-in framework for HTTP requests.
The error occurs at listen (app.listen(options.port)) when I try to run the program:
Traceback (most recent call last):
File "D:/Bill/python/Tornado/99-python-exercise-master/listing_service.py", line 203, in <module>
app.listen(options.port)
File "C:\Program Files\Python38\lib\site-packages\tornado\web.py", line 2116, in listen
server.listen(port, address)
File "C:\Program Files\Python38\lib\site-packages\tornado\tcpserver.py", line 152, in listen
self.add_sockets(sockets)
File "C:\Program Files\Python38\lib\site-packages\tornado\tcpserver.py", line 165, in add_sockets
self._handlers[sock.fileno()] = add_accept_handler(
File "C:\Program Files\Python38\lib\site-packages\tornado\netutil.py", line 279, in add_accept_handler
io_loop.add_handler(sock, accept_handler, IOLoop.READ)
File "C:\Program Files\Python38\lib\site-packages\tornado\platform\asyncio.py", line 100, in add_handler
self.asyncio_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ)
File "C:\Program Files\Python38\lib\asyncio\events.py", line 501, in add_reader
raise NotImplementedError
NotImplementedError
code:
import tornado.web
import tornado.log
import tornado.options
import sqlite3
import logging
import json
import time
class App(tornado.web.Application):
def __init__(self, handlers, **kwargs):
super().__init__(handlers, **kwargs)
# Initialising db connection
self.db = sqlite3.connect("listings.db")
self.db.row_factory = sqlite3.Row
self.init_db()
def init_db(self):
cursor = self.db.cursor()
# Create table
cursor.execute(
"CREATE TABLE IF NOT EXISTS 'listings' ("
+ "id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,"
+ "user_id INTEGER NOT NULL,"
+ "listing_type TEXT NOT NULL,"
+ "price INTEGER NOT NULL,"
+ "created_at INTEGER NOT NULL,"
+ "updated_at INTEGER NOT NULL"
+ ");"
)
self.db.commit()
class BaseHandler(tornado.web.RequestHandler):
def write_json(self, obj, status_code=200):
self.set_header("Content-Type", "application/json")
self.set_status(status_code)
self.write(json.dumps(obj))
# /listings
class ListingsHandler(BaseHandler):
    @tornado.gen.coroutine
def get(self):
# Parsing pagination params
page_num = self.get_argument("page_num", 1)
page_size = self.get_argument("page_size", 10)
try:
page_num = int(page_num)
except:
logging.exception("Error while parsing page_num: {}".format(page_num))
self.write_json({"result": False, "errors": "invalid page_num"}, status_code=400)
return
try:
page_size = int(page_size)
except:
logging.exception("Error while parsing page_size: {}".format(page_size))
self.write_json({"result": False, "errors": "invalid page_size"}, status_code=400)
return
# Parsing user_id param
user_id = self.get_argument("user_id", None)
if user_id is not None:
try:
user_id = int(user_id)
except:
self.write_json({"result": False, "errors": "invalid user_id"}, status_code=400)
return
# Building select statement
select_stmt = "SELECT * FROM listings"
# Adding user_id filter clause if param is specified
if user_id is not None:
select_stmt += " WHERE user_id=?"
# Order by and pagination
limit = page_size
offset = (page_num - 1) * page_size
select_stmt += " ORDER BY created_at DESC LIMIT ? OFFSET ?"
# Fetching listings from db
if user_id is not None:
args = (user_id, limit, offset)
else:
args = (limit, offset)
cursor = self.application.db.cursor()
results = cursor.execute(select_stmt, args)
listings = []
for row in results:
fields = ["id", "user_id", "listing_type", "price", "created_at", "updated_at"]
listing = {
field: row[field] for field in fields
}
listings.append(listing)
self.write_json({"result": True, "listings": listings})
    @tornado.gen.coroutine
def post(self):
# Collecting required params
user_id = self.get_argument("user_id")
listing_type = self.get_argument("listing_type")
price = self.get_argument("price")
# Validating inputs
errors = []
user_id_val = self._validate_user_id(user_id, errors)
listing_type_val = self._validate_listing_type(listing_type, errors)
price_val = self._validate_price(price, errors)
time_now = int(time.time() * 1e6) # Converting current time to microseconds
# End if we have any validation errors
if len(errors) > 0:
self.write_json({"result": False, "errors": errors}, status_code=400)
return
# Proceed to store the listing in our db
cursor = self.application.db.cursor()
cursor.execute(
"INSERT INTO 'listings' "
+ "('user_id', 'listing_type', 'price', 'created_at', 'updated_at') "
+ "VALUES (?, ?, ?, ?, ?)",
(user_id_val, listing_type_val, price_val, time_now, time_now)
)
self.application.db.commit()
# Error out if we fail to retrieve the newly created listing
if cursor.lastrowid is None:
self.write_json({"result": False, "errors": ["Error while adding listing to db"]}, status_code=500)
return
listing = dict(
id=cursor.lastrowid,
user_id=user_id_val,
listing_type=listing_type_val,
price=price_val,
created_at=time_now,
updated_at=time_now
)
self.write_json({"result": True, "listing": listing})
def _validate_user_id(self, user_id, errors):
try:
user_id = int(user_id)
return user_id
except Exception as e:
logging.exception("Error while converting user_id to int: {}".format(user_id))
errors.append("invalid user_id")
return None
def _validate_listing_type(self, listing_type, errors):
if listing_type not in {"rent", "sale"}:
errors.append("invalid listing_type. Supported values: 'rent', 'sale'")
return None
else:
return listing_type
def _validate_price(self, price, errors):
# Convert string to int
try:
price = int(price)
except Exception as e:
logging.exception("Error while converting price to int: {}".format(price))
errors.append("invalid price. Must be an integer")
return None
if price < 1:
errors.append("price must be greater than 0")
return None
else:
return price
# /listings/ping
class PingHandler(tornado.web.RequestHandler):
    @tornado.gen.coroutine
def get(self):
self.write("pong!")
def make_app(options):
return App([
(r"/listings/ping", PingHandler),
(r"/listings", ListingsHandler),
], debug=options.debug)
if __name__ == "__main__":
# Define settings/options for the web app
# Specify the port number to start the web app on (default value is port 6000)
tornado.options.define("port", default=6000)
# Specify whether the app should run in debug mode
# Debug mode restarts the app automatically on file changes
tornado.options.define("debug", default=True)
# Read settings/options from command line
tornado.options.parse_command_line()
# Access the settings defined
options = tornado.options.options
# Create web app
app = make_app(options)
app.listen(options.port)
logging.info("Starting listing service. PORT: {}, DEBUG: {}".format(options.port, options.debug))
# Start event loop
tornado.ioloop.IOLoop.instance().start()
How to fix this problem?
Python 3.8 made a backwards-incompatible change to the asyncio package used by Tornado. Applications that use Tornado on Windows with Python 3.8 must call asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) at the beginning of their main file/function. (as documented on the home page of tornadoweb.org)
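A minimal sketch of that fix at the top of the listing service's entry point (the platform guard is an addition so the code stays portable off Windows):
import asyncio
import sys

if sys.platform == "win32":
    # Python 3.8 made the Proactor loop the Windows default, and it lacks the
    # add_reader support Tornado needs; fall back to the selector loop.
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())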
I can't work out from the documentation of the Azure Python SDK how to create a new node pool in an existing Kubernetes cluster.
It is easy to do on the command line:
az aks nodepool add --cluster-name CLUSTER-NAME
--resource-group RESOURCE-GROUP
--name NODE-NAME
--enable-cluster-autoscaler
--kubernetes-version 1.15.7
--min-count 1
--max-count 4
--node-count 1
--node-vm-size Standard_NC6s_v2
How can I implement the exact same command using the Python SDK?
Currently, I am able to connect to the client like this:
# pip install azure
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.containerservice import ContainerServiceClient
credentials = ServicePrincipalCredentials(CLIENT,KEY,tenant = TENANT_ID)
client = ContainerServiceClient(credentials, subscription_id)
credentialResults = client.managed_clusters.list_cluster_user_credentials(resource_group, aks_service_name)
# How can I continue from here to create or delete a new nodepool?
How can I continue from here to create or delete a new node pool?
You can make use of the following code to scale an existing node pool:
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
To add a node pool:
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
kubernetes_version=None,
zones=None,
enable_node_public_ip=False,
node_vm_size=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
tags=None,
labels=None,
mode="User",
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type.lower() == "windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=zones,
enable_node_public_ip=enable_node_public_ip,
node_taints=taints_array,
mode=mode
)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
GitHub reference
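If you prefer calling the management SDK directly rather than lifting the CLI helper, a sketch like the following should be close. Treat the names as assumptions to verify against your installed azure-mgmt-containerservice version: the operation group (agent_pools), the AgentPool model, and whether the method is create_or_update or begin_create_or_update all vary across releases.
from azure.mgmt.containerservice.models import AgentPool  # model/import path may differ by SDK version

# Parameters mirror the CLI flags from the question.
agent_pool = AgentPool(
    count=1,
    vm_size="Standard_NC6s_v2",
    orchestrator_version="1.15.7",
    min_count=1,
    max_count=4,
    enable_auto_scaling=True,
    agent_pool_type="VirtualMachineScaleSets",
)
poller = client.agent_pools.create_or_update(resource_group, "CLUSTER-NAME", "NODE-NAME", agent_pool)
poller.result()  # block until the node pool is provisioned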
Thanks to Sajeetharan, I found the implementation of 'az aks nodepool add' in the azure-cli code:
GitHub ref: azure-cli; it is the same aks_agentpool_add implementation quoted in the answer above.
I wrote a small CLI todo app with Docopt, but when I run it (python t.py) I get this exception at the end, even though everything seems to work fine. When I pass a command to the app, I get no exceptions at all. One more thing: if I remove the __del__ method, no exception appears, but I think we need to close the sqlite db connection. Any suggestions?
Exception AttributeError: "'Todo' object has no attribute 'db'" in <bound method Todo.__del__ of <__main__.Todo object at 0x1038dac50>> ignored
App code:
"""t, a unix command-line todo application
Usage:
t add <task>
t check <id>
t uncheck <id>
t clear
t ls [--all]
t -h | --help
t --version
Commands:
add Add a new task
check Check a new task as done
uncheck Uncheck a task as done
clear Refresh the database
ls List all tasks
Options:
-h --help Show this screen.
--version Show version.
--all List all tasks
"""
import sqlite3
import os
import datetime
from docopt import docopt
from termcolor import colored
from prettytable import PrettyTable
SMILEY = "\xF0\x9F\x98\x83" # Smiley emoji
GRIN = "\xF0\x9F\x98\x81" # Grin face emoji
def echo(msg, err=False):
"""
A simple function for printing to terminal with colors and emoji's
"""
if err:
print colored(msg + " " + GRIN, "red")
else:
print colored(msg + " " + SMILEY, "cyan")
class Todo(object):
def __init__(self):
"""
Set up the db and docopt upon creation of object
"""
self.arg = docopt(__doc__, version=0.10)
# Create a path to store the database file
db_path = os.path.expanduser("~/")
self.db_path = db_path + "/" + ".t-db"
self.init_db()
def init_db(self):
self.db = sqlite3.connect(self.db_path)
self.cursor = self.db.cursor()
self.cursor.execute('''
CREATE TABLE IF NOT EXISTS todo(id INTEGER PRIMARY KEY, task TEXT,
done INT, date_added TEXT, date_completed TEXT)
''')
self.db.commit()
def run(self):
"""
        Parse the args using docopt and route to the respective methods
"""
if self.arg['add']:
self.add_task()
elif self.arg['check']:
self.check_task()
elif self.arg['uncheck']:
self.uncheck_task()
elif self.arg['clear']:
self.clear_task()
else:
if self.arg['--all']:
self.list_task()
else:
self.list_pending_tasks()
def _record_exists(self, id):
"""
Checks if the record exists in the db
"""
self.cursor.execute('''
SELECT * FROM todo WHERE id=?
''', (id,))
record = self.cursor.fetchone()
if record is None:
return False
return True
def _is_done(self, id):
"""
Checks if the task has already been marked as done
"""
self.cursor.execute('''
SELECT done FROM todo WHERE id=?
''', (id,))
record = self.cursor.fetchone()
        # fetchone() returns a row tuple, so compare its first column
        if record is not None and record[0] == 0:
            return False
        return True
def add_task(self):
"""
Add a task todo to the db
"""
task = self.arg['<task>']
date = datetime.datetime.now()
date_now = "%s-%s-%s" % (date.day, date.month, date.year)
self.cursor.execute('''
INSERT INTO todo(task, done, date_added)
VALUES (?, ?, ?)
''', (str(task), 0, date_now))
self.db.commit()
echo("The task has been been added to the list")
def check_task(self):
"""
Mark a task as done
"""
task_id = self.arg['<id>']
date = datetime.datetime.now()
date_now = "%s-%s-%s" % (date.day, date.month, date.year)
if self._record_exists(task_id):
self.cursor.execute('''
UPDATE todo SET done=?, date_completed=? WHERE Id=?
''', (1, date_now, int(task_id)))
echo("Task %s has been marked as done" % str(task_id))
self.db.commit()
else:
echo("Task %s doesn't exist" % (str(task_id)), err=True)
def uncheck_task(self):
"""
Mark as done task as undone
"""
task_id = self.arg['<id>']
if self._record_exists(task_id):
self.cursor.execute('''
UPDATE todo SET done=? WHERE id=?
''', (0, int(task_id)))
echo("Task %s has been unchecked" % str(task_id))
self.db.commit()
else:
echo("Task %s doesn't exist" % str(task_id), err=True)
def list_task(self):
"""
Display all tasks in a table
"""
tab = PrettyTable(["Id", "Task Todo", "Done ?", "Date Added",
"Date Completed"])
tab.align["Id"] = "l"
tab.padding_width = 1
self.cursor.execute('''
SELECT id, task, done, date_added, date_completed FROM todo
''')
records = self.cursor.fetchall()
for each_record in records:
if each_record[2] == 0:
done = "Nop"
else:
done = "Yup"
if each_record[4] is None:
status = "Pending..."
else:
status = each_record[4]
tab.add_row([each_record[0], each_record[1], done,
each_record[3], status])
print tab
def list_pending_tasks(self):
"""
Display all pending tasks in a tabular form
"""
tab = PrettyTable(["Id", "Task Todo", "Date Added"])
tab.align["Id"] = "l"
tab.padding_width = 1
self.cursor.execute('''
SELECT id, task, date_added FROM todo WHERE done=?
''', (int(0),))
records = self.cursor.fetchall()
for each_record in records:
tab.add_row([each_record[0], each_record[1], each_record[2]])
print tab
def clear_task(self):
"""
Delete the table to refresh the app
"""
self.cursor.execute('''
DROP TABLE todo
''')
self.db.commit()
def __del__(self):
self.db.close()
def main():
"""
Entry point for console script
"""
app = Todo()
app.run()
if __name__ == "__main__":
main()
My debugging session tells me that docopt immediately bails out if it can't parse the given options (in your case, for example when no options at all are given).
So in your __init__, docopt() is called before self.init_db() gets to set up self.db; it fails to parse the (not) given options and immediately does something like exit(1) (I'm guessing here), which in turn tears down the Todo object via the __del__ method, but the self.db member variable is not there yet.
So the "best" fix would probably be to set up the database before calling docopt, or to tell docopt that no options are OK as well.
Avoid the use of __del__. If you want to be sure everything is closed, I suggest you explicitly call self.db.close() in or after your run method; alternatively, register the close using the atexit module. See also this similar post.
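And a sketch of the atexit variant; registering the close right after the connection is created removes the need for __del__ entirely:
import atexit

def init_db(self):
    self.db = sqlite3.connect(self.db_path)
    atexit.register(self.db.close)  # runs at interpreter exit; self.db exists by then
    self.cursor = self.db.cursor()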