how to print domain name for route53 DNS - python

I am new to Python and learning. I am writing code to print the domain name, type, and value of the records in a Route 53 hosted zone.
When it loops through a CNAME record, I get the value of the CNAME but not its domain name.
import boto3


def list(zoneid, region, profile):
    a = []
    aws_session = boto3.session.Session(region_name=region, profile_name=profile)
    route53 = aws_session.client('route53')
    paginator = route53.get_paginator('list_resource_record_sets')
    page = paginator.paginate(
        HostedZoneId=zoneid,
        PaginationConfig={
            'MaxItems': 500,
            'PageSize': 500
        }
    )
    for i in page:
        for record in i['ResourceRecordSets']:
            if record['Type'] == 'CNAME':
                a.extend(x['Value'] for x in record['ResourceRecords'])
            elif record['Type'] == 'A':
                a.append(record['Name'])
    return a
print(record['Name']) gives the domain name, but how can I include it in this line: a.extend(x['Value'] for x in record['ResourceRecords'])?

There is no Value key available for any of the resource records, as can be seen from the sample response in the docs:
{
    'ResourceRecordSets': [
        {
            'Name': 'string',
            'Type': 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA'|'CAA',
            'SetIdentifier': 'string',
            'Weight': 123,
            'Region': 'us-east-1'|'us-east-2'|'us-west-1'|'us-west-2'|'ca-central-1'|'eu-west-1'|'eu-west-2'|'eu-west-3'|'eu-central-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'|'sa-east-1'|'cn-north-1'|'cn-northwest-1'|'ap-south-1',
            'GeoLocation': {
                'ContinentCode': 'string',
                'CountryCode': 'string',
                'SubdivisionCode': 'string'
            },
            'Failover': 'PRIMARY'|'SECONDARY',
            'MultiValueAnswer': True|False,
            'TTL': 123,
            'ResourceRecords': [
                {
                    'Value': 'string'
                },
            ],
            'AliasTarget': {
                'HostedZoneId': 'string',
                'DNSName': 'string',
                'EvaluateTargetHealth': True|False
            },
            'HealthCheckId': 'string',
            'TrafficPolicyInstanceId': 'string'
        },
    ],
    'IsTruncated': True|False,
    'MaxItems': 'string',
    'NextToken': 'string'
}
I think you just want to refer to the Name key:
Name (string) --
The name of the domain you want to perform the action on.
Enter a fully qualified domain name, for example, www.example.com.
You can optionally include a trailing dot. If you omit the trailing
dot, Amazon Route 53 still assumes that the domain name that you
specify is fully qualified. This means that Amazon Route 53 treats
www.example.com (without a trailing dot) and www.example.com.
(with a trailing dot) as identical.
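If the goal is to keep each record's own domain name next to its CNAME value, one option is to collect (name, value) pairs instead of bare values. This is a minimal sketch built on the loop from the question; the tuple layout is only an assumption about the desired output:

a = []
for i in page:
    for record in i['ResourceRecordSets']:
        if record['Type'] == 'CNAME':
            # pair the record's own domain name with each CNAME target value
            a.extend((record['Name'], x['Value'])
                     for x in record['ResourceRecords'])
        elif record['Type'] == 'A':
            a.append((record['Name'], None))

for name, value in a:
    print(name, value)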

How to parse text output separated with space

The first line contains the field names. The other lines contain the values, but where a row has no corresponding data, the column is simply filled with spaces.
In particular, bindings has no value in SHORTNAMES and APIGROUP, and pods has no value in APIGROUP:
$ kubectl api-resources
NAME          SHORTNAMES   APIGROUP   NAMESPACED   KIND
bindings                              true         Binding
pods          po                      true         Pod
deployments   deploy       apps       true         Deployment
Finally, I would like to treat the output data as a Python dict whose keys are the field names.
My first idea was to replace the blank (missing) columns with a dummy value using a regex, like this:
NAME SHORTNAMES APIGROUP NAMESPACED KIND
bindings no-value no-value true Binding
Is it possible?
Here is a solution with regex.
import re

data = """NAME          SHORTNAMES   APIGROUP   NAMESPACED   KIND
bindings                              true         Binding
pods          po                      true         Pod
deployments   deploy       apps       true         Deployment"""

regex = re.compile(
    r"(?P<name>\S+)\s+"
    r"(?P<shortname>\S+)\s+"
    r"(?P<group>\S+)\s+"
    r"(?P<namespace>\S+)\s+"
    r"(?P<kind>\S+)"
)

header = data.splitlines()[0]
for match in regex.finditer(header):
    name_index = match.start('name')
    shortname_index = match.start('shortname')
    group_index = match.start('group')
    namespace_index = match.start('namespace')
    kind_index = match.start('kind')


def get_text(line, index):
    result = ''
    for char in line[index:]:
        if char == ' ':
            break
        result += char
    if result:
        return result
    else:
        return "no-value"


resources = []
for line in data.splitlines()[1:]:
    resources.append({
        "name": get_text(line, name_index),
        "shortname": get_text(line, shortname_index),
        "group": get_text(line, group_index),
        "namespace": get_text(line, namespace_index),
        "kind": get_text(line, kind_index)
    })

print(resources)
And the output is (formatted):
[
    {
        'name': 'bindings',
        'shortname': 'no-value',
        'group': 'no-value',
        'namespace': 'true',
        'kind': 'Binding'
    },
    {
        'name': 'pods',
        'shortname': 'po',
        'group': 'no-value',
        'namespace': 'true',
        'kind': 'Pod'
    },
    {
        'name': 'deployments',
        'shortname': 'deploy',
        'group': 'apps',
        'namespace': 'true',
        'kind': 'Deployment'
    }
]
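If adding a dependency is acceptable, a shorter alternative is to let pandas infer the fixed-width columns. This is only a sketch and assumes pandas is installed; note that the resulting keys follow the original header names (NAME, SHORTNAMES, ...) rather than the lowercase keys used above:

import io

import pandas as pd

data = """NAME          SHORTNAMES   APIGROUP   NAMESPACED   KIND
bindings                              true         Binding
pods          po                      true         Pod
deployments   deploy       apps       true         Deployment"""

# read_fwf infers the fixed-width column boundaries from the whitespace
df = pd.read_fwf(io.StringIO(data))
resources = df.fillna("no-value").to_dict("records")
print(resources)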

Set customer ID for Google Adwords API

I am trying to use the Google AdWords API with the official library here: https://github.com/googleads/googleads-python-lib
I use a manager account on Google AdWords and want to work with my clients' accounts.
I can get all of the AdWords account IDs (like 123-456-7891), but I don't know how to pass an account ID to my Google AdWords functions as a parameter.
Here's my main function:
def main(argv):
    adwords_client = adwords.AdWordsClient.LoadFromStorage(path="googleads.yaml")
    add_campaign(adwords_client)
I don't see any account ID parameter in the official samples, such as this one:
import datetime
import uuid

from googleads import adwords


def add_campaign(client):
    # Initialize appropriate services.
    campaign_service = client.GetService('CampaignService', version='v201809')
    budget_service = client.GetService('BudgetService', version='v201809')

    # Create a budget, which can be shared by multiple campaigns.
    budget = {
        'name': 'Interplanetary budget #%s' % uuid.uuid4(),
        'amount': {
            'microAmount': '50000000'
        },
        'deliveryMethod': 'STANDARD'
    }
    budget_operations = [{
        'operator': 'ADD',
        'operand': budget
    }]

    # Add the budget.
    budget_id = budget_service.mutate(budget_operations)['value'][0][
        'budgetId']

    # Construct operations and add campaigns.
    operations = [{
        'operator': 'ADD',
        'operand': {
            'name': 'Interplanetary Cruise #%s' % uuid.uuid4(),
            # Recommendation: Set the campaign to PAUSED when creating it to
            # stop the ads from immediately serving. Set to ENABLED once you've
            # added targeting and the ads are ready to serve.
            'status': 'PAUSED',
            'advertisingChannelType': 'SEARCH',
            'biddingStrategyConfiguration': {
                'biddingStrategyType': 'MANUAL_CPC',
            },
            'endDate': (datetime.datetime.now() +
                        datetime.timedelta(365)).strftime('%Y%m%d'),
            # Note that only the budgetId is required
            'budget': {
                'budgetId': budget_id
            },
            'networkSetting': {
                'targetGoogleSearch': 'true',
                'targetSearchNetwork': 'true',
                'targetContentNetwork': 'false',
                'targetPartnerSearchNetwork': 'false'
            },
            # Optional fields
            'startDate': (datetime.datetime.now() +
                          datetime.timedelta(1)).strftime('%Y%m%d'),
            'frequencyCap': {
                'impressions': '5',
                'timeUnit': 'DAY',
                'level': 'ADGROUP'
            },
            'settings': [
                {
                    'xsi_type': 'GeoTargetTypeSetting',
                    'positiveGeoTargetType': 'DONT_CARE',
                    'negativeGeoTargetType': 'DONT_CARE'
                }
            ]
        }
    }, {
        'operator': 'ADD',
        'operand': {
            'name': 'Interplanetary Cruise banner #%s' % uuid.uuid4(),
            'status': 'PAUSED',
            'biddingStrategyConfiguration': {
                'biddingStrategyType': 'MANUAL_CPC'
            },
            'endDate': (datetime.datetime.now() +
                        datetime.timedelta(365)).strftime('%Y%m%d'),
            # Note that only the budgetId is required
            'budget': {
                'budgetId': budget_id
            },
            'advertisingChannelType': 'DISPLAY'
        }
    }]

    campaigns = campaign_service.mutate(operations)
How can I tell the AdWords API which account I want to add this campaign to?
Thanks for your help!
OK, my bad, I missed a method in the documentation (http://googleads.github.io/googleads-python-lib/googleads.adwords.AdWordsClient-class.html#SetClientCustomerId).
# ID of your customer here
CUSTOMER_SERVICE_ID = '4852XXXXX'
# Load customer account access
client = adwords.AdWordsClient.LoadFromStorage(path="googleads.yaml")
client.SetClientCustomerId(CUSTOMER_SERVICE_ID)
The customer ID is now associated with the AdWordsClient instance, so the configured client can be passed as a parameter to the other functions.
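Putting it together, here is a minimal sketch of driving several client accounts from the manager account; the customer IDs below are placeholders, and add_campaign is the sample function above:

from googleads import adwords

# Placeholder client customer IDs (without the dashes)
CUSTOMER_IDS = ['1234567891', '9876543210']


def main():
    client = adwords.AdWordsClient.LoadFromStorage(path="googleads.yaml")
    for customer_id in CUSTOMER_IDS:
        # every subsequent service call on this client is scoped to this account
        client.SetClientCustomerId(customer_id)
        add_campaign(client)


if __name__ == '__main__':
    main()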

Google DLP: "ValueError: Protocol message Value has no "stringValue" field."

I have a method that builds a table of multiple items for Google's DLP inspect API, which can take either a ContentItem or a table of values.
Here is how the request is constructed:
import google.cloud.dlp


def redact_text(text_list):
    dlp = google.cloud.dlp.DlpServiceClient()
    project = 'my-project'
    parent = dlp.project_path(project)
    items = build_item_table(text_list)

    info_types = [{'name': 'EMAIL_ADDRESS'}, {'name': 'PHONE_NUMBER'}]
    inspect_config = {
        'min_likelihood': "LIKELIHOOD_UNSPECIFIED",
        'include_quote': True,
        'info_types': info_types
    }

    response = dlp.inspect_content(parent, inspect_config, items)
    return response


def build_item_table(text_list):
    rows = []
    for item in text_list:
        row = {"values": [{"stringValue": item}]}
        rows.append(row)

    table = {"table": {"headers": [{"name": "something"}], "rows": rows}}
    return table
When I run this I get back the error ValueError: Protocol message Value has no "stringValue" field. even though this example and the docs say otherwise.
Is there something off in how I build the request?
Edit: Here's the output from build_item_table
{
    'table':
    {
        'headers':
        [
            {'name': 'value'}
        ],
        'rows':
        [
            {
                'values':
                [
                    {
                        'stringValue': 'My name is Jenny and my number is (555) 867-5309, you can also email me at anemail@gmail.com, another email you can reach me at is email@email.com. '
                    }
                ]
            },
            {
                'values':
                [
                    {
                        'stringValue': 'Jimbob Doe (555) 111-1233, that one place down the road some_email@yahoo.com'
                    }
                ]
            }
        ]
    }
}
Try string_value.... Python uses the field names, not the type names.
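Applied to the helper from the question, that means switching the key to the snake_case field name. A sketch of the adjusted builder (the "value" header name just mirrors the edit above):

def build_item_table(text_list):
    rows = []
    for item in text_list:
        # use the protobuf field name (string_value), not the type name (stringValue)
        rows.append({"values": [{"string_value": item}]})
    return {"table": {"headers": [{"name": "value"}], "rows": rows}}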

how to change schema type for single document value

I want to change the default type from dict to string for a particular user.
DOMAIN = {
    'item': {
        'schema': {
            'profile': {
                'type': 'dict'
            },
            'username': {
                'type': 'string'
            }
        }
    }
}
Suppose that if I get a request from user x, the type should not change, but if I get a request from user y, the type should change from dict to string. How can I change this for a particular item resource without affecting the others?
TIA.
Your best approach would probably be to set up two different API endpoints, one for users of type X, and another for users of type Y. Both endpoints would consume the same underlying datasource (same DB collection being updated). You achieve that by setting the datasource for your endpoint, like so:
itemx = {
    'url': 'endpoint_1',
    'datasource': {
        'source': 'people',  # actual DB collection consumed by the endpoint
        'filter': {'usertype': 'x'},  # optional
        'projection': {'username': 1}  # optional
    },
    'schema': {...}  # here you set username to dict, or string
}
Rinse and repeat for the second endpoint. See the docs for more info.
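For illustration, a sketch of what both endpoints could look like side by side; the endpoint names, the usertype filter field, and the schema details are assumptions chosen to match the question's example:

itemx = {
    'url': 'itemx',
    'datasource': {
        'source': 'people',
        'filter': {'usertype': 'x'}
    },
    'schema': {
        'profile': {'type': 'dict'},
        'username': {'type': 'string'}
    }
}

itemy = {
    'url': 'itemy',
    'datasource': {
        'source': 'people',
        'filter': {'usertype': 'y'}
    },
    'schema': {
        # same field, but validated as a string for this class of users
        'profile': {'type': 'string'},
        'username': {'type': 'string'}
    }
}

DOMAIN = {'itemx': itemx, 'itemy': itemy}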

Combination of two fields to be unique in Python Eve

In Python Eve framework, is it possible to have a condition which checks combination of two fields to be unique?
For example, the definition below only restricts firstname and lastname to each be unique on their own for items in the resource.
people = {
    # 'title' tag used in item links.
    'item_title': 'person',
    'schema': {
        'firstname': {
            'type': 'string',
            'required': True,
            'unique': True
        },
        'lastname': {
            'type': 'string',
            'required': True,
            'unique': True
        }
    }
}
Instead, is there a way to restrict firstname and lastname combination to be unique?
Or is there a way to implement a CustomValidator for this?
You can probably achieve what you want by overloading _validate_unique and implementing custom logic there, taking advantage of self.document in order to retrieve the other field value.
However, since _validate_unique is called for every unique field, you would end up performing your custom validation twice, once for firstname and then for lastname. Not really desirable. Of course the easy way out is setting up a fullname field, but I guess that's not an option in your case.
Have you considered going for a slightly different design? Something like:
{'name': {'first': 'John', 'last': 'Doe'}}
Then all you need to do is make sure that name is required and unique:
{
    'name': {
        'type': 'dict',
        'required': True,
        'unique': True,
        'schema': {
            'first': {'type': 'string'},
            'last': {'type': 'string'}
        }
    }
}
Inspired by Nicola and _validate_unique.
from eve.io.mongo import Validator
from eve.utils import config
from flask import current_app as app


class ExtendedValidator(Validator):
    def _validate_unique_combination(self, unique_combination, field, value):
        """ {'type': 'list'} """
        self._is_combination_unique(unique_combination, field, value, {})

    def _is_combination_unique(self, unique_combination, field, value, query):
        """ Test if the value combination is unique. """
        if unique_combination:
            query = {k: self.document[k] for k in unique_combination}
            query[field] = value

            resource_config = config.DOMAIN[self.resource]

            # exclude soft deleted documents if applicable
            if resource_config['soft_delete']:
                query[config.DELETED] = {'$ne': True}

            if self.document_id:
                id_field = resource_config['id_field']
                query[id_field] = {'$ne': self.document_id}

            datasource, _, _, _ = app.data.datasource(self.resource)

            if app.data.driver.db[datasource].find_one(query):
                key_names = ', '.join([k for k in query])
                self._error(field, "value combination of '%s' is not unique" % key_names)
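To wire this up, you would declare the rule in the schema and hand the validator class to Eve when creating the app. A sketch reusing the people resource from the question; the unique_combination entries are the only new schema keys:

from eve import Eve

people = {
    'item_title': 'person',
    'schema': {
        'firstname': {
            'type': 'string',
            'required': True,
            # validated together with lastname by ExtendedValidator
            'unique_combination': ['lastname']
        },
        'lastname': {
            'type': 'string',
            'required': True
        }
    }
}

app = Eve(validator=ExtendedValidator)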
The way I solved this issue is by creating a dynamic field, using a combination of functions and lambdas to create a hash built from whichever fields you provide.
import hashlib

import jmespath


def unique_record(fields):
    def is_lambda(field):
        # Test if a variable is a lambda
        return callable(field) and field.__name__ == "<lambda>"

    def default_setter(doc):
        # Generate the composite list
        r = [
            str(field(doc)
                # Check is lambda
                if is_lambda(field)
                # jmespath is not required, but it enables using nested doc values
                else jmespath.search(field, doc))
            for field in fields
        ]
        # Generate an MD5 hash from the composite string (keep it clean)
        return hashlib.md5(''.join(r).encode()).hexdigest()

    return {
        'type': 'string',
        'unique': True,
        'default_setter': default_setter
    }
Practical Implementation
My use case was to create a collection that limits the number of key/value pairs a user can create within the collection:
domain = {
    'schema': {
        'key': {
            'type': 'string',
            'minlength': 1,
            'maxlength': 25,
            'required': True,
        },
        'value': {
            'type': 'string',
            'minlength': 1,
            'required': True
        },
        'hash': unique_record([
            'key',
            lambda doc: request.USER['_id']
        ]),
        'user': {
            'type': 'objectid',
            'default_setter': lambda doc: request.USER['_id']  # User tenant ID
        }
    }
}
The function receives a list of either strings or lambda functions for dynamic value setting at request time, in my case the user's "_id".
The function supports JSON queries via the jmespath package; this isn't mandatory, but it leaves the door open for nested-document flexibility in other use cases.
NOTE: This will only work with values that are set by the user at request time or injected into the request using the pre_GET trigger pattern, like the USER object I inject in the pre_GET trigger, which represents the user currently making the request.
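For completeness, the injection mentioned in the note could be done with an Eve event hook. This is only a rough sketch, and lookup_user (however you resolve the current caller) is a hypothetical helper, not part of the answer above:

from eve import Eve
from flask import request

app = Eve()


def attach_user(resource, req):
    # lookup_user is hypothetical: resolve the caller from the auth header and
    # expose the result so the default_setter lambdas can read request.USER
    request.USER = lookup_user(req.headers.get('Authorization'))


# run the hook before the relevant request methods
app.on_pre_GET += attach_user
app.on_pre_POST += attach_user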
