I am trying to create my own Ansible module (which will update our CMDB), and I am looking for how to use ansible_facts in the module code.
An example of my module script:
#!/usr/bin/python
from ansible.module_utils.basic import *
import json
from servicenow import ServiceNow
from servicenow import Connection


def __get_server_info(table, server_name="", sys_id=""):
    if sys_id == "":
        return table.fetch_one({'name': server_name})
    if server_name == "":
        return table.fetch_one({'sys_id': sys_id})


def __update_cmdb_hwinfo(table, sys_id, server_name=""):
    # Here, for example, I want to put the ansible_facts value for the server's RAM size
    return table.update({'sys_id': sys_id, 'hw_ram': '...'})


def main():
    fields = {
        "snow_instance": {"required": True, "type": "str"},
        "snow_username": {"required": True, "type": "str"},
        "snow_password": {"required": True, "type": "str"},
        "server_name": {"required": True, "type": "str"},
        "api_type": {"default": "JSONv2", "type": "str"},
    }
    module = AnsibleModule(argument_spec=fields)
    # Define connection object to ServiceNow instance
    snow_connection = Connection.Auth(username=module.params['snow_username'],
                                      password=module.params['snow_password'],
                                      instance=module.params['snow_instance'],
                                      api=module.params['api_type'])
    server = ServiceNow.Base(snow_connection)
    server.__table__ = 'cmdb_ci_server_list.do'
    machine = __get_server_info(server, server_name=module.params['server_name'])
    module.exit_json(changed=False, meta=module.params, msg=machine)


if __name__ == '__main__':
    main()
What variable should I use to access ansible_facts in the module script? (And is it even possible?)
I doubt this is possible from inside the module itself, because modules are executed in the context of the remote machine with predefined parameters.
But you can wrap your module with an action plugin (which is executed in the local context), collect the required data from the available variables, and pass it as parameters to your module.
Like this (./action_plugins/a_test.py):
from ansible.plugins.action import ActionBase


class ActionModule(ActionBase):
    def run(self, tmp=None, task_vars=None):
        result = super(ActionModule, self).run(tmp, task_vars)
        module_args = self._task.args.copy()
        module_args['mem_size'] = self._templar._available_variables.get('ansible_memtotal_mb')
        return self._execute_module(module_args=module_args, task_vars=task_vars, tmp=tmp)
In this case, if your module expects a mem_size parameter, the action plugin will set it to the value of ansible_memtotal_mb.
Module example (./library/a_test.py):
#!/usr/bin/python


def main():
    module = AnsibleModule(
        argument_spec=dict(
            mem_size=dict(required=False, default=None),
        ),
        supports_check_mode=False
    )
    module.exit_json(changed=False, mem_size=module.params['mem_size'])


# old-style Ansible modules place the module_utils imports at the bottom
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

main()
Test playbook:
---
- hosts: all
  tasks:
    - a_test:
I'm using the Nornir (3.3.0) automation framework with Python 3.8. I'd like to mock the SSH access to the devices so I can test without having real or virtual network equipment online. How would I use patch or Mock/MagicMock from unittest.mock to mock netmiko_send_command (the SSH interaction with the device)?
I have the following nornir task function:
# dbb_automation/tasks.py
import logging

from nornir.core.task import Task, Result, MultiResult
from nornir_netmiko.tasks import netmiko_send_command
from nornir_utils.plugins.tasks.files import write_file

log = logging.getLogger(__name__)


def get_interfaces_with_ip(task: Task):
    log.debug(f"{task.name}: Getting result on host {task.host}")
    result: MultiResult = task.run(name="show ip int br | e unass", task=netmiko_send_command,
                                   command_string="show ip int br | e unass")
    content_str = result[0].result
    # `purpose` and `ending` are defined elsewhere in the module
    task.run(
        task=write_file,
        filename=f"outputs/{task.host}-{purpose}.{ending}",
        content=content_str
    )
    return Result(
        host=task.host,
        result=f"{task.host.name} got ip result"
    )
and the following test case (work in progress):
# tests/test_tasks.py
from dbb_automation.tasks import get_interfaces_with_ip
from nornir import InitNornir
from nornir.core.filter import F
from tests.settings import *


def test_get_interfaces_with_ip():
    # [x] init nornir with fake host
    # [ ] patch/mock netmiko_send_command
    # [ ] check file contents with patched return string of netmiko_send_command
    nr = InitNornir(
        core={
            "raise_on_error": True
        },
        runner={
            "plugin": "threaded",
            "options": {
                "num_workers": 1,
            }
        },
        inventory={
            "plugin": "SimpleInventory",
            "options": {
                "host_file": DNAC_HOSTS_YAML,
                "group_file": DNAC_GROUPS_YAML,
                "defaults_file": DNAC_DEFAULT_YAML
            }
        },
        logging={
            "log_file": "logs/nornir.log"
        }
    )
    result = nr.filter(F(has_parent_group="Borders")).run(name="get_interfaces_with_ip", task=get_interfaces_with_ip)
    # todo: test code
    assert False
Regards,
Gérard
I think I found the solution. The key was to patch where the imported function is used, not where it is defined, and to set the return value on the mock object.
@patch("dbb_automation.tasks.netmiko_send_command")
def test_get_interfaces_with_ip(mock_netmiko_send_command, nr):
    ...
    mock_netmiko_send_command.return_value = """Interface IP-Address OK? Method Status Protocol
GigabitEthernet22 10.1.54.146 YES TFTP up up
Loopback0 10.150.32.2 YES other up up
Port-channel1.2 10.150.33.65 YES manual up up
...
"""
import pytest
import os
import shutil
from unittest.mock import patch
from dbb_automation.tasks import get_interfaces_with_ip
from nornir import InitNornir
from nornir.core.filter import F
from tests.settings import *
@pytest.fixture()
def nr():
    nr = InitNornir(
        core={
            "raise_on_error": True
        },
        runner={
            "plugin": "threaded",
            "options": {
                "num_workers": 1,
            }
        },
        inventory={
            "plugin": "SimpleInventory",
            "options": {
                "host_file": DNAC_HOSTS_YAML,
                "group_file": DNAC_GROUPS_YAML,
                "defaults_file": DNAC_DEFAULT_YAML
            }
        },
        logging={
            "log_file": "logs/nornir.log"
        }
    )
    return nr
@patch("dbb_automation.tasks.netmiko_send_command")
def test_get_interfaces_with_ip(mock_netmiko_send_command, nr):
    output_folder_name = "outputs"
    shutil.rmtree(output_folder_name)
    os.mkdir(output_folder_name)
    mock_netmiko_send_command.return_value = """Interface IP-Address OK? Method Status Protocol
GigabitEthernet22 10.1.54.146 YES TFTP up up
Loopback0 10.150.32.2 YES other up up
Port-channel1.2 10.150.33.65 YES manual up up
"""
    nr.filter(F(has_parent_group="Borders")).run(name="get_interfaces_with_ip", task=get_interfaces_with_ip)

    # test code
    count = 0
    files_found = None
    for root_dir, cur_dir, files in os.walk(output_folder_name):
        count += len(files)
        assert files_found is None  # make sure there are no subdirectories
        files_found = files
    assert count == 4  # we expect a file for each host
    for file_name in files_found:
        with open(f"{output_folder_name}/{file_name}") as f:
            assert f.read() == mock_netmiko_send_command.return_value
I am trying to improve my code and add a progress bar to the file_transfer function. I was wondering how I can calculate the sent argument. I found this answer here and this one on GitHub, but I can't figure out how to use them with my code.
from os.path import getsize
from netmiko import ConnectHandler, file_transfer, progress_bar

router = {
    "device_type": "cisco_ios",
    "host": "sandbox-iosxe-latest-1.cisco.com",
    "username": "developer",
    "password": "C1sco12345",
    "port": 22,
    "verbose": True,
    "conn_timeout": 12,
    "fast_cli": False,
    "session_log": "sandbox-iosxe-latest-1.cisco.com.log",
}

src_file = dest_file = input("Name of file to copy: ")

with ConnectHandler(**router) as net_connect:
    scp = net_connect.send_config_set(config_commands=["ip scp server enable"])
    transfer = file_transfer(
        net_connect,
        source_file=src_file,
        dest_file=dest_file,
        file_system="flash:",
        direction="put",
        overwrite_file=True,
        socket_timeout=100.0,
        progress=progress_bar(
            filename=src_file,
            size=getsize(src_file),
            sent=sent,  # How to calculate? What should be placed here?
        ),
    )
It should just be progress=progress_bar or progress4=progress_bar. You just provide a callable that is constructed a certain way (and since you are using the progress_bar function that Netmiko provides, that should be fine).
Here is an example file_transfer using it:
ssh_conn = ConnectHandler(**cisco)
transfer_dict = file_transfer(
    ssh_conn,
    source_file=source_file,
    dest_file=dest_file,
    file_system=file_system,
    direction=direction,
    # Force an overwrite of the file if it already exists
    overwrite_file=True,
    progress4=progress_bar,
)
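In other words, you never compute sent yourself: the SCP layer calls your callable repeatedly and hands it the file name, total size, and bytes sent so far. A minimal sketch of a custom progress function, assuming the usual three-argument signature that progress expects (progress4 additionally receives a peername argument):
def my_progress(filename, size, sent):
    # invoked by the transfer machinery; `sent` is the byte count
    # transferred so far, supplied by the SCP layer itself
    percent = 100.0 * sent / size if size else 100.0
    print(f"{filename}: {percent:.1f}% ({sent}/{size} bytes)")
You would then pass it uncalled, as progress=my_progress, instead of invoking it at argument time.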
I'm trying to test the function start_dojot_messenger, which creates some objects and calls methods on them, and I need to verify that they were called with specific parameters.
For example, we have messenger.create_channel("dojot.notifications", "r"), and I need to test that it is called with these parameters.
Here is the method:
def start_dojot_messenger(config, persister, dojot_persist_notifications_only):
    messenger = Messenger("Persister", config)
    messenger.init()

    # Persister Only Notification
    messenger.create_channel("dojot.notifications", "r")
    messenger.on(config.dojot['subjects']['tenancy'],
                 "message", persister.handle_new_tenant)
    messenger.on("dojot.notifications", "message",
                 persister.handle_notification)
    LOGGER.info('Listen to notification events')

    if str2_bool(dojot_persist_notifications_only) != True:
        LOGGER.info("Listen to devices events")
        # TODO: add notifications to config on dojot-module-python
        messenger.create_channel(config.dojot['subjects']['devices'], "r")
        messenger.create_channel(config.dojot['subjects']['device_data'], "r")
        messenger.on(config.dojot['subjects']['devices'],
                     "message", persister.handle_event_devices)
        messenger.on(config.dojot['subjects']['device_data'],
                     "message", persister.handle_event_data)
In my tests I'm doing it this way:
@patch.object(Persister, 'create_indexes')
@patch.object(Config, 'load_defaults')
@patch('history.subscriber.persister.Messenger')
@patch.object(Messenger, 'create_channel', return_value=None)
@patch.object(Messenger, 'on', return_value=None)
def test_persist_only_notifications(mock_create_channel, mock_on, mock_messenger, mock_config, create_indexes):
    from history.subscriber.persister import start_dojot_messenger
    from history import conf

    p = Persister()
    p.create_indexes_for_notifications('admin')

    mock_config.dojot = {
        "management": {
            "user": "dojot-management",
            "tenant": "dojot-management"
        },
        "subjects": {
            "tenancy": "dojot.tenancy",
            "devices": "dojot.device-manager.device",
            "device_data": "device-data"
        }
    }

    # test persist only boolean valued notifications
    start_dojot_messenger(mock_config, p, True)

    mock_messenger.assert_called()
    assert mock_messenger.call_count == 1
The first check, that the Messenger class has been instantiated, I can do with the mock: mock_messenger.assert_called().
But I can't access the others.
I figured it could be something like this:
mock_messenger.mock_create_channel.assert_called
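(For reference, a hedged sketch of what that access would look like if the patching works the way unittest.mock usually does: the instance created inside start_dojot_messenger is the patched class's return_value, so the per-method assertions hang off that, not off mock_messenger itself. Names reused from the test above.)
# the Messenger *instance* is the patched class's return value
instance = mock_messenger.return_value
instance.create_channel.assert_any_call("dojot.notifications", "r")
instance.on.assert_any_call("dojot.notifications", "message",
                            p.handle_notification)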
In Airflow, how can I pass parameters using context to the on_success_callback function handler?
This is my test code:
import airflow
from airflow import DAG
from airflow.operators import MSTeamsWebhookOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime
from transaction_analytics import helpers
from airflow.utils.helpers import chain

# Parameters & variables
schedule_interval = "0 20 * * *"


def _task_success_callback(context):
    dagid = context["task_instance"].dag_id
    duration = context["task_instance"].duration
    executiondate = context["execution_date"]
    logurl = context["task_instance"].log_url.replace("localhost", "agbqhsbldd017v.agb.rbxd.ds")  # workaround until we config airflow
    pp1 = context["params"].param1
    #pp1 = "{{ params.param1 }}"
    ms_teams_op = MSTeamsWebhookOperator(
        task_id="success_notification",
        http_conn_id="msteams_airflow",
        message="DAG {ppram1} `{dag}` finished successfully!".format(dag=context["task_instance"].dag_id, ppram1=pp1),
        subtitle="Execution Date = {p1}, Duration = {p2}".format(p1=executiondate, p2=duration),
        button_text="View log",
        button_url="{log}".format(log=logurl),
        theme_color="00FF00"#,
        #proxy= "http://10.72.128.202:3128"
    )
    ms_teams_op.execute(context)


main_dag = DAG('test_foley',
               schedule_interval=schedule_interval,
               description='Test foley',
               start_date=datetime(2020, 4, 19),
               default_args=None,
               max_active_runs=2,
               default_view='graph',  # Default view graph
               #orientation='TB',  # Top-Bottom graph
               on_success_callback=_task_success_callback,
               #on_failure_callback=outer_task_failure_callback,
               catchup=False,  # Do not catchup, run only latest
               params={
                   "param1": "value1",
                   "param2": "value2"
               })

################################### START ######################################

dag_chain = []

start = DummyOperator(task_id='start', retries=3, dag=main_dag)
dag_chain.append(start)

step1 = BashOperator(
    task_id='step1',
    bash_command='pwd',
    dag=main_dag,
)
dag_chain.append(step1)

step2 = BashOperator(
    task_id='step2',
    bash_command='exit 0',
    dag=main_dag,
)
dag_chain.append(step2)

end = DummyOperator(task_id='end', dag=main_dag)
dag_chain.append(end)

chain(*dag_chain)
I have an event handler function _task_success_callback that handles success.
In the DAG I have on_success_callback=_task_success_callback, which captures that event.
And it works... but now I need to pass some parameters into _task_success_callback.
What is the best way to do that?
Since that function receives context, I tried to create params in the DAG, as you can see:
params={
    "param1": "value1",
    "param2": "value2"
}
But it seems I cannot access them.
My questions are:
What am I doing wrong to access params?
Is there a better way to pass parameters?
NOTE: I saw this similar question, How to pass parameters to Airflow on_success_callback and on_failure_callback, with one answer... and it works. But what I am looking for is to use context to pass parameters.
Recall that Airflow process files are simply Python. Provided you don't introduce too much overhead during their parsing (Airflow parses the files frequently, and that overhead can add up), you can use everything Python can do. In particular, for your case I recommend returning a nested function (a closure) for your callback:
Put this in a file adjacent to your Airflow processes, say on_callbacks.py:
def success_ms_teams(param_1, param_2):
    def callback_func(context):
        print(f"param_1: {param_1}")
        print(f"param_2: {param_2}")
        # ... trimmed for brevity ...
        ms_teams_op.execute(context)
    return callback_func
Then in your processes you can do this:
from airflow import models
from on_callbacks import success_ms_teams

with models.DAG(
    ...
    on_success_callback=success_ms_teams(
        "value1",  # These values become the
        "value2",  # `param_1` and `param_2`
    )
) as dag:
    ...
You can create a task whose only purpose is to push configuration settings through XCom. You can then pull the configuration via context, since the task_instance object is included in context.
def push_configuration(ti, params):
    # push under the same key that the callback pulls below
    ti.xcom_push(key='params', value=params)


def _task_success_callback(context):
    ti = context.get('ti')
    params = ti.xcom_pull(key='params', task_ids='Settings')
    ...


step0 = PythonOperator(
    task_id='Settings',
    python_callable=push_configuration,
    op_kwargs={'params': params})

step1 = BashOperator(
    task_id='step1',
    bash_command='pwd',
    on_success_callback=_task_success_callback)
I used functools.partial to call a callback function with different connections:
from functools import partial


# pass extra params here
def my_conn_callback(context, slack_conn_id, slack_channel):
    print(context)
    print(slack_conn_id)
    print(slack_channel)


# this is for the on_failure_callback
task_fail_slack_alert_callback_my_conn = partial(
    my_conn_callback,
    slack_conn_id="slack-conn", slack_channel="#slack_channel")

# this is how Airflow calls it internally:
print(task_fail_slack_alert_callback_my_conn('context'))
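To wire the partial up, you hand it to the DAG or task wherever a callback is accepted. A sketch (the DAG id here is a placeholder, not from the original post):
from airflow import DAG

# hypothetical DAG using the partial as its failure callback
dag = DAG(
    "my_dag",
    on_failure_callback=task_fail_slack_alert_callback_my_conn,
)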
You can directly pass user-defined parameters in the DAG definition for use with either on_success_callback or on_failure_callback, like this:
params={'custom_param': 'custom_param_value'}
alongside the on_success_callback or on_failure_callback function. This adds the values as key-value pairs in a dictionary inside context['params'].
You can then access the value of your custom parameter anywhere inside the function call like this:
context['params'].get('custom_param')
def dag_failure_notification_alert(context):
    print(context['params'].get('custom_param'))


default_args = {
    "owner": "abcd",
    "start_date": datetime(2021, 12, 12),
    'retries': 0,
    'retry_delay': timedelta(),
    "schedule_interval": "@daily"
}

dag = DAG('alert_notification_dummy_dag',
          default_args=default_args,
          catchup=False,
          on_failure_callback=dag_failure_notification_alert,
          params={
              'custom_param': 'custom_param_value'
          })
Here you can pass any number of parameters, e.g.
params={"x": Param(5, type="integer", minimum=3)}
or
params={'x': [10, 20, 30]}
For reference, kindly look at the documentation.
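One caveat if you go the Param route: it has to be imported, and (an assumption about your version; check your installation) it lives in airflow.models.param from Airflow 2.2 onward:
# Param is available in Airflow 2.2+
from airflow.models.param import Param

params = {"x": Param(5, type="integer", minimum=3)}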
I've used the Motor driver for asynchronous access to read a MongoDB collection. When I run my application, it returns a None value. When I run it synchronously with PyMongo, it runs normally. I've followed the examples at both http://blog.mongodb.org/post/30927719826/motor-asynchronous-driver-for-mongodb-and-python and http://emptysquare.net/motor/pymongo/api/motor/tutorial.html.
Here is part of my code:
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import tornado.options
from tornado import gen
from bson import json_util
import json
import os.path
import motor

events = []


class WSHandler(tornado.websocket.WebSocketHandler):
    @tornado.web.asynchronous
    @gen.engine
    def open(self):
        import traceback
        global events
        print "tailing for events %s...." % events
        try:
            coll = db.blah_tail
            cursor = coll.find(
                {"$and": [
                    {"term": {"$in": events}},
                    {"$or": [
                        {"coordinates.type": "Point"},
                        {"place.full_name": {"$ne": None}}
                    ]}
                ]},
                {"coordinates": 1, "place.full_name": 1},
                tailable=True, timeout=False)
            while cursor.alive:
                try:
                    doc = yield motor.Op(cursor.next_object)
                    print doc
                    self.write_message(json.dumps(doc, default=json_util.default))
                except StopIteration:
                    pass
        except Exception:
            # the snippet was cut off here; traceback is imported above
            print traceback.format_exc()


db = motor.MotorConnection().open_sync().blah

if __name__ == "__main__":
    print 'Server is alive.....'
    app = tornado.web.Application(
        handlers=[(r'/', MainHandler),  # MainHandler is defined elsewhere
                  (r'/ws', WSHandler)
                  ], db=db,
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        debug=True)
    tornado.options.parse_command_line()
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(8888)
    tornado.ioloop.IOLoop.instance().start()
Motor makes the application asynchronous, but I'm not sure why it's basically not reading anything from the collection in the database.
Thanks
I was able to correct it by amending the code to:
doc = yield motor.Op(cursor.next_object)
if doc:
    print doc
    self.write_message(json.dumps(doc, default=json_util.default))
This prevents returning None when the first call does not return a document. The excerpt by the creator of Motor explains it better: "Problem is, just because cursor.alive is True doesn't truly guarantee that next_object will actually return a document. The first call returns None if find matched no documents at all..." (http://emptysquare.net/blog/category/motor/).
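As a side note that goes beyond this thread: later Motor releases removed motor.Op entirely, and with modern Motor you iterate the cursor with async for, which only ever yields real documents. A sketch, assuming a current Motor/PyMongo install rather than the versions used above:
from pymongo import CursorType

async def tail(coll):
    # a tailable cursor on a capped collection, modern-style
    cursor = coll.find({}, cursor_type=CursorType.TAILABLE_AWAIT)
    async for doc in cursor:  # never yields None
        print(doc)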