I am trying to improve my code by adding a progress bar to the file_transfer function. How do I calculate the sent argument? I found this answer here and this one on GitHub, but I can't figure out how to use it with my code.
from os.path import getsize
from netmiko import ConnectHandler, file_transfer, progress_bar
router = {
"device_type": "cisco_ios",
"host": "sandbox-iosxe-latest-1.cisco.com",
"username": "developer",
"password": "C1sco12345",
"port": 22,
"verbose": True,
"conn_timeout": 12,
"fast_cli": False,
"session_log": "sandbox-iosxe-latest-1.cisco.com.log",
}
src_file = dest_file = input("Name of file to copy: ")
with ConnectHandler(**router) as net_connect:
scp = net_connect.send_config_set(config_commands=["ip scp server enable"])
transfer = file_transfer(
net_connect,
source_file=src_file,
dest_file=dest_file,
file_system="flash:",
direction="put",
overwrite_file=True,
socket_timeout=100.0,
progress=progress_bar(
filename=src_file,
size=getsize(src_file),
sent=sent, # How to calculate? What should be placed here?
),
)
It should just be progress=progress_bar or progress4=progress_bar: you provide a callable that is constructed a certain way (and since you are using the progress_bar function that Netmiko provides, that should be fine).
Here is an example file_transfer using it:
ssh_conn = ConnectHandler(**cisco)
transfer_dict = file_transfer(
ssh_conn,
source_file=source_file,
dest_file=dest_file,
file_system=file_system,
direction=direction,
# Force an overwrite of the file if it already exists
overwrite_file=True,
progress4=progress_bar,
)
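In other words, you never compute sent yourself: the transfer code invokes the callable repeatedly during the copy and supplies filename, size, and sent for each chunk. Here is a minimal sketch of a custom callback, assuming the scp-style (filename, size, sent) signature that Netmiko forwards to the underlying SCP library (where filename may arrive as bytes):

def my_progress(filename, size, sent):
    # filename can be bytes, depending on the underlying SCP library
    name = filename.decode() if isinstance(filename, bytes) else filename
    percent = 100.0 * sent / size if size else 100.0
    print(f"{name}: {sent}/{size} bytes ({percent:.1f}%)")

transfer = file_transfer(
    net_connect,
    source_file=src_file,
    dest_file=dest_file,
    file_system="flash:",
    direction="put",
    overwrite_file=True,
    progress=my_progress,  # the function object itself, not a call
)

The key point is that progress=my_progress passes the function object; calling it yourself, as in the question, fails precisely because sent does not exist until the transfer is underway.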
I'm using the nornir (3.3.0) automation framework with Python 3.8. I'd like to mock the SSH access to the devices so I can test without any real or virtual network equipment online. How would I use patch or Mock/MagicMock from unittest.mock to mock netmiko_send_command (the SSH interaction with the device)?
I have the following nornir task function:
# dbb_automation/tasks.py
import logging

from nornir.core.task import MultiResult, Result, Task
from nornir_netmiko.tasks import netmiko_send_command
from nornir_utils.plugins.tasks.files import write_file  # assumed source of write_file

log = logging.getLogger(__name__)
def get_interfaces_with_ip(task: Task):
log.debug(f"{task.name}: Getting result on host {task.host}")
result: MultiResult = task.run(name="show ip int br | e unass", task=netmiko_send_command,
command_string="show ip int br | e unass")
content_str = result[0].result
task.run(
task=write_file,
filename=f"outputs/{task.host}-{purpose}.{ending}",
content=content_str
)
return Result(
host=task.host,
result=f"{task.host.name} got ip result"
)
and the following test case (work in progress):
# tests/test_tasks.py
from dbb_automation.tasks import get_interfaces_with_ip
from nornir import InitNornir
from nornir.core.filter import F
from tests.settings import *
def test_get_interfaces_with_ip():
# [x] init nornir with fake host
# [ ] patch/mock netmiko_send_command
# [ ] check file contents with patched return string of netmiko_send_command
nr = InitNornir(
core={
"raise_on_error": True
},
runner={
"plugin": "threaded",
"options": {
"num_workers": 1,
}
},
inventory={
"plugin": "SimpleInventory",
"options": {
"host_file": DNAC_HOSTS_YAML,
"group_file": DNAC_GROUPS_YAML,
"defaults_file": DNAC_DEFAULT_YAML
}
},
logging={
"log_file": "logs/nornir.log"
}
)
result = nr.filter(F(has_parent_group="Borders")).run(name="get_interfaces_with_ip", task=get_interfaces_with_ip)
# todo: test code
assert False
Regards,
Gérard
I think I found the solution. The key was to patch where the imported function is used, not where it is defined, and to set the return value on the mock object.
#patch("dbb_automation.tasks.netmiko_send_command")
def test_get_interfaces_with_ip(mock_netmiko_send_command, nr):
...
mock_netmiko_send_command.return_value = """Interface IP-Address OK? Method Status Protocol
GigabitEthernet22 10.1.54.146 YES TFTP up up
Loopback0 10.150.32.2 YES other up up
Port-channel1.2 10.150.33.65 YES manual up up
...
"""
import pytest
import os
import shutil
from unittest.mock import patch
from dbb_automation.tasks import get_interfaces_with_ip
from nornir import InitNornir
from nornir.core.filter import F
from tests.settings import *
@pytest.fixture()
def nr():
nr = InitNornir(
core={
"raise_on_error": True
},
runner={
"plugin": "threaded",
"options": {
"num_workers": 1,
}
},
inventory={
"plugin": "SimpleInventory",
"options": {
"host_file": DNAC_HOSTS_YAML,
"group_file": DNAC_GROUPS_YAML,
"defaults_file": DNAC_DEFAULT_YAML
}
},
logging={
"log_file": "logs/nornir.log"
}
)
return nr
#patch("dbb_automation.tasks.netmiko_send_command")
def test_get_interfaces_with_ip(mock_netmiko_send_command, nr):
output_folder_name = "outputs"
    shutil.rmtree(output_folder_name, ignore_errors=True)  # tolerate a missing folder on the first run
os.mkdir(output_folder_name)
mock_netmiko_send_command.return_value = """Interface IP-Address OK? Method Status Protocol
GigabitEthernet22 10.1.54.146 YES TFTP up up
Loopback0 10.150.32.2 YES other up up
Port-channel1.2 10.150.33.65 YES manual up up
"""
nr.filter(F(has_parent_group="Borders")).run(name="get_interfaces_with_ip", task=get_interfaces_with_ip)
# test code
count = 0
files_found = None
for root_dir, cur_dir, files in os.walk(output_folder_name):
count += len(files)
assert files_found is None # make sure there are no subdirectories
files_found = files
assert count == 4 # we expect a file for each host
for file_name in files_found:
with open(f"{output_folder_name}/{file_name}") as f:
assert f.read() == mock_netmiko_send_command.return_value
The goal is to send multiple pings via a Cisco (L2) device to populate the ARP table.
The script is done and working, but it is ultra slow since I need to ping 254 addresses, and more depending on whether the subnet is a /24, /23, etc.
Where I am confused: I have tested threading with some basic scripts to understand how it works, and everything works when I use a function and call it, so far so good.
My problem is that I don't want to create 200+ SSH connections if I use a function for the whole code.
I would like to use threading only on the send_command part from Netmiko.
My Code :
from netmiko import ConnectHandler
from pprint import pprint
from time import perf_counter
from threading import Thread
mng_ip = input("please enter the subnet to be scan: ")
cisco_Router = {
"device_type": "cisco_ios",
"host": mng_ip,
"username": "XXXX",
"password": "XXXX",
"fast_cli": False,
"secret": "XXXX"}
print(mng_ip)
subnet1 = mng_ip.split(".")[0]
subnet2 = mng_ip.split(".")[1]
subnet3 = mng_ip.split(".")[2]
#subnet4 = mng_ip.split(".")[3]
active_list = []
start_time = perf_counter()
for x in range(1,254):
ip = (subnet1+"."+subnet2+"."+subnet3+"."+str(x))
print ("Pinging:",ip)
with ConnectHandler(**cisco_Router) as net_connect:
net_connect.enable()
result = net_connect.send_command(f"ping {ip} ",delay_factor=2) # <----- this is the part i would like to perform the threading
print(result)
if "0/5" in result:
print("FAILED")
else:
print("ACTIVE")
active_list.append(ip)
net_connect.disconnect()
print("Done")
pprint(active_list)
end_time = perf_counter()
print(f'It took {end_time- start_time: 0.2f} second(s) to complete.')
I am not sure if this is possible or how it could be done.
Thank you in advance.
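One possible approach, sketched below (an assumption, not from the original post): share a small, fixed pool of SSH sessions across worker threads, so scanning 254 addresses never opens more than a handful of connections. A Netmiko session should not be shared between threads, so each worker thread gets its own connection via threading.local. The sketch reuses the cisco_Router dict and subnet variables defined above.

from concurrent.futures import ThreadPoolExecutor
from threading import local

from netmiko import ConnectHandler

thread_data = local()  # holds one Netmiko connection per worker thread

def ping(ip):
    # Open a connection the first time this worker thread runs, then reuse it.
    if not hasattr(thread_data, "conn"):
        thread_data.conn = ConnectHandler(**cisco_Router)
        thread_data.conn.enable()
    result = thread_data.conn.send_command(f"ping {ip}", delay_factor=2)
    return ip, "0/5" not in result  # "0/5" in the output means every ping failed

ips = [f"{subnet1}.{subnet2}.{subnet3}.{x}" for x in range(1, 255)]
with ThreadPoolExecutor(max_workers=4) as pool:  # at most 4 SSH sessions
    active_list = [ip for ip, alive in pool.map(ping, ips) if alive]
pprint(active_list)

With max_workers=4 the device sees four concurrent SSH sessions instead of 254 sequential ones; tune the number to what the router tolerates. The sketch leaves the connections open until the process exits; if that matters, collect them in a list as they are created and disconnect them after the pool finishes.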
I am trying to run a command on an ECS container managed by Fargate. I can establish a connection and execute successfully, but I cannot get the response from the command inside my Python script.
import boto3
import pprint as pp
client = boto3.client("ecs")
cluster = "my-mundane-cluster-name"
def main():
task_arns = client.list_tasks(cluster=cluster, launchType="FARGATE")
for task_arn in task_arns.get("taskArns", []):
cmd_out = client.execute_command(
cluster=cluster,
command="ls",
interactive=True,
task=task_arn,
)
pp.pprint(f"{cmd_out}")
if __name__ == "__main__":
main()
I replaced the command with ls, but for all intents and purposes the flow is the same. Here is what I get as a response:
{
'clusterArn': 'arn:aws:ecs:■■■■■■■■■■■■:■■■■■■:cluster/■■■■■■',
'containerArn': 'arn:aws:ecs:■■■■■■■■■■■■:■■■■■■:container/■■■■■■/■■■■■■/■■■■■■■■■■■■■■■■■■',
'containerName': '■■■■■■',
'interactive': True,
'session': {
'sessionId': 'ecs-execute-command-■■■■■■■■■',
'streamUrl': '■■■■■■■■■■■■■■■■■■',
'tokenValue': '■■■■■■■■■■■■■■■■■■'
},
'taskArn': 'arn:aws:ecs:■■■■■■■■■■■■:■■■■■■■■■:task/■■■■■■■■■/■■■■■■■■■■■■■■■■■■',
'ResponseMetadata': {
'RequestId': '■■■■■■■■■■■■■■■■■■',
'HTTPStatusCode': 200,
'HTTPHeaders': {
'x-amzn-requestid': '■■■■■■■■■■■■■■■■■■',
'content-type': 'application/x-amz-json-1.1',
'content-length': '■■■',
'date': 'Thu, 29 Jul 2021 02:39:24 GMT'
},
'RetryAttempts': 0
}
}
I've tried running the command as non-interactive to see if it returns a response, but the SDK says interactive is the only mode currently supported. I've also tried searching online for clues as to how to do this, but no luck.
Any help is greatly appreciated.
The value of the command output is located within the document stream associated with the session. You must initialize a new session and pass it the sessionId to retrieve its contents.
A crude example:
import boto3
import pprint as pp
client = boto3.client("ecs")
ssm_client = boto3.client("ssm")
cluster = "my-mundane-cluster-name"
def main():
task_arns = client.list_tasks(cluster=cluster, launchType="FARGATE")
for task_arn in task_arns.get("taskArns", []):
cmd_out = client.execute_command(
cluster=cluster,
command="ls",
interactive=True,
task=task_arn,
)
        # The session lookups belong to the SSM client, not the ECS client.
        session_response = ssm_client.describe_sessions(
            State="History",  # the session must have finished
            Filters=[
                {"key": "SessionId", "value": cmd_out["session"]["sessionId"]},
            ],
        )
        document_response = ssm_client.get_document(
            Name=session_response["Sessions"][0]["DocumentName"],
            DocumentFormat="JSON",
        )
        pp.pprint(document_response)
References
SSM: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm.html
SSM #get_document: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm.html#SSM.Client.get_document
A quick solution is to use logging instead of pprint:
import logging

boto3.set_stream_logger('boto3.resources', logging.INFO)
I needed to accomplish a similar task, and as far as I can tell it doesn't work as answered here. Let me know if it worked for you and, if so, how you implemented it.
For me the solution was to open a websocket connection to the streamUrl given back in the session and read the output, like this:
import boto3
import json
import uuid
import construct as c
import websocket
def session_reader(session: dict) -> str:
AgentMessageHeader = c.Struct(
"HeaderLength" / c.Int32ub,
"MessageType" / c.PaddedString(32, "ascii"),
)
AgentMessagePayload = c.Struct(
"PayloadLength" / c.Int32ub,
"Payload" / c.PaddedString(c.this.PayloadLength, "ascii"),
)
connection = websocket.create_connection(session["streamUrl"])
try:
init_payload = {
"MessageSchemaVersion": "1.0",
"RequestId": str(uuid.uuid4()),
"TokenValue": session["tokenValue"],
}
connection.send(json.dumps(init_payload))
while True:
resp = connection.recv()
message = AgentMessageHeader.parse(resp)
if "channel_closed" in message.MessageType:
raise Exception("Channel closed before command output was received")
if "output_stream_data" in message.MessageType:
break
finally:
connection.close()
payload_message = AgentMessagePayload.parse(resp[message.HeaderLength :])
return payload_message.Payload
# cluster, task, container, and cmd are placeholders for your own values
exec_resp = boto3.client("ecs").execute_command(
cluster=cluster,
task=task,
container=container,
interactive=True,
command=cmd,
)
print(session_reader(exec_resp["session"]))
This is all thanks to Andrey's excellent answer on my similar question.
For anybody arriving here in search of a similar solution, I have created a tool that makes this task simple. It is called interloper.
I'm trying to test the function start_dojot_messenger, which creates some objects and calls methods on them, and I need to verify that they were called with specific parameters.
For example, we have messenger.create_channel("dojot.notifications", "r"), and I need to test that it is called with exactly these parameters.
Here is the method:
def start_dojot_messenger(config, persister, dojot_persist_notifications_only):
messenger = Messenger("Persister", config)
messenger.init()
# Persister Only Notification
messenger.create_channel("dojot.notifications", "r")
messenger.on(config.dojot['subjects']['tenancy'],
"message", persister.handle_new_tenant)
messenger.on("dojot.notifications", "message",
persister.handle_notification)
LOGGER.info('Listen to notification events')
if str2_bool(dojot_persist_notifications_only) != True:
LOGGER.info("Listen to devices events")
# TODO: add notifications to config on dojot-module-python
messenger.create_channel(config.dojot['subjects']['devices'], "r")
messenger.create_channel(config.dojot['subjects']['device_data'], "r")
messenger.on(config.dojot['subjects']['devices'],
"message", persister.handle_event_devices)
messenger.on(config.dojot['subjects']['device_data'],
"message", persister.handle_event_data)
In my tests I'm doing it this way:
@patch.object(Persister, 'create_indexes')
@patch.object(Config, 'load_defaults')
@patch('history.subscriber.persister.Messenger')
@patch.object(Messenger, 'create_channel', return_value=None)
@patch.object(Messenger, 'on', return_value=None)
def test_persist_only_notifications(mock_create_channel, mock_on, mock_messenger, mock_config, create_indexes):
from history.subscriber.persister import start_dojot_messenger
from history import conf
p = Persister()
p.create_indexes_for_notifications('admin')
mock_config.dojot = {
"management": {
"user": "dojot-management",
"tenant": "dojot-management"
},
"subjects": {
"tenancy": "dojot.tenancy",
"devices": "dojot.device-manager.device",
"device_data": "device-data"
}
}
# test persist only boolean valued notifications
start_dojot_messenger(mock_config, p, True)
mock_messenger.assert_called()
assert mock_messenger.call_count == 1
For the first test, checking that the Messenger class has been instantiated, I can use the mock: mock_messenger.assert_called(). But I can't access the others.
I figured it could be something like this:
mock_messenger.mock_create_channel.assert_called
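A sketch of how those assertions could look, assuming Messenger is patched where it is used (history.subscriber.persister.Messenger): the instance that start_dojot_messenger creates is then mock_messenger.return_value, and calls to its methods are recorded on that object, so the @patch.object(Messenger, ...) decorators are not needed at all.

instance = mock_messenger.return_value
instance.init.assert_called_once()
instance.create_channel.assert_any_call("dojot.notifications", "r")
instance.on.assert_any_call("dojot.notifications", "message", p.handle_notification)

Note also that stacked @patch decorators inject mocks bottom-up, so with the decorators as written the first test parameter actually receives the mock for on, not for create_channel.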
I am trying to create my own Ansible module (which will update a CMDB), and I am looking for how to use ansible_facts in the module code.
An example of my module script:
#!/usr/bin/python
from ansible.module_utils.basic import *
import json, ast
from servicenow import ServiceNow
from servicenow import Connection
def __get_server_info(table,server_name="", sys_id=""):
if sys_id == "":
return table.fetch_one({'name': server_name})
if server_name == "":
return table.fetch_one({'sys_id': sys_id})
def __update_cmdb_hwinfo(table, sys_id, server_name=""):
    return table.update({'sys_id': sys_id, 'hw_ram': 'Here, for example, I want to put the ansible_facts value for the server RAM size'})
def main():
fields = {
"snow_instance": {"required": True, "type": "str"},
"snow_username": {"required": True, "type": "str"},
"snow_password": {"required": True, "type": "str"},
"server_name": {"required": True, "type": "str" },
"api_type": {"default": "JSONv2", "type": "str"},
}
module = AnsibleModule(argument_spec=fields)
    snow_connection = Connection.Auth(username=module.params['snow_username'], password=module.params['snow_password'], instance=module.params['snow_instance'], api=module.params['api_type'])
server = ServiceNow.Base(snow_connection)
server.__table__ = 'cmdb_ci_server_list.do'
    machine = __get_server_info(server, server_name=module.params['server_name'])
## Define connection object to ServiceNow instance
module.exit_json(changed=False, meta=module.params, msg=machine)
if __name__ == '__main__':
main()
Which variable should I use to access ansible_facts in the module script? (And is it even possible?)
I doubt this is possible from inside the module itself, because modules are executed in the context of the remote machine with predefined parameters.
But you can wrap your module with an action plugin (which is executed in the local context), collect the required data from the available variables, and pass it as parameters to your module.
Like this (./action_plugins/a_test.py):
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
module_args = self._task.args.copy()
module_args['mem_size'] = self._templar._available_variables.get('ansible_memtotal_mb')
return self._execute_module(module_args=module_args, task_vars=task_vars, tmp=tmp)
In this case, if your module expects a mem_size parameter, the action plugin will set it to the value of ansible_memtotal_mb.
Module example (./library/a_test.py):
#!/usr/bin/python
def main():
module = AnsibleModule(
argument_spec = dict(
mem_size=dict(required=False, default=None),
),
supports_check_mode = False
)
module.exit_json(changed=False, mem_size=module.params['mem_size'])
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
Test playbook:
---
- hosts: all
tasks:
- a_test: