boto3: execute_command inside a Python script

I am trying to run a command on an ECS container managed by Fargate. I can establish a connection and execute successfully, but I cannot get the response from the command inside my Python script.
import boto3
import pprint as pp

client = boto3.client("ecs")
cluster = "my-mundane-cluster-name"

def main():
    task_arns = client.list_tasks(cluster=cluster, launchType="FARGATE")
    for task_arn in task_arns.get("taskArns", []):
        cmd_out = client.execute_command(
            cluster=cluster,
            command="ls",
            interactive=True,
            task=task_arn,
        )
        pp.pprint(f"{cmd_out}")

if __name__ == "__main__":
    main()
I replaced the command with ls, but for all intents and purposes the flow is the same. Here is what I get as a response:
{
    'clusterArn': 'arn:aws:ecs:■■■■■■■■■■■■:■■■■■■:cluster/■■■■■■',
    'containerArn': 'arn:aws:ecs:■■■■■■■■■■■■:■■■■■■:container/■■■■■■/■■■■■■/■■■■■■■■■■■■■■■■■■',
    'containerName': '■■■■■■',
    'interactive': True,
    'session': {
        'sessionId': 'ecs-execute-command-■■■■■■■■■',
        'streamUrl': '■■■■■■■■■■■■■■■■■■',
        'tokenValue': '■■■■■■■■■■■■■■■■■■'
    },
    'taskArn': 'arn:aws:ecs:■■■■■■■■■■■■:■■■■■■■■■:task/■■■■■■■■■/■■■■■■■■■■■■■■■■■■',
    'ResponseMetadata': {
        'RequestId': '■■■■■■■■■■■■■■■■■■',
        'HTTPStatusCode': 200,
        'HTTPHeaders': {
            'x-amzn-requestid': '■■■■■■■■■■■■■■■■■■',
            'content-type': 'application/x-amz-json-1.1',
            'content-length': '■■■',
            'date': 'Thu, 29 Jul 2021 02:39:24 GMT'
        },
        'RetryAttempts': 0
    }
}
I've tried running the command as non-interactive to see if it returns a response, but the SDK says interactive is the only mode currently supported. I've also tried searching online for clues as to how to do this, but no luck.
Any help is greatly appreciated.

The value of the command output is located within the document stream referenced by the session. You must initialize a new session and pass it the sessionId to retrieve its contents.
A crude example:
import boto3
import pprint as pp

client = boto3.client("ecs")
ssm_client = boto3.client("ssm")
cluster = "my-mundane-cluster-name"

def main():
    task_arns = client.list_tasks(cluster=cluster, launchType="FARGATE")
    for task_arn in task_arns.get("taskArns", []):
        cmd_out = client.execute_command(
            cluster=cluster,
            command="ls",
            interactive=True,
            task=task_arn,
        )
        # Look up the SSM session that execute_command started
        session_response = ssm_client.describe_sessions(
            State="Active",
            Filters=[
                {
                    "key": "SessionId",
                    "value": cmd_out["session"]["sessionId"],
                },
            ],
        )
        # Fetch the document associated with that session
        document_response = ssm_client.get_document(
            Name=session_response["Sessions"][0]["DocumentName"],
            DocumentFormat="JSON",
        )
        pp.pprint(document_response)

if __name__ == "__main__":
    main()
References
SSM: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm.html
SSM #get_document: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm.html#SSM.Client.get_document

A quick solution is to use logging instead of pprint:
import logging

boto3.set_stream_logger('boto3.resources', logging.INFO)

I needed to accomplish a similar task, and it turns out it doesn't work as answered here as far as I can tell. Let me know if that worked for you, and how you implemented it, if so.
For me, the solution was to open a websocket connection given back in the session and read the output, like this:
import boto3
import json
import uuid

import construct as c
import websocket


def session_reader(session: dict) -> str:
    AgentMessageHeader = c.Struct(
        "HeaderLength" / c.Int32ub,
        "MessageType" / c.PaddedString(32, "ascii"),
    )

    AgentMessagePayload = c.Struct(
        "PayloadLength" / c.Int32ub,
        "Payload" / c.PaddedString(c.this.PayloadLength, "ascii"),
    )

    connection = websocket.create_connection(session["streamUrl"])
    try:
        init_payload = {
            "MessageSchemaVersion": "1.0",
            "RequestId": str(uuid.uuid4()),
            "TokenValue": session["tokenValue"],
        }
        connection.send(json.dumps(init_payload))

        while True:
            resp = connection.recv()
            message = AgentMessageHeader.parse(resp)
            if "channel_closed" in message.MessageType:
                raise Exception("Channel closed before command output was received")
            if "output_stream_data" in message.MessageType:
                break
    finally:
        connection.close()

    payload_message = AgentMessagePayload.parse(resp[message.HeaderLength:])
    return payload_message.Payload


exec_resp = boto3.client("ecs").execute_command(
    cluster=cluster,
    task=task,
    container=container,
    interactive=True,
    command=cmd,
)

print(session_reader(exec_resp["session"]))
This is all thanks to Andrey's excellent answer on my similar question.
For anybody arriving here seeking a similar solution, I have created a tool that makes this task simple. It is called interloper.

Related

Pypresence not closing connection

This is the code I'm using to turn the presence on:
from pypresence import Presence
import time

start = int(time.time())
client_id = "XXXXXXXXXXXXXXXX"
RPC = Presence(client_id, pipe=0, loop=None, handler=None)
RPC.connect()

while True:
    RPC.update(
        details="▬▬▬▬▬▬▬▬▬▬",
        state="All Systems Operational.",
        large_image="xxx_logo_rpc_v2",
        large_text="exe is running!",
        small_image="xx_green_circle_rpc_v2",
        small_text="Online",
        start=start,
        buttons=[{"label": "🌐XX 🌐", "url": "https://xx/"}, {"label": "xxxx", "url": "https://xxxx8"}],
    )
    time.sleep(60)
This is the code I'm trying to use to turn it off:
from pypresence import Presence
import os
client_id = "XXXXXXXXXXXXXX"
RPC = Presence(client_id, pipe=0, loop=None, handler=None)
RPC.clear(pid=os.getpid())
RPC.close()
The problem is that it shows me this error:
File "C:\Users\Foxii\AppData\Local\Programs\Python\Python310\lib\site- packages\pypresence\baseclient.py",
line 96, in send_data assert self.sock_writer is not None, "You must connect your client before sending events!" AssertionError: You must connect your client before sending events!
Well, the client seems to be connected when I run it with my custom Tkinter GUI but when I try to run the code to close it, it shows me that error.
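The assertion itself points at the cause: clear() and close() send events over the client's socket, and that socket only exists after connect() has been called in the same process, so a freshly constructed Presence in a second script has nothing to write to. A minimal sketch of one way to satisfy it (an assumption about intent, not a confirmed fix from the original post):
from pypresence import Presence
import os

client_id = "XXXXXXXXXXXXXX"
RPC = Presence(client_id, pipe=0)
RPC.connect()               # must connect in this process before sending any event
RPC.clear(pid=os.getpid())  # now clear() has a live socket to write to
RPC.close()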

Netmiko / Python Threading for multiple send_command on the same devices ( network device )

The goal is to send multiple pings via a Cisco device (L2) to populate the ARP table.
The script is done and working, but it is ultra slow since I need to ping 254 addresses, and more depending on whether the subnet is /24 or /23, etc.
Where I am confused: I have tested threading with some basic scripts to understand how it works, and so far so good, everything works by using a function and calling it.
My problem is that I don't want to create 200+ SSH connections if I use a function for the whole code.
I would like to use the threading only on the send_command part from netmiko.
My Code :
from netmiko import ConnectHandler
from pprint import pprint
from time import perf_counter
from threading import Thread

mng_ip = input("please enter the subnet to be scan: ")

cisco_Router = {
    "device_type": "cisco_ios",
    "host": mng_ip,
    "username": "XXXX",
    "password": "XXXX",
    "fast_cli": False,
    "secret": "XXXX"}

print(mng_ip)
subnet1 = mng_ip.split(".")[0]
subnet2 = mng_ip.split(".")[1]
subnet3 = mng_ip.split(".")[2]
#subnet4 = mng_ip.split(".")[3]

active_list = []
start_time = perf_counter()

for x in range(1,254):
    ip = (subnet1+"."+subnet2+"."+subnet3+"."+str(x))
    print("Pinging:", ip)
    with ConnectHandler(**cisco_Router) as net_connect:
        net_connect.enable()
        result = net_connect.send_command(f"ping {ip} ", delay_factor=2)  # <----- this is the part i would like to perform the threading
        print(result)
        if "0/5" in result:
            print("FAILED")
        else:
            print("ACTIVE")
            active_list.append(ip)
        net_connect.disconnect()

print("Done")
pprint(active_list)
end_time = perf_counter()
print(f'It took {end_time- start_time: 0.2f} second(s) to complete.')
I am not sure if it is possible or how it could be done; one possible approach is sketched below.
Thank you in advance,
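A minimal sketch of one common pattern (an illustration, not a tested answer): since a single Netmiko connection is not safe to share between threads, open a small pool of SSH sessions up front and feed the 254 pings to them through a queue, instead of one connection per ping or one thread per address. POOL_SIZE is a made-up name, and cisco_Router/subnet1-3 are reused from the question's code.
from queue import Queue
from threading import Thread
from netmiko import ConnectHandler

POOL_SIZE = 4  # concurrent SSH sessions; tune to what the device allows

def ping_worker(conn_params, ip_queue, active_list):
    # each worker owns one connection and reuses it for many pings
    with ConnectHandler(**conn_params) as net_connect:
        net_connect.enable()
        while True:
            ip = ip_queue.get()
            if ip is None:  # sentinel: no more work for this thread
                break
            result = net_connect.send_command(f"ping {ip}", delay_factor=2)
            if "0/5" not in result:
                active_list.append(ip)  # list.append is atomic in CPython

ip_queue = Queue()
active_list = []
threads = [Thread(target=ping_worker, args=(cisco_Router, ip_queue, active_list))
           for _ in range(POOL_SIZE)]
for t in threads:
    t.start()
for x in range(1, 255):
    ip_queue.put(f"{subnet1}.{subnet2}.{subnet3}.{x}")
for _ in threads:
    ip_queue.put(None)  # one sentinel per worker
for t in threads:
    t.join()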

Netmiko SecureCopy Progressbar

I am trying to improve my code and add a progress bar to the file_transfer function. I was wondering how I can calculate the sent argument. I found this answer here and this on GitHub, but I can't figure out how to use it with my provided code.
from os.path import getsize
from netmiko import ConnectHandler, file_transfer, progress_bar

router = {
    "device_type": "cisco_ios",
    "host": "sandbox-iosxe-latest-1.cisco.com",
    "username": "developer",
    "password": "C1sco12345",
    "port": 22,
    "verbose": True,
    "conn_timeout": 12,
    "fast_cli": False,
    "session_log": "sandbox-iosxe-latest-1.cisco.com.log",
}

src_file = dest_file = input("Name of file to copy: ")

with ConnectHandler(**router) as net_connect:
    scp = net_connect.send_config_set(config_commands=["ip scp server enable"])
    transfer = file_transfer(
        net_connect,
        source_file=src_file,
        dest_file=dest_file,
        file_system="flash:",
        direction="put",
        overwrite_file=True,
        socket_timeout=100.0,
        progress=progress_bar(
            filename=src_file,
            size=getsize(src_file),
            sent=sent,  # How to calculate? What should be placed here?
        ),
    )
It should just be progress=progress_bar or progress4=progress_bar. You just provide a callable that is constructed a certain way (and you are using the progress_bar function that Netmiko provides, so that should be fine).
Here is an example file_transfer using it:
ssh_conn = ConnectHandler(**cisco)
transfer_dict = file_transfer(
    ssh_conn,
    source_file=source_file,
    dest_file=dest_file,
    file_system=file_system,
    direction=direction,
    # Force an overwrite of the file if it already exists
    overwrite_file=True,
    progress4=progress_bar,
)
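In other words, you never calculate sent yourself: the SCP transfer invokes the callable repeatedly and supplies it. A minimal sketch of a custom callable (illustrative; my_progress is a made-up name following the same (filename, size, sent) shape as Netmiko's own progress_bar, and net_connect/src_file/dest_file are reused from the question's code):
def my_progress(filename, size, sent):
    # the transfer passes the total size and the bytes sent so far on each callback
    pct = 100.0 * sent / size
    print(f"{filename}: {pct:.1f}% ({sent}/{size} bytes)")

transfer = file_transfer(
    net_connect,
    source_file=src_file,
    dest_file=dest_file,
    file_system="flash:",
    direction="put",
    overwrite_file=True,
    progress=my_progress,
)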

boto3 wait_until_running doesn't work as desired

I'm trying to write a script using boto3 to start an instance and wait until it is started. As per the documentation of wait_until_running, it should wait until the instance is fully started (I'm assuming checks should be OK), but unfortunately it only works for wait_until_stopped; in the case of wait_until_running it just starts the instance and doesn't wait until it is completely started. Not sure if I'm doing something wrong here or if this is a bug in boto3.
Here is the code:
import boto3
ec2 = boto3.resource('ec2',region_name="ap-southeast-2")
ec2_id = 'i-xxxxxxxx'
instance = ec2.Instance(id=ec2_id)
print("starting instance " + ec2_id)
instance.start()
instance.wait_until_running()
print("instance started")
Thanks to @Mark B and @Madhurya Gandi, here is the solution that worked in my case:
import boto3
import socket
import time
from botocore.exceptions import ClientError

retries = 10
retry_delay = 10
retry_count = 0
ec2 = boto3.resource('ec2', region_name="ap-southeast-2")
ec2_id = 'i-xxxxxxxx'
instance = ec2.Instance(id=ec2_id)
print("starting instance " + ec2_id)

try:
    instance.start()
    instance.wait_until_running()
    while retry_count <= retries:
        retry_count += 1
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        result = sock.connect_ex((instance.public_ip_address, 22))
        if result == 0:
            print("Instance is UP & accessible on port 22, the IP address is:", instance.public_ip_address)
            break
        else:
            print("instance is still down, retrying . . .")
            time.sleep(retry_delay)
except ClientError as e:
    print('Error', e)
I believe this waits until the instance status is 2/2 checks passed:
Boto3 Doc:
instance-status.reachability - Filters on instance status where the name is reachability (passed | failed | initializing | insufficient-data ).
import boto3

client = boto3.client('ec2')
waiter = client.get_waiter('instance_status_ok')
waiter.wait(
    InstanceIds=["instanceID"],
    Filters=[
        {
            "Name": "instance-status.reachability",
            "Values": ["passed"]
        }
    ]
)
[1]: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Waiter.InstanceStatusOk
I believe the right way to do it is as follows:
instance.wait_until_running(
    Filters=[
        {
            'Name': 'instance-state-name',
            'Values': ['running'],
        },
    ]
)
Tested, and it worked for me.
I've tried instance.wait_until_running(). It took time to update the instance to the running state. As per the Amazon docs link, instances take a minimum of 60 seconds to spin up. Here's a sample code that worked for me. Hope it helps!
# to create 5 instances
ec2.create_instances(ImageId='<ami-image-id>', MinCount=1, MaxCount=5)
time.sleep(60)
# print your instances
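The "print your instances" step was elided in the original; a minimal sketch of what it might look like (an assumption, not from the original answer), using the same ec2 resource:
for instance in ec2.instances.all():
    print(instance.id, instance.state["Name"])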

use ansible_facts in module code

I am trying to create my own Ansible module (which will update a CMDB) and I am looking at how to use ansible_facts in the module code.
An example of my module script is:
#!/usr/bin/python
from ansible.module_utils.basic import *
import json, ast
from servicenow import ServiceNow
from servicenow import Connection


def __get_server_info(table, server_name="", sys_id=""):
    if sys_id == "":
        return table.fetch_one({'name': server_name})
    if server_name == "":
        return table.fetch_one({'sys_id': sys_id})


def __update_cmdb_hwinfo(table, sys_id, server_name=""):
    return table.update({'sys_id': sys_id, 'hw_ram': 'Here, for example, I want to put the ansible_facts value for the server RAM size'})


def main():
    fields = {
        "snow_instance": {"required": True, "type": "str"},
        "snow_username": {"required": True, "type": "str"},
        "snow_password": {"required": True, "type": "str"},
        "server_name": {"required": True, "type": "str"},
        "api_type": {"default": "JSONv2", "type": "str"},
    }
    module = AnsibleModule(argument_spec=fields)
    # Define connection object to ServiceNow instance
    snow_connection = Connection.Auth(
        username=module.params['snow_username'],
        password=module.params['snow_password'],
        instance=module.params['snow_instance'],
        api=module.params['api_type'],
    )
    server = ServiceNow.Base(snow_connection)
    server.__table__ = 'cmdb_ci_server_list.do'
    machine = __get_server_info(server, )
    module.exit_json(changed=False, meta=module.params, msg=machine)


if __name__ == '__main__':
    main()
What variable should I use to access ansible_facts in the module script? (And is it even possible?)
I doubt this is possible from inside the module itself, because modules are executed in the context of the remote machine with predefined parameters.
But you can wrap your module with an action plugin (which is executed in the local context), collect the required data from available variables, and pass it as parameters to your module.
Like this (./action_plugins/a_test.py):
from ansible.plugins.action import ActionBase


class ActionModule(ActionBase):
    def run(self, tmp=None, task_vars=None):
        result = super(ActionModule, self).run(tmp, task_vars)
        module_args = self._task.args.copy()
        module_args['mem_size'] = self._templar._available_variables.get('ansible_memtotal_mb')
        return self._execute_module(module_args=module_args, task_vars=task_vars, tmp=tmp)
In this case, if your module expects a mem_size parameter, it will be set to ansible_memtotal_mb's value by the action plugin.
Module example (./library/a_test.py):
#!/usr/bin/python

def main():
    module = AnsibleModule(
        argument_spec=dict(
            mem_size=dict(required=False, default=None),
        ),
        supports_check_mode=False,
    )
    module.exit_json(changed=False, mem_size=module.params['mem_size'])

from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

main()
Test playbook:
---
- hosts: all
  tasks:
    - a_test:
