Fabric - Test SSH connection to multiple hosts - python

I have a Python script that uses the Fabric library to test SSH connections to multiple hosts. I want to gather all the results in one list:
...
import fabric
from fabric.api import *

results = []

@parallel
def test_connection():
    global results
    try:
        run('ls')
        results += "%s: SUCCESS" % env.host
    except Exception as e:
        results += "%s: FAILURE. Exception: %e" % (env.host, e)

if __name__ == '__main__':
    tasks.execute(test_connection)
    print results
When I execute the script, I get the following:
Traceback (most recent call last):
  File "./test_ssh.py", line 99, in <module>
    tasks.execute(test_connection)
  File "/Library/Python/2.7/site-packages/fabric/tasks.py", line 387, in execute
    multiprocessing
  File "/Library/Python/2.7/site-packages/fabric/tasks.py", line 277, in _execute
    return task.run(*args, **kwargs)
  File "/Library/Python/2.7/site-packages/fabric/tasks.py", line 174, in run
    return self.wrapped(*args, **kwargs)
  File "./test_ssh.py", line 96, in test_connection
    results += "%s: FAILURE. Exception: %e" % (env.host, e)
UnboundLocalError: local variable 'results' referenced before assignment
I think it is because test_connection runs in its own context, so it doesn't have access to results. Is there another way I can gather my results, then?

The trick is that you can actually return results from parallel execution:
@parallel
def test_connection():
    try:
        run('ls')
        return True
    except Exception:
        return False
Now when you call your task, you will get the per-host results back:
result = execute(test_connection)
results = [('HOST %s succeeded' % key) if value else ('HOST %s failed' % key)
           for key, value in result.items()]
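Putting the pieces together, here is a minimal runnable sketch of the whole task file (my own assembly of the above, assuming Fabric 1.x; the host names are hypothetical placeholders, not from the original question):

from fabric.api import env, run, execute, parallel

env.hosts = ['host1.example.com', 'host2.example.com']  # hypothetical hosts

@parallel
def test_connection():
    try:
        run('ls')
        return True
    except Exception:
        return False

if __name__ == '__main__':
    result = execute(test_connection)  # dict mapping host string -> return value
    results = [('HOST %s succeeded' % host) if ok else ('HOST %s failed' % host)
               for host, ok in result.items()]
    print results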

Calling a python script within another python script with args

Current Implementation which needs optimization
import subprocess

childprocess = subprocess.Popen(
    ['python',
     '/full_path_to_directory/called_script.py',
     'arg1',
     'arg2'],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE)
returnVal = childprocess.communicate()[0]
print(returnVal)
Is this a correct way to call another script (called_script.py) within the current working directory?
Is there a better way to call the other script? I used import called_script, but it gives me the error below.
called_script.py
def func(arg1, arg2, arg3):
    #doSomething
    #sys.out.write(returnVal)

if __name__ == "__main__":
    func(arg1, arg2, arg3)
Implementation 2 (throws an exception and errors out)
caller_script.py
Both of them are under the same path (i.e. /home/bin)
import called_script
returnVal = called_script.func(arg1,arg2,arg3)
print(returnVal)
Output:
nullNone
Traceback (most recent call last):
  File "/path_to_caller/caller_script.py", line 89, in <module>
    l.simple_bind_s(binddn, pw)
  File "/usr/lib64/python2.6/site-packages/ldap/ldapobject.py", line 206, in simple_bind_s
    msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
  File "/usr/lib64/python2.6/site-packages/ldap/ldapobject.py", line 200, in simple_bind
    return self._ldap_call(self._l.simple_bind,who,cred,EncodeControlTuples(serverctrls),EncodeControlTuples(clientctrls))
  File "/usr/lib64/python2.6/site-packages/ldap/ldapobject.py", line 96, in _ldap_call
    result = func(*args,**kwargs)
TypeError: argument 2 must be string or read-only buffer, not None
Another alternative I tried, which also gave me an error, is:
Implementation 3 (throws an exception and errors out)
caller_script.py
import ldap

returnVal = subprocess.call(['python', 'called_script.py', 'arg1', 'arg2'])
print(returnVal)
l = ldap.initialize(cp.get('some_config_ref', 'some_url'))
try:
    l.protocol_version = ldap.VERSION3
    l.simple_bind_s(binddn, returnVal)
except ldap.INVALID_CREDENTIALS:
    sys.stderr.write("Your username or password is incorrect.")
    sys.exit(1)
except ldap.LDAPError, e:
    if type(e.message) == dict and e.message.has_key('xyz'):
        sys.stderr.write(e.message['xyz'])
    else:
        sys.stderr.write(e)
    sys.exit(1)
Output:
returnVal0
Traceback (most recent call last):
  File "./path_to_script/caller_script.py", line 88, in <module>
    l.simple_bind_s(binddn, pw)
  File "/usr/lib64/python2.6/site-packages/ldap/ldapobject.py", line 206, in simple_bind_s
    msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
  File "/usr/lib64/python2.6/site-packages/ldap/ldapobject.py", line 200, in simple_bind
    return self._ldap_call(self._l.simple_bind,who,cred,EncodeControlTuples(serverctrls),EncodeControlTuples(clientctrls))
  File "/usr/lib64/python2.6/site-packages/ldap/ldapobject.py", line 96, in _ldap_call
    result = func(*args,**kwargs)
TypeError: argument 2 must be string or read-only buffer, not int
Here is an example of calling a function from another file. You pass one value, a list, which can hold an arbitrary amount of numbers, and you get back their sum. Make sure the two files are in the same directory, or you will need the full path. The function in your example called_script.py does not allow you to pass a value this way.
called_script.py
def add_many(list_add):
    the_sum = sum(list_add)
    return the_sum
caller_script.py
import called_script
a_list = [1, 2, 3, 4]
the_sum = called_script.add_many(a_list)
print(the_sum)
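As a side note on Implementation 3: subprocess.call() returns the child's exit status (an int), not its output, which is why simple_bind_s() ended up receiving an int. If the script really has to run as a separate process and you want what it prints, a minimal sketch (assuming Python 2.7+ and reusing the question's placeholder arguments) is:

import subprocess

# check_output returns everything called_script.py writes to stdout
# and raises CalledProcessError if the child exits non-zero (Python 2.7+)
output = subprocess.check_output(['python', 'called_script.py', 'arg1', 'arg2'])
print(output.strip())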

Sometimes pathos.multiprocessing.Pool can't be terminated correctly

I am trying to use pathos.multiprocessing.Pool in my project.
However, I run into the following problem when I terminate the Pool.
I use CentOS 6.5, and I'm not sure whether this is caused by pathos.multiprocessing.Pool or by something else. Can anyone help me with it?
Traceback (most recent call last):
  File "/usr/local/lib/python2.7/threading.py", line 801, in __bootstrap_inner
    self.run()
  File "/usr/local/lib/python2.7/threading.py", line 1073, in run
    self.function(*self.args, **self.kwargs)
  File "receiver.py", line 132, in kill_clients
    pool.terminate()
  File "/usr/local/lib/python2.7/site-packages/multiprocess/pool.py", line 465, in terminate
    self._terminate()
  File "/usr/local/lib/python2.7/site-packages/multiprocess/util.py", line 207, in __call__
    res = self._callback(*self._args, **self._kwargs)
  File "/usr/local/lib/python2.7/site-packages/multiprocess/pool.py", line 513, in _terminate_pool
    p.terminate()
  File "/usr/local/lib/python2.7/site-packages/multiprocess/process.py", line 137, in terminate
    self._popen.terminate()
  File "/usr/local/lib/python2.7/site-packages/multiprocess/forking.py", line 174, in terminate
    os.kill(self.pid, signal.SIGTERM)
OSError: [Errno 3] No such process
The weird thing is that at the beginning it works well, but when the 4th job is received, the problem appears.
class Receiver:
    def __init__(self):
        ....
        self.results = {}

    def kill_clients(self, client_list, pool):
        for client in client_list:
            client.kill()
        pool.terminate()

    def process_result(self, result):
        if result is None:
            self.results = {}
            return
        res = result.split(':')
        if len(res) != 5:  # res[3] and res[4] are used below
            raise Exception("result with wrong format: %s" % result)
        self.results['%s_%s' % (res[0], res[1])] = {"code": res[3], "msg": res[4]}
    ...

    def handler(self, job):
        self.lg.debug("Receive job in rtmp_start_handler.")
        self.lg.debug("<%s>" % str(job))
        # each client corresponds to one process
        cli_counts = job['count']
        pool = Pool(processes=cli_counts)
        clients = []
        try:
            for i in xrange(cli_counts):
                rtmp_cli = RtmpClient(job['case'], i)
                clients.append(rtmp_cli)
            [pool.apply_async(client.run, callback=self.process_result)
             for client in clients]
            pool.close()
            sleep(1)
            self.lg.debug("All clients are started.")
            t = Timer(
                job['timeout'],
                self.kill_clients,
                args=(clients, pool)
            )
            t.start()
            self.lg.debug("Timer is started. timeout %s s" % job['timeout'])
            pool.join()
        except Exception, e:
            self.lg.warning("Exception occurred: %s" % e)
            self.lg.warning(format_exc())
            return "0"
        # here self.results shall be ready
        return self.parse_results()
The OSError is not caused by the Pool but by an issue in my own program.
When I use Popen to create a subprocess and exec ffmpeg, it exits immediately (due to another problem), so by the time I try to kill the subprocess, it no longer exists. That's why the OSError is raised.
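A minimal sketch of the resulting guard (the helper below is hypothetical, not from the original program): only signal a child that is still running, and tolerate the race where it exits in between.

import errno
import os
import signal

def kill_if_alive(proc):
    # hypothetical helper for a subprocess.Popen child
    if proc.poll() is not None:
        return  # child already exited, nothing to kill
    try:
        os.kill(proc.pid, signal.SIGTERM)
    except OSError as e:
        # ESRCH means the process died between poll() and kill()
        if e.errno != errno.ESRCH:
            raise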

python imaplib raise socket error

I want to retrieve content from the Sent Messages folder with the code below:
conn = imaplib.IMAP4_SSL('imap.exmail.qq.com', 993)
conn.login(user, pwd)
conn.select("Sent Messages")
mails = conn.search(None, 'ALL')
for num in mails[1][0].split():
    t, d = conn.fetch(num, 'RFC822')
    if t == 'OK':
        print 'Message %s\n' % (num)
        msg = email.message_from_string(d[0][1])
        subject = email.Header.decode_header(msg['subject'])[0][0]
        if re.search(month, subject):
            print subject
    else:
        print 'fetch error'
The num values range from 1 to 36; when num is 8, this error occurs:
Traceback (most recent call last):
  File "monthlySummary.py", line 30, in <module>
    t, d = conn.fetch(num, 'RFC822')
  File "/usr/lib/python2.7/imaplib.py", line 455, in fetch
    typ, dat = self._simple_command(name, message_set, message_parts)
  File "/usr/lib/python2.7/imaplib.py", line 1087, in _simple_command
    return self._command_complete(name, self._command(name, *args))
  File "/usr/lib/python2.7/imaplib.py", line 911, in _command_complete
    raise self.abort('command: %s => %s' % (name, val))
imaplib.abort: command: FETCH => socket error: unterminated line
I tried logging in again, but the same error still occurs. What does this socket error mean? How can I solve this problem?
Thanks.
From the imaplib source code (here), the relevant function is:
def _get_line(self):
    line = self.readline()
    if not line:
        raise self.abort('socket error: EOF')

    # Protocol mandates all lines terminated by CRLF
    if not line.endswith(b'\r\n'):
        raise self.abort('socket error: unterminated line')

    line = line[:-2]
    if __debug__:
        if self.debug >= 4:
            self._mesg('< %r' % line)
        else:
            self._log('< %r' % line)
    return line
The comment in the function says that the protocol mandates all lines be terminated by CRLF, so the library is evidently hitting a line that is not, and it aborts.
You need to make sure each line ends with '\r\n'. To do that, you could simply append that string to each line that does not already end with it before processing it. That will keep this particular abort from being raised.
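Since the unterminated line is read inside imaplib itself, it is hard to fix the data before the library sees it. A pragmatic workaround (my suggestion, not part of the original answer, reusing user and pwd from the question) is to catch the abort and retry the FETCH on a fresh connection:

import imaplib

def fetch_with_retry(conn, num, retries=2):
    # hypothetical helper: reconnect and retry when imaplib aborts
    # the command because of an unterminated line
    for attempt in range(retries + 1):
        try:
            return conn, conn.fetch(num, 'RFC822')
        except imaplib.IMAP4.abort:
            if attempt == retries:
                raise
            conn = imaplib.IMAP4_SSL('imap.exmail.qq.com', 993)
            conn.login(user, pwd)
            conn.select("Sent Messages")

The helper returns the (possibly new) connection together with the fetch result so the caller can keep using it.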

KeyError: 0 using multiprocessing in python

I have the following code, in which I try to call a function compute_cluster that does some computations and writes the results to a txt file (each process writes its results to a different txt file independently). However, when I run the following code:
def main():
    p = Pool(19)
    p.map(compute_cluster, [(l, r) for l in range(6, 25) for r in range(1, 4)])
    p.close()

if __name__ == "__main__":
    main()
it crashes with the following errors:
File "RMSD_calc.py", line 124, in <module>
main()
File "RMSD_calc.py", line 120, in main
p.map(compute_cluster, [(l, r) for l in range(6, 25) for r in range(1, 4)])
File "/usr/local/lib/python2.7/multiprocessing/pool.py", line 225, in map
return self.map_async(func, iterable, chunksize).get()
File "/usr/local/lib/python2.7/multiprocessing/pool.py", line 522, in get
raise self._value
KeyError: 0
When I searched online for the meaning of "KeyError: 0", I didn't find anything helpful, so any suggestion as to why this error happens is highly appreciated.
KeyError happens in compute_cluster() in a child process and p.map() reraises it for you in the parent:
from multiprocessing import Pool

def f(args):
    d = {}
    d[0]  # <-- raises KeyError

if __name__ == "__main__":
    p = Pool()
    p.map(f, [None])
Output
Traceback (most recent call last):
  File "raise-exception-in-child.py", line 9, in <module>
    p.map(f, [None])
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 227, in map
    return self.map_async(func, iterable, chunksize).get()
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 528, in get
    raise self._value
KeyError: 0
To see the full traceback, catch the exception in the child process:
import logging
from multiprocessing import Pool

def f(args):
    d = {}
    d[0]  # <-- raises KeyError

def f_mp(args):
    try:
        return f(args)
    except Exception:
        logging.exception("f(%r) failed" % (args,))

if __name__ == "__main__":
    p = Pool()
    p.map(f_mp, [None])
Output
ERROR:root:f(None) failed
Traceback (most recent call last):
  File "raise-exception-in-child.py", line 10, in f_mp
    return f(args)
  File "raise-exception-in-child.py", line 6, in f
    d[0]  # <-- raises KeyError
KeyError: 0
It shows that d[0] caused the exception.
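If you would rather keep the parent failing loudly instead of logging, a small variation (my sketch, not from the original answer) is to re-raise with the child's formatted traceback embedded in the message:

import traceback
from multiprocessing import Pool

def f(args):
    d = {}
    d[0]  # <-- raises KeyError

def f_mp(args):
    try:
        return f(args)
    except Exception:
        # the re-raised exception reaches the parent with the
        # child-side frames preserved in its message
        raise Exception("f(%r) failed:\n%s" % (args, traceback.format_exc()))

if __name__ == "__main__":
    Pool().map(f_mp, [None])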

Live Profiling of Python Server

I want to know where the Python interpreter spends the most time. I use this on a live Django application, but it should work for any long-running Python process.
I am answering my own question:
import os, re, sys, time, datetime, collections, thread, threading, atexit, traceback

u'''
debug_live.start(seconds_float) starts a monitor thread which prints
the stacktrace of all threads into a logfile.

You can report which lines are executed the most with this script:

app_foo_d@server:~$ python djangotools/utils/debug_live.py -h
usage: debug_live.py [-h] [--most-common N] {sum-all-frames,sum-last-frame}

Read stacktrace log

positional arguments:
  {sum-all-frames,sum-last-frame}

optional arguments:
  -h, --help       show this help message and exit
  --most-common N  Display the N most common lines in the stacktraces

---------------------------------

You can start the watching thread in your django middleware like this:

class FOOMiddleware:
    def __init__(self):
        u'This code gets executed once after the start of the wsgi worker process. Not for every request!'
        seconds = getattr(settings, 'debug_live_interval', None)
        if seconds:
            seconds = float(seconds)
            from djangotools.utils import debug_live
            debug_live.start(seconds)

# settings.py
debug_live_interval = 0.3  # every 0.3 second

# Inspired by http://code.google.com/p/modwsgi/wiki/DebuggingTechniques

You can get a simple report of the logged stacktraces like below. The lines
which are not from django are marked with "<====". That's most likely your code
and this could be a bottleneck.

python ..../debug_live.py read

 1971 File: "/home/foo_bar_p/django/core/handlers/wsgi.py", line 272, in __call__
       response = self.get_response(request)
 1812 File: "/home/foo_bar_p/django/core/handlers/base.py", line 111, in get_response
       response = callback(request, *callback_args, **callback_kwargs)
 1725 File: "/home/foo_bar_p/django/db/backends/postgresql_psycopg2/base.py", line 44, in execute
       return self.cursor.execute(query, args)
 1724 File: "/home/foo_bar_p/django/db/models/sql/compiler.py", line 735, in execute_sql
       cursor.execute(sql, params)
 1007 File: "/home/foo_bar_p/django/db/models/sql/compiler.py", line 680, in results_iter
       for rows in self.execute_sql(MULTI):
  796 File: "/home/foo_bar_p/django/db/models/query.py", line 273, in iterator
       for row in compiler.results_iter():
  763 File: "/home/foo_bar_p/foo/utils/ticketutils.py", line 135, in __init__ <====
       filter=type_filter(root_node=self.root_node)
  684 File: "/home/foo_bar_p/django/db/models/query.py", line 334, in count
       return self.query.get_count(using=self.db)
  679 File: "/home/foo_bar_p/django/db/models/sql/query.py", line 367, in get_aggregation
       result = query.get_compiler(using).execute_sql(SINGLE)
  677 File: "/home/foo_bar_p/django/db/models/sql/query.py", line 401, in get_count
       number = obj.get_aggregation(using=using)[None]
'''

from django.conf import settings

outfile = os.path.expanduser('~/tmp/debug_live.log')

other_code = re.compile(r'/(django|python...)/')

def stacktraces():
    code = []
    now = datetime.datetime.now()
    pid = os.getpid()
    my_thread_id = thread.get_ident()
    for thread_id, stack in sys._current_frames().items():
        if thread_id == my_thread_id:
            continue  # Don't print this monitor thread
        code.append("\n\n#START date: %s\n# ProcessId: %s\n# ThreadID: %s" % (now, pid, thread_id))
        for filename, lineno, name, line in traceback.extract_stack(stack):
            code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
            if line:
                code.append("  %s" % (line.strip()))
        code.append('#END')
    if not code:
        return
    fd = open(outfile, 'at')
    fd.write('\n'.join(code))
    fd.close()

def monitor(interval):
    while monitor_thread:
        stacktraces()
        time.sleep(interval)

monitor_thread = None

def exiting():
    global monitor_thread
    monitor_thread = None

def start(interval):
    global monitor_thread
    if monitor_thread:
        return
    assert not os.path.islink(outfile), outfile  # well known temporary name.... symlink attack...
    monitor_thread = threading.Thread(target=monitor, args=[interval])
    monitor_thread.setDaemon(True)
    atexit.register(exiting)
    monitor_thread.start()

def read_logs(args):
    # The outfile can be huge, don't read the whole file into memory.
    counter = collections.Counter()
    cur_stack = []
    py_line = ''
    code_line = ''
    if args.action == 'sum-all-frames':
        sum_all_frames = True
    else:
        sum_all_frames = False
    for line in open(outfile):
        if line.startswith('#END'):
            if sum_all_frames:
                frames = cur_stack
            else:
                frames = cur_stack[-1:]
            counter.update(frames)
            cur_stack = []
            continue
        if line[0] in '\n#':
            continue
        if line.startswith('File:'):
            py_line = line.rstrip()
            continue
        if line.startswith(' '):
            code_line = line.rstrip()
            if not (py_line, code_line) in cur_stack:
                # If there is a recursion, count the line only once per stacktrace
                cur_stack.append((py_line, code_line))
            continue
        print 'ERROR unparsed', line
    for (py, code), c in counter.most_common(args.most_common):
        if not other_code.search(py):
            py = '%s <====' % py
        print '% 5d %s\n       %s' % (c, py, code)

def main():
    import argparse
    parser = argparse.ArgumentParser(description='Read stacktrace log')
    parser.add_argument('action', choices=['sum-all-frames', 'sum-last-frame'])
    parser.add_argument('--most-common', metavar='N', default=30, type=int,
                        help='Display the N most common lines in the stacktraces')
    args = parser.parse_args()
    return read_logs(args)

if __name__ == '__main__':
    main()
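Usage then comes down to two steps (the interval and the flags below are just example values): start the sampler inside the long-running process, then aggregate the log from a shell.

# inside the long-running process, e.g. the middleware from the docstring:
from djangotools.utils import debug_live
debug_live.start(0.3)  # sample all thread stacks every 0.3 seconds

# later, from a shell:
# python djangotools/utils/debug_live.py sum-last-frame --most-common 10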
