Paramiko raises 'AuthenticationException' sometimes - python

This is one of those rare cases that are nearly impossible to reproduce, but I've seen it happen 4 times out of 20.
Here's my open_session method:
def open_session(self, host, username, password, port):
    self.session = paramiko.SSHClient()
    self.host = host
    self.username = username
    self.password = password
    self.session.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        self.session.connect(self.host, username=self.username, port=port,
                             password=self.password, timeout=self.connect_timeout)
    except paramiko.ssh_exception.BadHostKeyException:
        message = "host key could not be verified"
        self.sys_conf.logger.warning(message)
        raise BadCredentials(message)
    except paramiko.ssh_exception.AuthenticationException:
        message = "authentication failed."
        self.sys_conf.logger.warning(message)
        raise BadCredentials(message)
    except socket.timeout:
        message = "could not establish connection, timed out!"
        self.sys_conf.logger.warning(message)
        raise ConnectionFail(message)
    except (paramiko.ssh_exception.SSHException, socket.error) as e:
        message = "could not establish connection, an error occurred: {}".format(e)
        self.sys_conf.logger.warning(message)
        raise ConnectionFail(message)
Any particular reason why Paramiko does this?

It should be:
self.session = paramiko.SSHClient()
self.host = host
self.username = username
self.password = password
self.session.load_system_host_keys()  # you missed this
self.session.set_missing_host_key_policy(paramiko.AutoAddPolicy())
load_system_host_keys(filename=None)
Load host keys from a system (read-only) file. Host keys read with this method will not be saved back by save_host_keys.
(from the paramiko documentation)

Upon further investigation, it turns out that it's an issue with the machine itself. The code mentioned in my initial question is good as is and worked 300 times in a row without fail on another, more stable machine.

Related

"'Connection' object has no attribute '_sftp_live'" when pysftp connection fails

I'd like to nicely catch the error when "No hostkey for host *** is found" and give an appropriate message to the end user. I tried this:
import pysftp, paramiko
try:
    with pysftp.Connection('1.2.3.4', username='root', password='') as sftp:
        sftp.listdir()
except paramiko.ssh_exception.SSHException as e:
    print('SSH error, you need to add the public key of your remote in your local known_hosts file first.', e)
but unfortunately the output is not very nice:
SSH error, you need to add the public key of your remote in your local known_hosts file first. No hostkey for host 1.2.3.4 found.
Exception ignored in: <function Connection.__del__ at 0x00000000036B6D38>
Traceback (most recent call last):
  File "C:\Python37\lib\site-packages\pysftp\__init__.py", line 1013, in __del__
    self.close()
  File "C:\Python37\lib\site-packages\pysftp\__init__.py", line 784, in close
    if self._sftp_live:
AttributeError: 'Connection' object has no attribute '_sftp_live'
How can I nicely avoid these last lines / this "Exception ignored" with a try/except?
I had the same problem. I solved this by disabling the hostkeys in the cnopts:
import pysftp as sftp

FTP_HOST = "sftp.abcd.com"
FTP_USER = "root"
FTP_PASS = ""

cnopts = sftp.CnOpts()
cnopts.hostkeys = None

with sftp.Connection(host=FTP_HOST, username=FTP_USER, password=FTP_PASS, cnopts=cnopts) as sftp:
    print("Connection successfully established ... ")
    sftp.cwd('/folder/')  # Switch to a remote directory
    directory_structure = sftp.listdir_attr()  # Obtain structure of the remote directory
    for attr in directory_structure:
        print(attr.filename, attr)
The analysis by @reverse_engineer is correct. However:
It seems that an additional attribute, self._transport, is also defined too late.
The problem can be corrected temporarily, until a permanent fix arrives, by subclassing the pysftp.Connection class as follows:
import pysftp
import paramiko

class My_Connection(pysftp.Connection):
    def __init__(self, *args, **kwargs):
        self._sftp_live = False
        self._transport = None
        super().__init__(*args, **kwargs)

try:
    with My_Connection('1.2.3.4', username='root', password='') as sftp:
        l = sftp.listdir()
        print(l)
except paramiko.ssh_exception.SSHException as e:
    print('SSH error, you need to add the public key of your remote in your local known_hosts file first.', e)
Update
I could not duplicate this error on my desktop. However, I see in the pysftp source that it initializes its _cnopts attribute with self._cnopts = cnopts or CnOpts(), where cnopts is a keyword parameter of the pysftp.Connection constructor. There is a possibility of the CnOpts constructor throwing a HostKeysException if no host keys are found, resulting in the _cnopts attribute never being set.
Try the following updated code and let me know if it works:
import pysftp
import paramiko

class My_Connection(pysftp.Connection):
    def __init__(self, *args, **kwargs):
        try:
            if kwargs.get('cnopts') is None:
                kwargs['cnopts'] = pysftp.CnOpts()
        except pysftp.HostKeysException as e:
            self._init_error = True
            raise paramiko.ssh_exception.SSHException(str(e))
        else:
            self._init_error = False
        self._sftp_live = False
        self._transport = None
        super().__init__(*args, **kwargs)

    def __del__(self):
        if not self._init_error:
            self.close()

try:
    with My_Connection('1.2.3.4', username='root', password='') as sftp:
        l = sftp.listdir()
        print(l)
except paramiko.ssh_exception.SSHException as e:
    print('SSH error, you need to add the public key of your remote in your local known_hosts file first.', e)
I think that's a bug in pysftp. You will always get that behavior when pysftp.Connection fails on a "No hostkey for XXX found" exception, because the failed Connection object (it fails, so you can't access it, but it exists in the Python interpreter) gets cleaned up by the GC, which deletes it, and as you can see here, deleting it tries to close the connection first.
We see that close() checks whether the connection is live by checking self._sftp_live. However, the exception was thrown in the constructor of Connection before that attribute is defined (the exception happens at line 132, while _sftp_live is defined at line 134), so that leaves the failed Connection object in an inconsistent state, hence the uncaught exception you see.
This has no easy solution that I can think of except introducing a nice bug fix to the pysftp project ;)
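For illustration, here is a minimal, self-contained sketch of the same pattern (the class and attribute names are made up, not pysftp's): if the constructor raises before an attribute is assigned, a __del__ method that reads that attribute produces exactly this kind of "Exception ignored in ... __del__" message.
class Demo:
    def __init__(self, fail=True):
        if fail:
            # simulates CnOpts()/the host-key lookup failing inside the constructor
            raise RuntimeError("failing before _live is ever assigned")
        self._live = False

    def __del__(self):
        # like pysftp's close(): reads an attribute the failed __init__ never set
        if self._live:
            print("closing")

try:
    Demo()
except RuntimeError:
    pass  # Python still reports "Exception ignored in: ... __del__" with an AttributeError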

Subprocess to execute commands on Windows machine

I am trying to connect to a remote Windows machine and execute a few commands from the command line. The commands are like: I have an executable in a certain folder, go to that folder and run the command
InstallUtil.exe <exe_name>
Here is my code:
class WindowsMachine:
    def __init__(self, hostname, username, password):
        self.hostname = hostname
        self.username = username
        self.password = password
        # self.remote_path = hostname
        try:
            print("Establishing connection to .....%s" % self.hostname)
            connection = wmi.WMI(self.hostname, user=self.username, password=self.password)
            print("Connection established")
            try:
                print(os.listdir(r"C:\Program Files\BISYS\BCE"))
                a = subprocess.check_output(["InstallUtil.exe", "IamHere.exe"], cwd="C:/Program Files/ABC/BCD/", stderr=subprocess.STDOUT)
                print(a)
            except subprocess.CalledProcessError as e:
                raise RuntimeError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
        except wmi.x_wmi:
            print("Could not connect to machine")
            raise

w = WindowsMachine(hostname, username, password)
print(w)
print(w.run_remote())
But I am getting error saying:
WindowsError: [Error 2] The system cannot find the file specified
As stated in the comments, if your goal is to run a process on the remote Windows machine, you should first of all keep your remote connection open: instead of storing it in the local variable connection, store it in the class member self.connection.
Now, to execute a command on the remote machine using WMI, you should do something like this (see the WMI tutorial):
class ConnectToRemoteWindowsMachine:
    def __init__(self, hostname, username, password):
        self.hostname = hostname
        self.username = username
        self.password = password
        # self.remote_path = hostname
        try:
            print("Establishing connection to .....%s" % self.hostname)
            self.connection = wmi.WMI(self.hostname, user=self.username, password=self.password)
            print("Connection established")
        except wmi.x_wmi:
            print("Could not connect to machine")
            raise

    def run_remote(self, run_async=False, minimized=True):
        SW_SHOWNORMAL = 1
        process_startup = self.connection.Win32_ProcessStartup.new()
        process_startup.ShowWindow = SW_SHOWNORMAL
        process_id, result = self.connection.Win32_Process.Create(
            CommandLine="notepad.exe",
            ProcessStartupInformation=process_startup
        )
        if result == 0:
            print("Process started successfully: %d" % process_id)
        else:
            raise RuntimeError("Problem creating process: %d" % result)

w = ConnectToRemoteWindowsMachine(hostname, username, password)
print(w)
print(w.run_remote())

smtp = smtplib.SMTP(host) > (mail.host)?

I have a scanning script that currently works by connecting to an SMTP server, printing the results and moving to the next server in the list. This is the first connect code:
def sendchk(listindex, host, user, password):  # separate function for checking
    try:
        smtp = smtplib.SMTP(host)
        smtp.login(user, password)
        code = smtp.ehlo()[0]
After that comes the failure "except":
        smtp.quit()
    except (socket.gaierror, socket.error, socket.herror, smtplib.SMTPException), msg:
        print "[-] Login Failed:", host, user, password
        pass
I'm trying to get it to repeat the same code with the same host, but with a "mail." subdomain prefix added. I thought this would work:
        smtp.quit()
    except (socket.gaierror, socket.error, socket.herror, smtplib.SMTPException), msg:
        print "[-] Login Failed:", host, user, password
        sub1 = 'mail.'
        host2 = '{0}#{1}'.format(sub1, host)
        smtp = smtplib.SMTP(host2)
But it jams, saying there's an issue in the server list.
What would be the better way to inject the prefix into the host here?
Without having tested it: if sub1 is "mail." and host is "myhost.com", then '{0}#{1}'.format(sub1, host) will result in mail.#myhost.com. Is that really your subdomain name? I suppose it should be mail.myhost.com. If so, drop the "#" from your format string.
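A minimal sketch of the corrected prefixing (the host value here is just an example):
sub1 = 'mail.'
host = 'myhost.com'  # example value
host2 = '{0}{1}'.format(sub1, host)
print(host2)  # -> mail.myhost.com
# then retry with: smtp = smtplib.SMTP(host2)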

Paramiko check login timeout of SSH Server

I want to see how long it takes my SSH server to close the connection if the user does not log in.
What I have so far:
self.sshobj = paramiko.SSHClient()
self.sshobj.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.sshobj.connect("192.168.0.1", port=22, username="test", password="test")
self.channel = self.sshobj.invoke_shell()
But the problem is that I don't want to log in, which sshobj.connect does; I want to stay at the login prompt.
And I want to check how long it takes for the server to close the connection.
Is there any way to do this via paramiko?
You do not necessarily need paramiko to check the LoginGraceTime, but since you're specifically asking for it:
Note: banner_timeout is just a timeout for the peer's SSH banner response.
Note: timeout is actually a socket read timeout; None means no timeout. Use this to set a hard timeout for your check.
self.sshobj = paramiko.SSHClient()
self.sshobj.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
    self.sshobj.connect("192.168.0.1", port=22, look_for_keys=False, timeout=None, banner_timeout=5)
except paramiko.ssh_exception.SSHException, se:
    # paramiko raises SSHException('No authentication methods available',) since we did not specify any auth methods. The socket stays open.
    pass
ts_start = time.time()
try:
    self.channel = self.sshobj.invoke_shell()
except EOFError, e:
    # EOFError is raised when the peer terminates the session.
    pass
print time.time()-ts_start
You can even get rid of the first try/except for "No authentication methods available" by overriding self.sshobj._auth with a no-op. Below are some changes to the first variant:
def noauth(username, password, pkey, key_filenames, allow_agent,
           look_for_keys, gss_auth, gss_kex, gss_deleg_creds, gss_host): pass
...
sshobj._auth = noauth
sshobj.connect("192.168.0.1", port=22, look_for_keys=False, timeout=None, banner_timeout=5)
...
But, as initially mentioned, you do not even need paramiko to test this timeout since the LoginGraceTime triggers like a server-side socket read timeout once banners are exchanged. Therefore you just need to establish a TCP connection, send a fake ssh banner and wait until the remote side disconnects:
import socket
import time

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("192.168.0.1", 22))
s.sendall("SSH-2.0-MyPythonSSHProbingClient")
s.settimeout(5*60)  # hard-limit
print s.recv(500)  # remote banner
ts_start = time.time()
if not s.recv(100):
    # exits when the remote side closes the connection, or raises socket.timeout when the hard-limit is hit
    print time.time()-ts_start
else:
    raise Exception("whoop, something's gone wrong")
The non-paramiko variant is even more accurate.

Python ldap connection test

I want to be able to test that a connection to a host and port is valid.
I'm using the current line:
ldapObject = ldap.open(host="host", port=389)
This seems to return an instance, but I need to determine whether it can actually find the host or not.
Any suggestions?
open is deprecated.
This is a working example. See if this helps.
import ldap

def ldap_initialize(remote, port, user, password, use_ssl=False, timeout=None):
    prefix = 'ldap'
    if use_ssl is True:
        prefix = 'ldaps'
    # ask ldap to ignore certificate errors
    ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
    if timeout:
        ldap.set_option(ldap.OPT_NETWORK_TIMEOUT, timeout)
    ldap.set_option(ldap.OPT_REFERRALS, ldap.OPT_OFF)
    server = prefix + '://' + remote + ':' + '%s' % port
    conn = ldap.initialize(server)
    conn.simple_bind_s(user, password)
    return conn
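For example, a hedged usage sketch (the host, port, bind DN and password are placeholders); simple_bind_s raises an exception such as ldap.SERVER_DOWN or ldap.INVALID_CREDENTIALS when the host cannot be reached or the bind fails:
try:
    conn = ldap_initialize('ldap.example.com', 389,
                           'cn=admin,dc=example,dc=com', 'secret',
                           timeout=5)
    print("Connection OK")
    conn.unbind_s()  # clean up the test connection
except ldap.SERVER_DOWN:
    print("Failed: could not reach the LDAP server")
except ldap.INVALID_CREDENTIALS:
    print("Server reachable, but the bind credentials are wrong")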
Found a solution:
import ldap

try:
    ldapObject = ldap.open(host="host", port=389)
    ldapObject.simple_bind_s()
except ldap.SERVER_DOWN:
    print("Failed")
