Python multiple telnet sessions - python

I need to build a script that captures the telnet output of as many hosts as possible and saves it to a separate file per host. The script should run as a daemon.
At the moment I have a function that encapsulates the logic for a single host with telnetlib, but I don't know how to proceed from there. I planned to open a process (multiprocessing.Process) for each host, but I suspect that would be a waste of resources and that there must be a better way :)
def TelnetLogSaver(hostname, ip, filename):
    # open the file and the telnet session
    f = open(filename, "a")
    tn = telnetlib.Telnet(ip, 23, TIMEOUT)
    # login
    e = tn.read_until("Login: ")
    tn.write(USER + "\n")
    # and password
    e = tn.read_until("Password: ")
    tn.write(PASSWORD + "\n")
    # Connected. Start infinite loop to save messages log
    while True:
        e = tn.read_until(PROMPT, TIMEOUT)
        if e != "":
            f.write(datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"))
            f.write(e)
            f.flush()
        # avoid session timeout
        tn.write("\n")
        e = tn.read_until(PROMPT, TIMEOUT)

I believe the following should do what you require; I took your original code and turned it into a thread:
import threading
import telnetlib
import datetime
import sys

# Global Variable Declarations
TIMEOUT = 30
USER = "Noel"
PASSWORD = "Noel"
PROMPT = "Noel"

class listener(threading.Thread):
    def __init__(self, filename, ip):
        # Have to make a call to the super class' __init__ method
        super(listener, self).__init__()
        self.f = open(filename, "a")
        try:
            self.tn = telnetlib.Telnet(ip, 23, TIMEOUT)
        except:
            print "Bad Connection"
            sys.exit(0)

    def run(self):
        # login
        e = self.tn.read_until("Login: ")
        self.tn.write(USER + "\n")
        # and password
        e = self.tn.read_until("Password: ")
        self.tn.write(PASSWORD + "\n")
        while True:
            e = self.tn.read_until(PROMPT, TIMEOUT)
            if e != "":
                self.f.write(datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"))
                self.f.write(e.strip())
                self.f.flush()
            # avoid session timeout
            self.tn.write("\n")

if __name__ == "__main__":
    # things_to_listen_to is a dictionary of hosts and files to output
    # to; to add more things to listen to just add an extra entry into
    # things_to_listen_to in the format: host : outputfile
    things_to_listen_to = {"localhost": "localhost_output.txt"}
    # thread_holder is going to hold all the threads we are going to start
    thread_holder = []
    for host, filename in things_to_listen_to.iteritems():
        thread_holder.append(listener(filename, host))
    for thread in thread_holder:
        thread.start()
Hope this helps; if you have any problems, update your question or leave a comment.


Python subprocess with real-time input and multiple consoles

The main issue
In a nutshell: I want two consoles for my program. One for active user input, and the other for pure log output. (Working code including the accepted answer is in the question's text below, under section "Edit-3". Sections "Edit-1" and "Edit-2" contain functioning workarounds.)
For this I have a main command-line Python script, which is supposed to open an additional console for log output only. I intend to redirect the log output, which would otherwise be printed on the main script's console, to the stdin of the second console, which I start as a subprocess. (I use subprocess because I didn't find any other way to open a second console.)
The problem is that I seem to be able to send to the stdin of this second console, yet nothing gets printed on it.
Following is the code I used for experimenting (with Python 3.4 on PyDev under Windows 10). The function writing(input, pipe, process) contains the part where the generated string is copied to the stdin (passed in as pipe) of the console opened via subprocess. The function writing(...) is run via the class writetest(Thread). (I left in some code that I commented out.)
import os
import sys
import io
import time
import threading
from cmd import Cmd
from queue import Queue
from subprocess import Popen, PIPE, CREATE_NEW_CONSOLE
REPETITIONS = 3
# Position of "The class" (Edit-2)
# Position of "The class" (Edit-1)
class generatetest(threading.Thread):
def __init__(self, queue):
self.output = queue
threading.Thread.__init__(self)
def run(self):
print('run generatetest')
generating(REPETITIONS, self.output)
print('generatetest done')
def getout(self):
return self.output
class writetest(threading.Thread):
def __init__(self, input=None, pipe=None, process=None):
if (input == None): # just in case
self.input = Queue()
else:
self.input = input
if (pipe == None): # just in case
self.pipe = PIPE
else:
self.pipe = pipe
if (process == None): # just in case
self.process = subprocess.Popen('C:\Windows\System32\cmd.exe', universal_newlines=True, creationflags=CREATE_NEW_CONSOLE)
else:
self.process = proc
threading.Thread.__init__(self)
def run(self):
print('run writetest')
writing(self.input, self.pipe, self.process)
print('writetest done')
# Position of "The function" (Edit-2)
# Position of "The function" (Edit-1)
def generating(maxint, outline):
print('def generating')
for i in range(maxint):
time.sleep(1)
outline.put_nowait(i)
def writing(input, pipe, process):
print('def writing')
while(True):
try:
print('try')
string = str(input.get(True, REPETITIONS)) + "\n"
pipe = io.StringIO(string)
pipe.flush()
time.sleep(1)
# print(pipe.readline())
except:
print('except')
break
finally:
print('finally')
pass
data_queue = Queue()
data_pipe = sys.stdin
# printer = sys.stdout
# data_pipe = os.pipe()[1]
# The code of 'C:\\Users\\Public\\Documents\\test\\test-cmd.py'
# can be found in the question's text further below under "More code"
exe = 'C:\Python34\python.exe'
# exe = 'C:\Windows\System32\cmd.exe'
arg = 'C:\\Users\\Public\\Documents\\test\\test-cmd.py'
arguments = [exe, arg]
# proc = Popen(arguments, universal_newlines=True, creationflags=CREATE_NEW_CONSOLE)
proc = Popen(arguments, stdin=data_pipe, stdout=PIPE, stderr=PIPE,
universal_newlines=True, creationflags=CREATE_NEW_CONSOLE)
# Position of "The call" (Edit-2 & Edit-1) - file init (proxyfile)
# Position of "The call" (Edit-2) - thread = sockettest()
# Position of "The call" (Edit-1) - thread0 = logtest()
thread1 = generatetest(data_queue)
thread2 = writetest(data_queue, data_pipe, proc)
# time.sleep(5)
# Position of "The call" (Edit-2) - thread.start()
# Position of "The call" (Edit-1) - thread0.start()
thread1.start()
thread2.start()
# Position of "The call" (Edit-2) - thread.join()
# Position of "The call" (Edit-1) - thread.join()
thread1.join(REPETITIONS * REPETITIONS)
thread2.join(REPETITIONS * REPETITIONS)
# data_queue.join()
# receiver = proc.communicate(stdin, 5)
# print('OUT:' + receiver[0])
# print('ERR:' + receiver[1])
print("1st part finished")
A slightly different approach
The following additional code snippet works as far as extracting the stdout from the subprocess is concerned. However, the previously sent stdin still isn't printed on the second console. Also, the second console closes immediately.
proc2 = Popen(['C:\Python34\python.exe', '-i'],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
creationflags=CREATE_NEW_CONSOLE)
proc2.stdin.write(b'2+2\n')
proc2.stdin.flush()
print(proc2.stdout.readline())
proc2.stdin.write(b'len("foobar")\n')
proc2.stdin.flush()
print(proc2.stdout.readline())
time.sleep(1)
proc2.stdin.close()
proc2.terminate()
proc2.wait(timeout=0.2)
print("Exiting Main Thread")
More info
As soon as I use one of the parameters stdin=data_pipe, stdout=PIPE, stderr=PIPE for starting the subprocess, the resulting second console isn't active and doesn't accept keyboard input (which isn't desired, though it might be helpful information here).
The subprocess method communicate() can't be used for this as it waits for the process to end.
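For illustration only (this is my own minimal sketch, not the original code), incremental writes to the child's stdin avoid the blocking behavior of communicate(); the child script name below is a placeholder:
from subprocess import Popen, PIPE, CREATE_NEW_CONSOLE
import time

# Write to the child's stdin line by line instead of using communicate(),
# which would block until the child process exits.
proc = Popen(['python.exe', 'test-cmd.py'],   # placeholder child script
             stdin=PIPE, universal_newlines=True,
             creationflags=CREATE_NEW_CONSOLE)
for i in range(3):
    proc.stdin.write('log line %d\n' % i)     # incremental write...
    proc.stdin.flush()                        # ...flushed immediately
    time.sleep(1)
proc.stdin.close()                            # EOF tells the child we are done
proc.wait()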
More code
Finally the code for the file, which is for the second console.
C:\Users\Public\Documents\test\test-cmd.py
from cmd import Cmd
from time import sleep
from datetime import datetime
INTRO = 'command line'
PROMPT = '> '
class CommandLine(Cmd):
"""Custom console"""
def __init__(self, intro=INTRO, prompt=PROMPT):
Cmd.__init__(self)
self.intro = intro
self.prompt = prompt
self.doc_header = intro
self.running = False
def do_dummy(self, args):
"""Runs a dummy method."""
print("Do the dummy.")
self.running = True
while(self.running == True):
print(datetime.now())
sleep(5)
def do_stop(self, args):
"""Stops the dummy method."""
print("Stop the dummy, if you can.")
self.running = False
def do_exit(self, args):
"""Exits this console."""
print("Do console exit.")
exit()
if __name__ == '__main__':
cl = CommandLine()
cl.prompt = PROMPT
cl.cmdloop(INTRO)
Thoughts
So far I'm not even certain whether the Windows command line interface can accept input from anything other than the keyboard (such as the desired stdin pipe or similar). Since it seems to have some sort of passive mode, I expect that it can.
Why is this not working?
Edit-1: Workaround via file (proof of concept)
Using a file as a workaround and displaying its new content, as suggested in the answer to Working multiple consoles in python, works in general. However, since the log file will grow to many GB, it isn't a practical solution in this case. At the very least it would require file splitting and proper handling of it (a rotation sketch follows below).
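A minimal sketch of the file-splitting concern (my addition, not part of the original workaround), assuming the standard library's logging.handlers.RotatingFileHandler is acceptable; it caps the proxy file's size and keeps a limited number of backups:
import logging
from logging.handlers import RotatingFileHandler

logger = logging.getLogger('second_console_feed')
handler = RotatingFileHandler(
    'C:\\Users\\Public\\Documents\\test\\tempdata',  # same proxy file as above
    maxBytes=50 * 1024 * 1024,                        # roll over at 50 MB
    backupCount=5)                                    # keep at most 5 old files
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info('test line')  # the tail script then only ever follows a bounded file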
The class:
class logtest(threading.Thread):
def __init__(self, file):
self.file = file
threading.Thread.__init__(self)
def run(self):
print('run logtest')
logging(self.file)
print('logtest done')
The function:
def logging(file):
pexe = 'C:\Python34\python.exe '
script = 'C:\\Users\\Public\\Documents\\test\\test-004.py'
filek = '--file'
filev = file
file = open(file, 'a')
file.close()
time.sleep(1)
print('LOG START (outer): ' + script + ' ' + filek + ' ' + filev)
proc = Popen([pexe, script, filek, filev], universal_newlines=True, creationflags=CREATE_NEW_CONSOLE)
print('LOG FINISH (outer): ' + script + ' ' + filek + ' ' + filev)
time.sleep(2)
The call:
# The file tempdata is filled with several strings of "0\n1\n2\n"
# Looking like this:
# 0
# 1
# 2
# 0
# 1
# 2
proxyfile = 'C:\\Users\\Public\\Documents\\test\\tempdata'
f = open(proxyfile, 'a')
f.close()
time.sleep(1)
thread0 = logtest(proxyfile)
thread0.start()
thread0.join(REPETITIONS * REPETITIONS)
The tail script ("test-004.py"):
As Windows doesn't offer the tail command, I used the following script instead (based on the answer to How to implement a pythonic equivalent of tail -F?), which worked for this. The additional, and somewhat unnecessary, class CommandLine(Cmd) was initially an attempt to keep the second console open (because the script file argument was missing). However, it also proved useful for keeping the console printing the new log file content fluently; otherwise the output wasn't deterministic/predictable.
import time
import sys
import os
import threading
from cmd import Cmd
from argparse import ArgumentParser
def main(args):
parser = ArgumentParser(description="Parse arguments.")
parser.add_argument("-f", "--file", type=str, default='', required=False)
arguments = parser.parse_args(args)
if not arguments.file:
print('LOG PRE-START (inner): file argument not found. Creating new default entry.')
arguments.file = 'C:\\Users\\Public\\Documents\\test\\tempdata'
print('LOG START (inner): ' + os.path.abspath(os.path.dirname(__file__)) + ' ' + arguments.file)
f = open(arguments.file, 'a')
f.close()
time.sleep(1)
words = ['word']
console = CommandLine(arguments.file, words)
console.prompt = ''
thread = threading.Thread(target=console.cmdloop, args=('', ))
thread.start()
print("\n")
for hit_word, hit_sentence in console.watch():
print("Found %r in line: %r" % (hit_word, hit_sentence))
print('LOG FINISH (inner): ' + os.path.abspath(os.path.dirname(__file__)) + ' ' + arguments.file)
class CommandLine(Cmd):
"""Custom console"""
def __init__(self, fn, words):
Cmd.__init__(self)
self.fn = fn
self.words = words
def watch(self):
fp = open(self.fn, 'r')
while True:
time.sleep(0.05)
new = fp.readline()
print(new)
# Once all lines are read this just returns ''
# until the file changes and a new line appears
if new:
for word in self.words:
if word in new:
yield (word, new)
else:
time.sleep(0.5)
if __name__ == '__main__':
print('LOG START (inner - as main).')
main(sys.argv[1:])
Edit-1: More thoughts
Three workarounds that I haven't tried yet and that might work are sockets (also suggested in this answer to Working multiple consoles in python), getting a process object via the process ID for more control, and using the ctypes library to access the Windows console API directly, which allows setting the screen buffer; a console can have multiple buffers, but only one active buffer at a time (stated in the remarks of the documentation for the CreateConsoleScreenBuffer function).
However, using sockets is probably the easiest of these, and at least the size of the log doesn't matter that way. Connection problems might be an issue here, though.
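For the ctypes idea, here is a rough and untested sketch of what switching screen buffers could look like (an assumption on my part based on the CreateConsoleScreenBuffer documentation, not something verified for this problem):
import ctypes

kernel32 = ctypes.windll.kernel32
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
CONSOLE_TEXTMODE_BUFFER = 1

# create a second screen buffer, make it the visible one and write to it
buf = kernel32.CreateConsoleScreenBuffer(
    GENERIC_READ | GENERIC_WRITE, 0, None, CONSOLE_TEXTMODE_BUFFER, None)
kernel32.SetConsoleActiveScreenBuffer(buf)
written = ctypes.c_ulong(0)
text = 'log output on the second buffer\n'
kernel32.WriteConsoleW(buf, text, len(text), ctypes.byref(written), None)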
Edit-2: Workaround via sockets (proof of concept)
Using sockets as a workaround to display new log entries, as was also suggested in the answer to Working multiple consoles in python, works in general too. However, this seems like a lot of effort for something that should simply be sent to the process of the receiving console.
The class:
class sockettest(threading.Thread):
def __init__(self, host, port, file):
self.host = host
self.port = port
self.file = file
threading.Thread.__init__(self)
def run(self):
print('run sockettest')
socketing(self.host, self.port, self.file)
print('sockettest done')
The function:
def socketing(host, port, file):
pexe = 'C:\Python34\python.exe '
script = 'C:\\Users\\Public\\Documents\\test\test-005.py'
hostk = '--address'
hostv = str(host)
portk = '--port'
portv = str(port)
filek = '--file'
filev = file
file = open(file, 'a')
file.close()
time.sleep(1)
print('HOST START (outer): ' + pexe + script + ' ' + hostk + ' ' + hostv + ' ' + portk + ' ' + portv + ' ' + filek + ' ' + filev)
proc = Popen([pexe, script, hostk, hostv, portk, portv, filek, filev], universal_newlines=True, creationflags=CREATE_NEW_CONSOLE)
print('HOST FINISH (outer): ' + pexe + script + ' ' + hostk + ' ' + hostv + ' ' + portk + ' ' + portv + ' ' + filek + ' ' + filev)
time.sleep(2)
The call:
# The file tempdata is filled with several strings of "0\n1\n2\n"
# Looking like this:
# 0
# 1
# 2
# 0
# 1
# 2
proxyfile = 'C:\\Users\\Public\\Documents\\test\\tempdata'
f = open(proxyfile, 'a')
f.close()
time.sleep(1)
thread = sockettest('127.0.0.1', 8888, proxyfile)
thread.start()
thread.join(REPETITIONS * REPETITIONS)
The socket script ("test-005.py"):
The following script is based on Python: Socket programming server-client application using threads. Here I just kept the class CommandLine(Cmd) as a log entry generator. At this point it shouldn't be a problem to put the client into the main script, which calls the second console, and then feed the queue with real log entries instead of (new) file lines. (The server is the printer.)
import socket
import sys
import threading
import time
from cmd import Cmd
from argparse import ArgumentParser
from queue import Queue
BUFFER_SIZE = 5120
class CommandLine(Cmd):
"""Custom console"""
def __init__(self, fn, words, queue):
Cmd.__init__(self)
self.fn = fn
self.words = words
self.queue = queue
def watch(self):
fp = open(self.fn, 'r')
while True:
time.sleep(0.05)
new = fp.readline()
# Once all lines are read this just returns ''
# until the file changes and a new line appears
self.queue.put_nowait(new)
def main(args):
parser = ArgumentParser(description="Parse arguments.")
parser.add_argument("-a", "--address", type=str, default='127.0.0.1', required=False)
parser.add_argument("-p", "--port", type=str, default='8888', required=False)
parser.add_argument("-f", "--file", type=str, default='', required=False)
arguments = parser.parse_args(args)
if not arguments.address:
print('HOST PRE-START (inner): host argument not found. Creating new default entry.')
arguments.host = '127.0.0.1'
if not arguments.port:
print('HOST PRE-START (inner): port argument not found. Creating new default entry.')
arguments.port = '8888'
if not arguments.file:
print('HOST PRE-START (inner): file argument not found. Creating new default entry.')
arguments.file = 'C:\\Users\\Public\\Documents\\test\\tempdata'
file_queue = Queue()
print('HOST START (inner): ' + ' ' + arguments.address + ':' + arguments.port + ' --file ' + arguments.file)
# Start server
thread = threading.Thread(target=start_server, args=(arguments.address, arguments.port, ))
thread.start()
time.sleep(1)
# Start client
thread = threading.Thread(target=start_client, args=(arguments.address, arguments.port, file_queue, ))
thread.start()
# Start file reader
f = open(arguments.file, 'a')
f.close()
time.sleep(1)
words = ['word']
console = CommandLine(arguments.file, words, file_queue)
console.prompt = ''
thread = threading.Thread(target=console.cmdloop, args=('', ))
thread.start()
print("\n")
for hit_word, hit_sentence in console.watch():
print("Found %r in line: %r" % (hit_word, hit_sentence))
print('HOST FINISH (inner): ' + ' ' + arguments.address + ':' + arguments.port)
def start_client(host, port, queue):
host = host
port = int(port) # arbitrary non-privileged port
queue = queue
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
soc.connect((host, port))
except:
print("Client connection error" + str(sys.exc_info()))
sys.exit()
print("Enter 'quit' to exit")
message = ""
while message != 'quit':
time.sleep(0.05)
if(message != ""):
soc.sendall(message.encode("utf8"))
if soc.recv(BUFFER_SIZE).decode("utf8") == "-":
pass # null operation
string = ""
if (not queue.empty()):
string = str(queue.get_nowait()) + "\n"
if(string == None or string == ""):
message = ""
else:
message = string
soc.send(b'--quit--')
def start_server(host, port):
host = host
port = int(port) # arbitrary non-privileged port
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# SO_REUSEADDR flag tells the kernel to reuse a local socket in TIME_WAIT state, without waiting for its natural timeout to expire
soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print("Socket created")
try:
soc.bind((host, port))
except:
print("Bind failed. Error : " + str(sys.exc_info()))
sys.exit()
soc.listen(5) # queue up to 5 requests
print("Socket now listening")
# infinite loop- do not reset for every requests
while True:
connection, address = soc.accept()
ip, port = str(address[0]), str(address[1])
print("Connected with " + ip + ":" + port)
try:
threading.Thread(target=client_thread, args=(connection, ip, port)).start()
except:
print("Thread did not start.")
traceback.print_exc()
soc.close()
def client_thread(connection, ip, port, max_buffer_size=BUFFER_SIZE):
is_active = True
while is_active:
client_input = receive_input(connection, max_buffer_size)
if "--QUIT--" in client_input:
print("Client is requesting to quit")
connection.close()
print("Connection " + ip + ":" + port + " closed")
is_active = False
elif not client_input == "":
print("{}".format(client_input))
connection.sendall("-".encode("utf8"))
else:
connection.sendall("-".encode("utf8"))
def receive_input(connection, max_buffer_size):
client_input = connection.recv(max_buffer_size)
client_input_size = sys.getsizeof(client_input)
if client_input_size > max_buffer_size:
print("The input size is greater than expected {}".format(client_input_size))
decoded_input = client_input.decode("utf8").rstrip() # decode and strip end of line
result = process_input(decoded_input)
return result
def process_input(input_str):
return str(input_str).upper()
if __name__ == '__main__':
print('HOST START (inner - as main).')
main(sys.argv[1:])
Edit-2: Furthermore thoughts
Having direct control of the subprocess' console input pipe/buffer would be the preferable solution to this problem. That is what the bounty of 500 reputation is for.
Unfortunately I'm running out of time, so I might use one of those workarounds for now and replace it with the proper solution later. Or maybe I have to use the nuclear option of just one console, where the ongoing log output is paused during any user keyboard input and printed afterwards. Of course, this might lead to buffer problems when the user stops typing halfway through.
Edit-3: Code including the accepted answer (one file)
With the answer from James Kent I get the desired behavior when I start a script with this code via the Windows command line (cmd) or PowerShell. However, when I start the same script via Eclipse/PyDev with "Python run", the output is always printed on the main Eclipse/PyDev console, while the second console of the subprocess remains empty and stays inactive. I guess this is another system/environment peculiarity and a different issue.
from sys import argv, stdin, stdout
from threading import Thread
from cmd import Cmd
from time import sleep
from datetime import datetime
from subprocess import Popen, PIPE, CREATE_NEW_CONSOLE
INTRO = 'command line'
PROMPT = '> '
class CommandLine(Cmd):
"""Custom console"""
def __init__(self, subprocess, intro=INTRO, prompt=PROMPT):
Cmd.__init__(self)
self.subprocess = subprocess
self.intro = intro
self.prompt = prompt
self.doc_header = intro
self.running = False
def do_date(self, args):
"""Prints the current date and time."""
print(datetime.now())
sleep(1)
def do_exit(self, args):
"""Exits this command line application."""
print("Exit by user command.")
if self.subprocess is not None:
try:
self.subprocess.terminate()
except:
self.subprocess.kill()
exit()
class Console():
def __init__(self):
if '-r' not in argv:
self.p = Popen(
['python.exe', __file__, '-r'],
stdin=PIPE,
creationflags=CREATE_NEW_CONSOLE
)
else:
while True:
data = stdin.read(1)
if not data:
# break
sleep(1)
continue
stdout.write(data)
def write(self, data):
self.p.stdin.write(data.encode('utf8'))
self.p.stdin.flush()
def getSubprocess(self):
if self.p:
return self.p
else:
return None
class Feeder (Thread):
def __init__(self, console):
self.console = console
Thread.__init__(self)
def run(self):
feeding(self.console)
def feeding(console):
for i in range(0, 100):
console.write('test %i\n' % i)
sleep(1)
if __name__ == '__main__':
p = Console()
if '-r' not in argv:
thread = Feeder(p)
thread.setDaemon(True)
thread.start()
cl = CommandLine(subprocess=p.getSubprocess())
cl.use_rawinput = False
cl.prompt = PROMPT
cl.cmdloop('\nCommand line is waiting for user input (e.g. help).')
Edit-3: Honorable mentions
In the question's text above I mentioned using the ctypes library to access the Windows console API directly as another workaround (under "Edit-1: More thoughts"), and using just one console in such a way that the input prompt always stays at the bottom as the nuclear option for this entire problem (under "Edit-2: Furthermore thoughts").
For using the ctypes library I would have oriented myself on the following answer to Change console font in Windows. And for using just one console I would have tried the following answer to Keep console input line below output. I think both of these answers offer potential merit regarding this problem, and maybe they are helpful to others who come across this post. If I find the time, I will try whether they work.
The issue you're up against is the architecture of the console subsystem on Windows: the console window that you normally see is not hosted by cmd.exe but by conhost.exe, and a child process of a conhost window can only connect to a single conhost instance, meaning you're limited to a single window per process.
This then leads to having an extra process for each console window you wish to have. To display anything in that window you need to look at how stdin and stdout are normally handled: they are written to and read from by the conhost instance. But if you turn stdin into a pipe (so that you can write to the process), it no longer comes from conhost but from your parent process, and as such conhost has no visibility of it. This means that anything written to stdin is only read by the child process and so is not displayed by conhost.
As far as I know there isn't a way to share the pipe like that.
As a side effect if you make stdin a pipe then all keyboard input sent to the new console window goes nowhere, as stdin is not connected to that window.
For an output-only function this means you can spawn a new process that communicates with the parent via a pipe to stdin and echoes everything to stdout.
Here's an attempt:
#!python3
import sys, subprocess, time

class Console():
    def __init__(self):
        if '-r' not in sys.argv:
            self.p = subprocess.Popen(
                ['python.exe', __file__, '-r'],
                stdin=subprocess.PIPE,
                creationflags=subprocess.CREATE_NEW_CONSOLE
            )
        else:
            while True:
                data = sys.stdin.read(1)
                if not data:
                    break
                sys.stdout.write(data)

    def write(self, data):
        self.p.stdin.write(data.encode('utf8'))
        self.p.stdin.flush()

if (__name__ == '__main__'):
    p = Console()
    if '-r' not in sys.argv:
        for i in range(0, 100):
            p.write('test %i\n' % i)
            time.sleep(1)
So: a nice simple pipe between two processes, echoing the input back to the output if it's the subprocess. I used a -r flag to signify whether the instance is the child process, but there are other ways depending on how you implement it.
Several things to note:
the flush after writing to stdin is needed as python normally uses buffering.
the way this approach is written is aimed at being in its own module hence the use of __file__
due to the use of __file__ this approach may need modification if frozen using cx_Freeze or similar.
EDIT 1
for a version that can be frozen with cx_Freeze:
Console.py
import sys, subprocess

class Console():
    def __init__(self, ischild=True):
        if not ischild:
            if hasattr(sys, 'frozen'):
                args = ['Console.exe']
            else:
                args = [sys.executable, __file__]
            self.p = subprocess.Popen(
                args,
                stdin=subprocess.PIPE,
                creationflags=subprocess.CREATE_NEW_CONSOLE
            )
        else:
            while True:
                data = sys.stdin.read(1)
                if not data:
                    break
                sys.stdout.write(data)

    def write(self, data):
        self.p.stdin.write(data.encode('utf8'))
        self.p.stdin.flush()

if (__name__ == '__main__'):
    p = Console()
test.py
from Console import Console
import sys, time

if (__name__ == '__main__'):
    p = Console(False)
    for i in range(0, 100):
        p.write('test %i\n' % i)
        time.sleep(1)
setup.py
from cx_Freeze import setup, Executable

setup(
    name = 'Console-test',
    executables = [
        Executable(
            'Console.py',
            base=None,
        ),
        Executable(
            'test.py',
            base=None,
        )
    ]
)
EDIT 2
New version that should work under dev tools like IDLE
Console.py
#!python3
import ctypes, sys, subprocess

Kernel32 = ctypes.windll.Kernel32

class Console():
    def __init__(self, ischild=True):
        if ischild:
            # try to allocate a new console
            result = Kernel32.AllocConsole()
            if result > 0:
                # if we succeed, open a handle to the console output
                sys.stdout = open('CONOUT$', mode='w')
        else:
            # if frozen we assume it's named Console.exe
            # note that when frozen 'Win32GUI' must be used as a base
            if hasattr(sys, 'frozen'):
                args = ['Console.exe']
            else:
                # otherwise we use the console-free version of python
                args = ['pythonw.exe', __file__]
            self.p = subprocess.Popen(
                args,
                stdin=subprocess.PIPE
            )
            return
        while True:
            data = sys.stdin.read(1)
            if not data:
                break
            sys.stdout.write(data)

    def write(self, data):
        self.p.stdin.write(data.encode('utf8'))
        self.p.stdin.flush()

if (__name__ == '__main__'):
    p = Console()
test.py
from Console import Console
import sys, time
if (__name__ == '__main__'):
p = Console(False)
for i in range(0, 100):
p.write('test %i\n' % i)
time.sleep(1)
setup.py
from cx_Freeze import setup, Executable
setup(
name = 'Console-test',
executables = [
Executable(
'Console.py',
base='Win32GUI',
),
Executable(
'test.py',
base=None,
)
]
)
This could be made more robust, i.e. always checking for an existing console and detaching it if found before creating a new console, and possibly better error handling.
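A possible sketch of that robustness check (my assumption, not part of the answer above): detach from any console that is already attached to the process before allocating a new one.
import ctypes

kernel32 = ctypes.windll.Kernel32

def ensure_fresh_console():
    # a non-zero window handle means a console is already attached
    if kernel32.GetConsoleWindow():
        kernel32.FreeConsole()       # detach from the existing console first
    if not kernel32.AllocConsole():  # then allocate a brand new one
        raise OSError('AllocConsole failed')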
Since you are on Windows you can use the win32console module to open a second console, or multiple consoles, for your thread or subprocess output. This is the simplest and easiest way if you are on Windows.
Here is a sample code:
import win32console
import multiprocessing

def subprocess(queue):
    win32console.FreeConsole()   # frees the subprocess from using the main console
    win32console.AllocConsole()  # creates a new console; all input and output of the subprocess goes to it
    while True:
        print(queue.get())
        # prints any output produced by the main script that was passed to the subprocess via the queue

if __name__ == "__main__":
    queue = multiprocessing.Queue()
    multiprocessing.Process(target=subprocess, args=[queue]).start()
    while True:
        print("Hello World in main console")
        queue.put("Hello World in subprocess console")
        # sends the above string to the subprocess, which prints it into its console
        # ...and whatever else you want to do in your main process
You can also do this with threading. You have to use the queue module if you want the queue functionality, as the threading module doesn't have its own queue (a small sketch follows below).
Here is the win32console module documentation
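For reference, a minimal sketch of the same producer/consumer pattern with threading and queue.Queue (my addition; note that threads share the parent process' console, so unlike the multiprocessing version above this keeps all output in one window):
import threading
import queue

def consumer(q):
    while True:
        print(q.get())   # print whatever the main thread hands over

if __name__ == "__main__":
    q = queue.Queue()
    threading.Thread(target=consumer, args=(q,), daemon=True).start()
    for i in range(3):
        print("Hello World from the main thread")
        q.put("Hello World handed to the consumer thread")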

How can I terminate running jobs without closing connection to the core? (currently using execnet)

I have a cluster of computers which uses a master node to communicate with the slave nodes in the cluster.
The main problem I'm facing with execnet is being able to kill certain jobs that are running and then having new jobs requeue on the same core that the killed job was terminated on (as I want to utilize all cores of the slave nodes at any given time).
As of now there is no way to terminate a running job using execnet, so I figured that if I could just kill the jobs manually through a bash script, say sudo kill 12345 where 12345 is the PID of the job (obtaining the PID of each job is another thing not supported by execnet, but that's another topic), then it would terminate the job and then requeue another one on the core that was just freed. It does kill the job correctly; however, it closes the connection to that channel (the core; the master node communicates with each core individually) and then does not utilize that core anymore until all jobs are done. Is there a way to terminate a running job without killing the connection to the core?
Here is the script to submit jobs
import execnet, os, sys
import re
import socket
import numpy as np
import pickle, cPickle
from copy import deepcopy
import time
import job
def main():
print 'execnet source files are located at:\n {}/\n'.format(
os.path.join(os.path.dirname(execnet.__file__))
)
# Generate a group of gateways.
work_dir = '/home/mpiuser/pn2/'
f = 'cluster_core_info.txt'
n_start, n_end = 250000, 250008
ci = get_cluster_info(f)
group, g_labels = make_gateway_group(ci, work_dir)
mch = group.remote_exec(job)
args = range(n_start, n_end+1) # List of parameters to compute factorial.
manage_jobs(group, mch, queue, g_labels, args)
# Close the group of gateways.
group.terminate()
def get_cluster_info(f):
nodes, ncores = [], []
with open(f, 'r') as fid:
while True:
line = fid.readline()
if not line:
fid.close()
break
line = line.strip('\n').split()
nodes.append(line[0])
ncores.append(int(line[1]))
return dict( zip(nodes, ncores) )
def make_gateway_group(cluster_info, work_dir):
''' Generate gateways on all cores in remote nodes. '''
print 'Gateways generated:\n'
group = execnet.Group()
g_labels = []
nodes = list(cluster_info.keys())
for node in nodes:
for i in range(cluster_info[node]):
group.makegateway(
"ssh={0}//id={0}_{1}//chdir={2}".format(
node, i, work_dir
))
sys.stdout.write(' ')
sys.stdout.flush()
print list(group)[-1]
# Generate a string 'node-id_core-id'.
g_labels.append('{}_{}'.format(re.findall(r'\d+',node)[0], i))
print ''
return group, g_labels
def get_mch_id(g_labels, string):
ids = [x for x in re.findall(r'\d+', string)]
ids = '{}_{}'.format(*ids)
return g_labels.index(ids)
def manage_jobs(group, mch, queue, g_labels, args):
args_ref = deepcopy(args)
terminated_channels = 0
active_jobs, active_args = [], []
while True:
channel, item = queue.get()
if item == 'terminate_channel':
terminated_channels += 1
print " Gateway closed: {}".format(channel.gateway.id)
if terminated_channels == len(mch):
print "\nAll jobs done.\n"
break
continue
if item != "ready":
mch_id_completed = get_mch_id(g_labels, channel.gateway.id)
depopulate_list(active_jobs, mch_id_completed, active_args)
print " Gateway {} channel id {} returned:".format(
channel.gateway.id, mch_id_completed)
print " {}".format(item)
if not args:
print "\nNo more jobs to submit, sending termination request...\n"
mch.send_each(None)
args = 'terminate_channel'
if args and \
args != 'terminate_channel':
arg = args.pop(0)
idx = args_ref.index(arg)
channel.send(arg) # arg is copied by value to the remote side of
# channel to be executed. Maybe blocked if the
# sender queue is full.
# Get the id of current channel used to submit a job,
# this id can be used to refer mch[id] to terminate a job later.
mch_id_active = get_mch_id(g_labels, channel.gateway.id)
print "Job {}: {}! submitted to gateway {}, channel id {}".format(
idx, arg, channel.gateway.id, mch_id_active)
populate_list(active_jobs, mch_id_active,
active_args, arg)
def populate_list(jobs, job_active, args, arg_active):
jobs.append(job_active)
args.append(arg_active)
def depopulate_list(jobs, job_completed, args):
i = jobs.index(job_completed)
jobs.pop(i)
args.pop(i)
if __name__ == '__main__':
main()
and here is my job.py script:
#!/usr/bin/env python
import os, sys
import socket
import time
import numpy as np
import pickle, cPickle
import random
import job
def hostname():
return socket.gethostname()
def working_dir():
return os.getcwd()
def listdir(path):
return os.listdir(path)
def fac(arg):
return np.math.factorial(arg)
def dump(arg):
path = working_dir() + '/out'
if not os.path.exists(path):
os.mkdir(path)
f_path = path + '/fac_{}.txt'.format(arg)
t_0 = time.time()
num = fac(arg) # Main operation
t_1 = time.time()
cPickle.dump(num, open(f_path, "w"), protocol=2) # Main operation
t_2 = time.time()
duration_0 = "{:.4f}".format(t_1 - t_0)
duration_1 = "{:.4f}".format(t_2 - t_1)
#num2 = cPickle.load(open(f_path, "rb"))
return '--Calculation: {} s, dumping: {} s'.format(
duration_0, duration_1)
if __name__ == '__channelexec__':
channel.send("ready")
for arg in channel:
if arg is None:
break
elif str(arg).isdigit():
channel.send((
str(arg)+'!',
job.hostname(),
job.dump(arg)
))
else:
print 'Warning! arg sent should be a number or None'
Yes, you are on the right track. Use the psutil library to manage the processes, find their PIDs, and so on.
And kill them. There is no need to involve bash anywhere; Python covers it all (a small sketch follows).
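A minimal psutil sketch (my own assumption about how the remote jobs could be identified, not tested against execnet): match the worker processes by their command line and terminate them without touching the SSH channel itself.
import psutil

def kill_jobs(marker='job.py'):
    # the marker string is a placeholder for whatever identifies your jobs
    for proc in psutil.process_iter(['pid', 'cmdline']):
        cmdline = ' '.join(proc.info['cmdline'] or [])
        if marker in cmdline:
            proc.terminate()              # ask politely first
            try:
                proc.wait(timeout=5)
            except psutil.TimeoutExpired:
                proc.kill()               # then force it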
Or, even better, program your script to terminate when the master says so.
It is usually done that way.
You can even make it start another script before terminating itself if you want/need to.
Or, if it is the same work that you would be doing in another process, just stop the current work and start the new piece in the script without terminating it at all.
And, if I may make a suggestion: don't read your file line by line; read the whole file and then use *.splitlines(). For small files, reading them in chunks just tortures the IO. You wouldn't need *.strip() either, and you should remove the unused imports too.
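For example, get_cluster_info() from the submit script could be rewritten along these lines (a sketch assuming the same "node ncores" file format):
def get_cluster_info(f):
    # read the whole file at once instead of looping over readline()
    with open(f) as fid:
        lines = fid.read().splitlines()
    pairs = (line.split() for line in lines if line)
    return dict((node, int(ncores)) for node, ncores in pairs)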

How to finish a socket file transfer in Python?

I have a Client and a Server and I need to transfer some files using sockets. I can send small messages, but when I try to send a file, the problems begin...
client.py:
from socket import *
from threading import Thread
import sys
import hashlib
class Client(object):
ASK_LIST_FILES = "#001" # 001 is the requisition code to list
# all the files
ASK_SPECIFIC_FILE = "#002" # 002 is the requisition code to a
# specific file
SEND_FILE = "#003" # 003 is the requisition code to send one
# file
AUTHENTICATION = "#004" # 004 is the requisition code to user
# authentication
listOfFiles = []
def __init__(self):
try:
self.clientSocket = socket(AF_INET, SOCK_STREAM)
except (error):
print("Failed to create a Socket.")
sys.exit()
def connect(self, addr):
try:
self.clientSocket.connect(addr)
except (error):
print("Failed to connect.")
sys.exit()
print(self.clientSocket.recv(1024).decode())
def closeConnection(self):
self.clientSocket.close()
def _askFileList(self):
try:
data = Client.ASK_LIST_FILES
self.clientSocket.sendall(data.encode())
# self._recvFileList()
except (error):
print("Failed asking for the list of files.")
self.closeConnection()
sys.exit()
thread = Thread(target = self._recvFileList)
thread.start()
def _recvFileList(self):
print("Waiting for the list...")
self.listOfFiles = []
while len(self.listOfFiles) == 0:
data = self.clientSocket.recv(1024).decode()
if (data):
self.listOfFiles = data.split(',')
if(len(self.listOfFiles) > 0):
print (self.listOfFiles)
def _askForFile(self, fileIndex):
fileIndex = fileIndex - 1
try:
data = Client.ASK_SPECIFIC_FILE + "#" + str(fileIndex)
self.clientSocket.sendall(data.encode())
except(error):
print("Failed to ask for an specific file.")
self.closeConnection()
sys.exit()
self._downloadFile(fileIndex)
def _downloadFile(self, fileIndex):
print("Starting receiving file")
f = open("_" + self.listOfFiles[fileIndex], "wb+")
read = self.clientSocket.recv(1024)
# print(read)
# f.close
while len(read) > 0:
print(read)
f.write(read)
f.flush()
read = self.clientSocket.recv(1024)
f.flush()
f.close()
self.closeConnection()
server.py
from socket import *
from threading import Thread
import sys
import glob
class Server(object):
def __init__(self):
try:
self.serverSocket = socket(AF_INET, SOCK_STREAM)
except (error):
print("Failed to create a Socket.")
sys.exit()
def connect(self, addr):
try:
self.serverSocket.bind(addr)
except (error):
print ("Failed on binding.")
sys.exit()
def closeConnection(self):
self.serverSocket.close()
def waitClients(self, num):
while True:
print("Waiting for clients...")
self.serverSocket.listen(num)
conn, addr = self.serverSocket.accept()
print("New client found...")
thread = Thread(target = self.clientThread, args = (conn,))
thread.start()
def clientThread(self, conn):
WELCOME_MSG = "Welcome to the server"
conn.send(WELCOME_MSG.encode())
while True:
data = conn.recv(2024).decode()
if(data):
# print(data)
# reply = 'OK: ' + data
# conn.sendall(reply.encode())
if(data == "#001"):
listOfFiles = self.getFileList()
strListOfFiles = ','.join(listOfFiles)
self._sendFileList(strListOfFiles, conn)
else:
dataCode = data.split('#')
print(dataCode)
if(dataCode[1] == "002"):
print("Asking for file")
self._sendFile(int(dataCode[2]), conn)
if(dataCode[1] == "003"):
print("Pedido de login")
if self._authentication(dataCode[2]):
conn.send("OK".encode())
# self._recvFile(conn)
else:
conn.send("FAILED".encode())
def _sendFile(self, fileIndex, conn):
listOfFiles = self.getFileList()
print(fileIndex)
print(listOfFiles[fileIndex])
f = open(listOfFiles[fileIndex], "rb")
read = f.read(1024)
while len(read) > 0:
conn.send(read)
read = f.read(1024)
f.close()
def _sendFileList(self, strList, conn):
try:
conn.sendall(strList.encode())
except (error):
print("Failed to send list of files.")
def getFileList(self):
return glob.glob("files/*")
When I try to get a file from my server, I can transfer everything but the connection never ends. What is going on with my code?
First, you are making the most common TCP mistake here: assuming that all data sent in a single send() will arrive identically in a single recv(). That is untrue for TCP, because it is an octet stream, not a message stream. Your code will only work under ideal (lab) conditions and could mysteriously fail in real-world usage. You should either explicitly invent message boundaries in the TCP stream, or switch to e.g. SCTP. The latter is available almost everywhere now and keeps message boundaries across a network connection.
Your second error is directly connected to the first one. When sending the file, you don't provide any explicit mark that the file has finished, so the client waits forever. You might try closing the server connection to show that the file is finished, but in that case the client won't be able to distinguish a real end of file from a connection loss; moreover, the connection won't be reusable for further commands. You should pick one of the following approaches:
Prefix the file contents with its length. That way the client knows how many bytes to receive for the file (a sketch follows after this answer).
Send the file contents as a sequence of chunks, prefixing each chunk with its length (only needed for TCP) and with a mark indicating whether this chunk is the last one (for both transports). Alternatively, a special "EOF" mark can be sent without data.
Similarly, control messages and their responses should carry either a length prefix or a terminator that can't appear inside the message itself.
When you finish developing this, take a look at FTP and HTTP; both address all the issues I described here, but in fundamentally different ways.
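A minimal sketch of the first option, length-prefixing (the helper names are mine, not from the original code): the sender announces the file size in a fixed 8-byte header and the receiver reads exactly that many bytes.
import struct

def send_file(sock, path):
    with open(path, 'rb') as f:
        data = f.read()
    sock.sendall(struct.pack('!Q', len(data)))   # 8-byte big-endian length header
    sock.sendall(data)

def recv_exact(sock, n):
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(min(4096, n - len(buf)))
        if not chunk:
            raise ConnectionError('socket closed mid-transfer')
        buf += chunk
    return buf

def recv_file(sock, path):
    (size,) = struct.unpack('!Q', recv_exact(sock, 8))
    with open(path, 'wb') as f:
        f.write(recv_exact(sock, size))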

raw input and multiprocessing in python

I have (in the main) the following code:
status = raw_input("Host? (Y/N) ")
if status=="Y":
print("host")
serverprozess = Process(target= spawn_server)
serverprozess.start()
clientprozess = Process (target = spawn_client)
clientprozess.start()
The methods called above basically do as follows:
def spawn_server():
    mserver = server.Gameserver()
    # a process for the host; spawned if and only if the player acts as host

def spawn_client():
    myClient = client.Client()
    # and a process for the client; this is spawned regardless of the player's status
It works fine, the server spawns and so does the client.
Only yesterday I added in client.Client() the following line:
self.ip = raw_input("IP-Adress: ")
The second raw_input throws an EOFError:
ret = original_raw_input(prompt)
EOFError: EOF when reading a line
Is there a way to fix this? Can I not use more than one prompt?
As you've already determined, it is easiest to call raw_input from the main process only:
status = raw_input("Host? (Y/N) ")
if status=="Y":
print("host")
serverprozess = Process(target= spawn_server)
serverprozess.start()
ip = raw_input("IP-Address: ")
clientprozess = Process (target = spawn_client, args = (ip, ))
clientprozess.start()
However, using J.F. Sebastian's solution it is also possible to duplicate sys.stdin and pass it as an argument to the subprocess:
import os
import multiprocessing as mp
import sys

def foo(stdin):
    print 'Foo: ',
    status = stdin.readline()
    # You could use raw_input instead of stdin.readline, but
    # using raw_input will cause an error if you call it in more
    # than one subprocess, while `stdin.readline` does not
    # cause an error.
    print('Received: {}'.format(status))

status = raw_input('Host? (Y/N) ')
print(status)

newstdin = os.fdopen(os.dup(sys.stdin.fileno()))
try:
    proc1 = mp.Process(target=foo, args=(newstdin, ))
    proc1.start()
    proc1.join()
finally:
    newstdin.close()

List of IP addresses/hostnames from local network in Python

How can I get a list of the IP addresses or host names from a local network easily in Python?
It would be best if it was multi-platform, but it needs to work on Mac OS X first, then others follow.
Edit: By local I mean all active addresses within a local network, such as 192.168.xxx.xxx.
So, if the IP address of my computer (within the local network) is 192.168.1.1, and I have three other connected computers, I would want it to return the IP addresses 192.168.1.2, 192.168.1.3, 192.168.1.4, and possibly their hostnames.
If by "local" you mean on the same network segment, then you have to perform the following steps:
Determine your own IP address
Determine your own netmask
Determine the network range
Scan all the addresses (except the lowest, which is your network address and the highest, which is your broadcast address).
Use your DNS's reverse lookup to determine the hostname for IP addresses which respond to your scan.
Or you can just let Python execute nmap externally and pipe the results back into your program.
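A minimal sketch of those steps using the standard library (my addition; the 192.168.1.0/24 network is just an example, and in practice you would derive it from your own address and netmask). It enumerates the host addresses in the range, skipping the network and broadcast addresses, and tries a reverse DNS lookup for each one:
import ipaddress
import socket

network = ipaddress.ip_network('192.168.1.0/24')   # example range; derive from your own IP/netmask
for host in network.hosts():                        # hosts() skips network and broadcast addresses
    try:
        name, _, _ = socket.gethostbyaddr(str(host))
        print(host, name)
    except socket.herror:
        pass                                        # no reverse DNS entry for this address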
Update: The script is now located on github.
I wrote a small Python script that leverages scapy's arping().
If you know the names of your computers you can use:
import socket
IP1 = socket.gethostbyname(socket.gethostname())     # local IP address of your computer
IP2 = socket.gethostbyname('name_of_your_computer')  # IP address of the remote computer
Otherwise you will have to scan for all the IP addresses that follow the same mask as your local computer (IP1), as stated in another answer.
I have collected the following functionality from some other threads and it works for me in Ubuntu.
import os
import socket
import multiprocessing
import subprocess
def pinger(job_q, results_q):
"""
Do Ping
:param job_q:
:param results_q:
:return:
"""
DEVNULL = open(os.devnull, 'w')
while True:
ip = job_q.get()
if ip is None:
break
try:
subprocess.check_call(['ping', '-c1', ip],
stdout=DEVNULL)
results_q.put(ip)
except:
pass
def get_my_ip():
"""
Find my IP address
:return:
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
s.close()
return ip
def map_network(pool_size=255):
"""
Maps the network
:param pool_size: amount of parallel ping processes
:return: list of valid ip addresses
"""
ip_list = list()
# get my IP and compose a base like 192.168.1.xxx
ip_parts = get_my_ip().split('.')
base_ip = ip_parts[0] + '.' + ip_parts[1] + '.' + ip_parts[2] + '.'
# prepare the jobs queue
jobs = multiprocessing.Queue()
results = multiprocessing.Queue()
pool = [multiprocessing.Process(target=pinger, args=(jobs, results)) for i in range(pool_size)]
for p in pool:
p.start()
# queue the ping processes
for i in range(1, 255):
jobs.put(base_ip + '{0}'.format(i))
for p in pool:
jobs.put(None)
for p in pool:
p.join()
# collect the results
while not results.empty():
ip = results.get()
ip_list.append(ip)
return ip_list
if __name__ == '__main__':
print('Mapping...')
lst = map_network()
print(lst)
For OSX (and Linux), a simple solution is to use either os.popen or os.system and run the arp -a command.
For example:
import os
devices = []
for device in os.popen('arp -a'): devices.append(device)
This will give you a list of the devices on your local network.
I found this network scanner in Python article and wrote this short piece of code. It does what you want! You do, however, need to know which ports are accessible on your devices. Port 22 is the SSH standard and is what I am using. I suppose you could loop over all ports. Some defaults are:
linux: [20, 21, 22, 23, 25, 80, 111, 443, 445, 631, 993, 995]
windows: [135, 137, 138, 139, 445]
mac: [22, 445, 548, 631]
import socket

def connect(hostname, port):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    socket.setdefaulttimeout(1)
    result = sock.connect_ex((hostname, port))
    sock.close()
    return result == 0

for i in range(0, 255):
    res = connect("192.168.1." + str(i), 22)
    if res:
        print("Device found at: ", "192.168.1." + str(i) + ":" + str(22))
EDIT by TheLizzard:
Using the code above and adding threading:
from threading import Thread, Lock
from time import perf_counter
from sys import stderr
from time import sleep
import socket
# I changed this from "192.168.1.%i" to "192.168.0.%i"
BASE_IP = "192.168.0.%i"
PORT = 80
class Threader:
"""
This is a class that calls a list of functions in a limited number of
threads. It uses locks to make sure the data is thread safe.
Usage:
from time import sleep
def function(i):
sleep(2)
with threader.print_lock:
print(i)
threader = Threader(10) # The maximum number of threads = 10
for i in range(20):
threader.append(function, i)
threader.start()
threader.join()
This class also provides a lock called: `<Threader>.print_lock`
"""
def __init__(self, threads=30):
self.thread_lock = Lock()
self.functions_lock = Lock()
self.functions = []
self.threads = []
self.nthreads = threads
self.running = True
self.print_lock = Lock()
def stop(self) -> None:
# Signal all worker threads to stop
self.running = False
def append(self, function, *args) -> None:
# Add the function to a list of functions to be run
self.functions.append((function, args))
def start(self) -> None:
# Create a limited number of threads
for i in range(self.nthreads):
thread = Thread(target=self.worker, daemon=True)
# We need to pass in `thread` as a parameter so we
# have to use `<threading.Thread>._args` like this:
thread._args = (thread, )
self.threads.append(thread)
thread.start()
def join(self) -> None:
# Joins the threads one by one until all of them are done.
for thread in self.threads:
thread.join()
def worker(self, thread:Thread) -> None:
# While we are running and there are functions to call:
while self.running and (len(self.functions) > 0):
# Get a function
with self.functions_lock:
function, args = self.functions.pop(0)
# Call that function
function(*args)
# Remove the thread from the list of threads.
# This may cause issues if the user calls `<Threader>.join()`
# But I haven't seen this problem while testing/using it.
with self.thread_lock:
self.threads.remove(thread)
start = perf_counter()
# I didn't need a timeout of 1 so I used 0.1
socket.setdefaulttimeout(0.1)
def connect(hostname, port):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
result = sock.connect_ex((hostname, port))
with threader.print_lock:
if result == 0:
stderr.write(f"[{perf_counter() - start:.5f}] Found {hostname}\n")
threader = Threader(10)
for i in range(255):
threader.append(connect, BASE_IP%i, PORT)
threader.start()
threader.join()
print(f"[{perf_counter() - start:.5f}] Done searching")
input("Press enter to exit.\n? ")
Try:
import socket
print ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1])
I wrote the following code to get the IP of a device with a known MAC address. With some string manipulation it can be modified to obtain all IPs (a sketch for that follows the code below). Hope this helps you.
import os
import re

# run the Windows command line statement and put its output into a string
cmd_out = os.popen("arp -a").read()
line_arr = cmd_out.split('\n')
line_count = len(line_arr)
# search all lines for the IP
for i in range(0, line_count):
    y = line_arr[i]
    z = y.find(mac_address)
    # if the MAC address is found then get the IP using regex matching
    if z > 0:
        ip_out = re.search('[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+', y, re.M | re.I)
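A sketch of the "all IPs" variant mentioned above (my addition): instead of matching a single MAC address, pull every IPv4 address out of the arp -a output.
import os
import re

cmd_out = os.popen("arp -a").read()
all_ips = re.findall(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+', cmd_out)
print(all_ips)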
I have just had this problem, and I solved it like this:
import kthread  # pip install kthread
from time import sleep
import subprocess

def getips():
    ipadressen = {}

    def ping(ipadresse):
        try:
            # send only one packet, which is faster
            outputcap = subprocess.run(['ping', ipadresse, '-n', '1'], capture_output=True)
            ipadressen[ipadresse] = outputcap
        except Exception as Fehler:
            print(Fehler)

    # prepare the threads
    t = [kthread.KThread(target=ping, name=f"ipgetter{ipend}", args=(f'192.168.0.{ipend}',)) for ipend in range(255)]
    [kk.start() for kk in t]  # start 255 threads
    while len(ipadressen) < 255:
        print('Searching network')
        sleep(.3)
    alldevices = []
    for key, item in ipadressen.items():
        # keep the address if there was neither a general failure nor an 'unreachable host'
        if 'unreachable' not in item.stdout.decode('utf-8') and 'failure' not in item.stdout.decode('utf-8'):
            alldevices.append(key)
    return alldevices

allips = getips()  # takes 1.5 seconds on my pc
One of the answers in this question might help you. There seems to be a platform agnostic version for python, but I haven't tried it yet.
Here is a small tool, scanip, that will help you get all IP addresses and their corresponding MAC addresses in the network (works on Linux).
https://github.com/vivkv/scanip
