Can Django apps be called from a locally running daemon / python script - python

I am new to the python language, so please bear with me. Also English isn't my native language so sorry for any misspelled words.
I have a question about updating a Django app from a daemon that runs locally on my server. I have a server setup which has 8 hot-swappable bays. Users can plug their hard disk(s) into the server and, after the server has detected that a new hard disk is plugged in, it starts copying the contents of the hard disk to a location on the network. The current setup displays information about the process on an LCD screen.
The current setup works fine but I need to change it in a way that the whole process is displayed on a website (since this is more user friendly). So I need to display to the user when a disk is inserted into the server, the progress of the copy task etc.
My idea is to create a Django app that gets updated when a task in process is completed, but I can't seem to find any information about updating a Django app from a locally running daemon. Is this even possible? Or is Django not the right way to go? Any ideas are welcome.
Below is the script I use to copy the contents of a disk to a location on the network. Hopefully it gives some more information about what I'm doing/trying to do.
Many thanks in advance!
Script:
#!/usr/bin/env python
import os
import sys
import glob
import re
import time
import datetime
import pyudev
import thread
import Queue
import gobject
import getopt
from pyudev import Context
from subprocess import Popen, PIPE
from subprocess import check_call
from lcdproc.server import Server
from pyudev.glib import GUDevMonitorObserver
from gobject import MainLoop
from threading import Thread
#used to show progress info
from progressbar import ProgressBar, Percentage, Bar, RotatingMarker, ETA, FileTransferSpeed
# used to set up screens
lcd = Server("localhost", 13666, debug=False)
screens = []
widgets = []
#Used for threading
disk_work_queue = Queue.Queue()
# used to store remote nfs folders
remote_dirs = ['/mnt/nfs/', '/mnt/nfs1/', '/mnt/nfs2/']
#Foldername on remote server (NFS Share name)
REMOTE_NFS_SHARE = ''
# a thread that runs indefinitely; it starts the disk
# processing functions.
class ProcessThread(Thread):
    def __init__(self):
        Thread.__init__(self)

    def run(self):
        while 1:
            try:
                disk_to_be_processed = disk_work_queue.get(block=False)
                set_widget_text(disk_to_be_processed[1], "Removed from queue..", "info", "on")
                process_disk(disk_to_be_processed[0], disk_to_be_processed[1])
            except Queue.Empty:
                time.sleep(10)
                set_main_widget_text("Please insert disks ")
# used to set a message on the LCD screen; messages are set per disk slot
HOST_TO_INDEX = {"host4": 1, "host5": 2, "host6": 3, "host7": 4}

def set_widget_text(host, message, priority, backlight):
    index = HOST_TO_INDEX.get(host)
    if index is None:
        return
    screen = screens[index]
    screen.clear()
    screen.set_priority(priority)
    screen.set_backlight(backlight)
    widgets[index].set_text(str(message))

# used to set a message for all hosts
def set_widget_text_all(hosts, message, priority, backlight):
    for host in hosts:
        set_widget_text(host, message, priority, backlight)

def set_main_widget_text(message):
    screen_main = screens[0]
    screen_main.clear()
    screen_main.set_priority("info")
    screen_main.set_backlight("on")
    widgets[0].set_text(str(message))
# mounts the disk, finds log files and copies image files to the destination
def process_disk(disk, host):
    datadisk = mount_disk(disk, host)
    source = datadisk + "/images"
    set_widget_text(host, "Processing, hold on ", "info", "on")
    cases = find_log(source)
    upload(source, cases, host)
    time.sleep(5)
    umount_disk(host)
    set_widget_text(host, "Disk can be removed", "info", "blink")
    time.sleep(10)
# search the data disk for log files containing information
# about cases and images
def find_log(src):
    inf = ""
    case = []
    for root, dirs, files in os.walk(src):
        for f in files:
            if f.endswith(".log"):
                log = open(os.path.join(root, f), 'r')
                lines = log.readlines()[2:5]
                for l in lines:
                    inf += re.sub("\n", "", l[11:]) + ":"
                log.close()
                print inf
                case.append(inf)
                inf = ""
    return case

def get_directory_size(dir):
    dir_size = 0
    for (path, dirs, files) in os.walk(dir):
        for file in files:
            filename = os.path.join(path, file)
            dir_size += os.path.getsize(filename)
    return dir_size
# copies the image files to the destination location; dc3dd is used
# to copy the files in a forensically correct way.
def upload(src, cases, host):
    remotedir = ''
    while len(cases) > 0:
        count = 0
        nfs_share_found = False
        case = cases.pop()
        onderzoek = case.split(':')[0]
        # remove the _ from the name of the object
        object = case.split(':')[1]
        #image = case.split(':')[2]
        localdir = src + '/' + onderzoek + '/' + object + '/'
        total_files = len(os.listdir(localdir))
        folder_size = get_directory_size(localdir)
        for d in remote_dirs:
            if os.path.exists(d + onderzoek + '/B/' + object.replace('_', ' ') + '/Images/'):
                nfs_share_found = True
                remotedir = d + onderzoek + '/B/' + object.replace('_', ' ') + '/Images/'
                break
        if nfs_share_found == False:
            set_widget_text(host, " Onderzoek onbekend ", "info", "flash")  # Dutch: "case unknown"
            time.sleep(30)
            return
        for root, dirs, files in os.walk(localdir):
            for uploadfile in files:
                currentfile = os.path.join(root, uploadfile)
                file_size = os.stat(currentfile).st_size
                copy_imagefile(currentfile, onderzoek, object, remotedir)
                count += 1
                percentage = int(count * file_size * 100 / folder_size)
                message = onderzoek + " Obj: " + object + "..%d%%" % percentage
                set_widget_text(host, message, "info", "on")
        set_widget_text(host, " Copy Successful! ", "info", "flash")
# the actual function to copy the files, using dc3dd
def copy_imagefile(currentfile, onderzoek, object, remotedir):
    currentfilename = os.path.basename(currentfile)
    dc3dd = Popen(["dc3dd", "if=" + currentfile, "hash=md5",
                   "log=/tmp/" + onderzoek + "_" + object + ".log",
                   "hof=" + remotedir + currentfilename,
                   "verb=on", "nwspc=on"],
                  stdin=PIPE, stdout=PIPE, stderr=PIPE)
    dc3dd_stdout = dc3dd.communicate()[1]
    awk = Popen(["awk", "NR==13 { print $1 }"], stdin=PIPE, stdout=PIPE)
    awk_stdin = awk.communicate(dc3dd_stdout)[0]
    output = awk_stdin.rstrip('\n')
    if output == "[ok]":
        return False
    else:
        return True
# when a disk gets inserted into the machine this function is called
# to prepare the disk for later use.
def device_added_callback(self, device):
    position = device.sys_path.find('host')
    host = device.sys_path[position:position + 5]
    set_widget_text(host, " New disk inserted! ", "info", "on")
    time.sleep(2)
    disk = "/dev/" + device.sys_path[-3:] + "1"
    disk_work_queue.put((disk, host))
    set_widget_text(host, " Placed in queue... ", "info", "on")

# gets called when the disk is removed from the machine
def device_removed_callback(self, device):
    position = device.sys_path.find('host')
    host = device.sys_path[position:position + 5]
    #message = 'Slot %s : Please remove drive' % host[4:]
    set_widget_text(host, " Replace disk ", "info", "on")

# mounts the partition on the data disk
def mount_disk(disk, host):
    #device = "/dev/" + disk + "1"
    mount_point = "/mnt/" + host
    if not os.path.exists(mount_point):
        os.mkdir(mount_point)
    cmd = ['mount', '-o', 'ro,noexec,noatime,nosuid', str(disk), str(mount_point)]
    check_call(cmd)
    set_widget_text(host, " Disk mounted ", "info", "on")
    return mount_point

# unmounts the partition on the data disk
def umount_disk(host):
    mount_point = "/mnt/" + host
    cmd = ['umount', str(mount_point)]
    check_call(cmd)
    os.removedirs(mount_point)
def build_screens():
    screen_main = lcd.add_screen("MAIN")
    screen_main.set_heartbeat("off")
    screen_main.set_duration(3)
    screen_main.set_priority("background")
    widget0_1 = screen_main.add_string_widget("screen0Widget1", " Welcome to AFFC ", x=1, y=1)
    widget0_2 = screen_main.add_string_widget("screen0Widget2", "Please insert disks ", x=1, y=2)
    widgets.append(widget0_2)
    screens.append(screen_main)
    screen_disk1 = lcd.add_screen("DISK1")
    screen_disk1.set_heartbeat("off")
    screen_disk1.set_duration(3)
    screen_disk1.clear()
    widget_disk1_1 = screen_disk1.add_string_widget("disk1Widget1", " Slot 1 ", x=1, y=1)
    widget_disk1_2 = screen_disk1.add_string_widget("disk1Widget2", " Please insert disk ", x=1, y=2)
    widgets.append(widget_disk1_2)
    screens.append(screen_disk1)
    screen_disk2 = lcd.add_screen("DISK2")
    screen_disk2.set_heartbeat("off")
    screen_disk2.set_duration(3)
    widget_disk2_1 = screen_disk2.add_string_widget("disk2Widget1", " Slot 2 ", x=1, y=1)
    widget_disk2_2 = screen_disk2.add_string_widget("disk2Widget2", " Please insert disk ", x=1, y=2)
    widgets.append(widget_disk2_2)
    screens.append(screen_disk2)
    screen_disk3 = lcd.add_screen("DISK3")
    screen_disk3.set_heartbeat("off")
    screen_disk3.set_duration(3)
    widget_disk3_1 = screen_disk3.add_string_widget("disk3Widget1", " Slot 3 ", x=1, y=1)
    widget_disk3_2 = screen_disk3.add_string_widget("disk3Widget2", " Please insert disk ", x=1, y=2)
    widgets.append(widget_disk3_2)
    screens.append(screen_disk3)
    screen_disk4 = lcd.add_screen("DISK4")
    screen_disk4.set_heartbeat("off")
    screen_disk4.set_duration(3)
    widget_disk4_1 = screen_disk4.add_string_widget("disk4Widget1", " Slot 4 ", x=1, y=1)
    widget_disk4_2 = screen_disk4.add_string_widget("disk4Widget2", " Please insert disk ", x=1, y=2)
    widgets.append(widget_disk4_2)
    screens.append(screen_disk4)
def restart_program():
    """Restarts the current program.

    Note: this function does not return. Any cleanup action (like
    saving data) must be done before calling this function."""
    python = sys.executable
    os.execl(python, python, *sys.argv)
def usage():
    # minimal help text; usage() was referenced but not defined in the original snippet
    print 'usage: %s [-h|--help] [-v] [-d|--destination=NFS_SHARE]' % sys.argv[0]

def main():
    global REMOTE_NFS_SHARE  # assign to the module-level setting, not a local
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hd:v", ["help", "destination="])
    except getopt.GetoptError, err:
        # print help information and exit:
        print str(err)  # will print something like "option -a not recognized"
        usage()
        sys.exit(2)
    verbose = False
    for o, a in opts:
        if o == "-v":
            verbose = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-d", "--destination"):
            REMOTE_NFS_SHARE = a
        else:
            assert False, "unhandled option"
    lcd.start_session()
    build_screens()
    #t = Thread(target=loop_disks_process())
    #t.start()
    context = pyudev.Context()
    monitor = pyudev.Monitor.from_netlink(context)
    observer = GUDevMonitorObserver(monitor)
    observer.connect('device-added', device_added_callback)
    observer.connect('device-removed', device_removed_callback)
    monitor.filter_by(subsystem='block', device_type='disk')
    monitor.enable_receiving()
    mainloop = MainLoop()
    gobject.threads_init()
    t = ProcessThread()
    t.start()
    mainloop.run()
    raw_input("Hit <enter>")
    t.running = False
    t.join()

if __name__ == "__main__":
    try:
        main()
    except Exception, e:
        restart_program()

Sorry, much too much code to read there.
I'm not sure what you mean by "updating" a Django app. Do you mean adding some data into the database? This is easy to do, either by getting your script to write directly into the DB, or by using something like a custom Django management command which can use the ORM.
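For the management command route, here is a minimal sketch of what that could look like (assuming a recent Django; the copystatus app and CopyJob model are hypothetical names for illustration):
# copystatus/management/commands/set_progress.py (hypothetical app/model)
from django.core.management.base import BaseCommand
from copystatus.models import CopyJob

class Command(BaseCommand):
    help = "Update the progress of a copy job, called by the local daemon."

    def add_arguments(self, parser):
        parser.add_argument("host")               # e.g. "host4"
        parser.add_argument("percent", type=int)  # 0-100

    def handle(self, *args, **options):
        job, _ = CopyJob.objects.get_or_create(host=options["host"])
        job.percent = options["percent"]
        job.save()
The daemon could then shell out to something like python manage.py set_progress host4 42 after each completed step.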

Take a look at Django Piston. You can implement a RESTful API on your Django app and call those APIs from your daemon. I use it on one of my projects in which some worker processes need to communicate with frontend Django apps periodically.
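On the daemon side that boils down to a plain HTTP call. A minimal sketch with urllib2 (Python 2, matching the script above; the endpoint URL and payload fields are made up for illustration - they would be whatever your API handler defines):
import json
import urllib2

def report_progress(host, percent):
    # POST the current copy progress to a (hypothetical) REST endpoint
    payload = json.dumps({"host": host, "percent": percent})
    request = urllib2.Request("http://localhost:8000/api/progress/", payload,
                              {"Content-Type": "application/json"})
    urllib2.urlopen(request, timeout=5).close()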

It could be done like this:
The daemon shares its disk information/copy progress using some inter-process communication method, like a simple text file or some in-memory store (see the sketch after this list);
A Django view could then read this info and display it to the user;
Or the daemon could call a Django management command (@Daniel Roseman) and that command would then update the app DB to represent the current state.
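A minimal sketch of the first option, assuming a simple JSON status file as the shared area (the path and field names are illustrative):
# daemon side: atomically rewrite the status file after each step
import json
import os
import tempfile

STATUS_FILE = '/var/run/diskcopy/status.json'  # hypothetical location

def write_status(state):
    # write to a temp file, then rename; the rename is atomic on POSIX,
    # so the Django view never reads a half-written file
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(STATUS_FILE))
    with os.fdopen(fd, 'w') as f:
        json.dump(state, f)
    os.rename(tmp, STATUS_FILE)

# example: write_status({'host4': {'inserted': True, 'percent': 42}})
A Django view would then just open the same file, json.load() it and render the result.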

Consider using something like Memcached as a shared area to store the state of the drives.
As the drives are added or removed, the daemon should write those changes to Memcached, and on each page load the Django web app should read the state from Memcached. You could use a management command and a SQL database, but that seems like too many moving parts for such a simple problem: you're only storing a handful of boolean flags.
You might even try a micro-framework like Flask instead of Django, to reduce the complexity even more.
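A minimal sketch of that shared area, assuming the python-memcached client and illustrative key names:
import memcache

mc = memcache.Client(['127.0.0.1:11211'])

# daemon side: record state changes as they happen
mc.set('disk:host4:inserted', True)
mc.set('disk:host4:percent', 42)

# web side: read the flags on each page load
inserted = mc.get('disk:host4:inserted')
percent = mc.get('disk:host4:percent') or 0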


Python - add multiple files to folder but run event only once

I'm trying to make a watchdog that listens to changes (adding/deleting files) in a folder.
My problem is that every time I copy-create/delete several files in this folder (and its subfolders), the event chain starts one by one for each and every file.
How can I make the on_event() method be invoked only once, after multiple files are created/deleted?
Let's say I'm copying two images to this folder.
I want the event handler to be invoked only once after the file transfer finishes - and not twice, once for each image, as it currently works.
Thanks!
The code runs on a Raspberry Pi 3 with Python 3.7.
Here's the code:
import os
import time
import psutil
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

i = 0

def show_stats():
    global i
    read = "read #" + str(i) + ":"
    mem = "\nmemory in use: " + str(psutil.virtual_memory().percent) + "%"
    cpu = "\ncpu load: " + str(psutil.cpu_percent()) + "%"
    temp = "\ncurrent " + \
        os.popen("vcgencmd measure_temp").readline().replace(
            "=", ": ").replace("'C", " C°")
    end = "\n=================="
    i += 1
    stats = read + mem + cpu + temp + end
    return stats

class Watcher:
    DIRECTORY_TO_WATCH = r'/home/pi/Desktop/jsSlider/images'

    def __init__(self):
        self.observer = Observer()
        print("watching ", self.DIRECTORY_TO_WATCH, "...")

    def run(self):
        event_handler = Handler()
        self.observer.schedule(
            event_handler, self.DIRECTORY_TO_WATCH, recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(5)
                print(show_stats())
        except Exception as e:
            self.observer.stop()
            print(e)
        self.observer.join()

class Handler(FileSystemEventHandler):
    @staticmethod
    def on_event(event):
        wait = 1
        if event.event_type == 'created' or event.event_type == 'deleted':
            print("Received event - %s. " % event.src_path, str(event.event_type))
            time.sleep(wait)  # I found that it's best to give some timeout between commands because it overwhelmed the pi for some reason (one second seems to be enough)...
            os.system('python /home/pi/Desktop/Slider/scripts/arr_edit.py')  # recreate the JS array
            time.sleep(wait)
            os.system('cp -r /home/pi/Desktop/jsSlider/scripts/imgArr.js /home/pi/Desktop/jsSlider/themes/1')  # copy the newly created JS array to its place
            time.sleep(wait)
            os.system('sudo pkill chromium')  # "refresh" the page - the kiosk mode reactivates the process...
            # os.system('cls')
            print('done!')

if __name__ == '__main__':
    w = Watcher()
    w.run()
Edit I
There is a poor rpi3 connected to a TV in some clinic, working in kiosk mode to display images from a local HTML file (with some JS code - the slide show runs with an existing JS script - I can upload everything if requested | the images are also on the pi itself).
What I'm trying to achieve is to automatically:
rebuild the JS array (with a working python script - code below (arr_edit.py)).
copy the new array to its desired location. (shell command)
and restart chromium with "pkill chromium". (shell command)
Now, I cannot allow the commands to run every time someone copies/deletes multiple images - which means:
whenever 2+ images are being added, I cannot "restart" the kiosk
(sudo pkill chromium) each and every time a file is created.
Every time you copy multiple files (images in this case), an entirely individual event.created is invoked for each image created in the folder; therefore for 5 images there will be 5 different event.created events that fire the on_event() method, each in its own turn, making the kiosk restart 5 times in a row. (Now think of what happens if a 50-file transfer occurs - the pi will just crash.)
Therefore, I need a method to invoke the command only once after the file transfer finishes, regardless of how many files have been changed/created/deleted in the folder.
arr_edit.py (not entirely my code):
import os

dir_path = r'/home/pi/Desktop/jsSlider/images'
file_path = r'/home/pi/Desktop/jsSlider/scripts/imgArr.js'
directory = os.fsencode(dir_path)
arr_name = 'images=[\n'
start_str = '{"img":"./images/'
end_str = '"},\n'
images = ''

def writer(array, imagesList):
    str_to_write = array + imagesList + ']'
    f = open(file_path, 'w')
    f.write(str_to_write)
    f.close()

file_list = os.listdir(directory)
for file in file_list:
    filename = os.fsdecode(file)
    if filename.endswith((".jpg", ".jpeg", ".webp")):
        if file == file_list[len(file_list) - 1]:
            end_str = '"}\n'
        images += start_str + filename + end_str
        continue
    else:
        continue

writer(arr_name, images)
output JS array (sample from inside imgArr.js):
images=[
{"img":"./images/246.jpg"},
{"img":"./images/128.jpg"},
{"img":"./images/238.webp"},
{"img":"./images/198.jpg"},
{"img":"./images/247.webp"}
]
As Mark suggested in the comments, I added a check to see if the JS file has changed in the past 5 minutes.
If the file changed, wait another 5 minutes and re-initiate the change (in case more files have been added to the folder), so the new, larger files will also be shown in this run.
Works like a charm!
Many thanks!!
here's the final watchdog.py
import os
import time
import psutil
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

i = 0

def show_stats():
    global i
    read = "read #" + str(i) + ":"
    mem = "\nmemory in use: " + str(psutil.virtual_memory().percent) + "%"
    cpu = "\ncpu load: " + str(psutil.cpu_percent()) + "%"
    temp = "\ncurrent " + \
        os.popen("vcgencmd measure_temp").readline().replace(
            "=", ": ").replace("'C", " C°")
    end = "\n=================="
    i += 1
    stats = read + mem + cpu + temp + end
    return stats

def wait_for_file(file):
    time.sleep(300)
    if age(file) >= 5:
        modify()

def modify():
    os.system('python /home/pi/Desktop/jsSlider/scripts/arr_edit.py')
    os.system(
        'cp -r /home/pi/Desktop/jsSlider/scripts/imgArr.js /home/pi/Desktop/jsSlider/themes/1')
    time.sleep(1)
    os.system('sudo pkill chromium')
    # os.system('cls')
    print("done!\nwatching...")

def age(filename):
    return (time.time() - os.path.getmtime(filename)) // 60

class Watcher:
    DIRECTORY_TO_WATCH = r'/home/pi/Desktop/jsSlider/images'

    def __init__(self):
        self.observer = Observer()
        print("watching ", self.DIRECTORY_TO_WATCH, "...")

    def run(self):
        event_handler = Handler()
        self.observer.schedule(
            event_handler, self.DIRECTORY_TO_WATCH, recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(5)
                print(show_stats())
        except Exception as e:
            self.observer.stop()
            print(e)
        self.observer.join()

class Handler(FileSystemEventHandler):
    @staticmethod
    def on_any_event(event):
        file = r'/home/pi/Desktop/jsSlider/scripts/imgArr.js'
        if event.event_type == 'created' or event.event_type == 'deleted':
            print("Received event - %s. " %
                  event.src_path, str(event.event_type))
            time.sleep(5)
            if age(file) < 5:
                wait_for_file(file)
            else:
                modify()

if __name__ == '__main__':
    w = Watcher()
    w.run()

Python subprocess with real-time input and multiple consoles

The main issue
In a nutshell: I want two consoles for my program. One for active user input. And the other one for pure log output. (Working code, including the accepted answer, is in the question's text below, under section "Edit-3". Under sections "Edit-1" and "Edit-2" are functioning workarounds.)
For this I have a main command-line Python script, which is supposed to open an additional console for log output only. I intend to redirect the log output, which would otherwise be printed on the main script's console, to the stdin of the second console, which I start as a subprocess. (I use subprocess because I didn't find any other way to open a second console.)
The problem is that I seem to be able to send to the stdin of this second console - however, nothing gets printed on the second console.
Following is the code I used for experimenting (with Python 3.4 on PyDev under Windows 10). The function writing(input, pipe, process) contains the part where the generated string is copied to the stdin (passed in as pipe) of the console opened via subprocess. The function writing(...) is run via the class writetest(Thread). (I left in some code which I commented out.)
import os
import sys
import io
import time
import threading
from cmd import Cmd
from queue import Queue
from subprocess import Popen, PIPE, CREATE_NEW_CONSOLE

REPETITIONS = 3

# Position of "The class" (Edit-2)
# Position of "The class" (Edit-1)

class generatetest(threading.Thread):
    def __init__(self, queue):
        self.output = queue
        threading.Thread.__init__(self)

    def run(self):
        print('run generatetest')
        generating(REPETITIONS, self.output)
        print('generatetest done')

    def getout(self):
        return self.output

class writetest(threading.Thread):
    def __init__(self, input=None, pipe=None, process=None):
        if input is None:  # just in case
            self.input = Queue()
        else:
            self.input = input
        if pipe is None:  # just in case
            self.pipe = PIPE
        else:
            self.pipe = pipe
        if process is None:  # just in case
            self.process = Popen(r'C:\Windows\System32\cmd.exe', universal_newlines=True, creationflags=CREATE_NEW_CONSOLE)
        else:
            self.process = process
        threading.Thread.__init__(self)

    def run(self):
        print('run writetest')
        writing(self.input, self.pipe, self.process)
        print('writetest done')

# Position of "The function" (Edit-2)
# Position of "The function" (Edit-1)

def generating(maxint, outline):
    print('def generating')
    for i in range(maxint):
        time.sleep(1)
        outline.put_nowait(i)

def writing(input, pipe, process):
    print('def writing')
    while True:
        try:
            print('try')
            string = str(input.get(True, REPETITIONS)) + "\n"
            pipe = io.StringIO(string)
            pipe.flush()
            time.sleep(1)
            # print(pipe.readline())
        except:
            print('except')
            break
        finally:
            print('finally')
            pass
data_queue = Queue()
data_pipe = sys.stdin
# printer = sys.stdout
# data_pipe = os.pipe()[1]
# The code of 'C:\\Users\\Public\\Documents\\test\\test-cmd.py'
# can be found in the question's text further below under "More code"
exe = 'C:\Python34\python.exe'
# exe = 'C:\Windows\System32\cmd.exe'
arg = 'C:\\Users\\Public\\Documents\\test\\test-cmd.py'
arguments = [exe, arg]
# proc = Popen(arguments, universal_newlines=True, creationflags=CREATE_NEW_CONSOLE)
proc = Popen(arguments, stdin=data_pipe, stdout=PIPE, stderr=PIPE,
universal_newlines=True, creationflags=CREATE_NEW_CONSOLE)
# Position of "The call" (Edit-2 & Edit-1) - file init (proxyfile)
# Position of "The call" (Edit-2) - thread = sockettest()
# Position of "The call" (Edit-1) - thread0 = logtest()
thread1 = generatetest(data_queue)
thread2 = writetest(data_queue, data_pipe, proc)
# time.sleep(5)
# Position of "The call" (Edit-2) - thread.start()
# Position of "The call" (Edit-1) - thread0.start()
thread1.start()
thread2.start()
# Position of "The call" (Edit-2) - thread.join()
# Position of "The call" (Edit-1) - thread.join()
thread1.join(REPETITIONS * REPETITIONS)
thread2.join(REPETITIONS * REPETITIONS)
# data_queue.join()
# receiver = proc.communicate(stdin, 5)
# print('OUT:' + receiver[0])
# print('ERR:' + receiver[1])
print("1st part finished")
A slightly different approach
The following additional code snippet works in regard to extracting the stdout from the subprocess. However, the previously sent stdin still isn't printed on the second console. Also, the second console is closed immediately.
proc2 = Popen(['C:\Python34\python.exe', '-i'],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
creationflags=CREATE_NEW_CONSOLE)
proc2.stdin.write(b'2+2\n')
proc2.stdin.flush()
print(proc2.stdout.readline())
proc2.stdin.write(b'len("foobar")\n')
proc2.stdin.flush()
print(proc2.stdout.readline())
time.sleep(1)
proc2.stdin.close()
proc2.terminate()
proc2.wait(timeout=0.2)
print("Exiting Main Thread")
More info
As soon as I use one of the parameters stdin=data_pipe, stdout=PIPE, stderr=PIPE for starting the subprocess, the resulting second console isn't active and doesn't accept keyboard input (which isn't desired, though it might be helpful information here).
The subprocess method communicate() can't be used for this as it waits for the process to end.
More code
Finally, the code for the file that runs in the second console.
C:\Users\Public\Documents\test\test-cmd.py
from cmd import Cmd
from time import sleep
from datetime import datetime

INTRO = 'command line'
PROMPT = '> '

class CommandLine(Cmd):
    """Custom console"""
    def __init__(self, intro=INTRO, prompt=PROMPT):
        Cmd.__init__(self)
        self.intro = intro
        self.prompt = prompt
        self.doc_header = intro
        self.running = False

    def do_dummy(self, args):
        """Runs a dummy method."""
        print("Do the dummy.")
        self.running = True
        while self.running == True:
            print(datetime.now())
            sleep(5)

    def do_stop(self, args):
        """Stops the dummy method."""
        print("Stop the dummy, if you can.")
        self.running = False

    def do_exit(self, args):
        """Exits this console."""
        print("Do console exit.")
        exit()

if __name__ == '__main__':
    cl = CommandLine()
    cl.prompt = PROMPT
    cl.cmdloop(INTRO)
Thoughts
So far I'm not even certain whether the Windows command line interface offers the capability to accept input other than from the keyboard (instead of the desired stdin pipe or similar). Though, since it appears to have some sort of passive mode, I expect it does.
Why is this not working?
Edit-1: Workaround via file (proof of concept)
Using a file as a workaround in order to display its new content, as suggested in the answer to Working multiple consoles in python, works in general. However, since the log file will grow to many GB, it isn't a practical solution in this case. It would at least require splitting the file and handling that properly.
The class:
class logtest(threading.Thread):
    def __init__(self, file):
        self.file = file
        threading.Thread.__init__(self)

    def run(self):
        print('run logtest')
        logging(self.file)
        print('logtest done')
The function:
def logging(file):
    pexe = 'C:\\Python34\\python.exe '
    script = 'C:\\Users\\Public\\Documents\\test\\test-004.py'
    filek = '--file'
    filev = file
    file = open(file, 'a')
    file.close()
    time.sleep(1)
    print('LOG START (outer): ' + script + ' ' + filek + ' ' + filev)
    proc = Popen([pexe, script, filek, filev], universal_newlines=True, creationflags=CREATE_NEW_CONSOLE)
    print('LOG FINISH (outer): ' + script + ' ' + filek + ' ' + filev)
    time.sleep(2)
The call:
# The file tempdata is filled with several strings of "0\n1\n2\n"
# Looking like this:
# 0
# 1
# 2
# 0
# 1
# 2
proxyfile = 'C:\\Users\\Public\\Documents\\test\\tempdata'
f = open(proxyfile, 'a')
f.close()
time.sleep(1)
thread0 = logtest(proxyfile)
thread0.start()
thread0.join(REPETITIONS * REPETITIONS)
The tail script ("test-004.py"):
As Windows doesn't offer the tail command, I used the following script instead (based on the answer to How to implement a pythonic equivalent of tail -F?), which worked for this. The additional, yet kind of unnecessary, class CommandLine(Cmd) was initially an attempt to keep the second console open (because the script file argument was missing). Though, it also proved useful for keeping the console fluently printing the new log file content. Otherwise the output wasn't deterministic/predictable.
import time
import sys
import os
import threading
from cmd import Cmd
from argparse import ArgumentParser

def main(args):
    parser = ArgumentParser(description="Parse arguments.")
    parser.add_argument("-f", "--file", type=str, default='', required=False)
    arguments = parser.parse_args(args)
    if not arguments.file:
        print('LOG PRE-START (inner): file argument not found. Creating new default entry.')
        arguments.file = 'C:\\Users\\Public\\Documents\\test\\tempdata'
    print('LOG START (inner): ' + os.path.abspath(os.path.dirname(__file__)) + ' ' + arguments.file)
    f = open(arguments.file, 'a')
    f.close()
    time.sleep(1)
    words = ['word']
    console = CommandLine(arguments.file, words)
    console.prompt = ''
    thread = threading.Thread(target=console.cmdloop, args=('', ))
    thread.start()
    print("\n")
    for hit_word, hit_sentence in console.watch():
        print("Found %r in line: %r" % (hit_word, hit_sentence))
    print('LOG FINISH (inner): ' + os.path.abspath(os.path.dirname(__file__)) + ' ' + arguments.file)

class CommandLine(Cmd):
    """Custom console"""
    def __init__(self, fn, words):
        Cmd.__init__(self)
        self.fn = fn
        self.words = words

    def watch(self):
        fp = open(self.fn, 'r')
        while True:
            time.sleep(0.05)
            new = fp.readline()
            print(new)
            # Once all lines are read this just returns ''
            # until the file changes and a new line appears
            if new:
                for word in self.words:
                    if word in new:
                        yield (word, new)
            else:
                time.sleep(0.5)

if __name__ == '__main__':
    print('LOG START (inner - as main).')
    main(sys.argv[1:])
Edit-1: More thoughts
Three workarounds which I didn't try yet and which might work are sockets (also suggested in this answer to Working multiple consoles in python), getting a process object via the process ID for more control, and using the ctypes library for directly accessing the Windows console API, which would allow setting the screen buffer, as a console can have multiple buffers but only one active one (stated in the remarks of the documentation for the CreateConsoleScreenBuffer function).
However, using sockets might be the easiest one. And at least the size of the log doesn't matter this way. Though, connection problems might be an issue here.
Edit-2: Workaround via sockets (proof of concept)
Using sockets as a workaround in order to display new log entries, as was also suggested in the answer to Working multiple consoles in python, works in general, too. Though, this seems to be too much effort for something which should simply be sent to the process of the receiving console.
The class:
class sockettest(threading.Thread):
    def __init__(self, host, port, file):
        self.host = host
        self.port = port
        self.file = file
        threading.Thread.__init__(self)

    def run(self):
        print('run sockettest')
        socketing(self.host, self.port, self.file)
        print('sockettest done')
The function:
def socketing(host, port, file):
    pexe = 'C:\\Python34\\python.exe '
    script = 'C:\\Users\\Public\\Documents\\test\\test-005.py'
    hostk = '--address'
    hostv = str(host)
    portk = '--port'
    portv = str(port)
    filek = '--file'
    filev = file
    file = open(file, 'a')
    file.close()
    time.sleep(1)
    print('HOST START (outer): ' + pexe + script + ' ' + hostk + ' ' + hostv + ' ' + portk + ' ' + portv + ' ' + filek + ' ' + filev)
    proc = Popen([pexe, script, hostk, hostv, portk, portv, filek, filev], universal_newlines=True, creationflags=CREATE_NEW_CONSOLE)
    print('HOST FINISH (outer): ' + pexe + script + ' ' + hostk + ' ' + hostv + ' ' + portk + ' ' + portv + ' ' + filek + ' ' + filev)
    time.sleep(2)
The call:
# The file tempdata is filled with several strings of "0\n1\n2\n"
# Looking like this:
# 0
# 1
# 2
# 0
# 1
# 2
proxyfile = 'C:\\Users\\Public\\Documents\\test\\tempdata'
f = open(proxyfile, 'a')
f.close()
time.sleep(1)
thread = sockettest('127.0.0.1', 8888, proxyfile)
thread.start()
thread.join(REPETITIONS * REPETITIONS)
The socket script ("test-005.py"):
The following script is based on Python: Socket programming server-client application using threads. Here I just kept the class CommandLine(Cmd) as the log entry generator. At this point it shouldn't be a problem to put the client into the main script, which calls the second console, and then feed the queue with real log entries instead of (new) file lines. (The server is the printer.)
import socket
import sys
import threading
import time
import traceback
from cmd import Cmd
from argparse import ArgumentParser
from queue import Queue

BUFFER_SIZE = 5120

class CommandLine(Cmd):
    """Custom console"""
    def __init__(self, fn, words, queue):
        Cmd.__init__(self)
        self.fn = fn
        self.words = words
        self.queue = queue

    def watch(self):
        fp = open(self.fn, 'r')
        while True:
            time.sleep(0.05)
            new = fp.readline()
            # Once all lines are read this just returns ''
            # until the file changes and a new line appears
            self.queue.put_nowait(new)

def main(args):
    parser = ArgumentParser(description="Parse arguments.")
    parser.add_argument("-a", "--address", type=str, default='127.0.0.1', required=False)
    parser.add_argument("-p", "--port", type=str, default='8888', required=False)
    parser.add_argument("-f", "--file", type=str, default='', required=False)
    arguments = parser.parse_args(args)
    if not arguments.address:
        print('HOST PRE-START (inner): host argument not found. Creating new default entry.')
        arguments.host = '127.0.0.1'
    if not arguments.port:
        print('HOST PRE-START (inner): port argument not found. Creating new default entry.')
        arguments.port = '8888'
    if not arguments.file:
        print('HOST PRE-START (inner): file argument not found. Creating new default entry.')
        arguments.file = 'C:\\Users\\Public\\Documents\\test\\tempdata'
    file_queue = Queue()
    print('HOST START (inner): ' + ' ' + arguments.address + ':' + arguments.port + ' --file ' + arguments.file)
    # Start server
    thread = threading.Thread(target=start_server, args=(arguments.address, arguments.port, ))
    thread.start()
    time.sleep(1)
    # Start client
    thread = threading.Thread(target=start_client, args=(arguments.address, arguments.port, file_queue, ))
    thread.start()
    # Start file reader
    f = open(arguments.file, 'a')
    f.close()
    time.sleep(1)
    words = ['word']
    console = CommandLine(arguments.file, words, file_queue)
    console.prompt = ''
    thread = threading.Thread(target=console.cmdloop, args=('', ))
    thread.start()
    print("\n")
    for hit_word, hit_sentence in console.watch():
        print("Found %r in line: %r" % (hit_word, hit_sentence))
    print('HOST FINISH (inner): ' + ' ' + arguments.address + ':' + arguments.port)

def start_client(host, port, queue):
    host = host
    port = int(port)  # arbitrary non-privileged port
    queue = queue
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        soc.connect((host, port))
    except:
        print("Client connection error" + str(sys.exc_info()))
        sys.exit()
    print("Enter 'quit' to exit")
    message = ""
    while message != 'quit':
        time.sleep(0.05)
        if message != "":
            soc.sendall(message.encode("utf8"))
            if soc.recv(BUFFER_SIZE).decode("utf8") == "-":
                pass  # null operation
        string = ""
        if not queue.empty():
            string = str(queue.get_nowait()) + "\n"
        if string == None or string == "":
            message = ""
        else:
            message = string
    soc.send(b'--quit--')

def start_server(host, port):
    host = host
    port = int(port)  # arbitrary non-privileged port
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # SO_REUSEADDR flag tells the kernel to reuse a local socket in TIME_WAIT state,
    # without waiting for its natural timeout to expire
    soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    print("Socket created")
    try:
        soc.bind((host, port))
    except:
        print("Bind failed. Error : " + str(sys.exc_info()))
        sys.exit()
    soc.listen(5)  # queue up to 5 requests
    print("Socket now listening")
    # infinite loop - do not reset for every request
    while True:
        connection, address = soc.accept()
        ip, port = str(address[0]), str(address[1])
        print("Connected with " + ip + ":" + port)
        try:
            threading.Thread(target=client_thread, args=(connection, ip, port)).start()
        except:
            print("Thread did not start.")
            traceback.print_exc()
    soc.close()

def client_thread(connection, ip, port, max_buffer_size=BUFFER_SIZE):
    is_active = True
    while is_active:
        client_input = receive_input(connection, max_buffer_size)
        if "--QUIT--" in client_input:
            print("Client is requesting to quit")
            connection.close()
            print("Connection " + ip + ":" + port + " closed")
            is_active = False
        elif not client_input == "":
            print("{}".format(client_input))
            connection.sendall("-".encode("utf8"))
        else:
            connection.sendall("-".encode("utf8"))

def receive_input(connection, max_buffer_size):
    client_input = connection.recv(max_buffer_size)
    client_input_size = sys.getsizeof(client_input)
    if client_input_size > max_buffer_size:
        print("The input size is greater than expected {}".format(client_input_size))
    decoded_input = client_input.decode("utf8").rstrip()  # decode and strip end of line
    result = process_input(decoded_input)
    return result

def process_input(input_str):
    return str(input_str).upper()

if __name__ == '__main__':
    print('HOST START (inner - as main).')
    main(sys.argv[1:])
Edit-2: Furthermore thoughts
Having direct control of the subprocess' console input pipe/buffer would be the preferable solution to this problem. For this the bounty of 500 reputation is offered.
Unfortunately I'm running out of time. Therefore I might use one of those workarounds for now and replace it with the proper solution later. Or maybe I'll have to use the nuclear option: just one console, where the ongoing log output is paused during any user keyboard input and printed afterwards. Of course this might lead to buffer problems when the user decides to type something and stops halfway through.
Edit-3: Code including the accepted answer (one file)
With the answer from James Kent I get the desired behavior when I start a script with the code via the Windows command line (cmd) or PowerShell. However, when I start this same script via Eclipse/PyDev with "Python run", the output is always printed on the main Eclipse/PyDev console, while the second console of the subprocess remains empty and stays inactive. Though, I guess this is a system/environment peculiarity and a different issue.
from sys import argv, stdin, stdout
from threading import Thread
from cmd import Cmd
from time import sleep
from datetime import datetime
from subprocess import Popen, PIPE, CREATE_NEW_CONSOLE

INTRO = 'command line'
PROMPT = '> '

class CommandLine(Cmd):
    """Custom console"""
    def __init__(self, subprocess, intro=INTRO, prompt=PROMPT):
        Cmd.__init__(self)
        self.subprocess = subprocess
        self.intro = intro
        self.prompt = prompt
        self.doc_header = intro
        self.running = False

    def do_date(self, args):
        """Prints the current date and time."""
        print(datetime.now())
        sleep(1)

    def do_exit(self, args):
        """Exits this command line application."""
        print("Exit by user command.")
        if self.subprocess is not None:
            try:
                self.subprocess.terminate()
            except:
                self.subprocess.kill()
        exit()

class Console():
    def __init__(self):
        if '-r' not in argv:
            self.p = Popen(
                ['python.exe', __file__, '-r'],
                stdin=PIPE,
                creationflags=CREATE_NEW_CONSOLE
            )
        else:
            while True:
                data = stdin.read(1)
                if not data:
                    # break
                    sleep(1)
                    continue
                stdout.write(data)

    def write(self, data):
        self.p.stdin.write(data.encode('utf8'))
        self.p.stdin.flush()

    def getSubprocess(self):
        if self.p:
            return self.p
        else:
            return None

class Feeder(Thread):
    def __init__(self, console):
        self.console = console
        Thread.__init__(self)

    def run(self):
        feeding(self.console)

def feeding(console):
    for i in range(0, 100):
        console.write('test %i\n' % i)
        sleep(1)

if __name__ == '__main__':
    p = Console()
    if '-r' not in argv:
        thread = Feeder(p)
        thread.setDaemon(True)
        thread.start()
        cl = CommandLine(subprocess=p.getSubprocess())
        cl.use_rawinput = False
        cl.prompt = PROMPT
        cl.cmdloop('\nCommand line is waiting for user input (e.g. help).')
Edit-3: Honorable mentions
In the questions's text above I have mentioned using the ctypes library for directly accessing the Windows console API as another workround (under "Edit-1: More thoughts"). Or using just one console in a way, that the input prompt always stays at the bottom as nuclear option to this entire problem. (under "Edit-2: Furthermore thoughts")
For using the ctypes library I would have oriented myself on the following answer to Change console font in Windows. And for using just one console I would have tried the following answer to Keep console input line below output. I think both of these answers may offer potential merit regarding this problem, and maybe they are helpful to others who come across this post. Also, if I find the time, I will try whether they work somehow.
The issue you're up against is the architecture of the console subsystem on Windows: the console window that you normally see is not hosted by cmd.exe but by conhost.exe, and a child process of a conhost window can only connect to a single conhost instance, meaning you're limited to a single window per process.
This then leads to having an extra process for each console window you wish to have. To display anything in that window you need to look at how stdin and stdout are normally handled: they are written to and read from by the conhost instance. But if you turn stdin into a pipe (so you can write to the process), it no longer comes from conhost but from your parent process, and as such conhost has no visibility of it. This means that anything written to stdin is only read by the child process, so it is not displayed by conhost.
As far as I know there isn't a way to share the pipe like that.
As a side effect if you make stdin a pipe then all keyboard input sent to the new console window goes nowhere, as stdin is not connected to that window.
For an output-only function this means you can spawn a new process that communicates with the parent via a pipe to stdin and echoes everything to stdout.
Here's an attempt:
#!python3
import sys, subprocess, time

class Console():
    def __init__(self):
        if '-r' not in sys.argv:
            self.p = subprocess.Popen(
                ['python.exe', __file__, '-r'],
                stdin=subprocess.PIPE,
                creationflags=subprocess.CREATE_NEW_CONSOLE
            )
        else:
            while True:
                data = sys.stdin.read(1)
                if not data:
                    break
                sys.stdout.write(data)

    def write(self, data):
        self.p.stdin.write(data.encode('utf8'))
        self.p.stdin.flush()

if __name__ == '__main__':
    p = Console()
    if '-r' not in sys.argv:
        for i in range(0, 100):
            p.write('test %i\n' % i)
            time.sleep(1)
So a nice simple pipe between two processes, echoing the input back to the output if it's the subprocess. I used a -r flag to signify whether the instance is the child process, but there are other ways depending on how you implement it.
Several things to note:
the flush after writing to stdin is needed as python normally buffers output.
the way this approach is written is aimed at it being in its own module, hence the use of __file__
due to the use of __file__ this approach may need modification if frozen using cx_Freeze or similar.
EDIT 1
for a version that can be frozen with cx_Freeze:
Console.py
import sys, subprocess

class Console():
    def __init__(self, ischild=True):
        if not ischild:
            if hasattr(sys, 'frozen'):
                args = ['Console.exe']
            else:
                args = [sys.executable, __file__]
            self.p = subprocess.Popen(
                args,
                stdin=subprocess.PIPE,
                creationflags=subprocess.CREATE_NEW_CONSOLE
            )
        else:
            while True:
                data = sys.stdin.read(1)
                if not data:
                    break
                sys.stdout.write(data)

    def write(self, data):
        self.p.stdin.write(data.encode('utf8'))
        self.p.stdin.flush()

if __name__ == '__main__':
    p = Console()
test.py
from Console import Console
import sys, time

if __name__ == '__main__':
    p = Console(False)
    for i in range(0, 100):
        p.write('test %i\n' % i)
        time.sleep(1)
setup.py
from cx_Freeze import setup, Executable

setup(
    name='Console-test',
    executables=[
        Executable(
            'Console.py',
            base=None,
        ),
        Executable(
            'test.py',
            base=None,
        )
    ]
)
EDIT 2
New version that should work under dev tools like IDLE
Console.py
#!python3
import ctypes, sys, subprocess

Kernel32 = ctypes.windll.Kernel32

class Console():
    def __init__(self, ischild=True):
        if ischild:
            # try to allocate a new console
            result = Kernel32.AllocConsole()
            if result > 0:
                # if we succeed, open a handle to the console output
                sys.stdout = open('CONOUT$', mode='w')
        else:
            # if frozen we assume it's named Console.exe
            # note that when frozen 'Win32GUI' must be used as a base
            if hasattr(sys, 'frozen'):
                args = ['Console.exe']
            else:
                # otherwise we use the console-free version of python
                args = ['pythonw.exe', __file__]
            self.p = subprocess.Popen(
                args,
                stdin=subprocess.PIPE
            )
            return
        while True:
            data = sys.stdin.read(1)
            if not data:
                break
            sys.stdout.write(data)

    def write(self, data):
        self.p.stdin.write(data.encode('utf8'))
        self.p.stdin.flush()

if __name__ == '__main__':
    p = Console()
test.py
from Console import Console
import sys, time

if __name__ == '__main__':
    p = Console(False)
    for i in range(0, 100):
        p.write('test %i\n' % i)
        time.sleep(1)
setup.py
from cx_Freeze import setup, Executable

setup(
    name='Console-test',
    executables=[
        Executable(
            'Console.py',
            base='Win32GUI',
        ),
        Executable(
            'test.py',
            base=None,
        )
    ]
)
This could be made more robust, i.e. always checking for an existing console and detaching it if found before creating a new console, and possibly better error handling.
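As a rough illustration of that robustness note, a minimal sketch (GetConsoleWindow, FreeConsole and AllocConsole are standard Kernel32 functions; the error handling here is illustrative):
import ctypes

Kernel32 = ctypes.windll.Kernel32

def ensure_own_console():
    # detach from any console the process already has before allocating a new one
    if Kernel32.GetConsoleWindow():
        Kernel32.FreeConsole()
    if Kernel32.AllocConsole() == 0:
        raise OSError('AllocConsole failed')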
Since you are on Windows you can use the win32console module to open a second console, or multiple consoles, for your thread or subprocess output. This is the simplest and easiest way, and it works if you are on Windows.
Here is a sample code:
import win32console
import multiprocessing

def subprocess(queue):
    win32console.FreeConsole()  # frees the subprocess from using the main console
    win32console.AllocConsole()  # creates a new console; all input and output of the subprocess goes to this new console
    while True:
        print(queue.get())
        # prints any output produced by the main script and passed to the subprocess using the queue

if __name__ == "__main__":
    queue = multiprocessing.Queue()
    multiprocessing.Process(target=subprocess, args=[queue]).start()
    while True:
        print("Hello World in main console")
        queue.put("Hello work in sub process console")
        # sends the above string to the subprocess, which prints it into its console
        # ...and whatever else you want to do in your main process
You can also do this with threading. You have to use the queue module if you want the queue functionality, as the threading module doesn't have its own queue.
Here is the win32console module documentation
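For reference, a minimal sketch of the threading variant of the same producer/consumer pattern (note that threads share their process's console, so unlike the multiprocessing version this does not by itself give the worker a separate window):
import queue
import threading
import time

def worker(q):
    while True:
        print(q.get())  # prints whatever the main thread hands over

if __name__ == '__main__':
    q = queue.Queue()
    threading.Thread(target=worker, args=[q], daemon=True).start()
    for n in range(3):
        q.put('message %d from the main thread' % n)
    time.sleep(1)  # give the daemon thread a moment to drain the queue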

How can I terminate running jobs without closing connection to the core? (currently using execnet)

I have a cluster of computers which uses a master node to communicate with the slave nodes in the cluster.
The main problem I'm facing with execnet is being able to kill certain jobs that are running and then having new jobs requeue on the same core that the other job was just terminated on (as I want to utilize all cores of the slave nodes at any given time).
As of now there is no way to terminate a running job using execnet, so I figured if I could just kill the jobs manually through a bash script, say sudo kill 12345 where 12345 is the PID of the job (obtaining the PID of each job is another thing not supported by execnet, but that's another topic), then it would terminate the job and then requeue another on the core that was just freed up. It does kill the job correctly; however, it closes the connection to that channel (the core; the master node communicates with each core individually) and then does not utilize that core anymore until all jobs are done. Is there a way to terminate a running job without killing the connection to the core?
Here is the script to submit jobs
import execnet, os, sys
import re
import socket
import numpy as np
import pickle, cPickle
from copy import deepcopy
import time
import job

def main():
    print 'execnet source files are located at:\n {}/\n'.format(
        os.path.join(os.path.dirname(execnet.__file__))
    )
    # Generate a group of gateways.
    work_dir = '/home/mpiuser/pn2/'
    f = 'cluster_core_info.txt'
    n_start, n_end = 250000, 250008
    ci = get_cluster_info(f)
    group, g_labels = make_gateway_group(ci, work_dir)
    mch = group.remote_exec(job)
    # Multiplexed receive queue over all channels; 'terminate_channel' is put
    # when a channel closes. (The original snippet used 'queue' without defining it.)
    queue = mch.make_receive_queue(endmarker='terminate_channel')
    args = range(n_start, n_end + 1)  # List of parameters to compute factorial.
    manage_jobs(group, mch, queue, g_labels, args)
    # Close the group of gateways.
    group.terminate()

def get_cluster_info(f):
    nodes, ncores = [], []
    with open(f, 'r') as fid:
        while True:
            line = fid.readline()
            if not line:
                fid.close()
                break
            line = line.strip('\n').split()
            nodes.append(line[0])
            ncores.append(int(line[1]))
    return dict(zip(nodes, ncores))

def make_gateway_group(cluster_info, work_dir):
    ''' Generate gateways on all cores in remote nodes. '''
    print 'Gateways generated:\n'
    group = execnet.Group()
    g_labels = []
    nodes = list(cluster_info.keys())
    for node in nodes:
        for i in range(cluster_info[node]):
            group.makegateway(
                "ssh={0}//id={0}_{1}//chdir={2}".format(
                    node, i, work_dir
                ))
            sys.stdout.write(' ')
            sys.stdout.flush()
            print list(group)[-1]
            # Generate a string 'node-id_core-id'.
            g_labels.append('{}_{}'.format(re.findall(r'\d+', node)[0], i))
    print ''
    return group, g_labels

def get_mch_id(g_labels, string):
    ids = [x for x in re.findall(r'\d+', string)]
    ids = '{}_{}'.format(*ids)
    return g_labels.index(ids)

def manage_jobs(group, mch, queue, g_labels, args):
    args_ref = deepcopy(args)
    terminated_channels = 0
    active_jobs, active_args = [], []
    while True:
        channel, item = queue.get()
        if item == 'terminate_channel':
            terminated_channels += 1
            print " Gateway closed: {}".format(channel.gateway.id)
            if terminated_channels == len(mch):
                print "\nAll jobs done.\n"
                break
            continue
        if item != "ready":
            mch_id_completed = get_mch_id(g_labels, channel.gateway.id)
            depopulate_list(active_jobs, mch_id_completed, active_args)
            print " Gateway {} channel id {} returned:".format(
                channel.gateway.id, mch_id_completed)
            print " {}".format(item)
        if not args:
            print "\nNo more jobs to submit, sending termination request...\n"
            mch.send_each(None)
            args = 'terminate_channel'
        if args and \
           args != 'terminate_channel':
            arg = args.pop(0)
            idx = args_ref.index(arg)
            channel.send(arg)  # arg is copied by value to the remote side of
                               # channel to be executed. Maybe blocked if the
                               # sender queue is full.
            # Get the id of current channel used to submit a job,
            # this id can be used to refer mch[id] to terminate a job later.
            mch_id_active = get_mch_id(g_labels, channel.gateway.id)
            print "Job {}: {}! submitted to gateway {}, channel id {}".format(
                idx, arg, channel.gateway.id, mch_id_active)
            populate_list(active_jobs, mch_id_active,
                          active_args, arg)

def populate_list(jobs, job_active, args, arg_active):
    jobs.append(job_active)
    args.append(arg_active)

def depopulate_list(jobs, job_completed, args):
    i = jobs.index(job_completed)
    jobs.pop(i)
    args.pop(i)

if __name__ == '__main__':
    main()
and here is my job.py script:
#!/usr/bin/env python
import os, sys
import socket
import time
import numpy as np
import pickle, cPickle
import random
import job

def hostname():
    return socket.gethostname()

def working_dir():
    return os.getcwd()

def listdir(path):
    return os.listdir(path)

def fac(arg):
    return np.math.factorial(arg)

def dump(arg):
    path = working_dir() + '/out'
    if not os.path.exists(path):
        os.mkdir(path)
    f_path = path + '/fac_{}.txt'.format(arg)
    t_0 = time.time()
    num = fac(arg)  # Main operation
    t_1 = time.time()
    cPickle.dump(num, open(f_path, "w"), protocol=2)  # Main operation
    t_2 = time.time()
    duration_0 = "{:.4f}".format(t_1 - t_0)
    duration_1 = "{:.4f}".format(t_2 - t_1)
    #num2 = cPickle.load(open(f_path, "rb"))
    return '--Calculation: {} s, dumping: {} s'.format(
        duration_0, duration_1)

if __name__ == '__channelexec__':
    channel.send("ready")
    for arg in channel:
        if arg is None:
            break
        elif str(arg).isdigit():
            channel.send((
                str(arg) + '!',
                job.hostname(),
                job.dump(arg)
            ))
        else:
            print 'Warning! arg sent should be number | None'
Yes, you are on the right track. Use the psutil library to manage the processes: find their PIDs, kill them, etc.
No need for involving bash anywhere. Python covers it all.
Or, even better, program your script to terminate when the master says so.
It is usually done that way.
You can even make it start another script before terminating itself if you want/need.
Or, if it is the same thing that you would be doing in another process, just stop the current work and start the new work in the script without terminating it at all.
And, if I may make a suggestion: don't read your file line by line; read the whole file and then use *.splitlines(). For small files, reading them in chunks just tortures the IO. You wouldn't need *.strip() either. And you should remove unused imports too.
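A minimal sketch of the psutil route (matching on the job script's name in the command line is just one illustrative way of finding the right process):
import psutil

def kill_job(script_name):
    # scan all processes and terminate the one whose command line
    # mentions the given job script
    for proc in psutil.process_iter(['pid', 'cmdline']):
        cmdline = proc.info['cmdline'] or []
        if any(script_name in part for part in cmdline):
            proc.terminate()  # or proc.kill() for an immediate SIGKILL
            proc.wait(timeout=5)
            return proc.info['pid']
    return None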

Reading windows event log in Python using pywin32 (win32evtlog module)

I would like to read Windows' event log. I am not sure if it's the best way, but I would like to use the pywin32 -> win32evtlog module to do so. First and foremost: is it possible to read logs from Windows 7 using this library, and if so, how do I read events associated with application runs (running an .exe must leave a trace in the event log in Windows, I guess)?
I have managed to find some small examples on the net, but they're not enough for me, and the documentation unfortunately isn't well written ;/
import win32evtlog
hand = win32evtlog.OpenEventLog(None,"Microsoft-Windows-TaskScheduler/Operational")
print win32evtlog.GetNumberOfEventLogRecords(hand)
You can find plenty of demos related to the winapi in your C:\PythonXX\Lib\site-packages\win32\Demos folder. In this folder you'll find a script named eventLogDemo.py. There you can see how to use the win32evtlog module. Just start this script with eventLogDemo.py -v and you will get prints from your Windows event log with logtype Application.
In case you can't find this script:
import win32evtlog
import win32api
import win32con
import win32security  # To translate NT Sids to account names.
import win32evtlogutil

def ReadLog(computer, logType="Application", dumpEachRecord=0):
    # read the entire log back.
    h = win32evtlog.OpenEventLog(computer, logType)
    numRecords = win32evtlog.GetNumberOfEventLogRecords(h)
    # print "There are %d records" % numRecords
    num = 0
    while 1:
        objects = win32evtlog.ReadEventLog(h, win32evtlog.EVENTLOG_BACKWARDS_READ | win32evtlog.EVENTLOG_SEQUENTIAL_READ, 0)
        if not objects:
            break
        for object in objects:
            # get it for testing purposes, but dont print it.
            msg = win32evtlogutil.SafeFormatMessage(object, logType)
            if object.Sid is not None:
                try:
                    domain, user, typ = win32security.LookupAccountSid(computer, object.Sid)
                    sidDesc = "%s/%s" % (domain, user)
                except win32security.error:
                    sidDesc = str(object.Sid)
                user_desc = "Event associated with user %s" % (sidDesc,)
            else:
                user_desc = None
            if dumpEachRecord:
                print "Event record from %r generated at %s" % (object.SourceName, object.TimeGenerated.Format())
                if user_desc:
                    print user_desc
                try:
                    print msg
                except UnicodeError:
                    print "(unicode error printing message: repr() follows...)"
                    print repr(msg)
        num = num + len(objects)
    if numRecords == num:
        print "Successfully read all", numRecords, "records"
    else:
        print "Couldn't get all records - reported %d, but found %d" % (numRecords, num)
        print "(Note that some other app may have written records while we were running!)"
    win32evtlog.CloseEventLog(h)

def usage():
    print "Writes an event to the event log."
    print "-w : Dont write any test records."
    print "-r : Dont read the event log"
    print "-c : computerName : Process the log on the specified computer"
    print "-v : Verbose"
    print "-t : LogType - Use the specified log - default = 'Application'"

def test():
    # check if running on Windows NT, if not, display notice and terminate
    if win32api.GetVersion() & 0x80000000:
        print "This sample only runs on NT"
        return
    import sys, getopt
    opts, args = getopt.getopt(sys.argv[1:], "rwh?c:t:v")
    computer = None
    do_read = do_write = 1
    logType = "Application"
    verbose = 0
    if len(args) > 0:
        print "Invalid args"
        usage()
        return 1
    for opt, val in opts:
        if opt == '-t':
            logType = val
        if opt == '-c':
            computer = val
        if opt in ['-h', '-?']:
            usage()
            return
        if opt == '-r':
            do_read = 0
        if opt == '-w':
            do_write = 0
        if opt == '-v':
            verbose = verbose + 1
    if do_write:
        ph = win32api.GetCurrentProcess()
        th = win32security.OpenProcessToken(ph, win32con.TOKEN_READ)
        my_sid = win32security.GetTokenInformation(th, win32security.TokenUser)[0]
        win32evtlogutil.ReportEvent(logType, 2,
                                    strings=["The message text for event 2", "Another insert"],
                                    data="Raw\0Data".encode("ascii"), sid=my_sid)
        win32evtlogutil.ReportEvent(logType, 1, eventType=win32evtlog.EVENTLOG_WARNING_TYPE,
                                    strings=["A warning", "An even more dire warning"],
                                    data="Raw\0Data".encode("ascii"), sid=my_sid)
        win32evtlogutil.ReportEvent(logType, 1, eventType=win32evtlog.EVENTLOG_INFORMATION_TYPE,
                                    strings=["An info", "Too much info"],
                                    data="Raw\0Data".encode("ascii"), sid=my_sid)
        print("Successfully wrote 3 records to the log")
    if do_read:
        ReadLog(computer, logType, verbose > 0)

if __name__ == '__main__':
    test()
I hope this script fits your needs

Python script is exiting with no output and I have no idea why

I'm attempting to debug a Subversion post-commit hook that calls some python scripts. What I've been able to determine so far is that when I run post-commit.bat manually (I've created a wrapper for it to make it easier), everything succeeds, but when SVN runs it, one particular step doesn't work.
We're using CollabNet SVNServe, which I know from the documentation removes all environment variables. This had caused some problems earlier, but shouldn't be an issue now.
Before Subversion calls a hook script, it removes all variables - including $PATH on Unix, and %PATH% on Windows - from the environment. Therefore, your script can only run another program if you spell out that program's absolute name.
The relevant portion of post-commit.bat is:
echo -------------------------- >> c:\svn-repos\company\hooks\svn2ftp.out.log
set SITENAME=staging
set SVNPATH=branches/staging/wwwroot/
"C:\Python3\python.exe" C:\svn-repos\company\hooks\svn2ftp.py ^
--svnUser="svnusername" ^
--svnPass="svnpassword" ^
--ftp-user=ftpuser ^
--ftp-password=ftppassword ^
--ftp-remote-dir=/ ^
--access-url=svn://10.0.100.6/company ^
--status-file="C:\svn-repos\company\hooks\svn2ftp-%SITENAME%.dat" ^
--project-directory=%SVNPATH% "staging.company.com" %1 %2 >> c:\svn-repos\company\hooks\svn2ftp.out.log
echo -------------------------- >> c:\svn-repos\company\hooks\svn2ftp.out.log
When I run post-commit.bat manually, for example: post-commit c:\svn-repos\company 12345, I see output like the following in svn2ftp.out.log:
--------------------------
args1: c:\svn-repos\company
args0: staging.company.com
abspath: c:\svn-repos\company
project_dir: branches/staging/wwwroot/
local_repos_path: c:\svn-repos\company
getting youngest revision...
done, up-to-date
--------------------------
However, when I commit something to the repo and it runs automatically, the output is:
--------------------------
--------------------------
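Note that the batch file's >> redirection only captures stdout; anything the interpreter writes to stderr (a traceback, for instance) is lost unless 2>&1 is added. One way to rule that out from the Python side is to point both streams at a file early on (again a sketch, with an assumed path):

import sys

# hypothetical: send all console output to a file so it survives
# even when svnserve runs the hook without a console
log = open(r'C:\svn-repos\company\hooks\svn2ftp.console.log', 'a')
sys.stdout = log
sys.stderr = log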
svn2ftp.py is a bit long, so I apologize but here goes. I'll have some notes/disclaimers about its contents below it.
#!/usr/bin/env python
"""Usage: svn2ftp.py [OPTION...] FTP-HOST REPOS-PATH
Upload to FTP-HOST changes committed to the Subversion repository at
REPOS-PATH. Uses svn diff --summarize to only propagate the changed files
Options:
-?, --help Show this help message.
-u, --ftp-user=USER The username for the FTP server. Default: 'anonymous'
-p, --ftp-password=P The password for the FTP server. Default: '#'
-P, --ftp-port=X Port number for the FTP server. Default: 21
-r, --ftp-remote-dir=DIR The remote directory that is expected to resemble the
repository project directory
-a, --access-url=URL This is the URL that should be used when trying to SVN
export files so that they can be uploaded to the FTP
server
-s, --status-file=PATH Required. This script needs to store the last
successful revision that was transferred to the
server. PATH is the location of this file.
-d, --project-directory=DIR If the project you are interested in sending to
the FTP server is not under the root of the
repository (/), set this parameter.
Example: -d 'project1/trunk/'
This should NOT start with a '/'.
2008.5.2 CKS
Fixed possible Windows-related bug with tempfile, where the script didn't have
permission to write to the tempfile. Replaced this with a open()-created file
created in the CWD.
2008.5.13 CKS
Added error logging. Added exception for file-not-found errors when deleting files.
2008.5.14 CKS
Changed file open to 'rb' mode, to prevent Python's universal newline support from
stripping CR characters, causing later comparisons between FTP and SVN to report changes.
"""
try:
import sys, os
import logging
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s',
filename='svn2ftp.debug.log',
filemode='a'
)
console = logging.StreamHandler()
console.setLevel(logging.ERROR)
logging.getLogger('').addHandler(console)
# traceback and inspect are imported before the third-party modules so the
# except block below can still report a failed pysvn/ftplib import
import getopt, tempfile, smtplib, traceback, subprocess
import inspect
from io import StringIO
import pysvn
import ftplib
except Exception as e:
logging.error(e)
#capture the location of the error
frame = inspect.currentframe()
stack_trace = traceback.format_stack(frame)
logging.debug(stack_trace)
print(stack_trace)
#end capture
sys.exit(1)
#defaults
host = ""
user = "anonymous"
password = "#"
port = 21
repo_path = ""
local_repos_path = ""
status_file = ""
project_directory = ""
remote_base_directory = ""
toAddrs = "developers@company.com"
youngest_revision = ""
def email(toAddrs, message, subject, fromAddr='autonote@company.com'):
headers = "From: %s\r\nTo: %s\r\nSubject: %s\r\n\r\n" % (fromAddr, toAddrs, subject)
message = headers + message
logging.info('sending email to %s...' % toAddrs)
server = smtplib.SMTP('smtp.company.com')
server.set_debuglevel(1)
server.sendmail(fromAddr, toAddrs, message)
server.quit()
logging.info('email sent')
def captureErrorMessage(e):
sout = StringIO()
traceback.print_exc(file=sout)
errorMessage = '\n'+('*'*80)+('\n%s'%e)+('\n%s\n'%sout.getvalue())+('*'*80)
return errorMessage
def usage_and_exit(errmsg=None):
"""Print a usage message, plus an ERRMSG (if provided), then exit.
If ERRMSG is provided, the usage message is printed to stderr and
the script exits with a non-zero error code. Otherwise, the usage
message goes to stdout, and the script exits with a zero
errorcode."""
if errmsg is None:
stream = sys.stdout
else:
stream = sys.stderr
print(__doc__, file=stream)
if errmsg:
print("\nError: %s" % (errmsg), file=stream)
sys.exit(2)
sys.exit(0)
def read_args():
global host
global user
global password
global port
global repo_path
global local_repos_path
global status_file
global project_directory
global remote_base_directory
global youngest_revision
try:
# note: getopt short options are single characters, so "-SU"/"-SP" cannot
# be expressed here; the long forms below are the ones that actually work
opts, args = getopt.gnu_getopt(sys.argv[1:], "?u:p:P:r:a:s:d:",
["help",
"ftp-user=",
"ftp-password=",
"ftp-port=",
"ftp-remote-dir=",
"access-url=",
"status-file=",
"project-directory=",
"svnUser=",
"svnPass="
])
except getopt.GetoptError as msg:
usage_and_exit(msg)
for opt, arg in opts:
if opt in ("-?", "--help"):
usage_and_exit()
elif opt in ("-u", "--ftp-user"):
user = arg
elif opt in ("-p", "--ftp-password"):
password = arg
elif opt == "--svnUser":
svnUser = arg
elif opt == "--svnPass":
svnPass = arg
elif opt in ("-P", "--ftp-port"):
try:
port = int(arg)
except ValueError as msg:
usage_and_exit("Invalid value '%s' for --ftp-port." % (arg))
if port < 1 or port > 65535:
usage_and_exit("Value for --ftp-port must be a positive integer less than 65536.")
elif opt in ("-r", "--ftp-remote-dir"):
remote_base_directory = arg
elif opt in ("-a", "--access-url"):
repo_path = arg
elif opt in ("-s", "--status-file"):
status_file = os.path.abspath(arg)
elif opt in ("-d", "--project-directory"):
project_directory = arg
if len(args) != 3:
print(str(args))
usage_and_exit("host and/or local_repos_path not specified (" + str(len(args)) + ")")
host = args[0]
print("args1: " + args[1])
print("args0: " + args[0])
print("abspath: " + os.path.abspath(args[1]))
local_repos_path = os.path.abspath(args[1])
print('project_dir:',project_directory)
youngest_revision = int(args[2])
if status_file == "" : usage_and_exit("No status file specified")
def main():
global host
global user
global password
global port
global repo_path
global local_repos_path
global status_file
global project_directory
global remote_base_directory
global youngest_revision
read_args()
#repository,fs_ptr
#get youngest revision
print("local_repos_path: " + local_repos_path)
print('getting youngest revision...')
#youngest_revision = fs.youngest_rev(fs_ptr)
assert youngest_revision, "Unable to lookup youngest revision."
last_sent_revision = get_last_revision()
if youngest_revision == last_sent_revision:
# no need to continue. we should be up to date.
print('done, up-to-date')
return
if last_sent_revision or youngest_revision < 10:
# Only compare revisions if the DAT file contains a valid
# revision number. Otherwise we risk parsing and uploading
# every revision in the repo in the case where a repository
# is retroactively configured to sync with ftp.
pysvn_client = pysvn.Client()
pysvn_client.callback_get_login = get_login
rev1 = pysvn.Revision(pysvn.opt_revision_kind.number, last_sent_revision)
rev2 = pysvn.Revision(pysvn.opt_revision_kind.number, youngest_revision)
summary = pysvn_client.diff_summarize(repo_path, rev1, repo_path, rev2, True, False)
print('summary len:',len(summary))
if len(summary) > 0 :
print('connecting to %s...' % host)
ftp = FTPClient(host, user, password)
print('connected to %s' % host)
ftp.base_path = remote_base_directory
print('set remote base directory to %s' % remote_base_directory)
#iterate through all the differences between revisions
for change in summary :
#determine whether the path of the change is relevant to the path that is being sent, and modify the path as appropriate.
print('change path:',change.path)
ftp_relative_path = apply_basedir(change.path)
print('ftp rel path:',ftp_relative_path)
#only try to sync path if the path is in our project_directory
if ftp_relative_path != "" :
is_file = (change.node_kind == pysvn.node_kind.file)
if str(change.summarize_kind) == "delete" :
print("deleting: " + ftp_relative_path)
try:
ftp.delete_path("/" + ftp_relative_path, is_file)
except ftplib.error_perm as e:
if 'cannot find the' in str(e) or 'not found' in str(e):
# Log, but otherwise ignore path-not-found errors
# when deleting, since it's not a disaster if the file
# we want to delete is already gone.
logging.error(captureErrorMessage(e))
else:
raise
elif str(change.summarize_kind) == "added" or str(change.summarize_kind) == "modified" :
local_file = ""
if is_file :
local_file = svn_export_temp(pysvn_client, repo_path, rev2, change.path)
print("uploading file: " + ftp_relative_path)
ftp.upload_path("/" + ftp_relative_path, is_file, local_file)
if is_file :
os.remove(local_file)
elif str(change.summarize_kind) == "normal" :
print("skipping 'normal' element: " + ftp_relative_path)
else :
raise Exception("Unknown change summarize kind: " + str(change.summarize_kind) + ", path: " + ftp_relative_path)
ftp.close()
#write back the last revision that was synced
print("writing last revision: " + str(youngest_revision))
set_last_revision(youngest_revision) # todo: undo
def get_login(a,b,c,d):
#arguments don't matter, we're always going to return the same thing
try:
return True, "svnUsername", "svnPassword", True
except Exception as e:
logging.error(e)
#capture the location of the error
frame = inspect.currentframe()
stack_trace = traceback.format_stack(frame)
logging.debug(stack_trace)
#end capture
sys.exit(1)
#functions for persisting the last successfully synced revision
def get_last_revision():
if os.path.isfile(status_file) :
f=open(status_file, 'r')
line = f.readline()
f.close()
try: i = int(line)
except ValueError:
i = 0
else:
i = 0
f = open(status_file, 'w')
f.write(str(i))
f.close()
return i
def set_last_revision(rev) :
f = open(status_file, 'w')
f.write(str(rev))
f.close()
#augmented ftp client class that can work off a base directory
class FTPClient(ftplib.FTP) :
def __init__(self, host, username, password) :
self.base_path = ""
self.current_path = ""
ftplib.FTP.__init__(self, host, username, password)
def cwd(self, path) :
debug_path = path
if self.current_path == "" :
self.current_path = self.pwd()
print("pwd: " + self.current_path)
if not os.path.isabs(path) :
debug_path = self.base_path + "<" + path
path = os.path.join(self.current_path, path)
elif self.base_path != "" :
debug_path = self.base_path + ">" + path.lstrip("/")
path = os.path.join(self.base_path, path.lstrip("/"))
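# caveat: os.path.join/normpath use backslashes on Windows, which FTP
# servers generally won't accept; posixpath would be the safer choice here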
path = os.path.normpath(path)
#by this point the path should be absolute.
if path != self.current_path :
print("change from " + self.current_path + " to " + debug_path)
ftplib.FTP.cwd(self, path)
self.current_path = path
else :
print("staying put : " + self.current_path)
def cd_or_create(self, path) :
assert os.path.isabs(path), "absolute path expected (" + path + ")"
try: self.cwd(path)
except ftplib.error_perm as e:
for folder in path.split('/'):
if folder == "" :
self.cwd("/")
continue
try: self.cwd(folder)
except:
print("mkd: (" + path + "):" + folder)
self.mkd(folder)
self.cwd(folder)
def upload_path(self, path, is_file, local_path) :
if is_file:
(path, filename) = os.path.split(path)
self.cd_or_create(path)
# Use read-binary to avoid universal newline support from stripping CR characters.
f = open(local_path, 'rb')
self.storbinary("STOR " + filename, f)
f.close()
else:
self.cd_or_create(path)
def delete_path(self, path, is_file) :
(path, filename) = os.path.split(path)
print("trying to delete: " + path + ", " + filename)
self.cwd(path)
try:
if is_file :
self.delete(filename)
else:
self.delete_path_recursive(filename)
except ftplib.error_perm as e:
if 'The system cannot find the' in str(e) or '550 File not found' in str(e):
# Log, but otherwise ignore path-not-found errors
# when deleting, since it's not a disaster if the file
# we want to delete is already gone.
logging.error(captureErrorMessage(e))
else:
raise
def delete_path_recursive(self, path):
if path == "/" :
raise Exception("WARNING: trying to delete '/'!")
for node in self.nlst(path) :
if node == path :
#it's a file. delete and return
self.delete(path)
return
if node != "." and node != ".." :
self.delete_path_recursive(os.path.join(path, node))
try: self.rmd(path)
except ftplib.error_perm as msg :
sys.stderr.write("Error deleting directory " + os.path.join(self.current_path, path) + " : " + str(msg))
# apply the project_directory setting
def apply_basedir(path) :
#remove any leading stuff (in this case, "trunk/") and decide whether file should be propagated
if not path.startswith(project_directory) :
return ""
return path.replace(project_directory, "", 1)
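# illustrative examples, assuming project_directory = "branches/staging/wwwroot/":
#   apply_basedir("branches/staging/wwwroot/css/site.css") -> "css/site.css"
#   apply_basedir("branches/other/readme.txt") -> "" (not propagated)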
def svn_export_temp(pysvn_client, base_path, rev, path) :
# Causes access denied error. Couldn't deduce Windows-perm issue.
# It's possible Python isn't garbage-collecting the open file-handle in time for pysvn to re-open it.
# Regardless, just generating a simple filename seems to work.
#(fd, dest_path) = tempfile.mkstemp()
dest_path = '%s.tmp' % __file__
exportPath = os.path.join(base_path, path).replace('\\','/')
print('exporting %s to %s' % (exportPath, dest_path))
pysvn_client.export( exportPath,
dest_path,
force=False,
revision=rev,
native_eol=None,
ignore_externals=False,
recurse=True,
peg_revision=rev )
return dest_path
if __name__ == "__main__":
logging.info('svnftp.start')
try:
main()
logging.info('svnftp.done')
except Exception as e:
# capture the location of the error for debug purposes
frame = inspect.currentframe()
stack_trace = traceback.format_stack(frame)
logging.debug(stack_trace[:-1])
print(stack_trace)
# end capture
error_text = '\nFATAL EXCEPTION!!!\n'+captureErrorMessage(e)
subject = "ALERT: SVN2FTP Error"
message = """An Error occurred while trying to FTP an SVN commit.
repo_path = %(repo_path)s\n
local_repos_path = %(local_repos_path)s\n
project_directory = %(project_directory)s\n
remote_base_directory = %(remote_base_directory)s\n
error_text = %(error_text)s
""" % globals()
email(toAddrs, message, subject)
logging.error(e)
Notes/Disclaimers:
I have basically no Python training, so I'm learning as I go and spending lots of time reading docs to figure stuff out.
The body of get_login is in a try block because I was getting strange errors saying there was an unhandled exception in callback_get_login. Never figured out why, but it seems fine now. Let sleeping dogs lie, right?
The username and password for get_login are currently hard-coded (but correct) just to eliminate variables and try to change as little as possible at once. (I added the svnuser and svnpass arguments to the existing argument parsing.)
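For reference, pysvn documents callback_get_login as taking three arguments (realm, username, may_save) and returning a four-tuple (retcode, username, password, save); a mismatched signature is a classic source of "unhandled exception in callback_get_login" errors. A minimal version would look like:

def get_login(realm, username, may_save):
    # always answer with the same hard-coded credentials
    return True, "svnUsername", "svnPassword", True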
So that's where I am. I can't figure out why on earth it's not printing anything into svn2ftp.out.log. If you're wondering, the output for one of these failed attempts in svn2ftp.debug.log is:
2012-09-06 15:18:12,496 INFO svnftp.start
2012-09-06 15:18:12,496 INFO svnftp.done
And it's no different on a successful run. So there's nothing useful being logged.
I'm lost. I've gone way down the rabbit hole on this one, and don't know where to go from here. Any ideas?
It looks as if you are overriding your logging level: the root logger is configured at DEBUG, but the console handler you attach only passes ERROR and above. Try setting both to DEBUG and see what happens.
import sys, os
import logging
logging.basicConfig(
level=logging.DEBUG, # DEBUG here
format='%(asctime)s %(levelname)s %(message)s',
filename='svn2ftp.debug.log',
filemode='a'
)
console = logging.StreamHandler()
console.setLevel(logging.ERROR) # ERROR here
logging.getLogger('').addHandler(console)
Additionally, you are printing in some places and logging in others. The logging library does not capture sys.stdout, so anything you print is simply lost when the hook runs without a console. I would convert all print statements to logging statements to be consistent.
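If converting every print is too invasive as a first step, a stopgap is to point sys.stdout and sys.stderr at the logging module, so stray prints end up in svn2ftp.debug.log too (a sketch; LoggerWriter is my own helper name):

import sys
import logging

class LoggerWriter:
    """File-like shim that forwards writes to a logging function."""
    def __init__(self, log_func):
        self.log_func = log_func
    def write(self, message):
        message = message.strip()
        if message:
            self.log_func(message)
    def flush(self):
        # nothing to flush; logging handles its own buffering
        pass

sys.stdout = LoggerWriter(logging.debug)
sys.stderr = LoggerWriter(logging.error)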
