Get returncode of a detached subprocess? - python

I'm trying to write a submitter for a job scheduler. Since I don't know when jobs arrive or how long they will run, I use multiprocessing to spawn one process per job, which launches the job with subprocess and detaches so the next job can be processed. This works pretty well so far, but I'd like to get the return code after a job finishes; is that possible? I tried several subprocess variations, but the ones that return the return code block for the whole runtime of the job.
#!/usr/bin/python3
# coding=utf-8
import time
import multiprocessing
import subprocess

JobsList = []

def SubmitJob(jobname):
    """ Submit the next requested job """
    print(f"Starting job {jobname}...")
    JobDir = "/home/xxxxx/Jobs/"
    JobMem = "{}{}.sh".format(JobDir, jobname)
    SysoutFile = "./Sysout/{}.out".format(jobname)
    fh = open(SysoutFile, 'w')
    kwargs = {}
    kwargs.update(start_new_session=True)
    p = subprocess.Popen(JobMem, shell=False, stdout=fh, **kwargs)
    pid = p.pid
    print(f"Job {jobname} pid {pid} submitted...")

def PrepareSubmit():
    """ Create and start one process per job """
    jobs = []
    for Job in JobsList:
        process = multiprocessing.Process(target=SubmitJob,
                                          args=(Job,))
        jobs.append(process)
        JobsList.remove(Job)
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()
    print("All jobs submitted...")

def main():
    """ Check queue for new job requests """
    number_of_lines = 0
    jobs_list = []
    while 1:
        job_queue = open("/home/xxxxx/Development/Python/#Projects/Scheduler/jobs.que", 'r')
        lines = job_queue.readlines()
        if len(lines) > number_of_lines:
            jobs_list.append(lines[len(lines)-1])
            NewJob = lines[len(lines)-1][:-1]
            JobsList.append(NewJob)
            PrepareSubmit()
            number_of_lines = number_of_lines + 1
        time.sleep(1)

if __name__ == "__main__":
    main()
The while loop in main() is for testing purposes only.
Can someone tell me whether that is possible, and how? Thanks in advance.
This is the code that gives me a return code but doesn't submit a job until the previous one is finished. So a long-running job holds up all the jobs behind it, which is what I called blocking.
def Submit(job):
    """ Submit the next requested job """
    print(f"Starting job {job}...")
    JobDir = "/home/uwe/Jobs/"
    JobMem = "{}{}.sh".format(JobDir, job)
    SysoutFile = "./Sysout/{}.out".format(job)
    fh = open(SysoutFile, 'w')
    kwargs = {}
    kwargs.update(start_new_session=True)
    p = subprocess.Popen(JobMem, shell=False, stdout=fh, **kwargs)
    pid = p.pid
    while p.poll() is None:
        a = p.poll()
        print(a)
        time.sleep(1)
    else:
        rc = p.returncode
        print(f"PID: {pid} rc: {rc}")

def main():
    JobsList = ['JOB90501', 'JOB00001', 'JOB00002', 'JOB00003']
    for Job in JobsList:
        Submit(Job)
Roy, this is my current code after your last hint:
ProcessList = {}  # module-level dict mapping Popen objects to their pids (implied by the code below)

def SubmitJob(jobname):
    """ Submit the next requested job """
    JobDir = "/home/uwe/Jobs/"
    JobMem = "{}{}.sh".format(JobDir, jobname)
    SysoutFile = "./Sysout/{}.out".format(jobname)
    fh = open(SysoutFile, 'w')
    kwargs = {}
    kwargs.update(start_new_session=True)
    p = subprocess.Popen(JobMem, shell=False, stdout=fh, **kwargs)
    ProcessList[p] = p.pid
    print(f"Started job {jobname} - PID: {p.pid}")

def main():
    c_JobsList = ['JOB00001', 'JOB00002', 'JOB00003']
    for Job in c_JobsList:
        SubmitJob(Job)
    for p, pid in ProcessList.items():
        RcFile = "./Sysout/{}.rc".format(pid)
        f = open(RcFile, 'w')
        while p.poll() is None:
            a = p.poll()
            time.sleep(1)
        else:
            rc = p.returncode
            f.writelines(str(rc))
            print(f"PID: {pid} rc: {rc}")
        f.close()
and the output:
Started job JOB00001 - PID: 5426
Started job JOB00002 - PID: 5427
Started job JOB00003 - PID: 5429
PID: 5426 rc: 0
PID: 5427 rc: 0
PID: 5429 rc: 8

Edit (the original answer is kept below for future reference)
The natural means to use for this purpose is Popen.poll, but apparently it doesn't work in some cases (see https://lists.gt.net/python/bugs/633489). The solution I'd like to propose is using Popen.wait with a very short timeout, as in the following code sample:
import subprocess
import time

p = subprocess.Popen(["/bin/sleep", "3"])
print(f"Created process {p.pid}")
count = 0
while True:
    try:
        ret = p.wait(.001)  # wait for 1 ms
        print(f"Got a return code {ret}")
        break
    except subprocess.TimeoutExpired as e:
        print("..", end="")
        time.sleep(.5)
        print(f"Still waiting, count is {count}")
        count += 1
print("Done!")
The output I'm getting is:
Created process 30040
..Still waiting, count is 0
..Still waiting, count is 1
..Still waiting, count is 2
..Still waiting, count is 3
..Still waiting, count is 4
..Still waiting, count is 5
Got a return code 0
Done
Original idea - Popen.poll
The method you should be using is Popen.poll (documentation). It returns the exit status of the process, or None if it's still running.
To use it, you'll have to keep the Popen objects you get when you call subprocess.Popen, and poll these objects later on.
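For illustration, a minimal sketch of that pattern (the job script paths here are placeholders, not the asker's actual jobs):

import subprocess
import time

# Placeholder job scripts - substitute your own commands.
jobs = ["./job_a.sh", "./job_b.sh"]
procs = {job: subprocess.Popen([job]) for job in jobs}

while procs:
    for job, p in list(procs.items()):
        rc = p.poll()            # None while running, exit status once done
        if rc is not None:
            print(f"{job} finished with rc={rc}")
            del procs[job]
    time.sleep(1)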

Related

Process pool results without waiting for all tasks to finish

from multiprocessing import Pool
from functools import partial
from time import sleep
import random
import string
import uuid
import os
import glob

def task_a(param1, param2, mydata):
    thread_id = str(uuid.uuid4().hex)  # this may not be robust enough to guarantee no collisions, address
    output_filename = ''.join([str(thread_id), '.txt'])
    # part 1 - create output file for task_b to use
    with open(output_filename, 'w') as outfile:
        for line in mydata:
            outfile.write(line)
    # part 2 - do some extra stuff (whilst task_b is running)
    sleep(5)
    print('Task A finished')
    return output_filename  # not interested in return val

def task_b(expected_num_files):
    processed_files = 0
    while processed_files < expected_num_files:
        print('I am task_b, waiting for {} files ({} so far)'.format(expected_num_files, processed_files))
        path_to_search = ''
        for filename in glob.iglob(path_to_search + '*.txt', recursive=True):
            print('Got file : {}'.format(filename))
            # would do something complicated here
            os.rename(filename, filename + '.done')
            processed_files += 1
        sleep(10)

if __name__ == '__main__':
    param1 = ''  # dummy variable, need to support in solution
    param2 = ''  # dummy variable, need to support in solution
    num_workers = 2
    full_data = [[random.choice(string.ascii_lowercase) for _ in range(5)] for _ in range(100)]
    print(full_data)
    for i in range(0, len(full_data), num_workers):
        print('Going to process {}'.format(full_data[i:i+num_workers]))
        p = Pool(num_workers)
        task_a_func = partial(task_a, param1, param2)
        results = p.map(task_a_func, full_data[i:i+num_workers])
        p.close()
        p.join()
        task_b(expected_num_files=num_workers)  # want this running sooner
        print('Iteration {} complete'.format(i))
        # want to wait for task_a's and task_b to finish
I'm having trouble scheduling these tasks to run concurrently.
task_a is a multiprocessing pool that produces an output file part way through its execution.
task_b MUST process the output files sequentially (they can be in any order, as soon as they are available), WHILST task_a continues to run (it will no longer change the output file).
The next iteration must only start when all task_a's have completed AND task_b has completed.
The toy code I have posted obviously waits for the task_a's to fully complete before task_b is started (which is not what I want).
I have looked at multiprocessing / subprocess etc. but cannot find a way to launch both the pool and the single task_b process concurrently AND wait for BOTH to finish.
task_b is written as if it could be changed to an external script, but I am still stuck on how to manage the execution.
Should I effectively merge the code from task_b into task_a and somehow pass a flag to ensure one worker per pool 'runs the task_b code' via an if/else - at least then I would just be waiting on the pool to complete?
You can use an interprocess queue to communicate the filenames between task a and task b.
Also, initializing the pool repeatedly inside the loop is harmful and unnecessarily slow.
It's better to initialize the pool once at the beginning.
from multiprocessing import Pool, Manager, Event
from functools import partial
from time import sleep
import random
import string
import uuid
import os
import glob

def task_a(param1, param2, queue, mydata):
    thread_id = str(uuid.uuid4().hex)
    output_filename = ''.join([str(thread_id), '.txt'])
    output_filename = 'data/' + output_filename
    with open(output_filename, 'w') as outfile:
        for line in mydata:
            outfile.write(line)
    print(f'{thread_id}: Task A file write complete for data {mydata}')
    queue.put(output_filename)
    print('Task A finished')

def task_b(queue, num_workers, data_size, event_task_b_done):
    print('Task b started!')
    processed_files = 0
    while True:
        filename = queue.get()
        if filename == 'QUIT':
            # Whenever you want task_b to quit, just push 'QUIT' to the queue
            print('Task b quitting')
            break
        print('Got file : {}'.format(filename))
        os.rename(filename, filename + '.done')
        processed_files += 1
        print(f'Have processed {processed_files} so far!')
        if (processed_files % num_workers == 0) or (processed_files == data_size):
            event_task_b_done.set()

if __name__ == '__main__':
    param1 = ''  # dummy variable, need to support in solution
    param2 = ''  # dummy variable, need to support in solution
    num_workers = 2
    data_size = 100
    full_data = [[random.choice(string.ascii_lowercase) for _ in range(5)] for _ in range(data_size)]
    mgr = Manager()
    queue = mgr.Queue()
    event_task_b_done = mgr.Event()
    # One extra worker for task b
    p = Pool(num_workers + 1)
    p.apply_async(task_b, args=(queue, num_workers, data_size, event_task_b_done))
    task_a_func = partial(task_a, param1, param2, queue)
    for i in range(0, len(full_data), num_workers):
        data = full_data[i:i+num_workers]
        print('Going to process {}'.format(data))
        p.map_async(task_a_func, full_data[i:i+num_workers])
        print(f'Waiting for task b to process all {num_workers} files...')
        event_task_b_done.wait()
        event_task_b_done.clear()
        print('Iteration {} complete'.format(i))
    queue.put('QUIT')
    p.close()
    p.join()
    exit(0)

How do I restart a thread if its duration is longer than 10 seconds?

How can I track the duration of the write_file() function from the main thread?
The task: create a condition such that, if the execution time of the function exceeds 10 seconds, the function is restarted.
from multiprocessing import Pool

def write_file(file: str):
    f = open(file, 'w')
    for item in range(0, 1500000):
        f.write("%s\n" % item)
    f.close()

if __name__ == '__main__':
    list_files = ['1.txt', '2.txt', '3.txt']
    with Pool(3) as p:
        p.map(write_file, list_files)
I found the existing attempts to amend Pool for this to be overcomplicated.
The Pool class keeps workers alive until the whole work queue is done and therefore has complex machinery to control them.
Instead, if your requirement on the 10 seconds is not very strict, you can use the following code:
from multiprocessing import Process
import time

pdict = {}
for fname in list_files:
    p = Process(target=write_file, args=(fname,))
    pdict[fname] = p
    p.start()

while pdict:
    to_del = []
    time.sleep(10)
    for pname in pdict:
        if pdict[pname].exitcode is None or pdict[pname].is_alive():
            pdict[pname].terminate()  # killing the old one; that should also release the file resource
            pdict[pname] = Process(target=write_file, args=(pname,))
            pdict[pname].start()  # simply creating a new process and starting it
        else:
            to_del.append(pname)
    for pname in to_del:
        del pdict[pname]

How can I terminate running jobs without closing connection to the core? (currently using execnet)

I have a cluster of computers which uses a master node to communicate with the slave nodes in the cluster.
The main problem I'm facing with execnet is being able to kill certain jobs that are running and then having new jobs requeued on the same core the killed job was running on (since I want to utilize all cores of the slave nodes at any given time).
As of now there is no way to terminate running jobs using execnet, so I figured I could just kill the jobs manually through a bash script, say sudo kill 12345, where 12345 is the PID of the job (obtaining the PID of each job is another thing not supported by execnet, but that's another topic). That would terminate the job, and another one could then be queued onto the core that was just freed. It does kill the job correctly, however it also closes the connection to that channel (the core; the master node communicates with each core individually), and that core is then no longer used until all jobs are done. Is there a way to terminate a running job without killing the connection to the core?
Here is the script to submit jobs
import execnet, os, sys
import re
import socket
import numpy as np
import pickle, cPickle
from copy import deepcopy
import time
import job

def main():
    print 'execnet source files are located at:\n {}/\n'.format(
        os.path.join(os.path.dirname(execnet.__file__))
    )
    # Generate a group of gateways.
    work_dir = '/home/mpiuser/pn2/'
    f = 'cluster_core_info.txt'
    n_start, n_end = 250000, 250008
    ci = get_cluster_info(f)
    group, g_labels = make_gateway_group(ci, work_dir)
    mch = group.remote_exec(job)
    args = range(n_start, n_end+1)  # List of parameters to compute factorial.
    manage_jobs(group, mch, queue, g_labels, args)
    # Close the group of gateways.
    group.terminate()

def get_cluster_info(f):
    nodes, ncores = [], []
    with open(f, 'r') as fid:
        while True:
            line = fid.readline()
            if not line:
                fid.close()
                break
            line = line.strip('\n').split()
            nodes.append(line[0])
            ncores.append(int(line[1]))
    return dict(zip(nodes, ncores))

def make_gateway_group(cluster_info, work_dir):
    ''' Generate gateways on all cores in remote nodes. '''
    print 'Gateways generated:\n'
    group = execnet.Group()
    g_labels = []
    nodes = list(cluster_info.keys())
    for node in nodes:
        for i in range(cluster_info[node]):
            group.makegateway(
                "ssh={0}//id={0}_{1}//chdir={2}".format(
                    node, i, work_dir
                ))
            sys.stdout.write(' ')
            sys.stdout.flush()
            print list(group)[-1]
            # Generate a string 'node-id_core-id'.
            g_labels.append('{}_{}'.format(re.findall(r'\d+', node)[0], i))
    print ''
    return group, g_labels

def get_mch_id(g_labels, string):
    ids = [x for x in re.findall(r'\d+', string)]
    ids = '{}_{}'.format(*ids)
    return g_labels.index(ids)

def manage_jobs(group, mch, queue, g_labels, args):
    args_ref = deepcopy(args)
    terminated_channels = 0
    active_jobs, active_args = [], []
    while True:
        channel, item = queue.get()
        if item == 'terminate_channel':
            terminated_channels += 1
            print " Gateway closed: {}".format(channel.gateway.id)
            if terminated_channels == len(mch):
                print "\nAll jobs done.\n"
                break
            continue
        if item != "ready":
            mch_id_completed = get_mch_id(g_labels, channel.gateway.id)
            depopulate_list(active_jobs, mch_id_completed, active_args)
            print " Gateway {} channel id {} returned:".format(
                channel.gateway.id, mch_id_completed)
            print " {}".format(item)
        if not args:
            print "\nNo more jobs to submit, sending termination request...\n"
            mch.send_each(None)
            args = 'terminate_channel'
        if args and \
           args != 'terminate_channel':
            arg = args.pop(0)
            idx = args_ref.index(arg)
            channel.send(arg)  # arg is copied by value to the remote side of
                               # channel to be executed. Maybe blocked if the
                               # sender queue is full.
            # Get the id of current channel used to submit a job,
            # this id can be used to refer mch[id] to terminate a job later.
            mch_id_active = get_mch_id(g_labels, channel.gateway.id)
            print "Job {}: {}! submitted to gateway {}, channel id {}".format(
                idx, arg, channel.gateway.id, mch_id_active)
            populate_list(active_jobs, mch_id_active,
                          active_args, arg)

def populate_list(jobs, job_active, args, arg_active):
    jobs.append(job_active)
    args.append(arg_active)

def depopulate_list(jobs, job_completed, args):
    i = jobs.index(job_completed)
    jobs.pop(i)
    args.pop(i)

if __name__ == '__main__':
    main()
and here is my job.py script:
#!/usr/bin/env python
import os, sys
import socket
import time
import numpy as np
import pickle, cPickle
import random
import job

def hostname():
    return socket.gethostname()

def working_dir():
    return os.getcwd()

def listdir(path):
    return os.listdir(path)

def fac(arg):
    return np.math.factorial(arg)

def dump(arg):
    path = working_dir() + '/out'
    if not os.path.exists(path):
        os.mkdir(path)
    f_path = path + '/fac_{}.txt'.format(arg)
    t_0 = time.time()
    num = fac(arg)  # Main operation
    t_1 = time.time()
    cPickle.dump(num, open(f_path, "w"), protocol=2)  # Main operation
    t_2 = time.time()
    duration_0 = "{:.4f}".format(t_1 - t_0)
    duration_1 = "{:.4f}".format(t_2 - t_1)
    #num2 = cPickle.load(open(f_path, "rb"))
    return '--Calculation: {} s, dumping: {} s'.format(
        duration_0, duration_1)

if __name__ == '__channelexec__':
    channel.send("ready")
    for arg in channel:
        if arg is None:
            break
        elif str(arg).isdigit():
            channel.send((
                str(arg) + '!',
                job.hostname(),
                job.dump(arg)
            ))
        else:
            print 'Warning! arg sent should be a number or None'
Yes, you are on the right track. Use the psutil library to manage the processes, find their PIDs and so on, and kill them. There is no need to involve bash anywhere; Python covers it all.
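For instance, a minimal sketch of what that could look like with psutil (the job-script name 'job.sh' is purely illustrative, not something from your setup):

import psutil

TARGET = 'job.sh'   # illustrative name of the job script to look for

victims = []
for proc in psutil.process_iter(['pid', 'cmdline']):
    cmdline = proc.info['cmdline'] or []
    if any(TARGET in part for part in cmdline):
        print('Terminating PID', proc.info['pid'])
        proc.terminate()   # sends SIGTERM; use proc.kill() for SIGKILL
        victims.append(proc)

# Wait briefly for them to exit, then force-kill any stragglers.
gone, alive = psutil.wait_procs(victims, timeout=3)
for proc in alive:
    proc.kill()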
Or, even better, program your script to terminate when the master says so.
That is how it is usually done.
You can even make it start another script before terminating itself if you want or need to.
Or, if the new work is the same as what you would be doing in another process, just stop the current work and start the new one within the script, without terminating it at all.
And, if I may make a suggestion: don't read your file line by line; read the whole file and then use *.splitlines(). For small files, reading them in chunks just tortures the IO. You won't be needing *.strip() either. And you should remove the unused imports too.
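For example, instead of the readline() loop in get_cluster_info, something along these lines (a sketch of the suggestion, not a drop-in replacement):

def get_cluster_info(f):
    nodes, ncores = [], []
    with open(f) as fid:
        for line in fid.read().splitlines():   # read the whole file at once
            fields = line.split()
            if not fields:
                continue                       # skip blank lines
            nodes.append(fields[0])
            ncores.append(int(fields[1]))
    return dict(zip(nodes, ncores))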

Can not establish a pipe between processes

I am trying to create a pipe between parent and child processes:
def launch_process():
    parentStdin, childStdout = os.pipe()
    childStdin, parentStdout = os.pipe()
    pid = os.fork()
    if pid:
        os.close(childStdout)
        os.close(childStdin)
        self.set_configuration()
    else:
        os.close(parentStdout)
        os.close(parentStdin)
        os.dup2(childStdin, 0)
        os.dup2(childStdout, 1)
        os.execvp("echo.py", "")
        raise RuntimeError

# after the launch_process() call...
pipein = os.fdopen(parentStdin)  # I saved both the parentStdin and parentStdout variables as globals
while True:
    time.sleep(3)
    os.write(parentStdout, ("command").encode())
    line = pipein.readline()  # the program stops here
    print(line)
This code launches a child process, but pipein.readline() blocks without ever returning a result. What's wrong?
Edit: the child process runs the following echo-loop program ("echo.py"):
import sys

while True:
    value = input()
    print(value)
    sys.stdout.flush()
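For comparison, a minimal sketch of the same kind of parent/child exchange done with subprocess pipes instead of raw os.pipe/fork (assuming echo.py is the loop shown above, saved as a file next to this script):

import subprocess
import sys

# Start the echo child with both ends connected to pipes; text=True gives
# str lines instead of bytes. Running it via sys.executable avoids relying
# on a shebang line.
child = subprocess.Popen([sys.executable, "echo.py"],
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         text=True)

for _ in range(3):
    child.stdin.write("command\n")   # input()/readline() wait for a newline
    child.stdin.flush()
    print(child.stdout.readline().rstrip())

child.terminate()
child.wait()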

Multiprocessing, writing to file, and deadlock on large loops

I have a very weird problem with the code below. When numrows = 10, the Process loops complete and the program finishes. If the growing list becomes larger, it ends up in a deadlock. Why is this, and how can I solve it?
import multiprocessing, time, sys

# ----------------- Calculation Engine -------------------
def feed(queue, parlist):
    for par in parlist:
        queue.put(par)

def calc(queueIn, queueOut):
    while True:
        try:
            par = queueIn.get(block=False)
            print "Project ID: %s started. " % par
            res = doCalculation(par)
            queueOut.put(res)
        except:
            break

def write(queue, fname):
    print 'Started to write to file'
    fhandle = open(fname, "w")
    while True:
        try:
            res = queue.get(block=False)
            for m in res:
                print >>fhandle, m
        except:
            break
    fhandle.close()
    print 'Complete writing to the file'

def doCalculation(project_ID):
    numrows = 100
    toFileRowList = []
    for i in range(numrows):
        toFileRowList.append([project_ID]*100)
        print "%s %s" % (multiprocessing.current_process().name, i)
    return toFileRowList

def main():
    parlist = [276, 266]
    nthreads = multiprocessing.cpu_count()
    workerQueue = multiprocessing.Queue()
    writerQueue = multiprocessing.Queue()
    feedProc = multiprocessing.Process(target=feed, args=(workerQueue, parlist))
    calcProc = [multiprocessing.Process(target=calc, args=(workerQueue, writerQueue)) for i in range(nthreads)]
    writProc = multiprocessing.Process(target=write, args=(writerQueue, 'somefile.csv'))
    feedProc.start()
    feedProc.join()
    for p in calcProc:
        p.start()
    for p in calcProc:
        p.join()
    writProc.start()
    writProc.join()

if __name__ == '__main__':
    sys.exit(main())
I think the problem is the Queue buffer getting filled, so you need to read from the queue before you can put additional items into it.
For example, in your feed process you have:
queue.put(par)
If you keep putting items without anything reading them, this will eventually block until the buffer is freed, but the problem is that you only free the buffer in your calc processes, which in turn don't get started before you join your blocking feed process.
So, in order for your feed process to finish, the buffer would have to be freed, but the buffer won't be freed before that process finishes :)
Try organizing your queue accesses better.
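A tiny demonstration of that blocking behaviour, using an explicit maxsize for brevity (with the default Queue the limit is the underlying pipe buffer, so it takes far more items to hit it):

import multiprocessing
import queue

q = multiprocessing.Queue(maxsize=2)
q.put(1)
q.put(2)
try:
    q.put(3, timeout=1)          # no consumer is draining the queue yet
except queue.Full:
    print("put() blocked: the queue is full and nothing is reading it")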
The feedProc and the writProc are not actually running in parallel with the rest of your program. When you have
proc.start()
proc.join()
you start the process and then, with the join(), immediately wait for it to finish. In this case there's no gain from multiprocessing, only overhead. Try to start ALL the processes before you join any of them. This will also have the effect that your queues get emptied regularly and you won't deadlock.
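For illustration, here is a sketch of that restructuring, using sentinel values instead of non-blocking gets so each worker knows when to stop (an illustration of the idea, not the poster's exact code):

import multiprocessing

SENTINEL = None

def feed(queue, parlist, nworkers):
    for par in parlist:
        queue.put(par)
    for _ in range(nworkers):          # one stop marker per calc worker
        queue.put(SENTINEL)

def calc(queueIn, queueOut):
    while True:
        par = queueIn.get()            # blocking get, no busy polling
        if par is SENTINEL:
            break
        queueOut.put([[par] * 100 for _ in range(100)])

def write(queue, fname):
    with open(fname, "w") as fhandle:
        while True:
            res = queue.get()
            if res is SENTINEL:
                break
            for m in res:
                fhandle.write("%s\n" % m)

if __name__ == '__main__':
    parlist = [276, 266]
    nworkers = multiprocessing.cpu_count()
    workerQueue = multiprocessing.Queue()
    writerQueue = multiprocessing.Queue()

    feedProc = multiprocessing.Process(target=feed, args=(workerQueue, parlist, nworkers))
    calcProcs = [multiprocessing.Process(target=calc, args=(workerQueue, writerQueue))
                 for _ in range(nworkers)]
    writProc = multiprocessing.Process(target=write, args=(writerQueue, 'somefile.csv'))

    # Start everything first, join afterwards, so producers and consumers overlap.
    feedProc.start()
    for p in calcProcs:
        p.start()
    writProc.start()

    feedProc.join()
    for p in calcProcs:
        p.join()
    writerQueue.put(SENTINEL)          # all calcs are done, tell the writer to stop
    writProc.join()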
