Control a running Python process (multiprocessing)

I have yet another question about Python multiprocessing.
I have a module that creates a Process and just runs in a while True loop.
This module is meant to be enabled/disabled from another Python module.
That other module will import the first one once and is also run as a process.
How would I best implement this?
For reference:
# foo.py
from multiprocessing import Process

def foo():
    while True:
        if enabled:
            # do something
            pass

p = Process(target=foo)
p.start()
and imagine the second module to be something like this:
# bar.py
from multiprocessing import Process
import foo, time

def bar():
    while True:
        foo.enable()
        time.sleep(10)
        foo.disable()

Process(target=bar).start()
Constantly running a process that checks a condition inside a loop seems like a waste, but I would gladly accept a solution that just lets me set the enabled value from outside.
Ideally I would prefer to be able to terminate and restart the process, again from outside of this module.
From my understanding, I would use a Queue to pass commands to the Process. If it is indeed just that, can someone show me how to set it up in a way that lets me add something to the queue from a different module?
Can this even be easily done in Python, or is it time to abandon hope and switch to something like C or Java?

I proposed in a comment two different approaches:
using a shared variable from multiprocessing.Value
pausing/resuming the process with signals
Control by sharing a variable
from multiprocessing import Process, Value
import time

def target_process_1(run_statement):
    while True:
        if run_statement.value:
            print("I'm running!")
        time.sleep(1)

def target_process_2(run_statement):
    time.sleep(3)
    print("Stopping")
    run_statement.value = False
    time.sleep(3)
    print("Resuming")
    run_statement.value = True

if __name__ == "__main__":
    run_statement = Value("i", 1)  # shared flag read by the worker

    process_1 = Process(target=target_process_1, args=(run_statement,))
    process_2 = Process(target=target_process_2, args=(run_statement,))

    process_1.start()
    process_2.start()

    time.sleep(8)
    process_1.terminate()
    process_2.terminate()
Control by sending a signal
from multiprocessing import Process
import time
import os, signal

def target_process_1():
    while True:
        print("Running!")
        time.sleep(1)

def target_process_2(target_pid):
    time.sleep(3)
    os.kill(target_pid, signal.SIGSTOP)  # suspend the other process (POSIX-only)
    time.sleep(3)
    os.kill(target_pid, signal.SIGCONT)  # resume it

if __name__ == "__main__":
    process_1 = Process(target=target_process_1)
    process_1.start()

    process_2 = Process(target=target_process_2, args=(process_1.pid,))
    process_2.start()

    time.sleep(8)
    process_1.terminate()
    process_2.terminate()
Side note: if possible, avoid busy-waiting in a bare while True loop, as shown in the sketch below.
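One way to follow that advice while keeping the pause/resume behaviour is a multiprocessing.Event, whose wait() blocks instead of polling a flag once per second. A minimal sketch of my own, not part of the original answer:

from multiprocessing import Process, Event
import time

def target(run_event):
    while True:
        run_event.wait()  # blocks here while paused, consuming no CPU
        print("I'm running!")
        time.sleep(1)

if __name__ == "__main__":
    run_event = Event()
    run_event.set()  # start in the enabled state

    process = Process(target=target, args=(run_event,))
    process.start()

    time.sleep(3)
    run_event.clear()  # pause: target blocks on wait()
    time.sleep(3)
    run_event.set()    # resume
    time.sleep(3)
    process.terminate()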
EDIT: if you want to manage your process from two different files, and supposing you want to control it by sharing a variable, here is one way to do it.
# file foo.py
from multiprocessing import Value, Process
import time

__all__ = ['start', 'stop', 'enable', 'disable']

_statement = None
_process = None

def _target(run_statement):
    """Target of foo's process."""
    while True:
        if run_statement.value:
            print("I'm running!")
        time.sleep(1)

def start():
    global _process, _statement
    _statement = Value("i", 1)
    _process = Process(target=_target, args=(_statement,))
    _process.start()

def stop():
    global _process, _statement
    _process.terminate()
    _statement, _process = None, None

def enable():
    _statement.value = True

def disable():
    _statement.value = False
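And if you do want the Queue-based command channel the question asks about, a minimal sketch could look like this (my illustration; the command strings 'enable', 'disable' and 'stop' are made up for the example):

# file foo.py
from multiprocessing import Process, Queue
from queue import Empty
import time

_commands = None
_process = None

def _target(commands):
    enabled = False
    while True:
        try:
            # block briefly instead of spinning; this also paces the loop
            cmd = commands.get(timeout=1)
            if cmd == "stop":
                break
            enabled = (cmd == "enable")
        except Empty:
            pass
        if enabled:
            print("I'm running!")

def start():
    global _process, _commands
    _commands = Queue()
    _process = Process(target=_target, args=(_commands,))
    _process.start()

def enable():
    _commands.put("enable")

def disable():
    _commands.put("disable")

def stop():
    _commands.put("stop")
    _process.join()

Any module that imports foo can then call foo.start() once and foo.enable() / foo.disable() at will, exactly like the Value-based version above.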

Related

If two multiprocessing processes can request input on the terminal, is there a way to pause one of them until the answer is given?

As can be seen in the code below, the two processes run together, but each has a moment where it asks for input() in the terminal. Is there any way to pause the other process until the answer is given in the terminal?
File Code_One (an archaic and simple example to speed up the explanation):
from time import sleep

def main():
    sleep(1)
    print('run')
    sleep(1)
    print('run')
    sleep(1)
    input('Please, give the number:')
File Code_Two (an archaic and simple example to speed up the explanation):
from time import sleep

def main():
    sleep(2)
    input('Please, give the number:')
    sleep(1)
    print('run 2')
    sleep(1)
    print('run 2')
    sleep(1)
    print('run 2')
    sleep(1)
    print('run 2')
    sleep(1)
    print('run 2')
File Main_Code:
import Code_One
import Code_Two
import multiprocessing
from time import sleep

def main():
    while True:
        pression = multiprocessing.Process(target=Code_One.main)
        xgoals = multiprocessing.Process(target=Code_Two.main)
        pression.start()
        xgoals.start()
        pression.join()
        xgoals.join()
        print('Done')
        sleep(5)

if __name__ == '__main__':
    main()
How should I proceed in this situation?
In this example, since one process doesn't pause the other, whenever one of them asks for input this error happens:

input('Please, give the number:')
EOFError: EOF when reading a line
Sure, this is possible. To do it you will need to use some sort of interprocess communication (IPC) mechanism so the two processes can coordinate. time.sleep is not the best option though; there are much more efficient tools made specifically for this problem.
Probably the most efficient way is to use a multiprocessing.Event, like this:
import multiprocessing
import sys
import os

def Code_One(event, fno):
    proc_name = multiprocessing.current_process().name
    print(f'running {proc_name}')
    sys.stdin = os.fdopen(fno)  # reattach stdin in the child (see note below)
    val = input('give proc 1 input: ')
    print(f'proc 1 got input: {val}')
    event.set()

def Code_Two(event, fno):
    proc_name = multiprocessing.current_process().name
    print(f'running {proc_name} and waiting...')
    event.wait()  # block until Code_One calls event.set()
    sys.stdin = os.fdopen(fno)
    val = input('give proc 2 input: ')
    print(f'proc 2 got input {val}')

if __name__ == '__main__':
    event = multiprocessing.Event()
    pression = multiprocessing.Process(name='code_one', target=Code_One, args=(event, sys.stdin.fileno()))
    xgoals = multiprocessing.Process(name='code_two', target=Code_Two, args=(event, sys.stdin.fileno()))
    xgoals.start()
    pression.start()
    xgoals.join()
    pression.join()
This creates the event object, and the two subprocesses. Event objects have an internal flag that starts out False, and can then be toggled True by any process calling event.set(). If a process calls event.wait() while the flag is False, that process will block until another process calls event.set().
The event is created in the parent process and passed to each subprocess as an argument. Code_Two begins and calls event.wait(), which blocks until the internal flag in the event is set to True. Code_One executes immediately and then calls event.set(), which sets the event's internal flag to True and allows Code_Two to proceed. At that point both processes have returned, the parent's join calls complete, and the program ends.
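A standalone illustration of those Event semantics, stripped of the stdin handling (a sketch of my own, not part of the solution above):

import multiprocessing
import time

def waiter(event):
    print('waiting for the flag...')
    event.wait()  # blocks while the internal flag is False
    print('released')

if __name__ == '__main__':
    event = multiprocessing.Event()  # flag starts out False
    p = multiprocessing.Process(target=waiter, args=(event,))
    p.start()
    time.sleep(1)
    event.set()  # flips the flag to True; waiter unblocks
    p.join()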
This is a little hacky because it also passes the stdin file number from the parent to the child processes. That is necessary because when subprocesses are forked, those file descriptors are closed, so for a child process to read stdin using input it first needs to open the correct input stream (that is what sys.stdin = os.fdopen(fno) is doing). It won't work to just send sys.stdin to the child as another argument, because of the mechanics Python uses to set up the environment for forked processes (sys.stdin is an IO wrapper object and is not pickleable).

Why is my code running from the beginning infinitely when it should not?

I have two functions that are going to do something. I need them to run simultaneously until the except case happens, so I have used multiprocessing, but whenever I execute my code it runs from the beginning of the file infinitely. There are a few lines of code and then my two functions. My code looks something like this:
'''
few lines of code
'''

def func_1():
    # Do Something
    pass

def func_2():
    # Do Something
    pass

while True:
    try:
        if __name__ == '__main__':
            sensor_process = Process(target=sensor)
            sensor_process.start()
            balance_process = Process(target=balance_fun)
            balance_process.start()
    except (KeyboardInterrupt, SystemExit):
        break
Is there anything wrong with my multiprocessing that makes my code execute from the beginning infinitely, or is the problem somewhere else?
There are a few points to note about your code. First, wanting to execute multiple functions does not mean you should create multiple processes each time, as you are currently doing; you only need one process or thread per function.
Second, I assume you want your functions to run simultaneously forever, so you need to put the infinite loop inside each function.
from time import sleep
from multiprocessing import Process

def func_1():
    # Do Something
    while True:
        print("hello from func_1")
        sleep(1)

def func_2():
    # Do Something
    while True:
        print("hello from func_2")
        sleep(1)

if __name__ == '__main__':
    try:
        sensor_process = Process(target=func_1)
        sensor_process.start()
        balance_process = Process(target=func_2)
        balance_process.start()
        # if you set a control (flag) in both func_1 and func_2, these two
        # lines would wait until those flags are released
        sensor_process.join()
        balance_process.join()
    except (KeyboardInterrupt, SystemExit):
        pass
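The control flag mentioned in that comment could be, for example, a multiprocessing.Event shared by both functions (a sketch of my own, not the answerer's code):

from multiprocessing import Process, Event
from time import sleep

def func_1(stop):
    while not stop.is_set():  # loop until the main process sets the flag
        print("hello from func_1")
        sleep(1)

if __name__ == '__main__':
    stop = Event()
    p = Process(target=func_1, args=(stop,))
    p.start()
    try:
        p.join()  # waits here while func_1 loops
    except (KeyboardInterrupt, SystemExit):
        stop.set()  # release the flag so func_1 returns and join() can finish
        p.join()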
Put a sleep in the main process so it can catch your keyboard interrupt:
import time
from multiprocessing import Process

while True:
    try:
        if __name__ == '__main__':
            sensor_process = Process(target=sensor)
            sensor_process.start()
            balance_process = Process(target=balance_fun)
            balance_process.start()
            time.sleep(1)
    except (KeyboardInterrupt, SystemExit):
        break
Besides, I think creating new processes in an infinite loop is not good practice. I think you meant to do something like this:
from multiprocessing import Process

def sensor():
    # Do Something
    pass

def balance_fun():
    # Do Something
    pass

if __name__ == '__main__':
    try:
        function_list = [sensor, balance_fun]
        process_list = list()

        # start one process per function
        for function in function_list:
            proc = Process(target=function)
            proc.start()
            process_list.append(proc)

        # wait for all of them to finish
        for proc in process_list:
            proc.join()
    except (KeyboardInterrupt, SystemExit):
        pass
This will run each function in a separate process and wait for both processes to finish before exiting. Also, if you add more functions, you just need to add them to function_list instead of copying and modifying a block of code.

Get live value of variable from another script

I have two scripts, new.py and test.py.
test.py
import time

while True:
    x = "hello"
    time.sleep(1)
    x = "world"
    time.sleep(1)
new.py
import time

while True:
    import test
    x = test.x
    print(x)
    time.sleep(1)
Now, from my understanding, this should print "hello" and a second later "world", over and over, when executing new.py.
It does not print anything; how can I fix that?
Thanks
I think the code below captures what you are asking. Here I simulate two scripts running independently (by using threads), then show how you can use shelve to communicate between them. Note, there are likely much better ways to get to what you are after -- but if you absolutely must run the scripts independently, this will work for you.
Incidentally, any persistent source would do (such as a database).
import shelve
import time
import threading

def script1():
    # polls the shared shelve until another "script" sets the flag
    while True:
        with shelve.open('my_store') as holder3:
            if holder3['flag'] is not None:
                break
        print('waiting')
        time.sleep(1)
    print("Done")

def script2():
    print("writing")
    with shelve.open('my_store') as holder2:
        holder2['flag'] = 1

if __name__ == "__main__":
    with shelve.open('my_store') as holder1:
        holder1['flag'] = None

    t = threading.Thread(target=script1)
    t.start()
    time.sleep(5)
    script2()
    t.join()
Yields:
waiting
waiting
waiting
waiting
waiting
writing
Done
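As noted above, any persistent store works; the same handshake with a plain marker file instead of shelve might look like this (a sketch, assuming both scripts agree on the hypothetical path 'flag.txt'):

import os
import time

def script1():
    while not os.path.exists('flag.txt'):  # poll for the marker file
        print('waiting')
        time.sleep(1)
    print('Done')

def script2():
    print('writing')
    with open('flag.txt', 'w') as f:
        f.write('1')  # creating the file is the signal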
test.py
import time

def hello():
    callList = ['hello', 'world']
    for item in callList:
        print(item)
        time.sleep(1)

hello()
new.py
from test import hello

while True:
    hello()

Basic multiprocessing with infinity loop and queue

import random
import queue as Queue
import _thread as Thread

a = Queue.Queue()

def af():
    while True:
        a.put(random.randint(0, 1000))

def bf():
    while True:
        if not a.empty():
            print(a.get())

def main():
    Thread.start_new_thread(af, ())
    Thread.start_new_thread(bf, ())
    return

if __name__ == "__main__":
    main()
The above code works, but with extremely high CPU usage. I tried to use multiprocessing, to no avail. I have tried
def main():
    multiprocessing.Process(target=af).run()
    multiprocessing.Process(target=bf).run()
and
def main():
    manager = multiprocessing.Manager()
    a = manager.Queue()
    pool = multiprocessing.Pool()
    pool.apply_async(af)
    pool.apply_async(bf)
Neither works. Can anyone please help me? Thanks a bunch ^_^
def main():
    multiprocessing.Process(target=af).run()  # will not return
    multiprocessing.Process(target=bf).run()
The above code does not work because run() executes the target in the calling process, and af never returns, so bf never gets a chance to run. You need to replace the run() calls with start()/join() so that both can run in parallel (and make them share a manager.Queue).
To make the second code work, you need to pass a (a manager.Queue object) to the functions. Otherwise they will use the global Queue.Queue object, which is not shared between processes; you need to modify af and bf to accept a, and main to pass it.
import random
import multiprocessing

def af(a):
    while True:
        a.put(random.randint(0, 1000))

def bf(a):
    while True:
        print(a.get())

def main():
    manager = multiprocessing.Manager()
    a = manager.Queue()
    pool = multiprocessing.Pool()
    proc1 = pool.apply_async(af, [a])
    proc2 = pool.apply_async(bf, [a])
    # Wait until the processes end. Uncomment the following lines
    # if there is no other code that waits.
    # proc1.get()
    # proc2.get()
In the first alternative main you use Process, but the method you should call to start the activity is not run(), as one might think, but rather start(). You will want to follow that up with appropriate join() calls. Following the information in the multiprocessing documentation (https://docs.python.org/2/library/multiprocessing.html), here is a working sample:
import random
from multiprocessing import Process, Queue

def af(q):
    while True:
        q.put(random.randint(0, 1000))

def bf(q):
    while True:
        if not q.empty():
            print(q.get())

def main():
    a = Queue()
    p = Process(target=af, args=(a,))
    c = Process(target=bf, args=(a,))
    p.start()
    c.start()
    p.join()
    c.join()

if __name__ == "__main__":
    main()
To add to the accepted answer, in the original code:
while True:
    if not q.empty():
        print(q.get())
q.empty() is called on every iteration, which is unnecessary: if the queue is empty, q.get() will simply block until something is available (see the Queue documentation).
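With the check removed, the consumer loop reduces to (same q as above):

def bf(q):
    while True:
        print(q.get())  # blocks until an item is available; no busy-waiting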
I assume this could affect performance, since calling .empty() on every iteration consumes extra resources (it should be more noticeable with Thread instead of Process, because of the Python Global Interpreter Lock (GIL)).
I know it's an old question but hope it helps!

Can I somehow avoid using time.sleep() in this script?

I have the following python script:
#! /usr/bin/python
import os
from gps import *
from time import *
import time
import threading
import sys

gpsd = None  # setting the global variable

class GpsPoller(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        global gpsd  # bring it into scope
        gpsd = gps(mode=WATCH_ENABLE)  # starting the stream of info
        self.current_value = None
        self.running = True  # setting the thread running to true

    def run(self):
        global gpsd
        while gpsp.running:
            gpsd.next()  # this will continue to loop and grab EACH set of gpsd info to clear the buffer

if __name__ == '__main__':
    gpsp = GpsPoller()  # create the thread
    try:
        gpsp.start()  # start it up
        while True:
            print gpsd.fix.speed
            time.sleep(1)  ## <<<< THIS LINE HERE
    except (KeyboardInterrupt, SystemExit):  # when you press ctrl+c
        print "\nKilling Thread..."
        gpsp.running = False
        gpsp.join()  # wait for the thread to finish what it's doing
    print "Done.\nExiting."
I'm not very good with python, unfortunately. The script should be multi-threaded somehow (but that probably doesn't matter in the scope of this question).
What baffles me is the gpsd.next() line. If I understand it correctly, it is supposed to tell the script that new gps data has been acquired and is ready to be read.
However, I read the data in an infinite while True loop with a one-second pause, time.sleep(1).
What this does, however, is sometimes echo the same data twice (when the sensor hasn't updated the data in the last second). I figure it also skips some sensor data somehow.
Can I somehow change the script to print the current speed not every second, but every time the sensor reports new data? According to the data sheet that should be every second (a 1 Hz sensor), but obviously it isn't exactly one second; it varies by milliseconds.
As a generic design rule, you should have one thread for each input channel or, more generally, for each "loop over a blocking call". Blocking means that execution stops at that call until data arrives; e.g. gpsd.next() is such a call.
To synchronize multiple input channels, use a Queue and one extra thread. Each input thread should put its "events" on the (same) queue. The extra thread loops over queue.get() and reacts appropriately.
From this point of view, your script need not be multithreaded, since there is only one input channel, namely the gpsd.next() loop.
Example code:
from gps import *

class GpsPoller(object):
    def __init__(self, action):
        self.gpsd = gps(mode=WATCH_ENABLE)  # starting the stream of info
        self.action = action

    def run(self):
        while True:
            self.gpsd.next()
            self.action(self.gpsd)

def myaction(gpsd):
    print gpsd.fix.speed

if __name__ == '__main__':
    gpsp = GpsPoller(myaction)
    gpsp.run()  # runs until killed by Ctrl-C
Note how the use of the action callback separates the plumbing from the data evaluation.
To embed the poller into a script doing other stuff (i.e. handling other threads as well), use the queue approach. Example code, building on the GpsPoller class:
from threading import Thread
from Queue import Queue

class GpsThread(object):
    def __init__(self, valuefunc, queue):
        self.valuefunc = valuefunc
        self.queue = queue
        self.poller = GpsPoller(self.on_value)

    def start(self):
        self.t = Thread(target=self.poller.run)
        self.t.daemon = True  # kill thread when main thread exits
        self.t.start()

    def on_value(self, gpsd):
        # note that we extract the value right here.
        # Otherwise it could change while the event is in the queue.
        self.queue.put(('gps', self.valuefunc(gpsd)))

def main():
    q = Queue()
    gt = GpsThread(
        valuefunc=lambda gpsd: gpsd.fix.speed,
        queue=q
    )
    print 'press Ctrl-C to stop.'
    gt.start()
    while True:
        # blocks while q is empty.
        source, data = q.get()
        if source == 'gps':
            print data
The "action" we give to the GpsPoller says "calculate a value by valuefunc and put it in the queue". The mainloop sits there until a value pops out, then prints it and continues.
It is also straightforward to put other threads' events on the queue and add the appropriate handling code; see the sketch below.
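For example, a hypothetical timer thread could feed the same queue alongside the GPS events (the 'timer' tag and the interval are made up for illustration):

import time
from threading import Thread

def timer_events(queue, interval=5):
    while True:
        time.sleep(interval)
        queue.put(('timer', time.time()))  # same (source, data) shape as the gps events

# in main(), next to gt.start():
#   Thread(target=timer_events, args=(q,)).start()
# and in the loop, next to the 'gps' case:
#   elif source == 'timer':
#       print data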
I see two options here:
GpsPoller checks whether the data changed and raises a flag
GpsPoller checks whether the data changed and puts the new data in a queue
Option #1:
is_speed_changed = False  # module-level flag

def run(self):
    global gpsd, is_speed_changed
    while gpsp.running:
        prev_speed = gpsd.fix.speed
        gpsd.next()
        if prev_speed != gpsd.fix.speed:
            is_speed_changed = True  # raise the flag

while True:
    if is_speed_changed:
        print gpsd.fix.speed
        is_speed_changed = False
Option #2 (I prefer this one, since it protects us from race conditions):
import Queue
gpsd_queue = Queue.Queue()

def run(self):
    global gpsd
    while gpsp.running:
        prev_speed = gpsd.fix.speed
        gpsd.next()
        curr_speed = gpsd.fix.speed
        if prev_speed != curr_speed:
            gpsd_queue.put(curr_speed)  # put the new speed on the queue

while True:
    # get() will block if the queue is empty
    print gpsd_queue.get()
