I have two processes, and I need that when something happens in one process, something else will happen in the other.
For example:
import multiprocessing

def function_1(num):
    while True:
        status = False
        for i in range(num):
            if i == 100:
                status = True
            i += 1

def function_2():
    while True:
        if status == True:
            print("status changed")

if __name__ == '__main__':
    num = 101
    a = multiprocessing.Process(target=function_1, args=(num,))
    b = multiprocessing.Process(target=function_2)
    a.start()
    b.start()
    a.join()
    b.join()
This code obviously does not work; how can I make it work? I don't need one process to end and then get the result, I need the processes to keep running after that... is there a way to do that?
thank you!
Instead of using a shared variable to make function_2 wait until function_1 reaches a certain state, you can create a multiprocessing.Queue instance and pass it to both functions. Queue.get blocks until the queue receives something to dequeue, so function_1 can put something into the queue once it reaches the desired state:
import multiprocessing

def function_1(queue, num):
    while True:
        for i in range(num):
            print(i)
            if i == 3:
                queue.put(None)

def function_2(queue):
    queue.get()
    print('do something')

if __name__ == '__main__':
    num = 5
    queue = multiprocessing.Queue()
    a = multiprocessing.Process(target=function_1, args=(queue, num))
    b = multiprocessing.Process(target=function_2, args=(queue,))
    a.start()
    b.start()
You forgot to add .join() calls after the start() calls.
Try this:
a.start()
b.start()
a.join()
b.join()
import multiprocessing

global stop
stop = False

def makeprocesses():
    processes = []
    for _ in range(50):
        p = multiprocessing.Process(target=runprocess)
        processes.append(p)
    for _ in range(50):
        processes[_].start()
    runprocess()

def runprocess():
    global stop
    while stop == False:
        x = 1  # do something here
        if x == 1:
            stop = True
            makeprocesses()
    while stop == True:
        x = 0

makeprocesses()
How could I make all the other 49 processes stop if just one changes stop to True?
I would think that since stop is a global variable, once one process changes stop, all the others would stop.
No. Each process gets its own copy. It's global to the script, but not across processes. Remember that each process has a completely separate address space. It gets a COPY of the first process' data.
If you need to communicate across processes, you need to use one of the synchronization techniques in the multiprocessing documentation (https://docs.python.org/3/library/multiprocessing.html#synchronization-primitives), like an Event or a shared object.
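For example, here is a minimal sketch of the shared-object option, using multiprocessing.Value as a shared stop flag (the names stop_flag and runprocess are illustrative, not taken from the question's code):

import multiprocessing
import time

def runprocess(stop_flag, name):
    while not stop_flag.value:   # every process polls the same shared flag
        time.sleep(0.1)
        if name == 1:            # pretend process 1 hits the stop condition
            stop_flag.value = True

if __name__ == '__main__':
    stop_flag = multiprocessing.Value('b', False)   # shared boolean flag
    processes = [multiprocessing.Process(target=runprocess, args=(stop_flag, i))
                 for i in range(50)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()

All 50 processes read the same shared flag, so as soon as one of them sets it, the others fall out of their loops.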
Whenever you want to synchronise processes you need some shared context, and you need to make sure it is safe. As @Tim Roberts mentioned, these can be taken from the synchronization primitives in the multiprocessing documentation (https://docs.python.org/3/library/multiprocessing.html#synchronization-primitives).
Try something like this:
import multiprocessing
from multiprocessing import Event
from time import sleep

def makeprocesses():
    processes = []
    e = Event()
    for i in range(50):
        p = multiprocessing.Process(target=runprocess, args=(e, i))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()

def runprocess(e, name=0):
    while not e.is_set():
        sleep(1)
        if name == 1:
            e.set()  # here we make all the other processes stop
    print("end")

if __name__ == '__main__':
    makeprocesses()
My favorite way is using a cancellation token, which is an object wrapping what we did here.
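For illustration, a cancellation token can be nothing more than a small class wrapping the Event used above (CancellationToken is a made-up name for this sketch, not a multiprocessing API):

from multiprocessing import Event

class CancellationToken:
    def __init__(self):
        self._event = Event()

    def cancel(self):
        self._event.set()        # request cancellation

    @property
    def cancelled(self):
        return self._event.is_set()

A worker would then loop on while not token.cancelled, and any process holding the token can call token.cancel() to stop them all.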
This only replicates my problem: the main Python script goes to 100% load if it tries to run a control loop over a shared queue.
import multiprocessing
import random

def func1(num, q):
    while True:
        num = random.randint(1, 101)
        if q.empty():
            q.put(num)

def func2(num, q):
    while True:
        num = q.get()
        num = num ** 2
        if q.empty():
            q.put(num)

num = 2
q = multiprocessing.Queue()

p1 = multiprocessing.Process(target=func1, args=(num, q))
p2 = multiprocessing.Process(target=func2, args=(num, q))
p1.daemon = True
p2.daemon = True
p1.start()
p2.start()

running = True
while running:
    if not q.empty():
        num = q.get(True, 0.1)
        print(num)
Would there be a better method to control multiple worker processes from a script? Better in the sense of no load!?
I'm not sure I understand your program:
What's with the num parameter of func1() and func2()? It never gets used.
func2 will discard its result if func1 happens to have posted another number after func2 got the last number out of the queue.
Why do you daemonize the workers? Are you quite sure this is what you want?
The if not q.empty(): q.get() construct in the main code will sooner or later raise a queue.Empty exception because it's a race between it and the q.get() in func2.
The uncaught queue.Empty exception will terminate the main process, leaving the two workers orphaned - and running.
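A small sketch of one way to tolerate that race in the question's main loop: catch queue.Empty instead of trusting q.empty() (this is not from the question's code, just an illustration that reuses its q and running names):

import queue  # the Empty exception lives in the standard queue module

while running:
    try:
        num = q.get(True, 0.1)   # block for at most 0.1 seconds
        print(num)
    except queue.Empty:
        pass                     # nothing arrived in time; loop and try again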
General advice:
Use different queues for issuing jobs (request queue) and collecting results (response queue). Include the request in the response if necessary.
Think about how to terminate the workers. Consider a "poison pill", i.e. a value in the request queue that causes workers to die, i.e. exit/terminate (a sketch of this is shown after the sample code below).
Be really really sure you understand the race conditions in your code, like the one I mentioned above (empty vs. get).
Here's some sample code I hacked up:
import multiprocessing
import time
import random
import os

def request_generator(requests):
    while True:
        requests.put(random.randint(1, 101))
        time.sleep(0.01)

def worker(requests, responses):
    worker_id = os.getpid()
    while True:
        request = requests.get()
        response = request ** 2
        responses.put((request, response, worker_id))

def main():
    requests = multiprocessing.Queue()
    responses = multiprocessing.Queue()
    gen = multiprocessing.Process(target=request_generator, args=(requests,))
    w1 = multiprocessing.Process(target=worker, args=(requests, responses))
    w2 = multiprocessing.Process(target=worker, args=(requests, responses))
    gen.start()
    w1.start()
    w2.start()
    while True:
        req, resp, worker_id = responses.get()
        print("worker {}: {} => {}".format(worker_id, req, resp))

if __name__ == "__main__":
    main()
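As a sketch of the "poison pill" idea on top of the sample above (None is an arbitrary choice of sentinel value; requests and responses are the queues from the sample code):

import os

POISON_PILL = None  # arbitrary sentinel; any value the workers never see as a request would do

def worker(requests, responses):
    worker_id = os.getpid()
    while True:
        request = requests.get()
        if request is POISON_PILL:   # a worker that sees the pill exits its loop
            break
        responses.put((request, request ** 2, worker_id))

The controlling process then puts one pill per worker into the request queue when it wants them to shut down, e.g. requests.put(POISON_PILL) twice for the two workers, and can join() them afterwards.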
I am new to Python and have very little knowledge about threads in Python. Here is my sample code.
import threading
from threading import Thread
import time

check = False

def func1():
    print("funn1 started")
    while check:
        print("got permission")

def func2():
    global check
    print("func2 started")
    time.sleep(2)
    check = True
    time.sleep(2)
    check = False

if __name__ == '__main__':
    Thread(target=func1).start()
    Thread(target=func2).start()
What I want is to see "got permission" as the output, but with my current code it is not happening. I assume that the func1 thread is closed before func2 changes the check value to True.
How can I keep func1 alive?
I have researched on the internet but I could not find a solution.
Any help would be appreciated.
Thank you in advance!
The problem here is that func1 performs the check in the while loop, finds it is false, and terminates. So the first thread finishes without printing "got permission".
I don't think this mechanism is quite what you are looking for. I would opt to use a Condition like this,
import threading
from threading import Thread
import time

check = threading.Condition()

def func1():
    print("funn1 started")
    check.acquire()
    check.wait()
    print("got permission")
    print("funn1 finished")

def func2():
    print("func2 started")
    check.acquire()
    time.sleep(2)
    check.notify()
    check.release()
    time.sleep(2)
    print("func2 finished")

if __name__ == '__main__':
    Thread(target=func1).start()
    Thread(target=func2).start()
Here the condition variable uses a mutex internally to communicate between the threads, so only one thread can acquire the condition variable at a time. The first function acquires the condition variable and then releases it, but registers that it is going to wait until it receives a notification via the condition variable. The second thread can then acquire the condition variable and, when it has done what it needs to do, it notifies the waiting thread that it can continue.
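The same synchronisation can be written with the condition's context manager, which handles the acquire/release pairing automatically; this is just a restyling of the answer above, not a different mechanism:

def func1():
    print("funn1 started")
    with check:            # acquires the condition, releases it on exit
        check.wait()
    print("got permission")
    print("funn1 finished")

def func2():
    print("func2 started")
    with check:
        time.sleep(2)
        check.notify()     # wake the waiting thread
    time.sleep(2)
    print("func2 finished")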
from threading import Thread
import time

check = False

def func1():
    print("funn1 started")
    while True:
        if check:
            print("got permission")
            break

def func2():
    global check
    print("func2 started")
    time.sleep(2)
    check = True
    time.sleep(2)
    check = False

if __name__ == '__main__':
    Thread(target=func1).start()
    Thread(target=func2).start()
func1 must be like this (the sleep keeps it from busy-waiting while it polls check):
def func1():
    print("func1 started")
    while True:
        if check:
            print("got permission")
            break
        else:
            time.sleep(0.1)
I want to trigger upfunction and stop it when it writes 3 to the file. Basically I want to stop a thread once the condition is met, as shown below.
from threading import Thread
from time import sleep

def getstatus():
    fh = open(filename, 'r')
    return fh.read()

def upfunction(arg):
    for i in range(arg):
        print("backup running")
        print(getstatus())
        target = open(filename, 'w')
        target.write(str(i))
        sleep(1)

if __name__ == "__main__":
    thread = Thread(target=upfunction, args=(10,))
    thread.start()
    print(getstatus())
    while getstatus() != "3":
        print("NOT 3 ")
        sleep(0.5)
        continue
    thread.stop()
    print("thread finished...exiting")
It shows
AttributeError: 'Thread' object has no attribute 'stop'
Please see me as a newbie to Python.
Any help will be highly appreciated
'Thread' object has no attribute 'stop' is a helpful answer from the Python interpreter: threads simply have no stop() method.
You should place the thread termination condition in upfunction.
def upfunction(arg):
    i = 0
    while getstatus() != "3":
        print("backup running")
        print(getstatus())
        target = open(filename, 'w')
        target.write(str(i))
        i += 1
        sleep(1)

if __name__ == "__main__":
    thread = Thread(target=upfunction, args=(10,))
    thread.start()
    print(getstatus())
    print("thread finished...exiting")
You can just use the threading daemon flag so this new thread is killed automatically:
thread = Thread(target=upfunction, args=(10,), daemon=True)
thread.start()
When the main thread ends, this daemon thread also dies, so there is no need for a stop() call.
Here is some explanation about the right way to do that: Is there any way to kill a Thread in Python?
And as Lex said, you can add a condition (in upfunction's arguments) to stop your target function.
I want a long-running process to return its progress over a Queue (or something similar) which I will feed to a progress bar dialog. I also need the result when the process is completed. A test example here fails with a RuntimeError: Queue objects should only be shared between processes through inheritance.
import multiprocessing, time

def task(args):
    count = args[0]
    queue = args[1]
    for i in xrange(count):
        queue.put("%d mississippi" % i)
    return "Done"

def main():
    q = multiprocessing.Queue()
    pool = multiprocessing.Pool()
    result = pool.map_async(task, [(x, q) for x in range(10)])
    time.sleep(1)
    while not q.empty():
        print q.get()
    print result.get()

if __name__ == "__main__":
    main()
I've been able to get this to work using individual Process objects (where I am allowed to pass a Queue reference), but then I don't have a pool to manage the many processes I want to launch. Any advice on a better pattern for this?
The following code seems to work:
import multiprocessing, time

def task(args):
    count = args[0]
    queue = args[1]
    for i in xrange(count):
        queue.put("%d mississippi" % i)
    return "Done"

def main():
    manager = multiprocessing.Manager()
    q = manager.Queue()
    pool = multiprocessing.Pool()
    result = pool.map_async(task, [(x, q) for x in range(10)])
    time.sleep(1)
    while not q.empty():
        print q.get()
    print result.get()

if __name__ == "__main__":
    main()
Note that the Queue is obtained from manager.Queue() rather than multiprocessing.Queue(); the manager's queue is a proxy object that can safely be pickled and passed to the pool workers. Thanks Alex for pointing me in this direction.
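Another pattern that seems to work is to hand the plain multiprocessing.Queue to each pool worker once at start-up via the Pool initializer, so it is inherited rather than pickled with every task (a sketch written for Python 3; init_worker is a made-up name):

import multiprocessing, time

def init_worker(q):
    global queue   # each worker process keeps its own reference to the queue
    queue = q

def task(count):
    for i in range(count):
        queue.put("%d mississippi" % i)
    return "Done"

def main():
    q = multiprocessing.Queue()
    pool = multiprocessing.Pool(initializer=init_worker, initargs=(q,))
    result = pool.map_async(task, range(10))
    time.sleep(1)
    while not q.empty():
        print(q.get())
    print(result.get())

if __name__ == "__main__":
    main()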
Making q global works...:
import multiprocessing, time

q = multiprocessing.Queue()

def task(count):
    for i in xrange(count):
        q.put("%d mississippi" % i)
    return "Done"

def main():
    pool = multiprocessing.Pool()
    result = pool.map_async(task, range(10))
    time.sleep(1)
    while not q.empty():
        print q.get()
    print result.get()

if __name__ == "__main__":
    main()
If you need multiple queues, e.g. to avoid mixing up the progress of the various pool processes, a global list of queues should work (of course, each process will then need to know what index in the list to use, but that's OK to pass as an argument;-).
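A sketch, written for Python 3, of how the multiple-queues variant might look (queue_index is a hypothetical parameter name; like the global-queue answer above, this relies on the pool workers inheriting the module-level queues, i.e. the fork start method):

import multiprocessing, time

queues = [multiprocessing.Queue() for _ in range(3)]   # one queue per "channel"

def task(args):
    count, queue_index = args   # each task is told which queue to report into
    for i in range(count):
        queues[queue_index].put("%d mississippi" % i)
    return "Done"

def main():
    pool = multiprocessing.Pool()
    result = pool.map_async(task, [(x, x % len(queues)) for x in range(10)])
    time.sleep(1)
    for idx, q in enumerate(queues):
        while not q.empty():
            print("queue %d: %s" % (idx, q.get()))
    print(result.get())

if __name__ == "__main__":
    main()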