I tried to run the following code:
import multiprocessing
import time

def init_queue():
    print("init g_queue start")
    while not g_queue.empty():
        g_queue.get()
    for _index in range(10):
        g_queue.put(_index)
    print("init g_queue end")
    return

def task_io(task_id):
    print("IOTask[%s] start" % task_id)
    print("the size of queue is %s" % g_queue.qsize())
    while not g_queue.empty():
        time.sleep(1)
        try:
            data = g_queue.get(block=True, timeout=1)
            print("IOTask[%s] get data: %s" % (task_id, data))
        except Exception as excep:
            print("IOTask[%s] error: %s" % (task_id, str(excep)))
    print("IOTask[%s] end" % task_id)
    return

g_queue = multiprocessing.Queue()

if __name__ == '__main__':
    print("the size of queue is %s" % g_queue.qsize())
    init_queue()
    print("the size of queue is %s" % g_queue.qsize())
    time_0 = time.time()
    process_list = [multiprocessing.Process(target=task_io, args=(i,))
                    for i in range(multiprocessing.cpu_count())]
    for p in process_list:
        p.start()
    for p in process_list:
        if p.is_alive():
            p.join()
    print("End:", time.time() - time_0, "\n")
What I got was the following:
the size of queue is 0
init g_queue start
init g_queue end
the size of queue is 10
IOTask[0] start
the size of queue is 0
IOTask[0] end
IOTask[1] start
the size of queue is 0
IOTask[1] end
('End:', 0.6480000019073486, '\n')
What I was expecting was
IOTask[0] start
the size of queue is 10
Because after the initialization of g_queue, the size of the queue is supposed to be 10, not 0. It seems the queue is not in shared memory: when the subprocess starts, a copy of g_queue is created and its size is 0.
Why is multiprocessing.Queue not in shared memory? Please advise. Many thanks!
You should pass your g_queue as a parameter; then it will work.
Demo of using multiprocessing with a queue:
import multiprocessing
import time

def long_time_calculate(n, result_queue):
    time.sleep(1)
    result_queue.put(n)

if __name__ == '__main__':
    pool_size = multiprocessing.cpu_count() * 2
    pool = multiprocessing.Pool(processes=pool_size, maxtasksperchild=4)
    manager = multiprocessing.Manager()
    result_queue = manager.Queue()  # a manager queue can be passed to pool workers
    inputs = [(1, result_queue), (2, result_queue), (3, result_queue), (4, result_queue)]
    for input in inputs:
        pool.apply_async(long_time_calculate, input)
    pool.close()
    pool.join()
    print(list(result_queue.get() for _ in inputs))
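Applied to the question's code, here is a minimal sketch of the same fix (untested; it simply passes g_queue explicitly to each Process instead of relying on a module-level global):
import multiprocessing
import time

def task_io(task_id, q):
    print("IOTask[%s] start" % task_id)
    print("the size of queue is %s" % q.qsize())
    while not q.empty():
        try:
            data = q.get(block=True, timeout=1)
            print("IOTask[%s] get data: %s" % (task_id, data))
            time.sleep(1)
        except Exception as excep:
            print("IOTask[%s] error: %s" % (task_id, str(excep)))
    print("IOTask[%s] end" % task_id)

if __name__ == '__main__':
    g_queue = multiprocessing.Queue()
    for _index in range(10):
        g_queue.put(_index)
    # pass the queue as an argument so every child talks to the same queue object
    process_list = [multiprocessing.Process(target=task_io, args=(i, g_queue))
                    for i in range(multiprocessing.cpu_count())]
    for p in process_list:
        p.start()
    for p in process_list:
        p.join()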
I want to start 4 processes which put an integer into a queue when a counter is divisible by 100. At the same time another process continuously reads the queue and prints the values. Please correct my code to run... I am getting an error: 'Queue' object is not iterable.
from multiprocessing import Lock, Process, Queue, current_process
import time
import queue

def doFirstjob(process_Queue):
    i = 0
    while True:
        if i % 100 == 0:
            process_Queue.put(i)
        else:
            i += 1

def doSecondjob(process_Queue):
    while(1):
        if not process_Queue.Empty:
            task = process_Queue.get()
            print("task: ", task)
        else:
            time.sleep(0.2)

def main():
    number_of_processes = 4
    process_Queue = Queue()
    processes = []
    process_Queue.put(1)
    q = Process(target=doSecondjob, args=(process_Queue))
    q.start()
    for w in range(number_of_processes):
        p = Process(target=doFirstjob, args=(process_Queue))
        processes.append(p)
        p.start()

if __name__ == '__main__':
    main()
You were getting the error because Process expects a list/tuple for args.
Also, instead of Empty it should be the empty() method (called with parentheses).
Change the code to the below:
from multiprocessing import Lock, Process, Queue, current_process
import time
import queue

def doFirstjob(process_Queue):
    i = 0
    while True:
        print("foo")
        if i % 100 == 0:
            process_Queue.put(i)
        else:
            i += 1

def doSecondjob(process_Queue):
    while(1):
        print("bar")
        if not process_Queue.empty():
            task = process_Queue.get()
            print("task: ", task)
        else:
            time.sleep(0.2)

def main():
    number_of_processes = 4
    process_Queue = Queue()
    processes = []
    process_Queue.put(1)
    q = Process(target=doSecondjob, args=(process_Queue,))
    q.start()
    for w in range(number_of_processes):
        p = Process(target=doFirstjob, args=(process_Queue,))
        processes.append(p)
        p.start()

if __name__ == '__main__':
    main()
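As a possible follow-up (not from the original answer): polling empty() in a tight loop still burns CPU; a blocking get() with a timeout avoids that. A small sketch of that variant of doSecondjob, assuming the same process_Queue:
import queue  # for the queue.Empty exception raised on timeout

def doSecondjob(process_Queue):
    while True:
        try:
            # block for up to 0.2s waiting for an item instead of polling empty()
            task = process_Queue.get(timeout=0.2)
            print("task: ", task)
        except queue.Empty:
            pass  # nothing arrived in time; try again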
I tried to benchmark the speedup of Pipe over Queue from the multiprocessing package. I thought Pipe would be faster, as Queue uses Pipe internally.
Strangely, Pipe is slower than Queue when sending large numpy arrays. What am I missing here?
Pipe:
import sys
import time
from multiprocessing import Process, Pipe
import numpy as np

NUM = 1000

def worker(conn):
    for task_nbr in range(NUM):
        conn.send(np.random.rand(400, 400, 3))
    sys.exit(1)

def main():
    parent_conn, child_conn = Pipe(duplex=False)
    Process(target=worker, args=(child_conn,)).start()
    for num in range(NUM):
        message = parent_conn.recv()

if __name__ == "__main__":
    start_time = time.time()
    main()
    end_time = time.time()
    duration = end_time - start_time
    msg_per_sec = NUM / duration
    print "Duration: %s" % duration
    print "Messages Per Second: %s" % msg_per_sec

# Took 10.86s.
Queue:
import sys
import time
from multiprocessing import Process
from multiprocessing import Queue
import numpy as np

NUM = 1000

def worker(q):
    for task_nbr in range(NUM):
        q.put(np.random.rand(400, 400, 3))
    sys.exit(1)

def main():
    recv_q = Queue()
    Process(target=worker, args=(recv_q,)).start()
    for num in range(NUM):
        message = recv_q.get()

if __name__ == "__main__":
    start_time = time.time()
    main()
    end_time = time.time()
    duration = end_time - start_time
    msg_per_sec = NUM / duration
    print "Duration: %s" % duration
    print "Messages Per Second: %s" % msg_per_sec

# Took 6.86s.
You can do an experiment and put the following into your Pipe code above:
def worker(conn):
    for task_nbr in range(NUM):
        data = np.random.rand(400, 400, 3)
    sys.exit(1)

def main():
    parent_conn, child_conn = Pipe(duplex=False)
    p = Process(target=worker, args=(child_conn,))
    p.start()
    p.join()
This gives you the time that it takes to create the data for your test. On my system this takes about 2.9 seconds.
Under the hood, the Queue object implements a buffer and a threaded send. The thread is still in the same process, but by using it, the data creation doesn't have to wait for the system I/O to complete. It effectively parallelizes the operations. Try your Pipe code modified with some simple threading implemented (disclaimer: the code here is for testing only and is not production ready):
import sys
import time
import threading
from multiprocessing import Process, Pipe, Lock
import numpy as np
import copy

NUM = 1000

def worker(conn):
    _conn = conn
    _buf = []
    _wlock = Lock()
    _sentinel = object()  # signal that we're done

    def thread_worker():
        while 1:
            if _buf:
                _wlock.acquire()
                obj = _buf.pop(0)
                if obj is _sentinel:
                    return
                _conn.send(obj)
                _wlock.release()

    t = threading.Thread(target=thread_worker)
    t.start()
    for task_nbr in range(NUM):
        data = np.random.rand(400, 400, 3)
        data[0][0][0] = task_nbr  # just for integrity check
        _wlock.acquire()
        _buf.append(data)
        _wlock.release()
    _wlock.acquire()
    _buf.append(_sentinel)
    _wlock.release()
    t.join()
    sys.exit(1)

def main():
    parent_conn, child_conn = Pipe(duplex=False)
    Process(target=worker, args=(child_conn,)).start()
    for num in range(NUM):
        message = parent_conn.recv()
        assert num == message[0][0][0], 'Data was corrupted'

if __name__ == "__main__":
    start_time = time.time()
    main()
    end_time = time.time()
    duration = end_time - start_time
    msg_per_sec = NUM / duration
    print "Duration: %s" % duration
    print "Messages Per Second: %s" % msg_per_sec
On my machine this takes 3.4 seconds to run which is almost exactly the same as your Queue code above.
From https://docs.python.org/2/library/threading.html
In CPython, due to the Global Interpreter Lock, only one thread can execute Python code at once... however, threading is still an appropriate model if you want to run multiple I/O-bound tasks simultaneously.
The queue and pipe differences are definitely an odd implementation detail until you dig into it a bit.
I assume by your print command that you are using Python 2. However, the strange behavior cannot be replicated with Python 3, where Pipe is actually faster than Queue.
import sys
import time
from multiprocessing import Process, Pipe, Queue
import numpy as np

NUM = 20000

def worker_pipe(conn):
    for task_nbr in range(NUM):
        conn.send(np.random.rand(40, 40, 3))
    sys.exit(1)

def main_pipe():
    parent_conn, child_conn = Pipe(duplex=False)
    Process(target=worker_pipe, args=(child_conn,)).start()
    for num in range(NUM):
        message = parent_conn.recv()

def pipe_test():
    start_time = time.time()
    main_pipe()
    end_time = time.time()
    duration = end_time - start_time
    msg_per_sec = NUM / duration
    print("Pipe")
    print("Duration: " + str(duration))
    print("Messages Per Second: " + str(msg_per_sec))

def worker_queue(q):
    for task_nbr in range(NUM):
        q.put(np.random.rand(40, 40, 3))
    sys.exit(1)

def main_queue():
    recv_q = Queue()
    Process(target=worker_queue, args=(recv_q,)).start()
    for num in range(NUM):
        message = recv_q.get()

def queue_test():
    start_time = time.time()
    main_queue()
    end_time = time.time()
    duration = end_time - start_time
    msg_per_sec = NUM / duration
    print("Queue")
    print("Duration: " + str(duration))
    print("Messages Per Second: " + str(msg_per_sec))

if __name__ == "__main__":
    for i in range(2):
        queue_test()
        pipe_test()
Results in:
Queue
Duration: 3.44321894646
Messages Per Second: 5808.51822408
Pipe
Duration: 2.69065594673
Messages Per Second: 7433.13169575
Queue
Duration: 3.45295906067
Messages Per Second: 5792.13354361
Pipe
Duration: 2.78426194191
Messages Per Second: 7183.23218766
On my system, Pipe(duplex=False) is slower (twice the time, or half the rate) than Pipe(duplex=True). For anyone looking for performance, here is a side-by-side comparison:
from time import time
from multiprocessing import Process, Queue, Pipe

n = 1000
buffer = b'\0' * (1000 * 1000)  # 1 megabyte

def print_elapsed(name, start):
    elapsed = time() - start
    spi = elapsed / n
    ips = n / elapsed
    print(f'{name}: {spi*1000:.3f} ms/item, {ips:.0f} item/sec')

def producer(q):
    start = time()
    for i in range(n):
        q.put(buffer)
    print_elapsed('producer', start)

def consumer(q):
    start = time()
    for i in range(n):
        out = q.get()
    print_elapsed('consumer', start)

class PipeQueue():
    def __init__(self, **kwargs):
        self.out_pipe, self.in_pipe = Pipe(**kwargs)

    def put(self, item):
        self.in_pipe.send_bytes(item)

    def get(self):
        return self.out_pipe.recv_bytes()

    def close(self):
        self.out_pipe.close()
        self.in_pipe.close()

print('duplex=True')
q = PipeQueue(duplex=True)
producer_process = Process(target=producer, args=(q,))
consumer_process = Process(target=consumer, args=(q,))
consumer_process.start()
producer_process.start()
consumer_process.join()
producer_process.join()
q.close()

print('duplex=False')
q = PipeQueue(duplex=False)
producer_process = Process(target=producer, args=(q,))
consumer_process = Process(target=consumer, args=(q,))
consumer_process.start()
producer_process.start()
consumer_process.join()
producer_process.join()
q.close()
Results:
duplex=True
consumer: 0.301 ms/item, 3317 item/sec
producer: 0.298 ms/item, 3358 item/sec
duplex=False
consumer: 0.673 ms/item, 1486 item/sec
producer: 0.669 ms/item, 1494 item/sec
I think this must come down to CPython using os.pipe vs socket.socketpair, but I'm not sure.
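One way to check that speculation (a quick inspection sketch, assuming CPython, where the Pipe factory lives in multiprocessing.connection) is to print its source and look at the duplex and non-duplex branches:
# Print the CPython implementation of Pipe to see which OS primitive each
# duplex setting uses (os.pipe vs socket.socketpair on POSIX).
import inspect
import multiprocessing.connection

print(inspect.getsource(multiprocessing.connection.Pipe))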
I'm facing problems with the following example code:
from multiprocessing import Lock, Process, Queue, current_process

def worker(work_queue, done_queue):
    for item in iter(work_queue.get, 'STOP'):
        print("adding ", item, "to done queue")
        #this works: done_queue.put(item*10)
        done_queue.put(item*1000)  # this doesn't!
    return True

def main():
    workers = 4
    work_queue = Queue()
    done_queue = Queue()
    processes = []
    for x in range(10):
        work_queue.put("hi" + str(x))
    for w in range(workers):
        p = Process(target=worker, args=(work_queue, done_queue))
        p.start()
        processes.append(p)
        work_queue.put('STOP')
    for p in processes:
        p.join()
    done_queue.put('STOP')
    for item in iter(done_queue.get, 'STOP'):
        print(item)

if __name__ == '__main__':
    main()
When the done queue becomes big enough (a limit of about 64 KB, I think), the whole thing freezes without any further notice.
What is the general approach for such a situation, when the queue becomes too big? Is there some way to remove elements on the fly once they are processed? The Python docs recommend removing the p.join(); in a real application, however, I cannot estimate when the processes have finished. Is there a simple solution for this problem besides looping indefinitely and using .get_nowait()?
This works for me with 3.4.0alpha4, 3.3, 3.2, 3.1 and 2.6. It raises a traceback with 2.7 and 3.0. I ran pylint on it, BTW.
#!/usr/local/cpython-3.3/bin/python

'''SSCCE for a queue deadlock'''

import sys
import multiprocessing

def worker(workerno, work_queue, done_queue):
    '''Worker function'''
    #reps = 10 # this worked for the OP
    #reps = 1000 # this worked for me
    reps = 10000 # this didn't
    for item in iter(work_queue.get, 'STOP'):
        print("adding", item, "to done queue")
        #this works: done_queue.put(item*10)
        for thing in item * reps:
            #print('workerno: {}, adding thing {}'.format(workerno, thing))
            done_queue.put(thing)
    done_queue.put('STOP')
    print('workerno: {0}, exited loop'.format(workerno))
    return True

def main():
    '''main function'''
    workers = 4
    work_queue = multiprocessing.Queue(maxsize=0)
    done_queue = multiprocessing.Queue(maxsize=0)
    processes = []
    for integer in range(10):
        work_queue.put("hi" + str(integer))
    for workerno in range(workers):
        dummy = workerno
        process = multiprocessing.Process(target=worker, args=(workerno, work_queue, done_queue))
        process.start()
        processes.append(process)
        work_queue.put('STOP')
    itemno = 0
    stops = 0
    while True:
        item = done_queue.get()
        itemno += 1
        sys.stdout.write('itemno {0}\r'.format(itemno))
        if item == 'STOP':
            stops += 1
            if stops == workers:
                break
    print('exited done_queue empty loop')
    for workerno, process in enumerate(processes):
        print('attempting process.join() of workerno {0}'.format(workerno))
        process.join()
    done_queue.put('STOP')

if __name__ == '__main__':
    main()
HTH
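For reference, the pattern the code above relies on is the one the multiprocessing docs describe under "Joining processes that use queues": a child that has put items on a queue won't exit until its feeder thread has flushed them to the underlying pipe, so the parent must drain the result queue before joining the producers. A minimal, self-contained sketch of that drain-before-join shape (names here are illustrative, not from the original post):
import multiprocessing

def producer(workerno, done_queue):
    for i in range(100000):
        done_queue.put((workerno, i))
    done_queue.put('STOP')  # one sentinel per producer

if __name__ == '__main__':
    workers = 4
    done_queue = multiprocessing.Queue()
    processes = [multiprocessing.Process(target=producer, args=(n, done_queue))
                 for n in range(workers)]
    for p in processes:
        p.start()
    stops = 0
    while stops < workers:        # drain while the producers are still running
        item = done_queue.get()
        if item == 'STOP':
            stops += 1
    for p in processes:
        p.join()                  # safe now: the feeder threads have flushed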
How do I speed up this test code in Python to Redis on WinXP using Python 2.7?
Would multiprocessing be better? The load rate is 6,000/s vs the published rates of 100,000/s.
I chose 100,000 keys, but could lower that in testing. The process takes 15 seconds.
Would changing settings on the server help?
import time
from time import strftime
import redis
import threading, Queue

start_time = time.time()
cxn = redis.StrictRedis('127.0.0.1', 6379, 1)

class WorkerMain(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while 1:
            try:  # take a job from the queue
                row = self.queue.get_nowait()
            except Queue.Empty:
                raise SystemExit
            try:
                cxn.set(row, "Row")
                #print (row, "Row")
            except:
                print 'Setup Error'

if __name__ == '__main__':
    connections = 5
    sml = range(1, 100000)
    queue = Queue.Queue()
    for row in sml:
        queue.put(str(row))
    threads = []
    for dummy in range(connections):
        t = WorkerMain(queue)
        t.start()
        threads.append(t)
    # wait for all threads to finish
    for thread in threads:
        thread.join()
    print
    end_time = time.time()
    duration = end_time - start_time
    print "Duration: %s" % duration
I used the code below for multiprocessing and "monitored" the data with the CLI... not all the data went into the server.
from multiprocessing import Pool
import time
import redis

start_time = time.time()
cxn = redis.Redis('127.0.0.1', 6379, 1)

def rset(var):
    cxn.set(var, "value")

if __name__ == '__main__':
    sml = range(1, 10000)
    #for x in sml: print x
    pool = Pool(processes=5)
    for row in sml:
        pool.apply_async(rset, [(row,)])
        #print result.get(),
    end_time = time.time()
    duration = end_time - start_time
    print "Duration: %s" % duration
Here is the pipelined code... I just commented out the threading stuff.
import time
from time import strftime
import redis
import threading, Queue

start_time = time.time()
cxn = redis.StrictRedis('127.0.0.1', 6379, 0)
pipe = cxn.pipeline(transaction=False)

class WorkerMain(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while 1:
            try:  # take a job from the queue
                row = self.queue.get_nowait()
            except Queue.Empty:
                raise SystemExit
            try:
                cxn.set(row, "Row")
                #print (row, "Row")
            except:
                print 'Setup Error'

if __name__ == '__main__':
    #connections = 5
    sml = range(1, 100000)
    #queue = Queue.Queue()
    for row in sml:
        #queue.put(str(row))
        pipe.set(str(row), "value").execute()  # key, value
    # threads = []
    # for dummy in range(connections):
    #     t = WorkerMain(queue)
    #     t.start()
    #     threads.append(t)
    #
    # # wait for all threads to finish
    # for thread in threads:
    #     thread.join()
    print
    end_time = time.time()
    duration = end_time - start_time
    print "Duration: %s" % duration
Use pipelines. A pipeline batches commands so you don't pay the network overhead for every command.
See:
The section on pipelines in https://github.com/andymccurdy/redis-py
Pipelining on redis.io - http://redis.io/topics/pipelining
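As an illustration (a sketch, not from the original answer, assuming a local Redis and redis-py's standard pipeline API): the gain comes from calling execute() once per batch rather than once per command, e.g. every 1000 SETs:
import redis

cxn = redis.StrictRedis('127.0.0.1', 6379, 0)
pipe = cxn.pipeline(transaction=False)

for row in range(1, 100000):
    pipe.set(str(row), "value")     # queued locally, not sent yet
    if row % 1000 == 0:
        pipe.execute()              # one round trip for 1000 commands
pipe.execute()                      # flush the final partial batch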
Using threading for better performance is not a really good idea if you use CPython (the standard Python interpreter) because of the GIL:
http://wiki.python.org/moin/GlobalInterpreterLock
multiprocessing should work better.
I want a long-running process to return its progress over a Queue (or something similar) which I will feed to a progress bar dialog. I also need the result when the process is completed. A test example here fails with a RuntimeError: Queue objects should only be shared between processes through inheritance.
import multiprocessing, time

def task(args):
    count = args[0]
    queue = args[1]
    for i in xrange(count):
        queue.put("%d mississippi" % i)
    return "Done"

def main():
    q = multiprocessing.Queue()
    pool = multiprocessing.Pool()
    result = pool.map_async(task, [(x, q) for x in range(10)])
    time.sleep(1)
    while not q.empty():
        print q.get()
    print result.get()

if __name__ == "__main__":
    main()
I've been able to get this to work using individual Process objects (where I am allowed to pass a Queue reference), but then I don't have a pool to manage the many processes I want to launch. Any advice on a better pattern for this?
The following code seems to work:
import multiprocessing, time

def task(args):
    count = args[0]
    queue = args[1]
    for i in xrange(count):
        queue.put("%d mississippi" % i)
    return "Done"

def main():
    manager = multiprocessing.Manager()
    q = manager.Queue()
    pool = multiprocessing.Pool()
    result = pool.map_async(task, [(x, q) for x in range(10)])
    time.sleep(1)
    while not q.empty():
        print q.get()
    print result.get()

if __name__ == "__main__":
    main()
Note that the queue comes from manager.Queue() rather than multiprocessing.Queue(). Thanks, Alex, for pointing me in this direction.
Making q global works...:
import multiprocessing, time

q = multiprocessing.Queue()

def task(count):
    for i in xrange(count):
        q.put("%d mississippi" % i)
    return "Done"

def main():
    pool = multiprocessing.Pool()
    result = pool.map_async(task, range(10))
    time.sleep(1)
    while not q.empty():
        print q.get()
    print result.get()

if __name__ == "__main__":
    main()
If you need multiple queues, e.g. to avoid mixing up the progress of the various pool processes, a global list of queues should work (of course, each process will then need to know what index in the list to use, but that's OK to pass as an argument;-).
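A minimal sketch of that multiple-queue idea (written for Python 3; names are illustrative). It relies on the pool workers inheriting the module-level queues, so it assumes the fork start method (Unix), just like the global-queue code above:
import multiprocessing, time

# one queue per task index, created at module level so forked workers inherit them
queues = [multiprocessing.Queue() for _ in range(10)]

def task(index):
    for i in range(index):
        queues[index].put("%d mississippi" % i)  # report progress on this task's own queue
    return "Done"

if __name__ == "__main__":
    pool = multiprocessing.Pool()
    result = pool.map_async(task, range(10))
    time.sleep(1)
    for index, q in enumerate(queues):
        while not q.empty():
            print(index, q.get())
    print(result.get())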