I want the first thread to process the first number, the fifth, and the ninth; the second thread would get the second, sixth, and tenth; the third the third, seventh, and eleventh; and the fourth the fourth, eighth, and twelfth. I know that it is a repeating pattern of (4*counter + original), but I am lost when it comes to moving that into an actual _thread process. Here is what I have so far.
If I am going about it wrong then just tell me, because I am open to suggestions.
Edit: I am using Python 3.3.
import _thread

# assumes an isprime() helper like the one defined further down
results = []

def calc(threadName):
    testRange = 100
    testNumber = 100
    timesToTest = 25
    testCounter = 0
    if threadName == 'ThreadOne':
        testNumber = (testNumber) + 5*(testCounter)
    if threadName == 'ThreadTwo':
        testNumber = (testNumber+1) + 5*(testCounter)
    if threadName == 'ThreadThree':
        testNumber = (testNumber+2) + 5*(testCounter)
    if threadName == 'ThreadFour':
        testNumber = (testNumber+3) + 5*(testCounter)
    while testCounter < timesToTest:
        testCounter += 1
        while testRange >= 0:
            answer = ((testNumber*3) - ((testNumber-1)**2))
            testbool = isprime(answer)
            print('Testing '+str(testNumber)+' on '+str(threadName))
            testNumber += 1
            testRange -= 1
            if testbool:
                results.append((threadName, testNumber, answer))
threadOne = _thread.start_new_thread(calc, ('ThreadOne',))
threadTwo = _thread.start_new_thread(calc, ('ThreadTwo',))
threadThree = _thread.start_new_thread(calc, ('ThreadThree',))
threadFour = _thread.start_new_thread(calc, ('ThreadFour',))

while 1:
    pass
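For what it's worth, the (4*counter + original) split can be expressed directly as a stride-4 range per thread, so no per-thread if branches are needed. Here is a minimal sketch of that idea using the higher-level threading module; the isprime() helper and the tested formula are assumptions carried over from the code above:

import threading

results = []
results_lock = threading.Lock()

def worker(offset, start=100, total=100, stride=4):
    # the thread with offset k tests start+k, start+k+4, start+k+8, ...
    for testNumber in range(start + offset, start + total, stride):
        answer = (testNumber ** 3) - ((testNumber - 1) ** 3)
        if isprime(answer):  # assumed helper, e.g. the one defined below
            with results_lock:  # guard the shared list across threads
                results.append((threading.current_thread().name, testNumber, answer))

threads = [threading.Thread(target=worker, args=(k,), name='Thread%d' % (k + 1))
           for k in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()  # no busy-wait needed; join() blocks until each thread finishes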
Edit: I tried this:
import threading
import queue

class Worker(threading.Thread):
    global results_list
    print('in main class')
    def __init__(self, name):
        threading.Thread.__init__(self)
        self.name = name
        self.jobs_queue = queue.Queue()
        self.results_list = list()
        print('in init')
    def isprime(self, n):
        n = abs(int(n))
        print('in isprime')
        if n < 2:
            return False
        if n == 2:
            return True
        if not n & 1:
            return False
        for x in range(3, int(n**0.5)+1, 2):
            if n % x == 0:
                return False
        return True
    def run(self):
        print('in run')
        while True:
            testNumber = self.jobs_queue.get()
            if testNumber == "END":
                return
            # here, do your stuff with 'testNumber':
            # test whether n**3 - (n-1)**3 is prime
            answer = ((testNumber**3) - ((testNumber-1)**3))
            testbool = self.isprime(answer)
            if testbool:  # results are appended to a list
                self.results_list.append((self.name, testNumber, answer))
    def calc(self, n):
        print('in calc')
        self.jobs_queue.put(n)
        if not self.is_alive():
            self.start()
    def get_result(self):
        print('in get_result')
        return self.results_list
    def stop(self):
        print('in stop')
        # tell the thread to stop,
        # once jobs in queue are done
        self.jobs_queue.put("END")
        self.join()
        print('Anything')

workers = [Worker('thread 1'), Worker('thread 2'), Worker('thread 3'), Worker('thread 4')]
for n in range(100):
    print('here 1')
    w = workers[n % 4]
    w.calc(n)
for w in workers:
    w.stop()
for w in workers:
    x = 1
    # print(results_list)
As far as I understand, you want a pool of 4 worker threads, fairly queuing the same kind of "job" between those 4 threads.
I would do something more like this:
import threading
import queue

class Worker(threading.Thread):
    def __init__(self, name):
        threading.Thread.__init__(self)
        self.name = name
        self.jobs_queue = queue.Queue()
        self.results_list = list()

    def run(self):
        while True:
            testNumber = self.jobs_queue.get()
            if testNumber == "END":
                return
            # here, do your stuff with 'testNumber'
            # for example, let's multiply it by 2
            answer = testNumber * 2
            # results are appended to a list
            self.results_list.append((self.name, testNumber, answer))

    def calc(self, n):
        self.jobs_queue.put(n)
        if not self.is_alive():
            self.start()

    def get_result(self):
        return self.results_list

    def stop(self):
        # tell the thread to stop,
        # once jobs in queue are done
        self.jobs_queue.put("END")
        self.join()
workers = [Worker('thread 1'), Worker('thread 2'), Worker('thread 3'), Worker('thread 4')]
for n in range(100):
    w = workers[n % 4]
    w.calc(n)
for w in workers:
    w.stop()
for w in workers:
    print(w.get_result())
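As a side note, since Python 3.2 the standard library's concurrent.futures module provides this worker-pool pattern out of the box; a rough equivalent of the loop above could look like this (the body of work() is just the placeholder computation from the example):

from concurrent.futures import ThreadPoolExecutor

def work(testNumber):
    # here, do your stuff with 'testNumber'
    return (testNumber, testNumber * 2)

with ThreadPoolExecutor(max_workers=4) as executor:
    results = list(executor.map(work, range(100)))
print(results)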
Hi :) I am programming an independent multi-metronome and need to run the function beat() in a while loop for every instance of my class Metronome(), all starting at the same time.
import time
import mido
from numpy import interp
from IPython.display import clear_output
inport = mido.open_input() #my MIDI is a KORG nanoKontrol 2
class Metronome():
    # A dict for my MIDI controller
    metronomes_controls = {
        'inst_number': [n + 0 for n in range(0, 7)],
        'vol_slide': [n + 0 for n in range(0, 7)],
        'tempo_knob': [n + 16 for n in range(0, 7)],
        'play_stop': [n + 32 for n in range(0, 7)],
        'sync_selected': [n + 48 for n in range(0, 7)],
        'tap_button': [n + 64 for n in range(0, 7)]}

    def __init__(self, inport, inst_number=0, tempo=60, active=True,
                 on_off_list=['ON', 'OFF', 'ON', 'OFF'], selector=0):
        self.inport = inport
        self.inst_number = inst_number
        self.tempo = tempo
        self.active = active
        self.on_off_list = on_off_list  # The controller is not so precise
        self.selector = selector
        # class attributes must be referenced via the class (or self) inside methods
        self.controls = dict(zip(list(Metronome.metronomes_controls.keys()),
                                 [val[self.inst_number] for val in Metronome.metronomes_controls.values()]))

    def beat(self):
        if self.active == True:
            print('Tick', self.tempo)  # this is going to be a sound
            time.sleep(int(round(60/self.tempo)))
            clear_output()
            self.update()
        else:
            self.update()

    def update(self):
        msg = self.inport.receive(block=False)
        for msg in inport.iter_pending():
            if msg.control == self.controls['tempo_knob']:
                self.tempo = int(interp(msg.value, [0, 127], [20, 99]))
            if msg.control == self.controls['play_stop']:
                self.selector += 1
                if self.selector > 3:
                    self.selector = 0
                if 'ON' in self.on_off_list[self.selector]:
                    print('ON')
                    self.active = True
                if 'OFF' in self.on_off_list[self.selector]:
                    print('OFF')
                    self.active = False

# Creating four instances of my class
m0 = Metronome(inport=inport, inst_number=0)
m1 = Metronome(inport=inport, inst_number=1)
m2 = Metronome(inport=inport, inst_number=1)
m3 = Metronome(inport=inport, inst_number=1)

# They run in a while loop. All should start at the same time.
while True:
    m0.beat()
    m1.beat()
    m2.beat()
    m3.beat()
I read about threading, but it seems to introduce some starting delay. Then I got into barriers, but I couldn't figure out how to implement them :/ Or maybe I should try something with multiprocessing? I got really lost! Any advice is highly appreciated.
Thanks for the help!!!
Create one thread per metronome and start them at (almost) the same time:
from threading import Thread

# Previous code...
...

def create_thread(inport, inst_number):
    # The function the thread is going to execute
    def inner():
        m = Metronome(inport, inst_number)
        while True:  # keep beating; beat() itself handles the active/stop state
            m.beat()
    return Thread(target=inner)

if __name__ == "__main__":
    inport = mido.open_input()
    # Create the threads
    threads = [
        create_thread(inport, i) for i in (0, 1, 1, 1)
    ]
    # Start them at (almost) the same time
    for t in threads:
        t.start()
    # Wait for them to finish execution
    for t in threads:
        t.join()
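Since the question mentions barriers: threading.Barrier is exactly the tool for synchronizing the start. Each thread blocks at wait() until all four have arrived, and then they are all released together. A small sketch of how it could be wired into inner(), with the Metronome class assumed unchanged:

from threading import Barrier, Thread

start_barrier = Barrier(4)  # one party per metronome thread

def create_thread(inport, inst_number):
    def inner():
        m = Metronome(inport, inst_number)
        start_barrier.wait()  # block here until all 4 threads are ready
        while True:
            m.beat()
    return Thread(target=inner)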
I'm trying to implement mutual exclusion using a semaphore in Python. The two processes (proc1, proc2) are supposed to be two independent, concurrent processes. They do exactly the same thing: store n in array[n], then increment n.
The purpose of the program is to show that using a semaphore we can ensure that the array is filled properly: [0,1,2,3,4,5,6,7,8,9], without skipping any index. However, my code seems to store [0,1,0,0,0,0,0,0,0,0]. I haven't used threads in Python before, so I don't know what's going on.
import threading
import time

n = 0
array = [0]*10
sem = threading.Semaphore()

def proc1():
    global n, array
    while True:
        sem.acquire()
        array[n] = n
        n += 1
        sem.release()
        time.sleep(0.25)

def proc2():
    global n, array
    while True:
        sem.acquire()
        array[n] = n
        n += 1
        sem.release()
        time.sleep(0.25)

t = threading.Thread(target=proc1)
t.start()
t2 = threading.Thread(target=proc2)
t2.start()
print(array)
The problem was that the OP tried to print the result before the threads were done; the main thread should have waited for them with join(). Note also that the array access belongs inside the critical section:
import threading
import time

n = 0
array = [0]*10
sem = threading.Semaphore()

def proc(num):
    global n
    while True:
        sem.acquire()
        if n > 9:
            sem.release()
            break
        array[n] = n
        n = n + 1
        sem.release()
        print("Thread {}: {}".format(num, array))
        time.sleep(0.25)

t1 = threading.Thread(target=proc, args=[1])
t2 = threading.Thread(target=proc, args=[2])
t1.start()
t2.start()
t1.join()
t2.join()
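As a side note, threading.Semaphore (like Lock) supports the context-manager protocol, so the acquire/release pair can be written as a with block, which releases the semaphore even if the body raises. A minimal sketch of the same loop in that style:

import threading

n = 0
array = [0] * 10
sem = threading.Semaphore()

def proc(num):
    global n
    while True:
        with sem:  # acquired on entry, released on exit, even on exceptions
            if n > 9:
                break  # leaving the with block releases the semaphore
            array[n] = n
            n = n + 1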
A different take on a semaphore pattern, handling the "tasks" within the Semaphore class itself:
import logging
from itertools import count
from threading import Thread

logger = logging.getLogger(__name__)
_task_ids = count()

class Task:
    # minimal task record used below: a callable, its args, and an id for logging
    def __init__(self, func, args):
        self.func = func
        self.args = args
        self.task_id = next(_task_ids)

class Semaphore:
    def __init__(self, max_threads):
        self.active_threads = 0
        self.max_threads = max_threads
        self.tasks = []

    def add_task(self, func, args):
        self.tasks.append(
            Task(
                func=func,
                args=args
            )
        )

    def run_task(self, task: Task):
        _func = task.func
        _args = task.args
        self.active_threads += 1
        _func(*_args)
        self.active_threads -= 1

    def run(self, blocking=False):
        if blocking:
            self._run()
        else:
            t = Thread(target=self._run)
            t.start()

    def _run(self):
        while True:
            if self.tasks and self.active_threads < self.max_threads:
                task = self.tasks.pop()
                logger.info(f'starting task: {task.task_id}')
                t = Thread(
                    target=self.run_task,
                    args=(task,))
                t.start()
            if len(self.tasks) == 0:
                break
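A hypothetical usage example, just to show the intended call sequence (the work() function and its sleep are made up for illustration):

import time

def work(job_id):
    time.sleep(0.5)  # stand-in for real work
    print('finished job', job_id)

sem = Semaphore(max_threads=2)
for i in range(5):
    sem.add_task(work, (i,))
sem.run(blocking=True)  # limits concurrency to roughly 2 tasks at a time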
I have an object:
from multiprocessing import Pool
import time

class ASYNC(object):
    def __init__(self, THREADS=[]):
        print('do')
        pool = Pool(processes=len(THREADS))
        self.THREAD_POOL = {}
        thread_index = 0
        for thread_ in THREADS:
            self.THREAD_POOL[thread_index] = {
                'thread': thread_['thread'],
                'args': thread_['args'],
                'callback': thread_['callback']
            }
            self.THREAD_POOL[thread_index]['running'] = True
            pool.apply_async(self.run, [thread_index], callback=thread_['callback'])
            thread_index += 1

    def run(self, thread_index):
        print('enter')
        while(self.THREAD_POOL[thread_index]['running']):
            print("loop")
            self.THREAD_POOL[thread_index]['thread'](self.THREAD_POOL[thread_index])  # HERE
            time.sleep(1)
        self.THREAD_POOL[thread_index]['running'] = False

    def wait_for_finish(self):
        for pool in self.THREAD_POOL:
            while(self.THREAD_POOL[pool]['running']):
                print("sleep" + str(self.THREAD_POOL[pool]['running']))
                time.sleep(1)

def x(pool):  # HERE
    print(str(pool))
    if(pool['args'][0] >= 15):
        pool['running'] = False
    pool['args'][0] += 1

def y(str):
    print("done")

A = ASYNC([{'thread': x, 'args': [10], 'callback': y}])
print("start")
A.wait_for_finish()
I am having issues passing self.THREAD_POOL[thread_index] by reference to def x(pool).
I need x(pool) to change the value of the variable in the object.
If I check the value in wait_for_finish, then the object is not changed.
Passing an object by reference (tested and works properly):

x = {"1": "one", "2": "two"}
def test(a):
    a["1"] = "ONE"
test(x)
print(x["1"])  # outputs ONE as expected

This means that dictionaries in Python are passed by reference; so why, in my code, is it passing by value?
SOLUTION
From #DevShark:
from multiprocessing import Process, Value, Array

def f(n, a):
    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]

if __name__ == '__main__':
    num = Value('d', 0.0)
    arr = Array('i', range(10))
    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()
    print(num.value)
    print(arr[:])
According to the documentation, you should not do this unless absolutely needed, so I decided not to use it. https://docs.python.org/2/library/multiprocessing.html#multiprocessing.JoinableQueue
Instead I will be doing:
from multiprocessing import Pool
import time

class ASYNC(object):
    def __init__(self, THREADS=[]):
        print('do')
        pool = Pool(processes=len(THREADS))
        self.THREAD_POOL = {}
        thread_index = 0
        for thread_ in THREADS:
            self.THREAD_POOL[thread_index] = {
                'thread': thread_['thread'],
                'args': thread_['args'],
                'callback': thread_['callback']
            }
            self.THREAD_POOL[thread_index]['running'] = True
            pool.apply_async(self.run, [thread_index], callback=thread_['callback'])
            thread_index += 1

    def run(self, thread_index):
        print('enter')
        while(self.THREAD_POOL[thread_index]['running']):
            print("loop")
            self.THREAD_POOL[thread_index]['thread'](thread_index)
            time.sleep(1)
        self.THREAD_POOL[thread_index]['running'] = False

    def wait_for_finish(self):
        for pool in self.THREAD_POOL:
            while(self.THREAD_POOL[pool]['running']):
                print("sleep" + str(self.THREAD_POOL[pool]['running']))
                time.sleep(1)

def x(index):
    global A
    pool = A.THREAD_POOL[index]
    print(str(pool))
    if(pool['args'][0] >= 15):
        pool['running'] = False
    pool['args'][0] += 1

def y(str):
    print("done")

A = ASYNC([{'thread': x, 'args': [10], 'callback': y}])
print("start")
A.wait_for_finish()
You are running your function in a different process; that's the way multiprocessing works. Therefore it does not matter what you do with the object: modifications will not be seen in the other processes.
To share data between processes, see the docs, as someone noted in a comment:
Data can be stored in a shared memory map using Value or Array.
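For mutable, dictionary-shaped state like THREAD_POOL, another option the docs describe is a manager: a multiprocessing.Manager().dict() proxy forwards mutations made in a worker process back through the manager, so every process sees them. A minimal sketch of that idea (worker() and the keys are illustrative, not the OP's code):

from multiprocessing import Manager, Pool

def worker(shared):
    shared['counter'] += 1      # goes through the manager process,
    shared['running'] = False   # so the parent sees both changes

if __name__ == '__main__':
    manager = Manager()
    shared = manager.dict({'counter': 10, 'running': True})
    with Pool(processes=1) as pool:
        pool.apply(worker, (shared,))
    print(shared['counter'], shared['running'])  # -> 11 False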
I'm launching 3 processes and I want them to put a string into a shared array, at the index corresponding to the process (i).
Look at the code below, the output generated is:
['test 0', None, None]
['test 1', 'test 1', None]
['test 2', 'test 2', 'test 2']
Why does 'test 0' get overwritten by 'test 1', and 'test 1' by 'test 2'?
What I want is (order is not important) :
['test 0', None, None]
['test 0', 'test 1', None]
['test 0', 'test 1', 'test 2']
The code :
#!/usr/bin/env python
import multiprocessing
from multiprocessing import Value, Lock, Process, Array
import ctypes
from ctypes import c_int, c_char_p

class Consumer(multiprocessing.Process):
    def __init__(self, task_queue, result_queue, arr, lock):
        multiprocessing.Process.__init__(self)
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.arr = arr
        self.lock = lock

    def run(self):
        proc_name = self.name
        while True:
            next_task = self.task_queue.get()
            if next_task is None:
                self.task_queue.task_done()
                break
            answer = next_task(arr=self.arr, lock=self.lock)
            self.task_queue.task_done()
            self.result_queue.put(answer)
        return

class Task(object):
    def __init__(self, i):
        self.i = i

    def __call__(self, arr=None, lock=None):
        with lock:
            arr[self.i] = "test %d" % self.i
            print arr[:]

    def __str__(self):
        return 'ARC'

    def run(self):
        print 'IN'

if __name__ == '__main__':
    tasks = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    arr = Array(ctypes.c_char_p, 3)
    lock = multiprocessing.Lock()
    num_consumers = multiprocessing.cpu_count() * 2
    consumers = [Consumer(tasks, results, arr, lock) for i in xrange(num_consumers)]
    for w in consumers:
        w.start()
    for i in xrange(3):
        tasks.put(Task(i))
    for i in xrange(num_consumers):
        tasks.put(None)
I'm running Python 2.7.3 (Ubuntu)
This problem seems similar to this one. There, J.F. Sebastian speculated that the assignment to arr[i] points arr[i] to a memory address that was only meaningful to the subprocess making the assignment. The other subprocesses retrieve garbage when looking at that address.
There are at least two ways to avoid this problem. One is to use a multiprocessing.Manager list:
import multiprocessing as mp

class Consumer(mp.Process):
    def __init__(self, task_queue, result_queue, lock, lst):
        mp.Process.__init__(self)
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.lock = lock
        self.lst = lst

    def run(self):
        proc_name = self.name
        while True:
            next_task = self.task_queue.get()
            if next_task is None:
                self.task_queue.task_done()
                break
            answer = next_task(lock=self.lock, lst=self.lst)
            self.task_queue.task_done()
            self.result_queue.put(answer)
        return

class Task(object):
    def __init__(self, i):
        self.i = i

    def __call__(self, lock, lst):
        with lock:
            lst[self.i] = "test {}".format(self.i)
            print([lst[i] for i in range(3)])

if __name__ == '__main__':
    tasks = mp.JoinableQueue()
    results = mp.Queue()
    manager = mp.Manager()
    lst = manager.list(['']*3)
    lock = mp.Lock()
    num_consumers = mp.cpu_count() * 2
    consumers = [Consumer(tasks, results, lock, lst) for i in xrange(num_consumers)]
    for w in consumers:
        w.start()
    for i in xrange(3):
        tasks.put(Task(i))
    for i in xrange(num_consumers):
        tasks.put(None)
    tasks.join()
Another way is to use a shared array with a fixed size such as mp.Array('c', 10).
import multiprocessing as mp

class Consumer(mp.Process):
    def __init__(self, task_queue, result_queue, arr, lock):
        mp.Process.__init__(self)
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.arr = arr
        self.lock = lock

    def run(self):
        proc_name = self.name
        while True:
            next_task = self.task_queue.get()
            if next_task is None:
                self.task_queue.task_done()
                break
            answer = next_task(arr=self.arr, lock=self.lock)
            self.task_queue.task_done()
            self.result_queue.put(answer)
        return

class Task(object):
    def __init__(self, i):
        self.i = i

    def __call__(self, arr, lock):
        with lock:
            arr[self.i].value = "test {}".format(self.i)
            print([a.value for a in arr])

if __name__ == '__main__':
    tasks = mp.JoinableQueue()
    results = mp.Queue()
    arr = [mp.Array('c', 10) for i in range(3)]
    lock = mp.Lock()
    num_consumers = mp.cpu_count() * 2
    consumers = [Consumer(tasks, results, arr, lock) for i in xrange(num_consumers)]
    for w in consumers:
        w.start()
    for i in xrange(3):
        tasks.put(Task(i))
    for i in xrange(num_consumers):
        tasks.put(None)
    tasks.join()
I speculate that the reason mp.Array('c', 10) works while mp.Array(ctypes.c_char_p, 3) does not is that the former has a fixed size, so the memory address never changes, while the latter has a variable size, so the memory address might change when arr[i] is assigned a bigger string.
Perhaps this is what the docs are warning about when they state:
Although it is possible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash.
I'm trying to use the multiprocessing module in Python 2.6, but apparently there is something I do not understand. I would expect the class below to add up the numbers sent to it via add() and return the sum in the get_result() method. The code below prints "0"; I'd like it to print "2". What have I missed?
import multiprocessing

class AdderProcess(multiprocessing.Process):
    def __init__(self):
        multiprocessing.Process.__init__(self)
        self.sum = 0
        self.queue = multiprocessing.JoinableQueue(5)
        self.daemon = True
        self.start()

    def run(self):
        while True:
            number = self.queue.get()
            self.sum += number
            self.queue.task_done()

    def add(self, number):
        self.queue.put(number)

    def get_result(self):
        self.queue.join()
        return self.sum

p = AdderProcess()
p.add(1)
p.add(1)
print p.get_result()
PS. This problem has been solved. Thanks for the answers! Just to make it easier for any readers, here's the complete working version:
import multiprocessing

class AdderProcess(multiprocessing.Process):
    def __init__(self):
        multiprocessing.Process.__init__(self)
        self.sum = multiprocessing.Value('d', 0.0)
        self.queue = multiprocessing.JoinableQueue(5)
        self.daemon = True
        self.start()

    def run(self):
        while True:
            number = self.queue.get()
            self.sum.value += number
            self.queue.task_done()

    def add(self, number):
        self.queue.put(number)

    def get_result(self):
        self.queue.join()
        return self.sum.value

p = AdderProcess()
p.add(1)
p.add(1)
print p.get_result()
Change self.sum = 0 to self.sum = multiprocessing.Value('d', 0.0), and use self.sum.value to access or change the value.
class AdderProcess(multiprocessing.Process):
    def __init__(self):
        ...
        self.sum = multiprocessing.Value('d', 0.0)
        ...

    def run(self):
        while True:
            number = self.queue.get()
            self.sum.value += number  # <-- use self.sum.value
            self.queue.task_done()

    def get_result(self):
        self.queue.join()
        return self.sum.value  # <-- use self.sum.value
The problem is this: Once you call self.start() in __init__, the main process forks off a child process. All values are copied. Now there are two versions of p. In the main process, p.sum is 0. In the child process, the run method is called and p.sum is augmented to 2. But when the main process calls p.get_result(), its version of p still has p.sum equal to 0.
So 0 is printed.
When you want to share a float value between processes, you need to use a sharing mechanism, such as mp.Value.
See "Sharing state between processes" for more options on how to share values.
self.sum is 2... in that process:
def run(self):
    while True:
        number = self.queue.get()
        print "got %s from queue" % number
        print "Before adding - self.sum = %d" % self.sum
        self.sum += number
        print "After adding - self.sum = %d" % self.sum
        self.queue.task_done()
[ 13:56 jon#host ~ ]$ ./mp.py
got 1 from queue
Before adding - self.sum = 0
After adding - self.sum = 1
got 1 from queue
Before adding - self.sum = 1
After adding - self.sum = 2
See multiprocessing 16.3.1.4. - Sharing state between processes on how to get self.sum to be the same in different processes.