Wait for threads to finish before running them again - python

I am making a program that controls 2 motors through a Raspberry Pi. I am running Python code and I am wondering how to achieve the following:
Run motor1
Run motor2 simultaneously
Wait for both motors to finish
Run motor1
Run motor2 simultaneously
etc.
What I have done so far is create a Thread subclass that uses a queue:
from threading import Thread
from queue import Queue

class Stepper(Thread):
    def __init__(self, stepper):
        Thread.__init__(self)
        self.stepper = stepper
        self.q = Queue(maxsize=0)

    def setPosition(self, pos):
        self.q.put(pos)

    def run(self):
        while not self.q.empty():
            item = self.q.get()
            # run motor and do some stuff
thread_1 = Stepper(myStepper1)
thread_2 = Stepper(myStepper2)
thread_1.start()
thread_2.start()

loop = 10
while loop:
    thread_1.setPosition(10)
    thread_2.setPosition(30)
    # I want to wait here
    thread_1.setPosition(10)
    thread_2.setPosition(30)
    loop = loop - 1

thread_1.join()
thread_2.join()
Both thread_1 and thread_2 won't finish at the same time, depending on the number of steps each motor needs to process.
I have tried to use the Lock() functionality but I am not sure how to implement it correctly. I also thought about re-creating the Threads, but I am not sure if this is the correct solution.

You can actually use a Semaphore:
from queue import Queue
from threading import Semaphore, Thread

class Stepper(Thread):
    def __init__(self, stepper, semaphore):
        Thread.__init__(self)
        self.stepper = stepper
        self.semaphore = semaphore
        self.q = Queue(maxsize=0)

    def setPosition(self, pos):
        self.q.put(pos)

    def run(self):
        while not self.q.empty():
            try:
                item = self.q.get()
                # run motor and do some stuff
            finally:
                self.semaphore.release()  # release the semaphore when one cycle is finished
semaphore = Semaphore(2)
thread_1 = Stepper(myStepper1, semaphore)
thread_2 = Stepper(myStepper2, semaphore)
thread_1.start()
thread_2.start()

loop = 10
for i in range(loop):
    semaphore.acquire()
    semaphore.acquire()
    thread_1.setPosition(10)
    thread_2.setPosition(30)
    semaphore.acquire()
    semaphore.acquire()  # wait until the 2 threads have both released the semaphore
    thread_1.setPosition(10)
    thread_2.setPosition(30)
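Each setPosition() call queues one cycle of work, and each finished cycle releases the semaphore once, so a pair of acquire() calls blocks the main loop until both motors have completed their current moves.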

You can use the thread's join method like so:
thread_1.join() # Wait for thread_1 to finish
thread_2.join() # Same for thread_2
As per the documentation at https://docs.python.org/3/library/threading.html#threading.Thread.join:
A thread can be join()ed many times.
To run threads repeatedly, you will need to reinitialize the Thread object after each run.
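For example, a minimal sketch of that pattern using the Stepper class from the question (positions must be queued before start(), since its run() returns as soon as the queue is empty):

for _ in range(10):
    # fresh Thread objects each cycle, since a Thread can only be started once
    thread_1 = Stepper(myStepper1)
    thread_2 = Stepper(myStepper2)
    thread_1.setPosition(10)
    thread_2.setPosition(30)
    thread_1.start()
    thread_2.start()
    thread_1.join()  # wait for motor 1 to finish this cycle
    thread_2.join()  # wait for motor 2 to finish this cycle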

Related

Python port scanner, how to determine optimal number of threads

I have a multi threaded Python port scanner where each thread in a loop gets something (an IP address/port pair) from a common queue, does some work on it (connects, does a handshake and grabs the server's version) and loops again.
Here's some partial code:
import threading, queue, multiprocessing

class ScanProcess(multiprocessing.Process):
    threads = []

    def __init__(self, squeue, dqueue, count):
        self.squeue = squeue
        self.dqueue = dqueue
        self.count = count
        self._init_threads()
        super(ScanProcess, self).__init__()

    def _init_threads(self):
        self.threads = [ScanThread(self.squeue, self.dqueue) for _ in range(0, self.count)]

    def _start_threads(self):
        for thread in self.threads:
            thread.start()

    def _join_threads(self):
        for thread in self.threads:
            thread.join()

    def run(self):
        self._start_threads()
        self._join_threads()

class ScanThread(threading.Thread):
    def __init__(self, squeue, dqueue):
        self.squeue = squeue
        self.dqueue = dqueue
        super(ScanThread, self).__init__()

    def run(self):
        while not self.squeue.empty():
            try:
                target = self.squeue.get(block=False)
                # do the actual work, then put the result in dqueue
            except queue.Empty:
                continue

# how many threads/processes
process_count = 2
thread_count = 10

# load tasks from file or network and fill the queues
squeue = multiprocessing.Queue()
dqueue = multiprocessing.Queue()

# create and start everything
processes = [ScanProcess(squeue, dqueue, thread_count) for _ in range(0, process_count)]
for process in processes:
    process.start()
for process in processes:
    process.join()

# enjoy the show!
The problem I've got is that the number of threads is currently set manually. I'd like to set it automatically to saturate the network connection while not dropping packets, but I have no idea how to begin implementing that. Could anyone summarize how nmap/zmap do it?
Any help is appreciated.

How to launch two threads that are locked before running, and execute them only when unlocked?

I have this example code to explain my problem:
import threading
import time

class thread1(threading.Thread):
    def __init__(self, lock):
        threading.Thread.__init__(self)
        self.daemon = True
        self.start()
        self.lock = lock

    def run(self):
        while True:
            self.lock.acquire(True)
            print('write done by t1')
            self.lock.release()

class thread2(threading.Thread):
    def __init__(self, lock):
        threading.Thread.__init__(self)
        self.daemon = True
        self.start()
        self.lock = lock

    def run(self):
        while True:
            self.lock.acquire(True)
            print('write done by t2')
            self.lock.release()

if __name__ == '__main__':
    lock = threading.Lock()
    t1 = thread1(lock)
    t2 = thread2(lock)
    lock.acquire(True)
    counter = 0
    while True:
        print("main...")
        counter = counter + 1
        if counter == 5 or counter == 10:
            lock.release()  # Here I want to unlock both threads to run just one time and then wait until I release again
        time.sleep(1)
    t1.join()
    t2.join()
What I'm having issues with is the following:
I want to have two threads (thread1 and thread2) that are launched at the beginning of the program, but they should wait until the main() counter reaches 5 or 10.
When the main() counter reaches 5 or 10, it should signal/trigger/unlock the threads; both threads should run just once and then wait for a new unlock.
I was expecting the code to have the following output (each line is one second of running):
main...
main...
main...
main...
main...
write done by t1
write done by t2
main...
main...
main...
main...
main...
write done by t1
write done by t2
Instead I have a different behaviour, such as starting with:
write done by t1
write done by t1
write done by t1
write done by t1
(etc.)
And after 5 seconds, "write done by t2" is printed many times.
Can someone help me explain what is wrong and how I can improve this?
In __init__() of thread1 and thread2, start() is invoked before self.lock is assigned.
t1 and t2 are created before the main thread acquires the lock. That makes these two threads start printing before the main thread locks them, which is why your code prints the first several lines of "write done by x".
After the counter reaches 5, the main thread releases the lock, but it never locks it again. That makes t1 and t2 keep running.
It never quits unless you kill it...
I suggest you use a Condition object instead of a Lock.
Here is an example based on your code.
import threading
import time

class Thread1(threading.Thread):
    def __init__(self, condition_obj):
        super().__init__()
        self.daemon = True
        self.condition_obj = condition_obj
        self.start()

    def run(self):
        with self.condition_obj:
            while True:
                self.condition_obj.wait()
                print('write done by t1')

class Thread2(threading.Thread):
    def __init__(self, condition_obj):
        super().__init__()
        self.daemon = True
        self.condition_obj = condition_obj
        self.start()

    def run(self):
        with self.condition_obj:
            while True:
                self.condition_obj.wait()
                print('write done by t2')

if __name__ == '__main__':
    condition = threading.Condition()
    t1 = Thread1(condition)
    t2 = Thread2(condition)
    counter = 0
    while True:
        print("main...")
        counter += 1
        if counter == 5 or counter == 10:
            with condition:
                condition.notify_all()
        time.sleep(1)
    t1.join()
    t2.join()
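Note that wait() releases the Condition's underlying lock while it blocks, which is why both threads can sit in wait() on the same Condition at once; each notify_all() wakes them both, each prints exactly once, and then they go back to waiting for the next notification.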

Why are the threads not released after all work is consumed from a Python Queue?

I use a Queue to provide tasks that the threads can work on. After all the work in the Queue is done, I see the threads are still alive, while I expected them to be released. Here is my code. You can see from the console that the number of active threads increases after each batch of tasks (on the same queue). How can I release the threads after a batch of work gets done?
import threading
import time
from Queue import Queue

class ThreadWorker(threading.Thread):
    def __init__(self, task_queue):
        threading.Thread.__init__(self)
        self.task_queue = task_queue

    def run(self):
        while True:
            work = self.task_queue.get()
            # do some work
            # do_work(work)
            time.sleep(0.1)
            self.task_queue.task_done()

def get_batch_work_done(works):
    task_queue = Queue()
    for _ in range(5):
        t = ThreadWorker(task_queue)
        t.setDaemon(True)
        t.start()
    for work in range(works):
        task_queue.put(work)
    task_queue.join()
    print 'get batch work done'
    print 'active threads count is {}'.format(threading.activeCount())

if __name__ == '__main__':
    for work_number in range(3):
        print 'start with {}'.format(work_number)
        get_batch_work_done(work_number)
Do a get() with a short timeout in a loop and use exception handling to terminate:
def run(self):
    try:
        while True:
            work = self.task_queue.get(True, 0.1)  # block for at most 0.1 seconds
            # do some work
            # do_work(work)
            self.task_queue.task_done()  # still needed so task_queue.join() can return
    except Empty:  # requires: from Queue import Empty
        print "goodbye"

How can I execute code in parallel or with multiple threads?

The following code works for me; the problem is that each thread has to wait until the previous one ends, or at least that is my perception, because when I put in the sleep(10), the program waits that long and then continues with the next thread.
What I want is for the threads to launch without having to wait for each other's internal code to run.
This is my code (example):
import threading
from time import sleep

class MyThread(threading.Thread):
    def __init__(self, num):
        threading.Thread.__init__(self)
        self.num = num

    def run(self):
        print "I'm the thread", self.num
        sleep(10)
        print "I'm the thread, after 10 seg"

print "I'm the main thread"
for i in range(0, 10):
    t = MyThread(i)
    t.start()
    t.join()
Thanks in advance.
Use two for loops: one to start the threads and one to wait for them:
# create all threads
ts = [MyThread(i) for i in range(10)]

# start all threads
for t in ts:
    t.start()

# wait for all threads
for t in ts:
    t.join()
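Started this way, all ten threads sleep concurrently and the whole run takes roughly 10 seconds instead of 100; the original loop's t.join() forced each thread to finish before the next one even started.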

Why does my Python multiprocess code run very slowly?

# multi-processes
from multiprocessing import Process, Queue

class Worker(object):
    def __init__(self, queue):
        self.queue = queue
        self.process_num = 10  # <------------ 10 processes
        self.count = 0

    def start(self):
        for i in range(self.process_num):
            p = Process(target=self.run)
            p.start()
            p.join()

    def run(self):
        while True:
            self.count += 1
            user = self.queue.get()
            # do something not so fast, like time.sleep(1)
            print self.count
            if self.queue.empty():
                break
I use Worker().start(queue) to start the program, but the output is not as fast as I expected (it seems only one process is running).
Is there any problem in my code?
Yes, you're only running one process at a time; you're waiting for each process to terminate before starting the next:
def start(self):
    for i in range(self.process_num):
        p = Process(target=self.run)
        p.start()   # <-- starts a new process
        p.join()    # <-- waits for the process to terminate
In other words, you're starting 10 processes, but the second one won't start until the first one terminates and so on.
For what you're trying to do, it may be better not to use Process manually and instead use a Process Pool.
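For example, a rough sketch of the Pool version (handle_user and the users list are illustrative placeholders, not part of the original code):

from multiprocessing import Pool

def handle_user(user):
    # do something not so fast, like time.sleep(1)
    return user

if __name__ == '__main__':
    users = range(100)                       # placeholder for the real work items
    pool = Pool(processes=10)                # 10 workers, matching process_num above
    results = pool.map(handle_user, users)   # distributes the work across all processes
    pool.close()
    pool.join()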
