I am exploring Klein and Deferred. In the following example I am trying to increment a number in a child process and return it via a Future. I am able to receive the Future callback.
The problem is that the Deferred never calls the cb() function, and the request made to the endpoint never returns. Please help me identify the problem.
Following is my server.py code:
from klein import Klein
from twisted.internet.defer import inlineCallbacks, returnValue

import Process4

if __name__ == '__main__':
    app = Klein()

    @app.route('/visit')
    @inlineCallbacks
    def get_num_visit(req):
        try:
            resp = yield Process4.get_visitor_num()
            req.setResponseCode(200)
            returnValue('Visited = {}'.format(resp))
        except Exception as e:
            req.setResponseCode(500)
            returnValue('error {}'.format(e))

    print('starting server')
    app.run('0.0.0.0', 5005)
Following is Process4.py code:
from multiprocessing import Process
from concurrent.futures import Future
from time import sleep
from twisted.internet.defer import Deferred


def foo(x):
    result = x + 1
    sleep(3)
    return result


class MyProcess(Process):
    def __init__(self, target, args):
        super().__init__()
        self.target = target
        self.args = args
        self.f = Future()
        self.visit = 0

    def run(self):
        r = foo(self.visit)
        self.f.set_result(result=r)


def cb(result):
    print('visitor number {}'.format(result))
    return result


def eb(err):
    print('error occurred {}'.format(err))
    return err


def future_to_deferred(future):
    d = Deferred()

    def callback(f):
        e = f.exception()
        if e:
            d.errback(e)
        else:
            d.callback(f.result())

    future.add_done_callback(callback)
    return d


def get_visitor_num():
    p1 = MyProcess(target=foo, args=None)
    d = future_to_deferred(p1.f)
    p1.start()
    d.addCallback(cb)
    d.addErrback(eb)
    sleep(1)
    return d
Edit 1
Adding the callbacks before starting the process p1 solves the problem of cb() never being called, but the HTTP request made to the endpoint still does not return.
It turns out that setting the future result with self.f.set_result(result=r) in the run() method triggers the callback() function in the child process, where no thread is waiting for the result!
So to get callback() triggered in the MainProcess, I had to fetch the result from the child process through a multiprocessing Queue using a worker thread in the MainProcess, and set the future result there.
@notorious.no Thanks for the reply. One thing I noticed is that reactor.callFromThread does switch the result delivery from the worker thread to the MainThread in my modified code; d.callback(f.result()) works just fine as well, but it fires the callbacks from the worker thread.
Following is the modified working code
server.py
from klein import Klein
from twisted.internet.defer import inlineCallbacks, returnValue

import Process4

if __name__ == '__main__':
    app = Klein()
    visit_count = 0

    @app.route('/visit')
    @inlineCallbacks
    def get_num_visit(req):
        global visit_count
        try:
            resp = yield Process4.get_visitor_num(visit_count)
            req.setResponseCode(200)
            visit_count = resp
            returnValue('Visited = {}'.format(resp))
        except Exception as e:
            req.setResponseCode(500)
            returnValue('error {}'.format(e))

    print('starting server')
    app.run('0.0.0.0', 5005)
Process4.py
from multiprocessing import Process, Queue
from concurrent.futures import Future
from time import sleep
from twisted.internet.defer import Deferred
import threading
from twisted.internet import reactor


def foo(x, q):
    result = x + 1
    sleep(3)
    print('setting result, {}'.format(result))
    q.put(result)


class MyProcess(Process):
    def __init__(self, target, args):
        super().__init__()
        self.target = target
        self.args = args
        self.visit = 0

    def run(self):
        self.target(*self.args)


def future_to_deferred(future):
    d = Deferred()

    def callback(f):
        e = f.exception()
        print('inside callback {}'.format(threading.current_thread().name))
        if e:
            print('calling errback')
            d.errback(e)
            # reactor.callFromThread(d.errback, e)
        else:
            print('calling callback with result {}'.format(f.result()))
            # d.callback(f.result())
            reactor.callFromThread(d.callback, f.result())

    future.add_done_callback(callback)
    return d


def wait(q, f):
    r = q.get(block=True)
    f.set_result(r)


def get_visitor_num(x):
    def cb(result):
        print('inside cb visitor number {} {}'.format(result, threading.current_thread().name))
        return result

    def eb(err):
        print('inside eb error occurred {}'.format(err))
        return err

    f = Future()
    q = Queue()
    p1 = MyProcess(target=foo, args=(x, q,))
    wait_thread = threading.Thread(target=wait, args=(q, f,))
    wait_thread.start()
    defr = future_to_deferred(f)
    defr.addCallback(cb)
    defr.addErrback(eb)
    p1.start()
    print('returning deferred')
    return defr
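As an aside, Twisted already ships a helper for this kind of bridge: twisted.internet.threads.deferToThread runs a blocking call in the reactor's thread pool and fires the resulting Deferred's callbacks on the reactor thread. A minimal sketch (my suggestion, not the code above), assuming the same foo() and MyProcess:

from multiprocessing import Queue
from twisted.internet.threads import deferToThread


def get_visitor_num(x):
    q = Queue()
    p1 = MyProcess(target=foo, args=(x, q))
    p1.start()
    # q.get(True) blocks a pool thread, not the reactor; the result is
    # delivered back on the reactor (main) thread, replacing the manual
    # Future plus wait() worker thread.
    return deferToThread(q.get, True)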
Related
I'm writing a program which starts one thread to generate "work" and add it to a queue every N seconds. Then, I have a thread pool which processes items in the queue.
The program below works perfectly fine until I comment out/delete the time.sleep(0.5) call in the main function. Once I do that, it raises a RuntimeError when attempting to gracefully stop the program (by sending a SIGINT or SIGTERM to the main process). It even works fine with an extremely small sleep like 0.1s, but fails with no sleep at all.
I tried researching "reentrancy" but it went a bit over my head unfortunately.
Can anyone help me to understand this?
Code:
import random
import signal
import threading
import time
from concurrent.futures import Future, ThreadPoolExecutor
from datetime import datetime
from queue import Empty, Queue, SimpleQueue
from typing import Any


class UniqueQueue:
    """
    A thread safe queue which can only ever contain unique items.
    """

    def __init__(self) -> None:
        self._q = Queue()
        self._items = []
        self._l = threading.Lock()

    def get(self, block: bool = False, timeout: float | None = None) -> Any:
        with self._l:
            try:
                item = self._q.get(block=block, timeout=timeout)
            except Empty:
                raise
            else:
                self._items.pop(0)
                return item

    def put(self, item: Any, block: bool = False, timeout: float | None = None) -> None:
        with self._l:
            if item in self._items:
                return None
            self._items.append(item)
            self._q.put(item, block=block, timeout=timeout)

    def size(self) -> int:
        return self._q.qsize()

    def empty(self) -> bool:
        return self._q.empty()


def stop_app(sig_num, sig_frame) -> None:
    # global stop_app_event
    print("Signal received to stop the app")
    stop_app_event.set()


def work_generator(q: UniqueQueue) -> None:
    last_execution = time.time()
    is_first_execution = True
    while not stop_app_event.is_set():
        elapsed_seconds = int(time.time() - last_execution)
        if elapsed_seconds <= 10 and not is_first_execution:
            time.sleep(0.5)
            continue
        last_execution = time.time()
        is_first_execution = False
        print("Generating work...")
        for _ in range(100):
            q.put({"n": random.randint(0, 500)})


def print_work(w) -> None:
    print(f"{datetime.now()}: {w}")


def main():
    # Create a work queue
    work_queue = UniqueQueue()

    # Create a thread to generate the work and add to the queue
    t = threading.Thread(target=work_generator, args=(work_queue,))
    t.start()

    # Create a thread pool, get work from the queue, and submit to the pool for processing
    pool = ThreadPoolExecutor(max_workers=20)
    futures: list[Future] = []
    while True:
        print("Processing work...")
        if stop_app_event.is_set():
            print("stop_app_event is set:", stop_app_event.is_set())
            for future in futures:
                future.cancel()
            break
        print("Queue Size:", work_queue.size())
        try:
            while not work_queue.empty():
                work = work_queue.get()
                future = pool.submit(print_work, work)
                futures.append(future)
        except Empty:
            pass
        time.sleep(0.5)

    print("Stopping the work generator thread...")
    t.join(timeout=10)
    print("Work generator stopped")

    print("Stopping the thread pool...")
    pool.shutdown(wait=True)
    print("Thread pool stopped")


if __name__ == "__main__":
    stop_app_event = threading.Event()
    signal.signal(signalnum=signal.SIGINT, handler=stop_app)
    signal.signal(signalnum=signal.SIGTERM, handler=stop_app)
    main()
It's because you called print() in the signal handler, stop_app().
In C, a signal handler is executed in a background thread, but in Python it is executed in the main thread (see the reference). In your case, while one print() call was executing, another print() was called from the signal handler, so the term 'reentrant' fits perfectly. And the current I/O stack prohibits reentrant calls (see the implementation if you are interested).
You can remedy this by using os.write() with sys.stdout's file descriptor, like the following:
import sys
import os

...

def stop_app(sig_num, sig_frame):
    os.write(sys.stdout.fileno(), b"Signal received to stop the app\n")
    stop_app_event.set()
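Alternatively, you can avoid I/O in the handler altogether. A minimal sketch (my suggestion, not from the original answer), assuming the stop_app_event from the program above:

def stop_app(sig_num, sig_frame):
    # Only set the event; do no I/O here. The handler is then trivially
    # safe because it never touches Python's buffered I/O stack.
    stop_app_event.set()

# ...then print from the main loop instead, e.g. inside main():
#     if stop_app_event.is_set():
#         print("Signal received to stop the app")
#         break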
I have a scraper which initiates a requests session and fetches some data over IPv6. I now have a list of 10,000 IPs, and I have parallelized the work using threading, but it's giving an error.
I need support to find out the issue.
import requests, queue, threading, urllib3, json, pandas as pd, os, time, datetime, inspect

num_threads = 2
root = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
with open(root + "/ip_list.txt") as ips:
    device_ip = list(ips)


class Writer_Worker(threading.Thread):
    def __init__(self, queue, df, *args, **kwargs):
        if not queue:
            print("Device Queue not specified")
            exit(1)
        self.out_q = queue
        self.df = df
        super().__init__(*args, **kwargs)

    def run(self):
        while True:
            try:
                device_details = self.out_q.get(timeout=3)
            except queue.Empty:
                return
            self.df[device_details[0]] = device_details
            self.out_q.task_done()


class Worker(threading.Thread):
    def __init__(self, queue, out_queue, device_password, *args, **kwargs):
        if not queue:
            print("Device Queue not specified")
            exit(1)
        self.queue = queue
        self.pas = device_password
        self.out_q = out_queue
        super().__init__(*args, **kwargs)

    def run(self):
        while True:
            try:
                device_ip = self.queue.get(timeout=3)
            except queue.Empty:
                return
            self.connect_to_device_and_process(device_ip)
            self.queue.task_done()

    def connect_to_device_and_process(self, device_ip):
        st = str("Online")
        try:
            r = requests.post("https://[" + device_ip + "]/?q=index.login&mimosa_ajax=1",
                              {"username": "configure", "password": self.pas}, verify=False)
        except requests.exceptions.ConnectionError:
            st = str("Offline")
            self.out_q.put([device_ip, st, "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""])
            return
        finally:
            if 'Online' in st:
                r = requests.get("https://[" + device_ip + "]/cgi/dashboard.php", cookies=r.cookies, verify=False)
                if "Response [401]" in str(r):
                    st2 = str("Password Error")
                    self.out_q.put([device_ip, st2, "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""])
                else:
                    data = json.loads(r.content.decode())
                    output5 = data['config']['Spectrum_Power']
                    self.out_q.put([device_ip, st,
                                    output5['Auto_Power'].replace('2', 'Max Power').replace('1', 'Min Power').replace('0', 'off'),
                                    output5['AutoConfig']])


def main():
    start = time.time()
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    pas = input("Enter Device Password:")
    df = pd.DataFrame(columns=["IP", "Status", "Auto_Power", "AutoChannel"])
    q = queue.Queue(len(device_ip))
    for ip in device_ip:
        q.put_nowait(ip)
    out_q = queue.Queue(len(device_ip))
    Writer_Worker(out_q, df).start()
    for _ in range(num_threads):
        Worker(q, out_q, pas).start()
    q.join()
    print(df)
    df.to_excel('iBridge_C5x_Audit_Report.xlsx', sheet_name='Detail', index=False)


if __name__ == "__main__":
    main()
Below is the error while running the script; it seems I am unable to log in to the device.
Any help is appreciated.
You should use a thread pool that distributes the work between a fixed number of threads. This has been a core feature of Python since version 3.2:

from concurrent.futures import ThreadPoolExecutor

Define a function perform(ip) that performs the request for one IP, set the variable numThreads to the desired number of threads, and run the thread-pool executor:

print(f'Using {numThreads} threads')
with ThreadPoolExecutor(max_workers=numThreads) as pool:
    success = all(pool.map(perform, ips))
Source: https://docs.python.org/3/library/concurrent.futures.html
On that page you find an example even better tailored to your application: https://docs.python.org/3/library/concurrent.futures.html#threadpoolexecutor-example
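A minimal sketch of how perform() might look for the scraper above (my adaptation, not from the original answer; it assumes the device_ip list and the pas password prompt from the question, and the timeout values are illustrative):

import json
import requests
import urllib3
from concurrent.futures import ThreadPoolExecutor

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
numThreads = 20

def perform(ip):
    # Log in to one device and return a result row for the report.
    try:
        r = requests.post("https://[" + ip + "]/?q=index.login&mimosa_ajax=1",
                          {"username": "configure", "password": pas},
                          verify=False, timeout=10)
    except requests.exceptions.ConnectionError:
        return [ip, "Offline"]
    r = requests.get("https://[" + ip + "]/cgi/dashboard.php",
                     cookies=r.cookies, verify=False, timeout=10)
    if r.status_code == 401:
        return [ip, "Password Error"]
    data = json.loads(r.content.decode())
    return [ip, "Online", data['config']['Spectrum_Power']['Auto_Power']]

print(f'Using {numThreads} threads')
with ThreadPoolExecutor(max_workers=numThreads) as pool:
    rows = list(pool.map(perform, device_ip))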
from threading import Thread

th = Thread(target=self.fill_imdb, args=(movies_info_part, "thread " + str(count)))
th.start()

fill_imdb is my method.
I have a requirement to create child processes, receive results using a Future, and then kill some of them when required.
For this I have subclassed the multiprocessing.Process class and return a Future object from the start() method.
The problem is that I am not able to receive the result in the cb() function, as it never gets called.
Please help or suggest whether this can be done some other way, or point out what I am missing in my current implementation.
Following is my current approach:
from multiprocessing import Process, Queue
from concurrent.futures import _base
import threading
from time import sleep


def foo(x, q):
    print('result {}'.format(x * x))
    result = x * x
    sleep(5)
    q.put(result)


class MyProcess(Process):
    def __init__(self, target, args):
        super().__init__()
        self.target = target
        self.args = args
        self.f = _base.Future()

    def run(self):
        q = Queue()
        worker_thread = threading.Thread(target=self.target, args=(self.args + (q,)))
        worker_thread.start()
        r = q.get(block=True)
        print('setting result {}'.format(r))
        self.f.set_result(result=r)
        print('done setting result')

    def start(self):
        f = _base.Future()
        run_thread = threading.Thread(target=self.run)
        run_thread.start()
        return f


def cb(future):
    print('received result in callback {}'.format(future))


def main():
    p1 = MyProcess(target=foo, args=(2,))
    f = p1.start()
    f.add_done_callback(fn=cb)
    sleep(10)


if __name__ == '__main__':
    main()
    print('Main thread dying')
In your start() method you create a new Future which you then return. This is a different future than the one you set the result on; the returned future is simply never used at all. Try:

def start(self):
    run_thread = threading.Thread(target=self.run)
    run_thread.start()
    return self.f
However, there are more problems with your code. You override the start() method of the process, replacing it with execution on a worker thread, thereby actually bypassing multiprocessing. Also, you shouldn't import the _base module; that is an implementation detail, as seen from the leading underscore. You should import concurrent.futures.Future (it's the same class, but through the public API).
This really uses multiprocessing:
from multiprocessing import Process, Queue
from concurrent.futures import Future
import threading
from time import sleep


def foo(x, q):
    print('result {}'.format(x * x))
    result = x * x
    sleep(5)
    q.put(result)


class MyProcess(Process):
    def __init__(self, target, args):
        super().__init__()
        self.target = target
        self.args = args
        self.f = Future()

    def run(self):
        q = Queue()
        worker_thread = threading.Thread(target=self.target, args=(self.args + (q,)))
        worker_thread.start()
        r = q.get(block=True)
        print('setting result {}'.format(r))
        self.f.set_result(result=r)
        print('done setting result')


def cb(future):
    print('received result in callback {}: {}'.format(future, future.result()))


def main():
    p1 = MyProcess(target=foo, args=(2,))
    p1.f.add_done_callback(fn=cb)
    p1.start()
    p1.join()
    sleep(10)


if __name__ == '__main__':
    main()
    print('Main thread dying')
And since you're already in a new process at that point, spawning a worker thread to execute your target function shouldn't really be necessary; you could just execute the target function directly instead. Should the target function raise an exception, you wouldn't know about it, as your callback will only be called on success. If you fix that, you're left with:
from multiprocessing import Process
from concurrent.futures import Future
import threading
from time import sleep


def foo(x):
    print('result {}'.format(x * x))
    result = x * x
    sleep(5)
    return result


class MyProcess(Process):
    def __init__(self, target, args):
        super().__init__()
        self.target = target
        self.args = args
        self.f = Future()

    def run(self):
        try:
            r = self.target(*self.args)
            print('setting result {}'.format(r))
            self.f.set_result(result=r)
            print('done setting result')
        except Exception as ex:
            self.f.set_exception(ex)


def cb(future):
    print('received result in callback {}: {}'.format(future, future.result()))


def main():
    p1 = MyProcess(target=foo, args=(2,))
    p1.f.add_done_callback(fn=cb)
    p1.start()
    p1.join()
    sleep(10)


if __name__ == '__main__':
    main()
    print('Main thread dying')
This is basically what a ProcessPoolExecutor does.
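For comparison, a minimal sketch of the same flow using ProcessPoolExecutor directly (assuming foo() as defined above; the executor creates the Future for you):

from concurrent.futures import ProcessPoolExecutor


def cb(future):
    print('received result in callback {}: {}'.format(future, future.result()))


if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=1) as pool:
        f = pool.submit(foo, 2)  # returns a concurrent.futures.Future
        f.add_done_callback(cb)
    # leaving the with-block waits for the child process to finish
    print('Main thread dying')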
I'm using threading.Thread and t.start() with a list of callables to do long-running multithreaded processing. My main thread blocks until all threads have finished. However, I'd like it to return immediately if one of the callables throws an exception, and to terminate the other threads.
Using t.join() to check that a thread finished provides no information about failures due to exceptions.
Here is the code:
import json
import threading
import requests


class ThreadServices:

    def __init__(self):
        self.obj = ""

    def execute_services(self, arg1, arg2):
        try:
            result = call_some_process(arg1, arg2)  # some method
            # save results somewhere
        except Exception, e:
            # raise exception
            print e

    def invoke_services(self, stubs):
        """
        Thread Spanning Function
        """
        try:
            p1 = ""  # some value
            p2 = ""  # some value
            # Call service 1
            t1 = threading.Thread(target=self.execute_services, args=(p1, p2,))
            # Start thread
            t1.start()
            # Block till thread completes execution
            t1.join()

            thread_pool = list()
            for stub in stubs:
                # Start parallel execution of threads
                t = threading.Thread(target=self.execute_services,
                                     args=(p1, p2))
                t.start()
                thread_pool.append(t)

            for thread in thread_pool:
                # Block till all the threads complete execution: wait for
                # all the parallel tasks to complete
                thread.join()

            # Start another process thread
            t2 = threading.Thread(target=self.execute_services,
                                  args=(p1, p2))
            t2.start()
            # Block till this thread completes execution
            t2.join()
            requests.post(url, data=json.dumps({"status_code": 200}))
        except Exception, e:
            print e
            requests.post(url, data=json.dumps({"status_code": 500}))
        # Don't return anything as this function is invoked as a thread from
        # the main calling function


class Service(ThreadServices):
    """
    Service Class
    """

    def main_thread(self, request, context):
        """
        Main Thread: Invokes Task Execution Sequence in ThreadedService
        :param request:
        :param context:
        :return:
        """
        try:
            main_thread = threading.Thread(target=self.invoke_services,
                                           args=(request,))
            main_thread.start()
            return True
        except Exception, e:
            return False
When I call Service().main_thread(request, context) and there is some exception executing t1, I need it raised in main_thread so that it returns False. How can I implement this for this structure? Thanks!
For one thing, you are complicating matters too much. I would do it this way:
from thread import start_new_thread as thread
from time import sleep


class Task:
    """One thread per task.
    This you should do by subclassing threading.Thread().
    This is just a conceptual example.
    """

    def __init__(self, func, args=(), kwargs={}):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.error = None
        self.done = 0
        self.result = None

    def _run(self):
        self.done = 0
        self.error = None
        self.result = None
        # So this is what you should do in a subclassed Thread():
        try:
            self.result = self.func(*self.args, **self.kwargs)
        except Exception, e:
            self.error = e
        self.done = 1

    def start(self):
        thread(self._run, ())

    def wait(self, retrexc=1):
        """Used in place of threading.Thread.join(), but it returns the result
        of the function self.func() and manages errors."""
        while not self.done:
            sleep(0.001)
        if self.error:
            if retrexc:
                return self.error
            raise self.error
        return self.result


# And this is how you should use your pool:
def do_something(tasknr):
    print tasknr - 20
    if tasknr % 7 == 0:
        raise Exception, "Dummy exception!"
    return tasknr ** 120 / 82.0


pool = []
for task in xrange(20, 50):
    t = Task(do_something, (task,))
    t.start()  # start each task as it is created
    pool.append(t)

# And only then wait for each one:
results = []
for task in pool:
    results.append(task.wait())

print results
This way you can make task.wait() raise the error instead. The thread will already have stopped, so all you need to do is remove the task references from pool, or the whole pool, after you are done. You can even:
results = []
for task in pool:
    try:
        results.append(task.wait(0))
    except Exception, e:
        print task.args, "Error:", str(e)

print results
Now, do not use exactly this (I mean the Task() class), as it needs a lot of things added to be used for real.
Just subclass threading.Thread() and implement a similar concept by overriding run() and join(), or by adding new functions like wait(). A minimal sketch of that follows.
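Here is a minimal Python 3 sketch of that suggestion (ExcThread is an illustrative name, not from the answer): run() captures any exception, and join() re-raises it in the calling thread.

import threading


class ExcThread(threading.Thread):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.exc = None

    def run(self):
        try:
            # Thread.run() calls self._target(*self._args, **self._kwargs)
            super().run()
        except BaseException as e:
            self.exc = e

    def join(self, timeout=None):
        super().join(timeout)
        if self.exc is not None:
            raise self.exc


def boom():
    raise ValueError("Dummy exception!")


t = ExcThread(target=boom)
t.start()
try:
    t.join()
except ValueError as e:
    print("caught from thread:", e)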
I'm using the following code to multithread urllib2. What is the best way to limit the number of threads that it consumes?
import threading
import urllib2
import Queue


class ApiMultiThreadHelper:
    def __init__(self, api_calls):
        self.q = Queue.Queue()
        self.api_datastore = {}
        self.api_calls = api_calls
        self.userpass = '#####'

    def query_api(self, q, api_query):
        self.q.put(self.issue_request(api_query))

    def issue_request(self, api_query):
        self.api_datastore.update({api_query: {}})
        for lookup in ["call1", "call2"]:
            query = api_query + lookup
            request = urllib2.Request(query)
            request.add_header("Authorization", "Basic %s" % self.userpass)
            f = urllib2.urlopen(request)
            response = f.read()
            f.close()
            self.api_datastore[api_query].update({lookup: response})
        return True

    def go(self):
        threads = []
        for i in self.api_calls:
            t = threading.Thread(target=self.query_api, args=(self.q, i))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
You should use a thread pool. Here's an implementation I made years ago (Python 3.x friendly):
import traceback
from threading import Thread
try:
    import queue as Queue  # Python 3.x
except ImportError:
    import Queue


class ThreadPool(object):
    def __init__(self, no=10):
        self.alive = True
        self.tasks = Queue.Queue()
        self.threads = []
        for _ in range(no):
            t = Thread(target=self.worker)
            t.start()
            self.threads.append(t)

    def worker(self):
        while self.alive:
            try:
                fn, args, kwargs = self.tasks.get(timeout=0.5)
            except Queue.Empty:
                continue
            except ValueError:
                self.tasks.task_done()
                continue
            try:
                fn(*args, **kwargs)
            except Exception:
                # might wanna add some better error handling
                traceback.print_exc()
            self.tasks.task_done()

    def add_job(self, fn, args=[], kwargs={}):
        self.tasks.put((fn, args, kwargs))

    def join(self):
        self.tasks.join()

    def deactivate(self):
        self.alive = False
        for t in self.threads:
            t.join()
You can also find a similar class in the multiprocessing.pool module (don't ask me why it is there). You can then refactor your code like this:
def go(self):
    tp = ThreadPool(20)  # <-- 20 thread workers
    for i in self.api_calls:
        tp.add_job(self.query_api, args=(self.q, i))
    tp.join()
    tp.deactivate()
The number of threads is now defined a priori.
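On Python 3 you can get the same behavior from the standard library. A minimal sketch of go() using concurrent.futures.ThreadPoolExecutor instead of the hand-rolled pool:

from concurrent.futures import ThreadPoolExecutor


def go(self):
    # max_workers caps the number of threads a priori
    with ThreadPoolExecutor(max_workers=20) as pool:
        for i in self.api_calls:
            pool.submit(self.query_api, self.q, i)
        # leaving the with-block waits for every submitted job to finish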