How do I stop an asyncio event loop from a child thread? - python

from cryptofeed import FeedHandler
from cryptofeed.feed import Feed
from cryptofeed.defines import L2_BOOK, BID, ASK
from cryptofeed.exchange.ftx import FTX
from threading import Thread
from time import sleep
class Executor:
    def __init__(self, feed: Feed, coin_symbol: str, fut_symbol: str):
        self.coin_symbol = coin_symbol
        self.fut_symbol = fut_symbol
        self.feed = feed
        self.fh = FeedHandler()
        self.loop = None
        self._coin_top_book: dict = {}
        self._fut_top_book: dict = {}

    async def _book_update(self, feed, symbol, book, timestamp, receipt_timestamp):
        if symbol == self.coin_symbol:
            self._coin_top_book[BID] = book[BID].peekitem(-1)
            self._coin_top_book[ASK] = book[ASK].peekitem(0)
        elif symbol == self.fut_symbol:
            self._fut_top_book[BID] = book[BID].peekitem(-1)
            self._fut_top_book[ASK] = book[ASK].peekitem(0)

    def start_feed(self):
        self.fh.add_feed(self.feed(symbols=[self.fut_symbol, self.coin_symbol], channels=[L2_BOOK],
                                   callbacks={L2_BOOK: self._book_update}))
        self.fh.run()

    def shoot(self):
        # give the orderbooks time to be populated
        while len(self._coin_top_book) == 0 or len(self._fut_top_book) == 0:
            sleep(1)
        for i in range(5):
            print(self._coin_top_book)
            sleep(1)  # do some stuff
        self.fh.stop()

    def run(self):
        th1 = Thread(target=self.shoot)
        th1.start()
        self.start_feed()


def main():
    g = Executor(feed=FTX, coin_symbol='SOL-USD', fut_symbol='SOL-PERP')
    g.run()


if __name__ == '__main__':
    main()
So in my current attempt to stop this program, I call self.fh.stop() when things are finished inside shoot(). However, I get this error:
Exception in thread Thread-1:
Traceback (most recent call last):
File "/Users/mc/.pyenv/versions/3.9.1/lib/python3.9/threading.py", line 954, in _bootstrap_inner
self.run()
File "/Users/mc/.pyenv/versions/3.9.1/lib/python3.9/threading.py", line 892, in run
self._target(*self._args, **self._kwargs)
File "/Users/mc/Library/Application Support/JetBrains/PyCharmCE2021.2/scratches/scratch_1.py", line 43, in shoot
self.fh.stop()
File "/Users/mc/.virtualenvs/crypto/lib/python3.9/site-packages/cryptofeed/feedhandler.py", line 175, in stop
loop = asyncio.get_event_loop()
File "/Users/mc/.pyenv/versions/3.9.1/lib/python3.9/asyncio/events.py", line 642, in get_event_loop
raise RuntimeError('There is no current event loop in thread %r.'
RuntimeError: There is no current event loop in thread 'Thread-1'.
Presumably it's because I'm trying to access the event loop from the child thread whereas it only exists in the parent thread. However, I don't know how to handle this properly.
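One pattern that should resolve this (a sketch, not verified against cryptofeed's exact API): capture the event loop in the main thread before fh.run() starts driving it, and have the child thread hand the stop call back to that loop with call_soon_threadsafe, which is asyncio's supported way to schedule work on a loop owned by another thread:

    import asyncio

    # in Executor.start_feed (runs in the main thread):
    def start_feed(self):
        self.fh.add_feed(self.feed(symbols=[self.fut_symbol, self.coin_symbol],
                                   channels=[L2_BOOK],
                                   callbacks={L2_BOOK: self._book_update}))
        # remember the loop fh.run() is about to use
        self.loop = asyncio.get_event_loop()
        self.fh.run()

    # in Executor.shoot (runs in the child thread):
    def shoot(self):
        while len(self._coin_top_book) == 0 or len(self._fut_top_book) == 0:
            sleep(1)
        for i in range(5):
            print(self._coin_top_book)
            sleep(1)  # do some stuff
        # don't call fh.stop() directly from this thread;
        # schedule it on the loop that owns the feeds
        self.loop.call_soon_threadsafe(self.fh.stop)

This way fh.stop() executes inside the main thread's event loop, where asyncio.get_event_loop() succeeds.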

Related

How to recover the return value of a function passed to multiprocessing.Process?

I have looked at this question to get started, and it works just fine: How can I recover the return value of a function passed to multiprocessing.Process?
But in my case I would like to write a small tool that connects to many computers and gathers some statistics, each stat being gathered within a Process to make it snappy. But as soon as I try to wrap the multiprocessing command in a class for a machine, it fails.
Here is my code
import multiprocessing
import pprint
import subprocess  # needed by run_task below


def run_task(command):
    p = subprocess.Popen(command, stdout=subprocess.PIPE, universal_newlines=True, shell=False)
    result = p.communicate()[0]
    return result


MACHINE_NAME = "cptr_name"
A_STAT = "some_stats_A"
B_STAT = "some_stats_B"


class MachineStatsGatherer():
    def __init__(self, machineName):
        self.machineName = machineName
        manager = multiprocessing.Manager()
        self.localStats = manager.dict()  # creating a shared resource for the sub processes to use
        self.localStats[MACHINE_NAME] = machineName

    def gatherStats(self):
        self.runInParallel(
            self.GatherSomeStatsA,
            self.GatherSomeStatsB,
        )
        self.printStats()

    def printStats(self):
        pprint.pprint(self.localStats)

    def runInParallel(self, *fns):
        processes = []
        for fn in fns:
            process = multiprocessing.Process(target=fn, args=(self.localStats))
            processes.append(process)
            process.start()
        for process in processes:
            process.join()

    def GatherSomeStatsA(self, returnStats):
        # do some remote command, simplified here for the sake of debugging
        result = "Windows"
        returnStats[A_STAT] = result.find("Windows") != -1

    def GatherSomeStatsB(self, returnStats):
        # do some remote command, simplified here for the sake of debugging
        result = "Windows"
        returnStats[B_STAT] = result.find("Windows") != -1


def main():
    machine = MachineStatsGatherer("SOMEMACHINENAME")
    machine.gatherStats()
    return


if __name__ == '__main__':
    main()
And here is the error message
Traceback (most recent call last):
File "C:\Users\mesirard\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py", line 297, in _bootstrap
self.run()
File "C:\Users\mesirard\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "d:\workdir\trunks6\Tools\VTKAppTester\Utils\NXMachineMonitorShared.py", line 45, in GatherSomeStatsA
returnStats[A_STAT] = result.find("Windows") != -1
TypeError: 'str' object does not support item assignment
Process Process-3:
Traceback (most recent call last):
File "C:\Users\mesirard\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py", line 297, in _bootstrap
self.run()
File "C:\Users\mesirard\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "d:\workdir\trunks6\Tools\VTKAppTester\Utils\NXMachineMonitorShared.py", line 50, in GatherSomeStatsB
returnStats[B_STAT] = result.find("Windows") != -1
TypeError: 'str' object does not support item assignment
The issue is coming from this line:

    process = multiprocessing.Process(target=fn, args=(self.localStats))

It needs an extra comma at the end of args so that it is a one-element tuple:

    process = multiprocessing.Process(target=fn, args=(self.localStats,))
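To see why the comma matters: Process() coerces args into a tuple of positional arguments, and tuple() applied to a dict iterates its keys, so without the comma the worker receives a key string instead of the dict. A small illustration (not from the original answer):

    # (x,) is a one-element tuple; (x) is just x in parentheses
    d = {"cptr_name": "SOMEMACHINENAME"}
    print(tuple((d,)))  # ({'cptr_name': 'SOMEMACHINENAME'},) -> worker gets the dict
    print(tuple(d))     # ('cptr_name',) -> worker gets a key string, hence
                        # "TypeError: 'str' object does not support item assignment"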

Prometheus counter not yielding in custom collector

I am writing this custom collector where I want to add a counter.
#!/usr/bin/env python3
import sys
import time
from prometheus_client import start_http_server
from prometheus_client.core import CollectorRegistry, Counter


class MyCollector():
    def __init__(self):
        self.mymetrics_counter = Counter('observability_total', 'Status of My Services', ['app', 'test'])

    def describe(self):
        print("Started: Metrics Collector!")
        return list()

    def collect(self):
        self.mymetrics_counter.labels('observability', 'test').inc()
        yield self.mymetrics_counter


if __name__ == '__main__':
    try:
        myregistry = CollectorRegistry()
        myregistry.register(MyCollector())
        start_http_server(port=9100, registry=myregistry)
        while True:
            time.sleep(10)
    except KeyboardInterrupt:
        print("Ended: Metrics Collector!")
        sys.exit(0)
But I am getting the error below upon yield:
(venv) test_collector % python mycollector.py
Started: Metrics Collector!
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/wsgiref/handlers.py", line 137, in run
self.result = application(self.environ, self.start_response)
File "/Users/myid/Documents/myproj/workspace/test_collector/venv/lib/python3.9/site-packages/prometheus_client/exposition.py", line 123, in prometheus_app
status, header, output = _bake_output(registry, accept_header, params)
File "/Users/myid/Documents/myproj/workspace/test_collector/venv/lib/python3.9/site-packages/prometheus_client/exposition.py", line 105, in _bake_output
output = encoder(registry)
File "/Users/myid/Documents/myproj/workspace/test_collector/venv/lib/python3.9/site-packages/prometheus_client/exposition.py", line 179, in generate_latest
mname = metric.name
AttributeError: ("'Counter' object has no attribute 'name'", prometheus_client.metrics.Counter(observability))
collect() returns metric families, not metrics. If you yield each of the results of mymetrics_counter.collect(), it'd work.
Also, when you create the Counter it gets registered with the default registry, which you don't want in this sort of usage, as the metric would end up returned twice, which is invalid.
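Putting both fixes together, a corrected sketch (assuming a recent prometheus_client, whose Counter constructor accepts a registry argument):

    from prometheus_client import Counter


    class MyCollector():
        def __init__(self):
            # registry=None: keep the counter out of the default registry so
            # it is only exposed through this collector, not returned twice
            self.mymetrics_counter = Counter('observability_total', 'Status of My Services',
                                             ['app', 'test'], registry=None)

        def describe(self):
            print("Started: Metrics Collector!")
            return []

        def collect(self):
            self.mymetrics_counter.labels('observability', 'test').inc()
            # yield the metric families produced by the counter,
            # not the Counter object itself
            yield from self.mymetrics_counter.collect()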

Python multiprocessing Deadlock using Queue

I have a Python program like the one below.
from multiprocessing import Lock, Process, Queue, current_process
import time

lock = Lock()


def do_job(tasks_to_accomplish, tasks_that_are_done):
    while not tasks_to_accomplish.empty():
        task = tasks_to_accomplish.get()
        print(task)
        lock.acquire()
        tasks_that_are_done.put(task + ' is done by ' + current_process().name)
        lock.release()
        time.sleep(1)
    return True


def main():
    number_of_task = 10
    number_of_processes = 4
    tasks_to_accomplish = Queue()
    tasks_that_are_done = Queue()
    processes = []
    for i in range(number_of_task):
        tasks_to_accomplish.put("Task no " + str(i))
    # creating processes
    for w in range(number_of_processes):
        p = Process(target=do_job, args=(tasks_to_accomplish, tasks_that_are_done))
        processes.append(p)
        p.start()
    # completing process
    for p in processes:
        p.join()
    # print the output
    while not tasks_that_are_done.empty():
        print(tasks_that_are_done.get())
    return True


if __name__ == '__main__':
    main()
Sometimes the program runs perfectly, but sometimes it gets stuck and doesn't complete. When quit manually, it produces the following error:
$ python3 multiprocessing_example.py
Task no 0
Task no 1
Task no 2
Task no 3
Task no 4
Task no 5
Task no 6
Task no 7
Task no 8
Task no 9
^CProcess Process-1:
Traceback (most recent call last):
File "multiprocessing_example.py", line 47, in <module>
main()
File "multiprocessing_example.py", line 37, in main
p.join()
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/process.py", line 121, in join
res = self._popen.wait(timeout)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/popen_fork.py", line 51, in wait
return self.poll(os.WNOHANG if timeout == 0.0 else 0)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/popen_fork.py", line 29, in poll
pid, sts = os.waitpid(self.pid, flag)
KeyboardInterrupt
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/process.py", line 249, in _bootstrap
self.run()
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "multiprocessing_example.py", line 9, in do_job
task = tasks_to_accomplish.get()
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/queues.py", line 94, in get
res = self._recv_bytes()
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/connection.py", line 216, in recv_bytes
buf = self._recv_bytes(maxlength)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/connection.py", line 407, in _recv_bytes
buf = self._recv(4)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/connection.py", line 379, in _recv
chunk = read(handle, remaining)
KeyboardInterrupt
Can someone tell me what the issue with the program is? I am using Python 3.6.
Note: a Lock is not needed around a Queue.

    lock.acquire()
    tasks_that_are_done.put(task + ' is done by ' + current_process().name)
    lock.release()

From the Queue documentation: "The Queue class in this module implements all the required locking semantics."
Question: ... what is the issue with the program?
You are using Queue.empty() and Queue.get() as separate calls, which leads to a deadlock at join(): there is no guarantee that the queue is still non-empty by the time get() runs, because another process may have taken the last item in between.
Deadlock prone:

    while not tasks_to_accomplish.empty():
        task = tasks_to_accomplish.get()

Instead of the empty()/get() pair, use for instance:

    import queue

    while True:
        try:
            task = tasks_to_accomplish.get_nowait()
        except queue.Empty:
            break
        else:
            # Handle task here
            ...
            tasks_to_accomplish.task_done()
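One caveat about that snippet for this particular question (an observation on top of the quoted answer): task_done() exists on queue.Queue and multiprocessing.JoinableQueue, but not on the plain multiprocessing.Queue the question uses, so with the question's queues the worker reduces to roughly:

    import queue
    import time
    from multiprocessing import current_process


    def do_job(tasks_to_accomplish, tasks_that_are_done):
        while True:
            try:
                # non-blocking get: either we atomically claim a task or we
                # find out the queue is empty, with no race window in between
                task = tasks_to_accomplish.get_nowait()
            except queue.Empty:
                break
            print(task)
            tasks_that_are_done.put(task + ' is done by ' + current_process().name)
            time.sleep(1)
        return True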

Python: How to call method in separate process

I want to start the ActorCore method in a separate process and then process messages that come to that ActorCore. For some reason this code is not working.
import queue
from multiprocessing import Process


class NotMessage(Exception):
    def __str__(self):
        return 'NotMessage exception'


class Message(object):
    def Do(self, Actor):
        # Do some stuff to the actor
        pass

    def __str__(self):
        return 'Generic message'


class StopMessage(Message):
    def Do(self, Actor):
        Actor.__stopped = True

    def __str__(self):
        return 'Stop message'


class Actor(object):
    __DebugName = ''
    __MsgQ = None
    __stopped = False

    def __init__(self, Name):
        self.__DebugName = Name
        self.__MsgQ = queue.Queue()

    def LaunchActor(self):
        p = Process(target=self.ActorCore)
        p.start()
        return self.__MsgQ

    def ActorCore(self):
        while not self.__stopped:
            Msg = self.__MsgQ.get(block=True)
            try:
                Msg.Do(self)
                print(Msg)
            except NotMessage as e:
                print(str(e), ' occurred in ', self.__DebugName)


def main():
    joe = Actor('Joe')
    msg = Message()
    stop = StopMessage()
    qToJoe = joe.LaunchActor()
    qToJoe.put(msg)
    qToJoe.put(msg)
    qToJoe.put(stop)


if __name__ == '__main__':
    main()
I am getting a weird error when running it:
Traceback (most recent call last):
File "C:/Users/plkruczp/PycharmProjects/ActorFramework/Actor/Actor.py", line 64, in <module>
main()
File "C:/Users/plkruczp/PycharmProjects/ActorFramework/Actor/Actor.py", line 58, in main
qToJoe = joe.LaunchActor()
File "C:/Users/plkruczp/PycharmProjects/ActorFramework/Actor/Actor.py", line 40, in LaunchActor
p.start()
File "C:\Program Files\Python35\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\Program Files\Python35\lib\multiprocessing\context.py", line 212, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Program Files\Python35\lib\multiprocessing\context.py", line 313, in _Popen
return Popen(process_obj)
File "C:\Program Files\Python35\lib\multiprocessing\popen_spawn_win32.py", line 66, in __init__
reduction.dump(process_obj, to_child)
File "C:\Program Files\Python35\lib\multiprocessing\reduction.py", line 59, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle _thread.lock objects
Help please! I tried everything :(
Just use multiprocessing's Queue instead of queue:
Remove import queue and add Queue to the from multiprocessing import, like:

    from multiprocessing import Process, Queue

then change self.__MsgQ = queue.Queue() to:

    self.__MsgQ = Queue()

That's all you need to do to get it to work; the rest is the same for your case.
Edit, explanation:
queue.Queue is only thread-safe, whereas multiprocessing actually spawns another process. Because of that, the separate multiprocessing.Queue is implemented to be process-safe as well. If multithreading is wanted instead, the threading library can be used together with queue.Queue: https://docs.python.org/dev/library/threading.html#module-threading
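A minimal illustration of the difference (illustrative names, not from the original post):

    from multiprocessing import Process, Queue


    def worker(q):
        print(q.get())  # receives the item sent by the parent process


    if __name__ == '__main__':
        q = Queue()          # process-safe: can be handed to a child process
        # q = queue.Queue()  # thread-safe only: handing it to Process fails
        #                    # with "TypeError: can't pickle _thread.lock objects"
        p = Process(target=worker, args=(q,))
        p.start()
        q.put('hello from the parent')
        p.join()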
Additional information:
Another parallelization option, depending on your further requirements is joblib, where the spawning can be defined to be either a process or a thread: https://joblib.readthedocs.io/

Python multiprocessing threads with return codes [duplicate]

I want to run something like this:
from multiprocessing import Pool
import time
import random


class Controler(object):
    def __init__(self):
        nProcess = 10
        pages = 10
        self.__result = []
        self.manageWork(nProcess, pages)

    def BarcodeSearcher(x):
        return x*x

    def resultCollector(self, result):
        self.__result.append(result)

    def manageWork(self, nProcess, pages):
        pool = Pool(processes=nProcess)
        for pag in range(pages):
            pool.apply_async(self.BarcodeSearcher, args=(pag, ), callback=self.resultCollector)
        print self.__result


if __name__ == '__main__':
    Controler()
but the code results in this error:
Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Python26\lib\threading.py", line 522, in __bootstrap_inner
self.run()
File "C:\Python26\lib\threading.py", line 477, in run
self.__target(*self.__args, **self.__kwargs)
File "C:\python26\lib\multiprocessing\pool.py", line 225, in _handle_tasks
put(task)
PicklingError: Can't pickle <type 'instancemethod'>: attribute lookup __builtin__.instancemethod failed
I've seen these posts (post1, post2) on solving my problem. I'm looking for something like Mike McKerns' solution in the second post, but without using pathos.
This works, using copy_reg, as suggested by Alex Martelli in the first link you provided:
import copy_reg
import types
import multiprocessing


def _pickle_method(m):
    if m.im_self is None:
        return getattr, (m.im_class, m.im_func.func_name)
    else:
        return getattr, (m.im_self, m.im_func.func_name)

copy_reg.pickle(types.MethodType, _pickle_method)


class Controler(object):
    def __init__(self):
        nProcess = 10
        pages = 10
        self.__result = []
        self.manageWork(nProcess, pages)

    def BarcodeSearcher(self, x):
        return x*x

    def resultCollector(self, result):
        self.__result.append(result)

    def manageWork(self, nProcess, pages):
        pool = multiprocessing.Pool(processes=nProcess)
        for pag in range(pages):
            pool.apply_async(self.BarcodeSearcher, args=(pag,),
                             callback=self.resultCollector)
        pool.close()
        pool.join()
        print(self.__result)


if __name__ == '__main__':
    Controler()
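A side note on versions: copy_reg and the unparenthesized print in the question are Python 2 (the module is copyreg in Python 3). On Python 3, bound methods can be pickled out of the box, so a sketch under that assumption works without the _pickle_method helper at all:

    import multiprocessing


    class Controler(object):
        def __init__(self):
            self.__result = []
            self.manageWork(nProcess=10, pages=10)

        def BarcodeSearcher(self, x):
            return x * x

        def resultCollector(self, result):
            self.__result.append(result)

        def manageWork(self, nProcess, pages):
            pool = multiprocessing.Pool(processes=nProcess)
            for pag in range(pages):
                # the bound method pickles natively on Python 3
                pool.apply_async(self.BarcodeSearcher, args=(pag,),
                                 callback=self.resultCollector)
            pool.close()
            pool.join()
            print(self.__result)


    if __name__ == '__main__':
        Controler()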
