In the following script, why does my callback function never get called?
I am using a pre-created kernel to run the code, and I am trying to capture its output by attaching the callback to the respective sockets.
from zmq.eventloop import ioloop
ioloop.install()
from zmq.eventloop.zmqstream import ZMQStream
from functools import partial
from tornado import gen
from tornado.concurrent import Future
from jupyter_client import BlockingKernelClient
from pprint import pprint
import logging, os, zmq

reply_futures = {}

context = zmq.Context()
publisher = context.socket(zmq.PUSH)
publisher.connect("tcp://127.0.0.1:5253")

def reply_callback(session, stream, msg_list):
    idents, msg_parts = session.feed_identities(msg_list)
    reply = session.deserialize(msg_parts)
    parent_id = reply['parent_header'].get('msg_id')
    reply_future = reply_futures.get(parent_id)
    print("{} \n".format(reply))
    if reply_future:
        if "execute_reply" == reply["msg_type"]:
            reply_future.set_result(reply)
            publisher.send(reply)

def fv_execute():
    code = 'print ("hello")'
    msg_id = execute(code)
    return msg_id

def get_connection_file(kernel_id):
    json_file = 'kernel-{}.json'.format(kernel_id)
    return os.path.join('/tmp', json_file)

def execute(code,):
    kernel_id = '46459cb4-fa34-497a-8e3d-dfb3ab4476fd'
    cf = get_connection_file(kernel_id)
    kernel_client = BlockingKernelClient(connection_file=cf)
    setup_listener(kernel_client)
    msg_id = ioloop.IOLoop.current().run_sync(lambda: execute_(kernel_client, code))
    return msg_id

def setup_listener(kernel_client):
    shell_stream = ZMQStream(kernel_client.shell_channel.socket)
    iopub_stream = ZMQStream(kernel_client.iopub_channel.socket)
    shell_stream.on_recv_stream(partial(reply_callback, kernel_client.session))
    iopub_stream.on_recv_stream(partial(reply_callback, kernel_client.session))

@gen.coroutine
def execute_(kernel_client, code):
    msg_id = kernel_client.execute(code)
    f = reply_futures[msg_id] = Future()
    print("Is kernel alive: {}".format(kernel_client.is_alive()))
    print(msg_id)
    yield f
    raise gen.Return(msg_id)

if __name__ == '__main__':
    fv_execute()
Here is the output; the script runs forever:
jupyter@albus:~/lab$ python2 iolooptest2.py
Is kernel alive: True
de3eae2e-48d3-451a-b6bc-421674bb2a35
^X^CTraceback (most recent call last):
File "iolooptest2.py", line 61, in <module>
fv_execute()
File "iolooptest2.py", line 30, in fv_execute
msg_id = execute(code)
File "iolooptest2.py", line 42, in execute
msg_id = ioloop.IOLoop.current().run_sync(lambda: execute_(kernel_client,code))
File "/usr/local/lib/python2.7/dist-packages/tornado/ioloop.py", line 452, in run_sync
self.start()
File "/usr/local/lib/python2.7/dist- packages/zmq/eventloop/ioloop.py", line 177, in start
super(ZMQIOLoop, self).start()
File "/usr/local/lib/python2.7/dist-packages/tornado/ioloop.py", line 862, in start
event_pairs = self._impl.poll(poll_timeout)
File "/usr/local/lib/python2.7/dist- packages/zmq/eventloop/ioloop.py", line 122, in poll
z_events = self._poller.poll(1000*timeout)
File "/usr/local/lib/python2.7/dist-packages/zmq/sugar/poll.py", line 99, in poll
return zmq_poll(self.sockets, timeout=timeout)
File "zmq/backend/cython/_poll.pyx", line 116, in zmq.backend.cython._poll.zmq_poll (zmq/backend/cython/_poll.c:2036)
File "zmq/backend/cython/checkrc.pxd", line 12, in zmq.backend.cython.checkrc._check_rc (zmq/backend/cython/_poll.c:2418)
KeyboardInterrupt
A slightly modified version of the code is here:
https://gist.github.com/jayendra13/76a4f5726428882013ea62d94974da5c
where I pass the ioloop as an argument to the ZMQStream while attaching the callback; it shows the same behaviour.
Here is an almost identical script which works:
https://gist.github.com/jayendra13/e553fafba5398e287107e947c16988df
Adding the following two lines after the creation of kernel_client solved my issue; presumably the client otherwise never loads the ports from the connection file nor starts the channels whose sockets the listener needs.
kernel_client.load_connection_file()
kernel_client.start_channels()
so the new execute looks like this:
def execute(code,):
    kernel_id = '46459cb4-fa34-497a-8e3d-dfb3ab4476fd'
    cf = get_connection_file(kernel_id)
    kernel_client = BlockingKernelClient(connection_file=cf)
    kernel_client.load_connection_file()
    kernel_client.start_channels()
    setup_listener(kernel_client)
    msg_id = ioloop.IOLoop.current().run_sync(lambda: execute_(kernel_client, code))
    return msg_id
I want to create a class Storage where each object has a dictionary orderbooks as a property.
I want to write to orderbooks from the main process by invoking the method write, but I want to defer this action to another process while ensuring that the dictionary orderbooks stays accessible from the main process.
To do so, I create a Manager() that I pass when constructing the object and that is used to notify the processes about changes to the dictionary. My code is the following:
from multiprocessing import Process, Manager

class Storage():
    def __init__(self, manager):
        self.manager = manager
        self.orderbooks = self.manager.dict()

    def store_value(self, el):
        self.orderbooks[el[0]] = el[1]

    def write(self, el: list):
        p = Process(target=self.store_value, args=(el,))
        p.start()

if __name__ == '__main__':
    manager = Manager()
    book1 = Storage(manager)
    book1.write([0, 1])
However, when I run this code, I get the following error:
Traceback (most recent call last):
File "/Users/main_user/PycharmProjects/handle_queue/main.py", line 21, in <module>
book1.write([0,1])
File "/Users/main_user/PycharmProjects/handle_queue/main.py", line 13, in write
p.start()
File "/Users/main_user/opt/anaconda3/envs/handle_queue/lib/python3.10/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
File "/Users/main_user/opt/anaconda3/envs/handle_queue/lib/python3.10/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "/Users/main_user/opt/anaconda3/envs/handle_queue/lib/python3.10/multiprocessing/context.py", line 284, in _Popen
return Popen(process_obj)
File "/Users/main_user/opt/anaconda3/envs/handle_queue/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 32, in __init__
super().__init__(process_obj)
File "/Users/main_user/opt/anaconda3/envs/handle_queue/lib/python3.10/multiprocessing/popen_fork.py", line 19, in __init__
self._launch(process_obj)
File "/Users/main_user/opt/anaconda3/envs/handle_queue/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 47, in _launch
reduction.dump(process_obj, fp)
File "/Users/main_user/opt/anaconda3/envs/handle_queue/lib/python3.10/multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle 'weakref' object
What is wrong with my code?
Per Aaron's posted comment: the Manager instance itself cannot be pickled (pickling it trips over a weakref), so Storage should hold only the managed dictionary proxy rather than the manager:
from multiprocessing import Process, Manager

class Storage():
    def __init__(self, orderbooks):
        self.orderbooks = orderbooks

    def store_value(self, el):
        self.orderbooks[el[0]] = el[1]

    def write(self, el: list):
        p = Process(target=self.store_value, args=(el,))
        p.start()
        # Ensure we do not return until store_value has
        # completed updating the dictionary:
        p.join()

if __name__ == '__main__':
    manager = Manager()
    orderbooks = manager.dict()
    book1 = Storage(orderbooks)
    book1.write([0, 1])
    print(orderbooks)
Prints:
{0: 1}
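The underlying issue is that Process.start() pickles self, and the original Storage carried the Manager along with it; managed-dict proxies are designed to be pickled, but the Manager object itself is not. A quick sketch to see the difference (the exact exception text may vary across Python versions):

import pickle
from multiprocessing import Manager

if __name__ == '__main__':
    manager = Manager()
    d = manager.dict()

    pickle.dumps(d)  # works: proxies are built to cross process boundaries

    try:
        pickle.dumps(manager)  # fails: the Manager holds unpicklable state
    except TypeError as e:
        print(e)  # e.g. "cannot pickle 'weakref' object"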
I am writing this custom collector to which I want to add a counter.
#!/usr/bin/env python3
import sys
import time
from prometheus_client import start_http_server
from prometheus_client.core import CollectorRegistry, Counter

class MyCollector():
    def __init__(self):
        self.mymetrics_counter = Counter('observability_total', 'Status of My Services', ['app', 'test'])

    def describe(self):
        print("Started: Metrics Collector!")
        return list()

    def collect(self):
        self.mymetrics_counter.labels('observability', 'test').inc()
        yield self.mymetrics_counter

if __name__ == '__main__':
    try:
        myregistry = CollectorRegistry()
        myregistry.register(MyCollector())
        start_http_server(port=9100, registry=myregistry)
        while True:
            time.sleep(10)
    except KeyboardInterrupt:
        print("Ended: Metrics Collector!")
        sys.exit(0)
But I am getting the error below upon yield:
(venv) test_collector % python mycollector.py
Started: Metrics Collector!
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/wsgiref/handlers.py", line 137, in run
self.result = application(self.environ, self.start_response)
File "/Users/myid/Documents/myproj/workspace/test_collector/venv/lib/python3.9/site-packages/prometheus_client/exposition.py", line 123, in prometheus_app
status, header, output = _bake_output(registry, accept_header, params)
File "/Users/myid/Documents/myproj/workspace/test_collector/venv/lib/python3.9/site-packages/prometheus_client/exposition.py", line 105, in _bake_output
output = encoder(registry)
File "/Users/myid/Documents/myproj/workspace/test_collector/venv/lib/python3.9/site-packages/prometheus_client/exposition.py", line 179, in generate_latest
mname = metric.name
AttributeError: ("'Counter' object has no attribute 'name'", prometheus_client.metrics.Counter(observability))
collect returns metric families, not metrics. If you yield each of the results of mymetrics_counter.collect(), it will work.
Also, when you create the Counter it gets registered to the default registry, which you don't want in this sort of usage, as the metric would end up being returned twice, which is invalid.
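Following that advice, here is a minimal sketch of a corrected collector (passing registry=None when constructing the Counter is one way to keep it out of the default registry; this assumes a prometheus_client version that accepts the registry keyword):

from prometheus_client import Counter

class MyCollector():
    def __init__(self):
        # registry=None keeps the Counter out of the default registry,
        # so it is exposed only through this collector.
        self.mymetrics_counter = Counter(
            'observability_total', 'Status of My Services',
            ['app', 'test'], registry=None)

    def describe(self):
        print("Started: Metrics Collector!")
        return list()

    def collect(self):
        self.mymetrics_counter.labels('observability', 'test').inc()
        # Counter.collect() yields metric families, which is what a
        # custom collector's collect() is expected to produce.
        for family in self.mymetrics_counter.collect():
            yield family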
I wrote a program that posts events using asyncio and aiohttp. It works when I run it locally; I can post 10k events, no problem. However, after I SCPed the whole codebase to a remote machine, I can't post more than 15 events from it without getting this error:
RuntimeError: Event loop is closed
Exception ignored in: <coroutine object Poster.async_post_event at 0x7f4a53989410>
Traceback (most recent call last):
File "/home/bli1/qe-trinity/tracer/utils/poster.py", line 63, in async_post_event
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/client.py", line 565, in __aenter__
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/client.py", line 198, in _request
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/connector.py", line 316, in connect
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/connector.py", line 349, in _release_waiter
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/futures.py", line 332, in set_result
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/futures.py", line 242, in _schedule_callbacks
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 447, in call_soon
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 456, in _call_soon
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 284, in _check_closed
RuntimeError: Event loop is closed
Exception ignored in: <coroutine object Poster.async_post_event at 0x7f4a5397ffc0>
Traceback (most recent call last):
File "/home/bli1/qe-trinity/tracer/utils/poster.py", line 63, in async_post_event
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/client.py", line 565, in __aenter__
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/client.py", line 198, in _request
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/connector.py", line 316, in connect
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/connector.py", line 349, in _release_waiter
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/futures.py", line 332, in set_result
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/futures.py", line 242, in _schedule_callbacks
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 447, in call_soon
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 456, in _call_soon
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 284, in _check_closed
RuntimeError: Event loop is closed
How can I debug this or find the source of the problem?
Here is the class that I created; I use its post() method to run it:
import uuid
import os
import asyncio
import time
import random
import json
import aiohttp
from tracer.utils.phase import Phase

class Poster(Phase):
    def __init__(self, log, endpoint, num_post, topic, datafile, timeout, oracles, secure=False, thru_proxy=True):
        Phase.__init__(self, log, "post", oracles, secure, thru_proxy)
        self.log = log
        self.num_post = int(num_post)
        self.datafile = datafile.readlines()
        self.topic = topic
        self.endpoint = self.set_endpoint(endpoint, self.topic)
        self.response = None
        self.timeout = timeout

    def random_line(self):
        """ Returns random line from file and converts it to JSON """
        return json.loads(random.choice(self.datafile))

    @staticmethod
    def change_uuid(event):
        """ Creates new UUID for event_id """
        new_uuid = str(uuid.uuid4())
        event["event_header"]["event_id"] = new_uuid
        return event

    @staticmethod
    def wrapevent(event):
        """ Wrap event with metadata for analysis later on """
        return {
            "tracer": {
                "post": {
                    "statusCode": None,
                    "timestamp": None,
                },
                "awsKafkaTimestamp": None,
                "qdcKakfaTimestamp": None,
                "hdfsTimestamp": None
            },
            "event": event
        }

    def gen_random_event(self):
        random_event = self.random_line()
        event = self.change_uuid(random_event)
        dataspec = self.wrapevent(event)
        return dataspec

    async def async_post_event(self, event, session):
        async with session.post(self.endpoint, data=event, proxy=self.proxy) as resp:
            event["tracer"]["post"]["timestamp"] = time.time() * 1000.0
            event["tracer"]["post"]["statusCode"] = resp.status
            unique_id = event["event"]["event_header"]["event_id"]
            oracle_endpoint = os.path.join(self.oracle, unique_id)
            async with session.put(oracle_endpoint, data=json.dumps(event), proxy=self.proxy) as resp:
                if resp.status != 200:
                    self.log.debug("Post to ElasticSearch not 200")
                    self.log.debug(event["event"]["event_header"]["event_id"])
                    self.log.debug("Status code: " + str(resp.status))
                return event["event"]["event_header"]["event_id"], resp.status

    async def async_post_events(self, events):
        coros = []
        conn = aiohttp.TCPConnector(verify_ssl=self.secure)
        async with aiohttp.ClientSession(connector=conn) as session:
            for event in events:
                coros.append(self.async_post_event(event, session))
            return await asyncio.gather(*coros)

    def post(self):
        event_loop = asyncio.get_event_loop()
        try:
            events = [self.gen_random_event() for i in range(self.num_post)]
            start_time = time.time()
            results = event_loop.run_until_complete(self.async_post_events(events))
            print("Time taken: " + str(time.time() - start_time))
        finally:
            event_loop.close()
You cannot re-use a loop once it's closed. From the AbstractEventLoop.close documentation:
This is idempotent and irreversible. No other methods should be called after this one.
Either remove the loop.close() call or create a new loop for each post.
My advice would be to avoid those problems by running everything inside the loop and awaiting async_post_events when needed.
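For instance, here is a sketch of the "new loop for each post" option, based on the question's post() method (new_event_loop and set_event_loop are standard asyncio calls):

def post(self):
    # A fresh loop per call, so closing it in `finally` cannot
    # poison a later call with "Event loop is closed".
    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    try:
        events = [self.gen_random_event() for i in range(self.num_post)]
        start_time = time.time()
        results = event_loop.run_until_complete(self.async_post_events(events))
        print("Time taken: " + str(time.time() - start_time))
        return results
    finally:
        event_loop.close()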
I'm trying to run pyalgotrade's event profiler. I'm using custom data; it works when I run it with the default strategy/predicate 'BuyOnGap', but when I try to run it with a simple custom strategy it throws this error:
Traceback (most recent call last):
File "C:\Users\David\Desktop\Python\Coursera\Computational Finance\Week2\PyAlgoTrade\Bitfinex\FCT\FCT_single_event_test.py", line 43, in <module>
main(True)
File "C:\Users\David\Desktop\Python\Coursera\Computational Finance\Week2\PyAlgoTrade\Bitfinex\FCT\FCT_single_event_test.py", line 35, in main
eventProfiler.run(feed, True)
File "C:\Python27\lib\site-packages\pyalgotrade\eventprofiler.py", line 215, in run
disp.run()
File "C:\Python27\lib\site-packages\pyalgotrade\dispatcher.py", line 102, in run
eof, eventsDispatched = self.__dispatch()
File "C:\Python27\lib\site-packages\pyalgotrade\dispatcher.py", line 90, in __dispatch
if self.__dispatchSubject(subject, smallestDateTime):
File "C:\Python27\lib\site-packages\pyalgotrade\dispatcher.py", line 68, in __dispatchSubject
ret = subject.dispatch() is True
File "C:\Python27\lib\site-packages\pyalgotrade\feed\__init__.py", line 105, in dispatch
self.__event.emit(dateTime, values)
File "C:\Python27\lib\site-packages\pyalgotrade\observer.py", line 59, in emit
handler(*args, **kwargs)
File "C:\Python27\lib\site-packages\pyalgotrade\eventprofiler.py", line 172, in __onBars
eventOccurred = self.__predicate.eventOccurred(instrument, self.__feed[instrument])
File "C:\Python27\lib\site-packages\pyalgotrade\eventprofiler.py", line 89, in eventOccurred
raise NotImplementedError()
NotImplementedError
My code is:
from pyalgotrade import eventprofiler
from pyalgotrade.technical import stats
from pyalgotrade.technical import roc
from pyalgotrade.technical import ma
from pyalgotrade.barfeed import csvfeed

class single_event_strat( eventprofiler.Predicate ):
    def __init__(self, feed):
        self.__returns = {} # CLASS ATTR
        for inst in feed.getRegisteredInstruments():
            priceDS = feed[inst].getAdjCloseDataSeries() # STORE: priceDS ( a temporary representation )
            self.__returns[inst] = roc.RateOfChange( priceDS, 1 )
            # CALC: ATTR <- Returns over the adjusted close values, consumed priceDS
            # ( could be expressed as self.__returns[inst] = roc.RateOfChange( ( feed[inst].getAdjCloseDataSeries() ), 1 ),
            #   but would be less readable )

    def eventOccoured( self, instrument, aBarDS ):
        if ( aBarDS[-1].getVolume() > 10 and aBarDS[-1].getClose() > 5 ):
            return True
        else:
            return False

def main(plot):
    feed = csvfeed.GenericBarFeed(0)
    feed.addBarsFromCSV('FCT', "FCT_daily_converted.csv")
    predicate = single_event_strat(feed)
    eventProfiler = eventprofiler.Profiler( predicate, 5, 5 )
    eventProfiler.run(feed, True)
    results = eventProfiler.getResults()
    print "%d events found" % (results.getEventCount())
    if plot:
        eventprofiler.plot(results)

if __name__ == "__main__":
    main(True)
What does this error mean?
Does anyone know what's wrong and how to fix it?
Here is a link to the eventprofiler code:
http://pastebin.com/QD220VQb
As a bonus, does anyone know where I can find examples of the profiler being used, other than the example pyalgotrade gives, seen here?
I think you just made a spelling mistake in the eventOccurred method definition; because of the typo, your subclass never overrides the base Predicate method, so the base class version is called and raises NotImplementedError.
def eventOccoured( self, instrument, aBarDS):
should be replaced by
def eventOccurred( self, instrument, aBarDS):
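With the rename applied, the body can stay exactly as written in the question, or be collapsed to a single boolean expression:

def eventOccurred(self, instrument, aBarDS):
    # Fires when the last bar's volume exceeds 10 and its close exceeds 5.
    return aBarDS[-1].getVolume() > 10 and aBarDS[-1].getClose() > 5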
I want to start the ActorCore method in a separate process and then process messages that come to that ActorCore. For some reason this code is not working:
import queue
from multiprocessing import Process

class NotMessage(Exception):
    def __str__(self):
        return 'NotMessage exception'

class Message(object):
    def Do(self, Actor):
        # Do some stuff to the actor
        pass

    def __str__(self):
        return 'Generic message'

class StopMessage(Message):
    def Do(self, Actor):
        Actor.__stopped = True

    def __str__(self):
        return 'Stop message'

class Actor(object):
    __DebugName = ''
    __MsgQ = None
    __stopped = False

    def __init__(self, Name):
        self.__DebugName = Name
        self.__MsgQ = queue.Queue()

    def LaunchActor(self):
        p = Process(target=self.ActorCore)
        p.start()
        return self.__MsgQ

    def ActorCore(self):
        while not self.__stopped:
            Msg = self.__MsgQ.get(block=True)
            try:
                Msg.Do(self)
                print(Msg)
            except NotMessage as e:
                print(str(e), ' occurred in ', self.__DebugName)

def main():
    joe = Actor('Joe')
    msg = Message()
    stop = StopMessage()
    qToJoe = joe.LaunchActor()
    qToJoe.put(msg)
    qToJoe.put(msg)
    qToJoe.put(stop)

if __name__ == '__main__':
    main()
I am getting a weird error when running it:
Traceback (most recent call last):
File "C:/Users/plkruczp/PycharmProjects/ActorFramework/Actor/Actor.py", line 64, in <module>
main()
File "C:/Users/plkruczp/PycharmProjects/ActorFramework/Actor/Actor.py", line 58, in main
qToJoe = joe.LaunchActor()
File "C:/Users/plkruczp/PycharmProjects/ActorFramework/Actor/Actor.py", line 40, in LaunchActor
p.start()
File "C:\Program Files\Python35\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\Program Files\Python35\lib\multiprocessing\context.py", line 212, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Program Files\Python35\lib\multiprocessing\context.py", line 313, in _Popen
return Popen(process_obj)
File "C:\Program Files\Python35\lib\multiprocessing\popen_spawn_win32.py", line 66, in __init__
reduction.dump(process_obj, to_child)
File "C:\Program Files\Python35\lib\multiprocessing\reduction.py", line 59, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle _thread.lock objects
Help please! I tried everything :(
Just use Queue instead of queue:
Remove import queue and add Queue to the from multiprocessing import, like:
from multiprocessing import Process, Queue
then change self.__MsgQ = queue.Queue() to self.__MsgQ = Queue().
That's all you need to do to get it to work; the rest stays the same for your case.
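Applied to the class in the question, the changed spots look like this (a sketch; the rest of Actor stays unchanged):

from multiprocessing import Process, Queue  # replaces "import queue"

class Actor(object):
    def __init__(self, Name):
        self.__DebugName = Name
        # multiprocessing.Queue can be shared with a child process,
        # unlike queue.Queue, whose internal _thread.lock cannot be pickled.
        self.__MsgQ = Queue()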
Edit, explanation:
queue.Queue is only thread-safe, while multiprocessing actually spawns another process; because of that, multiprocessing provides its own multiprocessing.Queue, which is process-safe as well. Alternatively, if multithreading is all you need, the threading library can be used together with queue.Queue: https://docs.python.org/dev/library/threading.html#module-threading
Additional information:
Another parallelization option, depending on your further requirements, is joblib, where the spawned workers can be configured to be either processes or threads: https://joblib.readthedocs.io/