I'm new to unittest and I am not sure why I am getting this error:
runTest (__main__.TestTimeInterval)
No test ... Traceback (most recent call last):
File "/Users/bli1/Development/Trinity/qa-trinity/python_lib/qe/tests/test_timestamp_interval.py", line 122, in <module>
sys.exit(main(sys.argv))
File "/Users/bli1/Development/Trinity/qa-trinity/python_lib/qe/tests/test_timestamp_interval.py", line 110, in main
result_set = runner.run(suite)
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/unittest/runner.py", line 168, in run
test(result)
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/unittest/suite.py", line 87, in __call__
return self.run(*args, **kwds)
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/unittest/suite.py", line 125, in run
test(result)
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/unittest/case.py", line 625, in __call__
return self.run(*args, **kwds)
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/unittest/case.py", line 555, in run
testMethod = getattr(self, self._testMethodName)
AttributeError: 'TestTimeInterval' object has no attribute 'runTest'
I wanted to create a simple test to see if everything is working properly, but I got the error above. I added the test to the suite and then ran it with .run():
class TestTimeInterval(unittest.TestCase):
    def __init__(self, log, runtag, interval, path_file):
        super(TestTimeInterval, self).__init__()
        self.interval = interval
        self.path_file = path_file
        self.log = log
        self.runtag = runtag

    def test_record(self):
        self.assertTrue(1 > 0)
##############################################################################
def main(argv):
    exit_code = 0
    global me; me = os.path.basename(argv[0])  # name of this program
    global mydir; mydir = os.path.dirname(os.path.abspath(__file__))
    parser = argparse.ArgumentParser(description=main.__doc__)
    parser.add_argument("file", metavar="FILE",
                        help="File filled with hdfs paths separated by newlines")
    parser.add_argument("runtag", metavar="RUNTAG", help="tag for the test run")
    parser.add_argument("-t", "--time", default="10", dest="interval",
                        help="Time interval (minutes) between server_timestamp and interval given by HDFS folder name")
    args = parser.parse_args(args=argv[1:])
    log = logging.getLogger(me)
    logfile = args.runtag + ".log"
    if os.path.exists(logfile):
        os.remove(logfile)
    log.addHandler(logging.FileHandler(logfile))
    console = logging.StreamHandler(sys.stderr); console.setLevel(logging.WARNING); log.addHandler(console)
    if exit_code == 0:
        runner = unittest.TextTestRunner(stream=sys.stdout, descriptions=True, verbosity=2)
        suite = unittest.TestSuite()
        print(args)
        suite.addTest(TestTimeInterval(log, args.runtag, args.interval, args.file))
        try:
            log.info("{0}: START: {1}".format(me, datetime.datetime.now().ctime()))
            result_set = runner.run(suite)
        except KeyboardInterrupt as e:
            log.info("{0}: exit on keyboard interrupt".format(me))
            exit_code = 1
        else:
            exit_code = len(result_set.errors) + len(result_set.failures)
        finally:
            log.info("{0}: FINISH: {1}".format(me, datetime.datetime.now().ctime()))
    return exit_code
##############################################################################
# The following code calls main only if this program is invoked standalone
if __name__ == "__main__":
    sys.exit(main(sys.argv))
You almost never need to create TestSuites and TestRunners yourself. Normally, you'd do something like:
# my_test.py
import unittest

class Something(object):
    def __init__(self):
        self.foo = 1

class TestSomething(unittest.TestCase):
    def setUp(self):
        super(TestSomething, self).setUp()
        self.something = Something()

    def test_record(self):
        self.assertTrue(1 > 0)

    def test_something_foo_equals_1(self):
        self.assertEqual(self.something.foo, 1)

if __name__ == '__main__':
    unittest.main()
Now, to run your test, you just execute your script:
python my_test.py
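For reference, the error itself comes from TestCase.__init__ defaulting methodName to 'runTest'; the custom __init__ in the question never forwards a method name, so unittest later looks for a runTest method that doesn't exist. If you really do need a custom constructor, a minimal sketch (keeping the question's arguments) is to pass the test method name through:

class TestTimeInterval(unittest.TestCase):
    def __init__(self, log, runtag, interval, path_file, methodName='test_record'):
        # forward the method name so unittest knows which test to run
        super(TestTimeInterval, self).__init__(methodName)
        self.interval = interval
        self.path_file = path_file
        self.log = log
        self.runtag = runtag

    def test_record(self):
        self.assertTrue(1 > 0)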
I have looked at this question to get started, and it works just fine: How can I recover the return value of a function passed to multiprocessing.Process?
But in my case I would like to write a small tool that connects to many computers and gathers some statistics, with each stat gathered within a Process to make it snappy. As soon as I try to wrap the multiprocessing command in a class for a machine, it fails.
Here is my code:
import multiprocessing
import pprint
import subprocess  # needed by run_task below

def run_task(command):
    p = subprocess.Popen(command, stdout=subprocess.PIPE, universal_newlines=True, shell=False)
    result = p.communicate()[0]
    return result

MACHINE_NAME = "cptr_name"
A_STAT = "some_stats_A"
B_STAT = "some_stats_B"

class MachineStatsGatherer():
    def __init__(self, machineName):
        self.machineName = machineName
        manager = multiprocessing.Manager()
        self.localStats = manager.dict()  # creating a shared resource for the sub processes to use
        self.localStats[MACHINE_NAME] = machineName

    def gatherStats(self):
        self.runInParallel(
            self.GatherSomeStatsA,
            self.GatherSomeStatsB,
        )
        self.printStats()

    def printStats(self):
        pprint.pprint(self.localStats)

    def runInParallel(self, *fns):
        processes = []
        for fn in fns:
            process = multiprocessing.Process(target=fn, args=(self.localStats))
            processes.append(process)
            process.start()
        for process in processes:
            process.join()

    def GatherSomeStatsA(self, returnStats):
        # do some remote command, simplified here for the sake of debugging
        result = "Windows"
        returnStats[A_STAT] = result.find("Windows") != -1

    def GatherSomeStatsB(self, returnStats):
        # do some remote command, simplified here for the sake of debugging
        result = "Windows"
        returnStats[B_STAT] = result.find("Windows") != -1

def main():
    machine = MachineStatsGatherer("SOMEMACHINENAME")
    machine.gatherStats()
    return

if __name__ == '__main__':
    main()
And here is the error message
Traceback (most recent call last):
File "C:\Users\mesirard\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py", line 297, in _bootstrap
self.run()
File "C:\Users\mesirard\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "d:\workdir\trunks6\Tools\VTKAppTester\Utils\NXMachineMonitorShared.py", line 45, in GatherSomeStatsA
returnStats[A_STAT] = result.find("Windows") != -1
TypeError: 'str' object does not support item assignment
Process Process-3:
Traceback (most recent call last):
File "C:\Users\mesirard\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py", line 297, in _bootstrap
self.run()
File "C:\Users\mesirard\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "d:\workdir\trunks6\Tools\VTKAppTester\Utils\NXMachineMonitorShared.py", line 50, in GatherSomeStatsB
returnStats[B_STAT] = result.find("Windows") != -1
TypeError: 'str' object does not support item assignment
The issue is coming from this line
process = multiprocessing.Process(target=fn, args=(self.localStats))
it should have an extra comma at the end of args, like so:
process = multiprocessing.Process(target=fn, args=(self.localStats,))
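The parentheses alone don't make a tuple; the trailing comma does. Without it, args is just the dict itself, and Process unpacks that iterable into its keys, so each worker received the string "cptr_name" instead of the shared dict, which is why the traceback complains about item assignment on a str. A quick illustration:

stats = {"cptr_name": "m1"}
not_a_tuple = (stats)     # still just the dict; the parentheses are only grouping
a_tuple = (stats,)        # a one-element tuple containing the dict
print(type(not_a_tuple))  # <class 'dict'>
print(type(a_tuple))      # <class 'tuple'>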
I am writing this custom collector where I want to add a counter.
#!/usr/bin/env python3
import sys
import time
from prometheus_client import start_http_server
from prometheus_client.core import CollectorRegistry, Counter

class MyCollector():
    def __init__(self):
        self.mymetrics_counter = Counter('observability_total', 'Status of My Services', ['app', 'test'])

    def describe(self):
        print("Started: Metrics Collector!")
        return list()

    def collect(self):
        self.mymetrics_counter.labels('observability', 'test').inc()
        yield self.mymetrics_counter

if __name__ == '__main__':
    try:
        myregistry = CollectorRegistry()
        myregistry.register(MyCollector())
        start_http_server(port=9100, registry=myregistry)
        while True:
            time.sleep(10)
    except KeyboardInterrupt:
        print("Ended: Metrics Collector!")
        sys.exit(0)
But I am getting the error below upon yield:
(venv) test_collector % python mycollector.py
Started: Metrics Collector!
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/wsgiref/handlers.py", line 137, in run
self.result = application(self.environ, self.start_response)
File "/Users/myid/Documents/myproj/workspace/test_collector/venv/lib/python3.9/site-packages/prometheus_client/exposition.py", line 123, in prometheus_app
status, header, output = _bake_output(registry, accept_header, params)
File "/Users/myid/Documents/myproj/workspace/test_collector/venv/lib/python3.9/site-packages/prometheus_client/exposition.py", line 105, in _bake_output
output = encoder(registry)
File "/Users/myid/Documents/myproj/workspace/test_collector/venv/lib/python3.9/site-packages/prometheus_client/exposition.py", line 179, in generate_latest
mname = metric.name
AttributeError: ("'Counter' object has no attribute 'name'", prometheus_client.metrics.Counter(observability))
collect returns metric families, not metrics. If you yield each of the results of mymetrics_counter.collect(), it'd work.
Also, when you create the Counter, it gets registered to the default registry, which you don't want in this sort of usage, as it'll end up returned twice, which is invalid.
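A minimal sketch of the collector with both fixes applied, keeping the names from the question (passing registry=None keeps the Counter out of the default registry):

class MyCollector():
    def __init__(self):
        # registry=None: not registered globally, only exposed via this collector
        self.mymetrics_counter = Counter('observability_total', 'Status of My Services',
                                         ['app', 'test'], registry=None)

    def describe(self):
        print("Started: Metrics Collector!")
        return list()

    def collect(self):
        self.mymetrics_counter.labels('observability', 'test').inc()
        # Counter.collect() returns a list of metric families; yield each one
        yield from self.mymetrics_counter.collect()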
In the following script, why does my callback function never get called?
I am using a pre-created kernel to run the code, and I am trying to get its output by attaching the callback to the respective sockets.
from zmq.eventloop import ioloop
ioloop.install()
from zmq.eventloop.zmqstream import ZMQStream
from functools import partial
from tornado import gen
from tornado.concurrent import Future
from jupyter_client import BlockingKernelClient
from pprint import pprint
import logging, os, zmq

reply_futures = {}

context = zmq.Context()
publisher = context.socket(zmq.PUSH)
publisher.connect("tcp://127.0.0.1:5253")

def reply_callback(session, stream, msg_list):
    idents, msg_parts = session.feed_identities(msg_list)
    reply = session.deserialize(msg_parts)
    parent_id = reply['parent_header'].get('msg_id')
    reply_future = reply_futures.get(parent_id)
    print("{} \n".format(reply))
    if reply_future:
        if "execute_reply" == reply["msg_type"]:
            reply_future.set_result(reply)
            publisher.send(reply)

def fv_execute():
    code = 'print ("hello")'
    msg_id = execute(code)
    return msg_id

def get_connection_file(kernel_id):
    json_file = 'kernel-{}.json'.format(kernel_id)
    return os.path.join('/tmp', json_file)

def execute(code,):
    kernel_id = '46459cb4-fa34-497a-8e3d-dfb3ab4476fd'
    cf = get_connection_file(kernel_id)
    kernel_client = BlockingKernelClient(connection_file=cf)
    setup_listener(kernel_client)
    msg_id = ioloop.IOLoop.current().run_sync(lambda: execute_(kernel_client, code))
    return msg_id

def setup_listener(kernel_client):
    shell_stream = ZMQStream(kernel_client.shell_channel.socket)
    iopub_stream = ZMQStream(kernel_client.iopub_channel.socket)
    shell_stream.on_recv_stream(partial(reply_callback, kernel_client.session))
    iopub_stream.on_recv_stream(partial(reply_callback, kernel_client.session))

@gen.coroutine
def execute_(kernel_client, code):
    msg_id = kernel_client.execute(code)
    f = reply_futures[msg_id] = Future()
    print("Is kernel alive: {}".format(kernel_client.is_alive()))
    print(msg_id)
    yield f
    raise gen.Return(msg_id)

if __name__ == '__main__':
    fv_execute()
Here is the output; the script runs forever:
jupyter#albus:~/lab$ python2 iolooptest2.py
Is kernel alive: True
de3eae2e-48d3-451a-b6bc-421674bb2a35
^X^CTraceback (most recent call last):
File "iolooptest2.py", line 61, in <module>
fv_execute()
File "iolooptest2.py", line 30, in fv_execute
msg_id = execute(code)
File "iolooptest2.py", line 42, in execute
msg_id = ioloop.IOLoop.current().run_sync(lambda: execute_(kernel_client,code))
File "/usr/local/lib/python2.7/dist-packages/tornado/ioloop.py", line 452, in run_sync
self.start()
File "/usr/local/lib/python2.7/dist- packages/zmq/eventloop/ioloop.py", line 177, in start
super(ZMQIOLoop, self).start()
File "/usr/local/lib/python2.7/dist-packages/tornado/ioloop.py", line 862, in start
event_pairs = self._impl.poll(poll_timeout)
File "/usr/local/lib/python2.7/dist- packages/zmq/eventloop/ioloop.py", line 122, in poll
z_events = self._poller.poll(1000*timeout)
File "/usr/local/lib/python2.7/dist-packages/zmq/sugar/poll.py", line 99, in poll
return zmq_poll(self.sockets, timeout=timeout)
File "zmq/backend/cython/_poll.pyx", line 116, in zmq.backend.cython._poll.zmq_poll (zmq/backend/cython/_poll.c:2036)
File "zmq/backend/cython/checkrc.pxd", line 12, in zmq.backend.cython.checkrc._check_rc (zmq/backend/cython/_poll.c:2418)
KeyboardInterrupt
A slightly modified version of the code is here:
https://gist.github.com/jayendra13/76a4f5726428882013ea62d94974da5c
where I pass the ioloop as an argument to ZMQStream while attaching the callback; it has the same behaviour.
Here is an almost identical script which works:
https://gist.github.com/jayendra13/e553fafba5398e287107e947c16988df
Adding the following two lines after the creation of kernel_client solved my issue (presumably because without them the client never loads the connection info or starts its channels, so the wrapped sockets never receive anything).
kernel_client.load_connection_file()
kernel_client.start_channels()
So the new execute looks like this:
def execute(code,):
    kernel_id = '46459cb4-fa34-497a-8e3d-dfb3ab4476fd'
    cf = get_connection_file(kernel_id)
    kernel_client = BlockingKernelClient(connection_file=cf)
    kernel_client.load_connection_file()
    kernel_client.start_channels()
    setup_listener(kernel_client)
    msg_id = ioloop.IOLoop.current().run_sync(lambda: execute_(kernel_client, code))
    return msg_id
I'm trying to run pyalgotrade's event profiler. I'm using custom data; it works when I run it with the default strategy/predicate 'BuyOnGap', but when I try to run it with a simple custom strategy it throws this error:
Traceback (most recent call last):
File "C:\Users\David\Desktop\Python\Coursera\Computational Finance\Week2\PyAlgoTrade\Bitfinex\FCT\FCT_single_event_test.py", line 43, in <module>
main(True)
File "C:\Users\David\Desktop\Python\Coursera\Computational Finance\Week2\PyAlgoTrade\Bitfinex\FCT\FCT_single_event_test.py", line 35, in main
eventProfiler.run(feed, True)
File "C:\Python27\lib\site-packages\pyalgotrade\eventprofiler.py", line 215, in run
disp.run()
File "C:\Python27\lib\site-packages\pyalgotrade\dispatcher.py", line 102, in run
eof, eventsDispatched = self.__dispatch()
File "C:\Python27\lib\site-packages\pyalgotrade\dispatcher.py", line 90, in __dispatch
if self.__dispatchSubject(subject, smallestDateTime):
File "C:\Python27\lib\site-packages\pyalgotrade\dispatcher.py", line 68, in __dispatchSubject
ret = subject.dispatch() is True
File "C:\Python27\lib\site-packages\pyalgotrade\feed\__init__.py", line 105, in dispatch
self.__event.emit(dateTime, values)
File "C:\Python27\lib\site-packages\pyalgotrade\observer.py", line 59, in emit
handler(*args, **kwargs)
File "C:\Python27\lib\site-packages\pyalgotrade\eventprofiler.py", line 172, in __onBars
eventOccurred = self.__predicate.eventOccurred(instrument, self.__feed[instrument])
File "C:\Python27\lib\site-packages\pyalgotrade\eventprofiler.py", line 89, in eventOccurred
raise NotImplementedError()
NotImplementedError
My code is:
from pyalgotrade import eventprofiler
from pyalgotrade.technical import stats
from pyalgotrade.technical import roc
from pyalgotrade.technical import ma
from pyalgotrade.barfeed import csvfeed

class single_event_strat( eventprofiler.Predicate ):
    def __init__(self, feed):
        self.__returns = {}  # CLASS ATTR
        for inst in feed.getRegisteredInstruments():
            priceDS = feed[inst].getAdjCloseDataSeries()  # STORE: priceDS (a temporary representation)
            self.__returns[inst] = roc.RateOfChange( priceDS, 1 )
            # CALC: ATTR <- Returns over the adjusted close values, consumed priceDS
            # (could be expressed as self.__returns[inst] = roc.RateOfChange( (feed[inst].getAdjCloseDataSeries()), 1 ),
            # but would be less readable)

    def eventOccoured( self, instrument, aBarDS):
        if (aBarDS[-1].getVolume() > 10 and aBarDS[-1].getClose() > 5 ):
            return True
        else:
            return False

def main(plot):
    feed = csvfeed.GenericBarFeed(0)
    feed.addBarsFromCSV('FCT', "FCT_daily_converted.csv")
    predicate = single_event_strat(feed)
    eventProfiler = eventprofiler.Profiler( predicate, 5, 5)
    eventProfiler.run(feed, True)
    results = eventProfiler.getResults()
    print "%d events found" % (results.getEventCount())
    if plot:
        eventprofiler.plot(results)

if __name__ == "__main__":
    main(True)
What does this error mean?
Does anyone know what's wrong and how to fix it?
Here is a link to the eventprofiler code:
http://pastebin.com/QD220VQb
As a bonus, does anyone know where I can find examples of the profiler being used, other than the example pyalgotrade gives, seen here?
I think you just made a spelling mistake in the eventOccurred method definition: because of the typo, your subclass never overrides Predicate.eventOccurred, so the profiler calls the base class implementation, which raises NotImplementedError.
def eventOccoured( self, instrument, aBarDS):
should be replaced by
def eventOccurred( self, instrument, aBarDS):
I want to start the ActorCore method in a separate process and then process messages that come to that ActorCore. For some reason this code is not working.
import queue
from multiprocessing import Process

class NotMessage(Exception):
    def __str__(self):
        return 'NotMessage exception'

class Message(object):
    def Do(self, Actor):
        # Do some stuff to the actor
        pass

    def __str__(self):
        return 'Generic message'

class StopMessage(Message):
    def Do(self, Actor):
        Actor.__stopped = True

    def __str__(self):
        return 'Stop message'

class Actor(object):
    __DebugName = ''
    __MsgQ = None
    __stopped = False

    def __init__(self, Name):
        self.__DebugName = Name
        self.__MsgQ = queue.Queue()

    def LaunchActor(self):
        p = Process(target=self.ActorCore)
        p.start()
        return self.__MsgQ

    def ActorCore(self):
        while not self.__stopped:
            Msg = self.__MsgQ.get(block=True)
            try:
                Msg.Do(self)
                print(Msg)
            except NotMessage as e:
                print(str(e), ' occurred in ', self.__DebugName)

def main():
    joe = Actor('Joe')
    msg = Message()
    stop = StopMessage()
    qToJoe = joe.LaunchActor()
    qToJoe.put(msg)
    qToJoe.put(msg)
    qToJoe.put(stop)

if __name__ == '__main__':
    main()
I am getting a weird error when running it:
Traceback (most recent call last):
File "C:/Users/plkruczp/PycharmProjects/ActorFramework/Actor/Actor.py", line 64, in <module>
main()
File "C:/Users/plkruczp/PycharmProjects/ActorFramework/Actor/Actor.py", line 58, in main
qToJoe = joe.LaunchActor()
File "C:/Users/plkruczp/PycharmProjects/ActorFramework/Actor/Actor.py", line 40, in LaunchActor
p.start()
File "C:\Program Files\Python35\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\Program Files\Python35\lib\multiprocessing\context.py", line 212, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Program Files\Python35\lib\multiprocessing\context.py", line 313, in _Popen
return Popen(process_obj)
File "C:\Program Files\Python35\lib\multiprocessing\popen_spawn_win32.py", line 66, in __init__
reduction.dump(process_obj, to_child)
File "C:\Program Files\Python35\lib\multiprocessing\reduction.py", line 59, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle _thread.lock objects
Help please! I tried everything :(
Just use Queue instead of queue:
Remove import queue and add Queue to the from multiprocessing import, like:
from multiprocessing import Process, Queue
then change self.__MsgQ = queue.Queue() to self.__MsgQ = Queue()
That's all you need to do to get it to work; the rest is the same for your case.
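Putting the two changes together, a minimal sketch of just the affected lines:

from multiprocessing import Process, Queue

class Actor(object):
    def __init__(self, Name):
        self.__DebugName = Name
        # multiprocessing.Queue can be handed to the child when the Process is created
        self.__MsgQ = Queue()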
Edit, explanation:
queue.Queue is only thread-safe, and multiprocessing does actually spawn another process. Because of that, the separate multiprocessing.Queue is implemented to also be process-safe; a plain queue.Queue holds thread locks internally, which is why pickling it for the child process failed with the _thread.lock error. As another option, if multithreading is wanted, the threading library can be used together with queue.Queue: https://docs.python.org/dev/library/threading.html#module-threading
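For completeness, a small sketch of that threading alternative (a hypothetical worker, not the actor code from the question):

import queue
import threading

q = queue.Queue()

def worker():
    while True:
        item = q.get()
        if item is None:  # sentinel value: stop the worker
            break
        print('processed', item)

t = threading.Thread(target=worker)
t.start()
q.put('hello')
q.put(None)
t.join()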
Additional information:
Another parallelization option, depending on your further requirements, is joblib, where the workers can be configured to be either processes or threads: https://joblib.readthedocs.io/