Launch a function with arguments as a thread in Python

I'm trying to launch this program as an infinite thread in my program:
https://github.com/qLethon/bitmex_simple_websocket
from bitmex_simple_websocket import BitMEXWebSocket
import json


class MyBitMEXWebsocket(BitMEXWebSocket):
    """BitMEX websocket client that prints each 1-minute trade bin as it arrives."""

    def on_message(self, ws, message):
        # Invoked by the websocket library for every frame received.
        data = json.loads(message)
        # Only 'tradeBin1m' table updates carry the candle data we care about.
        if 'table' in data and data['table'] == 'tradeBin1m':
            print(data['data'][0])


# Constructing the client opens the websocket connection; on_message then
# fires for every incoming frame.
bitmex = MyBitMEXWebsocket(endpoint='wss://www.bitmex.com/realtime?subscribe=tradeBin1m:XBTUSD')
How do I launch it correctly as a thread under `if __name__ == '__main__':`?
In my current code, only the first thread (the MyBitMEXWebsocket function) starts, but tradingview and trader won't:
if __name__ == '__main__':
# BUG: target= receives the RESULT of calling the constructor, not a
# callable. MyBitMEXWebsocket(...) executes right here on the main thread,
# and the Thread is handed an instance (not callable) as its target.
# A callable must be passed instead, e.g. a wrapper function.
Thread(target = MyBitMEXWebsocket(endpoint='wss://www.bitmex.com/realtime?subscribe=quote:XBTUSD')).start()
Thread(target = tradingview).start()
# NOTE(review): tradingview is started as a thread above AND iterated here
# on the main thread — presumably only one of the two is intended.
for count_var_short, count_var_long in tradingview():
trader(count_var_short,count_var_long)

Solved — I asked the GitHub author, and he gave this answer:
class MyBitMEXWebsocket(BitMEXWebSocket):
    """Websocket client that prints and captures best bid/ask from 'quote' updates."""

    def on_message(self, ws, message):
        data = json.loads(message)
        if 'table' in data and data['table'] == 'quote':
            print(data['data'][0])
            # NOTE(review): these are method locals and vanish on return; make
            # them globals or instance attributes if other code must read them.
            WS_BestBid = (data['data'][0]["bidPrice"])
            WS_BestAsk = (data['data'][0]["askPrice"])
            #print('bid:',WS_BestBid)
            #print('ask:',WS_BestAsk)
            #print('bid:',data['data'][0]["bidPrice"])
            #print('ask:',data['data'][0]["askPrice"])


def f():
    # Wrapper so Thread receives a callable; constructing the client here
    # opens the websocket connection on the worker thread.
    MyBitMEXWebsocket(endpoint='wss://www.bitmex.com/realtime?subscribe=quote:XBTUSD,order:XBTUSD')


if __name__ == '__main__':
    # Start the websocket client exactly once (the original answer also
    # started it at module import via th1, launching it twice).
    Thread(target=f).start()
    Thread(target=tradingview).start()
    for count_var_short, count_var_long in tradingview():
        trader(count_var_short, count_var_long)

Related

threading returning DataFrame using queue

Probably a simple question, I'm a beginner.
So, I have a script with a function that returns a DataFrame using threading. It's working fine, but I'm having trouble understanding what's going on when I create the thread. The code:
def test(word,word1):
print(f'start {word}')
df_1 = pd.DataFrame({'test_1':[word],'test_2':[word1]})
time.sleep(2)
print(f'end {word}')
return df_1
my_queue = queue.Queue()
if __name__ == "__main__":
t1_start = perf_counter()
x = threading.Thread(target=lambda q, arg1,arg2: q.put(test(arg1,arg2)), args=(my_queue, 'first','ok1'))
x1 = threading.Thread(target=lambda q, arg1,arg2: q.put(test(arg1,arg2)), args=(my_queue, 'second','ok2'))
x1.start()
x.start()
print('\nrun something')
x.join()
x1.join()
t2_start = perf_counter()
print(f'\n time:{t2_start-t1_start}')
as I said, this script works fine, giving me the following return:
But if I try to remove the lambda function as below:
if __name__ == "__main__":
t1_start = perf_counter()
# BUG (the point of the question): my_queue.put(test(...)) executes
# immediately, HERE, on the main thread. Its return value (None) is what
# gets passed as target=, so each Thread runs nothing — the ~4s of work
# has already happened sequentially before the threads even start.
x = threading.Thread(target=my_queue.put(test('first','ok1')))
x1 = threading.Thread(target=my_queue.put(test('second','ok2')))
x1.start()
x.start()
print('\nrun something')
x.join()
x1.join()
t2_start = perf_counter()
print(f'\n time:{t2_start-t1_start}')
The script will still work, but the threading will not. I have the following return:
Why do I need to use a lambda function for threading to work?

Falcon, wsgiref : Unit test cases

I have the below code:
master.py
def create():
master = falcon.API(middleware=Auth())
msg = Message()
master.add_route('/message', msg)
# BUG (the subject of this question): no return statement, so create()
# returns None and testing.TestClient(create()) later fails with
# "TypeError: 'NoneType' object is not callable".
master = create()
if __name__ == '__main__':
httpd = simple_server.make_server("127.0.0.1", 8989, master)
process = Thread(target=httpd.serve_forever, name="master_process")
process.start()
#Some logic happens here
# NOTE(review): target=httpd.shutdown() CALLS shutdown immediately and
# hands its return value to Thread — presumably target=httpd.shutdown
# (the bound method) was intended.
s_process = Thread(target=httpd.shutdown(), name="shut_process")
s_process.start()
# NOTE(review): 's' is never defined — presumably s_process.join().
s.join()
I tried to create the below test case for the below:
from falcon import testing
from master import create


@pytest.fixture(scope='module')
def client():
    """Test client wrapping the app factory under test."""
    return testing.TestClient(create())


def test_post_message(client):
    # The apostrophe inside the JSON body must be escaped (or the outer
    # quotes changed) — otherwise the literal is a syntax error.
    # This is the line that raised: TypeError: 'NoneType' object is not
    # callable (because create() in master.py returned None).
    result = client.simulate_post('/message', headers={'token': "ubxuybcwe"},
                                  body='{"message": "I\'m here!"}')
    assert result.status_code == 200
I tried running the above but get the below error:
TypeError: 'NoneType' object is not callable
I actually am unable to figure out how I should go about writing the test case for this.
Based on what @hoefling said, the below fixed it:
master.py
def create():
    """Build and return the Falcon WSGI application."""
    master = falcon.API(middleware=Auth())
    msg = Message()
    master.add_route('/message', msg)
    return master  # the fix: hand the app object back to callers


master = create()

if __name__ == '__main__':
    httpd = simple_server.make_server("127.0.0.1", 8989, master)
    process = Thread(target=httpd.serve_forever, name="master_process")
    process.start()
    # Some logic happens here
    # Pass the bound method itself — calling it (httpd.shutdown()) would
    # stop the server immediately on the main thread and give Thread a
    # None target.
    s_process = Thread(target=httpd.shutdown, name="shut_process")
    s_process.start()
    s_process.join()  # was: s.join() — 's' was never defined
And then the test case works:
from falcon import testing
from master import create


@pytest.fixture(scope='module')
def client():
    """Test client for the fixed app factory (create() now returns the app)."""
    return testing.TestClient(create())


def test_post_message(client):
    # Apostrophe escaped so the JSON body is a valid Python string literal.
    result = client.simulate_post('/message', headers={'token': "ubxuybcwe"},
                                  body='{"message": "I\'m here!"}')
    assert result.status_code == 200
Thanks a lot @hoefling!

message_filters doesn't call the callback function

I'm trying to use the message_filters in order to subscribe to two topics. Here's my code
class sync_listener:
    """Subscribe to an image topic and a camera-info topic in lockstep."""

    def __init__(self):
        self.image_sub = message_filters.Subscriber('camera/rgb/image_color', Image)
        self.info_sub = message_filters.Subscriber('camera/projector/camera_info', CameraInfo)
        # Exact-timestamp matching: the callback fires only when both
        # messages carry identical header stamps.
        self.ts = message_filters.TimeSynchronizer([self.image_sub, self.info_sub], 10)
        self.ts.registerCallback(self.callback)

    def callback(self, image, camera_info):
        print("done")


def main(args):
    ls = sync_listener()
    rospy.init_node('sample_message_filters', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")


if __name__ == '__main__':
    main(sys.argv)
But it never goes to the callback function. It just freezes at rospy.spin().
Rather than using TimeSynchronizer, I used ApproximateTimeSynchronizer and it worked. So, I changed the code to:
class sync_listener:
    """Subscribe to an image topic and a camera-info topic with approximate sync."""

    def __init__(self):
        self.image_sub = message_filters.Subscriber('camera/rgb/image_color', Image)
        self.info_sub = message_filters.Subscriber('camera/projector/camera_info', CameraInfo)
        # Approximate matching tolerates differing stamps, which is why this
        # version fires where the exact TimeSynchronizer never did.
        self.ts = message_filters.ApproximateTimeSynchronizer([self.image_sub, self.info_sub], 1, 1)  # Changed code
        self.ts.registerCallback(self.callback)

    def callback(self, image, camera_info):
        print("done")


def main(args):
    ls = sync_listener()
    rospy.init_node('sample_message_filters', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")


if __name__ == '__main__':
    main(sys.argv)
Before finding this solution, I just used global variables: I assigned the first topic's message to a global variable in its callback and read it in the callback of the second topic — that's how I was able to work with both. It's not clean, but it saves hours of frustration.

Why can't I start two threads inside constructor __init__() of a python class?

I'm simply trying to run two different daemon threads and print a line from each for testing. While this code works :
import time
import threading
from threading import Thread
from myFunctions import *


class Monitoring:
    """Spawn the alarms and generator workers as daemon threads."""

    def alarms(self):
        return alarms.run()

    def generator(self):
        return generator.run()

    def run(self):
        # NOTE(review): the local names are swapped relative to their targets
        # (the 'generator' thread runs self.alarms and vice versa); behavior
        # is unaffected, but it reads confusingly.
        generator = threading.Thread(target=self.alarms)
        generator.daemon = True
        generator.start()
        alarm = threading.Thread(target=self.generator)
        alarm.daemon = True
        alarm.start()
        print("started thread")


if __name__ == '__main__':
    try:
        d = Monitoring()
        d.daemon = True
        d.run()
        print("started the thread")
        while 1:
            # Keep the main thread alive; daemon threads die with it.
            time.sleep(1)
    except KeyboardInterrupt:
        alarms.close()
        generator.close()
        print("Main - Keyboard interrupt in __main__")
Something like this doesn't seem to work and only the first thread "alarms" start. Why is that?
class Monitoring:
def __init__(self,a,g):
# BUG (explained below the snippet): the caller passes alarms.run() and
# generator.run() — i.e. the CALLS' return values, probably None — so
# self.a and self.g are not callables, and the first call already ran
# (and possibly blocked) before __init__ was even entered.
self.a = a
self.g = g
def run(self):
generator = threading.Thread(target=self.a)
generator.daemon = True
generator.start()
alarm = threading.Thread(target=self.g)
alarm.daemon = True
alarm.start()
print("#class run() ")
if __name__ == '__main__':
try:
# Should be Monitoring(alarms.run, generator.run): pass the function
# objects so the threads call them, instead of calling them here.
d = Monitoring(alarms.run(), generator.run())
d.daemon = True
d.run()
print("#__main__")
while 1:
time.sleep(1)
except KeyboardInterrupt:
alarms.close()
generator.close()
print("Main - Keyboard interrupt in __main__")
In the line
d = Monitoring(alarms.run(), generator.run())
the functions alarms.run and generator.run are called instantly and the return values (probably None) are given as arguments to the Monitoring constructor.
To use the function objects as arguments (which are then called in a new thread), use
d = Monitoring(alarms.run, generator.run)
instead.

Integrating multiprocessing.Process with concurrent.future._base.Future

I have a requirement of creating child processes, receive results using Future and then kill some of them when required.
For this I have subclassed multiprocessing.Process class and return a Future object from the start() method.
The problem is that I am not able to receive the result in the cb() function as it never gets called.
Please help/suggest if this can be done in some other way or something I am missing in my current implementation?
Following is my current approach
# Original (buggy) attempt — the answer below explains the problems.
from multiprocessing import Process, Queue
# NOTE(review): _base is a private module; concurrent.futures.Future is the
# same class through the public API.
from concurrent.futures import _base
import threading
from time import sleep
def foo(x,q):
print('result {}'.format(x*x))
result = x*x
sleep(5)
q.put(result)
class MyProcess(Process):
def __init__(self, target, args):
super().__init__()
self.target = target
self.args = args
self.f = _base.Future()
def run(self):
q = Queue()
worker_thread = threading.Thread(target=self.target, args=(self.args+ (q,)))
worker_thread.start()
r = q.get(block=True)
print('setting result {}'.format(r))
self.f.set_result(result=r)
print('done setting result')
def start(self):
# BUG (why cb never fires): this creates a NEW Future and returns it,
# while run() completes self.f — the returned future never resolves,
# so the callback registered on it is never invoked. Overriding start()
# with a threading.Thread also bypasses multiprocessing entirely.
f = _base.Future()
run_thread = threading.Thread(target=self.run)
run_thread.start()
return f
def cb(future):
print('received result in callback {}'.format(future))
def main():
p1 = MyProcess(target=foo, args=(2,))
f = p1.start()
f.add_done_callback(fn=cb)
sleep(10)
if __name__ == '__main__':
main()
print('Main thread dying')
In your start method you create a new Future which you then return. This is a different future then the one you set the result on, this future is just not used at all. Try:
def start(self):
    """Launch run() on a background thread and return the shared future.

    Returns self.f — the same Future that run() completes — so callbacks
    registered by the caller actually fire (the original returned a fresh,
    never-resolved Future).
    """
    run_thread = threading.Thread(target=self.run)
    run_thread.start()
    return self.f
However there are more problems with your code. You override the start method of the process, replacing it with execution on a worker thread, therefore actually bypassing multiprocessing. Also you shouldn't import the _base module, that is an implementation detail as seen from the leading underscore. You should import concurrent.futures.Future (it's the same class, but through public API).
This really uses multiprocessing:
from multiprocessing import Process, Queue
from concurrent.futures import Future
import threading
from time import sleep


def foo(x, q):
    """Square x, pause, and push the result onto q."""
    print('result {}'.format(x * x))
    result = x * x
    sleep(5)
    q.put(result)


class MyProcess(Process):
    """Process exposing its target's result through a concurrent.futures.Future."""

    def __init__(self, target, args):
        super().__init__()
        self.target = target
        self.args = args
        self.f = Future()

    def run(self):
        # Executed in the child process: run the target on a worker thread
        # and block until it reports its result through the queue.
        q = Queue()
        worker_thread = threading.Thread(target=self.target, args=(self.args + (q,)))
        worker_thread.start()
        r = q.get(block=True)
        print('setting result {}'.format(r))
        self.f.set_result(result=r)
        print('done setting result')


def cb(future):
    print('received result in callback {}: {}'.format(future, future.result()))


def main():
    p1 = MyProcess(target=foo, args=(2,))
    # Register the callback before starting, on the same future run() completes.
    p1.f.add_done_callback(fn=cb)
    p1.start()
    p1.join()
    sleep(10)


if __name__ == '__main__':
    main()
    print('Main thread dying')
And you're already in a new process now, spawning a worker thread to execute your target function shouldn't really be necessary, you could just execute your target function directly instead. Should the target function raise an Exception you wouldn't know about it, your callback will only be called on success. So if you fix that, then you're left with:
from multiprocessing import Process
from concurrent.futures import Future
import threading
from time import sleep


def foo(x):
    """Square x after a short delay (stands in for real work)."""
    print('result {}'.format(x * x))
    result = x * x
    sleep(5)
    return result


class MyProcess(Process):
    """Process that resolves a Future with its target's result or exception."""

    def __init__(self, target, args):
        super().__init__()
        self.target = target
        self.args = args
        self.f = Future()

    def run(self):
        # Runs in the child process: call the target directly — no extra
        # worker thread needed. Success resolves the future; failure records
        # the exception so the caller is not left waiting forever.
        try:
            r = self.target(*self.args)
            print('setting result {}'.format(r))
            self.f.set_result(result=r)
            print('done setting result')
        except Exception as ex:
            self.f.set_exception(ex)


def cb(future):
    print('received result in callback {}: {}'.format(future, future.result()))


def main():
    p1 = MyProcess(target=foo, args=(2,))
    p1.f.add_done_callback(fn=cb)
    p1.start()
    p1.join()
    sleep(10)


if __name__ == '__main__':
    main()
    print('Main thread dying')
This is basically what a ProcessPoolExecutor does.

Categories