I have a Flask app with WebSockets, and when a client emits the 'start' event, I want the handler to kick off a thread like this:
@socketio.on('start', namespace='/ws')
def patrol():
    asset = {'x': 0, 'y': 1}
    while True:
        thread_patrol(asset, [[0, 0], [400, 400]])

def patrol(asset, coordinates):
    count = 0
    import itertools
    for coordinate in itertools.cycle(coordinates):
        val = True
        while val:
            asset, val = take_step(asset, coordinate[0], coordinate[1])
            emit('asset',
                 {'data': asset, 'count': count},
                 broadcast=True)
            count += 1
            time.sleep(1)

import threading

def thread_patrol(asset, coordinates):
    print('threading!')
    patrolling_thread = threading.Thread(target=patrol, args=(asset, coordinates))
    patrolling_thread.start()

def take_step(asset, x, y):
    asset[x] = x
    asset[y] = y
But then I get an error because the emit runs outside of the request context. What do I need to do to allow my app to emit from a thread?
threading!
Exception in thread Thread-2005:
Traceback (most recent call last):
File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "app2.py", line 270, in patrol
broadcast=True)
File "/usr/local/lib/python2.7/site-packages/flask_socketio/__init__.py", line 520, in emit
namespace = flask.request.namespace
File "/usr/local/lib/python2.7/site-packages/werkzeug/local.py", line 338, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/local/lib/python2.7/site-packages/werkzeug/local.py", line 297, in _get_current_object
return self.__local()
File "/usr/local/lib/python2.7/site-packages/flask/globals.py", line 20, in _lookup_req_object
raise RuntimeError('working outside of request context')
RuntimeError: working outside of request context
It turns out you (I) have to set daemon = True on the thread so it runs as a background thread, and I removed broadcast=True since it wasn't necessary anyway:
def thread_patrol(asset, coordinates):
    patrolling_thread = Thread(target=patrol, args=(asset, coordinates))
    patrolling_thread.daemon = True
    patrolling_thread.start()
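For reference, a minimal sketch of the handler side under the same assumptions (the handler name start_patrol is hypothetical, and socketio is the Flask-SocketIO instance). Flask-SocketIO also exposes socketio.start_background_task() and socketio.emit(..., namespace=...), which can be called from a worker thread without a request context:
from threading import Thread

@socketio.on('start', namespace='/ws')
def start_patrol():
    asset = {'x': 0, 'y': 1}
    # Option 1: daemonized thread, as in the fix above.
    thread_patrol(asset, [[0, 0], [400, 400]])
    # Option 2 (alternative): let Flask-SocketIO manage the worker,
    # and emit with an explicit namespace from inside patrol():
    #   socketio.start_background_task(patrol, asset, [[0, 0], [400, 400]])
    #   socketio.emit('asset', {'data': asset}, namespace='/ws')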
Related
I'm really new to Python and I don't even know if the title of this question is right. Anyway, I'm facing an error that I've been trying to fix for the last couple of hours: raise RuntimeError("threads can only be started once").
I googled the error and found a couple of solutions, but none of them seem to work. I know you cannot call start() on a thread more than once, but how else am I supposed to get my script working again?
import pyautogui, requests, pyperclip
from pynput import mouse, keyboard
from tkinter import filedialog

url = "https://poxgur.com/"
file_path = filedialog.asksaveasfilename(defaultextension='.png')

def on_click(x, y, button, pressed):
    global currentMouseX, currentMouseY
    if pressed:
        currentMouseX, currentMouseY = x, y
    if not pressed:
        mouse_listener.stop()
        im = pyautogui.screenshot(region=(
            currentMouseX, currentMouseY, abs(currentMouseX - x), abs(currentMouseY - y)))
        im.save(file_path)
        print(file_path)
        files = {"file": open(file_path, "rb")}
        r = requests.post(url + "api.php", files=files)
        pyperclip.copy(url + r.text + ".png")
        # os.remove(file_path)
        mouse_listener.stop()
        return False

mouse_listener = mouse.Listener(on_click=on_click)

def on_scroll_lock_release(key):
    if key == keyboard.Key.scroll_lock:
        if not mouse_listener.is_alive():
            mouse_listener.start()

with keyboard.Listener(on_release=on_scroll_lock_release) as listener:
    listener.join()
The error message:
Unhandled exception in listener callback
Traceback (most recent call last):
File "C:\Users\marti\anaconda3\envs\The 100 Projects\lib\site-packages\pynput\_util\__init__.py", line 162, in inner
return f(self, *args, **kwargs)
File "C:\Users\marti\anaconda3\envs\The 100 Projects\lib\site-packages\pynput\keyboard\_win32.py", line 283, in _process
self.on_release(key)
File "C:\Users\marti\anaconda3\envs\The 100 Projects\lib\site-packages\pynput\_util\__init__.py", line 78, in inner
if f(*args) is False:
File "C:/Users/marti/Documents/PythonProjects/Acutal Projects/The 100 Projects/Screenshot2.0.py", line 33, in on_scroll_lock_release
mouse_listener.start()
File "C:\Users\marti\anaconda3\envs\The 100 Projects\lib\threading.py", line 848, in start
raise RuntimeError("threads can only be started once")
RuntimeError: threads can only be started once
Traceback (most recent call last):
File "C:/Users/marti/Documents/PythonProjects/Acutal Projects/The 100 Projects/Screenshot2.0.py", line 37, in <module>
listener.join()
File "C:\Users\marti\anaconda3\envs\The 100 Projects\lib\site-packages\pynput\_util\__init__.py", line 210, in join
six.reraise(exc_type, exc_value, exc_traceback)
File "C:\Users\marti\anaconda3\envs\The 100 Projects\lib\site-packages\six.py", line 702, in reraise
raise value.with_traceback(tb)
File "C:\Users\marti\anaconda3\envs\The 100 Projects\lib\site-packages\pynput\_util\__init__.py", line 162, in inner
return f(self, *args, **kwargs)
File "C:\Users\marti\anaconda3\envs\The 100 Projects\lib\site-packages\pynput\keyboard\_win32.py", line 283, in _process
self.on_release(key)
File "C:\Users\marti\anaconda3\envs\The 100 Projects\lib\site-packages\pynput\_util\__init__.py", line 78, in inner
if f(*args) is False:
File "C:/Users/marti/Documents/PythonProjects/Acutal Projects/The 100 Projects/Screenshot2.0.py", line 33, in on_scroll_lock_release
mouse_listener.start()
File "C:\Users\marti\anaconda3\envs\The 100 Projects\lib\threading.py", line 848, in start
raise RuntimeError("threads can only be started once")
RuntimeError: threads can only be started once
Process finished with exit code 1
A possible solution would be to initialize a new Listener each time before starting it.
You could re-code your solution in the following way:
mouse_listener = mouse.Listener(on_click=on_click)

def on_scroll_lock_release(key):
    global mouse_listener
    if key == keyboard.Key.scroll_lock:
        if not mouse_listener.is_alive():
            mouse_listener = mouse.Listener(on_click=on_click)
            mouse_listener.start()
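Note that because mouse_listener is rebound inside the callback, it has to be declared global there (as in the snippet above); otherwise Python treats it as a local variable and the is_alive() check raises UnboundLocalError before the listener can be recreated.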
I am trying to multithread a SignalR connection with Python. I am getting a 'rawlink' connection error; the connection itself works, and I need to wait 1 second in order to receive the message from the client. I am also using a barrier so the threads execute "simultaneously".
Here is my code:
with Session() as session:
    global connection
    connection = Connection("http://sampleSINGALRURL/signalr", session)
    presenceservice = connection.register_hub('ClientRegistration')
    presenceservice1 = connection.register_hub('PresenceClientHub')
    connection.start()
    presenceservice.server.invoke('IdentifyClient', devideIdentity, softwareVersion, IpAddress,
                                  machineName, DeviceType, patientAdmissionGuid, patientID, pairingId)
    presenceservice1.client.on('StaffPresenceNotified', self.get_data1)
    connection.wait(1)
And here are my threading functions:
def get_clients(self):
    global barrier
    self.connect_to_database1()
    barrier.wait()
    self.get_message_from_client1()
    self.print_data1()

def send_messages(self):
    global MessageNumber
    global machineName
    global staffName
    global request
    machineName = final_result[MessageNumber][0]
    staffName = staff_results[MessageNumber][0]
    MessageNumber += 1
    barrier.wait()
    request = requests.post(
        "http://sampleurl/api/sample")
    return request

def print_response(self):
    global request
    timestamp = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
    logging.info("Message sent at " + "Time : " + timestamp + " " + machineName)

def Spin_Clients(self, NumMessages):
    for i in range(10):
        self.client_list.append(Thread(target=self.send_messages))
        self.client_list[i].start()
        self.print_response()
        sleep(2)
    for i in range(10):
        self.Message_List.append(Thread(target=self.get_clients))
        self.Message_List[i].start()
    for thread in self.client_list:
        thread.join()
    for thread in self.Message_List:
        thread.join()
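For context, the barrier referenced above is never shown being created; presumably it is a threading.Barrier built once before the threads are spawned, roughly as in this sketch (the party count of 10, matching the loops above, is an assumption):
import threading

# Hypothetical setup: the shared barrier the worker functions wait on.
barrier = threading.Barrier(10)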
Error logs
All threads have finished
11:41:37.243
Exception in thread Thread-13:
Traceback (most recent call last):
File "c:\users\appdata\local\programs\python\python37\lib\threading.py", line 917, in _bootstrap_inner
self.run()
File "c:\users\appdata\local\programs\python\python37\lib\threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "C:\Data\LoadTest.py", line 103, in get_clients
self.get_message_from_client1()
File "C:\Data\LoadTest.py", line 89, in get_message_from_client1
connection.wait(1)
File "c:\users\appdata\local\programs\python\python37\lib\site-packages\signalr\_connection.py", line 60, in wait
gevent.joinall([self.__greenlet], timeout)
File "src\gevent\greenlet.py", line 849, in gevent._greenlet.joinall
File "src\gevent\greenlet.py", line 859, in gevent._greenlet.joinall
File "src\gevent\_hub_primitives.py", line 198, in gevent.__hub_primitives.wait_on_objects File "src\gevent\_hub_primitives.py", line 235, in gevent.__hub_primitives.wait_on_objects
File "src\gevent\_hub_primitives.py", line 125, in gevent.__hub_primitives._WaitIterator.__iter__ AttributeError: 'NoneType' object has no attribute 'rawlink'
I also tried using locks, but that had the same outcome.
Any ideas?
I am trying to use the other cores on my machine in my Python program. The following is the basic structure/logic of my code:
import multiprocessing as mp
import pandas as pd
import gc

def multiprocess_RUN(param):
    result = Analysis_Obj.run(param)
    return result

class Analysis_Obj():
    def __init__(self, filename):
        self.DF = pd.read_csv(filename)
        return

    def run_Analysis(self, param):
        # Multi-core option
        pool = mp.Pool(processes=1)
        run_result = pool.map(multiprocess_RUN, [self, param])
        # Normal option
        run_result = self.run(param)
        return run_result

    def run(self, param):
        # Let's say I have written a function to count the frequency of 'param' in the target file
        result = count(self.DF, param)
        return result

if __name__ == "__main__":
    files = ['file1.csv', 'file2.csv']
    params = [1, 2, 3, 4]
    results = []
    for i in range(0, len(files)):
        analysis = Analysis_Obj(files[i])
        for j in range(0, len(params)):
            result = analysis.run_Analysis(params[j])
            results.append(result)
            del result
        del analysis
        gc.collect()
If I comment out the 'Multi-core option' and run only the 'Normal option', everything runs fine. But even when I run the 'Multi-core option' with processes=1, I get a Memory Error as soon as the for loop starts on the 2nd file. I have deliberately set it up so that an Analysis object is created and deleted in each iteration of the loop, so that the file that has been processed is cleared from memory. Clearly this hasn't worked. Advice on how to get around this would be very much appreciated.
Cheers
EDIT:
Here is the error message I have in the terminal:
Exception in thread Thread-7:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 754, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/lib/python2.7/multiprocessing/pool.py", line 326, in _handle_workers
pool._maintain_pool()
File "/usr/lib/python2.7/multiprocessing/pool.py", line 230, in _maintain_pool
self._repopulate_pool()
File "/usr/lib/python2.7/multiprocessing/pool.py", line 223, in _repopulate_pool
w.start()
File "/usr/lib/python2.7/multiprocessing/process.py", line 130, in start
self._popen = Popen(self)
File "/usr/lib/python2.7/multiprocessing/forking.py", line 121, in __init__
self.pid = os.fork()
OSError: [Errno 12] Cannot allocate memory
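One thing that stands out in the snippet above is that run_Analysis creates a new mp.Pool on every call and never closes it, so worker processes may accumulate until os.fork fails with Errno 12. A minimal sketch of closing the pool explicitly (this assumes the leak comes from the unclosed pools and leaves the rest of the logic untouched):
def run_Analysis(self, param):
    pool = mp.Pool(processes=1)
    try:
        run_result = pool.map(multiprocess_RUN, [self, param])
    finally:
        # Release the worker processes so they do not pile up
        # across repeated calls (works on both Python 2.7 and 3).
        pool.close()
        pool.join()
    return run_result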
In the following script, why does my callback function never get called?
I am using a pre-created kernel to run the code and trying to get its output by attaching callbacks to the respective sockets.
from zmq.eventloop import ioloop
ioloop.install()
from zmq.eventloop.zmqstream import ZMQStream
from functools import partial
from tornado import gen
from tornado.concurrent import Future
from jupyter_client import BlockingKernelClient
from pprint import pprint
import logging, os, zmq

reply_futures = {}
context = zmq.Context()
publisher = context.socket(zmq.PUSH)
publisher.connect("tcp://127.0.0.1:5253")

def reply_callback(session, stream, msg_list):
    idents, msg_parts = session.feed_identities(msg_list)
    reply = session.deserialize(msg_parts)
    parent_id = reply['parent_header'].get('msg_id')
    reply_future = reply_futures.get(parent_id)
    print("{} \n".format(reply))
    if reply_future:
        if "execute_reply" == reply["msg_type"]:
            reply_future.set_result(reply)
            publisher.send(reply)

def fv_execute():
    code = 'print ("hello")'
    msg_id = execute(code)
    return msg_id

def get_connection_file(kernel_id):
    json_file = 'kernel-{}.json'.format(kernel_id)
    return os.path.join('/tmp', json_file)

def execute(code,):
    kernel_id = '46459cb4-fa34-497a-8e3d-dfb3ab4476fd'
    cf = get_connection_file(kernel_id)
    kernel_client = BlockingKernelClient(connection_file=cf)
    setup_listener(kernel_client)
    msg_id = ioloop.IOLoop.current().run_sync(lambda: execute_(kernel_client, code))
    return msg_id

def setup_listener(kernel_client):
    shell_stream = ZMQStream(kernel_client.shell_channel.socket)
    iopub_stream = ZMQStream(kernel_client.iopub_channel.socket)
    shell_stream.on_recv_stream(partial(reply_callback, kernel_client.session))
    iopub_stream.on_recv_stream(partial(reply_callback, kernel_client.session))

@gen.coroutine
def execute_(kernel_client, code):
    msg_id = kernel_client.execute(code)
    f = reply_futures[msg_id] = Future()
    print("Is kernel alive: {}".format(kernel_client.is_alive()))
    print(msg_id)
    yield f
    raise gen.Return(msg_id)

if __name__ == '__main__':
    fv_execute()
Here is the output; the script runs forever:
jupyter#albus:~/lab$ python2 iolooptest2.py
Is kernel alive: True
de3eae2e-48d3-451a-b6bc-421674bb2a35
^X^CTraceback (most recent call last):
File "iolooptest2.py", line 61, in <module>
fv_execute()
File "iolooptest2.py", line 30, in fv_execute
msg_id = execute(code)
File "iolooptest2.py", line 42, in execute
msg_id = ioloop.IOLoop.current().run_sync(lambda: execute_(kernel_client,code))
File "/usr/local/lib/python2.7/dist-packages/tornado/ioloop.py", line 452, in run_sync
self.start()
File "/usr/local/lib/python2.7/dist- packages/zmq/eventloop/ioloop.py", line 177, in start
super(ZMQIOLoop, self).start()
File "/usr/local/lib/python2.7/dist-packages/tornado/ioloop.py", line 862, in start
event_pairs = self._impl.poll(poll_timeout)
File "/usr/local/lib/python2.7/dist- packages/zmq/eventloop/ioloop.py", line 122, in poll
z_events = self._poller.poll(1000*timeout)
File "/usr/local/lib/python2.7/dist-packages/zmq/sugar/poll.py", line 99, in poll
return zmq_poll(self.sockets, timeout=timeout)
File "zmq/backend/cython/_poll.pyx", line 116, in zmq.backend.cython._poll.zmq_poll (zmq/backend/cython/_poll.c:2036)
File "zmq/backend/cython/checkrc.pxd", line 12, in zmq.backend.cython.checkrc._check_rc (zmq/backend/cython/_poll.c:2418)
KeyboardInterrupt
A slightly modified version of the code is here
https://gist.github.com/jayendra13/76a4f5726428882013ea62d94974da5c
where I pass the ioloop as an argument to ZMQStream while attaching the callback; it shows the same behaviour.
Here is an almost identical script which works:
https://gist.github.com/jayendra13/e553fafba5398e287107e947c16988df
Adding the following two lines after the creation of kernel_client solved my issue.
kernel_client.load_connection_file()
kernel_client.start_channels()
So the new execute looks like this:
def execute(code,):
    kernel_id = '46459cb4-fa34-497a-8e3d-dfb3ab4476fd'
    cf = get_connection_file(kernel_id)
    kernel_client = BlockingKernelClient(connection_file=cf)
    kernel_client.load_connection_file()
    kernel_client.start_channels()
    setup_listener(kernel_client)
    msg_id = ioloop.IOLoop.current().run_sync(lambda: execute_(kernel_client, code))
    return msg_id
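Presumably this works because load_connection_file() reads the kernel's ports and signing key from the connection file and start_channels() actually connects the shell and iopub sockets; until then, the ZMQStream callbacks are attached to sockets that never receive anything.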
I wrote a program that posts events using asyncio and aiohttp. It works when I run it locally; I can post 10k events with no problem. However, after SCPing the whole codebase to a remote machine, I can't post more than 15 events there without getting this error:
RuntimeError: Event loop is closed
Exception ignored in: <coroutine object Poster.async_post_event at 0x7f4a53989410>
Traceback (most recent call last):
File "/home/bli1/qe-trinity/tracer/utils/poster.py", line 63, in async_post_event
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/client.py", line 565, in __aenter__
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/client.py", line 198, in _request
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/connector.py", line 316, in connect
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/connector.py", line 349, in _release_waiter
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/futures.py", line 332, in set_result
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/futures.py", line 242, in _schedule_callbacks
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 447, in call_soon
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 456, in _call_soon
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 284, in _check_closed
RuntimeError: Event loop is closed
Exception ignored in: <coroutine object Poster.async_post_event at 0x7f4a5397ffc0>
Traceback (most recent call last):
File "/home/bli1/qe-trinity/tracer/utils/poster.py", line 63, in async_post_event
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/client.py", line 565, in __aenter__
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/client.py", line 198, in _request
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/connector.py", line 316, in connect
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/connector.py", line 349, in _release_waiter
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/futures.py", line 332, in set_result
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/futures.py", line 242, in _schedule_callbacks
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 447, in call_soon
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 456, in _call_soon
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 284, in _check_closed
RuntimeError: Event loop is closed
How can I debug this or find out the source of this problem?
Here is the class I created; I call its post() method to run it:
import uuid
import os
import asyncio
import time
import random
import json
import aiohttp
from tracer.utils.phase import Phase

class Poster(Phase):
    def __init__(self, log, endpoint, num_post, topic, datafile, timeout, oracles, secure=False, thru_proxy=True):
        Phase.__init__(self, log, "post", oracles, secure, thru_proxy)
        self.log = log
        self.num_post = int(num_post)
        self.datafile = datafile.readlines()
        self.topic = topic
        self.endpoint = self.set_endpoint(endpoint, self.topic)
        self.response = None
        self.timeout = timeout

    def random_line(self):
        """ Returns random line from file and converts it to JSON """
        return json.loads(random.choice(self.datafile))

    @staticmethod
    def change_uuid(event):
        """ Creates new UUID for event_id """
        new_uuid = str(uuid.uuid4())
        event["event_header"]["event_id"] = new_uuid
        return event

    @staticmethod
    def wrapevent(event):
        """ Wrap event with metadata for analysis later on """
        return {
            "tracer": {
                "post": {
                    "statusCode": None,
                    "timestamp": None,
                },
                "awsKafkaTimestamp": None,
                "qdcKakfaTimestamp": None,
                "hdfsTimestamp": None
            },
            "event": event
        }

    def gen_random_event(self):
        random_event = self.random_line()
        event = self.change_uuid(random_event)
        dataspec = self.wrapevent(event)
        return dataspec

    async def async_post_event(self, event, session):
        async with session.post(self.endpoint, data=event, proxy=self.proxy) as resp:
            event["tracer"]["post"]["timestamp"] = time.time() * 1000.0
            event["tracer"]["post"]["statusCode"] = resp.status
        unique_id = event["event"]["event_header"]["event_id"]
        oracle_endpoint = os.path.join(self.oracle, unique_id)
        async with session.put(oracle_endpoint, data=json.dumps(event), proxy=self.proxy) as resp:
            if resp.status != 200:
                self.log.debug("Post to ElasticSearch not 200")
                self.log.debug(event["event"]["event_header"]["event_id"])
                self.log.debug("Status code: " + str(resp.status))
            return event["event"]["event_header"]["event_id"], resp.status

    async def async_post_events(self, events):
        coros = []
        conn = aiohttp.TCPConnector(verify_ssl=self.secure)
        async with aiohttp.ClientSession(connector=conn) as session:
            for event in events:
                coros.append(self.async_post_event(event, session))
            return await asyncio.gather(*coros)

    def post(self):
        event_loop = asyncio.get_event_loop()
        try:
            events = [self.gen_random_event() for i in range(self.num_post)]
            start_time = time.time()
            results = event_loop.run_until_complete(self.async_post_events(events))
            print("Time taken: " + str(time.time() - start_time))
        finally:
            event_loop.close()
You cannot reuse an event loop once it's closed. From the AbstractEventLoop.close documentation:
This is idempotent and irreversible. No other methods should be called after this one.
Either remove the loop.close call or create a new loop for each post.
My advice would be to avoid those problems by running everything inside the loop and awaiting async_post_events when needed.
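As a concrete illustration of the second option, here is a minimal sketch of post() that builds a private loop per call instead of closing the process-wide default loop (names and structure follow the class above; returning results is an addition):
def post(self):
    # Use a fresh loop for this call only, so closing it cannot
    # affect any later call or the default loop of the process.
    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    try:
        events = [self.gen_random_event() for _ in range(self.num_post)]
        start_time = time.time()
        results = event_loop.run_until_complete(self.async_post_events(events))
        print("Time taken: " + str(time.time() - start_time))
        return results
    finally:
        event_loop.close()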