I am trying to multithread a SignalR connection with Python, and I am getting a connection rawlink error. The connection works, and I need to wait 1 second in order to receive the message from the client. I am also using a barrier to start the threads "simultaneously".
Here is my code:
with Session() as session:
    global connection
    connection = Connection("http://sampleSINGALRURL/signalr", session)
    presenceservice = connection.register_hub('ClientRegistration')
    presenceservice1 = connection.register_hub('PresenceClientHub')
    connection.start()
    presenceservice.server.invoke('IdentifyClient', devideIdentity, softwareVersion, IpAddress,
                                  machineName, DeviceType, patientAdmissionGuid, patientID, pairingId)
    presenceservice1.client.on('StaffPresenceNotified', self.get_data1)
    connection.wait(1)
And here are my threading functions:
def get_clients(self):
    global barrier
    self.connect_to_database1()
    barrier.wait()
    self.get_message_from_client1()
    self.print_data1()

def send_messages(self):
    global MessageNumber
    global machineName
    global staffName
    global request
    machineName = final_result[MessageNumber][0]
    staffName = staff_results[MessageNumber][0]
    MessageNumber += 1
    barrier.wait()
    request = requests.post(
        "http://sampleurl/api/sample")
    return request

def print_response(self):
    global request
    timestamp = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
    logging.info("Message sent at " + "Time : " + timestamp + " " + machineName)

def Spin_Clients(self, NumMessages):
    for i in range(10):
        self.client_list.append(Thread(target=self.send_messages))
        self.client_list[i].start()
        self.print_response()
    sleep(2)
    for i in range(10):
        self.Message_List.append(Thread(target=self.get_clients))
        self.Message_List[i].start()
    for thread in self.client_list:
        thread.join()
    for thread in self.Message_List:
        thread.join()
Error logs
All threads have finished
11:41:37.243
Exception in thread Thread-13:
Traceback (most recent call last):
  File "c:\users\appdata\local\programs\python\python37\lib\threading.py", line 917, in _bootstrap_inner
    self.run()
  File "c:\users\appdata\local\programs\python\python37\lib\threading.py", line 865, in run
    self._target(*self._args, **self._kwargs)
  File "C:\Data\LoadTest.py", line 103, in get_clients
    self.get_message_from_client1()
  File "C:\Data\LoadTest.py", line 89, in get_message_from_client1
    connection.wait(1)
  File "c:\users\appdata\local\programs\python\python37\lib\site-packages\signalr\_connection.py", line 60, in wait
    gevent.joinall([self.__greenlet], timeout)
  File "src\gevent\greenlet.py", line 849, in gevent._greenlet.joinall
  File "src\gevent\greenlet.py", line 859, in gevent._greenlet.joinall
  File "src\gevent\_hub_primitives.py", line 198, in gevent.__hub_primitives.wait_on_objects
  File "src\gevent\_hub_primitives.py", line 235, in gevent.__hub_primitives.wait_on_objects
  File "src\gevent\_hub_primitives.py", line 125, in gevent.__hub_primitives._WaitIterator.__iter__
AttributeError: 'NoneType' object has no attribute 'rawlink'
I also tried to use locks but that had the same outcome.
Any ideas?
Related
PermissionError Multiprocessing argument pyppeteer.Page
Successful but inefficient:
import asyncio
from pyppeteer import launch
from multiprocessing import Process

async def f(x):
    print("async def f(x,page):", x)
    browser = await launch(headless=False, autoClose=False)
    page = (await browser.pages())[0]
    await page.goto('https://example.com')
    h1 = await page.querySelector("body > div > h1")
    await page.evaluate(f'(element) => element.textContent="{x}"', h1)

def p(x):
    print("def p(x,page):", x)
    asyncio.run(f(x))

async def main():
    pro = Process(target=p, args=("1111",))
    pro.start()
    pro = Process(target=p, args=("2222",))
    pro.start()

if __name__ == "__main__":
    asyncio.get_event_loop().run_until_complete(main())
Creating multiple browsers to process a large amount of work is burdensome, so I am trying to create many tabs instead.
This is the code I want, but I get a PermissionError.
How can I solve this?
import asyncio
from pyppeteer import launch
from multiprocessing import Process

async def f(x, page):
    print("async def f(x,page):", x)
    await page.goto('https://example.com')
    h1 = await page.querySelector("body > div > h1")
    await page.evaluate(f'(element) => element.textContent="{x}"', h1)

def p(x, page):
    print("def p(x,page):", x)
    asyncio.run(f(x, page))

async def main():
    browser = await launch(headless=False, autoClose=False)
    page = (await browser.pages())[0]
    pro = Process(target=p, args=("1111", page))
    pro.start()

if __name__ == "__main__":
    asyncio.get_event_loop().run_until_complete(main())
Error message:
c:\Users\mimmi\python\ttttt.py:24: DeprecationWarning: There is no current event loop
asyncio.get_event_loop().run_until_complete(main())
Traceback (most recent call last):
File "c:\Users\mimmi\python\ttttt.py", line 24, in <module>
asyncio.get_event_loop().run_until_complete(main())
File "C:\python\python311\Lib\asyncio\base_events.py", line 650, in run_until_complete
return future.result()
^^^^^^^^^^^^^^^
File "c:\Users\mimmi\python\ttttt.py", line 21, in main
pro.start()
^^^^^^^^^^^
File "C:\python\python311\Lib\multiprocessing\process.py", line 121, in start
self._popen = self._Popen(self)
^^^^^^^^^^^^^^^^^
File "C:\python\python311\Lib\multiprocessing\context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\python\python311\Lib\multiprocessing\context.py", line 336, in _Popen
return Popen(process_obj)
^^^^^^^^^^^^^^^^^^
File "C:\python\python311\Lib\multiprocessing\popen_spawn_win32.py", line 94, in __init__
reduction.dump(process_obj, to_child)
File "C:\python\python311\Lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle '_thread.lock' object
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\python\python311\Lib\multiprocessing\spawn.py", line 111, in spawn_main
new_handle = reduction.duplicate(pipe_handle,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\python\python311\Lib\multiprocessing\reduction.py", line 79, in duplicate
return _winapi.DuplicateHandle(
^^^^^^^^^^^^^^^^^^^^^^^^
PermissionError: [WinError 5] Access is denied
My environment:
Windows 11
Python 3.11
pyppeteer 1.0.2
I got the desired result with this code.
queue = asyncio.Queue()
browser = await launch(headless=False, autoClose=False)
for i in range(MAX_TASK_COUNT - 1):
    await browser.newPage()
pages = await browser.pages()
for page in pages:
    asyncio.create_task(crawlingTask(queue, page))
await asyncio.create_task(queuePutter(queue, session, appendList))
await queue.join()
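For reference, here is a minimal, self-contained sketch of that same single-browser, many-tabs pattern. The names crawlingTask, queuePutter, MAX_TASK_COUNT, session, and appendList above come from the poster's own project; the worker function and URL list below are hypothetical stand-ins:

import asyncio
from pyppeteer import launch

MAX_TASK_COUNT = 4  # assumed value; tune to taste

async def worker(queue, page):
    # Each tab repeatedly pulls a URL from the queue and processes it.
    while True:
        url = await queue.get()
        try:
            await page.goto(url)
            title = await page.title()
            print(page.url, title)
        finally:
            queue.task_done()

async def main():
    queue = asyncio.Queue()
    browser = await launch(headless=False, autoClose=False)
    # One browser, several tabs: the first page already exists.
    for _ in range(MAX_TASK_COUNT - 1):
        await browser.newPage()
    pages = await browser.pages()
    workers = [asyncio.create_task(worker(queue, page)) for page in pages]
    for url in ["https://example.com"] * 10:
        await queue.put(url)
    await queue.join()   # wait until every queued URL is processed
    for w in workers:
        w.cancel()       # the workers loop forever, so cancel them
    await browser.close()

if __name__ == "__main__":
    asyncio.run(main())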
I wrote a program that posts events using asyncio and aiohttp. This program works when I run it locally; I can post 10k events with no problem. However, I SCPed the whole codebase to a remote machine, and on that machine I can't post more than 15 events without getting this error:
RuntimeError: Event loop is closed
Exception ignored in: <coroutine object Poster.async_post_event at 0x7f4a53989410>
Traceback (most recent call last):
File "/home/bli1/qe-trinity/tracer/utils/poster.py", line 63, in async_post_event
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/client.py", line 565, in __aenter__
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/client.py", line 198, in _request
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/connector.py", line 316, in connect
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/connector.py", line 349, in _release_waiter
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/futures.py", line 332, in set_result
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/futures.py", line 242, in _schedule_callbacks
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 447, in call_soon
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 456, in _call_soon
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 284, in _check_closed
RuntimeError: Event loop is closed
Exception ignored in: <coroutine object Poster.async_post_event at 0x7f4a5397ffc0>
Traceback (most recent call last):
File "/home/bli1/qe-trinity/tracer/utils/poster.py", line 63, in async_post_event
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/client.py", line 565, in __aenter__
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/client.py", line 198, in _request
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/connector.py", line 316, in connect
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/connector.py", line 349, in _release_waiter
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/futures.py", line 332, in set_result
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/futures.py", line 242, in _schedule_callbacks
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 447, in call_soon
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 456, in _call_soon
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 284, in _check_closed
RuntimeError: Event loop is closed
How can I debug this or find out the source of this problem?
Here is the class that I created; I use the post() method to run it:
import uuid
import os
import asyncio
import time
import random
import json
import aiohttp
from tracer.utils.phase import Phase


class Poster(Phase):

    def __init__(self, log, endpoint, num_post, topic, datafile, timeout, oracles, secure=False, thru_proxy=True):
        Phase.__init__(self, log, "post", oracles, secure, thru_proxy)
        self.log = log
        self.num_post = int(num_post)
        self.datafile = datafile.readlines()
        self.topic = topic
        self.endpoint = self.set_endpoint(endpoint, self.topic)
        self.response = None
        self.timeout = timeout

    def random_line(self):
        """ Returns random line from file and converts it to JSON """
        return json.loads(random.choice(self.datafile))

    @staticmethod
    def change_uuid(event):
        """ Creates new UUID for event_id """
        new_uuid = str(uuid.uuid4())
        event["event_header"]["event_id"] = new_uuid
        return event

    @staticmethod
    def wrapevent(event):
        """ Wrap event with metadata for analysis later on """
        return {
            "tracer": {
                "post": {
                    "statusCode": None,
                    "timestamp": None,
                },
                "awsKafkaTimestamp": None,
                "qdcKakfaTimestamp": None,
                "hdfsTimestamp": None
            },
            "event": event
        }

    def gen_random_event(self):
        random_event = self.random_line()
        event = self.change_uuid(random_event)
        dataspec = self.wrapevent(event)
        return dataspec

    async def async_post_event(self, event, session):
        async with session.post(self.endpoint, data=event, proxy=self.proxy) as resp:
            event["tracer"]["post"]["timestamp"] = time.time() * 1000.0
            event["tracer"]["post"]["statusCode"] = resp.status
            unique_id = event["event"]["event_header"]["event_id"]
            oracle_endpoint = os.path.join(self.oracle, unique_id)
            async with session.put(oracle_endpoint, data=json.dumps(event), proxy=self.proxy) as resp:
                if resp.status != 200:
                    self.log.debug("Post to ElasticSearch not 200")
                    self.log.debug(event["event"]["event_header"]["event_id"])
                    self.log.debug("Status code: " + str(resp.status))
                return event["event"]["event_header"]["event_id"], resp.status

    async def async_post_events(self, events):
        coros = []
        conn = aiohttp.TCPConnector(verify_ssl=self.secure)
        async with aiohttp.ClientSession(connector=conn) as session:
            for event in events:
                coros.append(self.async_post_event(event, session))
            return await asyncio.gather(*coros)

    def post(self):
        event_loop = asyncio.get_event_loop()
        try:
            events = [self.gen_random_event() for i in range(self.num_post)]
            start_time = time.time()
            results = event_loop.run_until_complete(self.async_post_events(events))
            print("Time taken: " + str(time.time() - start_time))
        finally:
            event_loop.close()
You cannot reuse a loop once it has been closed. From the AbstractEventLoop.close documentation:
This is idempotent and irreversible. No other methods should be called after this one.
Either remove the loop.close call or create a new loop for each post.
My advice would be to avoid those problems by running everything inside the loop and awaiting async_post_events when needed.
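A minimal sketch of the "new loop for each post" option; only the post() method from the class above changes, and closing the fresh loop no longer affects later calls:

    def post(self):
        # Create a brand-new loop for this call instead of closing the shared default loop.
        event_loop = asyncio.new_event_loop()
        try:
            events = [self.gen_random_event() for _ in range(self.num_post)]
            start_time = time.time()
            results = event_loop.run_until_complete(self.async_post_events(events))
            print("Time taken: " + str(time.time() - start_time))
            return results
        finally:
            event_loop.close()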
I have a Flask app with websockets, and when a client hits a socket event to start threading, I want it to run a thread like this:
@socketio.on('start', namespace='/ws')
def patrol():
    asset = {'x': 0, 'y': 1}
    while True:
        thread_patrol(asset, [[0, 0], [400, 400]])

def patrol(asset, coordinates):
    count = 0
    import itertools
    for coordinate in itertools.cycle(coordinates):
        val = True
        while val:
            asset, val = take_step(asset, coordinate[0], coordinate[1])
            emit('asset',
                 {'data': asset, 'count': count},
                 broadcast=True)
            count += 1
            time.sleep(1)

import threading

def thread_patrol(asset, coordinates):
    print('threading!')
    patrolling_thread = threading.Thread(target=patrol, args=(asset, coordinates))
    patrolling_thread.start()

def take_step(asset, x, y):
    asset[x] = x
    asset[y] = y
But then I get an error because it runs outside of the request context. What do I need to do to allow my app to thread?
threading!
Exception in thread Thread-2005:
Traceback (most recent call last):
File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "app2.py", line 270, in patrol
broadcast=True)
File "/usr/local/lib/python2.7/site-packages/flask_socketio/__init__.py", line 520, in emit
namespace = flask.request.namespace
File "/usr/local/lib/python2.7/site-packages/werkzeug/local.py", line 338, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/local/lib/python2.7/site-packages/werkzeug/local.py", line 297, in _get_current_object
return self.__local()
File "/usr/local/lib/python2.7/site-packages/flask/globals.py", line 20, in _lookup_req_object
raise RuntimeError('working outside of request context')
RuntimeError: working outside of request context
You (I) have to set daemon = True on the thread to tell the app to run it as a background process, and I removed broadcast=True since that wasn't necessary anyway.
def thread_patrol(asset, coordinates):
    patrolling_thread = Thread(target=patrol, args=(asset, coordinates))
    patrolling_thread.daemon = True
    patrolling_thread.start()
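Worth noting: the traceback itself comes from flask_socketio.emit looking up flask.request, which does not exist in a background thread. A small sketch of an alternative, assuming socketio is the Flask-SocketIO instance behind the @socketio.on handler above: emitting through the extension object itself does not require a request context.

def patrol(asset, coordinates):
    import itertools
    count = 0
    for coordinate in itertools.cycle(coordinates):
        asset, val = take_step(asset, coordinate[0], coordinate[1])
        # SocketIO.emit on the extension object works outside a request context,
        # unlike flask_socketio.emit, which reads flask.request.
        socketio.emit('asset', {'data': asset, 'count': count}, namespace='/ws')
        count += 1
        time.sleep(1)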
I am trying to use pathos.multiprocessing.Pool in my project.
However, I run into the following problem when I terminate the Pool.
I use CentOS 6.5, and I'm not sure whether it is caused by pathos.multiprocessing.Pool or by something else. Can anyone help me with it?
Traceback (most recent call last):
File "/usr/local/lib/python2.7/threading.py", line 801, in __bootstrap_inner
self.run()
File "/usr/local/lib/python2.7/threading.py", line 1073, in run
self.function(*self.args, **self.kwargs)
File "receiver.py", line 132, in kill_clients
pool.terminate()
File "/usr/local/lib/python2.7/site-packages/multiprocess/pool.py", line 465, in terminate
self._terminate()
File "/usr/local/lib/python2.7/site-packages/multiprocess/util.py", line 207, in __call__
res = self._callback(*self._args, **self._kwargs)
File "/usr/local/lib/python2.7/site-packages/multiprocess/pool.py", line 513, in _terminate_pool
p.terminate()
File "/usr/local/lib/python2.7/site-packages/multiprocess/process.py", line 137, in terminate
self._popen.terminate()
File "/usr/local/lib/python2.7/site-packages/multiprocess/forking.py", line 174, in terminate
os.kill(self.pid, signal.SIGTERM)
OSError: [Errno 3] No such process
The weird thing is that at the beginning it works well, but when the 4th job is received, this problem appears.
class Receiver:
    def __init__(self):
        ....
        self.results = {}

    def kill_clients(self, client_list, pool):
        for client in client_list:
            client.kill()
        pool.terminate()

    def process_result(self, result):
        if result is None:
            self.results = {}
            return
        res = result.split(':')
        if len(res) != 4:
            raise Exception("result with wrong format: %s" % result)
        self.results['%s_%s' % (res[0], res[1])] = {"code": res[3], "msg": res[4]}
        ...

    def handler(self, job):
        self.lg.debug("Receive job in rtmp_start_handler.")
        self.lg.debug("<%s>" % str(job))
        # each client corresponding one process
        cli_counts = job['count']
        pool = Pool(processes=cli_counts)
        clients = []
        try:
            for i in xrange(cli_counts):
                rtmp_cli = RtmpClient(job['case'], i)
                clients.append(rtmp_cli)
            [pool.apply_async(client.run, callback=self.process_result)
             for client in clients]
            pool.close()
            sleep(1)
            self.lg.debug("All clients are started.")
            t = Timer(
                job['timeout'],
                self.kill_clients,
                args=(clients, pool)
            )
            t.start()
            self.lg.debug("Timer is started. timeout %s s" % job['timeout'])
            pool.join()
        except Exception, e:
            self.lg.warning("Exception occurred: %s" % e)
            self.lg.warning(format_exc())
            return "0"
        # here the self.results shall be ready
        return self.parse_results()
The OSError is not caused by the Pool but by an issue in my own program.
When I use Popen to create a subprocess and exec ffmpeg, it exits immediately (due to another problem), so when I try to kill the subprocess, it no longer exists by then. That's why the OSError is raised.
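A minimal sketch of the kind of guard that avoids this, assuming the client keeps its subprocess.Popen handle in self.proc (the attribute name and the ffmpeg command line are hypothetical):

import os
import signal
import subprocess


class RtmpClient(object):
    def __init__(self, case, index):
        # Hypothetical: the real class presumably stores more state.
        self.proc = subprocess.Popen(["ffmpeg", "-i", case])

    def kill(self):
        # poll() returns None while the child is still alive and its exit code
        # once it has finished, so only signal a process that still exists.
        if self.proc.poll() is None:
            try:
                os.kill(self.proc.pid, signal.SIGTERM)
            except OSError:
                pass  # the child exited between the check and the kill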
I have been writing a bit of code that will eventually take commands from a remote and a local (within the code itself) source; the commands will then be carried out and the results displayed using tkinter.
The problem I am currently having is that when I run the code using threading and queues, this error appears. I have tried putting the GUI code at the bottom, under the for loops for the threading, but that produced a different error.
Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Python34\lib\threading.py", line 921, in _bootstrap_inner
self.run()
File "C:\Python34\lib\threading.py", line 869, in run
self._target(*self._args, **self._kwargs)
File "C:/Users/Eddy/Programing/Python/Sockets/GUI Server.py", line 60, in threader
t_visuals()
File "C:/Users/Eddy/Programing/Python/Sockets/GUI Server.py", line 49, in t_visuals
label = Label(root, width=70, height=30,relief=RIDGE,bd=5,bg="white",textvariable=v,anchor=NW,justify= LEFT,font=("Times New Roman", 12)).grid(row=1,column=0)
File "C:\Python34\lib\tkinter\__init__.py", line 2604, in __init__
Widget.__init__(self, master, 'label', cnf, kw)
File "C:\Python34\lib\tkinter\__init__.py", line 2122, in __init__
(widgetName, self._w) + extra + self._options(cnf))
RuntimeError: main thread is not in main loop
Exception in thread Thread-2:
Traceback (most recent call last):
File "C:\Python34\lib\threading.py", line 921, in _bootstrap_inner
self.run()
File "C:\Python34\lib\threading.py", line 869, in run
self._target(*self._args, **self._kwargs)
File "C:/Users/Eddy/Programing/Python/Sockets/GUI Server.py", line 58, in threader
t_connections()
File "C:/Users/Eddy/Programing/Python/Sockets/GUI Server.py", line 43, in t_connections
sLog_update("waiting for connection...")
File "C:/Users/Eddy/Programing/Python/Sockets/GUI Server.py", line 40, in sLog_update
v.set(g)
File "C:\Python34\lib\tkinter\__init__.py", line 263, in set
return self._tk.globalsetvar(self._name, value)
RuntimeError: main thread is not in main loop
This is the code:
from socket import *
import time
from tkinter import *
import threading
from queue import Queue

#server setup
HOST = ''
PORT = 24601
BUFSIZ = 1024
ADDR = (HOST, PORT)

tcpSerSock = socket(AF_INET, SOCK_STREAM)
tcpSerSock.bind(ADDR)
tcpSerSock.listen(5)

#Window Setup
HEIGHT = 720
WIDTH = 1280

root = Tk()
root.title("Server Controler")
root.geometry("1280x720")

#Variables needed
v = StringVar()
f = StringVar()
g = ""  #Server update log

#locked variables
sLog_lock = threading.Lock()

#t_ means threaded
def sLog_update(self):
    with sLog_lock:
        global g
        currentTime = str("[" + time.ctime() + "] ")
        g += str(currentTime)
        g += str(self)
        g += str("\n")
        v.set(g)

def t_connections():
    sLog_update("waiting for connection...")
    tcpCliSock, addr = tcpSerSock.accept()
    avr = "connected from: " + str(addr)
    sLog_update(avr)

def t_visuals():
    label = Label(root, width=70, height=30, relief=RIDGE, bd=5, bg="white", textvariable=v, anchor=NW, justify=LEFT, font=("Times New Roman", 12)).grid(row=1, column=0)
    entry = Entry(root, width=105, relief=RIDGE, bd=5, textvariable=f).grid(row=2, column=0)
    button = Button(root, command=lambda: sLog_update(f.get()), text="send").grid(row=3, column=0)
    mainloop()

def threader():
    worker = q.get()
    print(worker)
    if worker == 1:
        t_connections()
    elif worker == 0:
        t_visuals()

q = Queue()

for x in range(2):
    t = threading.Thread(target=threader)
    t.daemon = True
    t.start()

for worker in range(20):
    q.put(worker)

q.join()
You can make this problem go away by keeping all of the GUI code in the main thread.
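A minimal sketch of that arrangement, assuming the socket work moves to a background thread and only the main thread ever touches tkinter; the worker hands log strings to the GUI through a queue, which the main thread polls with root.after:

import queue
import threading
import time
import tkinter as tk

log_queue = queue.Queue()

def worker():
    # Background thread: never touches any tkinter object, only the queue.
    while True:
        log_queue.put("[" + time.ctime() + "] waiting for connection...")
        time.sleep(2)

def poll_queue():
    # Main thread: drain the queue, update the widget, then re-schedule itself.
    while not log_queue.empty():
        v.set(v.get() + log_queue.get() + "\n")
    root.after(100, poll_queue)

root = tk.Tk()
v = tk.StringVar()
tk.Label(root, textvariable=v, anchor="nw", justify="left").grid(row=1, column=0)

threading.Thread(target=worker, daemon=True).start()
poll_queue()
root.mainloop()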