I'm new to Python 3. I'm using the aiohttp module with Python 3.5.
When I run my project, I get the following error:
TypeError: an integer is required (got type str)
The stack trace is:
Traceback (most recent call last):
File "/home/santi/tesis/tanner/server.py", line 82, in <module>
srv = loop.run_until_complete(f)
File "/usr/lib/python3.5/asyncio/base_events.py", line 373, in run_until_complete
return future.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 274, in result
raise self._exception
File "/usr/lib/python3.5/asyncio/tasks.py", line 240, in _step
result = coro.send(None)
File "/usr/lib/python3.5/asyncio/base_events.py", line 949, in create_server
sock.bind(sa)
The code is:
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    f = loop.create_server(
        lambda: HttpRequestHandler(debug=False, keep_alive=75), '0.0.0.0', '8090')
    srv = loop.run_until_complete(f)
    print('serving on', srv.sockets[0].getsockname())
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
What is the error in my code?
What am I doing wrong?
The port number should be an integer. You are passing it as the string '8090', which asyncio hands through to sock.bind() (as the stack trace shows), and bind() requires an int:
f = loop.create_server(
    lambda: HttpRequestHandler(debug=False, keep_alive=75), '0.0.0.0', 8090)
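For reference, the failure can be reproduced with a bare socket, since the stack trace shows create_server dying inside sock.bind(sa) (a minimal sketch of mine, not part of the original answer):

import socket

sock = socket.socket()
# bind() needs the port as an int; a str raises the same TypeError
sock.bind(('0.0.0.0', '8090'))  # TypeError: an integer is required (got type str)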
PermissionError Multiprocessing argument pyppeteer.Page
This version works but is inefficient, because it launches a separate browser for every process:
import asyncio
from pyppeteer import launch
from multiprocessing import Process

async def f(x):
    print("async def f(x,page):", x)
    browser = await launch(headless=False, autoClose=False)
    page = (await browser.pages())[0]
    await page.goto('https://example.com')
    h1 = await page.querySelector("body > div > h1")
    await page.evaluate(f'(element) => element.textContent="{x}"', h1)

def p(x):
    print("def p(x,page):", x)
    asyncio.run(f(x))

async def main():
    pro = Process(target=p, args=("1111",))
    pro.start()
    pro = Process(target=p, args=("2222",))
    pro.start()

if __name__ == "__main__":
    asyncio.get_event_loop().run_until_complete(main())
Since creating a separate browser for each task is too heavy when there is a lot to process, I'm trying to open many tabs in a single browser instead.
This is the code I want, but it raises a PermissionError.
How can I solve this?
import asyncio
from pyppeteer import launch
from multiprocessing import Process

async def f(x, page):
    print("async def f(x,page):", x)
    await page.goto('https://example.com')
    h1 = await page.querySelector("body > div > h1")
    await page.evaluate(f'(element) => element.textContent="{x}"', h1)

def p(x, page):
    print("def p(x,page):", x)
    asyncio.run(f(x, page))

async def main():
    browser = await launch(headless=False, autoClose=False)
    page = (await browser.pages())[0]
    pro = Process(target=p, args=("1111", page))
    pro.start()

if __name__ == "__main__":
    asyncio.get_event_loop().run_until_complete(main())
Error message:
c:\Users\mimmi\python\ttttt.py:24: DeprecationWarning: There is no current event loop
asyncio.get_event_loop().run_until_complete(main())
Traceback (most recent call last):
File "c:\Users\mimmi\python\ttttt.py", line 24, in <module>
asyncio.get_event_loop().run_until_complete(main())
File "C:\python\python311\Lib\asyncio\base_events.py", line 650, in run_until_complete
return future.result()
^^^^^^^^^^^^^^^
File "c:\Users\mimmi\python\ttttt.py", line 21, in main
pro.start()
^^^^^^^^^^^
File "C:\python\python311\Lib\multiprocessing\process.py", line 121, in start
self._popen = self._Popen(self)
^^^^^^^^^^^^^^^^^
File "C:\python\python311\Lib\multiprocessing\context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\python\python311\Lib\multiprocessing\context.py", line 336, in _Popen
return Popen(process_obj)
^^^^^^^^^^^^^^^^^^
File "C:\python\python311\Lib\multiprocessing\popen_spawn_win32.py", line 94, in __init__
reduction.dump(process_obj, to_child)
File "C:\python\python311\Lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle '_thread.lock' object
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\python\python311\Lib\multiprocessing\spawn.py", line 111, in spawn_main
new_handle = reduction.duplicate(pipe_handle,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\python\python311\Lib\multiprocessing\reduction.py", line 79, in duplicate
return _winapi.DuplicateHandle(
^^^^^^^^^^^^^^^^^^^^^^^^
PermissionError: [WinError 5] Access is denied
My environment:
Windows 11
Python 3.11
pyppeteer 1.0.2
I got the desired result with this code. A Page cannot be handed to another process (pickling it fails on its internal _thread.lock, as the traceback shows), so everything stays in one event loop and the tabs are fed work through an asyncio.Queue:
queue = asyncio.Queue()
browser = await launch(headless=False, autoClose=False)
# the browser starts with one page, so open MAX_TASK_COUNT - 1 extra tabs
for i in range(MAX_TASK_COUNT - 1):
    await browser.newPage()
pages = await browser.pages()
# crawlingTask and queuePutter are my own coroutines: each tab consumes
# items from the queue, and queuePutter fills it (session and appendList
# are other objects from my project)
for page in pages:
    asyncio.create_task(crawlingTask(queue, page))
await asyncio.create_task(queuePutter(queue, session, appendList))
await queue.join()
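For reference, a minimal self-contained sketch of that pattern (worker, MAX_TASK_COUNT, the work items, and the example URL are placeholders of mine, standing in for the crawlingTask/queuePutter pieces above):

import asyncio
from pyppeteer import launch

MAX_TASK_COUNT = 4  # hypothetical number of tabs

async def worker(queue, page):
    # each tab repeatedly pulls one work item from the shared queue
    while True:
        x = await queue.get()
        await page.goto('https://example.com')
        h1 = await page.querySelector("body > div > h1")
        await page.evaluate(f'(element) => element.textContent="{x}"', h1)
        queue.task_done()

async def main():
    queue = asyncio.Queue()
    browser = await launch(headless=False, autoClose=False)
    # the browser starts with one page; open the remaining tabs
    for _ in range(MAX_TASK_COUNT - 1):
        await browser.newPage()
    for page in await browser.pages():
        asyncio.create_task(worker(queue, page))
    for i in range(10):
        await queue.put(str(i))
    await queue.join()  # wait until every queued item has been processed
    await browser.close()

asyncio.run(main())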
I have two very similar Python files, A and B, with one tiny difference: the placement of the infinite busy loop. The first raises an exception while the second works as expected. But why?!
Code A:
import json
import multiprocessing
from time import sleep

def aaa(shared_data):
    while True:
        sleep(1)
        shared_data["hello"] = "worldA"
        print(json.dumps(shared_data.copy()))

def bbb(shared_data):
    while True:
        sleep(1)
        shared_data["world"] = "helloB"
        print(json.dumps(shared_data.copy()))

def run():
    shared_data = multiprocessing.Manager().dict()
    multiprocessing.Process(target=aaa, args=(shared_data, )).start()
    multiprocessing.Process(target=bbb, args=(shared_data, )).start()

if __name__ == "__main__":
    run()
    while True:
        pass
Code B:
import json
import multiprocessing
from time import sleep

def aaa(shared_data):
    while True:
        sleep(1)
        shared_data["hello"] = "worldA"
        print(json.dumps(shared_data.copy()))

def bbb(shared_data):
    while True:
        sleep(1)
        shared_data["world"] = "helloB"
        print(json.dumps(shared_data.copy()))

def run():
    shared_data = multiprocessing.Manager().dict()
    multiprocessing.Process(target=aaa, args=(shared_data, )).start()
    multiprocessing.Process(target=bbb, args=(shared_data, )).start()
    while True:
        pass

if __name__ == "__main__":
    run()
Why does the first raise an exception and the second not?
Output of the first (nothing but the exception):
Process Process-3:
Traceback (most recent call last):
File "/usr/lib/python3.8/multiprocessing/managers.py", line 827, in _callmethod
conn = self._tls.connection
AttributeError: 'ForkAwareLocal' object has no attribute 'connection'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/usr/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "foo.py", line 21, in bbb
shared_data["world"] = "hello" + str(cnt)
File "<string>", line 2, in __setitem__
File "/usr/lib/python3.8/multiprocessing/managers.py", line 831, in _callmethod
self._connect()
File "/usr/lib/python3.8/multiprocessing/managers.py", line 818, in _connect
conn = self._Client(self._token.address, authkey=self._authkey)
Process Process-2:
File "/usr/lib/python3.8/multiprocessing/connection.py", line 502, in Client
c = SocketClient(address)
File "/usr/lib/python3.8/multiprocessing/connection.py", line 630, in SocketClient
s.connect(address)
FileNotFoundError: [Errno 2] No such file or directory
Traceback (most recent call last):
File "/usr/lib/python3.8/multiprocessing/managers.py", line 827, in _callmethod
conn = self._tls.connection
AttributeError: 'ForkAwareLocal' object has no attribute 'connection'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/usr/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "foo.py", line 12, in aaa
shared_data["hello"] = "world" + str(cnt)
File "<string>", line 2, in __setitem__
File "/usr/lib/python3.8/multiprocessing/managers.py", line 831, in _callmethod
self._connect()
File "/usr/lib/python3.8/multiprocessing/managers.py", line 818, in _connect
conn = self._Client(self._token.address, authkey=self._authkey)
File "/usr/lib/python3.8/multiprocessing/connection.py", line 502, in Client
c = SocketClient(address)
File "/usr/lib/python3.8/multiprocessing/connection.py", line 630, in SocketClient
s.connect(address)
FileNotFoundError: [Errno 2] No such file or directory
Output of the second (as expected):
{"hello": "worldA"}
{"hello": "worldA", "world": "helloB"}
{"hello": "worldA", "world": "helloB"}
{"hello": "worldA", "world": "helloB"}
{"hello": "worldA", "world": "helloB"}
{"hello": "worldA", "world": "helloB"}
{"hello": "worldA", "world": "helloB"}
{"hello": "worldA", "world": "helloB"}
...
I am using Python 3.8.5 on Ubuntu 20.04.1 LTS (Focal Fossa).
Solved: the reason is that shared_data (and the Manager behind it) is garbage-collected as soon as run() returns, which shuts down the manager's server process, so the children can no longer connect to it. Keeping a reference alive works:
def run():
    shared_data = multiprocessing.Manager().dict()
    multiprocessing.Process(target=aaa, args=(shared_data, )).start()
    multiprocessing.Process(target=bbb, args=(shared_data, )).start()
    return shared_data

if __name__ == "__main__":
    _ = run()
    while True:
        pass
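An equivalent fix (a variant of mine, not part of the original answer) is to keep the Manager itself in a variable and block on the child processes instead of busy-waiting, so nothing gets garbage-collected while the children run:

def run():
    manager = multiprocessing.Manager()  # keep a reference so the manager's
    shared_data = manager.dict()         # server process is not shut down
    procs = [
        multiprocessing.Process(target=aaa, args=(shared_data,)),
        multiprocessing.Process(target=bbb, args=(shared_data,)),
    ]
    for proc in procs:
        proc.start()
    for proc in procs:
        proc.join()  # blocks here, keeping manager and shared_data alive

if __name__ == "__main__":
    run()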
I'm trying to run this code:
import asyncio

async def eva(code):
    exec("async def ex(): return {}".format(code))
    return await asyncio.wait_for(locals()["ex"](), timeout=1.0)

async def main():
    while True:
        code = input()
        x = await asyncio.wait_for(eva(code), timeout=1.0)
        print(x)

asyncio.run(main())
And I get the following error:
Traceback (most recent call last):
  File "eval.py", in <module>
    asyncio.run(main())
  File "C:\Users\{user}\AppData\Local\Programs\Python\Python37\lib\asyncio\runners.py", line 43, in run
    return loop.run_until_complete(main)
  File "C:\Users\{user}\AppData\Local\Programs\Python\Python37\lib\asyncio\base_events.py", line 584, in run_until_complete
    return future.result()
  File "eval.py", line 10, in main
    x = await asyncio.wait_for(eva(code), timeout=1.0)
  File "C:\Users\{user}\AppData\Local\Programs\Python\Python37\lib\asyncio\tasks.py", line 416, in wait_for
    return fut.result()
  File "eval.py", line 5, in eva
    return await asyncio.wait_for(locals()["ex"](), timeout=1.0)
  File "C:\Users\{user}\AppData\Local\Programs\Python\Python37\lib\asyncio\tasks.py", line 416, in wait_for
    return fut.result()
  File "<string>", line 1, in ex
TypeError: 'int' object is not iterable
Can you help me understand what exactly is happening?
From the traceback you can see that the error happened in the function ex:
File "<string>", line 1, in ex
And shortly before it this line was executed:
File "eval.py", line 5, in eva
return await asyncio.wait_for(locals()["ex"](), timeout=1.0)
In other words, the exception was raised somewhere inside the coroutine ex that you got from locals()["ex"].
Exception message is:
TypeError: 'int' object is not iterable
You can google it to see the typical situations in which it occurs, but it's also easy to guess: something inside ex tried to iterate over an object of type int.
Something like this happened:
import asyncio

async def ex():
    for i in 123:
        pass

async def main():
    return await asyncio.wait_for(ex(), timeout=1.0)

asyncio.run(main())
Run it and you'll see something similar:
File "...\main.py", line 15, in main
return await asyncio.wait_for(ex(), timeout=1.0)
File "...\python37\lib\asyncio\tasks.py", line 416, in wait_for
return fut.result()
File "...\main.py", line 11, in ex
for i in 123:
TypeError: 'int' object is not iterable
Can anyone suggest a Python client for AWS Redis with cluster mode enabled?
I'm using redis-py-cluster, but it fails.
Sample code:
from rediscluster import StrictRedisCluster
startup_nodes = [{"host": "xxxx.clustercfg.apn2.cache.amazonaws.com", "port": "6379"}]
r = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, skip_full_coverage_check=False)
r.set('foo', 'bar')
value = r.get('foo')
Exception:
Traceback (most recent call last):
File "testRedisCluster.py", line 11, in
r = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, skip_full_coverage_check=False)
File "/Library/Python/2.7/site-packages/rediscluster/client.py", line 181, in init
**kwargs
File "/Library/Python/2.7/site-packages/rediscluster/connection.py", line 141, in init
self.nodes.initialize()
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 228, in initialize
need_full_slots_coverage = self.cluster_require_full_coverage(nodes_cache)
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 270, in cluster_require_full_coverage
return any(node_require_full_coverage(node) for node in nodes.values())
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 270, in
return any(node_require_full_coverage(node) for node in nodes.values())
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 267, in node_require_full_coverage
return "yes" in r_node.config_get("cluster-require-full-coverage").values()
File "/Library/Python/2.7/site-packages/redis/client.py", line 715, in config_get
return self.execute_command('CONFIG GET', pattern)
File "/Library/Python/2.7/site-packages/redis/client.py", line 668, in execute_command
return self.parse_response(connection, command_name, **options)
File "/Library/Python/2.7/site-packages/redis/client.py", line 680, in parse_response
response = connection.read_response()
File "/Library/Python/2.7/site-packages/redis/connection.py", line 629, in read_response
raise response
redis.exceptions.ResponseError: unknown command 'CONFIG'
I'm using redis-py-cluster 1.3.4.
Any idea?
Change the parameter skip_full_coverage_check=False to skip_full_coverage_check=True.
AWS ElastiCache disables the CONFIG command, so the full-coverage check, which runs CONFIG GET cluster-require-full-coverage (the last frames of your traceback), always fails with "unknown command 'CONFIG'"; setting the flag to True skips that check.
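With that change, the sample from the question should run; a sketch (the endpoint is the placeholder from the question):

from rediscluster import StrictRedisCluster

startup_nodes = [{"host": "xxxx.clustercfg.apn2.cache.amazonaws.com", "port": "6379"}]
# True skips the CONFIG GET call that ElastiCache rejects
r = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True,
                       skip_full_coverage_check=True)
r.set('foo', 'bar')
print(r.get('foo'))  # bar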
Is it possible to spawn multiple processes from a single thread? And is that a proper design to implement?
My code sample is:
def run_all_tasks(self):
    taskset_threads = []
    for platform in self._platforms:
        task_thread = threading.Thread(
            target=self._run_task_list, args=(
                self._get_tasks(), platform,))
        taskset_threads.append(task_thread)
    for taskset_thread in taskset_threads:
        taskset_thread.start()
    for taskset_thread in taskset_threads:
        taskset_thread.join()

def _run_task_list(self, tasklist, platform):
    try:
        manager = Manager()
        self._shared_mem = manager.dict()
        for task in tasklist:
            test_case_name = task.__class__.__name__
            try:
                test_case_name = task._get_test_case_name()
            except:
                test_case_name = task.__class__.__name__
            max_runtime = task.get_max_runtime()
            task_proc = Process(
                target=self.proc_setup,
                args=(task, self, self._shared_mem))
            task_proc.start()
            task_proc.join(max_runtime)
        # ... (except/finally and the rest of the method omitted here)
This works; however, it sometimes gives the following error:
Traceback (most recent call last):
File "C:\wor\lib\TaskSet.py", line 430, in _run_task_list
if "warning" in self._shared_mem:
File "<string>", line 2, in __contains__
File "C:\Python27\lib\multiprocessing\managers.py", line 755, in _callmethod
self._connect()
File "C:\Python27\lib\multiprocessing\managers.py", line 742, in _connect
conn = self._Client(self._token.address, authkey=self._authkey)
File "C:\Python27\lib\multiprocessing\connection.py", line 167, in Client
c = PipeClient(address)
File "C:\Python27\lib\multiprocessing\connection.py", line 387, in PipeClient
win32.WaitNamedPipe(address, 1000)
WindowsError: [Error 2] The system cannot find the file specified
This can also be seen on Linux.