I have two very similar Python files, A and B, with one tiny difference: the placement of the infinite-loop statement. The first raises an exception while the second works as expected. But why?
Code A:
import json
import multiprocessing
from time import sleep
def aaa(shared_data):
    # Child process A: once per second, write a key into the shared dict and
    # print a JSON snapshot. .copy() materialises the proxy's contents into a
    # plain dict so json.dumps can serialise it.
    while True:
        sleep(1)
        shared_data["hello"] = "worldA"
        print(json.dumps(shared_data.copy()))


def bbb(shared_data):
    # Child process B: same as aaa() but writes a different key.
    while True:
        sleep(1)
        shared_data["world"] = "helloB"
        print(json.dumps(shared_data.copy()))


def run():
    # BUG: the Manager() instance is a local temporary. When run() returns it
    # is garbage-collected, which shuts down the manager's server process, so
    # the children's next proxy access fails to reconnect (FileNotFoundError:
    # the manager's socket is gone). Keeping a reference alive fixes it --
    # see the corrected version later in this document.
    shared_data = multiprocessing.Manager().dict()
    multiprocessing.Process(target=aaa, args=(shared_data, )).start()
    multiprocessing.Process(target=bbb, args=(shared_data, )).start()


if __name__ == "__main__":
    run()
    # Busy-wait forever; the manager created inside run() is already dead here.
    while True:
        pass
Code B:
import json
import multiprocessing
from time import sleep
def aaa(shared_data):
    # Child process A: once per second, set a key on the managed dict and
    # print a JSON snapshot (.copy() turns the proxy into a plain dict).
    while True:
        sleep(1)
        shared_data["hello"] = "worldA"
        print(json.dumps(shared_data.copy()))


def bbb(shared_data):
    # Child process B: identical to aaa() except for the key/value written.
    while True:
        sleep(1)
        shared_data["world"] = "helloB"
        print(json.dumps(shared_data.copy()))


def run():
    shared_data = multiprocessing.Manager().dict()
    multiprocessing.Process(target=aaa, args=(shared_data, )).start()
    multiprocessing.Process(target=bbb, args=(shared_data, )).start()
    # The infinite loop is *inside* run(), so run() never returns and the
    # local `shared_data` (and with it the Manager's server process) stays
    # alive -- this is why version B works while version A crashes.
    while True:
        pass


if __name__ == "__main__":
    run()
Why does the first raise an exception while the second does not?
Output of the first (nothing but the exception):
Process Process-3:
Traceback (most recent call last):
File "/usr/lib/python3.8/multiprocessing/managers.py", line 827, in _callmethod
conn = self._tls.connection
AttributeError: 'ForkAwareLocal' object has no attribute 'connection'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/usr/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "foo.py", line 21, in bbb
shared_data["world"] = "hello" + str(cnt)
File "<string>", line 2, in __setitem__
File "/usr/lib/python3.8/multiprocessing/managers.py", line 831, in _callmethod
self._connect()
File "/usr/lib/python3.8/multiprocessing/managers.py", line 818, in _connect
conn = self._Client(self._token.address, authkey=self._authkey)
Process Process-2:
File "/usr/lib/python3.8/multiprocessing/connection.py", line 502, in Client
c = SocketClient(address)
File "/usr/lib/python3.8/multiprocessing/connection.py", line 630, in SocketClient
s.connect(address)
FileNotFoundError: [Errno 2] No such file or directory
Traceback (most recent call last):
File "/usr/lib/python3.8/multiprocessing/managers.py", line 827, in _callmethod
conn = self._tls.connection
AttributeError: 'ForkAwareLocal' object has no attribute 'connection'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/usr/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "foo.py", line 12, in aaa
shared_data["hello"] = "world" + str(cnt)
File "<string>", line 2, in __setitem__
File "/usr/lib/python3.8/multiprocessing/managers.py", line 831, in _callmethod
self._connect()
File "/usr/lib/python3.8/multiprocessing/managers.py", line 818, in _connect
conn = self._Client(self._token.address, authkey=self._authkey)
File "/usr/lib/python3.8/multiprocessing/connection.py", line 502, in Client
c = SocketClient(address)
File "/usr/lib/python3.8/multiprocessing/connection.py", line 630, in SocketClient
s.connect(address)
FileNotFoundError: [Errno 2] No such file or directory
Output of the second (as expected):
{"hello": "worldA"}
{"hello": "worldA", "world": "helloB"}
{"hello": "worldA", "world": "helloB"}
{"hello": "worldA", "world": "helloB"}
{"hello": "worldA", "world": "helloB"}
{"hello": "worldA", "world": "helloB"}
{"hello": "worldA", "world": "helloB"}
{"hello": "worldA", "world": "helloB"}
...
I am using Python 3.8.5 and 'Ubuntu 20.04.1 LTS (Focal Fossa)'.
Solved: the reason is that the Manager owning shared_data is destroyed after run() returns; changing the code to the following works:
def run():
    shared_data = multiprocessing.Manager().dict()
    multiprocessing.Process(target=aaa, args=(shared_data, )).start()
    multiprocessing.Process(target=bbb, args=(shared_data, )).start()
    # Returning the proxy keeps a reference to it (and hence to its Manager)
    # alive in the caller, so the manager's server process is not shut down
    # when run() returns.
    return shared_data


if __name__ == "__main__":
    # Binding the returned proxy (even to `_`) is what keeps the Manager's
    # server process alive for as long as the children need it.
    _ = run()
    while True:
        pass
Related
I am trying to create a webservice to update a wallet pass using apns push notifications. I am using httpx for this as it can use http/2. I have the following test code for this:
import httpx
import ssl
import asyncio
async def send_push():
    # Build a verification context from the pass certificate.
    # NOTE(review): `context` is created but never passed to the client
    # (e.g. verify=context) -- as written it has no effect on the request.
    context = ssl.create_default_context()
    context.load_verify_locations(cafile = "/Users/valley/Desktop/External_Finder/Learning_Centers_Development/Arcade-Pass/certs/passcertificate.pem")
    payload = {
        "aps" : ""
    }
    # NOTE(review): `data=` with a dict sends a form-encoded body; APNs
    # expects JSON (use json=payload). Also, the ConnectionTerminated
    # additional_data hex decodes to '{"reason":"BadCertif...' -- APNs is
    # rejecting the client certificate, so confirm the .pem bundles both the
    # certificate and its private key and matches the pass type identifier.
    async with httpx.AsyncClient(http2 = True, cert = "/Users/valley/Desktop/External_Finder/Learning_Centers_Development/Arcade-Pass/certs/ArcadePassCertKey.pem") as client:
        r = await client.post("https://api.sandbox.push.apple.com:2197/3/device/4c526af3e9cd29cc0c6f8954de5f68fd1d00348696fe4a984581e35f19fe1ddf", data = payload)
        print(r.http_version)
        print(r.text)


asyncio.run(send_push())
When I try to run this, I am getting the following traceback and error:
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpx/_transports/default.py", line 60, in map_httpcore_exceptions
yield
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpx/_transports/default.py", line 353, in handle_async_request
resp = await self._pool.handle_async_request(req)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpcore/_async/connection_pool.py", line 253, in handle_async_request
raise exc
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpcore/_async/connection_pool.py", line 237, in handle_async_request
response = await connection.handle_async_request(request)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpcore/_async/connection.py", line 90, in handle_async_request
return await self._connection.handle_async_request(request)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpcore/_async/http2.py", line 146, in handle_async_request
raise exc
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpcore/_async/http2.py", line 114, in handle_async_request
status, headers = await self._receive_response(
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpcore/_async/http2.py", line 231, in _receive_response
event = await self._receive_stream_event(request, stream_id)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpcore/_async/http2.py", line 262, in _receive_stream_event
await self._receive_events(request, stream_id)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpcore/_async/http2.py", line 291, in _receive_events
raise RemoteProtocolError(event)
httpcore.RemoteProtocolError: <ConnectionTerminated error_code:ErrorCodes.NO_ERROR, last_stream_id:0, additional_data:7b22726561736f6e223a22426164436572746966>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "test_push_notifications.py", line 24, in <module>
asyncio.run(send_push())
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/asyncio/base_events.py", line 608, in run_until_complete
return future.result()
File "test_push_notifications.py", line 19, in send_push
r = await client.post("https://api.sandbox.push.apple.com:2197/3/device/4c526af3e9cd29cc0c6f8954de5f68fd1d00348696fe4a984581e35f19fe1ddf", data = payload)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpx/_client.py", line 1848, in post
return await self.request(
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpx/_client.py", line 1533, in request
return await self.send(request, auth=auth, follow_redirects=follow_redirects)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpx/_client.py", line 1620, in send
response = await self._send_handling_auth(
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpx/_client.py", line 1648, in _send_handling_auth
response = await self._send_handling_redirects(
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpx/_client.py", line 1685, in _send_handling_redirects
response = await self._send_single_request(request)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpx/_client.py", line 1722, in _send_single_request
response = await transport.handle_async_request(request)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpx/_transports/default.py", line 353, in handle_async_request
resp = await self._pool.handle_async_request(req)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/contextlib.py", line 131, in __exit__
self.gen.throw(type, value, traceback)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/httpx/_transports/default.py", line 77, in map_httpcore_exceptions
raise mapped_exc(message) from exc
httpx.RemoteProtocolError: <ConnectionTerminated error_code:ErrorCodes.NO_ERROR, last_stream_id:0, additional_data:7b22726561736f6e223a22426164436572746966>
Can someone please help me navigate this error and send a successful request to APNS?
I am trying to store feedparser data into a Postgres database via asyncpg and I am getting an error while storing pubdate that is of type timestamptz in the database
I have a Postgres table test with the following structure
+---------------------+----------------------+
| feed_item_id (uuid) | pubdate(timestamptz) |
+---------------------+----------------------+
| ... | ... |
+---------------------+----------------------+
I am using feedparser to load data from RSS and save it to this table
def md5(text):
    """Return the hex MD5 digest of *text* encoded as UTF-8."""
    import hashlib

    digest = hashlib.md5(text.encode('utf-8'))
    return digest.hexdigest()
def fetch():
    # Download and parse the Cointelegraph RSS feed. feedparser exposes each
    # entry's publication date as `published` (an RFC 2822 string) and as
    # `published_parsed` (a time.struct_time) -- neither is a datetime.
    import feedparser
    data = feedparser.parse('https://cointelegraph.com/rss')
    return data
async def insert(rows):
    # Bulk-insert (feed_item_id, pubdate) rows. asyncpg's timestamptz codec
    # accepts only datetime.date/datetime.datetime instances, so each pubdate
    # in `rows` must already be a datetime -- not a string or struct_time.
    import asyncpg
    async with asyncpg.create_pool(user='postgres', database='postgres') as pool:
        async with pool.acquire() as conn:
            results = await conn.executemany('INSERT INTO test (feed_item_id, pubdate) VALUES($1, $2)', rows)
            print(results)
async def main():
    """Fetch the newest feed entry and store its id and publication time.

    Fix: asyncpg's timestamptz codec requires a datetime.datetime, but
    feedparser's `published` is an RFC 2822 string (and `published_parsed`
    a time.struct_time). Parse the string into a timezone-aware datetime
    with email.utils.parsedate_to_datetime before binding it.
    """
    from email.utils import parsedate_to_datetime

    data = fetch()
    first_entry = data.entries[0]
    # e.g. 'Thu, 04 Aug 2022 03:29:19 +0100' -> aware datetime (tz preserved)
    pubdate = parsedate_to_datetime(first_entry.published)
    await insert([(md5(first_entry.guid), pubdate)])


import asyncio
asyncio.run(main())
I immediately get this error
Traceback (most recent call last):
File "asyncpg/protocol/prepared_stmt.pyx", line 168, in asyncpg.protocol.protocol.PreparedStatementState._encode_bind_msg
File "asyncpg/protocol/codecs/base.pyx", line 206, in asyncpg.protocol.protocol.Codec.encode
File "asyncpg/protocol/codecs/base.pyx", line 111, in asyncpg.protocol.protocol.Codec.encode_scalar
File "asyncpg/pgproto/./codecs/datetime.pyx", line 208, in asyncpg.pgproto.pgproto.timestamptz_encode
TypeError: expected a datetime.date or datetime.datetime instance, got 'str'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/homebrew/Cellar/python#3.9/3.9.13_1/Frameworks/Python.framework/Versions/3.9/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/homebrew/Cellar/python#3.9/3.9.13_1/Frameworks/Python.framework/Versions/3.9/lib/python3.9/asyncio/base_events.py", line 647, in run_until_complete
return future.result()
File "<stdin>", line 4, in main
File "<stdin>", line 5, in insert
File "/Users/vr/.local/share/virtualenvs/python-load-feed-items-uopsj7-P/lib/python3.9/site-packages/asyncpg/connection.py", line 358, in executemany
return await self._executemany(command, args, timeout)
File "/Users/vr/.local/share/virtualenvs/python-load-feed-items-uopsj7-P/lib/python3.9/site-packages/asyncpg/connection.py", line 1697, in _executemany
result, _ = await self._do_execute(query, executor, timeout)
File "/Users/vr/.local/share/virtualenvs/python-load-feed-items-uopsj7-P/lib/python3.9/site-packages/asyncpg/connection.py", line 1731, in _do_execute
result = await executor(stmt, None)
File "asyncpg/protocol/protocol.pyx", line 254, in bind_execute_many
File "asyncpg/protocol/coreproto.pyx", line 958, in asyncpg.protocol.protocol.CoreProtocol._bind_execute_many_more
File "asyncpg/protocol/protocol.pyx", line 220, in genexpr
File "asyncpg/protocol/prepared_stmt.pyx", line 197, in asyncpg.protocol.protocol.PreparedStatementState._encode_bind_msg
asyncpg.exceptions.DataError: invalid input for query argument $2 in element #0 of executemany() sequence: 'Thu, 04 Aug 2022 03:29:19 +0100' (expected a datetime.date or datetime.datetime instance, got 'str')
I tried to use the published_parsed field instead of the published and I got another error
# Second attempt: substituting published_parsed (a time.struct_time) for the
# published string -- asyncpg rejects this as well, since struct_time is not
# a datetime either (see the traceback below).
async def main():
    ...
    await insert([(md5(first_entry.guid), first_entry.published_parsed)])
asyncio.run(main())
This time the error is complaining about struct_time
Traceback (most recent call last):
File "asyncpg/protocol/prepared_stmt.pyx", line 168, in asyncpg.protocol.protocol.PreparedStatementState._encode_bind_msg
File "asyncpg/protocol/codecs/base.pyx", line 206, in asyncpg.protocol.protocol.Codec.encode
File "asyncpg/protocol/codecs/base.pyx", line 111, in asyncpg.protocol.protocol.Codec.encode_scalar
File "asyncpg/pgproto/./codecs/datetime.pyx", line 208, in asyncpg.pgproto.pgproto.timestamptz_encode
TypeError: expected a datetime.date or datetime.datetime instance, got 'struct_time'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/homebrew/Cellar/python#3.9/3.9.13_1/Frameworks/Python.framework/Versions/3.9/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/homebrew/Cellar/python#3.9/3.9.13_1/Frameworks/Python.framework/Versions/3.9/lib/python3.9/asyncio/base_events.py", line 647, in run_until_complete
return future.result()
File "<stdin>", line 4, in main
File "<stdin>", line 5, in insert
File "/Users/vr/.local/share/virtualenvs/python-load-feed-items-uopsj7-P/lib/python3.9/site-packages/asyncpg/connection.py", line 358, in executemany
return await self._executemany(command, args, timeout)
File "/Users/vr/.local/share/virtualenvs/python-load-feed-items-uopsj7-P/lib/python3.9/site-packages/asyncpg/connection.py", line 1697, in _executemany
result, _ = await self._do_execute(query, executor, timeout)
File "/Users/vr/.local/share/virtualenvs/python-load-feed-items-uopsj7-P/lib/python3.9/site-packages/asyncpg/connection.py", line 1731, in _do_execute
result = await executor(stmt, None)
File "asyncpg/protocol/protocol.pyx", line 254, in bind_execute_many
File "asyncpg/protocol/coreproto.pyx", line 958, in asyncpg.protocol.protocol.CoreProtocol._bind_execute_many_more
File "asyncpg/protocol/protocol.pyx", line 220, in genexpr
File "asyncpg/protocol/prepared_stmt.pyx", line 197, in asyncpg.protocol.protocol.PreparedStatementState._encode_bind_msg
asyncpg.exceptions.DataError: invalid input for query argument $2 in element #0 of executemany() sequence: time.struct_time(tm_year=2022, tm_mon=8,... (expected a datetime.date or datetime.datetime instance, got 'struct_time')
How do I store this feedparser date into Postgres using asyncpg?
Can anyone suggest a Python client for AWS Redis with cluster mode enabled?
I'm using redis-py-cluster, but it fails:
Sample code:
from rediscluster import StrictRedisCluster

# AWS ElastiCache disables admin commands such as CONFIG, so the client's
# full-coverage probe (CONFIG GET cluster-require-full-coverage) fails with
# "unknown command 'CONFIG'" -- the fix (see the accepted answer below) is
# to pass skip_full_coverage_check=True to skip that probe.
startup_nodes = [{"host": "xxxx.clustercfg.apn2.cache.amazonaws.com", "port": "6379"}]
r = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, skip_full_coverage_check=False)
r.set('foo', 'bar')
value = r.get('foo')
======
Exception:
Traceback (most recent call last):
File "testRedisCluster.py", line 11, in
r = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, skip_full_coverage_check=False)
File "/Library/Python/2.7/site-packages/rediscluster/client.py", line 181, in init
**kwargs
File "/Library/Python/2.7/site-packages/rediscluster/connection.py", line 141, in init
self.nodes.initialize()
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 228, in initialize
need_full_slots_coverage = self.cluster_require_full_coverage(nodes_cache)
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 270, in cluster_require_full_coverage
return any(node_require_full_coverage(node) for node in nodes.values())
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 270, in
return any(node_require_full_coverage(node) for node in nodes.values())
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 267, in node_require_full_coverage
return "yes" in r_node.config_get("cluster-require-full-coverage").values()
File "/Library/Python/2.7/site-packages/redis/client.py", line 715, in config_get
return self.execute_command('CONFIG GET', pattern)
File "/Library/Python/2.7/site-packages/redis/client.py", line 668, in execute_command
return self.parse_response(connection, command_name, **options)
File "/Library/Python/2.7/site-packages/redis/client.py", line 680, in parse_response
response = connection.read_response()
File "/Library/Python/2.7/site-packages/redis/connection.py", line 629, in read_response
raise response
redis.exceptions.ResponseError: unknown command 'CONFIG'
I'm using redis-py-cluster 1.3.4.
Any idea?
Change the parameter skip_full_coverage_check=False to skip_full_coverage_check=True
Is it possible to spawn multiple processes from a single thread? Or is it a proper design to implement?
My code sample is -
def run_all_tasks(self):
    """Run the task list once per platform, each platform on its own thread.

    Spawning processes from these worker threads (done inside
    _run_task_list) is legal; the join() loop below blocks until every
    platform's run has finished.
    """
    # BUG FIX: taskset_threads was never initialised, so the first
    # .append() raised NameError.
    taskset_threads = []
    for platform in self._platforms:
        task_thread = threading.Thread(
            target=self._run_task_list, args=(
                self._get_tasks(), platform,))
        taskset_threads.append(task_thread)
    # Start every thread first, then wait for all of them to finish.
    for taskset_thread in taskset_threads:
        taskset_thread.start()
    for taskset_thread in taskset_threads:
        taskset_thread.join()
def _run_task_list(self, tasklist, platform):
    # Runs each task from `tasklist` in its own process, sharing state via a
    # Manager dict. NOTE(review): as quoted this snippet is truncated -- the
    # outer `try:` has no matching except/finally, and `task` is referenced
    # before the `for task in tasklist:` loop binds it (NameError on first
    # use); presumably these lines belong inside the loop in the real code.
    try:
        test_case_name = task.__class__.__name__
        try:
            test_case_name = task._get_test_case_name()
        except:
            test_case_name = task.__class__.__name__
            pass
        max_runtime = task.get_max_runtime()
        # The Manager's server process is owned by this thread; if the
        # manager is garbage-collected (or this method exits) while a task
        # process still touches self._shared_mem, the proxy's reconnect
        # fails -- the quoted WindowsError / FileNotFoundError ("cannot find
        # the file specified") comes from exactly that race.
        manager = Manager()
        self._shared_mem = manager.dict()
        for task in tasklist:
            task_proc = Process(
                target=self.proc_setup,
                args=(task, self, self._shared_mem))
            task_proc.start()
            # join(max_runtime) only waits up to the timeout; it does not
            # terminate a task that overruns.
            task_proc.join(max_runtime)
This works; however, sometimes it gives the following error:
Traceback (most recent call last):
File "C:\wor\lib\TaskSet.py", line 430, in _run_task_list
if "warning" in self._shared_mem:
File "<string>", line 2, in __contains__
File "C:\Python27\lib\multiprocessing\managers.py", line 755, in _callmethod
self._connect()
File "C:\Python27\lib\multiprocessing\managers.py", line 742, in _connect
conn = self._Client(self._token.address, authkey=self._authkey)
File "C:\Python27\lib\multiprocessing\connection.py", line 167, in Client
c = PipeClient(address)
File "C:\Python27\lib\multiprocessing\connection.py", line 387, in PipeClient
win32.WaitNamedPipe(address, 1000)
WindowsError: [Error 2] The system cannot find the file specified
This can also be seen on the Linux platform.
I'm new to Python 3. I use the aiohttp module with Python 3.5.
When I run my project, I get the following error:
TypeError: an integer is required (got type str)
The stack-trace is:
Traceback (most recent call last):
File "/home/santi/tesis/tanner/server.py", line 82, in <module>
srv = loop.run_until_complete(f)
File "/usr/lib/python3.5/asyncio/base_events.py", line 373, in run_until_complete
return future.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 274, in result
raise self._exception
File "/usr/lib/python3.5/asyncio/tasks.py", line 240, in _step
result = coro.send(None)
File "/usr/lib/python3.5/asyncio/base_events.py", line 949, in create_server
sock.bind(sa)
The code is:
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    # FIX: the port must be an int. Passing the string '8090' made
    # socket.bind() raise "TypeError: an integer is required (got type str)"
    # inside loop.create_server().
    f = loop.create_server(
        lambda: HttpRequestHandler(debug=False, keep_alive=75), '0.0.0.0', 8090)
    srv = loop.run_until_complete(f)
    print('serving on', srv.sockets[0].getsockname())
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
What is the error in my code?
What am I doing wrong?
The port number should be an integer, not a string:
# Port passed as the int 8090 (not the string '8090') -- socket.bind
# requires an integer port.
f = loop.create_server(
    lambda: HttpRequestHandler(debug=False, keep_alive=75), '0.0.0.0', 8090)