asyncio client server does not work inside docker - python

I have client and server (send json messages over http) works ok on my standart ubuntu 16.04
But then I try to run it client and server inside docker or client outside docker and server inside i got errors.
My docker command:
sudo docker run -p 127.0.0.1:8888:8888 -i -t seo_server
Here is my server and client code and errors:
server
import asyncio
import json
import aiohttp
import re
async def get_cy(domain):
return {'result': 'ok','value':10}
async def handle(reader, writer):
# data = await reader.read(1000)
data = bytearray()
while not reader.at_eof():
chunk = await reader.read(2 ** 12)
data += chunk
# print(json.loads(data))
# https://aiomas.readthedocs.io/en/latest/
message = data.decode()
addr = writer.get_extra_info('peername')
print("Received %r from %r" % (message, addr))
m = json.loads(message)
f = m['method']
del m['method']
r = await globals()[f](**m)
r = json.dumps(r)
# await asyncio.sleep(int(m['time']))
print("Send: %r" % r)
writer.write(r.encode())
await writer.drain()
print("Close the client socket")
writer.close()
if __name__ == "__main__":
loop = asyncio.get_event_loop()
coro = asyncio.start_server(handle, '127.0.0.1', 8888, loop=loop)
server = loop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
print('Serving cy microservice on {}'.format(server.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
client
import asyncio
import json
from itertools import zip_longest
import time
def to_json(func):
def wrap(**kwargs):
message = kwargs
message['method'] = func.__name__
print(message)
return asyncio.ensure_future(tcp_send(json.dumps(message)))
return wrap
#to_json
def get_cy(domain):
pass
async def tcp_send(message):
loop = asyncio.get_event_loop()
reader, writer = await asyncio.open_connection('127.0.0.1', 8888,
loop=loop)
print('Send: %r' % message)
writer.write(message.encode())
writer.write_eof()
data = await reader.read()
data = data.decode()
print('Received: %r' % data)
print('Close the socket')
writer.close()
return json.loads(data)
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
def async_map(loop, f, iterable, chunk_size=2):
for chunk in grouper(iterable, chunk_size):
future = asyncio.gather(*(f(param) for param in chunk if param))
loop.run_until_complete(future)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
async def cy(site):
cy = await get_cy(domain=site)
print(site + " cy =", cy)
#update site here
while True:
sites = ('site1.ru', 'site2.ru', 'site3.ru', 'site4.ru', 'site5.ru')
async_map(loop, cy, sites)
time.sleep(100) #if not sites
error's then i try client and server inside docker:
client error
root#341fdee56d6d:/seo_server# python client.py
{'domain': 'site1.ru', 'method': 'get_cy'}
{'domain': 'site2.ru', 'method': 'get_cy'}
Send: '{"domain": "site2.ru", "method": "get_cy"}'
Send: '{"domain": "site1.ru", "method": "get_cy"}'
Received: ''
Close the socket
Traceback (most recent call last):
File "client.py", line 63, in <module>
async_map(loop, cy, sites)
File "client.py", line 49, in async_map
loop.run_until_complete(future)
File "/usr/local/lib/python3.5/asyncio/base_events.py", line 341, in run_until_complete
return future.result()
File "/usr/local/lib/python3.5/asyncio/futures.py", line 276, in result
raise self._exception
File "/usr/local/lib/python3.5/asyncio/tasks.py", line 236, in _step
result = coro.throw(exc)
File "client.py", line 57, in cy
cy = await get_cy(domain=site)
File "/usr/local/lib/python3.5/asyncio/futures.py", line 387, in __iter__
yield self # This tells Task to wait for completion.
File "/usr/local/lib/python3.5/asyncio/tasks.py", line 287, in _wakeup
value = future.result()
File "/usr/local/lib/python3.5/asyncio/futures.py", line 276, in result
raise self._exception
File "/usr/local/lib/python3.5/asyncio/tasks.py", line 238, in _step
result = coro.send(value)
File "client.py", line 36, in tcp_send
return json.loads(data)
File "/usr/local/lib/python3.5/json/__init__.py", line 319, in loads
return _default_decoder.decode(s)
File "/usr/local/lib/python3.5/json/decoder.py", line 339, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/local/lib/python3.5/json/decoder.py", line 357, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
Task was destroyed but it is pending!
task: <Task pending coro=<tcp_send() running at client.py:30> wait_for=<Future pending cb=[Task._wakeup()]> cb=[Task._wakeup()]>
server error
sudo docker run -p 127.0.0.1:8888:8888 -i -t seo_server
Serving cy microservice on ('127.0.0.1', 8888)
Received '{"domain": "site2.ru", "method": "get_cy"}' from ('127.0.0.1', 47768)
http://bar-navig.yandex.ru/u?ver=2&show=31&url=http://site2.ru
Received '{"domain": "site1.ru", "method": "get_cy"}' from ('127.0.0.1', 47770)
http://bar-navig.yandex.ru/u?ver=2&show=31&url=http://site1.ru
Send: '{"result": "ok", "value": "50"}'
Task exception was never retrieved
future: <Task finished coro=<handle() done, defined at seo_server.py:18> exception=ConnectionResetError('Connection lost',)>
Traceback (most recent call last):
File "/usr/local/lib/python3.5/asyncio/tasks.py", line 238, in _step
result = coro.send(value)
File "seo_server.py", line 40, in handle
await writer.drain()
File "/usr/local/lib/python3.5/asyncio/streams.py", line 304, in drain
yield from self._protocol._drain_helper()
File "/usr/local/lib/python3.5/asyncio/streams.py", line 195, in _drain_helper
raise ConnectionResetError('Connection lost')
ConnectionResetError: Connection lost
Send: '{"result": "ok", "value": "50"}'
Task exception was never retrieved
future: <Task finished coro=<handle() done, defined at seo_server.py:18> exception=ConnectionResetError('Connection lost',)>
Traceback (most recent call last):
File "/usr/local/lib/python3.5/asyncio/tasks.py", line 238, in _step
result = coro.send(value)
File "seo_server.py", line 40, in handle
await writer.drain()
File "/usr/local/lib/python3.5/asyncio/streams.py", line 304, in drain
yield from self._protocol._drain_helper()
File "/usr/local/lib/python3.5/asyncio/streams.py", line 195, in _drain_helper
raise ConnectionResetError('Connection lost')
ConnectionResetError: Connection lost
If i try client outside docker i got this client error and no actions at server:
outside client error
/usr/bin/python3.5 /home/se7en/examples/python_3.5/seo_server/client.py
{'method': 'get_cy', 'domain': 'site1.ru'}
{'method': 'get_cy', 'domain': 'site2.ru'}
Send: '{"method": "get_cy", "domain": "site1.ru"}'
Send: '{"method": "get_cy", "domain": "site2.ru"}'
Traceback (most recent call last):
File "/home/se7en/examples/python_3.5/seo_server/client.py", line 63, in <module>
async_map(loop, cy, sites)
File "/home/se7en/examples/python_3.5/seo_server/client.py", line 49, in async_map
loop.run_until_complete(future)
File "/usr/lib/python3.5/asyncio/base_events.py", line 373, in run_until_complete
return future.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 274, in result
raise self._exception
File "/usr/lib/python3.5/asyncio/tasks.py", line 242, in _step
result = coro.throw(exc)
File "/home/se7en/examples/python_3.5/seo_server/client.py", line 57, in cy
cy = await get_cy(domain=site)
File "/usr/lib/python3.5/asyncio/futures.py", line 361, in __iter__
yield self # This tells Task to wait for completion.
File "/usr/lib/python3.5/asyncio/tasks.py", line 297, in _wakeup
future.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 274, in result
raise self._exception
File "/usr/lib/python3.5/asyncio/tasks.py", line 240, in _step
result = coro.send(None)
File "/home/se7en/examples/python_3.5/seo_server/client.py", line 28, in tcp_send
writer.write_eof()
File "/usr/lib/python3.5/asyncio/streams.py", line 294, in write_eof
return self._transport.write_eof()
File "/usr/lib/python3.5/asyncio/selector_events.py", line 745, in write_eof
self._sock.shutdown(socket.SHUT_WR)
OSError: [Errno 107] Transport endpoint is not connected
my docker file:
FROM davidjfelix/python3.5
RUN pip3 install aiohttp
ADD . /seo_server
WORKDIR /seo_server
CMD python seo_server.py
docker version:
$ sudo docker version
Client:
Version: 1.11.2
API version: 1.23
Go version: go1.5.4
Git commit: b9f10c9
Built: Wed Jun 1 22:00:43 2016
OS/Arch: linux/amd64
Server:
Version: 1.11.2
API version: 1.23
Go version: go1.5.4
Git commit: b9f10c9
Built: Wed Jun 1 22:00:43 2016
OS/Arch: linux/amd64
Please help to find a problem and fix

Because of container isolation python threads in different container don't see each other. So for asyncio loop you need to start server and worker/client in the same container. You can do it via launch .sh script as described here https://docs.docker.com/config/containers/multi-service_container/
or same via Supervisord.

I had this same issue.
async server and async client outside docker refusing to connect,
this solved my problem: https://forums.docker.com/t/python-asyncio-container-is-not-receiving-socket-connections/34018
u need to change:
coro = asyncio.start_server(handle, '127.0.0.1', 8888, loop=loop)
to
coro = asyncio.start_server(handle, '0.0.0.0', 8888, loop=loop)
here quoting "currently you just bind the local interface."

Related

FastAPI/asyncpg/Postgresql 200 request per second

I have a simple Flask application which calls FastApi with route /api,
FastAPI calls Postgresql Database.
Need to achieve 200 request/second (insertions through Flask => FastAPI => DB).
Flask
To run it I use gunicorn
gunicorn --worker-class gevent --workers 2 --threads 50 --bind 0.0.0.0:5000 main_api:app
from gevent import monkey
monkey.patch_all()
app = Flask(__name__)
#app.route('/api/mt_retailprice', methods=['POST'])
# #jwt_required()
def mt_retailprice():
try:
request_data = request.get_json()
_write_to_database(request_data)
return data
except Exception as e:
logging.error("Prediction is failed, review that json sent in the body is correct.Exception %s has happened", e)
return e
def _write_to_database(data):
api_url = f'http://127.0.0.1:3000/api/'
return write_sql(api_url, data)
def write_sql(url, data):
"""
Makes a POST request to a specific URL
"""
try:
headers = {
'Accept-Language': 'application/json',
'Content-Type': 'application/json'
}
payload = json.dumps(data)
return requests.post(url, headers=headers, data=payload)
except Exception as e:
logging.error(e)
def main():
configure_logging()
if __name__ == "__main__":
main()
FastAPI
class Database():
async def create_pool(self):
self.pool = await asyncpg.create_pool(user=username, password=password,
database=db_name, host=host, max_size=1000)
def create_app():
app = FastAPI()
db = Database()
#app.on_event("startup")
async def startup():
await db.create_pool()
#app.post('/api')
async def insert_db(request: Request):
data = ...
df = pd.DataFrame.from_dict(data)
tuples = [tuple(x) for x in df.values]
connection = await db.pool.acquire()
s = await connection.copy_records_to_table(table, schema_name=schema, records = tuples, columns = list(df.columns), timeout = 10)
return s
return app
app = create_app()
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=3000)
Here the logs from Apache Benchmark
$ ab -n 200 -c 20 -T "application/json" -p body.json http://127.0.0.1:5000/api/mt_retailprice
This is ApacheBench, Version 2.3 <$Revision: 1879490 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/
Benchmarking 127.0.0.1 (be patient)
Completed 100 requests
Completed 200 requests
Finished 200 requests
Server Software: gunicorn
Server Hostname: 127.0.0.1
Server Port: 5000
Document Path: /api/mt_retailprice
Document Length: 60 bytes
Concurrency Level: 20
Time taken for tests: 12.372 seconds
Complete requests: 200
Failed requests: 0
Total transferred: 42600 bytes
Total body sent: 137800
HTML transferred: 12000 bytes
Requests per second: 16.17 [#/sec] (mean)
Time per request: 1237.222 [ms] (mean)
Time per request: 61.861 [ms] (mean, across all concurrent requests)
Transfer rate: 3.36 [Kbytes/sec] received
10.88 kb/s sent
14.24 kb/s total
I have such error when make more requests. For example
ab -n 300 -c 20 -T "application/json" -p body.json
http://127.0.0.1:5000/api/mt_retailprice
Anyone know what's this error about ? can't find anything.
INFO: 127.0.0.1:50600 - "POST /api/
HTTP/1.1" 500 Internal Server Error
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py", line 372, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/uvicorn/middleware/proxy_headers.py", line 75, in __call__
return await self.app(scope, receive, send)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/fastapi/applications.py", line 270, in __call__
await super().__call__(scope, receive, send)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/starlette/applications.py", line 124, in __call__
await self.middleware_stack(scope, receive, send)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/starlette/middleware/errors.py", line 184, in __call__
raise exc
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/starlette/middleware/errors.py", line 162, in __call__
await self.app(scope, receive, _send)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/starlette/middleware/exceptions.py", line 75, in __call__
raise exc
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/starlette/middleware/exceptions.py", line 64, in __call__
await self.app(scope, receive, sender)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/fastapi/middleware/asyncexitstack.py", line 21, in __call__
raise e
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/fastapi/middleware/asyncexitstack.py", line 18, in __call__
await self.app(scope, receive, send)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/starlette/routing.py", line 680, in __call__
await route.handle(scope, receive, send)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/starlette/routing.py", line 275, in handle
await self.app(scope, receive, send)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/starlette/routing.py", line 65, in app
response = await func(request)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/fastapi/routing.py", line 232, in app
dependant=dependant, values=values, is_coroutine=is_coroutine
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/fastapi/routing.py", line 160, in run_endpoint_function
return await dependant.call(**values)
File "app_test.py", line 49, in insert_db
connection = await db.pool.acquire()
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/pool.py", line 842, in _acquire
return await _acquire_impl()
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/pool.py", line 827, in _acquire_impl
proxy = await ch.acquire() # type: PoolConnectionProxy
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/pool.py", line 141, in acquire
await self.connect()
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/pool.py", line 133, in connect
self._con = await self._pool._get_new_connection()
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/pool.py", line 532, in _get_new_connection
record_class=self._record_class,
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/connect_utils.py", line 781, in _connect_addr
return await __connect_addr(params, timeout, True, *args)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/connect_utils.py", line 833, in __connect_addr
tr, pr = await compat.wait_for(connector, timeout=timeout)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/compat.py", line 66, in wait_for
return await asyncio.wait_for(fut, timeout)
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/asyncio/tasks.py", line 442, in wait_for
return fut.result()
File "/Users/dmitry/anaconda3/envs/nova/lib/python3.7/site-packages/asyncpg/connect_utils.py", line 695, in _create_ssl_connection
host, port)
File "uvloop/loop.pyx", line 1978, in create_connection
socket.gaierror: [Errno 8] nodename nor servname provided, or not known

How to reconnect a websocket connection websocket-client

I've been trying to write code that collects crypto data from Binance. Binance auto disconnects after 24 hours. Is there any way for me to reconnect after disconnection? I believe running forever should take care of that for me, but it dies when an error is thrown. I will be running this program on a server 24/7. I will also need a way to be notified maybe telegram/discord bot that I can build where do I type the code to send when it is disconnected
This is the error I get.
Traceback (most recent call last):
File "exchanges/binance/binance_ticker.py", line 97, in <module>
start()
File "exchanges/binance/binance_ticker.py", line 94, in start
rel.dispatch()
File "/home/pyjobs/.local/lib/python3.8/site-packages/rel/rel.py", line 205, in dispatch
registrar.dispatch()
File "/home/pyjobs/.local/lib/python3.8/site-packages/rel/registrar.py", line 72, in dispatch
if not self.loop():
File "/home/pyjobs/.local/lib/python3.8/site-packages/rel/registrar.py", line 81, in loop
e = self.check_events()
File "/home/pyjobs/.local/lib/python3.8/site-packages/rel/registrar.py", line 232, in check_events
self.callback('read', fd)
File "/home/pyjobs/.local/lib/python3.8/site-packages/rel/registrar.py", line 125, in callback
self.events[etype][fd].callback()
File "/home/pyjobs/.local/lib/python3.8/site-packages/rel/listener.py", line 108, in callback
if not self.cb(*self.args) and not self.persist and self.active:
File "/home/pyjobs/.local/lib/python3.8/site-packages/websocket/_app.py", line 349, in read
op_code, frame = self.sock.recv_data_frame(True)
File "/home/pyjobs/.local/lib/python3.8/site-packages/websocket/_core.py", line 401, in recv_data_frame
frame = self.recv_frame()
File "/home/pyjobs/.local/lib/python3.8/site-packages/websocket/_core.py", line 440, in recv_frame
return self.frame_buffer.recv_frame()
File "/home/pyjobs/.local/lib/python3.8/site-packages/websocket/_abnf.py", line 352, in recv_frame
payload = self.recv_strict(length)
File "/home/pyjobs/.local/lib/python3.8/site-packages/websocket/_abnf.py", line 373, in recv_strict
bytes_ = self.recv(min(16384, shortage))
File "/home/pyjobs/.local/lib/python3.8/site-packages/websocket/_core.py", line 524, in _recv
return recv(self.sock, bufsize)
File "/home/pyjobs/.local/lib/python3.8/site-packages/websocket/_socket.py", line 122, in recv
raise WebSocketConnectionClosedException(
websocket._exceptions.WebSocketConnectionClosedException: Connection to remote host was lost.
My code:
import websocket
import rel
uri = "wss://stream.binance.com:9443/ws/!ticker#arr"
def on_message(ws, message):
print(message)
def on_error(ws, error):
print(error)
write_logs(error)
def on_close(ws, close_status_code, close_msg):
print("### closed ###")
write_logs(str(close_status_code) + str(close_msg))
start(
def on_open(ws):
print("Opened connection")
start()
websocket.enableTrace(True)
ws = websocket.WebSocketApp(uri,
on_open = on_open,
on_message=on_message,
on_error = on_error,
on_close (on_close)
ws.run_forever(dispatcher=rel) #Set the dispatcher to automatic reconnection.
rel.signal(2, rel.abort) # Keyboard Interrupt
rel.dispatch()
start()
The comment in this line of code ws.run_forever(dispatcher=rel) #Set the dispatcher to automatic reconnection. could auto reconnection depending on rel module? And how the module rel and func dispatcher work together?

Python XMLRPC: Cannot allow None even after allow_none = True

I'm trying to write a simple application that communicates using RPCs. I'm using python 3.7's xmlrpc.
This is my server code
MY_ADDR = ("localhost", int(sys.argv[1]))
HOST_ADDR = ("localhost", int(sys.argv[2]))
class RpcServer(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.port = MY_ADDR[1]
self.addr = MY_ADDR[0]
# serve other hosts using this
self.server = SimpleXMLRPCServer((self.addr, self.port))
self.server.register_function(self.recv_ops)
def run(self):
self.server.serve_forever()
def recv_ops(self, sender, op):
print("Sender ", sender, " sent: ", op)
pass
And this is what I'm using as my client's code
def send_ops(host_addr, op):
# contact the other host using this
proxy_addr = "http://{addr}:{port}/".format(addr=host_addr[0], port=host_addr[1])
client_proxy = xmlrpc.client.ServerProxy(proxy_addr, allow_none=True)
resp = client_proxy.recv_ops(MY_ADDR, op)
...
send_ops(HOST_ADDR, ("d", ii, last_line[ii])) # THE RPC CALL I MAKE
Despite setting allow_none=True, I keep getting this:
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 917, in _bootstrap_inner
self.run()
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "nb.py", line 102, in editor
send_ops(HOST_ADDR, ("d", ii, last_line[ii]))
File "nb.py", line 63, in send_ops
resp = client_proxy.recv_ops(MY_ADDR, op)
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/xmlrpc/client.py", line 1112, in __call__
return self.__send(self.__name, args)
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/xmlrpc/client.py", line 1452, in __request
verbose=self.__verbose
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/xmlrpc/client.py", line 1154, in request
return self.single_request(host, handler, request_body, verbose)
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/xmlrpc/client.py", line 1170, in single_request
return self.parse_response(resp)
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/xmlrpc/client.py", line 1342, in parse_response
return u.close()
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/xmlrpc/client.py", line 656, in close
raise Fault(**self._stack[0])
xmlrpc.client.Fault: <Fault 1: "<class 'TypeError'>:cannot marshal None unless allow_none is enabled">
What's tripping my is that the server on the other side actually receives the message (without any None)
Sender ['localhost', 8001] sent: ['d', 4, 'o']
What am I missing here? Any help would be appreciated.
Thanks!
In your server class, add allow_none=True to your SimpleXMLRPCServer instantiation.
self.server = SimpleXMLRPCServer((self.addr, self.port), allow_none=True)
The allow_none and encoding parameters are passed on to xmlrpc.client and control the XML-RPC responses that will be returned from the server.

aiohttp.client_exceptions.ClientResponseError: 400, message='invalid constant string'

I was learning some async/await in python, and i wanted to try it, but
I'm getting this error while trying to connect to chatango via websocket and i don't know what means.
I'm using python 3.6.1 and aiohttp 2.2.3
This is my code:
import asyncio
import aiohttp
msgs = []
async def main():
async with aiohttp.ClientSession() as session:
async with session.ws_connect("ws://s12.chatango.com:8081/") as ws:
for msg in ws:
msgs.append(msg)
print(msg)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
Full traceback:
Traceback (most recent call last):
File "C:\Program Files\Python36\lib\site-packages\aiohttp\client_reqrep.py", line 559, in start
(message, payload) = yield from self._protocol.read()
File "C:\Program Files\Python36\lib\site-packages\aiohttp\streams.py", line 509, in read
yield from self._waiter
File "C:\Program Files\Python36\lib\site-packages\aiohttp\client_proto.py", line 165, in data_received
messages, upgraded, tail = self._parser.feed_data(data)
File "aiohttp\_http_parser.pyx", line 274, in aiohttp._http_parser.HttpParser.feed_data (aiohttp/_http_parser.c:4364)
aiohttp.http_exceptions.BadHttpMessage: 400, message='invalid constant string'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:/Users/joseh/Desktop/a.ws.py", line 42, in <module>
loop.run_until_complete(main())
File "C:\Program Files\Python36\lib\asyncio\base_events.py", line 466, in run_until_complete
return future.result()
File "C:/Users/joseh/Desktop/a.ws.py", line 34, in main
async with session.ws_connect("ws://s12.chatango.com:8081/") as ws:
File "C:\Program Files\Python36\lib\site-packages\aiohttp\client.py", line 603, in __aenter__
self._resp = yield from self._coro
File "C:\Program Files\Python36\lib\site-packages\aiohttp\client.py", line 390, in _ws_connect
proxy_auth=proxy_auth)
File "C:\Program Files\Python36\lib\site-packages\aiohttp\helpers.py", line 91, in __iter__
ret = yield from self._coro
File "C:\Program Files\Python36\lib\site-packages\aiohttp\client.py", line 241, in _request
yield from resp.start(conn, read_until_eof)
File "C:\Program Files\Python36\lib\site-packages\aiohttp\client_reqrep.py", line 564, in start
message=exc.message, headers=exc.headers) from exc
aiohttp.client_exceptions.ClientResponseError: 400, message='invalid constant string'
invalid constant string is a custom response from chatango, they probably want a protocol or some kind of auth header.
If you don't know much about how chatango uses websockets, reverse engineering their system is probably not a good task for learning asyncio and aiohttp.
Better to use something like httparrot which just echos back the message you send it.
Here's your code modified to use httparrot and send 5 messages, get 5 responses, then exit.
import asyncio
import aiohttp
msgs = []
async def main():
async with aiohttp.ClientSession() as session:
async with session.ws_connect('ws://httparrot.herokuapp.com/websocket') as ws:
ws.send_str('hello')
async for msg in ws:
msgs.append(msg)
print(msg)
ws.send_str('hello')
if len(msgs) >= 5:
break
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
print(msgs)

Autobahn Python Errno 99 cannot assign requested address

While trying to set up a Websocket server, I have encountered the following error.
The same code works fine under LAN IP '192.168.x.x', but fails to work with a public ip/domain name
Here is the error trace
Traceback (most recent call last):
File "WSServer.py", line 19, in <module>
server = loop.run_until_complete(coro)
File "/usr/lib64/python3.4/asyncio/base_events.py", line 208, in run_until_complete
return future.result()
File "/usr/lib64/python3.4/asyncio/futures.py", line 243, in result
raise self._exception
File "/usr/lib64/python3.4/asyncio/tasks.py", line 319, in _step
result = coro.send(value)
File "/usr/lib64/python3.4/asyncio/base_events.py", line 579, in create_server
% (sa, err.strerror.lower()))
OSError: [Errno 99] error while attempting to bind on address ('121.6.x.x', 9000): cannot assign requested address
Python Server Code:
from autobahn.asyncio.websocket import WebSocketServerProtocol
class MyServerProtocol(WebSocketServerProtocol):
def onMessage(self, payload, isBinary):
print("message received")
self.sendMessage(payload, isBinary)
if __name__ == '__main__':
import asyncio
from autobahn.asyncio.websocket import WebSocketServerFactory
factory = WebSocketServerFactory()
factory.protocol = MyServerProtocol
loop = asyncio.get_event_loop()
coro = loop.create_server(factory, '121.6.x.x', 9000)
server = loop.run_until_complete(coro)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
server.close()
loop.close()
Could the issue be related with the server setting? e.g. hostname, SELinux

Categories