Unit testing using Tornado Websocket - no attribute 'io_loop' error - python

I have stitched together Tornado websocket client code and I am using it in my Python unit test case. This is my first time using Tornado websockets and I am not very familiar with its unit test API. I am looking for some help understanding Tornado's asynchronous websocket unit test code and getting the case below working.
Client class code:
import logging
import logging.config
import ssl
import time
import traceback

from tornado.concurrent import Future
from tornado import gen, websocket
from tornado.httpclient import HTTPError, HTTPRequest
from tornado.log import gen_log, app_log
from tornado.web import Application, RequestHandler


class TorWebSocketClient():
    def __init__(self, ip_addr, port, cookie):
        self.ip_addr = ip_addr
        self.port = port
        self.cookie = cookie
        self.sockConnected = False
        self.logger = logging.getLogger(__name__)

    def Connect(self):
        # Creating the websocket client for each test case.
        url = "ws://{0}:{1}/socket{2}".format(str(self.ip_addr), str(self.port), self.cookie)
        self.logger.debug('Websocket URL: ' + url)
        sslopt = {"cert_reqs": ssl.CERT_NONE,
                  "check_hostname": False,
                  "ssl_version": ssl.PROTOCOL_TLSv1}
        self.logger.debug('New web socket connection is being established by the client')
        self.ws = websocket.websocket_connect(HTTPRequest(url, headers=headers, ssl_options=sslopt), io_loop=self.io_loop)
        # Start the websocket client thread. A wait is added till connection is established.
        self.sockConnected = True

    def send(self, data):
        # Wait till websocket is connected.
        if not self.ws.sock.connected:
            self.logger.debug('Send failed; Websocket connection is not yet established')
            return
        self.logger.info('Sending data to the server: ' + data)
        self.ws.write_message(data)

    def recv(self, expValues):
        # Read data from the response message.
        resp = yield self.ws.read_message()
        print '>>>> Response: ', resp

    def stop(self):
        self.logger.debug('Client closing the websocket connection with the server')
        self.ws.close()
Unit test function is below:
import functools
import json
import logging
import logging.config
import time

import tornado.web
# These are a couple of custom classes.
import TorWebSocketClient
from infra.serverbase import Server
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog


class TornadoTest(AsyncHTTPTestCase):
    def get_app(self):
        app = tornado.web.Application([('/', EchoWebSocketHandler)])
        return app

    @gen_test
    def testToradoWSConection(self):
        # Login to the server to get the cookie.
        logger = logging.getLogger(__name__)
        server = Server(self.ipaddr, self.port, self.username, self.password)
        result = server.Login()
        self.assertEqual(result, True, 'login failed')

        webSocClient = yield TorWebSocketClient(self.ipaddr, self.port, server.GetCookie())
        result = webSocClient.Connect()
        self.assertEqual(result, True, 'Websocket connection failed')
Error I am getting:
Traceback (most recent call last):
File "/users/usr1/pyvenv/venv/lib/python2.7/site-packages/tornado/testing.py", line 527, in post_coroutine
return self.io_loop.run_sync(
AttributeError: TornadoTest instance has no attribute 'io_loop'
----------------------------------------------------------------------
Ran 1 tests in 0.002s
FAILED (errors=1)

Do you have your own setUp function?
The io_loop is created in AsyncTestCase's setUp method, so I think you need to call the super class's setUp as well.
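For example, a minimal sketch (assuming the test class otherwise stays as posted, with EchoWebSocketHandler coming from your own code) would be:
class TornadoTest(AsyncHTTPTestCase):
    def setUp(self):
        # AsyncHTTPTestCase.setUp() creates self.io_loop and the test application;
        # call it before doing any setup of your own.
        super(TornadoTest, self).setUp()

    def get_app(self):
        return tornado.web.Application([('/', EchoWebSocketHandler)])
With the io_loop created in setUp, the @gen_test decorator can run the coroutine via self.io_loop.run_sync as the traceback shows.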

Related

Python not able to connect to grpc channel

I have a problem with the server and client on my gRPC channel. The server:
import logging
import concurrent.futures
import grpc
import sys
sys.path.append("proto")
import proto.nvidia_pb2_grpc
from servicer import NvidiaServicer

logger = logging.getLogger()

GRPC_PORT = '50057'
socket = "localhost:{0}".format(GRPC_PORT)


def server():
    logger.info('Setting up gRPC server')
    grpc_server = grpc.server(concurrent.futures.ThreadPoolExecutor(max_workers=10))
    proto.nvidia_pb2_grpc.add_NvidiaServicer_to_server(
        NvidiaServicer(), grpc_server
    )
    logger.info(f'Starting server at {socket}')
    grpc_server.add_insecure_port(socket)
    grpc_server.start()
with the following servicer:
import proto.nvidia_pb2
import proto.nvidia_pb2_grpc
import logging
from driver_status import checkDriverStatus

logger = logging.getLogger()


class NvidiaServicer(proto.nvidia_pb2_grpc.NvidiaServicer):
    def NvidiaDriverStatus(self, request, context):
        logger.info('######################################################')
        logger.info('gRPC server got request to check driver status')
        response = proto.nvidia_pb2.DriverStatus()
        result = checkDriverStatus()
        response.status.value = result
        return response
and on the client side this is my client:
from asyncio.log import logger
import grpc
import os
from proto import nvidia_pb2
from proto import nvidia_pb2_grpc

GRPC_PORT = '50057'
socket = 'localhost:{}'.format(GRPC_PORT)


def GrpcClientNvidia():
    try:
        if os.environ.get('https_proxy'):
            del os.environ['https_proxy']
        if os.environ.get('http_proxy'):
            del os.environ['http_proxy']
        channel = grpc.insecure_channel(socket, options=(('grpc.enable_http_proxy', 0),))
        stub = nvidia_pb2_grpc.NvidiaStub(channel)
        res = stub.NvidiaDriverStatus(nvidia_pb2.Empty())
        return res.status.value
    except grpc.FutureTimeoutError:
        logger.error('Error connecting to nvidia server')
But whenever I run my main I still get:
File "/usr/local/lib/python3.6/site-packages/grpc/_channel.py", line 849, in _end_unary_response_blocking
raise _InactiveRpcError(state)
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "failed to connect to all addresses"
debug_error_string = "{"created":"@1661349318.910632939","description":"Failed to pick subchannel","file":"src/core/ext/filters/client_channel/client_channel.cc","file_line":3260,"referenced_errors":[{"created":"@1661349318.910631609","description":"failed to connect to all addresses","file":"src/core/lib/transport/error_utils.cc","file_line":167,"grpc_status":14}]}"
my main:
from GrpcClientNvidia import GrpcClientNvidia


def runNvidiaDriverCheck():
    return GrpcClientNvidia()


result = {}
result['SW'] = {}
result['SW']['nvidia'] = runNvidiaDriverCheck()
What am I doing wrong?
I needed to change localhost to 127.0.0.1.
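In other words, a minimal sketch of the change, applied to the socket string used by both the server and the client (the port value is the one from the question):
GRPC_PORT = '50057'
# Bind and dial by explicit IP; on some hosts 'localhost' resolves to an address
# (e.g. ::1) that the server is not actually listening on.
socket = "127.0.0.1:{0}".format(GRPC_PORT)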

Client-server implementation using the tornado_http2 API

I am trying to implement a client-server setup using the tornado_http2 API in Python, but the server never receives messages from the client.
I have checked that the server starts correctly with this command, and I had this result:
(mmsx-TPjM8MGB-py3.9) xx@ITLP071: 7 (master) ~/dev/mmsx/tornado_http2/demo$ proxy=127.0.0.1:8443; curl --http2-prior-knowledge -d "bla bla" -X POST https://localhost:8443/ -E test.crt
curl: (60) SSL certificate problem: self signed certificate
More details here: https://curl.se/docs/sslcerts.html
curl failed to verify the legitimacy of the server and therefore could not
establish a secure connection to it. To learn more about this situation and
how to fix it, please visit the web page mentioned above.
And from the server output:
(mmsx-TPjM8MGB-py3.9) xx@ITLP071: 130 (master) ~/dev/mmsx/tornado_http2/demo$ poetry run python server_test.py
[I 220722 04:02:37 server_test:30] starting
[W 220722 04:02:41 iostream:1517] SSL Error on 7 ('127.0.0.1', 60040): [SSL: TLSV1_ALERT_UNKNOWN_CA] tlsv1 alert unknown ca (_ssl.c:1123)
The connection is not fully established (I have not managed to resolve that yet), but at least I get a reaction from the server.
With a request from the client, I get no response at all.
Please find my server code below:
import logging
import os
import ssl

from tornado.ioloop import IOLoop
from tornado.options import parse_command_line
from tornado.web import Application, RequestHandler
from tornado_http2.server import Server


class MainHandler(RequestHandler):
    def get(self):
        self.write("Hello world")

    def post(self):
        self.write("bla bla")


def main():
    parse_command_line()
    ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    ssl_ctx.load_cert_chain(
        os.path.join(os.path.dirname(__file__), 'test.crt'),
        os.path.join(os.path.dirname(__file__), 'test.key'))
    app = Application([('/hello', MainHandler)], debug=True)
    server = Server(app, ssl_options=ssl_ctx)
    port = 8443
    address = "127.0.0.1"
    server.listen(port, address)
    logging.info("starting")
    IOLoop.instance().start()


if __name__ == '__main__':
    main()
And my client code:
from tornado_http2.curl import CurlAsyncHTTP2Client as HTTP2Client
import asyncio

URI = "http:127.0.0.1:8443/hello"


class Test():
    def __init__(self):
        self.__client = HTTP2Client(force_instance=True)

    async def send(self):
        global URI
        body = "body"
        response = await self.__client.fetch(URI, method='POST', body=body,
                                             validate_cert=False)
        print(response)


def main():
    asyncio.run(Test().send())


if __name__ == "__main__":
    main()
I started the server in one terminal and the client in another; as I understand it, the client console should display the result of the request.
Thanks for your help!
OK, I have found the problem.
It is a bug in the tornado_http2 API: the event loop has to be created before the HTTP2Client class is instantiated, otherwise it does not work.
If the client code is replaced by the following, it works:
from tornado_http2.curl import CurlAsyncHTTP2Client as HTTP2Client
import asyncio
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop


class Test():
    def __init__(self):
        self.__client = HTTP2Client(force_instance=True)

    async def send(self):
        uri = "https://127.0.0.1:8443/hello"
        response = await self.__client.fetch(uri, validate_cert=False)
        print(response.body.decode('utf-8'))


def run_asyncio():
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(Test().send())
    finally:
        loop.close()
        asyncio.set_event_loop(None)


def main():
    run_asyncio()


if __name__ == "__main__":
    main()
Hopefully it will help someone =).

How can I properly use circular imports in Python

Context: I have a websocket server, 'server.py', and a script that executes some tasks, 'worker.py'. I want to use the functions in server.py to send the results from that task in worker.py. But when a client requests the worker to send results, I need to use a function from worker.py. How can I avoid circular dependencies in this situation?
Server.py:
import eventlet
# eventlet.monkey_patch()
import socketio
from flask import Flask
from flask_cors import CORS

# The Worker file
import worker

sio = socketio.Server(cors_allowed_origins='http://localhost:8100')


@sio.on('connect')
def connectHandler(sid, environ):
    print('[INFO] Incoming connection from: ' + environ['REMOTE_ADDR'])
    sio.emit('response', {'data': 'Connection established, you can now request classifications by firing the "requestClassification" event.'})


@sio.on('disconnect')
def disconnectHandler(sid):
    print('disconnect ', sid)


@sio.on('requestClassification')
def requestHandler(data):
    print('[INFO] received request to classify')
    print(data)
    # using a function in the worker module
    worker.someFunc()


eventlet.wsgi.server(eventlet.listen(('127.0.0.1', 8080)), app)
worker.py:
import server


def work():
    while True:
        result = doSomeTask()
        print(f'sending result: {str(result)}')
        server.sio.emit('result', {'data': result})
        server.sio.sleep(1)
How can I properly structure the imports without, for example, defining sio (the server) twice?
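One common way to break such a cycle (a sketch, not from the original post) is to drop the module-level import on one side and import lazily inside the function that needs it, so neither module requires the other at import time:
# server.py (sketch): no top-level "import worker"
import socketio

sio = socketio.Server(cors_allowed_origins='http://localhost:8100')


@sio.on('requestClassification')
def requestHandler(sid, data):
    # Imported only when the event fires, after both modules are fully loaded.
    import worker
    worker.someFunc()
Another option is to move sio into a small third module (for example a hypothetical extensions.py) that both server.py and worker.py import, so the dependency only points one way.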

Why can't I use gevent websockets inside a greenlet

I'm trying to receive websocket messages in a greenlet, but it doesn't seem to be working. I have this code:
import json

import gevent
from geventwebsocket.handler import WebSocketHandler
from gevent.pywsgi import WSGIServer
from geventwebsocket import WebSocketServer, WebSocketApplication, Resource


def recvWs(ws):
    gevent.sleep(0)
    recvedData = ws.receive()
    rData = json.loads(recvedData)
    print(rData)


def app(environ, start_response):
    websocket = environ['wsgi.websocket']
    while True:
        gevent.spawn(recvWs, websocket)
        gevent.sleep(0)


if __name__ == '__main__':
    server = WSGIServer(("0.0.0.0", 80), app, handler_class=WebSocketHandler)
    server.serve_forever()
And when running, it returns this error:
<Greenlet "Greenlet-0" at 0x23fa4306148:
recvWs(<geventwebsocket.websocket.WebSocket object at 0x0)> failed with
RuntimeError
As well as:
line 197, in read_frame
header = Header.decode_header(self.stream)
How do I fix this?
Here is an example of what I have done that works very well.
Python with bottle and gevent:
import json
import traceback

from gevent import sleep as gsleep, Timeout
from geventwebsocket import WebSocketError
from bottle import Bottle, get, post, route, request, response, template, redirect, abort


@route('/ws/app')
def handle_websocket():
    wsock = request.environ.get('wsgi.websocket')
    if not wsock:
        abort(400, 'Expected WebSocket request.')

    # Send initial data here
    wsock.send(json.dumps(data))

    while 1:
        try:
            # Process incoming message. 2 second timeout to not block
            message = {}
            with Timeout(2, False) as timeout:
                message = wsock.receive()
            if message:
                message = json.loads(message)
                if isinstance(message, dict):
                    # Do something with data and return
                    wsock.send(json.dumps(result))
            # Add an additional second just for sanity. Not necessarily needed
            gsleep(1)
        except WebSocketError:
            break
        except Exception as exc:
            traceback.print_exc()
            gsleep(2)
Then in your JavaScript you open a websocket connection and send and receive the data as you normally would.

How to write unit tests for your gRPC server in Python?

I would like to use Python unittest to write tests for my gRPC server implementation. I have found the grpcio-testing package but I could not find any documentation on how to use it.
Let's say that I have the following server:
import helloworld_pb2
import helloworld_pb2_grpc


class Greeter(helloworld_pb2_grpc.GreeterServicer):
    def SayHello(self, request, context):
        return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)
How do I create an unit test to call SayHello and check the response?
You can start a real server in setUp and stop it in tearDown.
import unittest
from concurrent import futures

import grpc

import helloworld_pb2
import helloworld_pb2_grpc


class RPCGreeterServerTest(unittest.TestCase):
    server_class = Greeter
    port = 50051

    def setUp(self):
        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        helloworld_pb2_grpc.add_GreeterServicer_to_server(self.server_class(), self.server)
        self.server.add_insecure_port(f'[::]:{self.port}')
        self.server.start()

    def tearDown(self):
        self.server.stop(None)

    def test_server(self):
        with grpc.insecure_channel(f'localhost:{self.port}') as channel:
            stub = helloworld_pb2_grpc.GreeterStub(channel)
            response = stub.SayHello(helloworld_pb2.HelloRequest(name='Jack'))
            self.assertEqual(response.message, 'Hello, Jack!')
I took J.C's idea and expanded it to be able to create a fake server (mock) for each test case. Also, bind to port 0 to avoid port conflicts:
from contextlib import contextmanager


@contextmanager
def helloworld(cls):
    """Instantiate a helloworld server and return a stub for use in tests"""
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    helloworld_pb2_grpc.add_GreeterServicer_to_server(cls(), server)
    port = server.add_insecure_port('[::]:0')
    server.start()

    try:
        with grpc.insecure_channel('localhost:%d' % port) as channel:
            yield helloworld_pb2_grpc.GreeterStub(channel)
    finally:
        server.stop(None)


class HelloWorldTest(unittest.TestCase):
    def test_hello_name(self):
        # may do something extra for this mock if it's stateful
        class FakeHelloworld(helloworld_pb2_grpc.GreeterServicer):
            def SayHello(self, request, context):
                return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)

        with helloworld(FakeHelloworld) as stub:
            response = stub.SayHello(helloworld_pb2.HelloRequest(name='Jack'))
            self.assertEqual(response.message, 'Hello, Jack!')
There are inline API docstrings on the code elements that you can use. There's an issue filed to host them on grpc.io in a nicer format: https://github.com/grpc/grpc/issues/13340
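For reference, a minimal sketch of driving the Greeter servicer from the question through grpcio-testing's in-process server, pieced together from those docstrings (treat the exact names as something to verify against the version you have installed):
import unittest

import grpc
import grpc_testing

import helloworld_pb2


class GreeterInProcessTest(unittest.TestCase):
    def setUp(self):
        # Map the generated service descriptor to a real servicer; no network port is opened.
        servicers = {
            helloworld_pb2.DESCRIPTOR.services_by_name['Greeter']: Greeter()
        }
        self.test_server = grpc_testing.server_from_dictionary(
            servicers, grpc_testing.strict_real_time())

    def test_say_hello(self):
        method = (helloworld_pb2.DESCRIPTOR
                  .services_by_name['Greeter']
                  .methods_by_name['SayHello'])
        rpc = self.test_server.invoke_unary_unary(
            method_descriptor=method,
            invocation_metadata={},
            request=helloworld_pb2.HelloRequest(name='Jack'),
            timeout=1)
        response, trailing_metadata, code, details = rpc.termination()
        self.assertEqual(response.message, 'Hello, Jack!')
        self.assertIs(code, grpc.StatusCode.OK)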
You can give pytest-grpc a try.
If you are using Django, you can have a look at django-grpc-framework testing.
