Python not able to connect to grpc channel

I have a problem with the server and client on my gRPC channel. The server:
import concurrent.futures
import logging
import sys

import grpc

sys.path.append("proto")
import proto.nvidia_pb2_grpc
from servicer import NvidiaServicer

logger = logging.getLogger()

GRPC_PORT = '50057'
socket = "localhost:{0}".format(GRPC_PORT)

def server():
    logger.info('Setting up gRPC server')
    grpc_server = grpc.server(concurrent.futures.ThreadPoolExecutor(max_workers=10))
    proto.nvidia_pb2_grpc.add_NvidiaServicer_to_server(
        NvidiaServicer(), grpc_server
    )
    logger.info(f'Starting server at {socket}')
    grpc_server.add_insecure_port(socket)
    grpc_server.start()
with the following servicer:
import logging

import proto.nvidia_pb2
import proto.nvidia_pb2_grpc
from driver_status import checkDriverStatus

logger = logging.getLogger()

class NvidiaServicer(proto.nvidia_pb2_grpc.NvidiaServicer):
    def NvidiaDriverStatus(self, request, context):
        logger.info('######################################################')
        logger.info('gRPC server got request to check driver status')
        response = proto.nvidia_pb2.DriverStatus()
        result = checkDriverStatus()
        response.status.value = result
        return response
and on the client side, this is my client:
from asyncio.log import logger
import os

import grpc

from proto import nvidia_pb2
from proto import nvidia_pb2_grpc

GRPC_PORT = '50057'
socket = 'localhost:{}'.format(GRPC_PORT)

def GrpcClientNvidia():
    try:
        if os.environ.get('https_proxy'):
            del os.environ['https_proxy']
        if os.environ.get('http_proxy'):
            del os.environ['http_proxy']
        channel = grpc.insecure_channel(socket, options=(('grpc.enable_http_proxy', 0),))
        stub = nvidia_pb2_grpc.NvidiaStub(channel)
        res = stub.NvidiaDriverStatus(nvidia_pb2.Empty())
        return res.status.value
    except grpc.FutureTimeoutError:
        logger.error('Error connecting to nvidia server')
But whenever I run my main I still get:
File "/usr/local/lib/python3.6/site-packages/grpc/_channel.py", line 849, in _end_unary_response_blocking
raise _InactiveRpcError(state)
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "failed to connect to all addresses"
debug_error_string = "{"created":"#1661349318.910632939","description":"Failed to pick subchannel","file":"src/core/ext/filters/client_channel/client_channel.cc","file_line":3260,"referenced_errors":[{"created":"#1661349318.910631609","description":"failed to connect to all addresses","file":"src/core/lib/transport/error_utils.cc","file_line":167,"grpc_status":14}]}"
my main:
from GrpcClientNvidia import GrpcClientNvidia

def runNvidiaDriverCheck():
    return GrpcClientNvidia()

result = {}
result['SW'] = {}
result['SW']['nvidia'] = runNvidiaDriverCheck()
What am I doing wrong?

I needed to change localhost to 127.0.0.1.
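A minimal sketch of the working combination (hedged: the wait_for_termination() call and the grpc.RpcError handler are standard grpc-python usage added here for completeness; the original fix was only the address change):

import logging
from concurrent import futures

import grpc

from proto import nvidia_pb2, nvidia_pb2_grpc
from servicer import NvidiaServicer

SOCKET = "127.0.0.1:50057"  # a concrete IP sidesteps "localhost" resolving to an unexpected address

def serve():
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    nvidia_pb2_grpc.add_NvidiaServicer_to_server(NvidiaServicer(), grpc_server)
    grpc_server.add_insecure_port(SOCKET)
    grpc_server.start()
    grpc_server.wait_for_termination()  # keep the server process alive after start()

def query_driver_status():
    with grpc.insecure_channel(SOCKET) as channel:
        stub = nvidia_pb2_grpc.NvidiaStub(channel)
        try:
            return stub.NvidiaDriverStatus(nvidia_pb2.Empty()).status.value
        except grpc.RpcError as err:  # blocking stub calls raise RpcError, not FutureTimeoutError
            logging.getLogger().error('Error calling nvidia server: %s %s', err.code(), err.details())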

Related

Client-server implementation using the tornado_http2 API

I am trying to implement a client-server using the tornado_http2 API in Python, but the server never receives messages from the client.
I checked that the server starts correctly with this command, and got this result:
(mmsx-TPjM8MGB-py3.9) xx@ITLP071: 7 (master) ~/dev/mmsx/tornado_http2/demo$ proxy=127.0.0.1:8443; curl --http2-prior-knowledge -d "bla bla" -X POST https://localhost:8443/ -E test.crt
curl: (60) SSL certificate problem: self signed certificate
More details here: https://curl.se/docs/sslcerts.html
curl failed to verify the legitimacy of the server and therefore could not
establish a secure connection to it. To learn more about this situation and
how to fix it, please visit the web page mentioned above.
And from the server output:
(mmsx-TPjM8MGB-py3.9) xx@ITLP071: 130 (master) ~/dev/mmsx/tornado_http2/demo$ poetry run python server_test.py
[I 220722 04:02:37 server_test:30] starting
[W 220722 04:02:41 iostream:1517] SSL Error on 7 ('127.0.0.1', 60040): [SSL: TLSV1_ALERT_UNKNOWN_CA] tlsv1 alert unknown ca (_ssl.c:1123)
The connection is not fully established (I have not managed to resolve that yet), but at least I get a reaction from the server.
With a request from the client, I get no response at all.
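(As an aside, the curl TLS error by itself can be worked around by telling curl to trust the self-signed certificate with --cacert test.crt, or by skipping verification with -k/--insecure, assuming test.crt is the certificate the server actually presents.)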
Please find my server code below:
import logging
import os
import ssl

from tornado.ioloop import IOLoop
from tornado.options import parse_command_line
from tornado.web import Application, RequestHandler
from tornado_http2.server import Server

class MainHandler(RequestHandler):
    def get(self):
        self.write("Hello world")

    def post(self):
        self.write("bla bla")

def main():
    parse_command_line()
    ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    ssl_ctx.load_cert_chain(
        os.path.join(os.path.dirname(__file__), 'test.crt'),
        os.path.join(os.path.dirname(__file__), 'test.key'))
    app = Application([('/hello', MainHandler)], debug=True)
    server = Server(app, ssl_options=ssl_ctx)
    port = 8443
    address = "127.0.0.1"
    server.listen(port, address)
    logging.info("starting")
    IOLoop.instance().start()

if __name__ == '__main__':
    main()
And my client code:
import asyncio

from tornado_http2.curl import CurlAsyncHTTP2Client as HTTP2Client

URI = "http:127.0.0.1:8443/hello"

class Test():
    def __init__(self):
        self.__client = HTTP2Client(force_instance=True)

    async def send(self):
        global URI
        body = "body"
        response = await self.__client.fetch(URI, method='POST', body=body,
                                             validate_cert=False)
        print(response)

def main():
    asyncio.run(Test().send())

if __name__ == "__main__":
    main()
I started the server in one terminal and then the client in another; as I understand it, the client console should display the result of the request.
Thanks for your help!
OK, I found it.
It is a bug in the tornado_http2 API: the event loop has to be created before the HTTP2Client class is instantiated, otherwise it does not work.
If the client code is replaced by the following, it works:
import asyncio

from tornado_http2.curl import CurlAsyncHTTP2Client as HTTP2Client

class Test():
    def __init__(self):
        self.__client = HTTP2Client(force_instance=True)

    async def send(self):
        uri = "https://127.0.0.1:8443/hello"
        response = await self.__client.fetch(uri, validate_cert=False)
        print(response.body.decode('utf-8'))

def run_asyncio():
    # Create and install the event loop *before* HTTP2Client is instantiated.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(Test().send())
    finally:
        loop.close()
        asyncio.set_event_loop(None)

def main():
    run_asyncio()

if __name__ == "__main__":
    main()
Hopefully it will help someone =).

Why can't I use gevent websockets inside a greenlet

I'm trying to receive websocket messages in a greenlet, but it doesn't seem to be working. I have this code:
import json

import gevent
from gevent.pywsgi import WSGIServer
from geventwebsocket import WebSocketServer, WebSocketApplication, Resource
from geventwebsocket.handler import WebSocketHandler

def recvWs(ws):
    gevent.sleep(0)
    recvedData = ws.receive()
    rData = json.loads(recvedData)
    print(rData)

def app(environ, start_response):
    websocket = environ['wsgi.websocket']
    while True:
        gevent.spawn(recvWs, websocket)
        gevent.sleep(0)

if __name__ == '__main__':
    server = WSGIServer(("0.0.0.0", 80), app, handler_class=WebSocketHandler)
    server.serve_forever()
And when running, it returns this error:
<Greenlet "Greenlet-0" at 0x23fa4306148:
recvWs(<geventwebsocket.websocket.WebSocket object at 0x0)> failed with
RuntimeError
As well as:
line 197, in read_frame
header = Header.decode_header(self.stream)
How do I fix this?
Here is an example of what I have done that works very well.
Python with bottle and gevent:
import json
import traceback

from gevent import sleep as gsleep, Timeout
from geventwebsocket import WebSocketError
from bottle import Bottle, get, post, route, request, response, template, redirect, abort

@route('/ws/app')
def handle_websocket():
    wsock = request.environ.get('wsgi.websocket')
    if not wsock:
        abort(400, 'Expected WebSocket request.')

    # Send initial data here
    wsock.send(json.dumps(data))

    while 1:
        try:
            # Process incoming message. 2 second timeout to not block
            message = {}
            with Timeout(2, False) as timeout:
                message = wsock.receive()
            if message:
                message = json.loads(message)
                if isinstance(message, dict):
                    # Do something with data and return
                    wsock.send(json.dumps(result))
            # Add an additional second just for sanity. Not necessarily needed
            gsleep(1)
        except WebSocketError:
            break
        except Exception as exc:
            traceback.print_exc()
            gsleep(2)
Then in your JavaScript you open a websocket connection and send and receive the data as you normally would.
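To exercise the handler without a browser, here is a minimal Python client sketch using the third-party websocket-client package (the host, port, and payload are assumptions, not part of the answer above):

# pip install websocket-client
import json
import websocket

ws = websocket.create_connection("ws://localhost:8080/ws/app")  # hypothetical host/port
print(ws.recv())                      # initial data the handler sends on connect
ws.send(json.dumps({"op": "ping"}))   # any JSON object your handler understands
print(ws.recv())                      # the handler's JSON reply
ws.close()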

Unit testing using Tornado Websocket - no attribute 'io_loop' error

I have stitched together tornado websocket client code and am using it in my Python unit test case. This is my first time using tornado websockets, and I am not very familiar with its unit-test API. I am looking for help understanding tornado's asynchronous unit-test machinery and getting the case below working.
Client class code:
import logging
import logging.config
import ssl
import time
import traceback

from tornado.concurrent import Future
from tornado import gen
from tornado.httpclient import HTTPError, HTTPRequest
from tornado.log import gen_log, app_log
from tornado.web import Application, RequestHandler

class TorWebSocketClient():
    def __init__(self, ip_addr, port, cookie):
        self.ip_addr = ip_addr
        self.port = port
        self.cookie = cookie
        self.sockConnected = False
        self.logger = logging.getLogger(__name__)

    def Connect(self):
        # Creating the websocket client for each test case.
        url = "ws://{0}:{1}/socket{2}".format(str(self.ip_addr), str(self.port), self.cookie)
        self.logger.debug('Websocket URL: ' + url)
        sslopt = {"cert_reqs": ssl.CERT_NONE,
                  "check_hostname": False,
                  "ssl_version": ssl.PROTOCOL_TLSv1}
        self.logger.debug('New web socket connection is being established by the client')
        self.ws = websocket.websocket_connect(HTTPRequest(url, headers=headers, ssl_options=sslopt), io_loop=self.io_loop)
        # Start the websocket client thread. A wait is added till the connection is established.
        self.sockConnected = True

    def send(self, data):
        # Wait till the websocket is connected.
        if not self.ws.sock.connected:
            self.logger.debug('Send failed; Websocket connection is not yet established')
            return
        self.logger.info('Sending data to the server: ' + data)
        self.ws.write_message(data)

    def recv(self, expValues):
        # Read data from the response message.
        resp = yield self.ws.read_message()
        print '>>>> Response: ', resp

    def stop(self):
        self.logger.debug('Client closing the websocket connection with the server')
        self.ws.close()
Unit test function is below:
import functools
import json
import logging
import logging.config
import time

import tornado.web
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog

# These are a couple of custom classes.
import TorWebSocketClient
from infra.serverbase import Server

class TornadoTest(AsyncHTTPTestCase):
    def get_app(self):
        app = tornado.web.Application([('/', EchoWebSocketHandler)])
        return app

    @gen_test
    def testToradoWSConection(self):
        # Login to the server to get the cookie.
        logger = logging.getLogger(__name__)
        server = Server(self.ipaddr, self.port, self.username, self.password)
        result = server.Login()
        self.assertEqual(result, True, 'login failed')
        webSocClient = yield TorWebSocketClient(self.ipaddr, self.port, server.GetCookie())
        result = webSocClient.Connect()
        self.assertEqual(result, True, 'Websocket connection failed')
Error I am getting:
Traceback (most recent call last):
File "/users/usr1/pyvenv/venv/lib/python2.7/site-packages/tornado/testing.py", line 527, in post_coroutine
return self.io_loop.run_sync(
AttributeError: TornadoTest instance has no attribute 'io_loop'
----------------------------------------------------------------------
Ran 1 tests in 0.002s
FAILED (errors=1)
Do you have your own setUp function?
The io_loop is created in AsyncTestCase's setUp function, so I think you need to call the superclass's setUp.
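For example, a minimal sketch (the super call is the important part; anything else in setUp is up to you):

class TornadoTest(AsyncHTTPTestCase):
    def setUp(self):
        # AsyncTestCase.setUp creates self.io_loop, so it must run before
        # anything that touches the loop.
        super(TornadoTest, self).setUp()
        # ... your own per-test initialization goes here ...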

How do I run pyzmq and a webserver in one ioloop?

I want to write a single-threaded program that hosts a webserver using Tornado and also receives messages on a ZMQ socket (using the PyZMQ Tornado event loop: http://learning-0mq-with-pyzmq.readthedocs.org/en/latest/pyzmq/multisocket/tornadoeventloop.html), but I'm not sure how to structure it. Should I be using
from zmq.eventloop import ioloop
or
from tornado.ioloop import IOLoop
or both?
Before any Tornado imports, you need to import zmq.eventloop.ioloop and call the zmq.eventloop.ioloop.install function. Then you may import Tornado's ioloop and use it.
See:
http://zeromq.github.io/pyzmq/eventloop.html
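In other words, a minimal sketch of the required ordering:

# Install pyzmq's IOLoop as the singleton *before* importing anything from tornado.
from zmq.eventloop import ioloop
ioloop.install()

import tornado.web  # tornado imports come only after install()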
Here is an example of a Tornado HTTP server with ZeroMQ PUB/SUB sockets.
#!/usr/bin/env python
import json
import sys

import tornado
import tornado.web
import zmq
from tornado import httpserver
from zmq.eventloop import ioloop
from zmq.eventloop.zmqstream import ZMQStream

ioloop.install()
tornado.ioloop = ioloop

def ping_remote():
    """Callback to keep the connection with the remote server alive while we wait.

    Network routers between the Raspberry Pi and the cloud server will close
    the socket if no data is exchanged for a long time.
    """
    pub_inst.send_json_data(msg="Ping", req_id="##")
    sys.stdout.write('.')
    sys.stdout.flush()

pending_requests = {}

class ZMQSub(object):
    def __init__(self, callback):
        self.callback = callback
        context = zmq.Context()
        socket = context.socket(zmq.SUB)
        # socket.connect('tcp://127.0.0.1:5559')
        socket.bind('tcp://*:8081')
        self.stream = ZMQStream(socket)
        self.stream.on_recv(self.callback)
        socket.setsockopt(zmq.SUBSCRIBE, "")

    def shutdown_zmq_sub(self):
        self.stream.close()

class ZMQPub(object):
    def __init__(self):
        context = zmq.Context()
        socket = context.socket(zmq.PUB)
        socket.bind('tcp://*:8082')
        self.publish_stream = ZMQStream(socket)

    def send_json_data(self, msg, req_id):
        topic = str(req_id)
        self.publish_stream.send_multipart([topic, msg])

    def shutdown_zmq_sub(self):
        self.publish_stream.close()

def SensorCb(msg):
    # Decode the message from the Raspberry Pi and the channel ID.
    key, msg = (i for i in msg)
    if not key == "##":
        msg = json.loads(msg)
        if key in pending_requests.keys():
            req_inst = pending_requests[key]
            req_inst.write(msg)
            req_inst.finish()
            del pending_requests[key]
        else:
            print "no such request"
            print pending_requests
    else:
        print "received ping"

class Handler(tornado.web.RequestHandler):
    def __init__(self, *args, **kwargs):
        super(Handler, self).__init__(*args, **kwargs)
        # Get the unique request id.
        self.req_id = str(self.application.req_id) + "#"
        self.application.req_id += 1
        # Set CORS headers.
        self.set_header("Access-Control-Allow-Origin", "*")
        self.set_header("Access-Control-Allow-Headers", "x-requested-with")
        self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS, PUT')

    @tornado.web.asynchronous
    def get(self):
        print self.request
        if self.req_id not in pending_requests.keys():
            pending_requests[self.req_id] = self
        else:
            print "WTF"
        pub_inst.send_json_data(msg=json.dumps({"op": "ServiceCall"}), req_id=self.req_id)

if __name__ == "__main__":
    pub_inst = ZMQPub()
    sub_inst = ZMQSub(callback=SensorCb)
    application = tornado.web.Application([(r'/get_sensor_data', Handler)])
    application.req_id = 0
    server = httpserver.HTTPServer(application)
    port = 8080
    server.listen(port)
    print "Sensor server ready on port: ", port
    ping = ioloop.PeriodicCallback(ping_remote, 3000)
    ping.start()
    tornado.ioloop.IOLoop.instance().start()

Why does gevent.socket break multiprocessing.connection's auth

I have an application that uses both grequests and multiprocessing.managers for a combination of IPC communication and asynchronous RESTful communications over HTTP.
It seems that grequests, in using gevent.monkey's patch_all() method, breaks the multiprocessing.connection module used by the multiprocessing.managers.SyncManager class and its derivatives.
This is apparently not an isolated issue: it affects any use case that relies on multiprocessing.connection, such as multiprocessing.pool, for example.
Drilling down into the code in gevent/monkey.py, I found that the swapping of the stdlib socket module with gevent.socket is what causes the breakage.
This can be found at line 115 in gevent/monkey.py under the patch_socket() function:
def patch_socket(dns=True, aggressive=True):
    """Replace the standard socket object with gevent's cooperative sockets.
    ...
    _socket.socket = socket.socket  # This line breaks multiprocessing.connection!
    ...
My question, then, is: why does this swap break multiprocessing.connection, and what advantages does gevent.socket offer over the stdlib's socket module? That is, what performance loss, if any, will I incur by not patching the socket module?
Traceback
Traceback (most recent call last):
File "clientWithGeventMonkeyPatch.py", line 49, in <module>
client = GetClient(host, port, authkey)
File "clientWithGeventMonkeyPatch.py", line 39, in GetClient
client.connect()
File "/usr/lib/python2.7/multiprocessing/managers.py", line 500, in connect
conn = Client(self._address, authkey=self._authkey)
File "/usr/lib/python2.7/multiprocessing/connection.py", line 175, in Client
answer_challenge(c, authkey)
File "/usr/lib/python2.7/multiprocessing/connection.py", line 414, in answer_challenge
response = connection.recv_bytes(256) # reject large message
IOError: [Errno 11] Resource temporarily unavailable
Code to reproduce the error (on Ubuntu Server 11.10, Python 2.7.3, with gevent, greenlet, and grequests installed):
manager.py
## manager.py
import datetime
import multiprocessing
import multiprocessing.managers

class LocalManager(multiprocessing.managers.SyncManager):
    def __init__(self, *args, **kwargs):
        multiprocessing.managers.SyncManager.__init__(self, *args, **kwargs)
        self.__type__ = 'LocalManager'

def GetManager(host, port, authkey):
    def getdatetime():
        return '{}'.format(datetime.datetime.now())
    LocalManager.register('getdatetime', callable=getdatetime)
    manager = LocalManager(address=(host, port), authkey=authkey)
    manager.start()
    return manager

if __name__ == '__main__':
    # define our manager connection parameters
    port = 55555
    host = 'localhost'
    authkey = 'auth1234'

    # start a manager
    man = GetManager(host, port, authkey)

    # wait for user input to shut down
    raw_input('return to shutdown')
    man.shutdown()
client.py
## client.py -- this one works
import time
import multiprocessing.managers

class RemoteClient(multiprocessing.managers.SyncManager):
    def __init__(self, *args, **kwargs):
        multiprocessing.managers.SyncManager.__init__(self, *args, **kwargs)
        self.__type__ = 'RemoteClient'

def GetClient(host, port, authkey):
    RemoteClient.register('getdatetime')
    client = RemoteClient(address=(host, port), authkey=authkey)
    client.connect()
    return client

if __name__ == '__main__':
    # define our client connection parameters
    port = 55555
    host = 'localhost'
    authkey = 'auth1234'

    # start a manager
    client = GetClient(host, port, authkey)
    print 'connected', client
    print 'client.getdatetime()', client.getdatetime()

    # wait a couple of seconds, then do it again
    time.sleep(2)
    print 'client.getdatetime()', client.getdatetime()
    # exit...
# exit...
clientWithGeventMonkeyPatch.py
## clientWithGeventMonkeyPatch.py -- breaks, depending on patch_all() parameters
import time
import multiprocessing.managers

# this part is copied from grequests
# bear in mind that it doesn't actually do anything in this module.
try:
    import gevent
    from gevent import monkey as curious_george
    from gevent.pool import Pool
except ImportError:
    raise RuntimeError('Gevent is required for grequests.')

# this line causes breakage of the multiprocessing.manager connection auth method:
# Monkey-patch.
# patch_all() parameters with default values: socket=True, dns=True, time=True, select=True, thread=True, os=True, ssl=True, aggressive=True
curious_george.patch_all(thread=False, select=False)  # breaks
#~ curious_george.patch_all(thread=False, select=False, socket=False)  # works!
#~ curious_george.patch_all(thread=False, select=False, socket=True, aggressive=True, dns=True)  # same as (thread=False, select=False); breaks
#~ curious_george.patch_all(thread=False, select=False, socket=True, aggressive=True, dns=False)  # breaks
#~ curious_george.patch_all(thread=False, select=False, socket=True, aggressive=False, dns=True)  # breaks
#~ curious_george.patch_all(thread=False, select=False, socket=True, aggressive=False, dns=False)  # breaks

class RemoteClient(multiprocessing.managers.SyncManager):
    def __init__(self, *args, **kwargs):
        multiprocessing.managers.SyncManager.__init__(self, *args, **kwargs)
        self.__type__ = 'RemoteClient'

def GetClient(host, port, authkey):
    RemoteClient.register('getdatetime')
    client = RemoteClient(address=(host, port), authkey=authkey)
    client.connect()
    return client

if __name__ == '__main__':
    # define our client connection parameters
    port = 55555
    host = 'localhost'
    authkey = 'auth1234'

    # start a manager
    client = GetClient(host, port, authkey)
    print 'connected', client
    print 'client.getdatetime()', client.getdatetime()

    # wait a couple of seconds, then do it again
    time.sleep(2)
    print 'client.getdatetime()', client.getdatetime()
    # exit...
If you don't patch the socket module, gevent's ability to avoid blocking on network operations won't be available, and thus most of the benefit of using gevent in the first place is lost.
gevent and multiprocessing aren't really designed to play nicely with one another: gevent mostly assumes that you're doing your network connections through it, and not bypassing the highest-level Python socket interfaces (which multiprocessing does).
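If working IPC matters more than gevent's cooperative sockets, the question's own experiments already point at the practical workaround; a minimal sketch:

# Leave the socket module unpatched so multiprocessing.connection's
# auth handshake keeps using the blocking stdlib socket.
from gevent import monkey
monkey.patch_all(socket=False, thread=False, select=False)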
