python grpc._channel._Rendezvous exception with status UNKNOWN

I have a gRPC connection established and I try to make a request on the channel, but when I call the service from the client I get the following exception. Does anyone know something about it? Could the use of threads be the reason? I can't figure out what is wrong.
This is the protobuf schema with the service:
service P4Runtime {
  // Update one or more P4 entities on the target.
  rpc Write(WriteRequest) returns (WriteResponse) {
  }
  // Read one or more P4 entities from the target.
  rpc Read(ReadRequest) returns (stream ReadResponse) {
  }
  // Sets the P4 forwarding-pipeline config.
  rpc SetForwardingPipelineConfig(SetForwardingPipelineConfigRequest)
      returns (SetForwardingPipelineConfigResponse) {
  }
  // Gets the current P4 forwarding-pipeline config.
  rpc GetForwardingPipelineConfig(GetForwardingPipelineConfigRequest)
      returns (GetForwardingPipelineConfigResponse) {
  }
  // Represents the bidirectional stream between the controller and the
  // switch (initiated by the controller), and is managed for the following
  // purposes:
  // - connection initiation through client arbitration
  // - indicating switch session liveness: the session is live when the switch
  //   sends a positive client arbitration update to the controller, and is
  //   considered dead when either the stream breaks or the switch sends a
  //   negative update for client arbitration
  // - the controller sending/receiving packets to/from the switch
  // - streaming of notifications from the switch
  rpc StreamChannel(stream StreamMessageRequest)
      returns (stream StreamMessageResponse) {
  }
  rpc Capabilities(CapabilitiesRequest) returns (CapabilitiesResponse) {
  }
}
Below is the function that I call, where the exception happens:
def write_IPv4_Rules(p4info_helper, ingress_sw, ipv4_dst, lpm, dst_mac, out_port):
    table_entry = p4info_helper.buildTableEntry(
        table_name="MyIngress.ipv4_lpm",
        match_fields={
            "hdr.ipv4.dstAddr": (ipv4_dst, lpm)
        },
        action_name="MyIngress.ipv4_forward",
        action_params={
            "dstAddr": dst_mac,
            "port": out_port
        })
    ingress_sw.WriteTableEntry(table_entry)
    print("Installed ipv4 rule on %s" % ingress_sw.name)
This is the invocation of the above function, which happens inside a thread:
write_IPv4_Rules(p4info_helper, ingress_sw, ip_dest, 32, dst_mac, 2)
Below is the code of the controller that uses the gRPC service:
class SwitchConnection(object):
    def __init__(self, name=None, address='127.0.0.1:50051', device_id=0,
                 proto_dump_file=None):
        self.name = name
        self.address = address
        self.device_id = device_id
        self.p4info = None
        self.channel = grpc.insecure_channel(self.address)
        if proto_dump_file is not None:
            interceptor = GrpcRequestLogger(proto_dump_file)
            self.channel = grpc.intercept_channel(self.channel, interceptor)
        self.client_stub = p4runtime_pb2_grpc.P4RuntimeStub(self.channel)
        self.requests_stream = IterableQueue()
        self.stream_msg_resp = self.client_stub.StreamChannel(iter(self.requests_stream))
        self.proto_dump_file = proto_dump_file
        connections.append(self)

    def WriteTableEntry(self, table_entry, dry_run=False):
        request = p4runtime_pb2.WriteRequest()
        request.device_id = self.device_id
        request.election_id.low = 1
        update = request.updates.add()
        if table_entry.is_default_action:
            update.type = p4runtime_pb2.Update.MODIFY
        else:
            update.type = p4runtime_pb2.Update.INSERT
        update.entity.table_entry.CopyFrom(table_entry)
        if dry_run:
            print("P4Runtime Write:", request)
        else:
            self.client_stub.Write(request)


class GrpcRequestLogger(grpc.UnaryUnaryClientInterceptor,
                        grpc.UnaryStreamClientInterceptor):
    """Implementation of a gRPC interceptor that logs requests to a file."""

    def __init__(self, log_file):
        self.log_file = log_file
        with open(self.log_file, 'w') as f:
            # Clear content if it exists.
            f.write("")

    def log_message(self, method_name, body):
        with open(self.log_file, 'a') as f:
            ts = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
            msg = str(body)
            f.write("\n[%s] %s\n---\n" % (ts, method_name))
            if len(msg) < MSG_LOG_MAX_LEN:
                f.write(str(body))
            else:
                f.write("Message too long (%d bytes)! Skipping log...\n" % len(msg))
            f.write('---\n')

    def intercept_unary_unary(self, continuation, client_call_details, request):
        self.log_message(client_call_details.method, request)
        return continuation(client_call_details, request)

    def intercept_unary_stream(self, continuation, client_call_details, request):
        self.log_message(client_call_details.method, request)
        return continuation(client_call_details, request)


class IterableQueue(Queue):
    _sentinel = object()

    def __iter__(self):
        return iter(self.get, self._sentinel)

    def close(self):
        self.put(self._sentinel)
This is the exception that I receive when I run the program:
Exception in thread Thread-11:
Traceback (most recent call last):
File "/usr/lib/python3.8/threading.py", line 932, in _bootstrap_inner
self.run()
File "/usr/lib/python3.8/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "./mycontroller.py", line 348, in packet_router_processing
ipv4_forwarding(p4info_helper,extract_header,ingress_sw,packetIn.packet.metadata)
File "./mycontroller.py", line 256, in ipv4_forwarding
write_IPv4_Rules(p4info_helper,ingress_sw,ip_dest,24,dst_mac,1)
File "./mycontroller.py", line 38, in write_IPv4_Rules
ingress_sw.WriteTableEntry(table_entry)
File "/home/p4/tutorials/exercises/test/../../utils/p4runtime_lib/switch.py", line 102, in WriteTableEntry
self.client_stub.Write(request)
File "/usr/local/lib/python3.8/dist-packages/grpc/_interceptor.py", line 207, in __call__
response, ignored_call = self._with_call(
File "/usr/local/lib/python3.8/dist-packages/grpc/_interceptor.py", line 240, in _with_call
call = self._interceptor.intercept_unary_unary(
File "/home/p4/tutorials/exercises/test/../../utils/p4runtime_lib/switch.py", line 220, in intercept_unary_unary
return continuation(client_call_details, request)
File "/usr/local/lib/python3.8/dist-packages/grpc/_interceptor.py", line 228, in continuation
response, call = self._thunk(new_method).with_call(
File "/usr/local/lib/python3.8/dist-packages/grpc/_channel.py", line 557, in with_call
return _end_unary_response_blocking(state, call, True, None)
File "/usr/local/lib/python3.8/dist-packages/grpc/_channel.py", line 466, in _end_unary_response_blocking
raise _Rendezvous(state, None, None, deadline)
grpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with:
status = StatusCode.UNKNOWN
details = ""
debug_error_string = "{"created":"#1646087190.862135612","description":"Error received from peer","file":"src/core/lib/surface/call.cc","file_line":1036,"grpc_message":"","grpc_status":2}"
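StatusCode.UNKNOWN with empty details usually means the error originated on the server side and no message was propagated back, so the switch's own logs are the place to look. For more context on the client side, here is a minimal debugging sketch (not a fix; safe_write is a hypothetical wrapper): the exception raised by a stub call also implements grpc.Call, so its code() and details() can be inspected.
import grpc

def safe_write(ingress_sw, table_entry):
    # Hypothetical wrapper around WriteTableEntry from the controller above.
    # grpc.RpcError instances raised by stub calls also implement grpc.Call,
    # so code() and details() can be read off the exception itself.
    try:
        ingress_sw.WriteTableEntry(table_entry)
    except grpc.RpcError as e:
        print("gRPC status:", e.code())    # e.g. StatusCode.UNKNOWN
        print("details:", e.details())     # server-supplied message, empty here
        raise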

Related

Getting the error google.api_core.exceptions.InvalidArgument: 400 Request contains an invalid argument from the Google BigQuery Storage Write API

I am trying to pull a huge amount of data (millions of rows) and I am getting the following error when running my code. If I run the same code with a small range (to be exact, a range of 2) it runs successfully. Please help me determine whether the issue is in my code or on the API side.
Thanks
The error I am getting:
DEBUG:google.api_core.bidi:Started helper thread Thread-ConsumeBidirectionalStream
DEBUG:google.api_core.bidi:Thread-ConsumeBidirectionalStream caught error 400 Request contains an invalid argument. and will exit. Generally this is due to the RPC itself being cancelled and the error will be surfaced to the calling code.
Traceback (most recent call last):
File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/api_core/grpc_helpers.py", line 147, in error_remapped_callable
return _StreamingResponseIterator(
File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/api_core/grpc_helpers.py", line 73, in __init__
self._stored_first_result = next(self._wrapped)
File "/home/coyugi/teltel_env/lib/python3.8/site-packages/grpc/_channel.py", line 426, in __next__
return self._next()
File "/home/coyugi/teltel_env/lib/python3.8/site-packages/grpc/_channel.py", line 826, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.INVALID_ARGUMENT
details = "Request contains an invalid argument."
debug_error_string = "{"created":"#1652904360.179503883","description":"Error received from peer ipv4:173.194.76.95:443","file":"src/core/lib/surface/call.cc","file_line":952,"grpc_message":"Request contains an invalid argument.","grpc_status":3}"
>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/api_core/bidi.py", line 636, in _thread_main
self._bidi_rpc.open()
File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/api_core/bidi.py", line 279, in open
call = self._start_rpc(iter(request_generator), metadata=self._rpc_metadata)
File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/cloud/bigquery_storage_v1/services/big_query_write/client.py", line 678, in append_rows
response = rpc(
File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/api_core/gapic_v1/method.py", line 154, in __call__
return wrapped_func(*args, **kwargs)
File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/api_core/retry.py", line 283, in retry_wrapped_func
return retry_target(
File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/api_core/retry.py", line 190, in retry_target
return target()
File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/api_core/grpc_helpers.py", line 151, in error_remapped_callable
raise exceptions.from_grpc_error(exc) from exc
google.api_core.exceptions.InvalidArgument: 400 Request contains an invalid argument.
INFO:google.api_core.bidi:Thread-ConsumeBidirectionalStream exiting
DEBUG:google.cloud.bigquery_storage_v1.writer:Finished stopping manager.
Traceback (most recent call last):
File "write_data_to_db2.py", line 207, in <module>
p.append_rows_pending(project_id='dwingestion', dataset_id='ke',
File "write_data_to_db2.py", line 188, in append_rows_pending
response_future_1 = append_rows_stream.send(request)
File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/cloud/bigquery_storage_v1/writer.py", line 234, in send
return self._open(request)
File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/cloud/bigquery_storage_v1/writer.py", line 207, in _open
raise request_exception
google.api_core.exceptions.Unknown: None There was a problem opening the stream. Try turning on DEBUG level logs to see the error.
Summary of my code:
# PULLING DATA FROM THE API
def whole_teltel_raw_data():
    # Creating a session to introduce network consistency
    session = requests.Session()
    retry = Retry(connect=3, backoff_factor=1.0)
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    url = "https://my_api_url"
    the_headers = {"X-API-KEY": 'my key'}
    offset_limit = 1249500
    teltel_data = []
    # Loop through the results and if present extend the teltel_data list

# ======================================================================================================================
# WRITE THE DATA TO THE DATA WAREHOUSE
# ======================================================================================================================
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'dwingestion-b033d9535e9d.json'

def create_row_data(tuple_data):
    call_id, starttime, stoptime, direction, type, status, duration_sec, rate, cost, transfer, extra_prefix, audio_url, \
        hangup_element, caller_number, caller_type, caller_cid, caller_dnid, caller_user_id, caller_user_short, \
        callee_number, calle_type, callee, hangup_element_name, hangup_element_element, callee_user_id, callee_user_short, \
        caller = tuple_data
    row = teltel_call_data_pb2.TeltelCall()
    row.call_id = call_id
    row.starttime = starttime
    row.stoptime = stoptime
    row.direction = direction
    row.type = type
    row.status = status
    row.duration_sec = duration_sec
    row.rate = rate
    row.cost = cost
    row.transfer = transfer
    row.extra_prefix = extra_prefix
    row.audio_url = audio_url
    row.hangup_element = hangup_element
    row.caller_number = caller_number
    row.caller_type = caller_type
    row.caller_cid = caller_cid
    row.caller_dnid = caller_dnid
    row.caller_user_id = caller_user_id
    row.caller_user_short = caller_user_short
    row.callee_number = callee_number
    row.calle_type = calle_type
    row.callee = callee
    row.hangup_element_name = hangup_element_name
    row.hangup_element_title = hangup_element_element
    row.callee_user_id = callee_user_id
    row.callee_user_short = callee_user_short
    row.caller = caller
    return row.SerializeToString()

# Creating connection to the data warehouse
def create_bigquery_storage_client(google_credentials):
    return bigquery_storage_v1.client.BigQueryWriteClient(
        credentials=google_credentials
    )

class GcpBigqueryStorageService(object):
    def __init__(self, google_credentials=None, gcp_config=None):
        self.client = create_bigquery_storage_client(google_credentials)
        self.config = gcp_config

    def append_rows_pending(self, project_id: str, dataset_id: str, table_id: str):
        """Create a write stream, write some sample data, and commit the stream."""
        # write_client = self.client
        parent = self.client.table_path(project_id, dataset_id, table_id)
        write_stream = types.WriteStream()
        # When creating the stream, choose the type. Use the PENDING type to wait
        write_stream.type_ = types.WriteStream.Type.PENDING
        write_stream = self.client.create_write_stream(
            parent=parent, write_stream=write_stream
        )
        stream_name = write_stream.name
        # Create a template with fields needed for the first request.
        request_template = types.AppendRowsRequest()
        # The initial request must contain the stream name.
        request_template.write_stream = stream_name
        # So that BigQuery knows how to parse the serialized_rows, generate a
        # protocol buffer representation of your message descriptor.
        proto_schema = types.ProtoSchema()
        proto_descriptor = descriptor_pb2.DescriptorProto()
        teltel_call_data_pb2.TeltelCall.DESCRIPTOR.CopyToProto(proto_descriptor)
        proto_schema.proto_descriptor = proto_descriptor
        proto_data = types.AppendRowsRequest.ProtoData()
        proto_data.writer_schema = proto_schema
        request_template.proto_rows = proto_data
        # Some stream types support an unbounded number of requests. Construct an
        # AppendRowsStream to send an arbitrary number of requests to a stream.
        append_rows_stream = writer.AppendRowsStream(self.client, request_template)
        # Create a batch of row data by appending proto2 serialized bytes to the
        # serialized_rows repeated field.
        proto_rows = types.ProtoRows()
        row_number = 0
        for row in whole_teltel_raw_data():
            proto_rows.serialized_rows.append(create_row_data(row))
            # checking the writing progress
            row_number = row_number + 1
            print("Writing to the database row number", row_number)
        # The first request must always have an offset of 0.
        request = types.AppendRowsRequest()
        proto_data = types.AppendRowsRequest.ProtoData()
        proto_data.rows = proto_rows
        request.proto_rows = proto_data
        response_future_1 = append_rows_stream.send(request)
        append_rows_stream.close()
        # A PENDING type stream must be "finalized" before being committed. No new
        # records can be written to the stream after this method has been called.
        self.client.finalize_write_stream(name=write_stream.name)
        # Commit the stream you created earlier.
        batch_commit_write_streams_request = types.BatchCommitWriteStreamsRequest()
        batch_commit_write_streams_request.parent = parent
        batch_commit_write_streams_request.write_streams = [write_stream.name]
        self.client.batch_commit_write_streams(batch_commit_write_streams_request)
        print(f"Writes to stream: '{write_stream.name}' have been committed.")

p = GcpBigqueryStorageService()
p.append_rows_pending(project_id='my_project', dataset_id='my_id', table_id='teltel_call_2')
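One thing worth checking, since the stream opens fine for a range of 2 but fails for millions of rows: the loop above packs every serialized row into a single ProtoRows before sending, and a single AppendRows request is limited to 10 MB by the API. A sketch of batching under that assumption (BATCH_SIZE is an arbitrary example value; types and append_rows_stream are the same objects used in the code above):
# Sketch only: send the serialized rows in smaller AppendRowsRequests
# instead of one giant request. The real constraint is the request size
# limit (10 MB), so size-based batching may be needed for large rows.
BATCH_SIZE = 500

def send_in_batches(append_rows_stream, serialized_rows):
    batch = types.ProtoRows()
    for serialized in serialized_rows:
        batch.serialized_rows.append(serialized)
        if len(batch.serialized_rows) >= BATCH_SIZE:
            _send_batch(append_rows_stream, batch)
            batch = types.ProtoRows()
    if batch.serialized_rows:  # flush the remainder
        _send_batch(append_rows_stream, batch)

def _send_batch(append_rows_stream, proto_rows):
    request = types.AppendRowsRequest()
    proto_data = types.AppendRowsRequest.ProtoData()
    proto_data.rows = proto_rows
    request.proto_rows = proto_data
    append_rows_stream.send(request)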

OPCUA Server doesn't receive host/port/sock to start

During a small test to learn how to run a small server, the start method raises an error when I try to start the server after setting the endpoint and some variables:
from opcua import Server
import datetime
import time
my_server = Server()
url = 'opc.tcp//192.168.1.5:4841'
my_server.set_endpoint(url)
name = "OPCUA_TEST_Server"
addspace = my_server.register_namespace(name)
node = my_server.get_objects_node()
param = node.add_object(addspace, "Parameters")
t_text1 = param.add_variable(addspace, "Text 1", "Text_1")
i_int1 = param.add_variable(addspace, "myInteger1", 0)
b_bool1 = param.add_variable(addspace, "myBool1", False)
t_text1.set_writable()
i_int1.set_writable()
b_bool1.set_writable()
my_server.start()
print("Server started at {}".format(url))
print("At" + str(datetime.datetime.now()))
while True:
    time.sleep(0.5)
And the line
my_server.start()
returns the following error:
Endpoints other than open requested but private key and certificate are not set.
Traceback (most recent call last):
File "C:/Users/a767611/Desktop/Repositorios/flexigrid/opc-ua-server/test-opc-ua-server.py", line 23, in <module>
my_server.start()
File "C:\Users\a767611\Anaconda3\lib\site-packages\opcua\server\server.py", line 347, in start
raise exp
File "C:\Users\a767611\Anaconda3\lib\site-packages\opcua\server\server.py", line 344, in start
self.bserver.start()
File "C:\Users\a767611\Anaconda3\lib\site-packages\opcua\server\binary_server_asyncio.py", line 116, in start
self._server = self.loop.run_coro_and_wait(coro)
File "C:\Users\a767611\Anaconda3\lib\site-packages\opcua\common\utils.py", line 197, in run_coro_and_wait
return task.result()
File "C:\Users\a767611\Anaconda3\lib\asyncio\base_events.py", line 1393, in create_server
raise ValueError('Neither host/port nor sock were specified')
ValueError: Neither host/port nor sock were specified
Your endpoint URL is malformed.
It should be:
url = 'opc.tcp://192.168.1.5:4841'
Note the missing colon after opc.tcp.
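Applied to the script above, the top of the file becomes:
from opcua import Server

my_server = Server()
# 'opc.tcp' must be followed by '://'; without the colon the host and port
# cannot be parsed from the URL, so the server has nothing to bind to and
# asyncio raises "Neither host/port nor sock were specified".
url = 'opc.tcp://192.168.1.5:4841'
my_server.set_endpoint(url)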

Unable to make high number of posts on remote machine vs local using asyncio and aiohttp

I wrote a program that would post events using asyncio and aiohttp. This program works when I run it locally. I can post 10k events no problem. However, I SCPed the whole codebase to a remote machine and within that machine I can't post more than 15 events without getting this error:
RuntimeError: Event loop is closed
Exception ignored in: <coroutine object Poster.async_post_event at 0x7f4a53989410>
Traceback (most recent call last):
File "/home/bli1/qe-trinity/tracer/utils/poster.py", line 63, in async_post_event
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/client.py", line 565, in __aenter__
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/client.py", line 198, in _request
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/connector.py", line 316, in connect
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/connector.py", line 349, in _release_waiter
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/futures.py", line 332, in set_result
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/futures.py", line 242, in _schedule_callbacks
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 447, in call_soon
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 456, in _call_soon
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 284, in _check_closed
RuntimeError: Event loop is closed
Exception ignored in: <coroutine object Poster.async_post_event at 0x7f4a5397ffc0>
Traceback (most recent call last):
File "/home/bli1/qe-trinity/tracer/utils/poster.py", line 63, in async_post_event
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/client.py", line 565, in __aenter__
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/client.py", line 198, in _request
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/connector.py", line 316, in connect
File "/home/bli1/py/python3.5/lib/python3.5/site-packages/aiohttp/connector.py", line 349, in _release_waiter
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/futures.py", line 332, in set_result
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/futures.py", line 242, in _schedule_callbacks
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 447, in call_soon
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 456, in _call_soon
File "/home/bli1/py/python3.5/lib/python3.5/asyncio/base_events.py", line 284, in _check_closed
RuntimeError: Event loop is closed
How can I debug this or find out the source of this problem?
Here is the class that I created; I run it via its post() method:
import uuid
import os
import asyncio
import time
import random
import json
import aiohttp

from tracer.utils.phase import Phase


class Poster(Phase):
    def __init__(self, log, endpoint, num_post, topic, datafile, timeout, oracles, secure=False, thru_proxy=True):
        Phase.__init__(self, log, "post", oracles, secure, thru_proxy)
        self.log = log
        self.num_post = int(num_post)
        self.datafile = datafile.readlines()
        self.topic = topic
        self.endpoint = self.set_endpoint(endpoint, self.topic)
        self.response = None
        self.timeout = timeout

    def random_line(self):
        """ Returns random line from file and converts it to JSON """
        return json.loads(random.choice(self.datafile))

    @staticmethod
    def change_uuid(event):
        """ Creates new UUID for event_id """
        new_uuid = str(uuid.uuid4())
        event["event_header"]["event_id"] = new_uuid
        return event

    @staticmethod
    def wrapevent(event):
        """ Wrap event with metadata for analysis later on """
        return {
            "tracer": {
                "post": {
                    "statusCode": None,
                    "timestamp": None,
                },
                "awsKafkaTimestamp": None,
                "qdcKakfaTimestamp": None,
                "hdfsTimestamp": None
            },
            "event": event
        }

    def gen_random_event(self):
        random_event = self.random_line()
        event = self.change_uuid(random_event)
        dataspec = self.wrapevent(event)
        return dataspec

    async def async_post_event(self, event, session):
        async with session.post(self.endpoint, data=event, proxy=self.proxy) as resp:
            event["tracer"]["post"]["timestamp"] = time.time() * 1000.0
            event["tracer"]["post"]["statusCode"] = resp.status
        unique_id = event["event"]["event_header"]["event_id"]
        oracle_endpoint = os.path.join(self.oracle, unique_id)
        async with session.put(oracle_endpoint, data=json.dumps(event), proxy=self.proxy) as resp:
            if resp.status != 200:
                self.log.debug("Post to ElasticSearch not 200")
                self.log.debug(event["event"]["event_header"]["event_id"])
                self.log.debug("Status code: " + str(resp.status))
            return event["event"]["event_header"]["event_id"], resp.status

    async def async_post_events(self, events):
        coros = []
        conn = aiohttp.TCPConnector(verify_ssl=self.secure)
        async with aiohttp.ClientSession(connector=conn) as session:
            for event in events:
                coros.append(self.async_post_event(event, session))
            return await asyncio.gather(*coros)

    def post(self):
        event_loop = asyncio.get_event_loop()
        try:
            events = [self.gen_random_event() for i in range(self.num_post)]
            start_time = time.time()
            results = event_loop.run_until_complete(self.async_post_events(events))
            print("Time taken: " + str(time.time() - start_time))
        finally:
            event_loop.close()
You cannot re-use a loop once it's closed. From AbstractEventLoop.close documentation:
This is idempotent and irreversible. No other methods should be called after this one.
Either remove the loop.close call or create a new loop for each post.
My advice would be to avoid those problems by running everything inside the loop and awaiting async_post_events when needed.
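If you do keep the post() wrapper as written, here is a minimal sketch of the new-loop-per-call option (plain asyncio API, no other changes assumed):
import asyncio

def post(self):
    # Build a fresh loop for this call instead of closing the shared default
    # loop; a closed loop can never be reused, which is what caused the
    # "Event loop is closed" errors above.
    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    try:
        events = [self.gen_random_event() for i in range(self.num_post)]
        return event_loop.run_until_complete(self.async_post_events(events))
    finally:
        event_loop.close()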

Overwriting constants when imported

So I have been trying to understand the usage of a class constant, but I don't see how it can be overwritten. My library looks like this:
class ArcsightLogger(object):
    """
    Main Class to interact with Arcsight Logger REST API
    """
    TARGET = 'https://SOMETHING:9000'

    def __init__(self, username, password, disable_insecure_warning=False):
        """
        Log in the user whose credentials are provided and
        store the access token to be used with all requests
        against Arcsight
        """
        action = 'ignore' if disable_insecure_warning else 'once'
        warnings.simplefilter(action, InsecureRequestWarning)
        r = self._post(
            '/core-service/rest/LoginService/login', data={
                'login': username,
                'password': password,
            }, is_json=False)
        r.raise_for_status()
        loginrequest = untangle.parse(r.content)
        self.token = loginrequest.ns3_loginResponse.ns3_return.cdata

    def format_time(self, *args):
        currentdt = datetime.datetime.now(pytz.utc)
        if len(args) > 0:
            currentdt += datetime.timedelta(*args)
        (dt, micro) = currentdt.strftime('%Y-%m-%dT%H:%M:%S.%f').split('.')
        tz_offset = currentdt.astimezone(tzlocal()).strftime('%z')
        tz_offset = "Z" if tz_offset == "" else tz_offset[:3] + ":" + tz_offset[3:]
        dt = "%s.%03d%s" % (dt, int(micro) / 1000, tz_offset)
        return dt

    def _post(self, route, data, is_json=True):
        """
        Post Call towards Arcsight Logger
        :param route: API endpoint to fetch
        :param is_json: Checks if post needs to be JSON
        :param data: Request Body
        :return: HTTP Response
        """
        if not data:
            return
        url = self.TARGET + route
        if is_json:
            return requests.post(url, json=data, verify=False)
        else:
            return requests.post(url, data, verify=False)
This works just fine if I manually set TARGET in this script, but when I import it into another script, like this:
import arcsightrest

arcsight = arcsightrest.ArcsightLogger('admin', 'somepassword', False)
arcsight.TARGET = 'https://10.10.10.10:9000'

with arcsight.search('query') as search:
    search.wait()
    data = search.events(custom=True)
    print data
Then when I run the script, I see that TARGET is never actually overwritten, because the traceback still shows the old TARGET being used in the __init__ function (which calls _post):
Traceback (most recent call last):
File "test.py", line 3, in <module>
arcsight = arcsightrest.ArcsightLogger('admin', 'somepassword', False)
File "/var/www/Projects2/ArcsightSDK/arcsightrest.py", line 37, in __init__
}, is_json=False)
File "/var/www/Projects2/ArcsightSDK/arcsightrest.py", line 69, in _post
return requests.post(url, data, verify=False)
File "/usr/lib/python2.7/site-packages/requests/api.py", line 110, in post
return request('post', url, data=data, json=json, **kwargs)
File "/usr/lib/python2.7/site-packages/requests/api.py", line 56, in request
return session.request(method=method, url=url, **kwargs)
File "/usr/lib/python2.7/site-packages/requests/sessions.py", line 475, in request
resp = self.send(prep, **send_kwargs)
File "/usr/lib/python2.7/site-packages/requests/sessions.py", line 596, in send
r = adapter.send(request, **kwargs)
File "/usr/lib/python2.7/site-packages/requests/adapters.py", line 487, in send
raise ConnectionError(e, request=request)
requests.exceptions.ConnectionError: HTTPSConnectionPool(host='something', port=9000): Max retries exceeded with url: /core-service/rest/LoginService/login (Caused by NewConnectionError('<requests.packages.urllib3.connection.VerifiedHTTPSConnection object at 0x1e59e50>: Failed to establish a new connection: [Errno -2] Name or service not known',))
You are overriding the variable after creating the instance:
arcsight = arcsightrest.ArcsightLogger('admin', 'somepassword', False)
# __init__ has already run at this point
arcsight.TARGET = 'https://10.10.10.10:9000'
so inside the __init__ function it still has the old value. You need to change the variable on the class, not the instance:
import arcsightrest
arcsightrest.ArcsightLogger.TARGET = 'https://10.10.10.10:9000'
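A tiny self-contained demonstration of the timing issue (Demo is a made-up class, not the Arcsight one):
class Demo(object):
    TARGET = 'https://SOMETHING:9000'

    def __init__(self):
        # Runs at construction time, before any later assignment.
        print('during __init__:', self.TARGET)

d = Demo()                               # prints the class default
d.TARGET = 'https://10.10.10.10:9000'    # too late for __init__, and only
                                         # shadows TARGET on this instance
print('after assignment:', d.TARGET)     # the new value
print('on the class:', Demo.TARGET)      # still the old value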
Since you want to use a different target for different instances, use an instance variable, not a class variable. After all, it's not really a constant if it's going to change.
You can pass the value for the URL target in the __init__() method. Use a default value if there is an appropriate one:
class ArcsightLogger(object):
    """
    Main Class to interact with Arcsight Logger REST API
    """
    def __init__(self, username, password, disable_insecure_warning=False, target='https://SOMETHING:9000'):
        self.target = target
        # etc...
Then use self.target in _post().
If you don't like setting the default in the __init__() method's argument then you can define a default value as a class variable and use it to initialise self.target:
class ArcsightLogger(object):
    """
    Main Class to interact with Arcsight Logger REST API
    """
    TARGET = 'https://SOMETHING:9000'

    def __init__(self, username, password, disable_insecure_warning=False, target=None):
        self.target = target if target is not None else self.TARGET
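Either way, each instance can then be constructed with its own target (using the address from the question):
import arcsightrest

# No class attribute needs patching; the target travels with the instance.
arcsight = arcsightrest.ArcsightLogger('admin', 'somepassword', False,
                                       target='https://10.10.10.10:9000')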

tornado.gen.BadYieldError: yielded unknown object None

I'm using Python and Tornado to build a basic app with a jQuery UI slider element. My goal is that when users interact with the slider, its value is sent to a Python function and the result is displayed in the Python console.
My custom.js is:
$(function() {
    $("#slider-range-max").slider({
        min: 0,
        max: 100,
        slide: function(event, ui) {
            $("#amount").val(ui.value);
            $.ajax({
                url: "/action",
                data: {parameter: ui.value},
            });
        },
    });
    $("#amount").val($("#slider-range-max").slider("value"));
});
main.py
define("port", default=8888, help="run on the given port", type=int)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", AuxHandler),
(r"/action", MainHandler)
]
settings = {
"template_path": Settings.TEMPLATE_PATH,
"static_path": Settings.STATIC_PATH,
}
tornado.web.Application.__init__(self, handlers, **settings)
class AuxHandler(tornado.web.RequestHandler):
def get(self):
self.render("index.html")
class MainHandler(tornado.web.RequestHandler):
#asynchronous
#tornado.gen.coroutine
def get(self):
speed = int(self.get_argument("parameter"))
p=P()
if speed > 1:
p.startApp(speed)
if speed<1:
p.stopApp()
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
and p.py
@tornado.gen.coroutine
def startApp(self, speed):
    x = yield print(speed)
    while True:
        yield x
In the console I receive this:
12
[I 160516 12:47:19 web:1946] 304 GET /action?parameter=12 (::1) 0.00ms
13
[I 160516 12:47:19 web:1946] 304 GET /action?parameter=13 (::1) 15.60ms
14
[E 160516 12:47:19 concurrent:336] Future <tornado.concurrent.Future object at 0x02FAA7D0> exception was never retrieved: Traceback (most recent call last):
File "C:\Users\home\AppData\Local\Programs\Python\Python35-32\lib\site-packages\tornado\gen.py", line 1014, in run
yielded = self.gen.throw(*exc_info)
File "E:\work\python\Example2\p.py", line 11, in startApp
x= yield print(speed)
File "C:\Users\home\AppData\Local\Programs\Python\Python35-32\lib\site-packages\tornado\gen.py", line 1008, in run
value = future.result()
File "C:\Users\home\AppData\Local\Programs\Python\Python35-32\lib\site-packages\tornado\concurrent.py", line 232, in result
raise_exc_info(self._exc_info)
File "<string>", line 3, in raise_exc_info
File "C:\Users\home\AppData\Local\Programs\Python\Python35-32\lib\site-packages\tornado\gen.py", line 1090, in handle_yield
self.future = convert_yielded(yielded)
File "C:\Users\home\AppData\Local\Programs\Python\Python35-32\lib\functools.py", line 743, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "C:\Users\home\AppData\Local\Programs\Python\Python35-32\lib\site-packages\tornado\gen.py", line 1222, in convert_yielded
raise BadYieldError("yielded unknown object %r" % (yielded,))
tornado.gen.BadYieldError: yielded unknown object None
I don't know how to handle this "yielded unknown object None" error, or whether my approach is correct. Any idea would be very helpful.
The exception is from yield print(speed). print returns None, and you can't yield None. You can only yield Futures and similar awaitable objects, typically when you yield the result of calling a coroutine. See Refactoring Tornado Coroutines for a guide to calling coroutines.
If you want to print the value of speed, just do this:
def startApp(self, speed):
    print(speed)
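If startApp really does need to stay a coroutine, it has to yield awaitable objects. A minimal sketch, assuming Tornado 4.x, where tornado.gen.sleep returns a Future:
import tornado.gen

@tornado.gen.coroutine
def startApp(self, speed):
    print(speed)
    # tornado.gen.sleep returns a Future, which is a valid object to yield;
    # yielding the return value of print() (None) is not.
    yield tornado.gen.sleep(1)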
