I am implementing a Pysnmp responder that receives SNMP GET/SET requests currently and would like to extend with Walk, getNext and getBulk operations. All the OID and its values are stored in key-value in a file.
What I have tried is to use readNextVars() method from instrum.AbstractMibInstrumController class where I iterate over OID's list by calling self.readVars() within readNextVars()
Below is a code snippet showing only GET request and SET request is similar but it writes value to its respective OID in oid.json file.
from pysnmp.entity import engine, config
from pysnmp.entity.rfc3413 import cmdrsp, context
from pysnmp.carrier.asynsock.dgram import udp
from pysnmp.smi import instrum, error
from pysnmp.proto.api import v2c
import json
class SnmpData:
def __init__(self, host, port):
self.snmpEngine = engine.SnmpEngine()
config.addSocketTransport(
self.snmpEngine,
udp.domainName,
udp.UdpTransport().openServerMode((host, port))
)
config.addV1System(self.snmpEngine, 'my-area', 'public', contextName='my-context')
config.addVacmUser(self.snmpEngine, 2, 'my-area', 'noAuthNoPriv', (1, 3, 6), (4, 5, 7))
self.snmpContext = context.SnmpContext(self.snmpEngine)
def snmp_run_command(self):
self.snmpContext.registerContextName(
v2c.OctetString('my-context'),
FileInstrumController()
)
cmdrsp.GetCommandResponder(self.snmpEngine, self.snmpContext)
cmdrsp.SetCommandResponder(self.snmpEngine, self.snmpContext)
cmdrsp.NextCommandResponder(self.snmpEngine, self.snmpContext)
cmdrsp.BulkCommandResponder(self.snmpEngine, self.snmpContext)
self.snmpEngine.transportDispatcher.jobStarted(1)
try:
self.snmpEngine.transportDispatcher.runDispatcher()
except:
self.snmpEngine.transportDispatcher.closeDispatcher()
return "yes"
def main(self):
self.snmp_run_command()
class FileInstrumController(instrum.AbstractMibInstrumController):
def readVars(self, vars, acInfo=(None, None)):
try:
data = None
final_data = None
with open('oid.json') as f:
data = json.load(f)
if str(vars[0][0]) in data.keys())):
final_data = data[str(vars[0][0])]
return [(vars[0][0], v2c.OctetString(str(final_data)))]
else:
return [(vars[0][0], v2c.OctetString(str("Not a Valid OID")))]
except IOError:
raise error.SmiError
def readNextVars(self, vars, acInfo=(None, None))
# get oid & split and match if in file than return its value
# else return invalid oid and break but its not breaking
# and sending continuously requests of next oid (OID + 1)
Here is the oid file (oid.json)
{
"1.3.6.1.1.999.1.1.0": 1,
"1.3.6.1.1.999.1.2.0": 2,
"1.3.6.1.1.999.1.3.0": 3,
"1.3.6.1.1.999.1.4.0": 4,
"1.3.6.1.1.999.1.5.0": 5,
"1.3.6.1.1.999.1.6.0": 100,
"1.3.6.1.1.999.1.7.0": 200,
"1.3.6.1.1.999.1.8.0": 300,
"1.3.6.1.1.999.1.9.0": 400,
"1.3.6.1.1.999.1.10.0": 500
}
Related
I need to process a very large number of traps (10,000 per second). I have the simplest linux server. I tried to implement through threads, but cpu clogs up very quickly. Please tell me how to minimize the load on memory and processor, but at the same time process a large number of traps?
There is also work with the database. Writing to the database of taps
from pysnmp.entity import engine, config
from pysnmp.carrier.asyncore.dgram import udp
from pysnmp.entity.rfc3413 import ntfrcv
import psycopg2
from pysnmp.hlapi import SnmpEngine as Sm, CommunityData, UdpTransportTarget,\
ContextData, ObjectType, ObjectIdentity, getCmd
from datetime import datetime
import logging.config
from os import getpid, system, stat, path, chdir, listdir, remove
from threading import Thread
snmpEngine = engine.SnmpEngine()
config.addTransport(
snmpEngine,
udp.domainName + (1,),
udp.UdpTransport().openServerMode(('localhost', 162))
)
config.addV1System(snmpEngine, '', 'public')
class cbFun(Thread):
def __init__(self, snmpEngine, stateReference, contextEngineId, contextName,
varBinds, cbCtx):
Thread.__init__(self)
self.snmpEngine = snmpEngine
self.stateReference = stateReference
self.contextEngineId = contextEngineId
self.contextName = contextName
self.varBinds = varBinds
self.cbCtx = cbCtx
self.localConnected = False
self.localDb = None
self.errorFlag = False
self.start()
def run(self):
print('\n{0}New trap message received on {1} {0}'.format(
'-' * 7,
datetime.now().strftime('%d-%b-%Y at %H:%M:%S')))
execContext = self.snmpEngine.observer.getExecutionContext(
'rfc3412.receiveMessage:request')
print('Trap is coming from %s:%s' % execContext['transportAddress'])
dict_traps = {}
for name, val in self.varBinds:
oid = name.prettyPrint()
value = val.prettyPrint()
print(f'{oid} = {value}')
dict_traps.update({oid: value})
connectDB(dict_traps)
def connectDB(self, values):
connect = psycopg2.connect(dbname="test", user="test",
password="test",
host="test")
cursor = connect.cursor()
for key,value in values:
command = f"insert into TRAPS VALUES ({key}, {value})"
cursor.execute(command)
connect.commit()
connect.close()
ntfrcv.NotificationReceiver(snmpEngine, cbFun)
snmpEngine.transportDispatcher.jobStarted(1)
try:
snmpEngine.transportDispatcher.runDispatcher()
except:
snmpEngine.transportDispatcher.closeDispatcher()
raise
I am using a json socket server/client using the jsocket python library.
I made request to the server with a client that is open and close the connection each time.
My server crashes after the first time the connection is closed. How can I maintain the server running?
The server code:
import logging
import random
import sys
import time
import jsocket
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
class DataloggerServer(jsocket.ServerFactoryThread):
# This is an example factory thread, which the server factory will
# instantiate for each new connection.
def __init__(self):
super(DataloggerServer, self).__init__()
self.timeout = 2.0
# def isAlive(self):
# return True
def _process_message(self, obj):
# virtual method - Implementer must define protocol
logging.debug("Recived object %s", obj)
if obj != '':
if obj['type'] == "REQ_VAR_SAMPLE":
data = obj['data']
var_name = data["id"]
logging.debug("Got variable name %s", var_name)
if var_name == 'EMERGENCY':
value = 0 if random.random() < 0.75 else 1
logging.debug("variable value: %s", value)
elif var_name == "RPM_SPINDLE":
value = 70 if random.random() < 0.85 else 45
elif var_name == "FEEDRATE_OVERRIDE":
value = 50 if random.random() < 0.85 else 75
sample = {}
timestamp = int(time.time()*1000)
sample['timestamp'] = timestamp
sample['value'] = value
logging.debug("created sample %s", sample)
else:
logging.info(obj)
sample = {}
data = {'sample': sample}
res_obj = {
"type": "RESP_SAMPLE",
"data": data
}
logging.debug("Response object: %s", res_obj)
self.send_obj(res_obj)
if __name__ == '__main__':
server = jsocket.ServerFactory(DataloggerServer, address="0.0.0.0", port=12340)
server.timeout = 2.0
server.start()
print("Datalogger server started")
The client, basically, does this:
monitor_client.open()
value_timestamp, value = monitor_client.get_var_sample(variable_name)
monitor_client.close()
where monitor_client has a jsocket.JsonClient attribute object. Opens connection, does some request and closes the connection. When the client closes I got the following error at the server side:
DEBUG:root:Response object: {'type': 'RESP_SAMPLE', 'data': {'sample': {'timestamp': 1630055461803, 'value': 0}}}
File "/home/zia/.local/share/virtualenvs/vixion-edge-horus-notifier-sNtaDgls/lib/python3.9/site-packages/jsocket/tserver.py", line 162, in run
self._purge_threads()
File "/home/zia/.local/share/virtualenvs/vixion-edge-horus-notifier-sNtaDgls/lib/python3.9/site-packages/jsocket/tserver.py", line 189, in _purge_threads
if not t.isAlive():
AttributeError: 'DataloggerServer' object has no attribute 'isAlive'
INFO:jsocket.tserver:client connection broken, closing socket
DEBUG:jsocket:closing the connection socket
DEBUG:jsocket:closing main socket
(The first line is correct debug info).
I added the follwing method to the server:
def isAlive(self):
return self._isAlive
But the problem was related to the client. It was clossing the socket before finishing all the requests.
I've been following this tutorial on how to set up and use Apache Arrow Flight.
From the example, server.py:
import pyarrow as pa
import pyarrow.flight as fl
def create_table_int():
data = [
pa.array([1, 2, 3]),
pa.array([4, 5, 6])
]
return pa.Table.from_arrays(data, names=['column1', 'column2'])
def create_table_dict():
keys = pa.array(["x", "y", "z"], type=pa.utf8())
data = [
pa.chunked_array([
pa.DictionaryArray.from_arrays([0, 1, 2], keys),
pa.DictionaryArray.from_arrays([0, 1, 2], keys)
]),
pa.chunked_array([
pa.DictionaryArray.from_arrays([1, 1, 1], keys),
pa.DictionaryArray.from_arrays([2, 2, 2], keys)
])
]
return pa.Table.from_arrays(data, names=['column1', 'column2'])
class FlightServer(fl.FlightServerBase):
def __init__(self, location="grpc://0.0.0.0:8815", **kwargs):
super(FlightServer, self).__init__(location, **kwargs)
self.tables = {
b'table_int': create_table_int(),
b'table_dict': create_table_dict(),
}
def do_get(self, context, ticket):
table = self.tables[ticket.ticket]
return fl.RecordBatchStream(table)
# return fl.GeneratorStream(table.schema, table.to_batches(max_chunksize=1024))
def main():
FlightServer().serve()
if __name__ == '__main__':
main()
client.py
import argparse
import sys
import pyarrow as pa
import pyarrow.flight as fl
def get_by_ticket(args, client):
ticket_name = args.name
response = client.do_get(fl.Ticket(ticket_name)).read_all()
print_response(response)
def get_by_ticket_pandas(args, client):
ticket_name = args.name
response = client.do_get(fl.Ticket(ticket_name)).read_pandas()
print_response(response)
def print_response(data):
print("=== Response ===")
print(data)
print("================")
def main():
parser = argparse.ArgumentParser()
subcommands = parser.add_subparsers()
cmd_get_by_t = subcommands.add_parser('get_by_ticket')
cmd_get_by_t.set_defaults(action='get_by_ticket')
cmd_get_by_t.add_argument('-n', '--name', type=str, help="Name of the ticket to fetch.")
cmd_get_by_tp = subcommands.add_parser('get_by_ticket_pandas')
cmd_get_by_tp.set_defaults(action='get_by_ticket_pandas')
cmd_get_by_tp.add_argument('-n', '--name', type=str, help="Name of the ticket to fetch.")
args = parser.parse_args()
if not hasattr(args, 'action'):
parser.print_help()
sys.exit(1)
commands = {
'get_by_ticket': get_by_ticket,
'get_by_ticket_pandas': get_by_ticket_pandas,
}
client = fl.connect("grpc://0.0.0.0:8815")
commands[args.action](args, client)
if __name__ == '__main__':
main()
I'm running the server in a k8s cluster accessed through a service, with various other pods making calls to the server. This works fine EXCEPT when a second call is made to the server before the first call returns. In that case I'm not getting the proper response from the first call, but I don't seem to be getting any errors either. I'm not sure what the proper term is but is there a way to make the server "blocking" so it finishes processing the first call before it starts the second, or some other way of fixing this?
I am trying to implement a snmp agent with two different context name.I got some sample program .This is my agent side code look like . which I got from the snmplabs.com. I tried to implement it and I am facing some error while doing the snmpwalk.
"""
| $ snmpwalk -v3 -u usr-md5-none -l authNoPriv -A authkey1 -n context-a 127.0.0.1 .1.3.6
| $ snmpwalk -v3 -u usr-md5-none -l authNoPriv -A authkey1 -n context-b 127.0.0.1 .1.3.6
""" #
from pysnmp.entity import engine, config
from pysnmp.entity.rfc3413 import cmdrsp, context
from pysnmp.carrier.asyncore.dgram import udp
from pysnmp.smi import instrum, builder
from pysnmp.proto.api import v2c
import datetime
from pysnmp.smi import exval
# Create SNMP engine
snmpEngine = engine.SnmpEngine()
# Transport setup
# UDP over IPv4
config.addTransport(
snmpEngine,
udp.domainName,
udp.UdpTransport().openServerMode(('127.0.0.1', 161))
)
# SNMPv3/USM setup
# user: usr-md5-none, auth: MD5, priv NONE
config.addV3User(
snmpEngine, 'usr-md5-none',
config.usmHMACMD5AuthProtocol, 'authkey1'
)
# Allow full MIB access for each user at VACM
config.addVacmUser(snmpEngine, 3, 'usr-md5-none', 'authNoPriv', (1, 3, 6, 1, 2, 1), (1, 3, 6, 1, 2, 1))
# Create an SNMP context with default ContextEngineId (same as SNMP engine ID)
snmpContext = context.SnmpContext(snmpEngine)
class EchoMibInstrumController(instrum.AbstractMibInstrumController):
def readVars(self, varBinds, acInfo=(None, None)):
retItem = []
print ('varbinds', varBinds)
for ov in varBinds:
if str(ov[0]) == '1.3.6.1.2.1.1.1.0':
currentDT = datetime.datetime.now()
retItem.extend([(ov[0], v2c.OctetString('Hello World! It\'s currently: %s' % str(currentDT)))])
elif str(ov[0]) == '1.3.6.1.2.1.1.1.1':
retItem.extend([(ov[0], v2c.OctetString('You queried walk OID %s' % ov[0]))])
else:
retItem.extend([(ov[0], v2c.OctetString('You queried readVars OID %s' % str(currentDT)))])
return retItem
def readNextVars(self, varBinds, acInfo=(None, None)):
retItem = []
print ('Next varbinds', varBinds)
for ov in varBinds:
if str(ov[0]) == '1.3.6.1.2.1.1.1.0':
currentDT = datetime.datetime.now()
retItem.extend([(ov[0], v2c.OctetString('Hello World! It\'s currently: %s' % str(currentDT)))])
elif str(ov[0]) == '1.3.6.1.2.1.1.1.1':
retItem.extend([(ov[0], v2c.OctetString('You queried walk OID %s' % ov[0]))])
else:
currentDT = datetime.datetime.now()
retItem.extend([(ov[0], v2c.OctetString('You queried readNextVars OID %s' % str(currentDT)))])
return retItem
mibBuilder = snmpContext.getMibInstrum().getMibBuilder()
# mibInstrum = instrum.MibInstrumController(mibBuilder)
MibScalar, MibScalarInstance = mibBuilder.importSymbols(
'SNMPv2-SMI', 'MibScalar', 'MibScalarInstance'
)
class MyStaticMibScalarInstance(MibScalarInstance):
def getValue(self, name, idx):
currentDT = datetime.datetime.now()
return self.getSyntax().clone(
'Hello World! It\'s currently: ' + str(currentDT)
)
mibBuilder.exportSymbols(
'__MY_MIB', MibScalar((1, 3, 6, 1, 2, 1, 1, 1), v2c.OctetString()),
MyStaticMibScalarInstance((1, 3, 6, 1, 2, 1, 1, 1), (0,), v2c.OctetString())
)
# Create multiple independent trees of MIB managed objects (empty so far)
mibTreeA = EchoMibInstrumController()
mibTreeB = instrum.MibInstrumController(builder.MibBuilder())
# Register MIB trees at distinct SNMP Context names
snmpContext.registerContextName(v2c.OctetString('context-a'), mibTreeA)
snmpContext.registerContextName(v2c.OctetString('context-b'), mibTreeB)
oid, val = (), None
# logging.debug('done')
# Register SNMP Applications at the SNMP engine for particular SNMP context
cmdrsp.GetCommandResponder(snmpEngine, snmpContext)
cmdrsp.SetCommandResponder(snmpEngine, snmpContext)
cmdrsp.NextCommandResponder(snmpEngine, snmpContext)
cmdrsp.BulkCommandResponder(snmpEngine, snmpContext)
# Register an imaginary never-ending job to keep I/O dispatcher running forever
snmpEngine.transportDispatcher.jobStarted(1)
# Run I/O dispatcher which would receive queries and send responses
try:
snmpEngine.transportDispatcher.runDispatcher()
except:
snmpEngine.transportDispatcher.closeDispatcher()
raise
when I ever I do snmpwalk like
snmpwalk -v3 -u usr-md5-none -l authNoPriv -A authkey1 -n context-a 192.168.2.233 .1.3.6
It giving reply like
SNMPv2-SMI::dod = STRING: "You queried readNextVars OID 2019-11-21 19:18:22.566000"
Error: OID not increasing: SNMPv2-SMI::dod
>= SNMPv2-SMI::dod
So my doubt is what I am doing wrong and how to increase this OID ?
Your server (agent) should never return lesser or equal OIDs than arrived with GETNEXT/GETBULK commands.
With your code, make sure that readNextVars always returns increasing OIDs.
SNMP manager has a check for that condition, otherwise manager-agent pair may engage in an endless exchange.
I've made my first Python SNMP agent from a custom MIB .
It is supporting SNMP GET and SET requests, but it returns values pre-determined by me.
How do I make my function's returned varbinds' be the values that users have supplied via their SNMP SETs?
The code:
from pysnmp.entity import engine, config
from pysnmp import debug
from pysnmp.entity.rfc3413 import cmdrsp, context, ntforg
from pysnmp.carrier.asynsock.dgram import udp
from pysnmp.proto.rfc1902 import OctetString
from pysnmp.smi import builder
import threading
import collections
import time
#can be useful
debug.setLogger(debug.Debug('all'))
MibObject = collections.namedtuple('MibObject', ['mibName',
'objectType', 'valueFunc'])
class Mib(object):
"""Stores the data we want to serve.
"""
def __init__(self):
self._lock = threading.RLock()
self._system_channel = 0
self._system_programmed = 0
def getSystemModel(self):
return "Teste 1 Ok"
def getTransportStream(self):
return "Teste 2 Ok"
def getSystemProgrammedPower(self):
with self._lock:
return self._system_programmed
def setSystemProgrammedPower(self, value):
with self._lock:
self._system_programmed = value
def getSystemChannel(self):
with self._lock:
return self._system_channel
def setSystemChannel(self, value):
with self._lock:
self._system_channel = value
def createVariable(SuperClass, getValue, *args):
"""This is going to create a instance variable that we can export.
getValue is a function to call to retreive the value of the scalar
"""
class Var(SuperClass):
def readGet(self, name, *args):
return name, self.syntax.clone(getValue())
return Var(*args)
class SNMPAgent(object):
"""Implements an Agent that serves the custom MIB and
can send a trap.
"""
def __init__(self, mibObjects):
"""
mibObjects - a list of MibObject tuples that this agent
will serve
"""
#each SNMP-based application has an engine
self._snmpEngine = engine.SnmpEngine()
#open a UDP socket to listen for snmp requests
config.addSocketTransport(
self._snmpEngine,
udp.domainName,
udp.UdpTransport().openServerMode(('127.0.0.1', 161))
)
# SNMPv3/USM setup
config.addV1System(self._snmpEngine, 'test-agent', 'public')
# user: usr-sha-none, auth: SHA, priv NONE
config.addV3User(
self._snmpEngine, 'test-user',
config.usmHMACMD5AuthProtocol, 'authkey1',
config.usmDESPrivProtocol, 'privkey1'
)
# Allow full MIB access for each user at VACM
config.addContext(self._snmpEngine, '')
config.addRwUser(self._snmpEngine, 1, 'test-agent', 'noAuthNoPriv', (1,3,6)) # v1
config.addRwUser(self._snmpEngine, 2, 'test-agent', 'noAuthNoPriv', (1,3,6)) # v2c
config.addRwUser(self._snmpEngine, 3, 'test-user', 'authPriv', (1,3,6)) # v3
#each app has one or more contexts
self._snmpContext = context.SnmpContext(self._snmpEngine)
#the builder is used to load mibs. tell it to look in the
#current directory for our new MIB. We'll also use it to
#export our symbols later
mibBuilder = self._snmpContext.getMibInstrum().getMibBuilder()
mibSources = mibBuilder.getMibSources() + (builder.DirMibSource('.'),)
mibBuilder.setMibSources(*mibSources)
#our variables will subclass this since we only have scalar types
#can't load this type directly, need to import it
MibScalarInstance, = mibBuilder.importSymbols('SNMPv2-SMI',
'MibScalarInstance')
#export our custom mib
for mibObject in mibObjects:
nextVar, = mibBuilder.importSymbols(mibObject.mibName,
mibObject.objectType)
instance = createVariable(MibScalarInstance,
mibObject.valueFunc,
nextVar.name, (0,),
nextVar.syntax)
#need to export as <var name>Instance
instanceDict = {str(nextVar.name)+"Instance":instance}
mibBuilder.exportSymbols(mibObject.mibName,
**instanceDict)
# tell pysnmp to respotd to get, set, getnext, and getbulk
cmdrsp.GetCommandResponder(self._snmpEngine, self._snmpContext)
cmdrsp.NextCommandResponder(self._snmpEngine, self._snmpContext)
cmdrsp.BulkCommandResponder(self._snmpEngine, self._snmpContext)
cmdrsp.SetCommandResponder(self._snmpEngine, self._snmpContext)
def setTrapReceiver(self, host, community):
"""Send traps to the host using community string community
"""
config.addV1System(self._snmpEngine, 'nms-area', community)
config.addVacmUser(self._snmpEngine, 2, 'nms-area', 'noAuthNoPriv',
notifySubTree=(1,3,6,1,4,1))
config.addTargetParams(self._snmpEngine,
'nms-creds', 'nms-area', 'noAuthNoPriv', 1)
config.addTargetAddr(self._snmpEngine, 'my-nms', udp.domainName,
(host, 162), 'nms-creds',
tagList='all-my-managers')
#set last parameter to 'notification' to have it send
#informs rather than unacknowledged traps
config.addNotificationTarget(
self._snmpEngine, 'test-notification', 'my-filter',
'all-my-managers', 'trap')
def sendTrap(self):
print "Sending trap"
ntfOrg = ntforg.NotificationOriginator(self._snmpContext)
errorIndication = ntfOrg.sendNotification(
self._snmpEngine,
'test-notification',
('LINEARISDBLQ-MIB', 'systemCurrentAlarmTrap'),
())
def serve_forever(self):
print "Starting agent"
self._snmpEngine.transportDispatcher.jobStarted(1)
try:
self._snmpEngine.transportDispatcher.runDispatcher()
except:
self._snmpEngine.transportDispatcher.closeDispatcher()
raise
class Worker(threading.Thread):
"""Just to demonstrate updating the MIB
and sending traps
"""
def __init__(self, agent, mib):
threading.Thread.__init__(self)
self._agent = agent
self._mib = mib
self.setDaemon(True)
def run(self):
while True:
time.sleep(3)
self._mib.setSystemChannel(mib.getSystemChannel()+1)
self._agent.sendTrap()
if __name__ == '__main__':
mib = Mib()
objects = [MibObject('LINEARISDBLQ-MIB', 'systemModel', mib.getSystemModel),
MibObject('LINEARISDBLQ-MIB', 'systemChannel', mib.getSystemChannel),
MibObject('LINEARISDBLQ-MIB', 'transportStream', mib.getTransportStream),
MibObject('LINEARISDBLQ-MIB', 'systemProgrammedPower', mib.getSystemProgrammedPower)]
agent = SNMPAgent(objects)
agent.setTrapReceiver('127.0.0.1', 'traps')
Worker(agent, mib).start()
try:
agent.serve_forever()
except KeyboardInterrupt:
print "Shutting down"
Looks like you designed your own MIB structures which are not connected to pysnmp engine.
To make your MIB variables available to pysnmp-based Agent, you have to either A) inherit your MIB objects from pysnmp's MibScalarInstance class or B) build your own MIB Controller supporting pysnmp-compatible interfaces.
For more information please refer to the above examples.