How to create a high-load SNMP trap receiver in Python?

I need to process a very large number of SNMP traps (about 10,000 per second) on a basic Linux server. I tried implementing this with threads, but the CPU gets saturated very quickly. How can I minimize memory and CPU load while still processing this many traps?
The receiver also works with a database: each trap is written to PostgreSQL.
from pysnmp.entity import engine, config
from pysnmp.carrier.asyncore.dgram import udp
from pysnmp.entity.rfc3413 import ntfrcv
import psycopg2
from pysnmp.hlapi import SnmpEngine as Sm, CommunityData, UdpTransportTarget, \
    ContextData, ObjectType, ObjectIdentity, getCmd
from datetime import datetime
import logging.config
from os import getpid, system, stat, path, chdir, listdir, remove
from threading import Thread

snmpEngine = engine.SnmpEngine()

config.addTransport(
    snmpEngine,
    udp.domainName + (1,),
    udp.UdpTransport().openServerMode(('localhost', 162))
)
config.addV1System(snmpEngine, '', 'public')


class cbFun(Thread):
    def __init__(self, snmpEngine, stateReference, contextEngineId, contextName,
                 varBinds, cbCtx):
        Thread.__init__(self)
        self.snmpEngine = snmpEngine
        self.stateReference = stateReference
        self.contextEngineId = contextEngineId
        self.contextName = contextName
        self.varBinds = varBinds
        self.cbCtx = cbCtx
        self.localConnected = False
        self.localDb = None
        self.errorFlag = False
        self.start()

    def run(self):
        print('\n{0}New trap message received on {1} {0}'.format(
            '-' * 7,
            datetime.now().strftime('%d-%b-%Y at %H:%M:%S')))
        execContext = self.snmpEngine.observer.getExecutionContext(
            'rfc3412.receiveMessage:request')
        print('Trap is coming from %s:%s' % execContext['transportAddress'])
        dict_traps = {}
        for name, val in self.varBinds:
            oid = name.prettyPrint()
            value = val.prettyPrint()
            print(f'{oid} = {value}')
            dict_traps[oid] = value
        self.connectDB(dict_traps)

    def connectDB(self, values):
        connect = psycopg2.connect(dbname="test", user="test",
                                   password="test",
                                   host="test")
        cursor = connect.cursor()
        for key, value in values.items():
            # Parameterised query avoids quoting problems and SQL injection
            cursor.execute("INSERT INTO traps VALUES (%s, %s)", (key, value))
        connect.commit()
        connect.close()


ntfrcv.NotificationReceiver(snmpEngine, cbFun)
snmpEngine.transportDispatcher.jobStarted(1)
try:
    snmpEngine.transportDispatcher.runDispatcher()
except Exception:
    snmpEngine.transportDispatcher.closeDispatcher()
    raise
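One direction that is often suggested for this kind of load is to do no I/O in the notification callback at all and let a single worker drain a bounded queue, batching the PostgreSQL inserts. The following is only a minimal sketch of that pattern, not a tested answer; the queue size, batch size and connection parameters are illustrative assumptions, and the callback is shown as a plain function instead of one thread per trap:

# Sketch only: one writer thread draining a bounded queue and batching inserts,
# instead of one thread and one DB connection per trap.
import queue
import threading
import psycopg2

trap_queue = queue.Queue(maxsize=100000)   # bounded, so memory stays capped

def writer():
    conn = psycopg2.connect(dbname="test", user="test", password="test", host="test")
    cur = conn.cursor()
    batch = []
    while True:
        batch.append(trap_queue.get())          # block until at least one trap arrives
        while len(batch) < 500:                 # then opportunistically fill a batch
            try:
                batch.append(trap_queue.get_nowait())
            except queue.Empty:
                break
        cur.executemany("INSERT INTO traps VALUES (%s, %s)", batch)
        conn.commit()
        batch.clear()

threading.Thread(target=writer, daemon=True).start()

# In the notification callback, only enqueue -- no printing, no DB work:
def cbFun(snmpEngine, stateReference, contextEngineId, contextName, varBinds, cbCtx):
    for name, val in varBinds:
        trap_queue.put((name.prettyPrint(), val.prettyPrint()))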

Related

How to do this signal handling with less code/overhead? Perhaps with threading? Or even better, without threads or processes?

In order to run code on Fargate Spot, I wrote API code to handle SIGTERM in such a way that (1) an instance would report itself as unhealthy once it received SIGTERM and (2) the API would not terminate until it had finished all calls less than 20 seconds old.
I accomplished this in a hacky way using a sqlite database and a kill script called with Popen, as shown in the following MWE:
signalhandler.py:
import time
import signal
import sqlite3
import numpy as np
import os
from subprocess import Popen
from flask_restx import Resource, Api, reqparse, abort
import flask
import psutil

app = flask.Flask(__name__)
api = Api(app, title='Signal tester')
parser = reqparse.RequestParser()


def method():
    time.sleep(5)
    return {'statement': 'That was relaxing! By the way, I\'m %s'
                         % os.getpid()}


class FunctionWrapper():
    def __init__(self):
        self.conn = sqlite3.Connection('/tmp/api_calls_%s.sqlite'
                                       % os.getpid(),
                                       timeout=1)
        self.c = self.conn.cursor()
        self.c.execute('CREATE TABLE IF NOT EXISTS'
                       + ' running (id INT PRIMARY KEY, time INT)')
        self.c.execute('PRAGMA journal_mode=WAL')
        self.conn.commit()
        self.method_defaultargs = {}
        np.random.seed()

    def call_method(self, **args):
        try:
            while True:
                id_ = np.random.randint(2**32)
                try:
                    self.c.execute('INSERT INTO running VALUES (%s, %s)'
                                   % (id_, int(time.time())))
                    self.conn.commit()
                    break
                except Exception:
                    pass
            args = {**self.method_defaultargs, **args}
            return_value = method(**args)
            self.c.execute('DELETE FROM running WHERE id=%s' % id_)
            self.conn.commit()
        except Exception:
            self.c.execute('DELETE FROM running WHERE id=%s' % id_)
            self.conn.commit()
        return return_value


class Killer():
    def __init__(self):
        self.being_killed = False
        signal.signal(signal.SIGTERM, self.exit_gracefully)

    def exit_gracefully(self, *args):
        self.being_killed = True
        self.p = os.getpid()
        Popen(['python3', 'kill.py', str(self.p)])


functionWrapper = FunctionWrapper()
killer = Killer()


@api.route('/sleep', endpoint='sleep', methods=['GET'])
class ApiThing(Resource):
    @api.expect(parser)
    def get(self):
        return functionWrapper.call_method()


@api.route('/healthcheck', endpoint='healthcheck', methods=['GET'])
class HealthCheck(Resource):
    def get(self):
        if killer.being_killed:
            abort(503, {'health': 'unhealthy',
                        'reason': 'program is terminating'})
        if psutil.cpu_percent(percpu=False, interval=2.5) > 90:
            abort(503, {'health': 'unhealthy',
                        'reason': 'cpu load too high'})
        if killer.being_killed:
            abort(503, {'health': 'unhealthy',
                        'reason': 'program is terminating'})
        return {'health': 'healthy'}
kill.py:
import time
import sys
import sqlite3
import signal
import os

conn = sqlite3.Connection('/tmp/api_calls_%s.sqlite' % sys.argv[1])
c = conn.cursor()
value = True
while value:
    sql = 'SELECT id FROM running WHERE time > %s' % (time.time() - 20)
    c.execute(sql)
    value = c.fetchone()
    time.sleep(1)
time.sleep(1)
os.kill(int(sys.argv[1]), signal.SIGKILL)
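One possible lighter-weight direction, sketched here only under the assumption that everything runs in a single process (names like _inflight and _drain_and_exit are invented for illustration, and this is not tested against Fargate): keep the in-flight calls in memory and let the SIGTERM handler wait them out in a daemon thread, so no sqlite file and no separate kill.py process are needed.

# Sketch of an in-process alternative (illustrative only).
import os
import signal
import threading
import time

_inflight = {}                      # id -> start time of each running call
_lock = threading.Lock()
being_killed = False

def call_method():
    call_id = object()
    with _lock:
        _inflight[call_id] = time.time()
    try:
        return method()             # the same slow handler as above
    finally:
        with _lock:
            del _inflight[call_id]

def _drain_and_exit():
    # Wait until no call younger than 20 s is still running, then terminate.
    while True:
        with _lock:
            busy = any(time.time() - t < 20 for t in _inflight.values())
        if not busy:
            os.kill(os.getpid(), signal.SIGKILL)
        time.sleep(1)

def _on_sigterm(*_):
    global being_killed
    being_killed = True              # healthcheck can report unhealthy from here on
    threading.Thread(target=_drain_and_exit, daemon=True).start()

signal.signal(signal.SIGTERM, _on_sigterm)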

How to add configuration settings for sasl.mechanism PLAIN (API) and GSSAPI (Kerberos) authentication in a Python script

I need some help setting the configuration for sasl.mechanism PLAIN (API) and GSSAPI (Kerberos) authentication.
We are using Confluent Kafka. There are two scripts: a Python script and a bash script that calls the Python one. The Python script is below.
Thanks in advance for the help!
import json
import os
import string
import random
import socket
import uuid
import re
from datetime import datetime
import time
import hashlib
import math
import sys
from functools import cache
from confluent_kafka import Producer, KafkaError, KafkaException

topic_name = os.environ['TOPIC_NAME']
partition_count = int(os.environ['PARTITION_COUNT'])
message_key_template = json.loads(os.environ['KEY_TEMPLATE'])
message_value_template = json.loads(os.environ['VALUE_TEMPLATE'])
message_header_template = json.loads(os.environ['HEADER_TEMPLATE'])
bootstrap_servers = os.environ['BOOTSTRAP_SERVERS']
perf_counter_batch_size = int(os.environ.get('PERF_COUNTER_BATCH_SIZE', 100))
messages_per_aggregate = int(os.environ.get('MESSAGES_PER_AGGREGATE', 1))
max_message_count = int(os.environ.get('MAX_MESSAGE_COUNT', sys.maxsize))


def error_cb(err):
    """ The error callback is used for generic client errors. These
    errors are generally to be considered informational as the client will
    automatically try to recover from all errors, and no extra action
    is typically required by the application.
    For this example however, we terminate the application if the client
    is unable to connect to any broker (_ALL_BROKERS_DOWN) and on
    authentication errors (_AUTHENTICATION). """
    print("Client error: {}".format(err))
    if err.code() == KafkaError._ALL_BROKERS_DOWN or \
            err.code() == KafkaError._AUTHENTICATION:
        # Any exception raised from this callback will be re-raised from the
        # triggering flush() or poll() call.
        raise KafkaException(err)


def acked(err, msg):
    if err is not None:
        print("Failed to send message: %s: %s" % (str(msg), str(err)))


producer_configs = {
    'bootstrap.servers': bootstrap_servers,
    'client.id': socket.gethostname(),
    'error_cb': error_cb
}
# TODO: Need to support sasl.mechanism PLAIN (API) and GSSAPI (Kerberos) authentication.
# TODO: Need to support truststores for connecting to private DCs.

producer = Producer(producer_configs)


# generates a random value if it is not cached in the template_values dictionary
def get_templated_value(term, template_values):
    if term not in template_values:
        template_values[term] = str(uuid.uuid4())
    return template_values[term]


def fill_template_value(value, template_values):
    str_value = str(value)
    template_regex = '{{(.+?)}}'
    templated_terms = re.findall(template_regex, str_value)
    for term in templated_terms:
        str_value = str_value.replace(f"{{{{{term}}}}}", get_templated_value(term, template_values))
    return str_value


def fill_template(template, templated_terms):
    # TODO: Need to address metadata field, as it's treated as a string instead of a nested object.
    return {field: fill_template_value(value, templated_terms) for field, value in template.items()}


@cache
def get_partition(lock_id):
    bits = 128
    bucket_size = 2**bits / partition_count
    partition = int(hashlib.md5(lock_id.encode('utf-8')).hexdigest(), 16) / bucket_size
    return math.floor(partition)


sequence_number = int(time.time() * 1000)
sequence_number = 0
message_count = 0
producing = True
start_time = time.perf_counter()
aggregate_message_counter = 0
# cache for templated term values so that they match across the different templates
templated_values = {}

try:
    while producing:
        sequence_number += 1
        aggregate_message_counter += 1
        message_count += 1
        if aggregate_message_counter % messages_per_aggregate == 0:
            # reset templated values
            templated_values = {}
        else:
            for term in list(templated_values):
                if term not in ['aggregateId', 'tenantId']:
                    del templated_values[term]
        # Fill in templated field values
        message_key = fill_template(message_key_template, templated_values)
        message_value = fill_template(message_value_template, templated_values)
        message_header = fill_template(message_header_template, templated_values)
        ts = datetime.utcnow().isoformat()[:-3] + 'Z'
        message_header['timestamp'] = ts
        message_header['sequence_number'] = str(sequence_number)
        message_value['timestamp'] = ts
        message_value['sequenceNumber'] = sequence_number
        lock_id = message_header['lock_id']
        # partition by lock_id, since key could be random, but a given aggregate_id
        # should ALWAYS resolve to the same partition, regardless of key.
        partition = get_partition(lock_id)
        # Send message
        producer.produce(topic_name, partition=partition, key=json.dumps(message_key),
                         value=json.dumps(message_value), headers=message_header, callback=acked)
        if sequence_number % perf_counter_batch_size == 0:
            producer.flush()
            end_time = time.perf_counter()
            total_duration = end_time - start_time
            messages_per_second = perf_counter_batch_size / total_duration
            print(f'{messages_per_second} messages/second')
            # reset start time
            start_time = time.perf_counter()
        if message_count >= max_message_count:
            break
except Exception as e:
    print('ERROR: %s' % e)
    sys.exit(1)
finally:
    producer.flush()
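The two TODOs next to producer_configs are what the question is about. As a hedged sketch (broker addresses, credentials, principal and keytab paths are placeholders, and the exact values depend on your cluster), librdkafka-style properties for the two mechanisms look roughly like this:

# Illustrative only -- hostnames, credentials and keytab paths are placeholders.

# SASL/PLAIN over TLS (API-key style credentials):
plain_configs = {
    'bootstrap.servers': bootstrap_servers,
    'security.protocol': 'SASL_SSL',
    'sasl.mechanism': 'PLAIN',
    'sasl.username': '<api-key>',
    'sasl.password': '<api-secret>',
    # 'ssl.ca.location': '/path/to/ca.pem',   # if a private CA / truststore is needed
}

# SASL/GSSAPI (Kerberos):
gssapi_configs = {
    'bootstrap.servers': bootstrap_servers,
    'security.protocol': 'SASL_SSL',
    'sasl.mechanism': 'GSSAPI',
    'sasl.kerberos.service.name': 'kafka',
    'sasl.kerberos.principal': 'svc-producer@EXAMPLE.COM',
    'sasl.kerberos.keytab': '/etc/security/keytabs/svc-producer.keytab',
}

# Merge whichever mechanism applies into the existing config before creating the Producer:
producer = Producer({**producer_configs, **plain_configs})  # or **gssapi_configs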

django+celery+ansibleApi return None

Calling the Ansible API from Celery in Python returns None; I have been searching for a few days. It works fine when I call the deploy function without Celery, but with Celery my call into the Ansible API returns None.
Steps to reproduce:
1.tasks.py
from celery import shared_task
from .deploy_tomcat2 import django_process


@shared_task
def deploy(jira_num):
    # return 'hello world {0}'.format(jira_num)
    # rdb.set_trace()
    return django_process(jira_num)
2.deploy_tomcat2.py
from .AnsibleApi import CallApi


def django_process(jira_num):
    server = '10.10.10.30'
    name = 'abc'
    port = 11011
    code = 'efs'
    jdk = '1.12.13'
    jvm = 'xxxx'
    if str.isdigit(jira_num):
        # import pdb
        # pdb.set_trace()
        call = CallApi(server, name, port, code, jdk, jvm)
        return call.run_task()
3.AnsibleApi.py
#!/usr/bin/env python
import logging
from .Logger import Logger
from django.conf import settings
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.plugins.callback import CallbackBase

Log = Logger('/tmp/auto_deploy_tomcat.log', logging.INFO)


class ResultCallback(CallbackBase):
    def __init__(self, *args, **kwargs):
        super(ResultCallback, self).__init__(*args, **kwargs)
        self.host_ok = {}
        self.host_unreachable = {}
        self.host_failed = {}

    def v2_runner_on_unreachable(self, result):
        self.host_unreachable[result._host.get_name()] = result

    def v2_runner_on_ok(self, result, *args, **kwargs):
        self.host_ok[result._host.get_name()] = result

    def v2_runner_on_failed(self, result, *args, **kwargs):
        self.host_failed[result._host.get_name()] = result


class CallApi(object):
    user = settings.SSH_USER
    ssh_private_key_file = settings.SSH_PRIVATE_KEY_FILE
    results_callback = ResultCallback()
    Options = namedtuple('Options',
                         ['connection', 'module_path', 'private_key_file', 'forks', 'become',
                          'become_method', 'become_user', 'check'])

    def __init__(self, ip, name, port, code, jdk, jvm):
        self.ip = ip
        self.name = name
        self.port = port
        self.code = code
        self.jdk = jdk
        self.jvm = jvm
        self.results_callback = ResultCallback()
        self.results_raw = {}

    def _gen_user_task(self):
        tasks = []
        deploy_script = 'autodeploy/tomcat_deploy.sh'
        dst_script = '/tmp/tomcat_deploy.sh'
        cargs = dict(src=deploy_script, dest=dst_script, owner=self.user, group=self.user, mode='0755')
        args = "%s %s %d %s %s '%s'" % (dst_script, self.name, self.port, self.code, self.jdk, self.jvm)
        tasks.append(dict(action=dict(module='copy', args=cargs), register='shell_out'))
        tasks.append(dict(action=dict(module='debug', args=dict(msg='{{shell_out}}'))))
        # tasks.append(dict(action=dict(module='command', args=args)))
        # tasks.append(dict(action=dict(module='command', args=args), register='result'))
        # tasks.append(dict(action=dict(module='debug', args=dict(msg='{{result.stdout}}'))))
        self.tasks = tasks

    def _set_option(self):
        self._gen_user_task()
        self.variable_manager = VariableManager()
        self.loader = DataLoader()
        self.options = self.Options(connection='smart', module_path=None,
                                    private_key_file=self.ssh_private_key_file, forks=None,
                                    become=True, become_method='sudo', become_user='root', check=False)
        self.inventory = Inventory(loader=self.loader, variable_manager=self.variable_manager,
                                   host_list=[self.ip])
        self.variable_manager.set_inventory(self.inventory)
        play_source = dict(
            name="auto deploy tomcat",
            hosts=self.ip,
            remote_user=self.user,
            gather_facts='no',
            tasks=self.tasks
        )
        self.play = Play().load(play_source, variable_manager=self.variable_manager, loader=self.loader)

    def run_task(self):
        self.results_raw = {'success': {}, 'failed': {}, 'unreachable': {}}
        tqm = None
        from celery.contrib import rdb; rdb.set_trace()
        # import pdb; pdb.set_trace()
        self._set_option()
        try:
            tqm = TaskQueueManager(
                inventory=self.inventory,
                variable_manager=self.variable_manager,
                loader=self.loader,
                options=self.options,
                passwords=None,
                stdout_callback=self.results_callback,
            )
            result = tqm.run(self.play)
        finally:
            if tqm is not None:
                tqm.cleanup()
        for host, result in self.results_callback.host_ok.items():
            self.results_raw['success'][host] = result._result
        for host, result in self.results_callback.host_failed.items():
            self.results_raw['failed'][host] = result._result
        for host, result in self.results_callback.host_unreachable.items():
            self.results_raw['unreachable'][host] = result._result
        Log.info("result is :%s" % self.results_raw)
        return self.results_raw
4.celery worker
celery -A jira worker -Q queue.ops.deploy -n "deploy.%h" -l info
5.produce msg:
deploy.apply_async(args=['150'], queue='queue.ops.deploy', routing_key='ops.deploy')
It seems OK.
The only question is: is None really what the deploy task returns?
It would help if you could post your Celery worker log.
There are two methods to solve this problem; both disable the assert:
1. Where Celery starts, set export PYTHONOPTIMIZE=1, or start Celery with the -O OPTIMIZATION parameter.
2. Disable the assertion in the Python multiprocessing package, process.py line 102:
assert not _current_process._config.get('daemon'), \
    'daemonic processes are not allowed to have children'

Return value VarBinds pysnmp

I've made my first Python SNMP agent from a custom MIB.
It supports SNMP GET and SET requests, but it returns values pre-determined by me.
How do I make my functions return the varbind values that users have supplied via their SNMP SETs?
The code:
from pysnmp.entity import engine, config
from pysnmp import debug
from pysnmp.entity.rfc3413 import cmdrsp, context, ntforg
from pysnmp.carrier.asynsock.dgram import udp
from pysnmp.proto.rfc1902 import OctetString
from pysnmp.smi import builder
import threading
import collections
import time

# can be useful
debug.setLogger(debug.Debug('all'))

MibObject = collections.namedtuple('MibObject', ['mibName',
                                                 'objectType', 'valueFunc'])


class Mib(object):
    """Stores the data we want to serve.
    """
    def __init__(self):
        self._lock = threading.RLock()
        self._system_channel = 0
        self._system_programmed = 0

    def getSystemModel(self):
        return "Teste 1 Ok"

    def getTransportStream(self):
        return "Teste 2 Ok"

    def getSystemProgrammedPower(self):
        with self._lock:
            return self._system_programmed

    def setSystemProgrammedPower(self, value):
        with self._lock:
            self._system_programmed = value

    def getSystemChannel(self):
        with self._lock:
            return self._system_channel

    def setSystemChannel(self, value):
        with self._lock:
            self._system_channel = value


def createVariable(SuperClass, getValue, *args):
    """This is going to create an instance variable that we can export.
    getValue is a function to call to retrieve the value of the scalar
    """
    class Var(SuperClass):
        def readGet(self, name, *args):
            return name, self.syntax.clone(getValue())
    return Var(*args)


class SNMPAgent(object):
    """Implements an Agent that serves the custom MIB and
    can send a trap.
    """
    def __init__(self, mibObjects):
        """
        mibObjects - a list of MibObject tuples that this agent
        will serve
        """
        # each SNMP-based application has an engine
        self._snmpEngine = engine.SnmpEngine()

        # open a UDP socket to listen for snmp requests
        config.addSocketTransport(
            self._snmpEngine,
            udp.domainName,
            udp.UdpTransport().openServerMode(('127.0.0.1', 161))
        )

        # SNMPv3/USM setup
        config.addV1System(self._snmpEngine, 'test-agent', 'public')
        # user: usr-sha-none, auth: SHA, priv NONE
        config.addV3User(
            self._snmpEngine, 'test-user',
            config.usmHMACMD5AuthProtocol, 'authkey1',
            config.usmDESPrivProtocol, 'privkey1'
        )

        # Allow full MIB access for each user at VACM
        config.addContext(self._snmpEngine, '')
        config.addRwUser(self._snmpEngine, 1, 'test-agent', 'noAuthNoPriv', (1, 3, 6))  # v1
        config.addRwUser(self._snmpEngine, 2, 'test-agent', 'noAuthNoPriv', (1, 3, 6))  # v2c
        config.addRwUser(self._snmpEngine, 3, 'test-user', 'authPriv', (1, 3, 6))       # v3

        # each app has one or more contexts
        self._snmpContext = context.SnmpContext(self._snmpEngine)

        # the builder is used to load mibs. tell it to look in the
        # current directory for our new MIB. We'll also use it to
        # export our symbols later
        mibBuilder = self._snmpContext.getMibInstrum().getMibBuilder()
        mibSources = mibBuilder.getMibSources() + (builder.DirMibSource('.'),)
        mibBuilder.setMibSources(*mibSources)

        # our variables will subclass this since we only have scalar types
        # can't load this type directly, need to import it
        MibScalarInstance, = mibBuilder.importSymbols('SNMPv2-SMI',
                                                      'MibScalarInstance')
        # export our custom mib
        for mibObject in mibObjects:
            nextVar, = mibBuilder.importSymbols(mibObject.mibName,
                                                mibObject.objectType)
            instance = createVariable(MibScalarInstance,
                                      mibObject.valueFunc,
                                      nextVar.name, (0,),
                                      nextVar.syntax)
            # need to export as <var name>Instance
            instanceDict = {str(nextVar.name) + "Instance": instance}
            mibBuilder.exportSymbols(mibObject.mibName,
                                     **instanceDict)

        # tell pysnmp to respond to get, set, getnext, and getbulk
        cmdrsp.GetCommandResponder(self._snmpEngine, self._snmpContext)
        cmdrsp.NextCommandResponder(self._snmpEngine, self._snmpContext)
        cmdrsp.BulkCommandResponder(self._snmpEngine, self._snmpContext)
        cmdrsp.SetCommandResponder(self._snmpEngine, self._snmpContext)

    def setTrapReceiver(self, host, community):
        """Send traps to the host using community string community
        """
        config.addV1System(self._snmpEngine, 'nms-area', community)
        config.addVacmUser(self._snmpEngine, 2, 'nms-area', 'noAuthNoPriv',
                           notifySubTree=(1, 3, 6, 1, 4, 1))
        config.addTargetParams(self._snmpEngine,
                               'nms-creds', 'nms-area', 'noAuthNoPriv', 1)
        config.addTargetAddr(self._snmpEngine, 'my-nms', udp.domainName,
                             (host, 162), 'nms-creds',
                             tagList='all-my-managers')
        # set last parameter to 'notification' to have it send
        # informs rather than unacknowledged traps
        config.addNotificationTarget(
            self._snmpEngine, 'test-notification', 'my-filter',
            'all-my-managers', 'trap')

    def sendTrap(self):
        print("Sending trap")
        ntfOrg = ntforg.NotificationOriginator(self._snmpContext)
        errorIndication = ntfOrg.sendNotification(
            self._snmpEngine,
            'test-notification',
            ('LINEARISDBLQ-MIB', 'systemCurrentAlarmTrap'),
            ())

    def serve_forever(self):
        print("Starting agent")
        self._snmpEngine.transportDispatcher.jobStarted(1)
        try:
            self._snmpEngine.transportDispatcher.runDispatcher()
        except Exception:
            self._snmpEngine.transportDispatcher.closeDispatcher()
            raise


class Worker(threading.Thread):
    """Just to demonstrate updating the MIB
    and sending traps
    """
    def __init__(self, agent, mib):
        threading.Thread.__init__(self)
        self._agent = agent
        self._mib = mib
        self.setDaemon(True)

    def run(self):
        while True:
            time.sleep(3)
            self._mib.setSystemChannel(mib.getSystemChannel() + 1)
            self._agent.sendTrap()


if __name__ == '__main__':
    mib = Mib()
    objects = [MibObject('LINEARISDBLQ-MIB', 'systemModel', mib.getSystemModel),
               MibObject('LINEARISDBLQ-MIB', 'systemChannel', mib.getSystemChannel),
               MibObject('LINEARISDBLQ-MIB', 'transportStream', mib.getTransportStream),
               MibObject('LINEARISDBLQ-MIB', 'systemProgrammedPower', mib.getSystemProgrammedPower)]

    agent = SNMPAgent(objects)
    agent.setTrapReceiver('127.0.0.1', 'traps')
    Worker(agent, mib).start()
    try:
        agent.serve_forever()
    except KeyboardInterrupt:
        print("Shutting down")
It looks like you designed your own MIB structures which are not connected to the pysnmp engine.
To make your MIB variables available to a pysnmp-based Agent, you have to either A) inherit your MIB objects from pysnmp's MibScalarInstance class, or B) build your own MIB controller supporting pysnmp-compatible interfaces.
For more information please refer to the above examples.
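Following option A, one rough sketch is to have createVariable accept a setValue callback alongside getValue and override the scalar's write hook so that values supplied by SNMP SET land back in the Mib object. The method names below follow pysnmp's classic MibScalarInstance read/write interface, but the exact signatures differ between pysnmp versions, so treat this purely as a sketch to adapt:

# Sketch only: pair each exported scalar with a getter and a setter.
def createVariable(SuperClass, getValue, setValue, *args):
    class Var(SuperClass):
        def readGet(self, name, *args):
            # GET: serve whatever the Mib object currently holds
            return name, self.syntax.clone(getValue())

        def writeCommit(self, name, val, *args):
            # SET: push the manager-supplied value back into the Mib object,
            # then let the base class update the instance as usual
            setValue(val)
            SuperClass.writeCommit(self, name, val, *args)
    return Var(*args)

# Wiring it up would mean carrying a setter in each tuple for writable objects, e.g.
# (hypothetical extension of the namedtuple above):
# MibObject = collections.namedtuple('MibObject',
#                                    ['mibName', 'objectType', 'valueFunc', 'setFunc'])
# MibObject('LINEARISDBLQ-MIB', 'systemChannel',
#           mib.getSystemChannel, mib.setSystemChannel)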

Twisted AMP Server is not receiving data

I have built a multi-service daemon on Twisted which receives commands from Django. What happens is:
The Django view connects to the AMP server.
Django doesn't send the command, or AMP is not receiving the command.
My question is: what am I doing wrong?
My code is:
AMP Server
from twisted.protocols.amp import AMP, Command, String


class AmpProcessor(Command):
    arguments = [('proto', String()),
                 ('imei', String()),
                 ('ip', String()),
                 ('port', String()),
                 ('cmmd', String())]
    response = [('answer', String())]


class AMPServer(AMP):
    @AmpProcessor.responder
    def processor(self, proto, imei, ip, port, cmmd):
        print(cmmd)
        self.factories['proto'].clients['ip'].sendMessage(cmmd)
        return {'answer': 'ok'}
TAC File
import os, sys
import ConfigParser

from twisted.application import internet, service
from twisted.internet import protocol, reactor
from listener.TrackerServer import TrackerFactory
from listener.AMPServer import AMPServer
from twisted.internet.protocol import Factory
from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.application.internet import StreamServerEndpointService

PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(PROJECT_DIR)

path = None
config = ConfigParser.ConfigParser()
config.read('protocols.cfg')

application = service.Application("tracker")

factories = {}
for device in config.get('protocols', 'keys').split(','):
    devicer = config.get(device, 'name')
    factories[devicer] = TrackerFactory(devicer)
    internet.TCPServer(int(config.get(device, 'port')), factories[devicer]).setServiceParent(application)

endpoint = TCP4ServerEndpoint(reactor, 8750)
factory = Factory()
factory.protocol = AMPServer
factory.protocol.factories = factories
ampService = StreamServerEndpointService(endpoint, factory)
ampService.setServiceParent(application)
Django View
def send_fence_to_device(request):
    device_fence_id = request.GET['device_fence_id']
    device_id = request.GET['device_id']
    fence_id = request.GET['fence_id']
    fnc = Fence.objects.get(id=fence_id)
    dev = Device.objects.get(id=device_id)
    try:
        devLog = dev.devicelog_set.filter(device_id=device_id, status=True).order_by('created').reverse()[:1].all()[0]
        params = simplejson.loads(fnc.other)
        lttdlgtd = simplejson.loads(fnc.points)
        strCommand = ".geo.%s,%s,%s,%s,%s,%s,%s,%s,%s" % (params['identificator'], fnc.name[:4],
                                                          round(float(lttdlgtd[0][0]), 4),
                                                          round(float(lttdlgtd[0][1]), 4),
                                                          round(float(fnc.radius), 4),
                                                          params['time_to_arrive'], params['fence_class'],
                                                          params['tolerance'], 1)
        d = connect()

        def connected(protocol):
            return protocol.callRemote(
                AmpProcessor,
                proto='TELCOMIP',
                imei=devLog.ip,
                ip=devLog.ip,
                port=devLog.port,
                command=strCommand)
        d.addCallback(connected)

        def saved(result):
            return HttpResponse(simplejson.dumps(result), mimetype='application/json')
            # print 'Registration result:', result
        d.addCallback(saved)
        # d.addErrback(err, "Failed to register")

        def finished(ignored):
            reactor.stop()
        d.addCallback(finished)

        reactor.run(installSignalHandlers=0)
        # return HttpResponse(simplejson.dumps(1), mimetype='application/json')
    except:
        return HttpResponse(simplejson.dumps(0), mimetype='application/json')


def connect():
    endpoint = TCP4ClientEndpoint(reactor, "127.0.0.1", 8750)
    factory = Factory()
    factory.protocol = AMP
    return endpoint.connect(factory)


class DeviceUnavailable(Exception):
    pass


class AmpProcessor(Command):
    arguments = [('proto', String()),
                 ('imei', String()),
                 ('ip', String()),
                 ('port', String()),
                 ('cmmd', String())]
    response = [('answer', String())]
    errors = {DeviceUnavailable: 'device-unavailable'}
You can only call reactor.run once per process. I am guessing that you are calling send_fence_to_device once per request. This means that it may work once, but all subsequent calls will fail.
If you are set on using Twisted reliably inside a Django application, Crochet might help.
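As a rough sketch of the Crochet direction (the send_command name and the 10-second timeout are arbitrary choices, and this reuses the connect helper and AmpProcessor definition from the view above), the idea is to let Crochet own the reactor so the Django view never calls reactor.run or reactor.stop:

# Sketch only: Crochet runs the reactor once, in a background thread.
from crochet import setup, wait_for
setup()

@wait_for(timeout=10.0)
def send_command(str_command, dev_log):
    # Runs in the reactor thread; the caller blocks until the Deferred fires.
    d = connect()

    def connected(protocol):
        return protocol.callRemote(
            AmpProcessor,
            proto='TELCOMIP',
            imei=dev_log.ip,
            ip=dev_log.ip,
            port=dev_log.port,
            cmmd=str_command)
    d.addCallback(connected)
    return d

# The Django view then simply calls send_command(strCommand, devLog) and
# serialises the returned dict to JSON, with no reactor management at all.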
