Python call to the Ansible API with Celery returns None. I have been searching for a few days: calling the deploy function directly, without Celery, works fine, but with Celery the same code's Ansible API call returns None.
Steps to reproduce:
1. tasks.py
from celery import shared_task
from .deploy_tomcat2 import django_process

@shared_task
def deploy(jira_num):
    #return 'hello world {0}'.format(jira_num)
    #rdb.set_trace()
    return django_process(jira_num)
2. deploy_tomcat2.py
from .AnsibleApi import CallApi

def django_process(jira_num):
    server = '10.10.10.30'
    name = 'abc'
    port = 11011
    code = 'efs'
    jdk = '1.12.13'
    jvm = 'xxxx'
    if str.isdigit(jira_num):
        # import pdb
        # pdb.set_trace()
        call = CallApi(server, name, port, code, jdk, jvm)
        return call.run_task()
3. AnsibleApi.py
#!/usr/bin/env python
import logging
from .Logger import Logger
from django.conf import settings
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.plugins.callback import CallbackBase

Log = Logger('/tmp/auto_deploy_tomcat.log', logging.INFO)

class ResultCallback(CallbackBase):
    def __init__(self, *args, **kwargs):
        super(ResultCallback, self).__init__(*args, **kwargs)
        self.host_ok = {}
        self.host_unreachable = {}
        self.host_failed = {}

    def v2_runner_on_unreachable(self, result):
        self.host_unreachable[result._host.get_name()] = result

    def v2_runner_on_ok(self, result, *args, **kwargs):
        self.host_ok[result._host.get_name()] = result

    def v2_runner_on_failed(self, result, *args, **kwargs):
        self.host_failed[result._host.get_name()] = result

class CallApi(object):
    user = settings.SSH_USER
    ssh_private_key_file = settings.SSH_PRIVATE_KEY_FILE
    results_callback = ResultCallback()
    Options = namedtuple('Options',
                         ['connection', 'module_path', 'private_key_file', 'forks', 'become',
                          'become_method', 'become_user', 'check'])

    def __init__(self, ip, name, port, code, jdk, jvm):
        self.ip = ip
        self.name = name
        self.port = port
        self.code = code
        self.jdk = jdk
        self.jvm = jvm
        self.results_callback = ResultCallback()
        self.results_raw = {}

    def _gen_user_task(self):
        tasks = []
        deploy_script = 'autodeploy/tomcat_deploy.sh'
        dst_script = '/tmp/tomcat_deploy.sh'
        cargs = dict(src=deploy_script, dest=dst_script, owner=self.user, group=self.user, mode='0755')
        args = "%s %s %d %s %s '%s'" % (dst_script, self.name, self.port, self.code, self.jdk, self.jvm)
        tasks.append(dict(action=dict(module='copy', args=cargs), register='shell_out'))
        tasks.append(dict(action=dict(module='debug', args=dict(msg='{{shell_out}}'))))
        # tasks.append(dict(action=dict(module='command', args=args)))
        # tasks.append(dict(action=dict(module='command', args=args), register='result'))
        # tasks.append(dict(action=dict(module='debug', args=dict(msg='{{result.stdout}}'))))
        self.tasks = tasks

    def _set_option(self):
        self._gen_user_task()
        self.variable_manager = VariableManager()
        self.loader = DataLoader()
        self.options = self.Options(connection='smart', module_path=None,
                                    private_key_file=self.ssh_private_key_file, forks=None,
                                    become=True, become_method='sudo', become_user='root', check=False)
        self.inventory = Inventory(loader=self.loader, variable_manager=self.variable_manager,
                                   host_list=[self.ip])
        self.variable_manager.set_inventory(self.inventory)
        play_source = dict(
            name="auto deploy tomcat",
            hosts=self.ip,
            remote_user=self.user,
            gather_facts='no',
            tasks=self.tasks
        )
        self.play = Play().load(play_source, variable_manager=self.variable_manager, loader=self.loader)

    def run_task(self):
        self.results_raw = {'success': {}, 'failed': {}, 'unreachable': {}}
        tqm = None
        from celery.contrib import rdb; rdb.set_trace()
        #import pdb;pdb.set_trace()
        self._set_option()
        try:
            tqm = TaskQueueManager(
                inventory=self.inventory,
                variable_manager=self.variable_manager,
                loader=self.loader,
                options=self.options,
                passwords=None,
                stdout_callback=self.results_callback,
            )
            result = tqm.run(self.play)
        finally:
            if tqm is not None:
                tqm.cleanup()
        for host, result in self.results_callback.host_ok.items():
            self.results_raw['success'][host] = result._result
        for host, result in self.results_callback.host_failed.items():
            self.results_raw['failed'][host] = result._result
        for host, result in self.results_callback.host_unreachable.items():
            self.results_raw['unreachable'][host] = result._result
        Log.info("result is :%s" % self.results_raw)
        return self.results_raw
4. Start the celery worker:
celery -A jira worker -Q queue.ops.deploy -n "deploy.%h" -l info
5. Produce a message:
deploy.apply_async(args=['150'], queue='queue.ops.deploy', routing_key='ops.deploy')
It seems OK.
The only question is: is None really what the deploy task returns?
It would help if you could post your celery worker log.
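One way to check what the task actually returned (this assumes a result backend, e.g. Redis, is configured for the Celery app, which is not shown in the question) is to read the AsyncResult:

res = deploy.apply_async(args=['150'], queue='queue.ops.deploy', routing_key='ops.deploy')
print(res.get(timeout=600))  # blocks until the worker finishes; raises if the task failed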
There are two ways to solve this problem; both disable the assert:
1. Where celery starts, set export PYTHONOPTIMIZE=1, or start celery with the -O OPTIMIZATION parameter.
2. Disable the assert in Python's multiprocessing package, process.py line 102:
assert not _current_process._config.get('daemon'), \
       'daemonic processes are not allowed to have children'
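For the worker invocation used in the question, the first option would look like this (PYTHONOPTIMIZE=1 strips asserts in the worker and in the Ansible child processes it spawns):

PYTHONOPTIMIZE=1 celery -A jira worker -Q queue.ops.deploy -n "deploy.%h" -l info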
Related
I need to process a very large number of traps (10,000 per second). I have a very basic Linux server. I tried to implement this with threads, but the CPU saturates very quickly. Please tell me how to minimize the load on memory and CPU while still processing a large number of traps.
There is also database work involved: each trap is written to the database.
from pysnmp.entity import engine, config
from pysnmp.carrier.asyncore.dgram import udp
from pysnmp.entity.rfc3413 import ntfrcv
import psycopg2
from pysnmp.hlapi import SnmpEngine as Sm, CommunityData, UdpTransportTarget,\
    ContextData, ObjectType, ObjectIdentity, getCmd
from datetime import datetime
import logging.config
from os import getpid, system, stat, path, chdir, listdir, remove
from threading import Thread

snmpEngine = engine.SnmpEngine()
config.addTransport(
    snmpEngine,
    udp.domainName + (1,),
    udp.UdpTransport().openServerMode(('localhost', 162))
)
config.addV1System(snmpEngine, '', 'public')

class cbFun(Thread):
    def __init__(self, snmpEngine, stateReference, contextEngineId, contextName,
                 varBinds, cbCtx):
        Thread.__init__(self)
        self.snmpEngine = snmpEngine
        self.stateReference = stateReference
        self.contextEngineId = contextEngineId
        self.contextName = contextName
        self.varBinds = varBinds
        self.cbCtx = cbCtx
        self.localConnected = False
        self.localDb = None
        self.errorFlag = False
        self.start()

    def run(self):
        print('\n{0}New trap message received on {1} {0}'.format(
            '-' * 7,
            datetime.now().strftime('%d-%b-%Y at %H:%M:%S')))
        execContext = self.snmpEngine.observer.getExecutionContext(
            'rfc3412.receiveMessage:request')
        print('Trap is coming from %s:%s' % execContext['transportAddress'])
        dict_traps = {}
        for name, val in self.varBinds:
            oid = name.prettyPrint()
            value = val.prettyPrint()
            print(f'{oid} = {value}')
            dict_traps.update({oid: value})
        self.connectDB(dict_traps)  # connectDB is a method, so it must be called on self

    def connectDB(self, values):
        connect = psycopg2.connect(dbname="test", user="test",
                                   password="test",
                                   host="test")
        cursor = connect.cursor()
        for key, value in values.items():  # iterate key/value pairs, not just keys
            # parameterized query avoids quoting and injection problems
            cursor.execute("INSERT INTO TRAPS VALUES (%s, %s)", (key, value))
        connect.commit()
        connect.close()

ntfrcv.NotificationReceiver(snmpEngine, cbFun)
snmpEngine.transportDispatcher.jobStarted(1)
try:
    snmpEngine.transportDispatcher.runDispatcher()
except:
    snmpEngine.transportDispatcher.closeDispatcher()
    raise
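Since the question is about minimizing CPU and memory load, one commonly used pattern (a sketch only; trap_queue and db_writer are illustrative names, not part of the code above) is to stop spawning a thread per trap, push each varbind onto a queue from the callback, and let a single worker thread write batches over one persistent connection:

import queue
import threading
import psycopg2

trap_queue = queue.Queue()

def db_writer():
    # one persistent connection instead of one per trap
    conn = psycopg2.connect(dbname="test", user="test", password="test", host="test")
    cur = conn.cursor()
    while True:
        batch = [trap_queue.get()]  # block until at least one trap arrives
        while len(batch) < 500:
            try:
                batch.append(trap_queue.get_nowait())  # drain whatever else arrived
            except queue.Empty:
                break
        cur.executemany("INSERT INTO TRAPS VALUES (%s, %s)", batch)
        conn.commit()  # one commit per batch instead of per row

threading.Thread(target=db_writer, daemon=True).start()

The notification callback then just does trap_queue.put((oid, value)) for each varbind instead of starting a new cbFun thread.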
In order to run code on Fargate Spot, I wrote API code to handle SIGTERM so that (a) an instance reports itself as unhealthy once it receives SIGTERM and (b) the API does not terminate until it has finished all calls less than 20 seconds old.
I accomplished this in a hacky way using a sqlite database and a kill script called with Popen, as in the following MWE:
signalhandler.py:
import time
import signal
import sqlite3
import numpy as np
import os
from subprocess import Popen
from flask_restx import Resource, Api, reqparse, abort
import flask
import psutil

app = flask.Flask(__name__)
api = Api(app, title='Signal tester')
parser = reqparse.RequestParser()

def method():
    time.sleep(5)
    return {'statement': 'That was relaxing! By the way, I\'m %s'
            % os.getpid()}

class FunctionWrapper():
    def __init__(self):
        self.conn = sqlite3.Connection('/tmp/api_calls_%s.sqlite'
                                       % os.getpid(),
                                       timeout=1)
        self.c = self.conn.cursor()
        self.c.execute('CREATE TABLE IF NOT EXISTS'
                       + ' running (id INT PRIMARY KEY, time INT)')
        self.c.execute('PRAGMA journal_mode=WAL')
        self.conn.commit()
        self.method_defaultargs = {}
        np.random.seed()

    def call_method(self, **args):
        try:
            while True:
                id_ = np.random.randint(2**32)
                try:
                    self.c.execute('INSERT INTO running VALUES (%s, %s)'
                                   % (id_, int(time.time())))
                    self.conn.commit()
                    break
                except Exception:
                    pass
            args = {**self.method_defaultargs, **args}
            return_value = method(**args)
            self.c.execute('DELETE FROM running WHERE id=%s' % id_)
            self.conn.commit()
        except Exception:
            self.c.execute('DELETE FROM running WHERE id=%s' % id_)
            self.conn.commit()
            raise  # re-raise so the caller sees the failure instead of a NameError on return_value
        return return_value

class Killer():
    def __init__(self):
        self.being_killed = False
        signal.signal(signal.SIGTERM, self.exit_gracefully)

    def exit_gracefully(self, *args):
        self.being_killed = True
        self.p = os.getpid()
        Popen(['python3', 'kill.py', str(self.p)])

functionWrapper = FunctionWrapper()
killer = Killer()

@api.route('/sleep', endpoint='sleep', methods=['GET'])
class ApiThing(Resource):
    @api.expect(parser)
    def get(self):
        return functionWrapper.call_method()

@api.route('/healthcheck', endpoint='healthcheck', methods=['GET'])
class HealthCheck(Resource):
    def get(self):
        if killer.being_killed:
            abort(503, {'health': 'unhealthy',
                        'reason': 'program is terminating'})
        if psutil.cpu_percent(percpu=False, interval=2.5) > 90:
            abort(503, {'health': 'unhealthy',
                        'reason': 'cpu load too high'})
        if killer.being_killed:
            abort(503, {'health': 'unhealthy',
                        'reason': 'program is terminating'})
        return {'health': 'healthy'}
kill.py:
import time
import sys
import sqlite3
import signal
import os

conn = sqlite3.Connection('/tmp/api_calls_%s.sqlite' % sys.argv[1])
c = conn.cursor()
value = True
while value:
    sql = 'SELECT id FROM running WHERE time > %s' % (time.time() - 20)
    c.execute(sql)
    value = c.fetchone()
    time.sleep(1)
time.sleep(1)
os.kill(int(sys.argv[1]), signal.SIGKILL)
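A quick way to exercise the MWE (a sketch; this assumes the Flask development server and a recent Flask CLI, and the port and shell job-control syntax are illustrative):

flask --app signalhandler run --port 8080 &
curl http://localhost:8080/sleep &
kill -TERM %1                              # triggers Killer.exit_gracefully and spawns kill.py
curl http://localhost:8080/healthcheck     # now returns 503 until kill.py sends SIGKILL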
I've written an API to communicate with a website using WebSocketApp. It works fine on only 2 PCs. If I put my code on any other PC, the websocket doesn't receive any message and closes. I've tried a lot of different machines and operating systems, many versions of Python (including the one that works), and both wireless and wired connections, but nothing changed. There's no error or exception. What can it be?
EDIT: I don't own the website or the server. All other methods send messages and parse the response in on_socket_message.
import requests
import websocket
import time
from threading import Thread
from datetime import datetime
import json
from position import Position
from constants import ACTIVES

class IQOption():
    practice_balance = 0
    real_balance = 0
    server_time = 0
    positions = {}
    instruments_categories = ["cfd", "forex", "crypto"]
    top_assets_categories = ["forex", "crypto", "fx-option"]
    instruments_to_id = ACTIVES
    id_to_instruments = {y: x for x, y in ACTIVES.items()}
    market_data = {}
    binary_expiration_list = {}
    open_markets = {}
    digital_strike_list = {}
    candle_data = []
    latest_candle = 0
    position_id = 0
    quotes = []
    position_id_list = []

    def __init__(self, username, password, host="iqoption.com"):
        self.username = username
        self.password = password
        self.host = host
        self.session = requests.Session()
        self.generate_urls()
        self.socket = websocket.WebSocketApp(self.socket_url, on_open=self.on_socket_connect,
                                             on_message=self.on_socket_message,
                                             on_close=self.on_socket_close,
                                             on_error=self.on_socket_error)

    def generate_urls(self):
        """Generates required URLs to operate the API"""
        # https://auth.iqoption.com/api/v1.0/login
        self.api_url = "https://{}/api/".format(self.host)
        self.socket_url = "wss://{}/echo/websocket".format(self.host)
        self.login_url = self.api_url + "v1.0/login"
        self.profile_url = self.api_url + "profile"
        self.change_account_url = self.profile_url + "/" + "changebalance"
        self.getprofile_url = self.api_url + "getprofile"

    def login(self):
        """Login and set session cookies"""
        print("LOGIN")
        data = {"email": self.username, "password": self.password}
        self.log_resp = self.session.request(url="https://auth.iqoption.com/api/v1.0/login",
                                             data=data, method="POST")
        requests.utils.add_dict_to_cookiejar(self.session.cookies, dict(platform="9"))
        self.__ssid = self.log_resp.cookies.get("ssid")
        print(self.__ssid)
        self.start_socket_connection()
        time.sleep(1)  # artificial delay to complete socket connection
        self.log_resp2 = self.session.request(url="https://eu.iqoption.com/api/getprofile", method="GET")
        ss = self.log_resp2._content.decode('utf-8')
        js_ss = json.loads(ss)
        self.parse_account_info(js_ss)
        self.balance_id = js_ss["result"]["balance_id"]
        self.get_instruments()
        self.get_top_assets()
        self.setOptions()
        #self.getFeatures()
        time.sleep(1)
        print(js_ss["isSuccessful"])
        return js_ss["isSuccessful"]

    def on_socket_message(self, socket, message):
        # do things
        pass

    def on_socket_connect(self, socket):
        """Called on socket connection"""
        self.initial_subscriptions()
        print("On connect")

    def initial_subscriptions(self):
        self.send_socket_message("ssid", self.__ssid)
        self.send_socket_message("subscribe", "tradersPulse")

    def on_socket_error(self, socket, error):
        """Called on socket error"""
        print(error)  # was print(message), a NameError; the parameter is named error

    def on_socket_close(self, socket):
        """Called on socket close, does nothing"""

    def start_socket_connection(self):
        """Start socket connection"""
        self.socket_thread = Thread(target=self.socket.run_forever)
        self.socket_thread.start()

    def send_socket_message(self, name, msg):
        #print(msg)
        data = {"name": name, "msg": msg}
        self.socket.send(json.dumps(data))
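For diagnosing silent closes like this, websocket-client can dump the handshake and every frame it sends and receives, which usually shows whether the server is rejecting the ssid or the handshake itself. A minimal sketch, using the class above with placeholder credentials:

import websocket
websocket.enableTrace(True)  # log handshake headers and frames to stderr
api = IQOption('user@example.com', 'password')  # hypothetical credentials
api.login()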
Here is an example running under gevent-websocket. This makes it async (which I suspect is part of your problem) and allows for bidirectional communication.
import gevent
from gevent import monkey, signal, Timeout, sleep, spawn as gspawn
monkey.patch_all()
from gevent.pywsgi import WSGIServer
from geventwebsocket.handler import WebSocketHandler
from geventwebsocket import WebSocketError
import bottle
from bottle import get, route, template, request, response, abort, static_file
import ujson as json
import socket

@route('/static/<filepath:path>')
def server_static(filepath):
    return static_file(filepath, root='static')

@route('/ws/remote')
def handle_websocket():
    wsock = request.environ.get('wsgi.websocket')
    if not wsock:
        abort(400, 'Expected WebSocket request.')
    while 1:
        try:
            message = ''
            with Timeout(2, False) as timeout:
                message = wsock.receive()
            if message:
                message = json.loads(message)
                if 'command' in message:
                    r.command(message['command'])
        except WebSocketError:
            break
        except Exception as exc:
            print(str(exc))

@get('/')
def remote():
    return template('templates/remote.tpl', title='WebsocketTest',
                    websocket=WEBSOCKET, command='command', status=status)

if __name__ == '__main__':
    r = None
    status = "Connecting..."
    gspawn(initialize)  # initialize (which sets up r) is defined elsewhere in the original example
    print('Started...')
    HOST = socket.gethostbyname(socket.gethostname())
    HOST = 'localhost'
    WEBSOCKET = 'ws://{}/ws/remote'.format(HOST)
    botapp = bottle.app()
    server = WSGIServer(("0.0.0.0", 80), botapp, handler_class=WebSocketHandler)

    def shutdown():
        print('Shutting down ...')
        server.stop(timeout=60)
        exit(signal.SIGTERM)

    gevent.signal(signal.SIGTERM, shutdown)
    gevent.signal(signal.SIGINT, shutdown)  # CTRL+C
    server.serve_forever()
Then in your HTML you really should use the reconnecting-websocket library:
https://github.com/joewalnes/reconnecting-websocket
<button id="TRIGGERED" type="button" class="btn btn-outline-primary">TRIGGER</button>
<script type="text/javascript" src="/static/reconnecting-websocket.min.js"></script>
<script>
  var ws = new ReconnectingWebSocket('{{websocket}}');
  ws.reconnectInterval = 3000;
  ws.maxReconnectAttempts = 10;
  ws.onmessage = function (evt) {
    var wsmsg = JSON.parse(evt.data);
    console.log(evt.data)
  };
  $("button").click(function() {
    // console.log(this.id);
    ws.send(JSON.stringify({'{{command}}': this.id}));
  });
</script>
I've made my first Python SNMP agent from a custom MIB.
It supports SNMP GET and SET requests, but it returns values pre-determined by me.
How do I make my functions' returned varbinds be the values that users have supplied via their SNMP SETs?
The code:
from pysnmp.entity import engine, config
from pysnmp import debug
from pysnmp.entity.rfc3413 import cmdrsp, context, ntforg
from pysnmp.carrier.asynsock.dgram import udp
from pysnmp.proto.rfc1902 import OctetString
from pysnmp.smi import builder
import threading
import collections
import time

# can be useful
debug.setLogger(debug.Debug('all'))

MibObject = collections.namedtuple('MibObject', ['mibName',
                                   'objectType', 'valueFunc'])

class Mib(object):
    """Stores the data we want to serve.
    """
    def __init__(self):
        self._lock = threading.RLock()
        self._system_channel = 0
        self._system_programmed = 0

    def getSystemModel(self):
        return "Teste 1 Ok"

    def getTransportStream(self):
        return "Teste 2 Ok"

    def getSystemProgrammedPower(self):
        with self._lock:
            return self._system_programmed

    def setSystemProgrammedPower(self, value):
        with self._lock:
            self._system_programmed = value

    def getSystemChannel(self):
        with self._lock:
            return self._system_channel

    def setSystemChannel(self, value):
        with self._lock:
            self._system_channel = value

def createVariable(SuperClass, getValue, *args):
    """This is going to create an instance variable that we can export.
    getValue is a function to call to retrieve the value of the scalar
    """
    class Var(SuperClass):
        def readGet(self, name, *args):
            return name, self.syntax.clone(getValue())
    return Var(*args)

class SNMPAgent(object):
    """Implements an Agent that serves the custom MIB and
    can send a trap.
    """
    def __init__(self, mibObjects):
        """
        mibObjects - a list of MibObject tuples that this agent
        will serve
        """
        # each SNMP-based application has an engine
        self._snmpEngine = engine.SnmpEngine()
        # open a UDP socket to listen for snmp requests
        config.addSocketTransport(
            self._snmpEngine,
            udp.domainName,
            udp.UdpTransport().openServerMode(('127.0.0.1', 161))
        )
        # SNMPv3/USM setup
        config.addV1System(self._snmpEngine, 'test-agent', 'public')
        # user: usr-sha-none, auth: SHA, priv NONE
        config.addV3User(
            self._snmpEngine, 'test-user',
            config.usmHMACMD5AuthProtocol, 'authkey1',
            config.usmDESPrivProtocol, 'privkey1'
        )
        # Allow full MIB access for each user at VACM
        config.addContext(self._snmpEngine, '')
        config.addRwUser(self._snmpEngine, 1, 'test-agent', 'noAuthNoPriv', (1, 3, 6))  # v1
        config.addRwUser(self._snmpEngine, 2, 'test-agent', 'noAuthNoPriv', (1, 3, 6))  # v2c
        config.addRwUser(self._snmpEngine, 3, 'test-user', 'authPriv', (1, 3, 6))  # v3
        # each app has one or more contexts
        self._snmpContext = context.SnmpContext(self._snmpEngine)
        # the builder is used to load mibs. tell it to look in the
        # current directory for our new MIB. We'll also use it to
        # export our symbols later
        mibBuilder = self._snmpContext.getMibInstrum().getMibBuilder()
        mibSources = mibBuilder.getMibSources() + (builder.DirMibSource('.'),)
        mibBuilder.setMibSources(*mibSources)
        # our variables will subclass this since we only have scalar types
        # can't load this type directly, need to import it
        MibScalarInstance, = mibBuilder.importSymbols('SNMPv2-SMI',
                                                      'MibScalarInstance')
        # export our custom mib
        for mibObject in mibObjects:
            nextVar, = mibBuilder.importSymbols(mibObject.mibName,
                                                mibObject.objectType)
            instance = createVariable(MibScalarInstance,
                                      mibObject.valueFunc,
                                      nextVar.name, (0,),
                                      nextVar.syntax)
            # need to export as <var name>Instance
            instanceDict = {str(nextVar.name) + "Instance": instance}
            mibBuilder.exportSymbols(mibObject.mibName,
                                     **instanceDict)
        # tell pysnmp to respond to get, set, getnext, and getbulk
        cmdrsp.GetCommandResponder(self._snmpEngine, self._snmpContext)
        cmdrsp.NextCommandResponder(self._snmpEngine, self._snmpContext)
        cmdrsp.BulkCommandResponder(self._snmpEngine, self._snmpContext)
        cmdrsp.SetCommandResponder(self._snmpEngine, self._snmpContext)

    def setTrapReceiver(self, host, community):
        """Send traps to the host using community string community
        """
        config.addV1System(self._snmpEngine, 'nms-area', community)
        config.addVacmUser(self._snmpEngine, 2, 'nms-area', 'noAuthNoPriv',
                           notifySubTree=(1, 3, 6, 1, 4, 1))
        config.addTargetParams(self._snmpEngine,
                               'nms-creds', 'nms-area', 'noAuthNoPriv', 1)
        config.addTargetAddr(self._snmpEngine, 'my-nms', udp.domainName,
                             (host, 162), 'nms-creds',
                             tagList='all-my-managers')
        # set last parameter to 'notification' to have it send
        # informs rather than unacknowledged traps
        config.addNotificationTarget(
            self._snmpEngine, 'test-notification', 'my-filter',
            'all-my-managers', 'trap')

    def sendTrap(self):
        print("Sending trap")
        ntfOrg = ntforg.NotificationOriginator(self._snmpContext)
        errorIndication = ntfOrg.sendNotification(
            self._snmpEngine,
            'test-notification',
            ('LINEARISDBLQ-MIB', 'systemCurrentAlarmTrap'),
            ())

    def serve_forever(self):
        print("Starting agent")
        self._snmpEngine.transportDispatcher.jobStarted(1)
        try:
            self._snmpEngine.transportDispatcher.runDispatcher()
        except:
            self._snmpEngine.transportDispatcher.closeDispatcher()
            raise

class Worker(threading.Thread):
    """Just to demonstrate updating the MIB
    and sending traps
    """
    def __init__(self, agent, mib):
        threading.Thread.__init__(self)
        self._agent = agent
        self._mib = mib
        self.setDaemon(True)

    def run(self):
        while True:
            time.sleep(3)
            self._mib.setSystemChannel(mib.getSystemChannel() + 1)
            self._agent.sendTrap()

if __name__ == '__main__':
    mib = Mib()
    objects = [MibObject('LINEARISDBLQ-MIB', 'systemModel', mib.getSystemModel),
               MibObject('LINEARISDBLQ-MIB', 'systemChannel', mib.getSystemChannel),
               MibObject('LINEARISDBLQ-MIB', 'transportStream', mib.getTransportStream),
               MibObject('LINEARISDBLQ-MIB', 'systemProgrammedPower', mib.getSystemProgrammedPower)]
    agent = SNMPAgent(objects)
    agent.setTrapReceiver('127.0.0.1', 'traps')
    Worker(agent, mib).start()
    try:
        agent.serve_forever()
    except KeyboardInterrupt:
        print("Shutting down")
Looks like you designed your own MIB structures, which are not connected to the pysnmp engine.
To make your MIB variables available to a pysnmp-based agent, you have to either (A) inherit your MIB objects from pysnmp's MibScalarInstance class or (B) build your own MIB controller supporting pysnmp-compatible interfaces.
For more information, please refer to the above examples.
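For option A, a minimal sketch of the write side (hook names follow the pysnmp 4.x instrumentation API already used above; the exact write* signatures vary between pysnmp versions, so treat this as an assumption to verify) extends the createVariable helper so a setter is called on SET:

def createVariable(SuperClass, getValue, setValue, *args):
    class Var(SuperClass):
        def readGet(self, name, *args):
            return name, self.syntax.clone(getValue())
        def writeCommit(self, name, val, *args):
            setValue(val)  # push the SET value into the Mib object
            SuperClass.writeCommit(self, name, val, *args)
    return Var(*args)

Each MibObject would then carry both a getter and a setter, e.g. mib.getSystemChannel and mib.setSystemChannel, so subsequent GETs return whatever the manager last SET.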
I have built a multiservice daemon on Twisted which receives commands from Django. What happens is:
The Django view connects to the AMP server
Django doesn't send the command, or AMP is not receiving the command
My question is: what am I doing wrong?
My code is:
AMP Server
from twisted.protocols.amp import AMP, Command, String

class AmpProcessor(Command):
    arguments = [('proto', String()),
                 ('imei', String()),
                 ('ip', String()),
                 ('port', String()),
                 ('cmmd', String())]
    response = [('answer', String())]

class AMPServer(AMP):
    @AmpProcessor.responder
    def processor(self, proto, imei, ip, port, cmmd):
        print(cmmd)
        self.factories[proto].clients[ip].sendMessage(cmmd)  # use the argument values, not the literals 'proto'/'ip'
        return {'answer': 'ok'}
TAC File
import os, sys
import ConfigParser
from twisted.application import internet, service
from twisted.internet import protocol, reactor
from listener.TrackerServer import TrackerFactory
from listener.AMPServer import AMPServer
from twisted.internet.protocol import Factory
from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.application.internet import StreamServerEndpointService

PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(PROJECT_DIR)
path = None
config = ConfigParser.ConfigParser()
config.read('protocols.cfg')
application = service.Application("tracker")
factories = {}
for device in config.get('protocols', 'keys').split(','):
    devicer = config.get(device, 'name')
    factories[devicer] = TrackerFactory(devicer)
    internet.TCPServer(int(config.get(device, 'port')), factories[devicer]).setServiceParent(application)
endpoint = TCP4ServerEndpoint(reactor, 8750)
factory = Factory()
factory.protocol = AMPServer
factory.protocol.factories = factories
ampService = StreamServerEndpointService(endpoint, factory)
ampService.setServiceParent(application)
Django View
def send_fence_to_device(request):
    device_fence_id = request.GET['device_fence_id']
    device_id = request.GET['device_id']
    fence_id = request.GET['fence_id']
    fnc = Fence.objects.get(id=fence_id)
    dev = Device.objects.get(id=device_id)
    try:
        devLog = dev.devicelog_set.filter(device_id=device_id, status=True).order_by('created').reverse()[:1].all()[0]
        params = simplejson.loads(fnc.other)
        lttdlgtd = simplejson.loads(fnc.points)
        strCommand = ".geo.%s,%s,%s,%s,%s,%s,%s,%s,%s" % (params['identificator'], fnc.name[:4],
                                                          round(float(lttdlgtd[0][0]), 4),
                                                          round(float(lttdlgtd[0][1]), 4),
                                                          round(float(fnc.radius), 4),
                                                          params['time_to_arrive'], params['fence_class'],
                                                          params['tolerance'], 1)
        d = connect()

        def connected(protocol):
            return protocol.callRemote(
                AmpProcessor,
                proto='TELCOMIP',
                imei=devLog.ip,
                ip=devLog.ip,
                port=devLog.port,
                cmmd=strCommand)  # the Command declares 'cmmd', so the keyword must be cmmd, not command
        d.addCallback(connected)

        def saved(result):
            return HttpResponse(simplejson.dumps(result), mimetype='application/json')
            #print 'Registration result:', result
        d.addCallback(saved)
        #d.addErrback(err, "Failed to register")

        def finished(ignored):
            reactor.stop()
        d.addCallback(finished)
        reactor.run(installSignalHandlers=0)
        #return HttpResponse(simplejson.dumps(1), mimetype='application/json')
    except:
        return HttpResponse(simplejson.dumps(0), mimetype='application/json')

def connect():
    endpoint = TCP4ClientEndpoint(reactor, "127.0.0.1", 8750)
    factory = Factory()
    factory.protocol = AMP
    return endpoint.connect(factory)

class DeviceUnavailable(Exception):
    pass

class AmpProcessor(Command):
    arguments = [('proto', String()),
                 ('imei', String()),
                 ('ip', String()),
                 ('port', String()),
                 ('cmmd', String())]
    response = [('answer', String())]
    errors = {DeviceUnavailable: 'device-unavailable'}
You can only call reactor.run once per process. I am guessing that you are calling send_fence_to_device once per request. This means that it may work once, but all subsequent calls will fail.
If you are set on using Twisted reliably inside a Django application, Crochet might help.
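A minimal sketch of the Crochet approach (assuming the crochet package is installed; setup() starts the reactor in a background thread once per process, and wait_for blocks the Django thread until the Deferred fires or times out):

from crochet import setup, wait_for
setup()  # call once at import time; never call reactor.run() yourself

@wait_for(timeout=15.0)
def send_command(dev_log, str_command):
    d = connect()  # the connect() helper from the view above
    d.addCallback(lambda protocol: protocol.callRemote(
        AmpProcessor, proto='TELCOMIP', imei=dev_log.ip,
        ip=dev_log.ip, port=dev_log.port, cmmd=str_command))
    return d

The view can then call send_command(devLog, strCommand) like an ordinary blocking function and serialize the result.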