Does Disque have a feature like RedisJobStore in redis? - python

I want to use Disque as a job store.
I implemented the same with Redis using the RedisJobStore class in APScheduler.
Here is the snippet with the Redis job store:
from apscheduler.jobstores.redis import RedisJobStore

job_stores = {
    'default': RedisJobStore(jobs_key='ap_scheduler.jobs', run_times_key='ap_scheduler.run_times')
}
Is it possible to create a wrapper modeled on the RedisJobStore class that stores jobs in Disque, which APScheduler could then use? Here is my attempt:
import pickle

from apscheduler.jobstores.base import BaseJobStore
from pydisque.client import Client  # assuming the pydisque client

class RedisJobStore(BaseJobStore):
    def __init__(self, db=0, jobs_key='apscheduler.jobs', run_times_key='apscheduler.run_times',
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(RedisJobStore, self).__init__()
        self.jobs_key = jobs_key
        self.run_times_key = run_times_key
        self.pickle_protocol = pickle_protocol
        c = Client(['localhost:7711'])
        c.connect()  # pydisque's connect() returns None, so keep the client object itself
        self.disque = c

    def lookup_job(self, job_id):
        # Disque has no hash commands, so this Redis-style hget has no direct equivalent
        job_state = self.disque.hget(self.jobs_key, job_id)
        return self._reconstitute_job(job_state) if job_state else None
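For context, Disque is a message broker rather than a general key-value store: it speaks queue commands (ADDJOB/GETJOB/ACKJOB) and offers no HGET-style lookup to port the Redis store onto, so a job store would need a different storage scheme. A minimal sketch of what the client actually offers (assuming the pydisque package and a local Disque node on port 7711):

import json
from pydisque.client import Client

client = Client(['localhost:7711'])
client.connect()

# Enqueue a payload; Disque assigns and returns an opaque job id.
job_id = client.add_job('apscheduler.jobs', json.dumps({'job': 'demo'}), timeout=100)

# Fetch jobs from one or more queues; each item is (queue_name, job_id, payload).
for queue, jid, payload in client.get_job(['apscheduler.jobs']):
    print(json.loads(payload))
    client.ack_job(jid)  # acknowledge so Disque does not redeliver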

Related

multiple protocols in a single Jina Flow?

I would like to serve both gRPC and HTTP in my Flow, but the Flow description only allows a single value in the protocol parameter. Is it possible to add both? If not, do I have to deploy two Flows, or is there a better workaround?
From what I can see, the documentation doesn't mention whether I can have two gateways.
f = Flow(protocol='grpc', port=12345).add(uses=FooExecutor)
with f:
    client = Client(port=12345)
    docs = client.post(on='/')
    print(docs.texts)
Unfortunately by default, no.
But you can develop your own custom gateway that enables both protocols at the same time.
A sample custom gateway looks like the following (borrowed from here):
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server

from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc

class DummyResponseModel(BaseModel):
    protocol: str

class MultiProtocolGateway(Gateway):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.http_port = self.ports[0]
        self.grpc_port = self.ports[1]
        self.health_servicer = health.HealthServicer(experimental_non_blocking=True)

    async def _setup_http_server(self):
        from fastapi import FastAPI

        app = FastAPI(
            title='HTTP Server',
        )

        @app.get(path='/', response_model=DummyResponseModel)
        def _get_response():
            return {'protocol': 'http'}

        self.http_server = Server(
            Config(app, host=__default_host__, port=self.http_port)
        )

    async def _setup_grpc_server(self):
        self.grpc_server = grpc.aio.server()
        jina_pb2_grpc.add_JinaRPCServicer_to_server(
            self.streamer._streamer, self.grpc_server
        )
        service_names = (
            jina_pb2.DESCRIPTOR.services_by_name['JinaRPC'].full_name,
            reflection.SERVICE_NAME,
        )
        # Mark all services as healthy.
        health_pb2_grpc.add_HealthServicer_to_server(
            self.health_servicer, self.grpc_server
        )
        for service in service_names:
            self.health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
        reflection.enable_server_reflection(service_names, self.grpc_server)
        self.grpc_server.add_insecure_port(f'{__default_host__}:{self.grpc_port}')
        await self.grpc_server.start()

    async def setup_server(self):
        await self._setup_http_server()
        await self._setup_grpc_server()

    async def run_server(self):
        await self.http_server.serve()
        await self.grpc_server.wait_for_termination()

    async def shutdown(self):
        self.http_server.should_exit = True
        await self.grpc_server.stop(0)
        await self.http_server.shutdown()
        self.health_servicer.enter_graceful_shutdown()

    @property
    def _should_exit(self) -> bool:
        return self.http_server.should_exit
And you can access it in the following way:
from xxx import MultiProtocolGateway
from xxx import MyExecutor
from jina import Flow, Client, DocumentArray

http_port = 51000
grpc_port = 52000

flow = Flow().config_gateway(
    uses=MultiProtocolGateway,
    port=[http_port, grpc_port],
    protocol=['http', 'grpc'],
).add(uses=MyExecutor)

with flow:
    c1 = Client(host='http://0.0.0.0:51000')
    c1.post(on='/', inputs=DocumentArray.empty(5))
    c2 = Client(host='grpc://0.0.0.0:52000')
    c2.post(on='/', inputs=DocumentArray.empty(5))
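Note that the order of the port list matters here: the gateway reads self.ports[0] as the HTTP port and self.ports[1] as the gRPC port, so the protocol list has to line up with the ports you pass.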

django+celery+ansibleApi return None

Python calling the Ansible API from Celery returns None; I have been searching for a few days. It works well when calling the deploy function without Celery, but with Celery my call to the Ansible API returns None.
Steps to reproduce:
1. tasks.py
from celery import shared_task
from .deploy_tomcat2 import django_process

@shared_task
def deploy(jira_num):
    # return 'hello world {0}'.format(jira_num)
    # rdb.set_trace()
    return django_process(jira_num)
2. deploy_tomcat2.py
from .AnsibleApi import CallApi

def django_process(jira_num):
    server = '10.10.10.30'
    name = 'abc'
    port = 11011
    code = 'efs'
    jdk = '1.12.13'
    jvm = 'xxxx'
    if str.isdigit(jira_num):
        # import pdb
        # pdb.set_trace()
        call = CallApi(server, name, port, code, jdk, jvm)
        return call.run_task()
3. AnsibleApi.py
#!/usr/bin/env python
import logging
from .Logger import Logger
from django.conf import settings
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.plugins.callback import CallbackBase

Log = Logger('/tmp/auto_deploy_tomcat.log', logging.INFO)

class ResultCallback(CallbackBase):
    def __init__(self, *args, **kwargs):
        super(ResultCallback, self).__init__(*args, **kwargs)
        self.host_ok = {}
        self.host_unreachable = {}
        self.host_failed = {}

    def v2_runner_on_unreachable(self, result):
        self.host_unreachable[result._host.get_name()] = result

    def v2_runner_on_ok(self, result, *args, **kwargs):
        self.host_ok[result._host.get_name()] = result

    def v2_runner_on_failed(self, result, *args, **kwargs):
        self.host_failed[result._host.get_name()] = result
class CallApi(object):
    user = settings.SSH_USER
    ssh_private_key_file = settings.SSH_PRIVATE_KEY_FILE
    results_callback = ResultCallback()
    Options = namedtuple('Options',
                         ['connection', 'module_path', 'private_key_file', 'forks', 'become',
                          'become_method', 'become_user', 'check'])

    def __init__(self, ip, name, port, code, jdk, jvm):
        self.ip = ip
        self.name = name
        self.port = port
        self.code = code
        self.jdk = jdk
        self.jvm = jvm
        self.results_callback = ResultCallback()
        self.results_raw = {}

    def _gen_user_task(self):
        tasks = []
        deploy_script = 'autodeploy/tomcat_deploy.sh'
        dst_script = '/tmp/tomcat_deploy.sh'
        cargs = dict(src=deploy_script, dest=dst_script, owner=self.user, group=self.user, mode='0755')
        args = "%s %s %d %s %s '%s'" % (dst_script, self.name, self.port, self.code, self.jdk, self.jvm)
        tasks.append(dict(action=dict(module='copy', args=cargs), register='shell_out'))
        tasks.append(dict(action=dict(module='debug', args=dict(msg='{{shell_out}}'))))
        # tasks.append(dict(action=dict(module='command', args=args)))
        # tasks.append(dict(action=dict(module='command', args=args), register='result'))
        # tasks.append(dict(action=dict(module='debug', args=dict(msg='{{result.stdout}}'))))
        self.tasks = tasks

    def _set_option(self):
        self._gen_user_task()
        self.variable_manager = VariableManager()
        self.loader = DataLoader()
        self.options = self.Options(connection='smart', module_path=None,
                                    private_key_file=self.ssh_private_key_file, forks=None,
                                    become=True, become_method='sudo', become_user='root', check=False)
        self.inventory = Inventory(loader=self.loader, variable_manager=self.variable_manager,
                                   host_list=[self.ip])
        self.variable_manager.set_inventory(self.inventory)
        play_source = dict(
            name="auto deploy tomcat",
            hosts=self.ip,
            remote_user=self.user,
            gather_facts='no',
            tasks=self.tasks
        )
        self.play = Play().load(play_source, variable_manager=self.variable_manager, loader=self.loader)

    def run_task(self):
        self.results_raw = {'success': {}, 'failed': {}, 'unreachable': {}}
        tqm = None
        from celery.contrib import rdb; rdb.set_trace()
        # import pdb; pdb.set_trace()
        self._set_option()
        try:
            tqm = TaskQueueManager(
                inventory=self.inventory,
                variable_manager=self.variable_manager,
                loader=self.loader,
                options=self.options,
                passwords=None,
                stdout_callback=self.results_callback,
            )
            result = tqm.run(self.play)
        finally:
            if tqm is not None:
                tqm.cleanup()
        for host, result in self.results_callback.host_ok.items():
            self.results_raw['success'][host] = result._result
        for host, result in self.results_callback.host_failed.items():
            self.results_raw['failed'][host] = result._result
        for host, result in self.results_callback.host_unreachable.items():
            self.results_raw['unreachable'][host] = result._result
        Log.info("result is: %s" % self.results_raw)
        return self.results_raw
4. Celery worker:
celery -A jira worker -Q queue.ops.deploy -n "deploy.%h" -l info
5. Produce a message:
deploy.apply_async(args=['150'], queue='queue.ops.deploy', routing_key='ops.deploy')
It seems OK.
The only question: is None really what the deploy task returns?
It would help if you could post your Celery worker log.
There are two methods to solve this problem; both disable the assert:
1. Where Celery starts, set export PYTHONOPTIMIZE=1, or start Celery with the -O OPTIMIZATION parameter.
2. Patch Python's multiprocessing package, process.py, line 102, and disable:
assert not _current_process._config.get('daemon'), \
    'daemonic processes are not allowed to have children'
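For example, a sketch of option 1 applied to the worker command from step 4 (the app and queue names come from the question):
export PYTHONOPTIMIZE=1
celery -A jira worker -Q queue.ops.deploy -n "deploy.%h" -l info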

How to pass a class based task into CELERY_BEAT_SCHEDULE

As seen in the docs, class-based tasks are a fair way to express complex logic.
However, the docs do not specify how to add your shiny newly created class-based task to your CELERY_BEAT_SCHEDULE (using Django).
Things I tried:
celery.py
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS, 'task_summary')

@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    from payments.tasks.generic.payeer import PayeerPaymentChecker
    from payments.tasks.generic.ok_pay import OkPayPaymentChecker

    okpay_import = OkPayPaymentChecker()
    payeer_imprt = PayeerPaymentChecker()

    sender.add_periodic_task(60.0, okpay_import.s(),
                             name='OkPay import',
                             expires=30)
    sender.add_periodic_task(60.0, payeer_imprt.s(),
                             name='Payeer import',
                             expires=30)
-- OR --
payments/task_summary.py
from tasks.generic.import import OkPayPaymentChecker, PayeerPaymentChecker

run_okpay = OkPayPaymentChecker()
run_payeer = PayeerPaymentChecker()

CELERY_BEAT_SCHEDULE = {
    # yes, I did try referring to the class here
    'check_okpay_payments': {
        'task': 'payments.tasks.task_summary.run_okpay',
        'schedule': timedelta(seconds=60),
    },
    'check_payeer_payments': {
        'task': 'payments.task_summary.run_payeer',
        'schedule': timedelta(seconds=60),
    },
}
Really don't know what to do, resorting to something like:
payments/task_summary.py
from payments.tasks.generic.ok_pay import OkPayPaymentChecker
from payments.tasks.generic.payeer import PayeerPaymentChecker
from celery import shared_task

@shared_task
def run_payer():
    instance = PayeerPaymentChecker()
    return instance.run()

@shared_task
def run_okpay():
    instance = OkPayPaymentChecker()
    return instance.run()
Online Resources which I've checked and do not help me / solve the problem:
https://denibertovic.com/posts/celery-best-practices/
https://blog.balthazar-rouberol.com/celery-best-practices
http://shulhi.com/class-based-celery-task/
http://jsatt.com/blog/class-based-celery-tasks/
It took me a while to find the answer to this too, and because this question is so high up in Google search results, I figured I'd drop the answer here for people who are struggling to find it:
You add it just as you would a normal task, but using the class name.
CELERY_BEAT_SCHEDULE = {
    'my_task_name': {
        'task': 'mymodule.tasks.MyTaskClass',
        'schedule': timedelta(seconds=60),
    },
}
This assumes you have mymodule/tasks.py with:
from celery import Task

class MyTaskClass(Task):
    def run(self, *args, **kwargs):
        ... stuff ...
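One caveat, hedged: in Celery 4+, class-based tasks are no longer registered automatically, so the class path in the schedule only resolves if the task is registered under that name. A minimal sketch of explicit registration (assuming app is your Celery instance):
from celery import Task

class MyTaskClass(Task):
    name = 'mymodule.tasks.MyTaskClass'  # must match the 'task' key in CELERY_BEAT_SCHEDULE

    def run(self, *args, **kwargs):
        ...

# Register an instance so the beat scheduler can resolve the name.
my_task = app.register_task(MyTaskClass())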

Return value VarBinds pysnmp

I've made my first Python SNMP agent from a custom MIB.
It supports SNMP GET and SET requests, but it returns values pre-determined by me.
How do I make my functions return the varbind values that users have supplied via their SNMP SETs?
The code:
from pysnmp.entity import engine, config
from pysnmp import debug
from pysnmp.entity.rfc3413 import cmdrsp, context, ntforg
from pysnmp.carrier.asynsock.dgram import udp
from pysnmp.proto.rfc1902 import OctetString
from pysnmp.smi import builder
import threading
import collections
import time

# can be useful
debug.setLogger(debug.Debug('all'))

MibObject = collections.namedtuple('MibObject', ['mibName',
                                                 'objectType', 'valueFunc'])

class Mib(object):
    """Stores the data we want to serve.
    """
    def __init__(self):
        self._lock = threading.RLock()
        self._system_channel = 0
        self._system_programmed = 0

    def getSystemModel(self):
        return "Teste 1 Ok"

    def getTransportStream(self):
        return "Teste 2 Ok"

    def getSystemProgrammedPower(self):
        with self._lock:
            return self._system_programmed

    def setSystemProgrammedPower(self, value):
        with self._lock:
            self._system_programmed = value

    def getSystemChannel(self):
        with self._lock:
            return self._system_channel

    def setSystemChannel(self, value):
        with self._lock:
            self._system_channel = value

def createVariable(SuperClass, getValue, *args):
    """This is going to create an instance variable that we can export.
    getValue is a function to call to retrieve the value of the scalar
    """
    class Var(SuperClass):
        def readGet(self, name, *args):
            return name, self.syntax.clone(getValue())
    return Var(*args)
class SNMPAgent(object):
    """Implements an Agent that serves the custom MIB and
    can send a trap.
    """
    def __init__(self, mibObjects):
        """
        mibObjects - a list of MibObject tuples that this agent
        will serve
        """
        # each SNMP-based application has an engine
        self._snmpEngine = engine.SnmpEngine()

        # open a UDP socket to listen for snmp requests
        config.addSocketTransport(
            self._snmpEngine,
            udp.domainName,
            udp.UdpTransport().openServerMode(('127.0.0.1', 161))
        )

        # SNMPv3/USM setup
        config.addV1System(self._snmpEngine, 'test-agent', 'public')
        # user: usr-sha-none, auth: SHA, priv NONE
        config.addV3User(
            self._snmpEngine, 'test-user',
            config.usmHMACMD5AuthProtocol, 'authkey1',
            config.usmDESPrivProtocol, 'privkey1'
        )

        # Allow full MIB access for each user at VACM
        config.addContext(self._snmpEngine, '')
        config.addRwUser(self._snmpEngine, 1, 'test-agent', 'noAuthNoPriv', (1, 3, 6))  # v1
        config.addRwUser(self._snmpEngine, 2, 'test-agent', 'noAuthNoPriv', (1, 3, 6))  # v2c
        config.addRwUser(self._snmpEngine, 3, 'test-user', 'authPriv', (1, 3, 6))  # v3

        # each app has one or more contexts
        self._snmpContext = context.SnmpContext(self._snmpEngine)

        # the builder is used to load mibs. tell it to look in the
        # current directory for our new MIB. We'll also use it to
        # export our symbols later
        mibBuilder = self._snmpContext.getMibInstrum().getMibBuilder()
        mibSources = mibBuilder.getMibSources() + (builder.DirMibSource('.'),)
        mibBuilder.setMibSources(*mibSources)

        # our variables will subclass this since we only have scalar types
        # can't load this type directly, need to import it
        MibScalarInstance, = mibBuilder.importSymbols('SNMPv2-SMI',
                                                      'MibScalarInstance')
        # export our custom mib
        for mibObject in mibObjects:
            nextVar, = mibBuilder.importSymbols(mibObject.mibName,
                                                mibObject.objectType)
            instance = createVariable(MibScalarInstance,
                                      mibObject.valueFunc,
                                      nextVar.name, (0,),
                                      nextVar.syntax)
            # need to export as <var name>Instance
            instanceDict = {str(nextVar.name) + "Instance": instance}
            mibBuilder.exportSymbols(mibObject.mibName,
                                     **instanceDict)

        # tell pysnmp to respond to get, set, getnext, and getbulk
        cmdrsp.GetCommandResponder(self._snmpEngine, self._snmpContext)
        cmdrsp.NextCommandResponder(self._snmpEngine, self._snmpContext)
        cmdrsp.BulkCommandResponder(self._snmpEngine, self._snmpContext)
        cmdrsp.SetCommandResponder(self._snmpEngine, self._snmpContext)

    def setTrapReceiver(self, host, community):
        """Send traps to the host using community string community
        """
        config.addV1System(self._snmpEngine, 'nms-area', community)
        config.addVacmUser(self._snmpEngine, 2, 'nms-area', 'noAuthNoPriv',
                           notifySubTree=(1, 3, 6, 1, 4, 1))
        config.addTargetParams(self._snmpEngine,
                               'nms-creds', 'nms-area', 'noAuthNoPriv', 1)
        config.addTargetAddr(self._snmpEngine, 'my-nms', udp.domainName,
                             (host, 162), 'nms-creds',
                             tagList='all-my-managers')
        # set last parameter to 'notification' to have it send
        # informs rather than unacknowledged traps
        config.addNotificationTarget(
            self._snmpEngine, 'test-notification', 'my-filter',
            'all-my-managers', 'trap')

    def sendTrap(self):
        print "Sending trap"
        ntfOrg = ntforg.NotificationOriginator(self._snmpContext)
        errorIndication = ntfOrg.sendNotification(
            self._snmpEngine,
            'test-notification',
            ('LINEARISDBLQ-MIB', 'systemCurrentAlarmTrap'),
            ())

    def serve_forever(self):
        print "Starting agent"
        self._snmpEngine.transportDispatcher.jobStarted(1)
        try:
            self._snmpEngine.transportDispatcher.runDispatcher()
        except:
            self._snmpEngine.transportDispatcher.closeDispatcher()
            raise
class Worker(threading.Thread):
    """Just to demonstrate updating the MIB
    and sending traps
    """
    def __init__(self, agent, mib):
        threading.Thread.__init__(self)
        self._agent = agent
        self._mib = mib
        self.setDaemon(True)

    def run(self):
        while True:
            time.sleep(3)
            self._mib.setSystemChannel(self._mib.getSystemChannel() + 1)
            self._agent.sendTrap()

if __name__ == '__main__':
    mib = Mib()
    objects = [MibObject('LINEARISDBLQ-MIB', 'systemModel', mib.getSystemModel),
               MibObject('LINEARISDBLQ-MIB', 'systemChannel', mib.getSystemChannel),
               MibObject('LINEARISDBLQ-MIB', 'transportStream', mib.getTransportStream),
               MibObject('LINEARISDBLQ-MIB', 'systemProgrammedPower', mib.getSystemProgrammedPower)]
    agent = SNMPAgent(objects)
    agent.setTrapReceiver('127.0.0.1', 'traps')
    Worker(agent, mib).start()
    try:
        agent.serve_forever()
    except KeyboardInterrupt:
        print "Shutting down"
Looks like you designed your own MIB structures, which are not connected to the pysnmp engine.
To make your MIB variables available to the pysnmp-based Agent, you have to either A) inherit your MIB objects from pysnmp's MibScalarInstance class or B) build your own MIB controller supporting pysnmp-compatible interfaces.
For more information, please refer to the pysnmp examples of either approach.
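For option A, a minimal sketch of the direction this could take, building on the question's createVariable helper: also override the write hook so a SET forwards its value into the Mib object. The setValue parameter is an addition here, and write-hook signatures vary across pysnmp versions, so treat this as an outline rather than a drop-in fix:
def createVariable(SuperClass, getValue, setValue, *args):
    """Create an exportable scalar that serves getValue() and
    pushes values received via SNMP SET into setValue()."""
    class Var(SuperClass):
        def readGet(self, name, *args):
            return name, self.syntax.clone(getValue())

        def writeCommit(self, name, val, *args):
            setValue(val)  # hand the SET value to our Mib object
            SuperClass.writeCommit(self, name, val, *args)
    return Var(*args)
Each MibObject would then carry a setter alongside its getter, e.g. mib.setSystemChannel next to mib.getSystemChannel.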

Twisted AMP Server is not receiving data

I have built a multiservice daemon on Twisted; one of its services receives commands from Django. What happens is:
1. The Django view connects to the AMP server.
2. Django doesn't send the command, or AMP is not receiving it.
My question is: what am I doing wrong?
My code is:
AMP Server
from twisted.protocols.amp import AMP, Command, String

class AmpProcessor(Command):
    arguments = [('proto', String()),
                 ('imei', String()),
                 ('ip', String()),
                 ('port', String()),
                 ('cmmd', String())]
    response = [('answer', String())]

class AMPServer(AMP):
    @AmpProcessor.responder
    def processor(self, proto, imei, ip, port, cmmd):
        print cmmd
        self.factories[proto].clients[ip].sendMessage(cmmd)
        return {'answer': 'ok'}
TAC File
import os, sys
import ConfigParser
from twisted.application import internet, service
from twisted.internet import protocol, reactor
from listener.TrackerServer import TrackerFactory
from listener.AMPServer import AMPServer
from twisted.internet.protocol import Factory
from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.application.internet import StreamServerEndpointService
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(PROJECT_DIR)
path = None
config = ConfigParser.ConfigParser()
config.read('protocols.cfg')
application = service.Application("tracker")
factories = {}
for device in config.get('protocols', 'keys').split(','):
devicer = config.get(device, 'name')
factories[devicer] = TrackerFactory(devicer)
internet.TCPServer(int(config.get(device, 'port')), factories[devicer]).setServiceParent(application)
endpoint = TCP4ServerEndpoint(reactor, 8750)
factory = Factory()
factory.protocol = AMPServer
factory.protocol.factories = factories
ampService = StreamServerEndpointService(endpoint, factory)
ampService.setServiceParent(application)
Django View
def send_fence_to_device (request):
device_fence_id = request.GET['device_fence_id']
device_id = request.GET['device_id']
fence_id = request.GET['fence_id']
fnc = Fence.objects.get(id=fence_id)
dev = Device.objects.get(id=device_id)
try:
devLog = dev.devicelog_set.filter(device_id=device_id, status = True).order_by('created').reverse()[:1].all()[0]
params = simplejson.loads(fnc.other)
lttdlgtd = simplejson.loads(fnc.points)
strCommand = ".geo.%s,%s,%s,%s,%s,%s,%s,%s,%s" % (params['identificator'], fnc.name[:4], round(float(lttdlgtd[0][0]), 4), round(float(lttdlgtd[0][1]), 4), round(float(fnc.radius), 4), params['time_to_arrive'], params['fence_class'], params['tolerance'], 1)
d = connect()
def connected(protocol):
return protocol.callRemote(
AmpProcessor,
proto='TELCOMIP',
imei=devLog.ip,
ip=devLog.ip,
port=devLog.port,
command=strCommand)
d.addCallback(connected)
def saved(result):
return HttpResponse(simplejson.dumps(result), mimetype='application/json')
#print 'Registration result:', result
d.addCallback(saved)
#d.addErrback(err, "Failed to register")
def finished(ignored):
reactor.stop()
d.addCallback(finished)
reactor.run(installSignalHandlers=0)
#return HttpResponse(simplejson.dumps(1), mimetype='application/json')
except:
return HttpResponse(simplejson.dumps(0), mimetype='application/json')
def connect():
endpoint = TCP4ClientEndpoint(reactor, "127.0.0.1", 8750)
factory = Factory()
factory.protocol = AMP
return endpoint.connect(factory)
class DeviceUnavailable(Exception):
pass
class AmpProcessor(Command):
arguments = [('proto', String()),
('imei', String()),
('ip', String()),
('port', String()),
('cmmd', String())]
response = [('answer', String())]
errors = {DeviceUnavailable: 'device-unavailable'}
You can only call reactor.run once per process. I am guessing that you are calling send_fence_to_device once per request. This means that it may work once, but all subsequent calls will fail.
If you are set on using Twisted reliably inside a Django application, Crochet might help.
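A hedged sketch of the Crochet route (assuming pip install crochet; connect and AmpProcessor are the helpers from the view above): start the reactor once in a background thread and wrap the AMP call so the view can block on the result without ever calling reactor.run:
from crochet import setup, wait_for
setup()  # starts the Twisted reactor in a background thread, once per process

@wait_for(timeout=10.0)
def send_command(proto, imei, ip, port, cmmd):
    # Returns a Deferred; wait_for blocks the calling (Django) thread
    # until it fires, with no reactor.run()/reactor.stop() per request.
    d = connect()
    d.addCallback(lambda protocol: protocol.callRemote(
        AmpProcessor, proto=proto, imei=imei, ip=ip, port=port, cmmd=cmmd))
    return d
The view would then call send_command(...) directly and build its HttpResponse from the returned dict.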
