How to add jobs to apscheduler via request using uwsgi + flask_apscheduler - python

When run under Flask's local development server, jobs are added and run normally. When run under uWSGI, the job appears to get added to the job store but is never executed. A simple example with the described undesired behavior is given below:
__init__.py
import flask
from datetime import datetime, timedelta
from flask_apscheduler import APScheduler
app = flask.Flask("apscheduler_test")
app.config["SCHEDULER_API_ENABLED"] = True
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()

def test_job():
    print("test job run")

@app.route("/test")
def apscheduler_test():
    print("Adding Job")
    scheduler.add_job(id="101",
                      func=test_job,
                      next_run_time=(datetime.now() + timedelta(seconds=10)))
    return "view", 200

if __name__ == '__main__':
    app.run(port=5050)
apschedule_test.ini
[uwsgi]
pidfile = /var/run/%n.pid
chdir = /opt/apscheduler
master = true
enable-threads = true
threads = 20
http-socket = :48197
logto = /var/log/%n.log
plugin = python3
module = %n
callable = app
processes = 1
uid = root
gid = root
daemonize = /var/log/apscheduler_test.log

Try adding this flag to your ini file:
lazy-apps=true
Similar issue: uWSGI lazy-apps and ThreadPool
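For reference, this is how the flag slots into the question's ini file (a sketch of just the relevant lines; the rest of apschedule_test.ini stays as-is). With lazy-apps, uWSGI loads the application in each worker after forking, so the scheduler thread started at import time actually lives in the worker that serves requests:
[uwsgi]
master = true
enable-threads = true
# load the app in each worker after the fork, not in the master
lazy-apps = true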

Related

FastAPI is not quitting when pressing Ctrl+C

I am having difficulty quitting FastAPI. Ctrl+C does not work.
Here is my pyproject.toml
[tool.pyright]
exclude = ["app/worker"]
ignore = ["app/worker"]
[tool.poetry]
name = "api"
version = "0.1.0"
description = ""
authors = ["SamiAlsubhi <sami#alsubhi.me>"]
[tool.poetry.dependencies]
python = ">=3.8,<3.9"
fastapi = "^0.65.2"
tortoise-orm = "^0.17.4"
asyncpg = "^0.23.0"
aerich = "^0.5.3"
networkx = "^2.5.1"
numpy = "^1.21.0"
ldap3 = "^2.9.1"
fastapi-jwt-auth = "^0.5.0"
python-multipart = "^0.0.5"
torch = "1.7.1"
pyts = "0.11.0"
Pint = "^0.17"
Cython = "^0.29.24"
python-dotenv = "^0.19.0"
arq = "^0.22"
uvicorn = {extras = ["standard"], version = "^0.15.0"}
[tool.poetry.dev-dependencies]
pytest = "^6.2.4"
requests = "^2.25.1"
asynctest = "^0.13.0"
coverage = "^5.5"
pytest-html = "^3.1.1"
pytest-sugar = "^0.9.4"
pytest-json-report = "^1.4.0"
pytest-cov = "^2.12.1"
pylint = "^2.11.1"
autopep8 = "^1.5.7"
black = "^22.3.0"
aiosqlite = "^0.17.0"
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
Here is my entry point:
"""running API in a local dev environment"""
import os
import uvicorn
from dotenv import load_dotenv

# loading env values
load_dotenv("../.env")

if __name__ == "__main__":
    port = os.getenv("FASTAPI_PORT")
    port = int(port) if port else None
    uvicorn.run("app.main:app", host=os.getenv("FASTAPI_HOST"),
                port=port, reload=True)
This is what I get when I run it and then try to quit; the process hangs and does not return to the terminal:
(trendr) sami@Samis-MBP backend % python run.py
INFO: Will watch for changes in these directories: ['/Users/name/Desktop/etc']
INFO: Uvicorn running on http://0.0.0.0:1000 (Press CTRL+C to quit)
INFO: Started reloader process [70087] using watchgod
INFO: Started server process [70089]
INFO: Waiting for application startup.
INFO: Application startup complete.
^CINFO: Shutting down
INFO: Finished server process [70089]
INFO: ASGI 'lifespan' protocol appears unsupported.
I've read about this problem occurring with uvicorn, and I found the code snippet below to resolve it:
# Add the below code snippet to your app.py module after the app initialization.
import sys

def receive_signal(signalNumber, frame):
    print('Received:', signalNumber)
    sys.exit()

@app.on_event("startup")
async def startup_event():
    import signal
    signal.signal(signal.SIGINT, receive_signal)
    # startup tasks
Reference:
CTRL^C doesn't work while startup in progress
It looks like there was a compatibility issue between uvicorn, starlette and FastAPI around those versions. I updated them to the latest versions and that solved the issue.
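If you manage the project with Poetry as shown above, the version bump can be done along these lines (a sketch; poetry update stays within the caret constraints in pyproject.toml, while poetry add with a new constraint is needed to cross a major version):
poetry update fastapi uvicorn
# or, to move beyond the existing caret constraints:
poetry add fastapi@latest "uvicorn[standard]@latest"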

Different queues in celery

I have a project where I'm starting my FastAPI using a file (python main.py):
import uvicorn
from configuration import API_HOST, API_PORT
if __name__ == "__main__":
uvicorn.run("endpoints:app", host="localhost", port=8811, reload=True, access_log=False)
Inside endpoints.py I have:
from celery import Celery
from fastapi import FastAPI
import os
import threading
import time

# Create object for fastAPI
app = FastAPI(
    title="MYFASTAPI",
    description="MYDESCRIPTION",
    version=1.0,
    contact="ME!",
)

celery = Celery(__name__)
celery.conf.broker_url = os.environ.get("CELERY_BROKER_URL", "redis://localhost:6379")
celery.conf.result_backend = os.environ.get("CELERY_RESULT_BACKEND", "redis://localhost:6379")
celery.conf.task_track_started = True
celery.conf.task_serializer = "pickle"
celery.conf.result_serializer = "pickle"
celery.conf.accept_content = ["pickle"]
# By default celery can run as many concurrent workers as the instance has CPU cores.
celery.conf.worker_concurrency = os.cpu_count()

# Start the celery worker. I start it in a separate thread, so fastapi can run in parallel
worker = celery.Worker()

def start_worker():
    worker.start()

ce = threading.Thread(target=start_worker)
ce.start()

@app.post("/taskA")
def taskA():
    task = ask_taskA.delay()
    return {"task_id": task.id}

@celery.task(name="ask_taskA", bind=True)
def ask_taskA(self):
    time.sleep(100)

@app.post("/get_results")
def get_results(task_id):
    task_result = celery.AsyncResult(task_id)
    return {'task_status': task_result.status}
Given this code, how can I have two different queues, assign a specific number of workers to each queue, and assign a specific task to one of these queues?
I read that people usually execute celery as:
celery -A proj worker
but the structure of the project limited me because of some imports, and in the end I settled on starting the celery worker in a separate thread (which works perfectly).
Based on the official celery documentation (https://docs.celeryq.dev/en/stable/userguide/routing.html#manual-routing) you can follow this to specify different queues.
from kombu import Queue

app.conf.task_default_queue = 'default'
app.conf.task_queues = (
    Queue('default', routing_key='task.#'),
    Queue('feed_tasks', routing_key='feed.#'),
)
app.conf.task_default_exchange = 'tasks'
app.conf.task_default_exchange_type = 'topic'
app.conf.task_default_routing_key = 'task.default'
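To send a specific task to one of those queues, you can add a route for it; a sketch following the same docs page (app here is the Celery instance, named celery in the question's code, and the task name reuses ask_taskA from the question):
# Route ask_taskA to the feed_tasks queue; everything else uses the default queue.
app.conf.task_routes = {
    'ask_taskA': {'queue': 'feed_tasks', 'routing_key': 'feed.taskA'},
}

# A one-off override is also possible at call time:
# ask_taskA.apply_async(queue='feed_tasks')
For a specific number of workers per queue, the usual approach is one worker process per queue, each with its own concurrency, e.g.:
celery -A proj worker -Q default -c 4
celery -A proj worker -Q feed_tasks -c 2
If you keep the embedded in-thread worker instead, celery.Worker() appears to accept the same options as keyword arguments, e.g. celery.Worker(queues=['feed_tasks'], concurrency=2), though that is worth verifying against your Celery version.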

Python3 threading with uWSGI

I wasted a lot of time but couldn't find a solution.
If I use threads in my app deployed with uWSGI, they aren't in sync.
Here is simple example code (wsgi.py):
from time import sleep
import threading
i = 0
def daemon():
    global i
    while True:
        i += 1
        print(i)
        sleep(3)

th = threading.Thread(target=daemon, args=())
th.start()

def application(environ, start_response):
    start_response('200 OK', [('Content-Type','text/html')])
    return [str(i).encode()]
When I run this app, i increases in the log, but I always get 1 when I make a request from the browser (or 0 if I move sleep(3) before the first increment).
I tried the uwsgi.thread decorator, but got the same result.
uwsgi config:
[uwsgi]
socket = 127.0.0.1:3034
plugins-dir = /srv/uwsgi
plugin = python34
uid = py3utils
gid = py3utils
chdir = /srv/python/3/py3utils/tht/app/
wsgi-file = wsgi.py
enable-threads = true
daemonize = %(chdir)/../uwsgi.log
master = true
die-on-term = true
touch-reload = ../uwsgi_restart.txt
*sorry for my English
This happens because after importing your application the master process forks into a worker:
spawned uWSGI master process (pid: 7167)
spawned uWSGI worker 1 (pid: 7169, cores: 1)
spawned uWSGI http 1 (pid: 7170)
So your thread which prints i is running in the master process, and your requests are processed by the worker. The worker sees i equal to 1 at the moment of the fork. If you move sleep before incrementing i, the process manages to fork before the first increment.
Threads other than the main one are not copied during a fork, so i does not increment in the worker.
You should use something like uwsgidecorators.thread:
from time import sleep
import uwsgidecorators

i = 0

@uwsgidecorators.postfork
@uwsgidecorators.thread
def daemon():
    global i
    while True:
        i += 1
        print(i)
        sleep(3)

def application(environ, start_response):
    start_response('200 OK', [('Content-Type','text/html')])
    return [str(i).encode()]
Or use:
[uwsgi]
master = false
Python threading is disabled by default in uWSGI; you can enable it by adding the option --enable-threads:
uwsgi --http :8090 --wsgi-file uwsgi_test.py --enable-threads
It works in my test environment.
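For reference, the ini-file equivalent of that flag, if you deploy from a config file like the one in the question, is:
[uwsgi]
enable-threads = true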

Can Python Celery be started in-process in a thread?

I want to make a test case for my celery code. But usually celery needs to be started in a new process, like $ celery -A CELERY_MODULE worker; does that mean I can't run my test case code directly?
I've configured Celery with the memory store to avoid extra I/O in the test case. That config can't simply share the task queue between different processes.
Here is my naive implementation.
The celery entry point is celery.bin.celeryd.WorkerCommand; it parses the args and executes the worker.
Use the solo pool to avoid multiprocessing in this case. Of course, you need to install that lib first.
You could use this before your celery test case starts.
#!/usr/bin/env python
# vim: encoding=utf-8
import time
import unittest
from threading import Thread

from celery import Celery, states
from celery.bin.celeryd import WorkerCommand


class CELERY_CONFIG(object):
    BROKER_URL = "memory://"
    CELERY_CACHE_BACKEND = "memory"
    CELERY_RESULT_BACKEND = "cache"
    CELERYD_POOL = "solo"


class CeleryTestCase(unittest.TestCase):
    def test_inprocess(self):
        app = Celery(__name__)
        app.config_from_object(CELERY_CONFIG)

        @app.task
        def dumpy_task(dct):
            return 321

        worker = WorkerCommand(app)
        # worker.execute_from_commandline(["-P solo"])
        t = Thread(target=worker.execute_from_commandline, args=(["-c 1"],))
        t.daemon = True
        t.start()

        ar = dumpy_task.apply_async(({"a": 123},))
        while ar.status != states.SUCCESS:
            time.sleep(.01)
        self.assertEqual(states.SUCCESS, ar.status)
        self.assertEqual(ar.result, 321)
        t.join(0)
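On Celery 4 and later, a similar in-process worker is available out of the box through the bundled testing helpers, which may be simpler than driving WorkerCommand by hand. A minimal sketch (check the helper's signature against your Celery version):
import unittest

from celery import Celery
from celery.contrib.testing.worker import start_worker

app = Celery(broker="memory://", backend="cache+memory://")

@app.task
def dumpy_task(dct):
    return 321

class CeleryTestCase(unittest.TestCase):
    def test_inprocess(self):
        # start_worker runs a solo-pool worker in a background thread for the
        # duration of the with-block; perform_ping_check=False avoids needing
        # the ping task from celery.contrib.testing.tasks.
        with start_worker(app, pool="solo", perform_ping_check=False):
            self.assertEqual(dumpy_task.delay({"a": 123}).get(timeout=10), 321)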

How to create Celery Windows Service?

I'm trying to create a Windows Service to launch Celery. I have come across an article that does it using Task Scheduler. However it seems to launch numerous celery instances and keeps eating up memory till the machine dies. Is there any way to launch it as a Windows service?
I got the answer from another website. Celeryd (the daemon service for Celery) runs as a paster application; searching for 'Paster Windows Service' led me here. It describes how to run a Pylons application as a Windows service. Being new to the paster framework and hosting python web services, it didn't cross my mind to check it at first. But that solution works for Celery with a slight change here and there in the script.
I've modified the script to make it easier for modifying Celery settings. The essential changes are:
Create an INI file with the settings for Celery service (shown below)
Create a python script to create a Windows service.
INI file settings (celeryd.ini):
[celery:service]
service_name = CeleryService
service_display_name = Celery Service
service_description = WSCGI Windows Celery Service
service_logfile = celeryd.log
Python script to create Windows Service (CeleryService.py):
"""
The most basic (working) Windows service possible.
Requires Mark Hammond's pywin32 package.
Most of the code was taken from a CherryPy 2.2 example of how to set up a service
"""
import pkg_resources
import win32serviceutil
from paste.script.serve import ServeCommand as Server
import os, sys
import ConfigParser
import win32service
import win32event
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
INI_FILE = 'celeryd.ini'
SERV_SECTION = 'celery:service'
SERV_NAME = 'service_name'
SERV_DISPLAY_NAME = 'service_display_name'
SERV_DESC = 'service_description'
SERV_LOG_FILE = 'service_logfile'
SERV_APPLICATION = 'celeryd'
SERV_LOG_FILE_VAR = 'CELERYD_LOG_FILE'
# Default Values
SERV_NAME_DEFAULT = 'CeleryService'
SERV_DISPLAY_NAME_DEFAULT = 'Celery Service'
SERV_DESC_DEFAULT = 'WSCGI Windows Celery Service'
SERV_LOG_FILE_DEFAULT = r'D:\logs\celery.log'
class DefaultSettings(object):
def __init__(self):
if SCRIPT_DIR:
os.chdir(SCRIPT_DIR)
# find the ini file
self.ini = os.path.join(SCRIPT_DIR,INI_FILE)
# create a config parser opject and populate it with the ini file
c = ConfigParser.SafeConfigParser()
c.read(self.ini)
self.c = c
def getDefaults(self):
'''
Check for and get the default settings
'''
if (
(not self.c.has_section(SERV_SECTION)) or
(not self.c.has_option(SERV_SECTION, SERV_NAME)) or
(not self.c.has_option(SERV_SECTION, SERV_DISPLAY_NAME)) or
(not self.c.has_option(SERV_SECTION, SERV_DESC)) or
(not self.c.has_option(SERV_SECTION, SERV_LOG_FILE))
):
print 'setting defaults'
self.setDefaults()
service_name = self.c.get(SERV_SECTION, SERV_NAME)
service_display_name = self.c.get(SERV_SECTION, SERV_DISPLAY_NAME)
service_description = self.c.get(SERV_SECTION, SERV_DESC)
iniFile = self.ini
service_logfile = self.c.get(SERV_SECTION, SERV_LOG_FILE)
return service_name, service_display_name, service_description, iniFile, service_logfile
def setDefaults(self):
'''
set and add the default setting to the ini file
'''
if not self.c.has_section(SERV_SECTION):
self.c.add_section(SERV_SECTION)
self.c.set(SERV_SECTION, SERV_NAME, SERV_NAME_DEFAULT)
self.c.set(SERV_SECTION, SERV_DISPLAY_NAME, SERV_DISPLAY_NAME_DEFAULT)
self.c.set(SERV_SECTION, SERV_DESC, SERV_DESC_DEFAULT)
self.c.set(SERV_SECTION, SERV_LOG_FILE, SERV_LOG_FILE_DEFAULT)
cfg = file(self.ini, 'wr')
self.c.write(cfg)
cfg.close()
print '''
you must set the celery:service section service_name, service_display_name,
and service_description options to define the service
in the %s file
''' % self.ini
sys.exit()
class CeleryService(win32serviceutil.ServiceFramework):
"""NT Service."""
d = DefaultSettings()
service_name, service_display_name, service_description, iniFile, logFile = d.getDefaults()
_svc_name_ = service_name
_svc_display_name_ = service_display_name
_svc_description_ = service_description
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
# create an event that SvcDoRun can wait on and SvcStop
# can set.
self.stop_event = win32event.CreateEvent(None, 0, 0, None)
def SvcDoRun(self):
os.chdir(SCRIPT_DIR)
s = Server(SERV_APPLICATION)
os.environ[SERV_LOG_FILE_VAR] = self.logFile
s.run([self.iniFile])
win32event.WaitForSingleObject(self.stop_event, win32event.INFINITE)
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
#win32event.SetEvent(self.stop_event)
self.ReportServiceStatus(win32service.SERVICE_STOPPED)
sys.exit()
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(CeleryService)
To install the service run python CeleryService.py install and then python CeleryService.py start to start the service. NOTE: These commands should be run in command-line with administrator rights.
If the service needs to be removed, run python CeleryService.py remove.
I was trying to host Celery as part of enhancing my RhodeCode installation. This solution seems to work. Hope this will help someone.
The accepted answer does not apply to running celery with a Django application. But it inspired me to come up with a solution for running celery as a Windows service with Django. Note that the following is for Django projects only. It may work with other applications with some modifications.
The following discussion assumes Python >= 3.6 and RabbitMQ are already installed, and rabbitmq-server is running on localhost.
Create a file celery_service.py (or whatever you like) inside your Django project's top level folder, same level as manage.py, with the following content:
'''Usage : python celery_service.py install (start / stop / remove)
Run celery as a Windows service
'''
import win32service
import win32serviceutil
import win32api
import win32con
import win32event
import subprocess
import sys
import os
from pathlib import Path
import shlex
import logging
import time

# The directory for celery.log and celery_service.log
# Default: the directory of this script
INSTDIR = Path(__file__).parent
# The path of python Scripts
# Usually it is in path_to/venv/Scripts.
# If it is already in system PATH, then it can be set as ''
PYTHONSCRIPTPATH = INSTDIR / 'venvcelery/Scripts'
# The directory name of django project
# Note: it is the directory at the same level of manage.py
# not the parent directory
PROJECTDIR = 'proj'

logging.basicConfig(
    filename = INSTDIR / 'celery_service.log',
    level = logging.DEBUG,
    format = '[%(asctime)-15s: %(levelname)-7.7s] %(message)s'
)

class CeleryService(win32serviceutil.ServiceFramework):
    _svc_name_ = "Celery"
    _svc_display_name_ = "Celery Distributed Task Queue Service"

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)

    def SvcStop(self):
        logging.info('Stopping {name} service ...'.format(name=self._svc_name_))
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.hWaitStop)
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)
        sys.exit()

    def SvcDoRun(self):
        logging.info('Starting {name} service ...'.format(name=self._svc_name_))
        os.chdir(INSTDIR)  # so that proj worker can be found
        logging.info('cwd: ' + os.getcwd())
        self.ReportServiceStatus(win32service.SERVICE_RUNNING)
        command = '"{celery_path}" -A {proj_dir} worker -f "{log_path}" -l info -P eventlet'.format(
            celery_path=PYTHONSCRIPTPATH / 'celery.exe',
            proj_dir=PROJECTDIR,
            log_path=INSTDIR / 'celery.log')
        logging.info('command: ' + command)
        args = shlex.split(command)
        proc = subprocess.Popen(args)
        logging.info('pid: {pid}'.format(pid=proc.pid))
        self.timeout = 3000
        while True:
            rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
            if rc == win32event.WAIT_OBJECT_0:
                # stop signal encountered: terminate the celery process
                PROCESS_TERMINATE = 1
                handle = win32api.OpenProcess(PROCESS_TERMINATE, False, proc.pid)
                win32api.TerminateProcess(handle, -1)
                win32api.CloseHandle(handle)
                break

if __name__ == '__main__':
    win32serviceutil.HandleCommandLine(CeleryService)
Before the script can be run, you need to
Optionally create a python virtual environment e.g. 'venvcelery'.
Install the following requirements:
django>=2.0.0
sqlalchemy>=1.0.14
celery>=4.3.0,<5.0
pywin32>=227
eventlet>=0.25
Fix pywin32 pywintypes36.dll location. ref
Correctly set PYTHONSCRIPTPATH and PROJECTDIR in celery_service.py
PYTHONSCRIPTPATH is usually the "Scripts" folder under your python's installation path or current virtual environment
PROJECTDIR is the directory name of the Django project.
It is the directory at the same level of manage.py, not the parent directory.
Now you can install / start / stop / remove the service with:
python celery_service.py install
python celery_service.py start
python celery_service.py stop
python celery_service.py remove
I created a demo Django project with celery running as a Windows service:
https://github.com/azalea/django_celery_windows_service
In case you are interested in a running example.
Note: this is an updated version assuming Python >= 3.6, Django 2.2 and Celery 4.
An older version with Python 2.7, Django 1.6 and Celery 3 can be viewed in edit history.
@azalea's answer helped me a lot, but one thing I'd like to highlight here is that the service (celery_service.py) needs to be installed with your user/password; otherwise, when subprocess.Popen(args) runs in the SvcDoRun() function, nothing will happen, as there will be a permission issue. To set the user/password, you can choose one of two methods:
Using command line:
python33 .\celeryService1.py --username .\USERNAME --password PASSWORD
Go to Computer Management (local) > Services and Applications > Services, find your service (in @azalea's example, it is "Celery Distributed Task Queue Service"), right-click to open the Properties page, and enter the account under "This account" in the Log On tab.
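If you prefer doing the same from the command line, the built-in Windows sc tool should also be able to set the log-on account for an installed service (a sketch; the space after each = is required by sc, and "Celery" is the _svc_name_ from @azalea's script):
sc config Celery obj= ".\USERNAME" password= "PASSWORD"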
A good project here, but I didn't succeed in using it:
Link to the GitHub of the django-windows-tools.
It gave me a timeout at the last command line; I haven't had enough time to find out why.
The package allows setting up FastCGI, Celery and static files of a Django project on IIS.
Thanks to Azalea, as this led me down the path to being able to create two Windows services with Celery 4 on Windows:
One to start/stop multiple workers.
A second to start/stop the beat service and tidy up the pid file, using Celery 4.
The only caveat I do not have a solution for is that you cannot restart the workers, as you need to ensure the spawned processes for the multiple workers are stopped before starting back up.
Workers.py:
'''Usage : python celery_service.py install (start / stop / remove)
Run celery as a Windows service
'''
import win32service
import win32serviceutil
import win32api
import win32con
import win32event
import subprocess
import sys
import os
import shlex
import logging
import time

# The directory for celery_worker.log and celery_worker_service.log
# Default: the directory of this script
INSTDIR = r'X:\Application\Project'
LOGDIR = r'X:\Application\LogFiles'
# The path of python Scripts
# Usually it is in PYTHON_INSTALL_DIR/Scripts. e.g.
# r'C:\Python27\Scripts'
# If it is already in system PATH, then it can be set as ''
PYTHONSCRIPTPATH = r'C:\Python36\Scripts'
# The directory name of django project
# Note: it is the directory at the same level of manage.py
# not the parent directory
PROJECTDIR = 'Project'

logging.basicConfig(
    filename = os.path.join(LOGDIR, 'celery_worker_service.log'),
    level = logging.DEBUG,
    format = '[%(asctime)-15s: %(levelname)-7.7s] %(message)s'
)

class CeleryService(win32serviceutil.ServiceFramework):
    _svc_name_ = "CeleryWorkers"
    _svc_display_name_ = "CeleryWorkers"

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)

    def SvcStop(self):
        logging.info('Stopping {name} service ...'.format(name=self._svc_name_))
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.hWaitStop)
        # ask the spawned workers to shut down gracefully
        command = '"{celery_path}" -A {proj_dir} --workdir=X:/Application/Project control shutdown --timeout=10'.format(
            celery_path=os.path.join(PYTHONSCRIPTPATH, 'celery.exe'),
            proj_dir=PROJECTDIR)
        logging.info('command: ' + command)
        args = shlex.split(command)
        proc = subprocess.Popen(args)
        logging.info('Sent celery shutdown ...')
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)
        logging.info('Stopped {name} service ...'.format(name=self._svc_name_))
        sys.exit()

    def SvcDoRun(self):
        logging.info('Starting {name} service ...'.format(name=self._svc_name_))
        os.chdir(INSTDIR)  # so that proj worker can be found
        logging.info('cwd: ' + os.getcwd())
        self.ReportServiceStatus(win32service.SERVICE_RUNNING)
        command = '"{celery_path}" -A {proj_dir} -c 8 worker --workdir=X:/Application/Project --pidfile=celeryservice.pid -f "{log_path}" -l info'.format(
            celery_path=os.path.join(PYTHONSCRIPTPATH, 'celery.exe'),
            proj_dir=PROJECTDIR,
            log_path=os.path.join(LOGDIR, 'celery_worker.log'))
        logging.info('command: ' + command)
        args = shlex.split(command)
        proc = subprocess.Popen(args)
        logging.info('pid: {pid}'.format(pid=proc.pid))
        self.timeout = 3000
        while True:
            rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
            if rc == win32event.WAIT_OBJECT_0:
                # stop signal encountered: terminate the celery process
                PROCESS_TERMINATE = 1
                handle = win32api.OpenProcess(PROCESS_TERMINATE, False, proc.pid)
                win32api.TerminateProcess(handle, -1)
                win32api.CloseHandle(handle)
                break

if __name__ == '__main__':
    win32serviceutil.HandleCommandLine(CeleryService)
Beatservice.py:
'''Usage : python celery_service.py install (start / stop / remove)
Run celery as a Windows service
'''
import win32service
import win32serviceutil
import win32api
import win32con
import win32event
import subprocess
import sys
import os
import shlex
import logging
import time
import signal

# The directory for celery_beat.log and celery_beat_service.log
# Default: the directory of this script
INSTDIR = os.path.dirname(os.path.realpath(__file__))
LOGPATH = r'X:\Application\Logs'
# The path of python Scripts
# Usually it is in PYTHON_INSTALL_DIR/Scripts. e.g.
# r'C:\Python27\Scripts'
# If it is already in system PATH, then it can be set as ''
PYTHONSCRIPTPATH = r'C:\Python36\Scripts'
# The directory name of django project
# Note: it is the directory at the same level of manage.py
# not the parent directory
PROJECTDIR = 'PROJECT'

logging.basicConfig(
    filename = os.path.join(LOGPATH, 'celery_beat_service.log'),
    level = logging.DEBUG,
    format = '[%(asctime)-15s: %(levelname)-7.7s] %(message)s'
)

class CeleryService(win32serviceutil.ServiceFramework):
    _svc_name_ = "CeleryBeat"
    _svc_display_name_ = "CeleryBeat"

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)

    def SvcStop(self):
        logging.info('Stopping {name} service ...'.format(name=self._svc_name_))
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.hWaitStop)
        # read the pid written by celery beat and kill that process
        with open(r"X:\Application\Project\celerybeat.pid", "r") as pidno:
            pid_id = pidno.read()
        logging.info(pid_id)
        cmdcom = 'taskkill /F /PID {pidid}'.format(pidid=pid_id)
        logging.info(cmdcom)
        killargs = shlex.split(cmdcom)
        process = subprocess.Popen(killargs)
        output, error = process.communicate()
        logging.info(output)
        # tidy up the stale pid file
        os.remove(r"X:\Application\Project\celerybeat.pid")
        logging.info(r'X:\Application\Project\celerybeat.pid file removed')
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)
        sys.exit()

    def SvcDoRun(self):
        logging.info('Starting {name} service ...'.format(name=self._svc_name_))
        os.chdir(INSTDIR)  # so that proj worker can be found
        logging.info('cwd: ' + os.getcwd())
        self.ReportServiceStatus(win32service.SERVICE_RUNNING)
        command = '"{celery_path}" -A {proj_dir} beat --workdir=X:/Application/Project -f "{log_path}" -l info'.format(
            celery_path=os.path.join(PYTHONSCRIPTPATH, 'celery.exe'),
            proj_dir=PROJECTDIR,
            log_path=os.path.join(LOGPATH, 'celery_beat.log'))
        logging.info('command: ' + command)
        args = shlex.split(command)
        proc = subprocess.Popen(args)
        logging.info('pid: {pid}'.format(pid=proc.pid))
        self.timeout = 3000
        while True:
            rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
            if rc == win32event.WAIT_OBJECT_0:
                # stop signal encountered: terminate the beat process
                PROCESS_TERMINATE = 1
                handle = win32api.OpenProcess(PROCESS_TERMINATE, False, proc.pid)
                win32api.TerminateProcess(handle, -1)
                win32api.CloseHandle(handle)
                break

if __name__ == '__main__':
    win32serviceutil.HandleCommandLine(CeleryService)
