Create crontab with python-crontab in Python?

I am trying to add a line to my system user's crontab, from a Python script which uses the package python-crontab. My crontab file does not exist yet, and when I run this code, nothing happens (no errors, no results, no crontab file is created):
from crontab import CronTab
cron = CronTab(user=True)
# cron = CronTab(user='my_user') I tried this line too without any results
job = cron.new(command='python3 /opt/my_script.py')
job.minute.on(2)
job.hour.on(12)
job.is_valid()  # returns True
Am I missing anything?

You need to save the cron job; that's all that's missing:
#!/bin/python
from crontab import CronTab
cron = CronTab(user=True)
job = cron.new(command='python3 /opt/my_script.py')
job.minute.on(2)
job.hour.on(12)
cron.write()  # persist the job to the user's crontab
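To double-check that the entry was actually written, python-crontab can list the jobs it manages; a minimal sketch, assuming the same user crontab as above:

from crontab import CronTab

cron = CronTab(user=True)
for job in cron:
    print(job)  # prints each crontab line, e.g. "2 12 * * * python3 /opt/my_script.py"

From the shell, crontab -l should show the same line.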

Related

Python script doesn't work when started via another script

I'm currently working on a Raspberry Pi 4 and wrote a Python script that sends a mail with a picture, then renames the file and moves it into another folder.
The script works fine when I start it with the command
sudo python script.py
but when I start it from another script, it won't execute the renaming part.
Now the question: what is my mistake?
import os
import time
from sendmail import mail
from sendmail import file_rename
from time import sleep

pic = '/home/pi/Monitor/Bewegung.jpg'
movie = '/home/pi/Monitor/Aufnahme.avi'
archiv = '/home/pi/Archiv/'
time = time.strftime('%d.%m.%Y %H:%M')

mail(filename=pic)
file_rename(oldname=pic, name='Serverraum Bild' + time, format='.jpg', place=archiv)
file_rename(oldname=movie, name='Serverraum Video' + time, format='.avi', place=archiv)
I see that you are starting the script as a user with sudo privileges.
but when I start it from another script, it won't execute the renaming part
This makes me suspect that the calling script does not have the correct permissions to rename/move the file. You can view the permissions of the script with the following command:
ls -la callerscript.py
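If permissions are the culprit, you can also check them from Python before attempting the move; a minimal sketch (the paths are taken from the question, but the check itself is an illustration, not part of the original answer):

import os

pic = '/home/pi/Monitor/Bewegung.jpg'
archiv = '/home/pi/Archiv/'

# os.rename needs write access to the source's directory and to the target directory
print(os.access(os.path.dirname(pic), os.W_OK))  # can we remove/rename entries here?
print(os.access(archiv, os.W_OK))                # can we create the renamed file here?

If either prints False for the user the calling script runs as, that would explain the failed renaming.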

Celery: The module was not found

I am using Open Semantic Search (OSS) and I would like to monitor its processes using the Flower tool. Regarding the workers that Celery needs, the OSS website states:
The workers will do tasks like analysis and indexing of the queued files. The workers are implemented by etl/tasks.py and will be started automatically on boot by the service opensemanticsearch.
This tasks.py file looks as follows:
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Queue tasks for batch processing and parallel processing
#

import time

# Queue handler
from celery import Celery

# ETL connectors
from etl import ETL
from etl_delete import Delete
from etl_file import Connector_File
from etl_web import Connector_Web
from etl_rss import Connector_RSS

verbose = True
quiet = False

app = Celery('etl.tasks')
app.conf.CELERYD_MAX_TASKS_PER_CHILD = 1

etl_delete = Delete()
etl_web = Connector_Web()
etl_rss = Connector_RSS()

#
# Delete document with URI from index
#
@app.task(name='etl.delete')
def delete(uri):
    etl_delete.delete(uri=uri)

#
# Index a file
#
@app.task(name='etl.index_file')
def index_file(filename, wait=0, config=None):
    if wait:
        time.sleep(wait)
    etl_file = Connector_File()
    if config:
        etl_file.config = config
    etl_file.index(filename=filename)

#
# Index file directory
#
@app.task(name='etl.index_filedirectory')
def index_filedirectory(filename):
    from etl_filedirectory import Connector_Filedirectory
    connector_filedirectory = Connector_Filedirectory()
    result = connector_filedirectory.index(filename)
    return result

#
# Index a webpage
#
@app.task(name='etl.index_web')
def index_web(uri, wait=0, downloaded_file=False, downloaded_headers=[]):
    if wait:
        time.sleep(wait)
    result = etl_web.index(uri, downloaded_file=downloaded_file, downloaded_headers=downloaded_headers)
    return result

#
# Index full website
#
@app.task(name='etl.index_web_crawl')
def index_web_crawl(uri, crawler_type="PATH"):
    import etl_web_crawl
    result = etl_web_crawl.index(uri, crawler_type)
    return result

#
# Index webpages from sitemap
#
@app.task(name='etl.index_sitemap')
def index_sitemap(uri):
    from etl_sitemap import Connector_Sitemap
    connector_sitemap = Connector_Sitemap()
    result = connector_sitemap.index(uri)
    return result

#
# Index RSS Feed
#
@app.task(name='etl.index_rss')
def index_rss(uri):
    result = etl_rss.index(uri)
    return result

#
# Enrich with / run plugins
#
@app.task(name='etl.enrich')
def enrich(plugins, uri, wait=0):
    if wait:
        time.sleep(wait)
    etl = ETL()
    etl.read_configfile('/etc/opensemanticsearch/etl')
    etl.read_configfile('/etc/opensemanticsearch/enhancer-rdf')
    etl.config['plugins'] = plugins.split(',')
    filename = uri
    # if present, delete the protocol prefix file://
    if filename.startswith("file://"):
        filename = filename.replace("file://", '', 1)
    parameters = etl.config.copy()
    parameters['id'] = uri
    parameters['filename'] = filename
    parameters, data = etl.process(parameters=parameters, data={})
    return data

#
# Read command line arguments and start
#
# if running (not imported to use its functions), run main function
if __name__ == "__main__":
    from optparse import OptionParser
    parser = OptionParser("etl-tasks [options]")
    parser.add_option("-q", "--quiet", dest="quiet", action="store_true", default=False, help="Don't print status (filenames) while indexing")
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Print debug messages")
    (options, args) = parser.parse_args()

    if options.verbose == False or options.verbose == True:
        verbose = options.verbose
        etl_delete.verbose = options.verbose
        etl_web.verbose = options.verbose
        etl_rss.verbose = options.verbose

    if options.quiet == False or options.quiet == True:
        quiet = options.quiet

    app.worker_main()
I read multiple tutorials about Celery and, from my understanding, this line should do the job:
celery -A etl.tasks flower
but it doesn't. The result is the statement:
Error: Unable to load celery application. The module etl was not found.
Same for
celery -A etl.tasks worker --loglevel=debug
so Celery itself seems to be causing the trouble, not Flower. I also tried e.g. celery -A etl.index_filedirectory worker --loglevel=debug, but with the same result.
What am I missing? Do I have to somehow tell Celery where to find etl.tasks? Online research doesn't really show a similar case; most "module not found" errors seem to occur while importing things, so possibly it's a silly question, but I couldn't find a solution anywhere. I hope you guys can help me. Unfortunately, I won't be able to respond until Monday, sorry in advance.
I had the same issue. I installed and configured my queue as follows, and it works.
Install RabbitMQ
MacOS
brew install rabbitmq
sudo vim ~/.bash_profile
In bash_profile add the following line:
PATH=$PATH:/usr/local/sbin
Then reload bash_profile:
source ~/.bash_profile
Linux
sudo apt-get install rabbitmq-server
Configure RabbitMQ
Launch the queue:
sudo rabbitmq-server
In another Terminal, configure the queue:
sudo rabbitmqctl add_user myuser mypassword
sudo rabbitmqctl add_vhost myvhost
sudo rabbitmqctl set_user_tags myuser mytag
sudo rabbitmqctl set_permissions -p myvhost myuser ".*" ".*" ".*"
Launch Celery
I would suggest going into the folder that contains task.py and using the following command:
celery -A task worker -l info -Q celery --concurrency 5
Beware that this error can mean one of two things:
The module is missing.
The module exists but cannot be loaded, e.g. because it contains errors such as a SyntaxError.
To check that it's not the latter, run:
python -c "import <myModuleContainingTasksDotPyFile>"
In the context of this question:
python -c "import etl"
If it crashes, fix this first (unlike with Celery, you'll get a detailed error message).
The solutions above did not work for me.
I had the same issue, and my problem was that in the main celery.py (which was in the SmartCalend folder) I had:
app = Celery('proj')
but instead I had to put:
app = Celery('SmartCalend')
where SmartCalend is the actual app name that celery.py belongs to (!), not any random word but precisely the app name. That's mentioned nowhere except in the official docs.
Try export PYTHONPATH=<parent directory>, where the parent directory is the folder where etl is. Run the Celery worker and see if it fixes your problem. This is probably one of the most common Celery "issues" (not really Celery, but Python in general). Alternatively, run the Celery worker from that folder.
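The same fix can be expressed inside Python, which also makes it easy to test whether the import works at all; a minimal sketch (/opt/oss is a hypothetical stand-in for the parent directory of etl/):

import sys

sys.path.insert(0, '/opt/oss')  # hypothetical parent directory containing the etl/ package

import etl.tasks  # if this raises, Celery will fail to load the app for the same reason
print('etl.tasks imported fine')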
Answer for MacOS Catalina:
When you install celery with pip (pip install celery), Python can import celery, but you are not able to launch celery from the terminal because the terminal does not know about the celery executable.
Add celery to the PATH to fix this:
nano ~/.bash_profile
In the file add: export PATH="/Users/gavinbelson/Library/Python/2.7/bin:$PATH"
To save the file in the nano editor: ctrl+o, then enter, then ctrl+x
To update the terminal with your change type: source ~/.bash_profile
Now you should be able to type celery in the terminal window
---- Note: this is for the default python terminal command, which runs version 2.7. If you are using python3 to run Python, you would need to alter the path variable accordingly.

Cron job with Django application

I would like to use a cron task to delete media files if a condition is true.
Users generate export files stored in the media folder. To clean up the export files in the background, I have a cron task which loops over each file and checks whether its expiry delay has passed.
I used the django-cron library.
Example:
File in media folder: Final_Products___2019-04-01_17:50:43.487845.xlsx
My cron task looks like this:
class MyCronExportJob(CronJobBase):
    """ Cron job which removes expired files at 18:30 """
    RUN_AT_TIMES = ['18:30']
    schedule = Schedule(run_at_times=RUN_AT_TIMES)
    code = 'app.export_cron_job'

    def do(self):
        now = datetime.datetime.now()
        media_folder = os.listdir(os.path.join(settings.MEDIA_ROOT, 'exports'))

        for files in media_folder:
            file = os.path.splitext(files.split(settings.EXPORT_TITLE_SEPARATOR, 1)[1])[0]
            if datetime.datetime.strptime(file, '%Y-%m-%d_%H:%M:%S.%f') + timedelta(minutes=settings.EXPORT_TOKEN_DELAY) < now:
                os.remove(os.path.join(os.path.join(settings.MEDIA_ROOT, 'exports'), files))

# settings.EXPORT_TOKEN_DELAY = 60 * 24
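As a sanity check, here is how that loop parses the example filename above; a standalone sketch (assuming settings.EXPORT_TITLE_SEPARATOR is '___', which matches the example filename):

import datetime
import os

files = 'Final_Products___2019-04-01_17:50:43.487845.xlsx'
stem = os.path.splitext(files.split('___', 1)[1])[0]
print(stem)  # 2019-04-01_17:50:43.487845
print(datetime.datetime.strptime(stem, '%Y-%m-%d_%H:%M:%S.%f'))  # 2019-04-01 17:50:43.487845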
I edited my crontab (crontab -e):
30 18 * * * source /home/user/Bureau/Projets/app/venv/bin/activate.csh && python /home/user/Bureau/Projets/app/src/manage.py runcrons --force app.cron.MyCronExportJob
Then I ran service cron restart.
But nothing has changed. My file is still there, although it should have been removed because its expiry delay has passed.
I'm using Ubuntu for local development and FreeBSD as the production server environment.
EDIT:
I tried some things, but crontab doesn't work for the moment.
1) * * * * * /bin/date >> /home/user/Bureau/Projets/app/cron_output
==> It works, so crontab works
2) I ran python manage.py runcrons in my console
==> It works
3) I ran this script (cron.sh):
source /home/user/.bashrc
cd /home/user/Bureau/Projets/app
pyenv activate app
python src/manage.py runcrons --force
deactivate
==> It works
4) I ran this crontab line:
35 10 * * * /home/user/Bureau/Projets/app/utility/cron.sh
==> I restarted the service at 10:32 and waited until 10:38: nothing!

Jinja not imported when executing script from another one

I have a web server with a CGI script calling Python scripts.
When I try to execute, from a main file (test1.py), another script called via
os.system('/var/www/cgi-bin/readIRtemp.py ' + arg1 + ' ' + arg2 + ' ' + arg3)
I get this error message in /var/log/apache2/error.log:
import: not found
from: can't read /var/mail/jinja2
This is puzzling to me, since the script works when called directly from the Python console!
Its content is:
import sys, os
from jinja2 import Environment, FileSystemLoader, select_autoescape

last20values = sys.argv[1]
currTempInDegreesCelcius = sys.argv[2]
print('test ' + last20values + ' ' + currTempInDegreesCelcius)

env = Environment(
    loader=FileSystemLoader('/var/www/html/templates'),
    autoescape=select_autoescape(['html', 'xml'])
)
template = env.get_template('IR.html')
updatedTemplate = template.render(arrayOfTemp=last20values, currTemp=currTempInDegreesCelcius)

Html_file = open("/var/www/html/IR.html", "w")
Html_file.write(updatedTemplate)
Html_file.close()
I read somewhere that maybe when calling os.system() the script runs under a different user account, or some crazy thing like that... please help!
Of course I ran chmod 777 * on everything, but that doesn't help...
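For what it's worth, errors like import: not found and from: can't read /var/mail/jinja2 are typical of the file being executed by /bin/sh rather than by Python (the shell tries to run import and from as commands); a minimal sketch of calling the script with an explicit interpreter (an illustration under assumptions, not from the original thread):

import subprocess

# hypothetical arguments, standing in for arg1/arg2/arg3 from the question
args = ['last20values', '21.5']

# invoke the interpreter explicitly instead of relying on a shebang and execute bit
subprocess.run(['python3', '/var/www/cgi-bin/readIRtemp.py'] + args)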

PyEZ - Cron job to connect to 8 routers and save the running config to 8 local files at a specific timestamp

I am a beginner with PyEZ. Can I write a cron job in PyEZ which will connect to 8 routers, fetch the running config on each device, and save it to 8 different files at a particular timestamp? Could you help me achieve this?
I have already written PyEZ code which writes the base config to a local file.
Loading the config to a local file:
from jnpr.junos import Device
from lxml import etree

dev = Device(host='hostname', port='22', user='root', password='sitlab123!')
dev.open()


class Create_Config():
    def __init__(self):
        cnf = dev.rpc.get_config()  # get config as XML

        with open('myfile.txt', "w") as text_file:
            text_file.write(etree.tostring(cnf))  # the with block closes the file automatically

    # return configuration
    def get_conf(self):
        return dev.cli("show configuration")
You can use the python-crontab module along with the PyEZ module.
python-crontab
Creating a new cron job works as follows:
from crontab import CronTab

# init cron
cron = CronTab()

# add new cron job
job = cron.new(command='/usr/bin/echo')

# job settings
job.hour.every(4)

# as in the first answer above, persist the job
cron.write()
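Combining the two, the cron job can then invoke a script that loops over the routers and saves each running config to its own timestamped file; a minimal sketch under stated assumptions (the hostnames, credentials, and output naming are placeholders, not from the original answer):

import time

from jnpr.junos import Device

# hypothetical list of the 8 routers
hosts = ['router1', 'router2', 'router3', 'router4',
         'router5', 'router6', 'router7', 'router8']

stamp = time.strftime('%Y-%m-%d_%H:%M:%S')

for host in hosts:
    dev = Device(host=host, port='22', user='root', password='password')  # placeholder credentials
    dev.open()
    config = dev.cli("show configuration")  # running config as text, as in get_conf() above
    dev.close()

    # one file per router, tagged with this run's timestamp
    with open('{0}_{1}.txt'.format(host, stamp), 'w') as f:
        f.write(config)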
