I have a proof-of-concept script based on the watchdog module: it registers when a new file is added to a set folder and sends off a command. This script runs constantly, but the final design will be deployed on a server, meaning we will not have access to the command line to Ctrl+C it. How do I kill it from an outside source (e.g. a second script that activates a function within the primary script)?
Here is my current script, which contains a "stop_watchdog" function at the bottom.
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import os, sys, time
import sqlite3

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

if __name__ == "__main__":
    patterns = ["*"]
    ignore_patterns = None
    ignore_directories = False
    case_sensitive = True
    my_event_handler = PatternMatchingEventHandler(patterns, ignore_patterns, ignore_directories, case_sensitive)

    def file_detected(textInput):
        textInput = str(textInput)
        if ".txt" not in textInput:
            conn = sqlite3.connect(textInput)  # You can create a new database by changing the name within the quotes
            c = conn.cursor()  # The database will be saved in the location where your 'py' file is saved
            c.execute("SELECT * FROM sqlite_master where type = 'table'")
            ##print(c.fetchall())
            textTest = "{}.txt".format(textInput)
            f = open(textTest, "w")
            f.write(str(c.fetchall()))
            f.close()

    def on_created(event):
        print(f"hey, {event.src_path} has been created!")
        file_detected(event.src_path)
        ##test("{event.src_path}", shell=True)

    my_event_handler.on_created = on_created

    path = "./xyz"
    go_recursively = True
    file_observer = Observer()
    file_observer.schedule(my_event_handler, path, recursive=go_recursively)
    file_observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        file_observer.stop()
        file_observer.join()

    def stop_watchdog():
        print("Quitting!")
        file_observer.stop()
        sys.exit()
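One way to make this killable from the outside is a signal handler: the watcher writes its PID to a file and stops cleanly when it receives SIGTERM, which a second script (or plain kill) can send. Below is a minimal sketch assuming a POSIX-style server; the PID-file name "watcher.pid" and the handler wiring are illustrative, not part of the script above.

import os
import signal
import time

from watchdog.observers import Observer

running = True

def handle_sigterm(signum, frame):
    # Runs when another process sends SIGTERM, e.g. `kill <pid>`
    # or os.kill(pid, signal.SIGTERM) from a second Python script.
    global running
    print("Quitting!")
    running = False

if __name__ == "__main__":
    signal.signal(signal.SIGTERM, handle_sigterm)

    # Record our PID so the second script knows which process to signal.
    with open("watcher.pid", "w") as f:
        f.write(str(os.getpid()))

    observer = Observer()
    # ... schedule your event handler here, as in the script above ...
    observer.start()
    try:
        while running:
            time.sleep(1)
    finally:
        observer.stop()
        observer.join()

The second script then only needs to read watcher.pid and call os.kill(int(pid), signal.SIGTERM). Note that a function like stop_watchdog cannot be called across processes; a second script runs in its own interpreter, so it has to communicate through something external such as a signal, a socket, or a sentinel file.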
I'm trying to make a watchdog that listens for changes (adding/deleting files) in a folder.
My problem is that every time I copy or delete several files in this folder (and its subfolders), the event chain fires one by one, for each and every file.
How can I make the on_event() method be invoked only once, after multiple files have been created or deleted?
Let's say I'm copying two images into this folder.
I want the event handler to be invoked only once after the file transfer finishes, and not twice - once for each image - as it currently works.
Thanks!
The code runs on a Raspberry Pi 3 with Python 3.7.
Here's the code:
import os
import time
import psutil
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

i = 0

def show_stats():
    global i
    read = "read #" + str(i) + ":"
    mem = "\nmemory in use: " + str(psutil.virtual_memory().percent) + "%"
    cpu = "\ncpu load: " + str(psutil.cpu_percent()) + "%"
    temp = "\ncurrent " + \
        os.popen("vcgencmd measure_temp").readline().replace(
            "=", ": ").replace("'C", " C°")
    end = "\n=================="
    i += 1
    stats = read + mem + cpu + temp + end
    return stats

class Watcher:
    DIRECTORY_TO_WATCH = r'/home/pi/Desktop/jsSlider/images'

    def __init__(self):
        self.observer = Observer()
        print("watching ", self.DIRECTORY_TO_WATCH, "...")

    def run(self):
        event_handler = Handler()
        self.observer.schedule(
            event_handler, self.DIRECTORY_TO_WATCH, recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(5)
                print(show_stats())
        except Exception as e:
            self.observer.stop()
            print(e)
        self.observer.join()

class Handler(FileSystemEventHandler):
    @staticmethod
    def on_event(event):
        wait = 1
        if event.event_type == 'created' or event.event_type == 'deleted':
            print("Received event - %s. " % event.src_path, str(event.event_type))
            time.sleep(wait)  # give some timeout between commands; without it the commands overwhelmed the pi (one second seems to be enough)
            os.system('python /home/pi/Desktop/Slider/scripts/arr_edit.py')  # recreate the JS array
            time.sleep(wait)
            os.system('cp -r /home/pi/Desktop/jsSlider/scripts/imgArr.js /home/pi/Desktop/jsSlider/themes/1')  # copy the newly created JS array to its place
            time.sleep(wait)
            os.system('sudo pkill chromium')  # "refresh" the page - the kiosk mode reactivates the process...
            # os.system('cls')
            print('done!')

if __name__ == '__main__':
    w = Watcher()
    w.run()
Edit I
There is a poor RPi 3 connected to a TV in some clinic, working in kiosk mode to display images from a local HTML file (with some JS code - the slide show runs with an existing JS script - I can upload everything if requested; the images are also on the Pi itself).
What I'm trying to achieve is to automatically:
rebuild the JS array (with a working Python script - code below (arr_edit.py)),
copy the new array to its desired location (shell command),
and restart chromium with "pkill chromium" (shell command).
Now, I cannot allow the commands to run every time someone copies/deletes multiple images: whenever 2+ images are added, I cannot "restart" the kiosk (sudo pkill chromium) each and every time a file is created.
Every time you copy multiple files (images in this case), an entirely separate created event is fired for each image, so for 5 images there will be 5 different events firing the on_event() method, each in turn, making the kiosk restart 5 times in a row. (Now think of what happens when a 50-file transfer occurs - the Pi will simply crash.)
Therefore, I need a method to invoke the command only once after the file transfer finishes, regardless of how many files have been changed/created/deleted in the folder; one general pattern for this is sketched right below.
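(For readers looking for a generic approach: a common way to collapse a burst of events into one action is to debounce them - every event restarts a short timer, and the action fires only when the timer expires without new events having arrived. This is not the technique used in the final script further down, which checks the age of imgArr.js instead; it is just a sketch, with the delay value and the rebuild() placeholder chosen for illustration.)

import threading

from watchdog.events import FileSystemEventHandler

DELAY = 300  # seconds of quiet required before the action runs

class DebouncedHandler(FileSystemEventHandler):
    def __init__(self, action):
        super().__init__()
        self.action = action  # callable to run once the burst ends
        self.timer = None
        self.lock = threading.Lock()

    def on_any_event(self, event):
        if event.event_type not in ("created", "deleted"):
            return
        with self.lock:
            if self.timer is not None:
                self.timer.cancel()  # a new event arrived: start the wait over
            self.timer = threading.Timer(DELAY, self.action)
            self.timer.start()

def rebuild():
    # Placeholder for the real work: rebuild the JS array, copy it
    # into place, pkill chromium, and so on.
    print("burst finished, rebuilding once")

# Usage: observer.schedule(DebouncedHandler(rebuild), path, recursive=True)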
arr_edit.py (not entirely my code):
import os

dir_path = r'/home/pi/Desktop/jsSlider/images'
file_path = r'/home/pi/Desktop/jsSlider/scripts/imgArr.js'
directory = os.fsencode(dir_path)

arr_name = 'images=[\n'
start_str = '{"img":"./images/'
end_str = '"},\n'
images = ''

def writer(array, imagesList):
    str_to_write = array + imagesList + ']'
    f = open(file_path, 'w')
    f.write(str_to_write)
    f.close()  # note the parentheses: a bare f.close does nothing

file_list = os.listdir(directory)
for file in file_list:
    filename = os.fsdecode(file)
    if filename.endswith(".jpg") or filename.endswith(".jpeg") or filename.endswith(".webp"):
        if file == file_list[len(file_list) - 1]:
            end_str = '"}\n'  # the last entry gets no trailing comma
        images += start_str + filename + end_str
        continue
    else:
        continue

writer(arr_name, images)
output JS array (sample from inside imgArr.js):
images=[
{"img":"./images/246.jpg"},
{"img":"./images/128.jpg"},
{"img":"./images/238.webp"},
{"img":"./images/198.jpg"},
{"img":"./images/247.webp"}
]
As Mark suggested in the comments, I added a check to see if the JS file has changed in the past 5 minutes.
If the file changed, wait another 5 minutes and re-initiate the change (in case more files have been added to the folder), so that the new, larger transfers will also be shown in this run.
Works like a charm!
Many thanks!!
Here's the final watchdog.py:
import os
import time
import psutil
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

i = 0

def show_stats():
    global i
    read = "read #" + str(i) + ":"
    mem = "\nmemory in use: " + str(psutil.virtual_memory().percent) + "%"
    cpu = "\ncpu load: " + str(psutil.cpu_percent()) + "%"
    temp = "\ncurrent " + \
        os.popen("vcgencmd measure_temp").readline().replace(
            "=", ": ").replace("'C", " C°")
    end = "\n=================="
    i += 1
    stats = read + mem + cpu + temp + end
    return stats

def wait_for_file(file):
    time.sleep(300)
    if age(file) >= 5:
        modify()

def modify():
    os.system('python /home/pi/Desktop/jsSlider/scripts/arr_edit.py')
    os.system(
        'cp -r /home/pi/Desktop/jsSlider/scripts/imgArr.js /home/pi/Desktop/jsSlider/themes/1')
    time.sleep(1)
    os.system('sudo pkill chromium')
    # os.system('cls')
    print("done!\nwatching...")

def age(filename):
    # minutes since the file was last modified
    return (time.time() - os.path.getmtime(filename)) // 60

class Watcher:
    DIRECTORY_TO_WATCH = r'/home/pi/Desktop/jsSlider/images'

    def __init__(self):
        self.observer = Observer()
        print("watching ", self.DIRECTORY_TO_WATCH, "...")

    def run(self):
        event_handler = Handler()
        self.observer.schedule(
            event_handler, self.DIRECTORY_TO_WATCH, recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(5)
                print(show_stats())
        except Exception as e:
            self.observer.stop()
            print(e)
        self.observer.join()

class Handler(FileSystemEventHandler):
    @staticmethod
    def on_any_event(event):
        file = r'/home/pi/Desktop/jsSlider/scripts/imgArr.js'
        if event.event_type == 'created' or event.event_type == 'deleted':
            print("Received event - %s. " %
                  event.src_path, str(event.event_type))
            time.sleep(5)
            if age(file) < 5:
                wait_for_file(file)
            else:
                modify()

if __name__ == '__main__':
    w = Watcher()
    w.run()
I have a watchdog script to monitor a directory recursively. When an event happens I want to do something with the file.
This worked fine, but some files are very big, so processing them blocked the watcher, and some files later went missing because the watcher didn't register them while it was blocked.
So I thought multiprocessing could help. My idea was that each event (created, modified, ...) would start a new process, which would then execute the function.
I now have a sample script that combines watchdog with multiprocessing, but I am having trouble getting it to work.
import os
import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from multiprocessing import Process

def print_msg(text):
    proc = os.getpid()
    print("{0} über Prozess ID: {1}".format(text, proc))

def on_created(event):
    text = "hey, {0} has been created!".format(event.src_path)
    proc = Process(target=print_msg, args=(text))
    proc.start()
    proc.join()

def on_deleted(event):
    text = "what the f**k! Someone deleted {0}!".format(event.src_path)
    proc = Process(target=print_msg, args=(text))
    proc.start()
    proc.join()

def on_modified(event):
    text = "hey buddy, {0} has been modified".format(event.src_path)
    proc = Process(target=print_msg, args=(text))
    proc.start()
    proc.join()

def on_moved(event):
    text = "ok ok ok, someone moved {0} to {1}".format(event.src_path, event.dest_path)
    proc = Process(target=print_msg, args=(text))
    proc.start()
    proc.join()

if __name__ == "__main__":
    patterns = "*"
    ignore_patterns = ""
    ignore_directories = False
    case_sensitive = True
    my_event_handler = PatternMatchingEventHandler(patterns, ignore_patterns, ignore_directories, case_sensitive)
    my_event_handler.on_created = on_created
    my_event_handler.on_deleted = on_deleted
    my_event_handler.on_modified = on_modified
    my_event_handler.on_moved = on_moved

    path = "\\\\swibinacl01-cifs\\BelVis\\PROD\\Importer\\Messdaten"
    go_recursively = True
    my_observer = Observer()
    my_observer.schedule(my_event_handler, path, recursive=go_recursively)
    my_observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        my_observer.stop()
        my_observer.join()
When I test the script, I get the following message as soon as the first event comes up:
my_event_handler.on_created = on_created
NameError: name 'my_event_handler' is not defined
So I think that after the event (or after starting the new process) the my_event_handler object is gone and needs to be re-initialized.
But why is that? My thinking was that when the event starts the function in a new process, the original process (the watcher) would continue and the new process would act independently.
What is my mistake? Can anyone help me out?
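Two things stand out in the script above, independent of the traceback. First, args=(text) is not a tuple - (text) is just text, so Process will iterate over the string's characters as if they were separate arguments; it needs a trailing comma. Second, on Windows multiprocessing starts each child by re-importing the module rather than forking, so the module must be importable without side effects, which may be what trips over the handler wiring. A minimal sketch of one handler with both points addressed (the other handlers follow the same shape):

import os
from multiprocessing import Process

def print_msg(text):
    print("{0} über Prozess ID: {1}".format(text, os.getpid()))

def on_created(event):
    text = "hey, {0} has been created!".format(event.src_path)
    proc = Process(target=print_msg, args=(text,))  # note the trailing comma
    proc.start()
    # Deliberately no proc.join() here: joining immediately would block
    # the watcher thread again, which is what the extra process was
    # supposed to avoid.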
I need to observe files of a given type, say .tsv, but I also need to observe all events on directories.
Currently, with the patterns argument, directory events are being ignored.
I want it to observe every event on directories.
Here's my code:
import time, sys
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from datetime import datetime
import os

class MyHandler(PatternMatchingEventHandler):
    patterns = ["*.tsv"]
    ignore_patterns = []
    ignore_directories = False
    case_sensitive = True

    def process(self, event):
        log_file = open('log.txt', 'a')
        path = os.path.join(event.src_path)
        line = path + "\t" + event.event_type + "\t" + str(datetime.now()) + "\n"
        log_file.write(line)
        log_file.close()

    def on_created(self, event):
        self.process(event)

if __name__ == '__main__':
    args = sys.argv[1:]
    observer = Observer()
    observer.schedule(MyHandler(), path=args[0] if args else '.', recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
How can it be done?
Yes. Instead of using the patterns, just configure the directory, and when the event is triggered, check the src_path to see whether it is a .tsv file; at that point you can implement whatever logic you need. For example:
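A minimal sketch of that suggestion, keeping the same log format as the question: use the plain FileSystemEventHandler (no patterns), and filter in the handler so directory events always pass while file events are limited to .tsv files.

from datetime import datetime

from watchdog.events import FileSystemEventHandler

class MyHandler(FileSystemEventHandler):
    def process(self, event):
        with open('log.txt', 'a') as log_file:
            line = event.src_path + "\t" + event.event_type + "\t" + str(datetime.now()) + "\n"
            log_file.write(line)

    def on_any_event(self, event):
        # Keep every directory event; for files, only react to .tsv.
        if event.is_directory or event.src_path.endswith('.tsv'):
            self.process(event)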
I am new to SQLite and am wondering how to create a backup for a database. On a similar site I found a question on how to create a backup for a database, but I am having problems getting it to work.
This is the question: https://codereview.stackexchange.com/questions/78643/create-sqlite-backups
This is the code:
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import sqlite3
import shutil
import time
import os
NO_OF_DAYS = 7
def sqlite3_backup(dbfile, backupdir):
dbfile = Accounts.db
backupdir = r"E:\TESTING\BackUp.db"
"""Create timestamped database copy"""
if not os.path.isdir(backupdir):
raise Exception("Backup directory does not exist: {}".format(backupdir))
backup_file = r"E:\TESTING\BackUp.db" + time.strftime("-%Y%m%d-%H%M%S")
connection = sqlite3.connect(r"E:\TESTING\Accounts.db")
cursor = connection.cursor()
# Lock database before making a backup
cursor.execute('begin immediate')
# Make new backup file
shutil.copyfile(dbfile, backup_file)
print ("\nCreating {}...".format(backup_file))
# Unlock database
connection.rollback()
def clean_data(backup_dir):
backup_dir = r"E:\TESTING\BackUp.db"
print ("\n------------------------------")
print ("Cleaning up old backups")
for filename in os.listdir(backup_dir):
backup_file = os.path.join(backup_dir, filename)
if os.stat(backup_file).st_ctime < (time.time() - NO_OF_DAYS * 86400):
if os.path.isfile(backup_file):
os.remove(backup_file)
print ("Deleting {}...".format(ibackup_file))
def get_arguments():
## connection = sqlite3.connect(r"E:\TESTING\Accounts.db")
## cursor = connection.cursor()
backup_dir = r"E:\TESTING\BackUp.db"
db_file = sqlite3.connect(r"E:\TESTING\Accounts.db")
"""Parse the commandline arguments from the user"""
parser = argparse.ArgumentParser()
parser.add_argument('db_file',
help='the database file that needs backed up')
parser.add_argument('backup_dir',
help='the directory where the backup'
'file should be saved')
return parser.parse_args()
if __name__ == "__main__":
#args = get_arguments()
dbfile = Accounts
backup_dir = "E:\TESTING"
#sqlite3_backup(args.db_file, args.backup_dir)
sqlite3_backup(db_file, backup_dir)
clean_data(args.backup_dir)
print ("\nBackup update has been successful.")
When I run the code I get this error:
usage: backup.py [-h] db_file backup_dir
backup.py: error: the following arguments are required: db_file, backup_dir
I have substituted the db_file and the backup_dir into the code, but it still fails with the same error.
You may need to change this part:

if __name__ == "__main__":
    #args = get_arguments()
    db_file = <<YOUR DB FILE NAME>>
    backup_dir = <<YOUR BACKUP DIRECTORY PATH>>
    #sqlite3_backup(args.db_file, args.backup_dir)
    sqlite3_backup(db_file, backup_dir)
    # CHANGE clean_data(args.backup_dir)
    # TO:
    clean_data(backup_dir)
    print("\nBackup update has been successful.")
Looking at the example file in CEF Python 3 on Windows:
When running the Python example scripts, a debug window opens in the Windows command prompt.
How do I keep this window from being shown?
Just to state the obvious: toggling the DEBUG flag does not make a difference; it only stops the debug output inside that window, but the window itself still shows.
This is the example.py file:
# CEF Python 3 example application.

# Checking whether python architecture and version are valid, otherwise an obfuscated
# error will be thrown when trying to load cefpython.pyd with a message "DLL load failed".
import platform
if platform.architecture()[0] != "32bit":
    raise Exception("Architecture not supported: %s" % platform.architecture()[0])

import os, sys
libcef_dll = os.path.join(os.path.dirname(os.path.abspath(__file__)),
        'libcef.dll')
if os.path.exists(libcef_dll):
    # Import the local module.
    if 0x02070000 <= sys.hexversion < 0x03000000:
        import cefpython_py27 as cefpython
    elif 0x03000000 <= sys.hexversion < 0x04000000:
        import cefpython_py32 as cefpython
    else:
        raise Exception("Unsupported python version: %s" % sys.version)
else:
    # Import the package.
    from cefpython3 import cefpython

import cefwindow
import win32con
import win32gui
import time

DEBUG = True

def GetApplicationPath(file=None):
    import re, os
    # If file is None return current directory without trailing slash.
    if file is None:
        file = ""
    # Only when relative path.
    if not file.startswith("/") and not file.startswith("\\") and (
            not re.search(r"^[\w-]+:", file)):
        if hasattr(sys, "frozen"):
            path = os.path.dirname(sys.executable)
        elif "__file__" in globals():
            path = os.path.dirname(os.path.realpath(__file__))
        else:
            path = os.getcwd()
        path = path + os.sep + file
        path = re.sub(r"[/\\]+", re.escape(os.sep), path)
        path = re.sub(r"[/\\]+$", "", path)
        return path
    return str(file)

def ExceptHook(excType, excValue, traceObject):
    import traceback, os, time, codecs
    # This hook does the following: in case of exception write it to
    # the "error.log" file, display it to the console, shutdown CEF
    # and exit application immediately by ignoring "finally" (_exit()).
    errorMsg = "\n".join(traceback.format_exception(excType, excValue,
            traceObject))
    errorFile = GetApplicationPath("error.log")
    try:
        appEncoding = cefpython.g_applicationSettings["string_encoding"]
    except:
        appEncoding = "utf-8"
    if type(errorMsg) == bytes:
        errorMsg = errorMsg.decode(encoding=appEncoding, errors="replace")
    try:
        with codecs.open(errorFile, mode="a", encoding=appEncoding) as fp:
            fp.write("\n[%s] %s\n" % (
                    time.strftime("%Y-%m-%d %H:%M:%S"), errorMsg))
    except:
        print("cefpython: WARNING: failed writing to error file: %s" % (
                errorFile))
    # Convert error message to ascii before printing, otherwise
    # you may get error like this:
    # | UnicodeEncodeError: 'charmap' codec can't encode characters
    errorMsg = errorMsg.encode("ascii", errors="replace")
    errorMsg = errorMsg.decode("ascii", errors="replace")
    print("\n"+errorMsg+"\n")
    cefpython.QuitMessageLoop()
    cefpython.Shutdown()
    os._exit(1)

def InitDebugging():
    # Whether to print & log debug messages
    if DEBUG:
        cefpython.g_debug = True
        cefpython.g_debugFile = GetApplicationPath("debug.log")
        cefwindow.g_debug = True

def CefAdvanced():
    sys.excepthook = ExceptHook
    InitDebugging()

    appSettings = dict()
    appSettings["log_file"] = GetApplicationPath("debug.log")
    appSettings["log_severity"] = cefpython.LOGSEVERITY_INFO
    appSettings["release_dcheck_enabled"] = True  # Enable only when debugging
    appSettings["browser_subprocess_path"] = "%s/%s" % (
            cefpython.GetModuleDirectory(), "subprocess")
    cefpython.Initialize(appSettings)

    wndproc = {
        win32con.WM_CLOSE: CloseWindow,
        win32con.WM_DESTROY: QuitApplication,
        win32con.WM_SIZE: cefpython.WindowUtils.OnSize,
        win32con.WM_SETFOCUS: cefpython.WindowUtils.OnSetFocus,
        win32con.WM_ERASEBKGND: cefpython.WindowUtils.OnEraseBackground
    }

    browserSettings = dict()
    browserSettings["universal_access_from_file_urls_allowed"] = True
    browserSettings["file_access_from_file_urls_allowed"] = True

    windowHandle = cefwindow.CreateWindow(title="CEF Python 3 example",
            className="cefpython3_example", width=800, height=600,
            icon="icon.ico", windowProc=wndproc)

    windowInfo = cefpython.WindowInfo()
    windowInfo.SetAsChild(windowHandle)
    browser = cefpython.CreateBrowserSync(windowInfo, browserSettings,
            navigateUrl=GetApplicationPath("example.html"))

    cefpython.MessageLoop()
    cefpython.Shutdown()

def CloseWindow(windowHandle, message, wparam, lparam):
    browser = cefpython.GetBrowserByWindowHandle(windowHandle)
    browser.CloseBrowser()
    return win32gui.DefWindowProc(windowHandle, message, wparam, lparam)

def QuitApplication(windowHandle, message, wparam, lparam):
    win32gui.PostQuitMessage(0)
    return 0

if __name__ == "__main__":
    CefAdvanced()
I got it working; the problem was actually the way in which I used py2exe.
In the setup.py file I had to change:

setup(
    console=['wxwindow.py']
    ,data_files = get_data_files()
    ,options={"py2exe":{"dll_excludes":dll_excludes, 'optimize': 2}}
    ,zipfile = "shared.lib"
)

to:

setup(
    windows=['wxwindow.py']
    ,data_files = get_data_files()
    ,options={"py2exe":{"dll_excludes":dll_excludes, 'optimize': 2}}
    ,zipfile = "shared.lib"
)

So the key change is windows=['wxwindow.py'] instead of console=['wxwindow.py']: with py2exe, console= builds a console application, which opens the command-prompt window, while windows= builds a GUI application without it.