I am trying to rerun my Python script every 10 seconds. I have tried various things with the time.sleep function, but nothing seems to work. I have tried different variants of time.sleep:
def job():
    # script

if __name__ == '__main__':
    while True:
        job()
        time.sleep(10)
but I can't find the correct place/way to implement it. I don't want to rerun just a function, but the full code.
This is the script I am trying to rerun:
import...
def main():
    # parse the command line arguments
    args = ConfigArgumentParser(description=__doc__).parse_args()
    if _debug: _log.debug("initialization")
    if _debug: _log.debug(" - args: %r", args)

    # make a device object
    this_device = LocalDeviceObject(
        objectName=args.ini.objectname,
        objectIdentifier=('device', int(args.ini.objectidentifier)),
        maxApduLengthAccepted=int(args.ini.maxapdulengthaccepted),
        segmentationSupported=args.ini.segmentationsupported,
        vendorIdentifier=int(args.ini.vendoridentifier),
        )

    # make a sample application
    this_application = BIPSimpleApplication(this_device, args.ini.address)

    # make some random input objects
    for i in range(1, RANDOM_OBJECT_COUNT + 1):
        ravo = RandomAnalogValueObject(
            objectIdentifier=('analogValue', i),
            objectName='Temp%d' % (i,),
            )
        this_application.add_object(ravo)

    # make sure they are all there
    _log.debug(" - object list: %r", this_device.objectList)

    _log.debug("running")
    run()
    _log.debug("fini")

if __name__ == "__main__":
    main()
If you need the whole thing to run (even bootstrap code), you can use a shell script to do it.
This won't refresh environment variables though.
#!/usr/bin/env bash
while true; do
    sleep 10
    python script.py
done
If you need to refresh environment variables (you seem to need it because of RANDOM_OBJECTS), add eval "$(exec /usr/bin/env -i "${SHELL}" -l -c "export")" to the mix. Source: https://unix.stackexchange.com/a/581684
Try this:
if __name__ == "__main__":
time.sleep(10)
main()
What you can do is just restart your whole script after 10s.
Applied to your code it would look like this:
import os
import time
import json
import sys
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ConfigArgumentParser
from bacpypes.core import run
from bacpypes.primitivedata import Real
from bacpypes.object import AnalogValueObject, Property, register_object_type
from bacpypes.errors import ExecutionError
from bacpypes.app import BIPSimpleApplication
from bacpypes.local.device import LocalDeviceObject
# Read Json
with open('ObjectList.json') as json_file:
    data = json.load(json_file)

def get_present_value(no):
    for a in data['AnalogValues']:
        if a['ObjectIdentifier'] == int(no):
            return a['PresentValue']
    return None

ai_1 = get_present_value(1)
print(ai_1)
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# settings
RANDOM_OBJECT_COUNT = int(os.getenv('RANDOM_OBJECT_COUNT', 1))
#
# RandomValueProperty
#
class RandomValueProperty(Property):
    def __init__(self, identifier):
        if _debug: RandomValueProperty._debug("__init__ %r", identifier)
        Property.__init__(self, identifier, Real, default=0.0, optional=True, mutable=False)

    def ReadProperty(self, obj, arrayIndex=None):
        if _debug: RandomValueProperty._debug("ReadProperty %r arrayIndex=%r", obj, arrayIndex)
        # access an array
        if arrayIndex is not None:
            raise ExecutionError(errorClass='property', errorCode='propertyIsNotAnArray')
        # return a random value
        value = ai_1
        if _debug: RandomValueProperty._debug(" - value: %r", value)
        return value

    def WriteProperty(self, obj, value, arrayIndex=None, priority=None, direct=False):
        if _debug: RandomValueProperty._debug("WriteProperty %r %r arrayIndex=%r priority=%r direct=%r", obj, value,
            arrayIndex, priority, direct)
        raise ExecutionError(errorClass='property', errorCode='writeAccessDenied')
bacpypes_debugging(RandomValueProperty)
#
# Random Value Object Type
#
class RandomAnalogValueObject(AnalogValueObject):
    properties = [
        RandomValueProperty('presentValue'),
        ]

    def __init__(self, **kwargs):
        if _debug: RandomAnalogValueObject._debug("__init__ %r", kwargs)
        AnalogValueObject.__init__(self, **kwargs)
bacpypes_debugging(RandomAnalogValueObject)
register_object_type(RandomAnalogValueObject)
#
# __main__
#
def main():
    # parse the command line arguments
    args = ConfigArgumentParser(description=__doc__).parse_args()
    if _debug: _log.debug("initialization")
    if _debug: _log.debug(" - args: %r", args)

    # make a device object
    this_device = LocalDeviceObject(
        objectName=args.ini.objectname,
        objectIdentifier=('device', int(args.ini.objectidentifier)),
        maxApduLengthAccepted=int(args.ini.maxapdulengthaccepted),
        segmentationSupported=args.ini.segmentationsupported,
        vendorIdentifier=int(args.ini.vendoridentifier),
        )

    # make a sample application
    this_application = BIPSimpleApplication(this_device, args.ini.address)

    # make some random input objects
    for i in range(1, RANDOM_OBJECT_COUNT + 1):
        ravo = RandomAnalogValueObject(
            objectIdentifier=('analogValue', i),
            objectName='Temp%d' % (i,),
            )
        this_application.add_object(ravo)

    # make sure they are all there
    _log.debug(" - object list: %r", this_device.objectList)

    _log.debug("running")
    run()
    _log.debug("fini")

if __name__ == "__main__":
    main()
    time.sleep(10)
    os.execl(sys.executable, sys.executable, *sys.argv)  # Your script restarts after 10s with this
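If you only need the restart idiom itself, here is a minimal sketch (an illustration, assuming the script is launched directly with the interpreter rather than through -m or an entry point):

import os
import sys
import time

def main():
    print("doing work...")  # placeholder for the real work

if __name__ == "__main__":
    main()
    time.sleep(10)
    # replace the current process with a fresh interpreter running the same script
    os.execl(sys.executable, sys.executable, *sys.argv)

os.execl never returns: the new interpreter takes over the current process, so all module-level code (the JSON read, RANDOM_OBJECT_COUNT, and so on) runs again on every restart.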
Related
I am using the python unittest module for testing a file that takes a command line argument. The argument is a file name which is then passed into a function like so:
file_name = str(sys.argv[1])
file = open(file_name)
result = main_loop(file)
print(result)
My test is set up like so:
class testMainFile(unittest.TestCase):
    def test_main_loop(self):
        file = open('file_name.json')
        result = main_file.main_loop(file)
        self.assertEqual(result, 'Expected Result')

if __name__ == 'main':
    unittest.main()
When I run the test I get an "IndexError: list index out of range".
I tried passing the argument when running the test but to no avail. How do I run my test without error?
I think you have a couple of options here. First, check out patch in the documentation, because I think you can get away with:

from unittest.mock import patch

@patch('sys.argv', ['mock.py', 'test-value'])
def test_main_loop(self):
    ...
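A fuller sketch of that approach (main_file, main_loop, and file_name.json are the names from the question; the patched argv values are placeholders):

import sys
import unittest
from unittest.mock import patch

import main_file  # the module under test, as named in the question

class TestMainFile(unittest.TestCase):

    @patch('sys.argv', ['main_file.py', 'file_name.json'])
    def test_main_loop(self):
        # sys.argv[1] now exists, so code that reads it no longer raises IndexError
        with open(sys.argv[1]) as file:
            result = main_file.main_loop(file)
        self.assertEqual(result, 'Expected Result')

if __name__ == '__main__':
    unittest.main()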
Options for fun:
One would be to simply override sys.argv next to your call:

  def test_main_loop(self):
      file = open('file_name.json')
+     original_argv = sys.argv
+     sys.argv = ['mock argv', 'my-test-value']
      result = main_file.main_loop(file)
+     sys.argv = original_argv
      self.assertEqual(result, 'Expected Result')
Second would be to create a simple wrapper for your function:

from typing import Callable

def set_sys_argv(func: Callable):
    sys.argv = ['mock.py', 'my_test_value']
    def wrapper(*args, **kwargs):
        func()
    return wrapper
and use it with the test function:

@set_sys_argv
def test_main_loop(self):
    ...
We can improve it slightly and make it more generic by making a decorator that accepts the values to mock:

def set_sys_argv(*argv):
    sys.argv = list(argv)
    def _decorator(func: Callable):
        def wrapper(*args, **kwargs):
            func()
        return wrapper
    return _decorator
and use it similarly to patch
@set_sys_argv('mock.py', 'test-value')
def test_main_loop(self):
    ...
Third would be to create a context manager, likewise:
class ReplaceSysArgv(list):
    def __enter__(self):
        self._argv = sys.argv
        sys.argv = ['mock', 'my-test-value']
        return self

    def __exit__(self, *args):
        sys.argv = self._argv
and use it with your code
def test_main_loop(self):
    file = open('file_name.json')
    with ReplaceSysArgv():
        result = main_file.main_loop(file)
    self.assertEqual(result, 'Expected Result')
You have to push the arguments onto sys.argv before retrieving them (if your code is pulling from command-line arguments; it's unclear to me where in your test you're using the command-line arguments, but I digress).
So something like first doing:

import sys
sys.argv = ['mock_filename.py', 'json_file.json']
# ... continue with the rest of the program / test
New to click here so I'm still learning. How do I store the click arguments/options in an object for later reference within my application? I thought of just creating an object and returning it from the setup function, but it doesn't seem to work. Here is what I did:
import click
import sys
class Cfg(object):
    component = ""
    command = ""
    obj = ""

my_cfg = Cfg()

@click.command()
@click.argument('component')
@click.argument("command")
@click.argument("obj")
def set_args(component, command, obj):
    cfg = Cfg()
    if component != "optdata":
        sys.stderr.write("Invalid option")
        sys.exit(1)
    else:
        cfg.component = component
        cfg.command = command
        cfg.obj = obj
    return cfg

if __name__ == "__main__":
    app_cfg = Cfg()
    app_cfg = set_args()  # Never actually completes here.
    print("Component = ", app_cfg.component, "Command = ", app_cfg.command, "Obj = ", app_cfg.obj)
There is some sort of exception raised in core.py which just does a sys.exit and doesn't raise any sort of actual error.
Your design goes against the idea of Click: You're not supposed to treat "parsing the options" and "doing the work" as two separate steps:
import click
@click.command()
@click.argument("component", type=click.Choice(["optdata"]))
@click.argument("command")
@click.argument("obj")
def cli(component, command, obj):
    print("Component = ", component, "Command = ", command, "Obj = ", obj)
    # put your business logic here

if __name__ == "__main__":
    cli()
The pattern is to call the function that processes the command line options and then have that function call any other functionality.
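If you still want the parsed values collected in an object for later use, one sketch (hedged: the config class below is only similar to the question's Cfg, not identical) is to build it inside the command function and hand it to your business logic:

import click

class Cfg(object):
    def __init__(self, component, command, obj):
        self.component = component
        self.command = command
        self.obj = obj

def do_work(cfg):
    # the business logic receives the config object instead of re-reading sys.argv
    print("Component = ", cfg.component, "Command = ", cfg.command, "Obj = ", cfg.obj)

@click.command()
@click.argument("component", type=click.Choice(["optdata"]))
@click.argument("command")
@click.argument("obj")
def cli(component, command, obj):
    do_work(Cfg(component, command, obj))

if __name__ == "__main__":
    cli()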
I want to run some code in a process parallel to my main code, but I also want to access its parameters and start/stop the process via the command prompt.
My machine is Win7 64-bit. Something like this is what I have in mind:
from multiprocessing import Process

class dllapi():
    ...

def apiloop(params, args):
    apiclient = dllapi(**args)
    while True:
        apiclient.cycle()
        params = [....]

def mainloop(args):
    p = Process(target=apiloop, args=(params, args, ))
    while True:
        cmd = input()
        if cmd == 'kill':
            p.terminate()
        if cmd == 'stop':
            pass  # no idea
        if cmd == 'resume':
            pass  # no idea
        if cmd == 'report':
            print(params)
I wish to keep it simple. I did try to make apiloop a thread, yet input() freezes the program and stops apiloop from working until I press Enter...
To share the parameters from the apiloop process, I did try a queue and a pipe, but it seems to me that a queue needs .join to wait until apiloop is done, and a pipe has a buffer limit.
(Actually I can make apiclient.cycle run every 1s, but I wish to keep apiclient alive.)
I wish to know if it's worth digging deeper into multiprocessing (e.g. I will try a manager as well...) or whether there are other approaches more suitable for my case. Thanks in advance...
UPDATE (2018-09-17 09:53):
Some progress with manager as below:
from multiprocessing import Process, Manager

class dllapi():
    ...

class webclientapi():
    ...

def apiloop(args, cmd, params):
    apiclient = dllapi(**args)
    status = True
    while True:
        # command from main
        if cmd == 'stop':
            status = False
        elif cmd == 'start':
            status = True
        cmd = None
        # stop or run
        if status == True:
            apiclient.cycle()
        # update parameters
        params['status'] = status

def uploadloop(cmd, params):
    uploadclient = webclientapi()
    status = True
    while True:
        # command from main
        if cmd == 'stop':
            status = False
        elif cmd == 'start':
            status = True
        cmd = None
        # stop or run
        if status == True:
            # upload 'status' from apiclient to somewhere
            uploadclient.cycle(params['status'])

def mainloop(args):
    manager = Manager()
    mpcmds = {}
    mpparams = {}
    mps = {}

    mpcmds['apiloop'] = manager.Value('u', 'start')
    mpparams['apiloop'] = manager.dict()
    mps['apiloop'] = Process(target=apiloop, args=(args, mpcmds['apiloop'], mpparams['apiloop']))

    mpcmds['uploadloop'] = manager.Value('u', 'start')
    # mpparams['uploadloop'] is directly from mpparams['apiloop']
    mps['uploadloop'] = Process(target=uploadloop, args=(mpcmds['uploadloop'], mpparams['apiloop']))

    for key, mp in mps.items():
        mp.daemon = True
        mp.start()

    while True:
        cmd = input().split(' ')
        # kill daemon processes with exit()
        if cmd[0] == 'bye':
            exit()
        # kill an individual process
        if cmd[0] == 'kill':
            mps[cmd[1]].terminate()
        # stop an individual process via command
        if cmd[0] == 'stop':
            mpcmds[cmd[1]] = 'stop'
        # start an individual process via command
        if cmd[0] == 'start':
            mpcmds[cmd[1]] = 'start'
        # report individual process info via command
        if cmd[0] == 'report':
            print(mpparams['apiloop'])
Hope this helps someone.
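One detail worth double-checking in the code above: a manager.Value proxy is read and written through its .value attribute, so comparing the proxy itself to a string (cmd == 'stop') or rebinding the name (cmd = None) does not touch the shared value. A small sketch of the access pattern (the 'u' typecode and the 'start'/'stop' strings are taken from the update above):

from multiprocessing import Manager

manager = Manager()
cmd = manager.Value('u', 'start')   # shared, string-valued command flag

# worker process side
if cmd.value == 'stop':             # read the shared value through .value
    pass                            # react to the command here
cmd.value = ''                      # clear it through .value, not by rebinding cmd

# main process side
cmd.value = 'stop'                  # this change is visible to the worker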
I'm showing you how to solve the general problem with threads only, because that is what you tried first and your example doesn't bring up the need for a child-process.
In the example below your dllapi class is named Zoo and it's subclassing threading.Thread, adding some methods to allow execution control. It takes some data upon initialization and its cycle-method simply iterates repeatedly over this data and just counts how many times it has seen the specific item.
import time
import logging
from queue import Queue
from threading import Thread
from itertools import count, cycle

class Zoo(Thread):
    _ids = count(1)

    def __init__(self, cmd_queue, data, *args,
                 log_level=logging.DEBUG, **kwargs):
        super().__init__()
        self.name = f'{self.__class__.__name__.lower()}-{next(self._ids)}'
        self.data = data
        self.log_level = log_level
        self.args = args
        self.kwargs = kwargs
        self.logger = self._init_logging()
        self.cmd_queue = cmd_queue
        self.data_size = len(data)
        self.actual_item = None
        self.iter_cnt = 0
        self.cnt = count(1)
        self.cyc = cycle(self.data)

    def cycle(self):
        item = next(self.cyc)
        if next(self.cnt) % self.data_size == 0:  # new iteration round
            self.iter_cnt += 1
        self.actual_item = f'{item}_{self.iter_cnt}'

    def run(self):
        """
        Run is the main-function in the new thread. Here we overwrite run
        inherited from threading.Thread.
        """
        while True:
            if self.cmd_queue.empty():
                self.cycle()
                time.sleep(1)  # optional heartbeat
            else:
                self._get_cmd()
                self.cmd_queue.task_done()  # unblocks prompter

    def stop(self):
        self.logger.info(f'stopping with actual item: {self.actual_item}')
        # do clean up
        raise SystemExit

    def pause(self):
        self.logger.info(f'pausing with actual item: {self.actual_item}')
        self.cmd_queue.task_done()  # unblocks producer joining the queue
        self._get_cmd()  # just wait blockingly until next command

    def resume(self):
        self.logger.info(f'resuming with actual item: {self.actual_item}')

    def report(self):
        self.logger.info(f'reporting with actual item: {self.actual_item}')
        print(f'completed {self.iter_cnt} iterations over data')

    def _init_logging(self):
        fmt = '[%(asctime)s %(levelname)-8s %(threadName)s' \
              ' %(funcName)s()] --- %(message)s'
        logging.basicConfig(format=fmt, level=self.log_level)
        return logging.getLogger()

    def _get_cmd(self):
        cmd = self.cmd_queue.get()
        try:
            self.__class__.__dict__[cmd](self)
        except KeyError:
            print(f'Command `{cmd}` is unknown.')
input is a blocking function. You need to outsource it to a separate thread so it doesn't block your main thread. In the example below, input is wrapped in Prompter, a class subclassing threading.Thread. Prompter passes inputs into a command queue, which is read by Zoo.
class Prompter(Thread):
    """Prompt user for command input.
    Runs in a separate thread so the main-thread does not block.
    """
    def __init__(self, cmd_queue):
        super().__init__()
        self.cmd_queue = cmd_queue

    def run(self):
        while True:
            cmd = input('prompt> ')
            self.cmd_queue.put(cmd)
            self.cmd_queue.join()  # blocks until consumer calls task_done()

if __name__ == '__main__':
    data = ['ape', 'bear', 'cat', 'dog', 'elephant', 'frog']
    cmd_queue = Queue()
    prompter = Prompter(cmd_queue=cmd_queue)
    prompter.daemon = True
    zoo = Zoo(cmd_queue=cmd_queue, data=data)
    prompter.start()
    zoo.start()
Example session in terminal:
$python control_thread_over_prompt.py
prompt> report
[2018-09-16 17:59:16,856 INFO zoo-1 report()] --- reporting with actual item: dog_0
completed 0 iterations over data
prompt> pause
[2018-09-16 17:59:26,864 INFO zoo-1 pause()] --- pausing with actual item: bear_2
prompt> resume
[2018-09-16 17:59:33,291 INFO zoo-1 resume()] --- resuming with actual item: bear_2
prompt> report
[2018-09-16 17:59:38,296 INFO zoo-1 report()] --- reporting with actual item: ape_3
completed 3 iterations over data
prompt> stop
[2018-09-16 17:59:42,301 INFO zoo-1 stop()] --- stopping with actual item: elephant_3
I want to have a function, in Python (3.x), which forces the script itself to terminate, like:

i_time_value = 10
mytimeout(i_time_value)  # terminate the script if it is not done within i_time_value seconds
for i in range(10):
    print("go")
    time.sleep(2)
Where "mytimeout" is the function I need : it terminate the script in "arg" seconds if the script is not terminated.
I have seen good solutions for put a timeout to a function here or here, but I don't want a timeout for a function but for the script.
Also :
I know that I can put my script in a function or using something like subprocess and use it with a
timeout, I tried it and it works, but I want something more simple.
It must be Unix & Windows compatible.
The function must be universal i.e. : it may be add to any script in
one line (except import)
I need a function not a 'how to put a timeout in a script'.
signal is not Windows compatible.
You can send some signals on Windows e.g.:
os.kill(os.getpid(), signal.CTRL_C_EVENT) # send Ctrl+C to itself
You could use threading.Timer to call a function at a later time:
from threading import Timer

def kill_yourself(delay):
    t = Timer(delay, kill_yourself_now)
    t.daemon = True  # no need to kill yourself if we're already dead
    t.start()
where kill_yourself_now():
import os
import signal
import sys
def kill_yourself_now():
    sig = signal.CTRL_C_EVENT if sys.platform == 'win32' else signal.SIGINT
    os.kill(os.getpid(), sig)  # raise KeyboardInterrupt in the main thread
If your scripts starts other processes then see: how to kill child process(es) when parent dies? See also, How to terminate a python subprocess launched with shell=True -- it demonstrates how to kill a process tree.
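Put together, the one-liner the question asks for could look roughly like this (a sketch that simply restates the answer's pieces under the question's mytimeout name, assuming that raising KeyboardInterrupt in the main thread is an acceptable way to stop the script):

import os
import signal
import sys
import time
from threading import Timer

def kill_yourself_now():
    sig = signal.CTRL_C_EVENT if sys.platform == 'win32' else signal.SIGINT
    os.kill(os.getpid(), sig)  # raise KeyboardInterrupt in the main thread

def mytimeout(delay):
    t = Timer(delay, kill_yourself_now)
    t.daemon = True  # no need to kill yourself if we're already dead
    t.start()

mytimeout(10)  # terminate the script if it is still running after 10 seconds
for i in range(10):
    print("go")
    time.sleep(2)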
I would use something like this.
import sys
import time
import threading
def set_timeout(event):
    event.set()

event = threading.Event()
i_time_value = 2
t = threading.Timer(i_time_value, set_timeout, [event])
t.start()

for i in range(10):
    print("go")
    if event.is_set():
        print('Timed Out!')
        sys.exit()
    time.sleep(2)
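A small variation on the same idea, not part of the original answer: threading.Event.wait can replace time.sleep, so the loop wakes up as soon as the timer fires instead of only checking between iterations (assuming that is the behaviour you want):

import sys
import threading

event = threading.Event()
i_time_value = 2
threading.Timer(i_time_value, event.set).start()

for i in range(10):
    print("go")
    if event.wait(timeout=2):  # sleeps up to 2s, returns True early if the timer fired
        print('Timed Out!')
        sys.exit()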
A little bit of googling turned this answer up:
import multiprocessing as MP
from sys import exc_info
from time import clock

DEFAULT_TIMEOUT = 60

################################################################################

def timeout(limit=None):
    if limit is None:
        limit = DEFAULT_TIMEOUT
    if limit <= 0:
        raise ValueError()
    def wrapper(function):
        return _Timeout(function, limit)
    return wrapper

class TimeoutError(Exception): pass

################################################################################

def _target(queue, function, *args, **kwargs):
    try:
        queue.put((True, function(*args, **kwargs)))
    except:
        queue.put((False, exc_info()[1]))

class _Timeout:

    def __init__(self, function, limit):
        self.__limit = limit
        self.__function = function
        self.__timeout = clock()
        self.__process = MP.Process()
        self.__queue = MP.Queue()

    def __call__(self, *args, **kwargs):
        self.cancel()
        self.__queue = MP.Queue(1)
        args = (self.__queue, self.__function) + args
        self.__process = MP.Process(target=_target, args=args, kwargs=kwargs)
        self.__process.daemon = True
        self.__process.start()
        self.__timeout = self.__limit + clock()

    def cancel(self):
        if self.__process.is_alive():
            self.__process.terminate()

    @property
    def ready(self):
        if self.__queue.full():
            return True
        elif not self.__queue.empty():
            return True
        elif self.__timeout < clock():
            self.cancel()
        else:
            return False

    @property
    def value(self):
        if self.ready is True:
            flag, load = self.__queue.get()
            if flag:
                return load
            raise load
        raise TimeoutError()

    def __get_limit(self):
        return self.__limit

    def __set_limit(self, value):
        if value <= 0:
            raise ValueError()
        self.__limit = value

    limit = property(__get_limit, __set_limit)
It might be Python 2.x, but it shouldn't be terribly hard to convert.
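Usage would look roughly like this (a sketch under stated assumptions: the result is polled via ready and then read from value, and time.clock, which was removed in Python 3.8, is swapped for time.perf_counter when converting):

import time

def do_work(n):
    time.sleep(n)
    return n * 2

do_work_with_timeout = timeout(limit=10)(do_work)  # same as decorating, but keeps do_work picklable

do_work_with_timeout(3)                     # starts the worker process and returns immediately
while do_work_with_timeout.ready is False:  # False means still running; None means the limit passed
    time.sleep(0.5)
print(do_work_with_timeout.value)           # the result, or raises TimeoutError / the worker's exception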
I was wondering if it would be possible to create some sort of static set in a Python Process subclass to keep track of the types of processes that are currently running asynchronously.
class showError(Process):
    # Define some form of shared set that is shared by all Processes
    displayed_errors = set()

    def __init__(self, file_name, error_type):
        super(showError, self).__init__()
        self.error_type = error_type

    def run(self):
        if error_type not in set:
            displayed_errors.add(error_type)
            message = 'Please try again. ' + str(self.error_type)
            winsound.MessageBeep(-1)
            result = win32api.MessageBox(0, message, 'Error', 0x00001000)
            if result == 0:
                displayed_errors.discard(error_type)
That way, when I create/start multiple showError processes with the same error_type, subsequent error windows will not be created. So how can we define this shared set?
You can use a multiprocessing.Manager.dict (there's no set object available, but you can use a dict in the same way) and share that between all your subprocesses.
import multiprocessing as mp
if __name__ == "__main__":
m = mp.Manager()
displayed_errors = m.dict()
subp = showError("some filename", "some error type", displayed_errors)
Then change showError.__init__ to accept the shared dict:
def __init__(self, file_name, error_type, displayed_errors):
    super(showError, self).__init__()
    self.error_type = error_type
    self.displayed_errors = displayed_errors
Then this:
displayed_errors.add(error_type)
Becomes:
self.displayed_errors[error_type] = 1
And this:
displayed_errors.discard(error_type)
Becomes:
try:
    del self.displayed_errors[error_type]
except KeyError:
    pass
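Putting the pieces together, an end-to-end sketch (hedged: the winsound/win32api calls from the question are replaced with a print so it runs anywhere; the dict-as-set idea is the one from this answer):

import multiprocessing as mp

class showError(mp.Process):
    def __init__(self, file_name, error_type, displayed_errors):
        super(showError, self).__init__()
        self.error_type = error_type
        self.displayed_errors = displayed_errors

    def run(self):
        if self.error_type not in self.displayed_errors:
            self.displayed_errors[self.error_type] = 1            # "add" to the shared set
            print('Please try again. ' + str(self.error_type))    # stand-in for the MessageBox
            try:
                del self.displayed_errors[self.error_type]        # "discard" once dismissed
            except KeyError:
                pass

if __name__ == "__main__":
    m = mp.Manager()
    displayed_errors = m.dict()
    errors = [showError("file.txt", "IOError", displayed_errors) for _ in range(3)]
    for e in errors:
        e.start()
    for e in errors:
        e.join()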