I'm trying to use the data returned from one function in several other functions, but I don't want the first function to run every time, which is what is happening in my case.
# Function lab
def func_a():
    print('running function a')
    data = 'test'
    return data

def func_b():
    print(func_a())

def func_c():
    print(func_a())

def func_d():
    print(func_a())

if __name__ == '__main__':
    func_a()
    func_b()
    func_c()
    func_d()
Each time, the whole of func_a runs. But I just want the returned data from func_a in the other functions.
If I understand correctly, you could alleviate this with a simple class.
The class runs func_a's logic once and holds the result in a variable called output. You can then reference this output variable as much as you like in all the other functions, without having to re-run func_a.
Hope this helps!
class FunctionA:
    def __init__(self):
        self.output = None

    def run_function(self):
        print('running function a')
        data = 'test'
        self.output = data

def func_b():
    print(func_a.output)

def func_c():
    print(func_a.output)

def func_d():
    print(func_a.output)

if __name__ == '__main__':
    func_a = FunctionA()
    func_a.run_function()
    func_b()
    func_c()
    func_d()
>>> running function a
>>> test
>>> test
>>> test
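As an aside, if all you need is for func_a's body to run only once while later calls reuse the cached return value, the standard library's functools.lru_cache gives a lighter-weight sketch of the same idea (an alternative, not the class approach above):

from functools import lru_cache

@lru_cache(maxsize=None)
def func_a():
    print('running function a')  # only printed on the very first call
    return 'test'

def func_b():
    print(func_a())  # later calls reuse the cached 'test' without re-running the body

if __name__ == '__main__':
    func_a()
    func_b()
    func_b()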
Your func_a does two things. To make this clear, let's call it print_and_return_data.
There are several ways to break apart the two things print_and_return_data does. One way is to split the two behaviors into smaller sub-functions:
def print_and_return_data():
    print('running function a')  # keeping the old print behavior
    data = 'test'
    return data
into:
def print_run():
    print('running function a')  # keeping the old print behavior

def return_data():
    return 'test'

def print_and_return_data():
    print_run()
    return return_data()
So that other functions only use what they need:
def func_b():
    print(return_data())
Another way is to change print_and_return_data to behave differently the first time it's called than on subsequent calls (I don't recommend this, because functions that change behavior based on how many times they've been called can be confusing):
context = {'has_printed_before': False}

def print_and_return_data():
    if not context['has_printed_before']:
        print('running function a')
        context['has_printed_before'] = True
    data = 'test'
    return data

def func_b():
    print(print_and_return_data())

if __name__ == '__main__':
    print_and_return_data()  # prints 'running function a'
    func_b()                 # won't print it again
One way to avoid "functions behaving differently when they're called" is to pass the variation (the "context") in as an argument:
def return_data(also_print=False):
    if also_print:
        print('running function a')
    data = 'test'
    return data

def func_b():
    print(return_data())

if __name__ == '__main__':
    return_data(also_print=True)  # prints 'running function a'
    func_b()                      # won't print it
I am trying to write a metamorphic quine. Without the "spawn" context, the subprocesses seem to inherit the stack, and so I ultimately exceed the maximum recursion depth. With the "spawn" context, the subprocess doesn't seem to recurse. How would I go about executing the modified AST?
def main():
    module = sys.modules[__name__]
    source = inspect.getsource(module)
    tree = ast.parse(source)
    visitor = Visitor()  # TODO mutate
    tree = visitor.visit(tree)
    tree = ast.fix_missing_locations(tree)

    ctx = multiprocessing.get_context("spawn")
    process = ctx.Process(target=Y, args=(tree,))
    # Y() encapsulates these lines, since code objects can't be pickled
    #code = compile(tree, filename="<ast>", mode='exec', optimize=2)
    #process = ctx.Process(target=exec, args=(code, globals()))  # locals()
    process.daemon = True
    process.start()
    # TODO why do daemonized processes need to be joined in order to run?
    process.join()
    return 0

if __name__ == '__main__': exit(main())
It really is that easy: with daemon.DaemonContext(): foo().
Based on comments by @user2357112 supports Monica.
@trace
def spawn_child(f: Callable):
    with daemon.DaemonContext(stdin=sys.stdin, stdout=sys.stdout):
        return f()

I = TypeVar('I')

def ai(f: Callable[[int], I]) -> Callable[[int], I]:
    def g(*args, **kwargs) -> int:
        # assuming we have a higher-order function morph()
        # that has a concept of eta-equivalence
        # (e.g., a probabilistic notion),
        # then the recursive call should be "metamorphic"
        O = [morph(f), status, partial(spawn_child, f)]
        i = random.randrange(0, len(O))  # TODO something magickal
        return O[i]()
    return g

def main() -> int: return Y(ai)()

if __name__ == '__main__': exit(main())
The next problem is compiling the source for a nested function definition, since f() is not a reference to ai() but to a function defined within Y().
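A minimal sketch of that obstacle (using simplified, hypothetical Y and ai, not the original quine): inspect.getsource() on a function created inside Y() returns only the nested def block, and compiling that block on its own loses the names captured from the enclosing scope.

import inspect

def Y(builder):
    # simplified fixed-point helper, purely for illustration
    def wrapped(*args, **kwargs):
        return builder(wrapped)(*args, **kwargs)
    return wrapped

def ai(f):
    def g(n):
        return 1 if n <= 1 else n * f(n - 1)
    return g

fact = Y(ai)
print(fact(5))                  # 120
print(inspect.getsource(ai))    # full module-level definition; safe to recompile
print(inspect.getsource(fact))  # only the nested "def wrapped(...)" block; 'builder'
                                # lives in a closure, so exec()ing this source alone fails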
I'm learning about multithreading and am trying to implement a few things to understand it. After reading several (very technical) topics, I cannot find a solution or a way to understand my issue.
Basically, I have the following structure:
import datetime
from multiprocessing import Pool

class MyObject():
    def __init__(self):
        self.lastupdate = datetime.datetime.now()

    def DoThings(self):
        ...

def MyThreadFunction(OneOfMyObject):
    OneOfMyObject.DoThings()
    OneOfMyObject.lastupdate = datetime.datetime.now()

def main():
    MyObject1 = MyObject()
    MyObject2 = MyObject()
    MyObjects = [MyObject1, MyObject2]

    pool = Pool(2)
    while True:
        pool.map(MyThreadFunction, MyObjects)

if __name__ == '__main__':
    main()
I think the .map function makes a copy of my objects, because it does not update the time. Is that right? If yes, how could I pass in a global version of my objects? If not, would you have any idea why the time is fixed in my objects?
When I check the new time with print(MyObject.lastupdate), the time is right, but not in the next loop.
Thank you very much for any of your ideas.
Yes, the multiprocessing Pool will serialize (actually, pickle) your objects and then reconstruct them in the worker process. However, it also sends them back. To recover them, see the commented additions to the code below:
class MyObject():
    def __init__(self):
        self.lastupdate = datetime.datetime.now()

    def DoThings(self):
        ...

def MyThreadFunction(OneOfMyObject):
    OneOfMyObject.DoThings()
    OneOfMyObject.lastupdate = datetime.datetime.now()
    # NOW, RETURN THE OBJECT
    return OneOfMyObject

def main():
    MyObject1 = MyObject()
    MyObject2 = MyObject()
    MyObjects = [MyObject1, MyObject2]

    with Pool(2) as pool:  # <- a neater way of managing the pool than the bare Pool + while loop, for various reasons. Check out context managers if interested.
        # Now we recover a list of the updated objects:
        processed_object_list = pool.map(MyThreadFunction, MyObjects)

    # Now inspect
    for my_object in processed_object_list:
        print(my_object.lastupdate)

if __name__ == '__main__':
    main()
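If you do want to keep the original while loop and have the updates persist between iterations, one sketch (assuming the modified MyThreadFunction above, which returns the object) is to rebind MyObjects to the list that pool.map returns on each pass:

def main():
    MyObjects = [MyObject(), MyObject()]
    with Pool(2) as pool:
        while True:
            # pool.map returns the updated copies; rebinding means the next
            # iteration works on the objects carrying the new lastupdate
            MyObjects = pool.map(MyThreadFunction, MyObjects)
            print([obj.lastupdate for obj in MyObjects])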
I want to implement a timer to measure how long a block of code takes to run. I then want to do this across an entire application containing multiple modules (40+) across multiple directories (4+).
My timer is created with two functions that are within a class with a structure like this:
class SubClass(Class1):
    def getStartTime(self):
        start = time.time()
        return start

    def logTiming(self, classstring, start):
        fin = time.time() - start
        logging.getLogger('perf_log_handler').info((classstring + ' sec').format(round(fin, 3)))
The first function gets the start time, and the second function calculates the time for the block to run and then logs it to a logger.
This code is in a module that we'll call module1.py.
In practice, generically, it will be implemented as such:
class SubSubClass(SubClass):
    def Some_Process(self):
        stim = super().getStartTime()
        code..............................
        ...
        ...
        ...
        ...
        super().logTiming("The Process took: {}", stim)
        return Result_Of_Process
This code resides in a module called module2.py and already works and successfully logs. My problem is that when structured like this, I can seemingly only use the timer inside code that is under the umbrella of SubClass, where it is defined (my application fails to render and I get a "can't find page" error in my browser). But I want to use this code everywhere in all the application modules, globally. Whether the module is within another directory, whether some blocks of code are within other classes and subclasses inside other modules, everywhere.
What is the easiest, most efficient way to create this timing instrument so that I can use it anywhere in my application? I understand I may have to define it completely differently. I am very new to all of this, so any help is appreciated.
OPTION 1) You should define another module, for example, "mytimer.py" fully dedicated to the timer:
import time

class MyTimer():
    def __init__(self):
        self.start = time.time()

    def log(self):
        now = time.time()
        return now - self.start
And then, from any line of your code, for example, in module2.py:
from mytimer import MyTimer

class SomeClass():
    def Some_Function(self):
        t = MyTimer()
        ....
        t.log()
        return ...
OPTION 2) You could also use a simple function instead of a class:
import time

def mytimer(start=None, tag=""):
    if start is None:
        start = time.time()
    now = time.time()
    delay = float(now - start)
    print("%(tag)s %(delay).2f seconds." % {'tag': tag, 'delay': delay})
    return now
And then, in your code:
from mytimer import mytimer

class SomeClass():
    def Some_Function(self):
        t = mytimer(tag='BREAK0')
        ....
        t = mytimer(start=t, tag='BREAK1')
        ....
        t = mytimer(start=t, tag='BREAK2')
        ....
        t = mytimer(start=t, tag='BREAK3')
        return ...
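Along the same lines, a small sketch of a further variation: the standard library's contextlib lets you wrap the timing in a context manager, so any block in any module can be timed with a with statement:

import time
from contextlib import contextmanager

@contextmanager
def timed(tag=""):
    start = time.time()
    try:
        yield
    finally:
        print("%s %.2f seconds." % (tag, time.time() - start))

# usage from any module:
# with timed('BREAK0'):
#     ...  # the block you want to measure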
I am not quite sure what you are looking for, but once upon a time I used a decorator for a similar type of problem.
The snippet below is the closest I can remember to what I implemented at that time. Hopefully it is useful to you.
Brief explanation
timed is a decorator that wraps methods on the Python object and times each call.
The class contains a log that is updated by the wrapper as the @timed methods are called.
Note that if you want to make the @property act as a "class property" you can draw inspiration from this post.
from time import sleep, time

# -----------------
# Define Decorators
# ------------------
def timed(wrapped):
    def wrapper(self, *arg, **kwargs):
        start = time()
        res = wrapped(self, *arg, **kwargs)
        stop = time()
        self.log = {'method': wrapped.__name__, 'called': start, 'elapsed': stop - start}
        return res
    return wrapper

# -----------------
# Define Classes
# ------------------
class Test(object):
    __log = []

    @property
    def log(self):
        return self.__log

    @log.setter
    def log(self, kwargs):
        self.__log.append(kwargs)

    @timed
    def test(self):
        print("Running timed method")
        sleep(2)

    @timed
    def test2(self, a, b=2):
        print("Running another timed method")
        sleep(2)
        return a + b

# ::::::::::::::::::
if __name__ == '__main__':
    t = Test()
    res = t.test()
    res = t.test2(1)
    print(t.log)
I'm trying to call two methods, say_hello and say_world, via getattr() in a multiprocessing.Process, but the say_world method is never executed. How can I make this work? Thanks.
# -*- coding: utf-8 -*-
from multiprocessing import Process
import time

class Hello:
    def say_hello(self):
        print('Hello')

    def say_world(self):
        print('World')

class MultiprocessingTest:
    def say_process(self, say_type):
        h = Hello()
        while True:
            if hasattr(h, say_type):
                result = getattr(h, say_type)()
                print(result)
            time.sleep(1)

    def report(self):
        Process(target=self.say_process('say_hello')).start()
        Process(target=self.say_process('say_world')).start()  # This line hasn't been executed.

if __name__ == '__main__':
    t = MultiprocessingTest()
    t.report()
The target parameter expects a reference to a function, but your code calls say_process(...) right away instead of passing it; since say_process loops forever in the parent process, the second Process line is never even reached (and the call would have handed None to target anyway). These are the necessary parts to change:
class Hello:
    def say_hello(self):
        while True:
            print('Hello')
            time.sleep(1)

    def say_world(self):
        while True:
            print('World')
            time.sleep(1)

class MultiprocessingTest:
    def say_process(self, say_type):
        h = Hello()
        if hasattr(h, say_type):
            return getattr(h, say_type)  # Return the function reference instead of executing the function
        else:
            return None
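With that change, the existing report() works as written, because say_process() now hands each Process a callable (the bound method) rather than None:

    def report(self):
        # say_process() returns the bound method, so each Process gets a real target
        Process(target=self.say_process('say_hello')).start()
        Process(target=self.say_process('say_world')).start()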
I want to know how I can test, in a conditional statement, for a function randomly pulled from a list. Here is some example code; just ignore what the code is supposed to be printing.
import random, time

def biomeLand():
    print("Biome: Land")

def biomeOcean():
    print("Biome: Ocean")

def biomeDesert():
    print("Biome: Desert")

def biomeForest():
    print("Biome: Forest")

def biomeRiver():
    print("Biome: River")

biomes = [biomeLand, biomeOcean, biomeDesert, biomeForest,
          biomeRiver]

def run():
    while True:
        selected_biome = random.choice(biomes)()
        time.sleep(0.5)

run()
Once again, how can I make the program test in a conditional statement when a certain function has been selected from the list?
maybe:
def run():
    while True:
        selected_biome = random.choice(biomes)
        selected_biome()
        if selected_biome == biomeLand:
            print("biomeLand Selected")
        time.sleep(0.5)

run()
You can just match them like any other variable:
def foo():
    print("foo")

def bar():
    print("bar")

first = foo
print(first == bar)  # prints "False"
print(first == foo)  # prints "True"
So in your example you can just have something like:
if selected_biome == biomeLand:
    # do something