class Return_Thread_Value(object):
    """Run *target* on a background thread and hand its result back later.

    The worker pushes the target's return value onto an internal queue;
    Return_Value() joins the thread and pops that value off the queue.
    """

    def __init__(self, target=None, args=(), **kwargs):
        self._que = queue.Queue()

        def _worker(result_queue, call_args, call_kwargs):
            # Deliver the target's return value through the queue so the
            # parent thread can retrieve it after join().
            result_queue.put(target(*call_args, **call_kwargs))

        self._t = Thread(target=_worker, args=(self._que, args, kwargs))
        self._t.start()

    def Return_Value(self):
        """Block until the worker finishes, then return its result."""
        self._t.join()
        return self._que.get()
# Run the walking handler on a worker thread, then feed its updated state
# into the key-input handler. The two calls are sequential: each
# Return_Value() blocks until its thread completes.
walking_args = (WINDOW, CLOCK, Hero, FRAME, INTERACTING, TOP_SCREEN,
                POSITION_DATA, BACKGROUND, FOREGROUND_OPAQUE,
                FOREGROUND_TRANSLUCENT, INPUT, INPUT_SHIFT, PROMPT_SHIFT,
                Input, ENTERED)
walking_thread = Return_Thread_Value(target=Walking_Inputs, args=walking_args)
INTERACTING, TOP_SCREEN, Input, ENTERED = walking_thread.Return_Value()

key_thread = Return_Thread_Value(
    target=Key_Inputs,
    args=(WINDOW, ENTERED, PROMPT_SHIFT, INPUT, INPUT_SHIFT, CAPITAL, Input))
ENTERED, PROMPT_SHIFT, INPUT, INPUT_SHIFT, CAPITAL, Input = key_thread.Return_Value()
Trying to run two functions, one that lets you walk about the village and another that accepts key inputs, both functions are running, but I'm not sure if the values are being returned.
The threads will each have their own scope. To pass data back to their parent thread, the easiest way is to define a shared object — for example a dict, return_value = {} — and pass that as an arg to your thread.
Set the value you want to return as a key in the dict (
return_value['thread1return']='something'), and you should be able to access it in the parent thread
Related
I'm trying to create threads to run a class method. However, when I try to pass one class to another, it tries to initialize the class and never gets threaded.
I'm taking a list of tuples and trying to pass that list to the cfThread class, along with the class method that I want to use. From here, I'd like to create a separate thread to run the classes method and take action on one of tuples from the list. The REPLACEME is a placeholder because the class is looking for a tuple but I don't have one to pass to it yet. My end goal is to be able to pass a target (class / function) to a thread class that can create it's own queue and manage the threads without having to manually do it.
Below is a simple example to hopefully do a better job of explaining what I'm trying to do.
#!/bin/python3.10
import concurrent.futures
class math:
    """Pairwise arithmetic over a two-element sequence ``num``.

    NOTE: the class name shadows the stdlib ``math`` module; it is kept
    because callers elsewhere reference it by this name.
    """

    def __init__(self, num) -> None:
        self.num = num  # expected: an indexable pair (a, b)

    def add(self):
        """Return the sum of the pair."""
        first, second = self.num[0], self.num[1]
        return first + second

    def sub(self):
        """Return the difference (first minus second)."""
        first, second = self.num[0], self.num[1]
        return first - second

    def mult(self):
        """Return the product of the pair."""
        first, second = self.num[0], self.num[1]
        return first * second
class cfThread:
    """Fan a one-argument callable out over *args* on a thread pool."""

    def __init__(self, target, args):
        # target: callable invoked once per item; args: iterable of items.
        self.target = target
        self.args = args

    def run(self):
        """Submit one task per item and return the list of task results.

        Results are collected in completion order, not submission order.
        """
        results = []
        with concurrent.futures.ThreadPoolExecutor(10) as execute:
            threads = []
            for num in self.args:
                result = execute.submit(self.target, num)
                threads.append(result)
            for result in concurrent.futures.as_completed(threads):
                # BUG FIX: unwrap the Future. The original appended the
                # Future object itself, so callers got futures, not values.
                results.append(result.result())
        return results
if __name__ == '__main__':
    numbers = [(1,2),(3,4),(5,6)]
    # NOTE(review): REPLACEME is an undefined placeholder, and
    # math(REPLACEME).add() would *call* add immediately rather than pass a
    # callable. The accepted fix is: target=lambda x: math(x).add()
    results = cfThread(target=math(REPLACEME).add(), args=numbers).run()
    print(results)
target has to be a callable; you want to wrap your call to add in a lambda expression.
results = cfThread(target=lambda x: math(x).add(), args=numbers).run()
I was wondering how I could update an object assigned as a shared dictionary value between different process. I have the following class:
class Task:
    """A unit of work identified by *id*/*uuid*, with a numeric lifecycle state."""

    # Lifecycle states.
    STATUS_PROCESSING = 0
    STATUS_EXECUTING = 1
    STATUS_QUEUED = 2
    STATUS_TERMINATED = 3
    STATUS_HALTED = 4
    STATUS_STOPPED = 5

    def __init__(self, id: str, uuid: str, options: dict):
        self.id = id
        self.uuid = uuid
        self.options = options
        self.state = 0  # starts as STATUS_PROCESSING
        # Some properties...

    def execute(self):
        """Execute the task and return self.

        Returning self lets a caller holding a Manager proxy dict re-assign
        the item (d[i] = d[i].execute()) so the mutated state propagates —
        plain attribute writes on a proxied value are lost otherwise.
        """
        # Set self status to Executing
        self.state = Task.STATUS_EXECUTING
        print('Executing...')
        self.state = Task.STATUS_TERMINATED
        return self
It just creates a new task with a given ID and executes its core method when execute() is called. I have another class with staticmethods that is used to append a new pair (id, task) to the dict, and read the dict executing all its tasks until the main program stops:
class DummyList:
    """Static helpers managing a shared {id: Task} mapping."""

    @staticmethod  # restored: markdown formatting had turned '@' into '#'
    def submit_task(d: dict, uuid: str, options: dict):
        """Create a Task for *uuid* and register it in *d*.

        Returns the generated id string, or False on invalid uuid, a full
        table (20 entries), or an id collision.
        """
        # If invalid UUID
        if not Task.is_valid_uuid(uuid):
            return False
        # If more than 20 tasks
        if len(d) > 19:
            return False
        # Create random ID (simplified for question)
        r_id = str(random.randint(1, 2000000))
        if r_id in d:
            return False
        # BUG FIX: set the state *before* inserting into the dict. With a
        # multiprocessing Manager dict, d[r_id] returns a copy, so
        # 'd[r_id].state = ...' mutated a throwaway object and the stored
        # task silently kept its old state.
        task = Task(r_id, uuid, options)
        task.state = Task.STATUS_QUEUED
        d[r_id] = task
        # Return the created ID
        return r_id

    @staticmethod  # restored: markdown formatting had turned '@' into '#'
    def execute_forever(d):
        """Poll *d* every 5 seconds, executing every task, until Ctrl-C."""
        try:
            while True:
                for i in d.values():
                    print(i.state)
                    i.execute()
                time.sleep(5)
        except KeyboardInterrupt:
            pass
The thing is that the DummyList.execute_forever() will be called from another process, while the main one will execute the submit_task(...) function to add new tasks. Like this:
# Create a shared dict
m = multiprocessing.Manager()
shared_d = m.dict()
# Start the Task shared list execution in another process
p = multiprocessing.Process(target=DummyList.execute_forever, args=(shared_d,))
# Set the process to exit when the main halts
# (daemon must be set before start(); the child is killed when main exits)
p.daemon = True
p.start()
........
# From another place
# The message variable is not important
DummyList.submit_task(shared_d, message['proc'], message['options'])
It works! The task is created, assigned to the dictionary and executed, but the following lines (which are seen in the above code) do not execute properly:
self.state = Task.STATUS_EXECUTING
self.state = Task.STATUS_TERMINATED
d[r_id].state = Task.STATUS_QUEUED
If we wrote `if shared_d[<some_id>].state == 0` anywhere in the code, it would always be True, because the property never updates.
I suppose that's because the shared dictionary does not update when the object's properties are modified — the proxy only knows it has to synchronize when its __getitem__ or __setitem__ methods are called. Do you know if there is any way to change this behavior?
Thank you very much!
I finally found a solution. The objects inside the dictionary were not updating unless the __getitem__ or __setitem__ methods from the proxy dictionary were called. That's why I changed the following lines:
Task
The execute() method ends with return self. The self.state must be changed throughout the execution.
TaskManager
Method changed to:
@staticmethod  # restored: markdown formatting had turned '@' into '#'
def execute_forever(d):
    """ Infinite loop reading the queued tasks and executing all of them.
    """
    try:
        while True:
            # Notice the loop using the keys
            for i in d.keys():
                # Execute and re-assign the item: going through __setitem__
                # is what makes the Manager proxy propagate the mutated Task
                # back into the shared dict.
                d[i] = d[i].execute()
            time.sleep(5)
    except KeyboardInterrupt:
        pass
I have the following program and I want to use multiprocessing module. It uses external files, in which I call the PSO class from another file. costfunc is a function from another file and the other args are just variables.
Swarm is a list containing as much objects as the value of ps, and each object has multiple attributes which need to update at every iteration.
Following Hannu's answer I implemented multiprocessing.Pool and it is working; however, it is taking much more time than running sequentially.
Would appreciate if you could tell me what are the reasons for it happening and how can I make it run faster?
# IMPORT PACKAGES -----------------------------------------------------------+
import random
import numpy as np
# IMPORT FILES --------------------------------------------------------------+
from Reducer import initial
# Particle Class ------------------------------------------------------------+
class Particle:
    """One PSO particle: position, velocity and personal-best bookkeeping."""

    def __init__(self, D, bounds_x, bounds_v):
        self.Cost_i = -1                 # current cost (-1 = not yet evaluated)
        self.Position_Best_i = []        # personal-best position
        self.Cost_Best_i = -1            # personal-best cost
        self.Constraint_Best_i = []      # constraints at personal best
        self.Constraint_i = []           # current constraints
        self.Penalty_i = -1              # current penalty
        # Seed position/velocity from the problem-specific initializer.
        x0, v0 = initial(D, bounds_x, bounds_v)
        self.Velocity_i = [v0[k] for k in range(D)]
        self.Position_i = [x0[k] for k in range(D)]

    def evaluate(self, costFunc, i):
        """Score the current position and refresh the personal best.

        Returns self so pool workers can ship the updated particle back.
        """
        cost, constraint, penalty = costFunc(self.Position_i, i)
        self.Cost_i = cost
        self.Constraint_i = constraint
        self.Penalty_i = penalty
        # A sentinel best of -1 means "never evaluated": always accept.
        if self.Cost_Best_i == -1 or cost < self.Cost_Best_i:
            self.Position_Best_i = self.Position_i
            self.Cost_Best_i = cost
            self.Constraint_Best_i = constraint
            # NOTE: as in the original, Penalty_Best_i only exists after
            # the first best-update.
            self.Penalty_Best_i = penalty
        return self
def proxy(gg, costf, i):
    """Module-level trampoline so Pool can pickle the method call.

    BUG FIX: the original *printed* gg.evaluate(...) and returned None, so
    results.get() rebuilt the swarm as a list of Nones. Return the evaluated
    particle instead.
    """
    return gg.evaluate(costf, i)
# Swarm Class ---------------------------------------------------------------+
class PSO():
    """Particle-swarm optimiser: runs the whole optimisation in __init__."""

    def __init__(self, costFunc, bounds_x, bounds_v, ps, D, maxiter):
        self.Cost_Best_g = -1          # best cost for group
        self.Position_Best_g = []      # best position for group
        self.Constraint_Best_g = []
        self.Penalty_Best_g = -1
        # Establish Swarm
        Swarm = [Particle(D, bounds_x, bounds_v) for _ in range(ps)]
        # Begin optimization Loop
        i = 1
        self.Evol = []
        while i <= maxiter:
            # NOTE(review): creating (and tearing down) a fresh pool every
            # iteration is a large fixed cost — this is the main reason the
            # parallel version can be slower than the sequential one when
            # costFunc is cheap. Consider hoisting the pool out of the loop.
            pool = multiprocessing.Pool(processes=4)
            results = pool.map_async(partial(proxy, costf=costFunc, i=i), Swarm)
            pool.close()
            pool.join()
            Swarm = results.get()
            # BUG FIX: 'j' was never defined (NameError). Scan every
            # particle for a new group best.
            for j in range(ps):
                if Swarm[j].Cost_i < self.Cost_Best_g or self.Cost_Best_g == -1:
                    self.Position_Best_g = list(Swarm[j].Position_i)
                    self.Cost_Best_g = float(Swarm[j].Cost_i)
                    self.Constraint_Best_g = list(Swarm[j].Constraint_i)
                    self.Penalty_Best_g = float(Swarm[j].Penalty_i)
            self.Evol.append(self.Cost_Best_g)
            i += 1
You need a proxy function to do the function call, and as you need to deliver arguments to the function, you will need partial as well. Consider this:
from time import sleep
from multiprocessing import Pool
from functools import partial
class Foo:
    """Toy worker: evaluate() stores a * CostFunction(i) in b and returns self."""

    def __init__(self, a):
        self.a = a
        self.b = None  # filled in by evaluate()

    def evaluate(self, CostFunction, i):
        """Compute a * CostFunction(i), store it in b, and return self."""
        factor = CostFunction(i)
        sleep(0.01)  # simulate a little work
        self.b = self.a * factor
        return self
def CostFunc(i):
    """Square the input — a stand-in for a real cost function."""
    return i ** 2
def proxy(gg, costf, i):
    """Module-level trampoline: lets Pool pickle the bound-method call."""
    outcome = gg.evaluate(costf, i)
    return outcome
def main():
    """Build a swarm of Foo objects, fan evaluate() out over a process
    pool via proxy/partial, then rebuild the swarm from the returned
    (modified) copies — subprocesses cannot mutate parent memory directly.
    """
    Swarm = []
    for i in range(0, 10):
        nc = Foo(i)
        Swarm.append(nc)
    p = Pool()
    for i in range(100, 102):
        # NOTE(review): only the last iteration's 'results' survives the
        # loop; earlier map_async results are discarded — presumably
        # intentional in this toy example, but worth confirming.
        results = p.map_async(partial(proxy, costf=CostFunc, i=i), Swarm)
    # Indentation was lost in the paste; close/join presumably belong after
    # the loop (a closed pool cannot accept new work) — TODO confirm.
    p.close()
    p.join()
    Swarm = []
    for a in results.get():
        Swarm.append(a)
    for s in Swarm:
        print (s.b)

main()
This creates a Swarm list of objects, and within each of these objects is evaluate that is the function you need to call. Then we have parameters (CostFunc and an integer as in your code).
We will now use Pool.map_async to map your Swarm list to your pool. This gives each worker one instance of Foo from your Swarm list, and we have a proxy function that actually calls then evaluate().
However, as map_async only sends one object from the iterable to the function at a time, instead of using proxy directly as the target function for the pool, we use partial to build the target function with the "fixed" arguments baked in.
And as you apparently want to get the modified objects back, this requires another trick. If you modify the target object in Pool process, it just modifies the local copy and throws it away as soon as the processing completes. There would be no way for the subprocess to modify main process memory anyway (or vice versa), this would cause a segmentation fault.
Instead, after modifying the object, we return self. When your pool has completed its work, we discard the old Swarm and reassemble it from the result objects.
in order to test this you will need the freeopcua library.
I want to offer the user a list of methods available on the server. The user can detect which methods exist. (through an enum)
All these functions have a variable amount of input args and output args.
now with freeopcua you call a method like
node.call_method("2:myMethod1", 1,2,3,4)
However what I have available is [1,2,3,4]. (is thats the user input I get)
Would there be a way to parse this so it fits as myMethod arguments?
Minimal code to run the issue (not at all my code but it will give the idea of where I want to go:
myServer.py: (Only needed to have the methods no issue in here)
from opcua import Server, ua, uamethod
from enum import Enum
class methods(Enum):
    # Methods the server promises to offer; the member *value* is the
    # browse name used when registering and calling (e.g. "2:add").
    add = "add"
    multi = "more"    # NOTE(review): member name and value differ — confirm intended
    person = "notInt"
class myServer(Server):
    """OPC-UA demo server that registers one UA method per `methods` member."""

    def __init__(self):
        Server.__init__(self)
        self.set_endpoint("opc.tcp://0.0.0.0:4840/freeopcua/server/")
        self.set_server_name("FreeOpcUa Example Server")
        uri = "http://examples.freeopcua.github.io"
        self.idx = self.register_namespace(uri)
        # Automatically creates server methods of the methods I promise to offer
        for mymethod in methods:
            args = self.methodCreator(mymethod)
            # (removed a stray no-op 'args[1]' expression that did nothing)
            self.nodes.objects.add_method(args[0], args[1], args[2], args[3], args[4])
        self.start()

    def methodCreator(self, method_type):
        """Build [namespace idx, browse name, handler, inargs, outargs] for
        one `methods` member; returns None for unknown members."""
        inargs = None
        outargs = None
        method = None
        if method_type == methods.add:
            inargs = []
            inarg = ua.Argument()
            inarg.Name = "first_number"
            inargs.append(inarg)
            inarg = ua.Argument()
            inarg.Name = "second_number"
            inargs.append(inarg)
            # BUG FIX: was self.multi — add() takes exactly the two
            # arguments declared above; the handlers were swapped.
            method = self.add
            return [2, method_type.value, method, inargs, outargs]
        elif method_type == methods.multi:
            inargs = []
            inarg = ua.Argument()
            inarg.Name = "first_number"
            inargs.append(inarg)
            inarg = ua.Argument()
            inarg.Name = "second_number"
            inargs.append(inarg)
            inarg = ua.Argument()
            inarg.Name = "third_number"
            inargs.append(inarg)
            # BUG FIX: was self.add — multi() takes the three arguments
            # declared above.
            method = self.multi
            return [2, method_type.value, method, inargs, outargs]
        elif method_type == methods.person:
            inargs = []
            inarg = ua.Argument()
            inarg.Name = "Name"
            inargs.append(inarg)
            inarg = ua.Argument()
            inarg.Name = "Age"
            inargs.append(inarg)
            inarg = ua.Argument()
            inarg.Name = "Surname"
            inargs.append(inarg)
            inarg = ua.Argument()
            inarg.Name = "Job"
            inargs.append(inarg)
            method = self.person
            return [2, method_type.value, method, inargs, outargs]

    @uamethod  # restored: markdown formatting had turned '@' into '#'
    def add(self, parent, x, y):
        print(x+y)

    @uamethod  # restored decorator
    def multi(self, parentm, x, y, z):
        print(x*y*z)

    @uamethod  # restored decorator
    def person(self, parent, name, age, surname, job):
        print("I'm %s %s I'm %s years old and I do %s" % (name, surname, age, job))
Now the file it's all about:
myClient.py
from stack.server import myServer, methods
from opcua import Client
class myClient(Client):
    """Interactive client: lists the advertised methods, collects argument
    values by name, and calls the chosen method generically."""

    def call_functions(self):
        print("Implemented methods:")
        values = []
        for method in methods:
            print(method.value)
            values.append(method.value)
        #In my real code I check input but here I'll trust the user
        method = input("Select a method please: \n")
        objects = self.nodes.objects
        inarguments = objects.get_child(["2:" + method, "0:InputArguments"]).get_value()
        inargs = []
        for argument in inarguments:
            inargs.append(input("%s: " % argument.Name))
        # I disabled some methods to make sure I just need to show one case
        if method == 'notInt':
            print("Desired")
            objects.call_method("2:" + method, inargs[0], inargs[1], inargs[2], inargs[3])
        print("error")
        # BUG FIX: passing the list as one positional argument sent a single
        # array value to the server; *inargs unpacks it into individual
        # arguments, matching the method's declared signature.
        objects.call_method("2:" + method, *inargs)
# Spin up the demo server, then drive it with the interactive client;
# the 'with' block ensures the client disconnects before the server stops.
server = myServer()
with myClient("opc.tcp://localhost:4840/freeopcua/server/") as client:
    client.call_functions()
server.stop()
So when I want to call the method generic like:
objects.call_method("2:" + method, inargs)
Which for 'notInt' would have the desired output as if I did:
objects.call_method("2:" + method, inargs[0], inargs[1], inargs[2], inargs[3])
Is there a way in python to get this parsed from array to list of input args seperated by ,? So that I can keep my generic way to call each method? Or in freeopcua is there a way to get the desired affect (keep in mind that I use the argument names to ask the user for his input so just making it take a list as input wont be a sollution)
I've searched a bit, and yesterday at a meeting with some friends I discussed the issue. They made a point about *args and suggested I investigate whether that path works — and it does. To solve my issue I only had to add a * in front of my list of client responses; just as promised in the shared link, it unpacks the list and sends the elements to the server as individual arguments instead of one list object. My search keywords were simply wrong the other day. Anyway, resolved by just doing this in myClient.py:
from stack.server import myServer, methods
from opcua import Client
class myClient(Client):
    """Interactive client: lists server methods and calls the chosen one."""

    def call_functions(self):
        """Show advertised methods, prompt for one plus its arguments, call it."""
        print("Implemented methods:")
        for available in methods:
            print(available.value)
        values = [available.value for available in methods]
        #In my real code I check input but here I'll trust the user
        method = input("Select a method please: \n")
        objects = self.nodes.objects
        arg_defs = objects.get_child(["2:" + method, "0:InputArguments"]).get_value()
        inargs = [input("%s: " % arg_def.Name) for arg_def in arg_defs]
        # I disabled some methods to make sure I just need to show one case
        if method == 'notInt':
            print("Desired")
            objects.call_method("2:" + method, inargs[0], inargs[1], inargs[2], inargs[3])
        print("error")
        objects.call_method("2:" + method, *inargs)  # This will now work
# Same driver as above, now exercising the fixed *inargs call path.
server = myServer()
with myClient("opc.tcp://localhost:4840/freeopcua/server/") as client:
    client.call_functions()
server.stop()
Now all I need to do is remove the enum so the client asks this to the server and then I have a client that is generic for whatever function I add to my opcua-server my client code will be able to ask which are available, choose one and ask for the arguments without needing additional code. Gotta love python!
I wanted to know if there is a way of populating a option menu with choices and then each of the different choice gives the build button a different function
for eg:
Type = cmds.optionMenu('type',w = 300 ,label = 'Type of crowd:')
cmds.menuItem( label='Walking' )
cmds.menuItem( label='Running' )
cmds.menuItem( label='Cheering' )
# NOTE(review): 'bld' is never defined in this snippet, so this line raises
# NameError as written — the question is asking how to wire the button to
# the function matching the current menu selection.
cmds.button('Build',command = bld)
def walk(*args):
    """Menu callback: report the walking choice."""
    print("walking")  # BUG FIX: bare name 'walking' was an undefined reference
def run(*args):  # BUG FIX: the def line was missing its colon
    """Menu callback: report the running choice."""
    print("running")  # BUG FIX: bare name 'running' was an undefined reference
def cheer(*args):  # BUG FIX: the def line was missing its colon
    """Menu callback: report the cheering choice."""
    print("cheer")  # BUG FIX: bare name 'cheer' was a self-reference
So if the menu item selected is Walking, the button command should execute the command walk; if the menu item selected is Running, the button command should execute the command run, and so on.
is this even possible in maya python...????
There's three parts to the problem.
First, you want your options to be callables. I like functools.partial for that, so that you could give the same command different parameters and have it be treated as two different actions:
from functools import partial
bigcircle = partial(cmds.circle, radius = 10)
littleCircle = partial(cmds.circle, radius = 1)
the second problem is that menuItems in OptionMenus don't fire their commands directly. They trigger the -cc change command on the owning optionMenu. So we need something that will turn the label back into a callable object. A little class will do:
class menuMgr(object):
    """Dispatch optionMenu change-callbacks to named callables.

    The optionMenu's -cc handler is invoked with the selected label as the
    last positional argument; that label selects which callable to run.
    """

    def __init__(self, **callables):
        self.Callables = callables

    def __call__(self, *args):
        label = args[-1]
        handler = self.Callables[label]
        handler()
The third part is to match these with a label. You can do this elegantly with the **kwargs syntax, where you can either pass in a whole dictionary or named keywords:
def menu_of_functions(**callables):
    """Build an optionMenu whose items trigger the matching callable by label."""
    dispatcher = menuMgr(**callables)
    menu_handle = cmds.optionMenu('type3', w=300, label='Type of crowd:', cc=dispatcher)
    # One menu item per keyword; the label doubles as the dispatch key.
    for label in callables:
        cmds.menuItem(label=label)
    cmds.setParent("..")
Heres the whole thing in working form to inspect:
import maya.cmds as cmds
import functools
# Prebuilt callables: same command, different parameters — each partial is
# a distinct zero-argument action for the menu.
bigCircle = functools.partial ( cmds.circle, radius = 10)
littleCircle = functools.partial (cmds.circle, radius = 1)
class menuMgr(object):
    """Map optionMenu labels to callables; an instance serves as the -cc handler."""

    def __init__(self, **callables):
        self.Callables = callables

    def __call__(self, *args):
        # The newly selected label arrives as the final positional argument.
        chosen = args[-1]
        self.Callables[chosen]()
def menu_of_functions(**callables):
    """Create the crowd-type optionMenu wired to a menuMgr dispatcher."""
    manager = menuMgr(**callables)
    created = cmds.optionMenu('type3', w=300, label='Type of crowd:', cc=manager)
    for item_label in callables:
        cmds.menuItem(label=item_label)
    cmds.setParent("..")
# Demo driver: build a window holding the function-dispatching menu.
q = cmds.window()
cmds.columnLayout()
menu_of_functions(big = bigCircle, small = littleCircle)
cmds.showWindow(q)
Sure you can, since functions are first class objects in python.
Let's say you have a list of functions.
fns = [walk, run, cheer]
1) You'll need a mapping from a string key to the python function.
Let's use a dictionary comprehension.
options = {fn.__name__: fn for fn in fns}
Alternatively you can build the dictionary with arbitrary keys.
options = {"Walking": walk, "Running": run, "Cheering": cheer}
2) Get a reference to the function by accessing the dictionary item with the functions name.
options['run'](*args)
3) ???
4) Profit
Had a chance to do a small bit of research on this today, but I don't actually use Maya Python so this may not be viable code -- at least it should be close!
def walk(*args):
    """Print the walking action (menu callback)."""
    message = "walking"
    print(message)
def run(*args):  # BUG FIX: the def line was missing its colon
    """Print the running action (menu callback)."""
    print ("running")
def cheer(*args):  # BUG FIX: the def line was missing its colon
    """Print the cheering action (menu callback)."""
    print ("cheer")
# Dispatch table: optionMenu label -> handler function.
fncdict = {"Walking":walk,"Running":run,"Cheering":cheer}
def dofunc(funcname):
    """Look up *funcname* in fncdict and invoke it; complain on unknown keys."""
    try:
        fncdict[funcname]()
    except KeyError:
        # BUG FIX: the original error string was split across raw source
        # lines, which is a syntax error; implicit string concatenation
        # keeps it as one message.
        print("adsmith doesn't really know how this crap works, "
              "and gave you some really shoddy advice. Go downvote "
              "his answer.")
Type = cmds.optionMenu('type',w = 300 ,label = 'Type of crowd:')
# Please don't name things this way! naming conventions in PEP 8
# make this look like a class not an instance of optionMenu
cmds.menuItem( label='Walking', parent = Type )
cmds.menuItem( label='Running', parent = Type )
cmds.menuItem( label='Cheering', parent = Type )
# NOTE(review): cmds.optionMenu returns the control's path string, so
# 'Type.value' presumably fails — query the selection with
# cmds.optionMenu(Type, q=True, v=True) instead; confirm against Maya docs.
cmds.button('Build',command = lambda x: dofunc(Type.value))
# I'm assuming this is the button you want to use to say "GO", right?
From the little I've read, it looks like optionMenu.value refers to the text in the active menuItem, but I can't say for sure -- it may just be the text for that optionMenu in which case the button will call dofunc('Type of crowd:') which will return the exception I built to shame myself.
Here's an alternative that I KNOW will work, but it's ugly and unnecessary.
# all
# those
# functions
# go
# here
# including
# the dict and dofunc
# NOTE(review): this alternative is deliberately left broken — the author
# acknowledges below that a lambda body cannot contain an assignment, so
# the changeCommand line is a syntax error as written.
activeMenuItem = None
Type = cmds.optionMenu('type',w = 300 ,label = 'Type of crowd:',
changeCommand = lambda x: activeMenuItem = x)
# this fails because you can't do assignments in a lambda -- OOPS!!
cmds.menuItem( label='Walking', parent = Type )
cmds.menuItem( label='Running', parent = Type )
cmds.menuItem( label='Cheering', parent = Type )
cmds.button('Build',command = lambda x: dofunc(activeMenuItem))
The changeCommand option in optionMenu gets called every time you change items. I've assigned it to a lambda that updates the variable activeMenuItem to the value in the newly-active menuItem, then had the button reference the variable instead of querying the optionMenu for its currently selected button, but let's be honest -- this is what menus are MADE to do. There is DEFINITELY a way to do it without storing every single selection.
EDIT: THIS LAST ONE WON'T WORK BECAUSE YOU CAN'T DO ASSIGNMENTS WITHIN LAMBDA EXPRESSIONS. MY FAULT!
I have kind of figured out an easier way to this problem
# Build the menu. Fixes vs. the pasted snippet: the flag is 'label' (not
# 'Label'), and the menuItem labels were unquoted / had an unterminated
# string literal.
cmds.optionMenu('Greetings', label='Greet')
cmds.menuItem(label='hi', parent='Greetings')
cmds.menuItem(label='Hello', parent='Greetings')

def Greet(*args):
    """Query the optionMenu's current value and print the matching greeting."""
    menuItems = cmds.optionMenu('Greetings', q=True, v=True)
    if menuItems == 'hi':  # fixed: '=' (assignment) -> '==' (comparison)
        print("hi")
    menuItemsnew = cmds.optionMenu('Greetings', q=True, v=True)
    # NOTE(review): compared against 'Hello' to match the menuItem label
    # above — the paste compared against a bare lowercase name; confirm.
    if menuItemsnew == 'Hello':
        print("hello")