Keeping context-manager object alive through function calls - python

I am running into a bit of an issue with keeping a context manager open through function calls. Here is what I mean:
There is a context manager defined in a module which I use to open SSH connections to network devices. The "setup" code opens the SSH session and handles any issues, and the teardown code gracefully closes the SSH session. I normally use it as follows:
from manager import manager

def do_stuff(device):
    with manager(device) as conn:
        output = conn.send_command("show ip route")
        # process output...
        return processed_output
In order to keep the SSH session open and not have to re-establish it across function calls, I would like to add an argument to "do_stuff" which can optionally return the SSH session along with the data, as follows:
def do_stuff(device, return_handle=False):
    with manager(device) as conn:
        output = conn.send_command("show ip route")
        # process output...
        if return_handle:
            return (processed_output, conn)
        else:
            return processed_output
I would like to be able to call "do_stuff" from another function, as follows, signalling that the SSH handle should be returned along with the output.
def do_more_stuff(device):
    data, conn = do_stuff(device, return_handle=True)
    output = conn.send_command("show users")
    # process output...
    return processed_output
However, the issue I am running into is that the SSH session is closed as soon as "do_stuff" returns, because returning triggers the teardown code in the context manager (which gracefully closes the SSH session).
I have tried converting "do_stuff" into a generator, so that its state is suspended, hoping this would keep the context manager open:
def do_stuff(device, return_handle=False):
    with manager(device) as conn:
        output = conn.send_command("show ip route")
        # process output...
        if return_handle:
            yield (processed_output, conn)
        else:
            yield processed_output
And calling it as such:
def do_more_stuff(device):
    gen = do_stuff(device, return_handle=True)
    data, conn = next(gen)
    output = conn.send_command("show users")
    # process output...
    return processed_output
However this approach does not seem to be working in my case: the context manager still gets closed, and I get back a closed socket.
Is there a better way to approach this problem? Maybe my generator needs some more work... I think using a generator to hold state is the most "obvious" way that comes to mind, but overall, should I be looking into another way of keeping the session open across function calls?
Thanks
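
For reference, one standard-library way to hand ownership of the connection to the caller is contextlib.ExitStack. A minimal sketch reusing the names from the question (the optional stack parameter is an illustrative addition, not part of the original code):

from contextlib import ExitStack
from manager import manager

def do_stuff(device, stack=None):
    # If the caller supplies an ExitStack, the connection is entered on it
    # and stays open until the caller closes the stack; otherwise a local
    # stack closes it on return, matching the original behaviour.
    with ExitStack() as local_stack:
        if stack is None:
            stack = local_stack
        conn = stack.enter_context(manager(device))
        output = conn.send_command("show ip route")
        # process output...
        return processed_output, conn

def do_more_stuff(device):
    with ExitStack() as stack:
        data, conn = do_stuff(device, stack=stack)
        output = conn.send_command("show users")
        # process output...
    # the SSH session is closed here, when the caller's stack exits
    return processed_output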

I found this question because I was looking for a solution to an analogous problem where the object I wanted to keep alive was a pyvirtualdisplay.display.Display instance with selenium.webdriver.Firefox instances in it.
I also wanted any opened resources to die if an exception were raised during the display/browser instance creations.
I imagine the same could be applied to your database connection.
I recognize this is probably only a partial solution and contains less-than-best practices. Help is appreciated.
This answer is the result of an ad lib spike using the following resources to patch together my solution:
https://docs.python.org/3/library/contextlib.html#contextlib.ContextDecorator
http://www.wefearchange.org/2013/05/resource-management-in-python-33-or.html
(I do not yet fully grok what is described here though I appreciate the potential. The second link above eventually proved to be the most helpful by providing analogous situations.)
from pyvirtualdisplay.display import Display
from selenium.webdriver import Firefox
from contextlib import contextmanager, ExitStack

RFBPORT = 5904

def acquire_desktop_display(rfbport=RFBPORT):
    display_kwargs = {'backend': 'xvnc', 'rfbport': rfbport}
    display = Display(**display_kwargs)
    return display

def release_desktop_display(self):
    print("Stopping the display.")
    # browsers apparently die with the display so no need to call quits on them
    self.display.stop()

def check_desktop_display_ok(desktop_display):
    print("Some checking going on here.")
    return True
class XvncDesktopManager:
    max_browser_count = 1

    def __init__(self, check_desktop_display_ok=None, **kwargs):
        self.rfbport = kwargs.get('rfbport', RFBPORT)
        self.acquire_desktop_display = acquire_desktop_display
        self.release_desktop_display = release_desktop_display
        if check_desktop_display_ok is None:
            # fall back to the module-level check function defined above
            check_desktop_display_ok = globals()['check_desktop_display_ok']
        self.check_desktop_display_ok = check_desktop_display_ok
    @contextmanager
    def _cleanup_on_error(self):
        with ExitStack() as stack:
            # push adds a context manager's __exit__() method
            # to stack's callback stack.
            stack.push(self)
            yield
            # The validation check passed and didn't raise an exception.
            # Accordingly, we want to keep the resource, and pass it
            # back to our caller.
            stack.pop_all()
    def __enter__(self):
        url = 'http://stackoverflow.com/questions/30905121/'\
              'keeping-context-manager-object-alive-through-function-calls'
        self.display = self.acquire_desktop_display(self.rfbport)
        with ExitStack() as stack:
            # add XvncDesktopManager instance's exit method to callback stack
            stack.push(self)
            self.display.start()
            self.browser_resources = [
                Firefox() for x in range(self.max_browser_count)
            ]
            for browser_resource in self.browser_resources:
                for url in (url, ):
                    browser_resource.get(url)
            # This is the last bit of magic.
            # ExitStacks have a .close() method which unwinds
            # all the registered context managers and callbacks
            # and invokes their exit functionality.
            # Capture the function that calls all the exits; it will be
            # called later, outside the context in which it was captured.
            self.close_all = stack.pop_all().close
        # if something fails in this context in __enter__, clean up
        with self._cleanup_on_error():
            if not self.check_desktop_display_ok(self):
                msg = "Failed validation for {!r}"
                raise RuntimeError(msg.format(self.display))
        # self is assigned to the variable after "as";
        # manually call close_all to unwind the callback stack
        return self

    def __exit__(self, *exc_details):
        # had to comment this out, unable to add this to callback stack
        # self.release_desktop_display(self)
        pass
I had a semi-expected result with the following:
kwargs = {
    'rfbport': 5904,
}
_desktop_manager = XvncDesktopManager(check_desktop_display_ok=check_desktop_display_ok, **kwargs)

with ExitStack() as stack:
    # the context is entered and the body of the __enter__ method is executed
    # desktop_manager will have an attribute "close_all" that can be called
    # explicitly to unwind the callback stack
    desktop_manager = stack.enter_context(_desktop_manager)

# I was able to manipulate the browsers inside of the display,
# and outside of the context,
# before calling desktop_manager.close_all()
browser, = desktop_manager.browser_resources
browser.get(url)

# close everything down when finished with the resource
desktop_manager.close_all()  # does nothing, not in callback stack
# this functioned as expected
desktop_manager.release_desktop_display(desktop_manager)
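
Stripped of the display-specific details, the core of this pattern is small: enter resources on an ExitStack, and only if everything succeeds, pop_all() so that cleanup is deferred to an explicit close. A minimal sketch (the file resources here are placeholders for illustration):

from contextlib import ExitStack

def acquire_resources():
    with ExitStack() as stack:
        a = stack.enter_context(open('a.txt'))  # any context managers
        b = stack.enter_context(open('b.txt'))
        # If anything above had raised, the stack would have unwound and
        # closed a and b. We got here, so detach the callbacks and let
        # them survive this with-block.
        close_all = stack.pop_all().close
    return (a, b), close_all

(a, b), close_all = acquire_resources()
# ... use a and b across as many function calls as you like ...
close_all()  # explicitly unwind everything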

Related

How to properly wait to check if an Excel instance has closed after attempting to?

I'm using the Python standard library modules and the pythoncom and win32com.client modules from the PyWin32 package to interact with Microsoft Excel.
I get a list of the running Excel instances as COM object references. When I want to close the Excel instances, I first iterate through the workbooks and close them, then I execute the Quit method, and afterwards I attempt to terminate the Excel process if it has not terminated.
I do the check (_is_process_running) because the Excel instance might not close successfully if for example the Excel process is a zombie process (information on how one can be created) or if the VBA listens to the before close event and cancels it.
My current quirky solution for knowing when to check whether it closed is to use the sleep function. It does seem to work, but it can fail in certain circumstances, such as when closing takes longer than the sleep lasts.
I thought that clearing all the COM references and collecting the garbage would be enough for the Excel process to terminate if the Quit method succeeds, but termination still happens asynchronously and takes some time.
The check is in the close method of the _excel_application_wrapper class in the excel.pyw file.
Simple code to generate an Excel zombie process (you can see the process in the task manager):
from os import getpid, kill
from win32com.client import DispatchEx
_ = DispatchEx('Excel.Application')
kill(getpid(), 9)
This is only for testing purposes to help reproduce an Excel instance that won't be closed when calling Quit.
Another way to make Quit fail to close is to add this VBA code to the workbook in Excel:
Private Sub Workbook_BeforeClose(Cancel As Boolean)
    Cancel = True
End Sub
Code on the excel_test.py file:
import excel
from traceback import print_exc as print_exception

try:
    excel_application_instances = excel.get_application_instances()
    for excel_application_instance in excel_application_instances:
        # use excel_application_instance here before closing it
        # ...
        excel_application_instance.close()
except Exception:
    print('An exception has occurred. Details of the exception:')
    print_exception()
finally:
    input('Execution finished.')
Code on the excel.pyw file:
from ctypes import byref as by_reference, c_ulong as unsigned_long, windll as windows_dll
from gc import collect as collect_garbage
from pythoncom import CreateBindCtx as create_bind_context, GetRunningObjectTable as get_running_object_table, \
    IID_IDispatch as dispatch_interface_iid, _GetInterfaceCount as get_interface_count
from win32com.client import Dispatch as dispatch

class _object_wrapper_base_class():
    def __init__(self, object_to_be_wrapped):
        # self.__dict__['_wrapped_object'] instead of
        # self._wrapped_object to prevent recursive calling of __setattr__
        # https://stackoverflow.com/a/12999019
        self.__dict__['_wrapped_object'] = object_to_be_wrapped

    def __getattr__(self, name):
        return getattr(self._wrapped_object, name)

    def __setattr__(self, name, value):
        setattr(self._wrapped_object, name, value)
class _excel_workbook_wrapper(_object_wrapper_base_class):
    # __setattr__ takes precedence over properties with setters
    # https://stackoverflow.com/a/15751159
    def __setattr__(self, name, value):
        # raises AttributeError if the attribute doesn't exist
        getattr(self, name)
        if name in vars(_excel_workbook_wrapper):
            attribute = vars(_excel_workbook_wrapper)[name]
            # checks if the attribute is a property with a setter
            if isinstance(attribute, property) and attribute.fset is not None:
                attribute.fset(self, value)
                return
        setattr(self._wrapped_object, name, value)

    @property
    def saved(self):
        return self.Saved

    @saved.setter
    def saved(self, value):
        self.Saved = value

    def close(self):
        self.Close()
class _excel_workbooks_wrapper(_object_wrapper_base_class):
    def __getitem__(self, key):
        return _excel_workbook_wrapper(self._wrapped_object[key])

class _excel_application_wrapper(_object_wrapper_base_class):
    @property
    def workbooks(self):
        return _excel_workbooks_wrapper(self.Workbooks)

    def _get_process(self):
        window_handle = self.hWnd
        process_identifier = unsigned_long()
        windows_dll.user32.GetWindowThreadProcessId(window_handle, by_reference(process_identifier))
        return process_identifier.value

    def _is_process_running(self, process_identifier):
        SYNCHRONIZE = 0x00100000
        process_handle = windows_dll.kernel32.OpenProcess(SYNCHRONIZE, False, process_identifier)
        returned_value = windows_dll.kernel32.WaitForSingleObject(process_handle, 0)
        windows_dll.kernel32.CloseHandle(process_handle)
        WAIT_TIMEOUT = 0x00000102
        return returned_value == WAIT_TIMEOUT

    def _terminate_process(self, process_identifier):
        PROCESS_TERMINATE = 0x0001
        process_handle = windows_dll.kernel32.OpenProcess(PROCESS_TERMINATE, False, process_identifier)
        process_terminated = windows_dll.kernel32.TerminateProcess(process_handle, 0)
        windows_dll.kernel32.CloseHandle(process_handle)
        return process_terminated != 0

    def close(self):
        for workbook in self.workbooks:
            workbook.saved = True
            workbook.close()
            del workbook
        process_identifier = self._get_process()
        self.Quit()
        del self._wrapped_object
        # 0 COM references
        print(f'{get_interface_count()} COM references.')
        collect_garbage()
        # quirky solution to wait for the Excel process to
        # terminate if it closed successfully from self.Quit()
        windows_dll.kernel32.Sleep(1000)
        # check if the Excel instance closed successfully
        # it may not close, for example, if the Excel process is a zombie
        # process or if the VBA listens to the before close event and cancels it
        if self._is_process_running(process_identifier=process_identifier):
            print('Excel instance failed to close.')
            # if the process is still running then attempt to terminate it
            if self._terminate_process(process_identifier=process_identifier):
                print('The process of the Excel instance was successfully terminated.')
            else:
                print('The process of the Excel instance failed to be terminated.')
        else:
            print('Excel instance closed successfully.')
def get_application_instances():
    running_object_table = get_running_object_table()
    bind_context = create_bind_context()
    excel_application_class_clsid = '{00024500-0000-0000-C000-000000000046}'
    excel_application_clsid = '{000208D5-0000-0000-C000-000000000046}'
    excel_application_instances = []
    for moniker in running_object_table:
        display_name = moniker.GetDisplayName(bind_context, None)
        if excel_application_class_clsid not in display_name:
            continue
        unknown_com_interface = running_object_table.GetObject(moniker)
        dispatch_interface = unknown_com_interface.QueryInterface(dispatch_interface_iid)
        dispatch_clsid = str(dispatch_interface.GetTypeInfo().GetTypeAttr().iid)
        if dispatch_clsid != excel_application_clsid:
            continue
        excel_application_instance_com_object = dispatch(dispatch=dispatch_interface)
        excel_application_instance = _excel_application_wrapper(excel_application_instance_com_object)
        excel_application_instances.append(excel_application_instance)
    return excel_application_instances
This answer suggests checking whether the remote procedure call (RPC) server is unavailable by calling something on the COM object. I have tried this by trial and error in different ways without success, such as adding the code below after self.Quit().
from pythoncom import com_error, CoUninitialize as co_uninitialize
from traceback import print_exc as print_exception

co_uninitialize()
try:
    print(self._wrapped_object)
except com_error as exception:
    if exception.hresult == -2147023174:  # "The RPC server is unavailable."
        print_exception()
    else:
        raise
You can use object_name.closed, which returns False if the file is not properly closed.
Using your code:
def close(self):
    for workbook in self.workbooks:
        workbook.saved = True
        workbook.close()
        if workbook.closed:
            del workbook
        else:
            print("Lookout, it's a zombie! Workbook was not deleted")
However, I should also mention that PEP 343 has an even better solution using Python's with context manager. This will ensure the file is closed before further execution.
Example:
with open("file_name", "w") as openfile:
    # do some work
    ...
# "file_name" is now closed
It seems to me that you know how to detect the current state of Excel instances.
The only point you are missing is detecting an event for the Quitting action.
AFAIK, there is no way to detect such an event.
But a (possibly very good) workaround is setting time points, e.g. in a list, and checking the status at those points.
If you are concerned about wasting 1000 ms while also performing an excessive number of checks, you could set your list as [1, 3, 10, 30, ...], i.e., equispaced in log(time).
Even if there were an event available, I guess your code would be more "elegant", but you won't get any better performance than with the proposal above (unless the wait time is in the range of, say, minutes or above).
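A minimal sketch of that idea, reusing the _is_process_running check from the question (the function name and delay values are illustrative):

from time import sleep

def wait_for_termination(is_running, delays=(0.1, 0.3, 1, 3, 10, 30)):
    # Check at time points roughly equispaced in log(time)
    # instead of a single fixed sleep.
    for delay in delays:
        if not is_running():
            return True
        sleep(delay)
    return not is_running()

# usage, e.g. inside close():
# closed = wait_for_termination(
#     lambda: self._is_process_running(process_identifier=process_identifier))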

Python Pika blocking connection on a dedicated class - callback not being called

When I run a simple app (producer / consumer), everything works: connecting to RabbitMQ, consuming messages, etc.
But when I run the code below inside a class (in order not to block the Flask app), the callback is not being called.
I added some prints and it seems that everything is fine during the connection initialization.
The messages are consumed (the queue is being cleaned), but the callback is never called.
Any idea?
TIA
Here is the code:
import pika

currentprices = ({"1": 5.5, "2": 3.5})

def initialparams():  # being called from outside
    print('S-init')
    RMQ = RMQPriceListener()
    RMQ.create_currentprices_channel()
    print('F-init')

class RMQPriceListener():
    def create_currentprices_channel(self):
        try:
            credential_params = pika.PlainCredentials('un', 'pw')
            connection_params = pika.ConnectionParameters(
                host='127.0.0.1', port=5555,
                credentials=credential_params)  # i use port 5555
            connection = pika.BlockingConnection(connection_params)
            channel = connection.channel()
            channel.queue_declare(queue='formatedrates')
            print('c0')  # being printed
            channel.basic_consume(queue='formatedrates',
                                  on_message_callback=self.callback,
                                  auto_ack=True)  # without this self the app
                                                  # is stuck here (which is logical)
            print('c1')  # being printed
            channel.start_consuming()  # working good and cleaning the queue
            return ('lost connection to RMQ')
        except:
            return ('No connection to RMQ')
The problematic function below works fine in a regular app, but not here inside the class:

    def callback(ch, method, properties, msg):
        print(f'msg={msg}')  # never printed
        # currentprices has an initial value, is read by the
        # getcurrentrates method below, and its value never changes
        global currentprices
        currentprices = msg  # never being updated
        print(currentprices)  # never printed
def getcurrentrates():
    print('get' + str(currentprices))  # printed
    print(type(currentprices))  # printed - ' dict '
    return currentprices  # returned with initial value
Since you pass on_message_callback=self.callback, you have to define the callback as a method of the class, with self as its first parameter, as below:

def callback(self, ch, method, properties, msg):
    ....
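
In context, only the signature changes (a minimal sketch based on the question's class):

class RMQPriceListener():
    # ... create_currentprices_channel as above ...

    def callback(self, ch, method, properties, msg):
        global currentprices
        currentprices = msg  # now updated on each message
        print(f'msg={msg}')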

Non-lazy instance creation with Pyro4 and instance_mode='single'

My aim is to provide a web framework with access to a Pyro daemon that performs time-consuming tasks at its first load. So far, I have managed to keep in memory (outside of the web app) a single instance of a class that takes care of the time-consuming loading at its initialization. I can also query it with my web app. The code for the daemon is:
@Pyro4.expose
@Pyro4.behavior(instance_mode='single')
class Store(object):
    def __init__(self):
        self._store = ...  # the expensive loading

    def query_store(self, query):
        return ...  # Useful query tool to expose to the web framework.
                    # Not time consuming, provided self._store is loaded.

with Pyro4.Daemon() as daemon:
    uri = daemon.register(Store)
    with Pyro4.locateNS() as ns:
        ns.register('store', uri)
    daemon.requestLoop()
The issue I am having is that although a single instance is created, it is only created at the first proxy query from the web app. This is normal behavior according to the doc, but not what I want, as the first query is still slow because of the initialization of Store.
How can I make sure the instance is already created as soon as the daemon is started?
I was thinking of creating a proxy instance of Store in the code of the daemon, but this is tricky because the event loop must be running.
EDIT
It turns out that daemon.register() can accept either a class or an object, which could be a solution. This is however not recommended in the doc (link above), and that feature apparently only exists for backwards compatibility.
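For completeness, that fallback would look something like this (a sketch; it works, but relies on behavior the docs keep only for backwards compatibility):

store = Store()  # the expensive loading happens here, before the request loop

with Pyro4.Daemon() as daemon:
    uri = daemon.register(store)  # register the instance, not the class
    with Pyro4.locateNS() as ns:
        ns.register('store', uri)
    daemon.requestLoop()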
Do whatever initialization you need outside of your Pyro code and cache it somewhere. Use the instance_creator parameter of the @behavior decorator for maximum control over how and when an instance is created. You could even pre-create server instances yourself and retrieve one from a pool if you so desire. Anyway, one possible way to do this is like so:
import Pyro4

def slow_initialization():
    print("initializing stuff...")
    import time
    time.sleep(4)
    print("stuff is initialized!")
    return {"initialized stuff": 42}

cached_initialized_stuff = slow_initialization()

def instance_creator(cls):
    print("(Pyro is asking for a server instance! Creating one!)")
    return cls(cached_initialized_stuff)

@Pyro4.behavior(instance_mode="percall", instance_creator=instance_creator)
class Server:
    def __init__(self, init_stuff):
        self.init_stuff = init_stuff

    @Pyro4.expose
    def work(self):
        print("server: init stuff is:", self.init_stuff)
        return self.init_stuff

Pyro4.Daemon.serveSimple({
    Server: "test.server"
})
But this complexity is not needed for your scenario. Just initialize the thing (that takes a long time) and cache it somewhere. Instead of re-initializing it every time a new server object is created, just refer to the cached pre-initialized result. Something like this:
import Pyro4

def slow_initialization():
    print("initializing stuff...")
    import time
    time.sleep(4)
    print("stuff is initialized!")
    return {"initialized stuff": 42}

cached_initialized_stuff = slow_initialization()

@Pyro4.behavior(instance_mode="percall")
class Server:
    def __init__(self):
        self.init_stuff = cached_initialized_stuff

    @Pyro4.expose
    def work(self):
        print("server: init stuff is:", self.init_stuff)
        return self.init_stuff

Pyro4.Daemon.serveSimple({
    Server: "test.server"
})

Multiprocessing managers and custom classes

I do not know why, but I get this strange error whenever I try to pass a shared custom-class object to a method of another shared object. Python version: 3.6.3
Code:
from multiprocessing.managers import SyncManager

class MyManager(SyncManager): pass
class MyClass: pass

class Wrapper:
    def set(self, ent):
        self.ent = ent

MyManager.register('MyClass', MyClass)
MyManager.register('Wrapper', Wrapper)

if __name__ == '__main__':
    manager = MyManager()
    manager.start()
    try:
        obj = manager.MyClass()
        lst = manager.list([1, 2, 3])
        collection = manager.Wrapper()
        collection.set(lst)  # executed fine
        collection.set(obj)  # raises error
    except Exception as e:
        raise
Error:
---------------------------------------------------------------------------
Traceback (most recent call last):
File "D:\Program Files\Python363\lib\multiprocessing\managers.py", line 228, in serve_client
request = recv()
File "D:\Program Files\Python363\lib\multiprocessing\connection.py", line 251, in recv
return _ForkingPickler.loads(buf.getbuffer())
File "D:\Program Files\Python363\lib\multiprocessing\managers.py", line 881, in RebuildProxy
return func(token, serializer, incref=incref, **kwds)
TypeError: AutoProxy() got an unexpected keyword argument 'manager_owned'
---------------------------------------------------------------------------
What's the problem here?
I ran into this too. As noted, this is a bug in Python's multiprocessing (see issue #30256); the pull request that corrects it has not yet been merged, and has since been superseded by another PR that makes the same change but adds a test as well.
Apart from manually patching your local installation, you have three other options:
you could use the MakeProxyType() callable to specify your proxytype, without relying on the AutoProxy proxy generator,
you could define a custom proxy class,
you can patch the bug with a monkeypatch
I'll describe those options below, after explaining what AutoProxy does:
What's the point of the AutoProxy class
The multiprocessing Manager pattern gives access to shared values by putting the values all in the same, dedicated 'canonical values server' process. All other processes (clients) talk to the server through proxies that then pass messages back and forth with the server.
The server does need to know what methods are acceptable for the type of object, however, so clients can produce a proxy object with the same methods. This is what the AutoProxy object is for. Whenever a client needs a new instance of your registered class, the default proxy the client creates is an AutoProxy, which then asks the server to tell it what methods it can use.
Once it has the method names, it calls MakeProxyType to construct a new class, and then creates an instance of that class to return.
All this is deferred until you actually need an instance of the proxied type, so in principle AutoProxy saves a little bit of memory if you are not using certain classes you have registered. It's very little memory, however, and the downside is that this process has to take place in each client process.
These proxy objects use reference counting to track when the server can remove the canonical value. It is that part that is broken in the AutoProxy callable; a new argument is passed to the proxy type to disable reference counting when the proxy object is being created in the server process rather than in a client but the AutoProxy type wasn't updated to support this.
So, how can you fix this? Here are those 3 options:
Use the MakeProxyType() callable
As mentioned, AutoProxy is really just a call (via the server) to get the public methods of the type, and a call to MakeProxyType(). You can just make these calls yourself, when registering.
So, instead of
from multiprocessing.managers import SyncManager
SyncManager.register("YourType", YourType)
use
from multiprocessing.managers import SyncManager, MakeProxyType, public_methods
# arguments: classname, sequence of method names
YourTypeProxy = MakeProxyType("YourType", public_methods(YourType))
SyncManager.register("YourType", YourType, YourTypeProxy)
Feel free to inline the MakeProxyType() call there.
If you were using the exposed argument to SyncManager.register(), you should pass those names to MakeProxyType instead:
# SyncManager.register("YourType", YourType, exposed=("foo", "bar"))
# becomes
YourTypeProxy = MakeProxyType("YourType", ("foo", "bar"))
SyncManager.register("YourType", YourType, YourTypeProxy)
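Applied to the types from the question, the registration change would look something like this (a sketch; the rest of the question's code is unchanged):

from multiprocessing.managers import SyncManager, MakeProxyType, public_methods

class MyClass: pass

class Wrapper:
    def set(self, ent):
        self.ent = ent

class MyManager(SyncManager): pass

# explicit proxy types instead of the default AutoProxy
MyManager.register("MyClass", MyClass, MakeProxyType("MyClassProxy", public_methods(MyClass)))
MyManager.register("Wrapper", Wrapper, MakeProxyType("WrapperProxy", public_methods(Wrapper)))

if __name__ == "__main__":
    manager = MyManager()
    manager.start()
    obj = manager.MyClass()
    collection = manager.Wrapper()
    collection.set(obj)  # no longer hits the AutoProxy TypeError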
You'd have to do this for all the pre-registered types, too:
from multiprocessing.managers import SyncManager, AutoProxy, MakeProxyType, public_methods

registry = SyncManager._registry
for typeid, (callable, exposed, method_to_typeid, proxytype) in registry.items():
    if proxytype is not AutoProxy:
        continue
    create_method = hasattr(SyncManager, typeid)
    if exposed is None:
        exposed = public_methods(callable)
    SyncManager.register(
        typeid,
        callable=callable,
        exposed=exposed,
        method_to_typeid=method_to_typeid,
        proxytype=MakeProxyType(f"{typeid}Proxy", exposed),
        create_method=create_method,
    )
Create custom proxies
You don't have to rely on multiprocessing creating a proxy for you; you can just write your own. The proxy is used in all processes except for the special 'managed values' server process, and the proxy should pass messages back and forth. This is not an option for the already-registered types, of course, but I'm mentioning it here because for your own types this offers opportunities for optimisations.
Note that you should have methods for all interactions that need to go back to the 'canonical' value instance, so you'd need to use properties to handle normal attributes, or add __getattr__, __setattr__ and __delattr__ methods as needed.
The advantage is that you can have very fine-grained control over what methods actually need to exchange data with the server process; in my specific example, my proxy class caches information that is immutable (the values would never change once the object was created) but is used often. That includes a flag value that controls whether other methods would do something, so the proxy can just check the flag value and not talk to the server process if it is not set. Something like this:
from multiprocessing.managers import BaseProxy, SyncManager

class FooProxy(BaseProxy):
    # what methods the proxy is allowed to access through calls
    _exposed_ = ("__getattribute__", "expensive_method", "spam")

    @property
    def flag(self):
        try:
            v = self._flag
        except AttributeError:
            # ask for the value from the server, "realvalue.flag"
            # use __getattribute__ because it's an attribute, not a property
            v = self._flag = self._callmethod("__getattribute__", ("flag",))
        return v

    def expensive_method(self, *args, **kwargs):
        if self.flag:  # cached locally!
            return self._callmethod("expensive_method", args, kwargs)

    def spam(self, *args, **kwargs):
        return self._callmethod("spam", args, kwargs)

SyncManager.register("Foo", Foo, FooProxy)
Because MakeProxyType() returns a BaseProxy subclass, you can combine that class with a custom subclass, saving yourself having to write any methods that just consist of return self._callmethod(...):
# a base class with the methods generated for us. The second argument
# doubles as the 'permitted' names, stored as _exposed_
FooProxyBase = MakeProxyType(
    "FooProxyBase",
    ("__getattribute__", "expensive_method", "spam"),
)

class FooProxy(FooProxyBase):
    @property
    def flag(self):
        try:
            v = self._flag
        except AttributeError:
            # ask for the value from the server, "realvalue.flag"
            # use __getattribute__ because it's an attribute, not a property
            v = self._flag = self._callmethod("__getattribute__", ("flag",))
        return v

    def expensive_method(self, *args, **kwargs):
        if self.flag:  # cached locally!
            return self._callmethod("expensive_method", args, kwargs)

    def spam(self, *args, **kwargs):
        return self._callmethod("spam", args, kwargs)

SyncManager.register("Foo", Foo, FooProxy)
Again, this won't solve the issue with standard types nested inside other proxied values.
Apply a monkeypatch
I use this to fix the AutoProxy callable, this should automatically avoid patching when you are running a Python version where the fix has already been applied to the source code:
# Backport of https://github.com/python/cpython/pull/4819
# Improvements to the Manager / proxied shared values code
# broke handling of proxied objects without a custom proxy type,
# as the AutoProxy function was not updated.
#
# This code adds a wrapper to AutoProxy if it is missing the
# new argument.
import logging
from inspect import signature
from functools import wraps
from multiprocessing import managers

logger = logging.getLogger(__name__)

orig_AutoProxy = managers.AutoProxy

@wraps(managers.AutoProxy)
def AutoProxy(*args, incref=True, manager_owned=False, **kwargs):
    # Create the autoproxy without the manager_owned flag, then
    # update the flag on the generated instance. If the manager_owned flag
    # is set, `incref` is disabled, so set it to False here for the same
    # result.
    autoproxy_incref = False if manager_owned else incref
    proxy = orig_AutoProxy(*args, incref=autoproxy_incref, **kwargs)
    proxy._owned_by_manager = manager_owned
    return proxy

def apply():
    if "manager_owned" in signature(managers.AutoProxy).parameters:
        return

    logger.debug("Patching multiprocessing.managers.AutoProxy to add manager_owned")
    managers.AutoProxy = AutoProxy

    # re-register any types already registered to SyncManager without a custom
    # proxy type, as otherwise these would all be using the old unpatched AutoProxy
    SyncManager = managers.SyncManager
    registry = managers.SyncManager._registry
    for typeid, (callable, exposed, method_to_typeid, proxytype) in registry.items():
        if proxytype is not orig_AutoProxy:
            continue
        create_method = hasattr(managers.SyncManager, typeid)
        SyncManager.register(
            typeid,
            callable=callable,
            exposed=exposed,
            method_to_typeid=method_to_typeid,
            create_method=create_method,
        )
Import the above and call the apply() function to fix multiprocessing. Do so before you start the manager server!
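For instance (the module name autoproxy_patch is illustrative, and MyManager/MyClass/Wrapper are the types from the question):

import autoproxy_patch  # the module containing the monkeypatch above
autoproxy_patch.apply()  # patch before the manager server starts

if __name__ == "__main__":
    manager = MyManager()
    manager.start()
    obj = manager.MyClass()
    collection = manager.Wrapper()
    collection.set(obj)  # works once the patch is applied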
Solution editing multiprocessing source code
The original answer by Sergey requires you to edit multiprocessing source code as follows:
Find your multiprocessing package (mine, installed via Anaconda, was in /anaconda3/lib/python3.6/multiprocessing).
Open managers.py
Add the keyword argument manager_owned=True to the AutoProxy function.
Original AutoProxy:
def AutoProxy(token, serializer, manager=None, authkey=None,
              exposed=None, incref=True):
...
Edited AutoProxy:
def AutoProxy(token, serializer, manager=None, authkey=None,
              exposed=None, incref=True, manager_owned=True):
...
Solution via code, at run time
I have managed to solve the unexpected keyword argument TypeError exception without editing directly the source code of multiprocessing by instead adding these few lines of code where I use multiprocessing's Managers:
import multiprocessing.managers

# Backup original AutoProxy function
backup_autoproxy = multiprocessing.managers.AutoProxy

# Defining a new AutoProxy that handles the unwanted keyword argument 'manager_owned'
def redefined_autoproxy(token, serializer, manager=None, authkey=None,
                        exposed=None, incref=True, manager_owned=True):
    # Calling original AutoProxy without the unwanted keyword argument
    return backup_autoproxy(token, serializer, manager, authkey,
                            exposed, incref)

# Updating AutoProxy definition in the multiprocessing.managers package
multiprocessing.managers.AutoProxy = redefined_autoproxy
Found temporary solution here.
I've managed to fix it by adding the needed keyword argument to the initializer of AutoProxy in multiprocessing\managers.py. Though, I don't know what else this kwarg is responsible for.

Run function after a certain type of model is committed

I want to run a function when instances of the Post model are committed. I want to run it any time they are committed, so I'd rather not explicitly call the function everywhere. How can I do this?
def notify_subscribers(post):
    """ send email to subscribers """
    ...

post = Post("Hello World", "This is my first blog entry.")
session.commit()  # How to run notify_subscribers with post as argument
                  # as soon as post is committed successfully?

post.title = "Hello World!!1"
session.commit()  # Run notify_subscribers once again.
No matter which option you choose below, SQLAlchemy comes with a big warning about the after_commit event (which is when both approaches send the signal).
The Session is not in an active transaction when the after_commit() event is invoked, and therefore can not emit SQL.
If your callback needs to query or commit to the database, it may have unexpected issues. In this case, you could use a task queue such as Celery to execute this in a background thread (with a separate session). This is probably the right way to go anyway, since sending emails takes a long time and you don't want your view to wait to return while it's happening.
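For example, the commit hook could enqueue a background task instead of sending mail inline. A rough sketch, assuming Celery (the app name, broker URL, and task name here are illustrative, not from the original):

from celery import Celery

celery_app = Celery("blog", broker="redis://localhost:6379/0")

@celery_app.task
def send_subscriber_emails(post_id):
    # Runs in a worker process with its own database session, so it can
    # query and commit safely, and the view doesn't block on SMTP.
    ...

def notify_subscribers(post):
    send_subscriber_emails.delay(post.id)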
Flask-SQLAlchemy provides a signal you can listen to that sends all the insert/update/delete ops. It needs to be enabled by setting app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True because tracking modifications is expensive and not needed in most cases.
Then listen for the signal:
from flask_sqlalchemy import models_committed

def notify_subscribers(app, changes):
    new_posts = [
        target for target, op in changes
        if isinstance(target, Post) and op in ('insert', 'update')
    ]
    # notify about the new and updated posts

models_committed.connect(notify_subscribers, app)
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
You can also implement this yourself (mostly by copying the code from Flask-SQLAlchemy). It's slightly tricky, because model changes occur on flush, not on commit, so you need to record all changes as flushes occur, then use them after the commit.
from sqlalchemy import event, inspect

class ModelChangeEvent(object):
    def __init__(self, session, *callbacks):
        self.model_changes = {}
        # store as a list so more callbacks can be appended later
        self.callbacks = list(callbacks)
        event.listen(session, 'before_flush', self.record_ops)
        event.listen(session, 'before_commit', self.record_ops)
        event.listen(session, 'after_commit', self.after_commit)
        event.listen(session, 'after_rollback', self.after_rollback)

    def record_ops(self, session, flush_context=None, instances=None):
        for targets, operation in ((session.new, 'insert'), (session.dirty, 'update'), (session.deleted, 'delete')):
            for target in targets:
                state = inspect(target)
                key = state.identity_key if state.has_identity else id(target)
                self.model_changes[key] = (target, operation)

    def after_commit(self, session):
        if self.model_changes:
            changes = list(self.model_changes.values())
            for callback in self.callbacks:
                callback(changes=changes)
            self.model_changes.clear()

    def after_rollback(self, session):
        self.model_changes.clear()

def notify_subscribers(changes):
    new_posts = [target for target, op in changes if isinstance(target, Post) and op in ('insert', 'update')]
    # notify about new and updated posts

# pass all the callbacks (if you have more than notify_subscribers)
mce = ModelChangeEvent(db.session, notify_subscribers)
# or you can append more callbacks
mce.callbacks.append(my_other_callback)
