namespace test_py
{
class Event
{
public:
enum Type { BEGIN = 0, RESULT, END };
Type get_type( ) const { return m_type; }
protected:
Event( ) { }
~Event( ) { }
Type m_type;
};
class EventBegin : public Event
{
public:
EventBegin( ) { m_type = Event::BEGIN; }
~EventBegin( ) {}
};
class EventResult : public Event
{
public:
EventResult( int result ) { m_type = Event::RESULT; m_result = result; }
~EventResult( ) {}
int get_result( ) { return m_result; }
protected:
int m_result;
};
class EventEnd : public Event
{
public:
EventEnd( ) { m_type = Event::END; }
~EventEnd( ) {}
};
class EventListener
{
public:
virtual void on_event( const Event& event ) = 0;
};
struct EventListenerWrap: EventListener, py::wrapper< EventListener >
{
void
on_event( const Event& event )
{
this->get_override( "on_event" )( event );
}
};
BOOST_PYTHON_MODULE( test_py )
{
{
py::scope outer = py::class_< Event, boost::noncopyable >( "Event", py::no_init )
.add_property( "event_type", &Event::get_type );
py::enum_< Event::Type >( "EventType" )
.value( "BEGIN", Event::BEGIN )
.value( "RESULT", Event::RESULT )
.value( "END", Event::END )
.export_values( );
}
{
py::class_< EventBegin, py::bases< Event > >( "EventBegin" );
}
{
py::class_< EventResult, py::bases< Event > >( "EventResult", py::no_init )
.def( py::init< int >( ( py::arg( "result" ) ) ) )
.add_property( "result", &EventResult::get_result );
}
{
py::class_< EventEnd, py::bases< Event > >( "EventEnd" );
}
{
py::class_< EventListenerWrap, boost::noncopyable >( "EventListener", py::no_init )
.def( "on_event", py::pure_virtual( &EventListener::on_event ) );
}
}
}
I have a protected constructor and destructor in the Event base class and cannot change that.
In Python 2.7 I need to derive from the EventListener class and send a pointer back to the C++ code.
During compilation I get an error like this:
/boost/python/detail/destroy.hpp: In instantiation of ‘static void boost::python::detail::value_destroyer<false>::execute(const volatile T*) [with T = test_py::Event]’:
/boost/python/detail/destroy.hpp:95:36: required from ‘void boost::python::detail::destroy_referent_impl(void*, T& (*)()) [with T = const test_py::Event]’
/boost/python/detail/destroy.hpp:101:39: required from ‘void boost::python::detail::destroy_referent(void*, T (*)()) [with T = const test_py::Event&]’
/boost/python/converter/rvalue_from_python_data.hpp:135:71: required from ‘boost::python::converter::rvalue_from_python_data<T>::~rvalue_from_python_data() [with T = const test_py::Event&]’
/boost/python/converter/arg_from_python.hpp:107:8: required from ‘PyObject* boost::python::detail::caller_arity<2u>::impl<F, Policies, Sig>::operator()(PyObject*, PyObject*) [with F = void (test_py::EventListener::*)(const test_py::Event&); Policies = boost::python::default_call_policies; Sig = boost::mpl::vector3<void, test_py::EventListener&, const test_py::Event&>; PyObject = _object]’
/boost/python/object/py_function.hpp:38:33: required from ‘PyObject* boost::python::objects::caller_py_function_impl<Caller>::operator()(PyObject*, PyObject*) [with Caller = boost::python::detail::caller<void (test_py::EventListener::*)(const test_py::Event&), boost::python::default_call_policies, boost::mpl::vector3<void, test_py::EventListener&, const test_py::Event&> >; PyObject = _object]’
EventListener.cpp:193:1: required from here
EventListener.cpp:18:5: error: ‘test_py::Event::~Event()’ is protected
~Event( ) { }
^
In file included from /boost/python/converter/rvalue_from_python_data.hpp:10:0,
from /boost/python/converter/registry.hpp:9,
from /boost/python/converter/registered.hpp:8,
from /boost/python/object/make_instance.hpp:10,
from /boost/python/object/make_ptr_instance.hpp:8,
from /boost/python/to_python_indirect.hpp:11,
from /boost/python/converter/arg_to_python.hpp:10,
from /boost/python/call.hpp:15,
from /boost/python/object_core.hpp:14,
from /boost/python/object/class.hpp:9,
from /boost/python/class.hpp:13,
from ../../defs.hpp:6,
from ../defs.hpp:3,
from defs.hpp:3,
from EventListener.cpp:1:
/boost/python/detail/destroy.hpp:33:9: error: within this context
p->~T();
^
py::scope outer = py::class_< Event, boost::noncopyable >( "Event", py::no_init )
.add_property( "event_type", &Event::get_type );
At first glance, this is where your problem is: py::class_<Event, ...> only knows about binding to Event, which has the protected destructor.
You're going to have to wrap Event in a class that exposes the destructor publicly.
If that's not possible (because you can't change the definition of EventBegin, EventEnd, etc., for example), then you're going to have to write a polymorphic container that holds on to the derived classes through its own internal interface, internally treating the events as non-polymorphic objects.
This is not as difficult as it sounds:
#include <memory>
namespace test_py
{
class Event
{
public:
enum Type { BEGIN = 0, RESULT, END };
Type get_type( ) const { return m_type; }
protected:
Event( ) { }
~Event( ) { }
Type m_type;
};
class EventBegin : public Event
{
public:
EventBegin( ) { m_type = Event::BEGIN; }
~EventBegin( ) {}
};
class EventResult : public Event
{
public:
EventResult( int result ) { m_type = Event::RESULT; m_result = result; }
~EventResult( ) {}
int get_result( ) { return m_result; }
protected:
int m_result;
};
class EventEnd : public Event
{
public:
EventEnd( ) { m_type = Event::END; }
~EventEnd( ) {}
};
class EventProxy
{
// define an interface for turning a non-polymorphic event
// into a polymorphic one
struct concept
{
virtual const Event* as_event() const = 0;
virtual ~concept() = default;
};
// define a model to support the polymorphic interface for a
// non-polymorphic concrete object
template<class T> struct model : concept
{
template<class...Args> model(Args&&... args)
: _event(std::forward<Args>(args)...)
{}
const Event* as_event() const override {
return &_event;
}
T _event;
};
// construct the model that contains any Event
template<class T>
EventProxy(std::shared_ptr<T> ptr)
: _impl(std::move(ptr))
{}
public:
// T should be derived from Event...
template<class T, class...Args>
static EventProxy create(Args&&... args)
{
return EventProxy(std::make_shared<model<T>>(std::forward<Args>(args)...));
}
// simply return the address of the internal non-polymorphic event
const Event* as_event() const {
return _impl->as_event();
}
// return a shared pointer that points to the internal Event BUT
// defers lifetime ownership to our internal shared_ptr to
// our model. This means we never invoke the polymorphic
// destructor of Event through the protected interface.
std::shared_ptr<const Event> as_shared_event() const {
return std::shared_ptr<const Event>(_impl, _impl->as_event());
}
private:
// lifetime of the proxy is owned by this shared_ptr.
std::shared_ptr<concept> _impl;
};
}
// a quick test.
auto main() -> int
{
auto ep = test_py::EventProxy::create<test_py::EventBegin>();
const test_py::Event* p = ep.as_event();
std::shared_ptr<const test_py::Event> sp = ep.as_shared_event();
}
When exposing a function, Boost.Python will generate converters for each of the arguments. For arguments with types T and T&, the resulting Python converter will hold a copy of the object, and hence needs access to the copy-constructor and destructor. The rationale for this behavior is to prevent accidentally exposing dangling references. The same holds true when passing C++ arguments to Python.
This behavior presents a problem when:
exposing EventListener::on_event(const Event&), as Boost.Python will attempt to create an object that holds a copy of the Event. To resolve this, consider exposing an auxiliary function that accepts an Event* and then delegates to the original function.
passing an Event object to Python in EventListenerWrap::on_event. To resolve this, consider wrapping the argument in boost::ref() or boost::python::ptr().
Be aware that by not creating copies, there is a chance of dangling references. If the actual Event object is owned by Python, then its lifetime needs to be at least as long as any reference to it in C++. Likewise, if the actual Event object is owned by C++, then its lifetime needs to be at least as long as any reference to it in Python.
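As an illustration of that hazard (a sketch only; the module and class names mirror the example further below), a Python listener that stores the event it receives is holding a reference to the C++ object rather than a copy:
class CachingListener(example.EventListener):
    def on_event(self, event):
        # 'event' refers to the underlying C++ object, not a copy; keeping it
        # beyond this call is only safe if the C++ object outlives this reference.
        self.last_event = event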
struct EventListenerWrap
: EventListener,
boost::python::wrapper<EventListener>
{
void on_event(const Event& event)
{
this->get_override("on_event")(boost::ref(event));
}
};
/// @brief Auxiliary function that will delegate to EventListener::on_event and
/// avoid by-value conversions at the language boundary. This prevents
/// Boost.Python from creating instance holders that would hold
/// the value as an rvalue.
void event_listener_on_event_aux(EventListener& listener, Event* event)
{
return listener.on_event(*event);
}
BOOST_PYTHON_MODULE(...)
{
namespace python = boost::python;
python::class_<EventListenerWrap, boost::noncopyable>("EventListener")
.def("on_event", python::pure_virtual(&event_listener_on_event_aux))
;
}
An interesting detail is that boost::python::pure_virtual() will duplicate the signature of the function it wraps, but it will never actually invoke the wrapped function. Hence, the wrapped function could have a no-op/empty implementation, but providing one is a good idea in case the pure_virtual designator is removed or the auxiliary function is invoked directly.
Also, note that to allow a Python class to derive from a Boost.Python class, the Boost.Python class must expose an __init__() method. Suppressing it, for example with boost::python::no_init, will result in a runtime error.
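To make that concrete, here is a hedged sketch contrasting the two registrations, reusing the names from the snippet above; only the first lets a Python-derived listener initialize its base class:
// OK: a default __init__ is exposed, so Python subclasses can initialize the base.
python::class_<EventListenerWrap, boost::noncopyable>("EventListener")
    .def("on_event", python::pure_virtual(&event_listener_on_event_aux));

// Problematic: no_init suppresses __init__, so instantiating a Python-derived
// listener fails at runtime when it tries to initialize the base class.
python::class_<EventListenerWrap, boost::noncopyable>("EventListener", python::no_init)
    .def("on_event", python::pure_virtual(&event_listener_on_event_aux));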
Here is a minimal complete example based on the original code that demonstrates exposing a class with protected constructor and destructor, two derived classes, and virtual dispatch of the derived classes through Boost.Python:
#include <iostream>
#include <string>
#include <boost/python.hpp>
// Legacy API.
class event
{
public:
std::string name;
protected:
event(std::string name) : name(name) {}
~event() = default;
};
struct event_a: event { event_a(): event("event_a") {} };
struct event_b: event { event_b(): event("event_b") {} };
class event_listener
{
public:
virtual void on_event(const event& event) = 0;
};
// Boost.Python helper types and functions.
struct event_listener_wrap
: event_listener,
boost::python::wrapper<event_listener>
{
void on_event(const event& event)
{
std::cout << "event_listener_wrap::on_event()" << std::endl;
this->get_override("on_event")(boost::ref(event));
}
};
/// @brief Auxiliary function that will delegate to event_listener::on_event and
/// avoid by-value conversions at the language boundary. This prevents
/// Boost.Python from creating instance holders that would hold
/// the value as an rvalue.
void event_listener_on_event_wrap(event_listener& listener, event* event)
{
return listener.on_event(*event);
}
BOOST_PYTHON_MODULE(example)
{
namespace python = boost::python;
// Expose event and suppress default by-value converters and initializers.
// This will prevent Boost.Python from trying to access constructors and
// destructors.
python::class_<event, boost::noncopyable>("Event", python::no_init)
.def_readonly("name", &event::name)
;
// Expose event_a and event_b as derived from event.
python::class_<event_a, python::bases<event>>("EventA");
python::class_<event_b, python::bases<event>>("EventB");
// Expose event_listener_wrap.
python::class_<event_listener_wrap, boost::noncopyable>("EventListener")
.def("on_event", python::pure_virtual(&event_listener_on_event_wrap))
;
// Expose a function that will perform virtual resolution.
python::def("do_on_event", &event_listener_on_event_wrap);
}
Interactive usage:
>>> import example
>>> class Listener(example.EventListener):
... def on_event(self, event):
... assert(isinstance(event, example.Event))
... print "Listener::on_event: ", event, event.name
...
>>> listener = Listener()
>>> listener.on_event(example.EventA())
Listener::on_event: <example.EventA object at 0x7f3bc1176368> event_a
>>> example.do_on_event(listener, example.EventB())
event_listener_wrap::on_event()
Listener::on_event: <example.Event object at 0x7f3bc1246fa0> event_b
When Python is directly aware of a method, it will invoke it without passing through Boost.Python. Notice how listener.on_event() did not get dispatched through C++, and the event object maintains its example.EventA type. On the other hand, when dispatching is forced through C++, downcasting will not occur: when Listener.on_event() is invoked through example.do_on_event(), the event object's type is example.Event and not example.EventB.
Related
I am creating a C# library with some reusable code and was trying to create a method inside a method. I have a method like this:
public static void Method1()
{
// Code
}
What I would like to do is this:
public static void Method1()
{
public static void Method2()
{
}
public static void Method3()
{
}
}
Then I could choose either Method1.Method2 or Method1.Method3. Obviously the compiler isn't happy about this; any help is much appreciated. Thanks.
If by nested method you mean a method that is only callable within that method (like in Delphi), you could use delegates.
public static void Method1()
{
var method2 = new Action(() => { /* action body */ } );
var method3 = new Action(() => { /* action body */ } );
//call them like normal methods
method2();
method3();
//if you want an argument
var actionWithArgument = new Action<int>(i => { Console.WriteLine(i); });
actionWithArgument(5);
//if you want to return something
var function = new Func<int, int>(i => { return i++; });
int test = function(6);
}
Yes, when C# 7.0 is released, local functions will allow you to do that. You will be able to have a method inside a method, as:
public string GetName(int userId)
{
string GetFamilyName(int id)
{
return User.FamilyName;
}
string firstName = User.FirstName;
var fullName = firstName + GetFamilyName(userId);
return fullName;
}
Note that public (and similar modifiers) are not supported. From the C# programming guide:
Because all local functions are private, including an access modifier, such as the private keyword, generates compiler error CS0106.
This answer was written before C# 7 came out. With C# 7 you can write local methods.
No, you can't do that. You could create a nested class:
public class ContainingClass
{
public static class NestedClass
{
public static void Method2()
{
}
public static void Method3()
{
}
}
}
You'd then call:
ContainingClass.NestedClass.Method2();
or
ContainingClass.NestedClass.Method3();
I wouldn't recommend this though. Usually it's a bad idea to have public nested types.
Can you tell us more about what you're trying to achieve? There may well be a better approach.
You can define delegates, complete with their code, within your method and call them whenever you want.
public class MyMethods
{
public void Method1()
{
// defining your methods
Action method1 = new Action( () =>
{
Console.WriteLine("I am method 1");
Thread.Sleep(100);
var b = 3.14;
Console.WriteLine(b);
}
);
Action<int> method2 = new Action<int>( a =>
{
Console.WriteLine("I am method 2");
Console.WriteLine(a);
}
);
Func<int, bool> method3 = new Func<int, bool>( a =>
{
Console.WriteLine("I am a function");
return a > 10;
}
);
// calling your methods
method1.Invoke();
method2.Invoke(10);
method3.Invoke(5);
}
}
There is always an alternative of using a nested class within a class that will not be visible from outside and calling its methods, like:
public class SuperClass
{
internal static class HelperClass
{
internal static void Method2() {}
}
public void Method1 ()
{
HelperClass.Method2();
}
}
As of C# 7.0 you can do that:
public static void SlimShady()
{
void Hi([CallerMemberName] string name = null)
{
Console.WriteLine($"Hi! My name is {name}");
}
Hi();
}
This is called local functions, that is just what you were looking for.
I took the example from here, but further information can be found here and here.
Why don't you use classes?
public static class Helper
{
public static string MethodA()
{
return "A";
}
public static string MethodB()
{
return "B";
}
}
Now you can access MethodA via
Helper.MethodA();
Older thread, but C# does have the concept of nested functions:
Func<int> getCalcFunction(int total, bool useAddition)
{
int overallValue = 0;
if (useAddition)
{
Func<int> incrementer = new Func<int>(() =>
{
overallValue += total;
return overallValue;
});
return incrementer;
}
else
{
Func<int> decrementer = new Func<int>(() =>
{
overallValue -= total;
return overallValue;
});
return decrementer;
}
}
private void CalcTotals()
{
Func<int> decrem = getCalcFunction(30, false);
int a = decrem(); //result = -30
a = decrem(); //result = -60
Func<int> increm = getCalcFunction(30, true);
int b = increm(); //result = 30
b = increm(); //result = 60
}
You're nearly there:
public static void Method1()
should be
public static class Method1{}
Don't you want to use a nested class instead?
That said, you seem not to respect the Single Responsibility Principle, because you want a single method to do more than one thing at a time.
Why don't you just run one method within another?
public void M1()
{
// do stuff
}
public void M2()
{
// do stuff
M1();
}
As the title suggests, I'm trying to create a bunch of attributes but the code is getting repetitive and messy. I want to use the closure argument to make the code more compact.
According to the C API reference, the closure is a function pointer that provides additional information for getters/setters. I have not been able to find an example of it in use.
This is how I am currently using it:
static void closure_1() {};
static void closure_2() {};
...
static PyObject *
FOO_getter(FOO* self, void *closure) {
if (closure == &closure_1) {
return self->bar_1;
} else if (closure == &closure_2) {
return self->bar_2;
}
}
static int
FOO_setter(FOO* self, PyObject *value, void *closure) {
if (closure == &closure_1) {
if (somehow value is invalid) {
PyErr_SetString(PyExc_ValueError, "invalid value for bar_1.");
return -1;
}
} else if (closure == &closure_2) {
if (somehow value is invalid) {
PyErr_SetString(PyExc_ValueError, "invalid value for bar_2.");
return -1;
}
}
return 0;
}
static PyGetSetDef FOO_getsetters[] = {
{"bar_1", (getter) FOO_getter, (setter) FOO_setter, "bar_1 attribute", closure_1},
{"bar_2", (getter) FOO_getter, (setter) FOO_setter, "bar_2 attribute", closure_2},
{NULL} /* Sentinel */
};
...
It works the way I want it to, but it looks more like a hack than something "pythonic". Is there a better way to handle this, e.g., calling the closure in some way?
I guess this "closure" is used to pass extra context to FOO_getter. It should be something that simplifies accessing members of FOO. The documentation is likely wrong: it should say "optional pointer", not "optional function pointer".
Consider passing offsets of the members. Offsets to struct members can easily be obtained with the standard offsetof macro defined in stddef.h. The offset is a small unsigned integer that will fit into a void*.
static PyGetSetDef FOO_getsetters[] = {
{"bar_1", (getter) FOO_getter, (setter) FOO_setter, "bar_1 attribute", (void*)offsetof(FOO, bar_1)},
{"bar_2", (getter) FOO_getter, (setter) FOO_setter, "bar_2 attribute", (void*)offsetof(FOO, bar_2)},
{NULL} /* Sentinel */
};
Now the getter could be:
static PyObject *
FOO_getter(FOO* self, void *closure) {
// pointer to the location where the FOO member is stored
char *memb_ptr = (char*)self + (size_t)closure;
// cast to `PyObject**` because `memb_ptr` points to a location where a pointer to `PyObject` is stored
PyObject *value = *(PyObject**)memb_ptr;
Py_INCREF(value); // getters return a new reference
return value;
}
Use a similar scheme for the setter, as sketched below.
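A minimal sketch of such a setter under the same offset scheme (validation omitted; reference counting handled explicitly):
static int
FOO_setter(FOO *self, PyObject *value, void *closure) {
    // location where the FOO member (a PyObject*) is stored
    PyObject **memb_ptr = (PyObject **)((char *)self + (size_t)closure);
    if (value == NULL) {
        PyErr_SetString(PyExc_TypeError, "cannot delete attribute");
        return -1;
    }
    Py_INCREF(value);       /* take a reference to the new value */
    Py_XDECREF(*memb_ptr);  /* drop the reference to the old value, if any */
    *memb_ptr = value;
    return 0;
}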
Despite the documentation, I'm assuming the closure can be any pointer you want. So how about passing an "object", seeing as C doesn't support closures (short of literally generating functions at run-time).
In an object, we can store the offset of the member in FOO, and a pointer to an attribute-specific validator.
typedef int (*Validator)(FOO *, const struct Attribute *, void *);
typedef struct Attribute {
const char *name;
size_t offset;
Validator validator;
} Attribute;
static PyObject **resolve_offset(FOO *self, const Attribute *attr) {
return (PyObject **)( ( (char *)self ) + attr->offset );
}
static PyObject *FOO_getter(FOO *self, void *_attr) {
const Attribute *attr = (const Attribute *)_attr;
PyObject *value = *resolve_offset(self, attr);
Py_INCREF(value); /* getters return a new reference */
return value;
}
static int FOO_setter(FOO *self, PyObject *val, void *_attr) {
const Attribute *attr = (const Attribute *)_attr;
if (attr->validator(self, attr, val)) {
PyObject **slot = resolve_offset(self, attr);
Py_INCREF(val); /* take a reference to the new value */
Py_XDECREF(*slot); /* drop the old value, if any */
*slot = val;
return 0;
} else {
// Building the string to include attr->name is left to you.
PyErr_SetString(PyExc_ValueError, "invalid value.");
return -1;
}
}
static int FOO_bar_1_validator(FOO *self, const Attribute *attr, void *val) { ... }
static int FOO_bar_2_validator(FOO *self, const Attribute *attr, void *val) { ... }
#define ATTRIBUTE(name) \
static Attribute FOO_ ## name ## attr = { \
#name, \
offsetof(FOO, name), \
FOO_ ## name ## _validator \
};
ATTRIBUTE(bar_1);
ATTRIBUTE(bar_2);
#define PY_ATTR_DEF(name) { \
#name, \
(getter)FOO_getter, \
(setter)FOO_setter, \
#name " attribute", \
&(FOO_ ## name ## attr) \
}
static PyGetSetDef FOO_getsetters[] = {
PY_ATTR_DEF(bar_1),
PY_ATTR_DEF(bar_2),
{ NULL }
};
I originally wrote:
resolve_offset surely relies on undefined behaviour, but it should work fine. The alternative would be to have three functions in our attribute object (get, validate, set) instead of one, but that defeats the point of the question.
But @tstanisl points out that it looks like it isn't UB. Awesome!
I want to create custom conversions from std::vector to Python list using Boost.Python. For that I follow the to_python_converter approach, using a typical converter structure, i.e.
template <class T, bool NoProxy = true>
struct vector_to_list {
static PyObject* convert(const std::vector<T>& vec) {
typedef typename std::vector<T>::const_iterator const_iter;
bp::list* l = new boost::python::list();
for (const_iter it = vec.begin(); it != vec.end(); ++it) {
if (NoProxy) {
l->append(boost::ref(*it));
} else {
l->append(*it);
}
}
return l->ptr();
}
static PyTypeObject const* get_pytype() { return &PyList_Type; }
};
which I can use successfully in plenty of cases, but it doesn't work with std::vector<double>. This is how I declare the conversions in my Boost.Python module:
BOOST_PYTHON_MODULE(libmymodule_pywrap) {
.
.
.
bp::to_python_converter<std::vector<double, std::allocator<double> >,
vector_to_list<double, false>, true>(); // this doesn't work
bp::to_python_converter<std::vector<Eigen::VectorXd,
std::allocator<Eigen::VectorXd> >,
vector_to_list<Eigen::VectorXd, false>, true>(); // this works well
}
And I get the following compilation error:
/usr/include/boost/python/object/make_instance.hpp:27:9: error: no matching function for call to ‘assertion_failed(mpl_::failed************ boost::mpl::or_<boost::is_class<double>, boost::is_union<double>, mpl_::bool_<false>, mpl_::bool_<false>, mpl_::bool_<false> >::************)’
BOOST_MPL_ASSERT((mpl::or_<is_class<T>, is_union<T> >));
^
/usr/include/boost/mpl/assert.hpp:83:5: note: candidate: template<bool C> int mpl_::assertion_failed(typename mpl_::assert<C>::type)
int assertion_failed( typename assert<C>::type );
^
/usr/include/boost/mpl/assert.hpp:83:5: note: template argument deduction/substitution failed:
/usr/include/boost/python/object/make_instance.hpp:27:9: note: cannot convert ‘mpl_::assert_arg<boost::mpl::or_<boost::is_class<double>, boost::is_union<double>, mpl_::bool_<false>, mpl_::bool_<false>, mpl_::bool_<false> > >(0u, 1)’ (type ‘mpl_::failed************ boost::mpl::or_<boost::is_class<double>, boost::is_union<double>, mpl_::bool_<false>, mpl_::bool_<false>, mpl_::bool_<false> >::************’) to type ‘mpl_::assert<false>::type {aka mpl_::assert<false>}’
BOOST_MPL_ASSERT((mpl::or_<is_class<T>, is_union<T> >));
Does somebody understand what's going on?
I am learning Boost.Python as well and unfortunately don't understand how to solve that error, but this example seems to avoid it, and you may be able to modify it to your own needs.
template<typename T>
struct vector_to_list
{
static PyObject* convert(const std::vector<T>& src)
{
boost::python::list result;
for (int i = 0; i < src.size(); i++)
{
result.append(src[i]);
}
return boost::python::incref(result.ptr());
}
};
...
boost::python::to_python_converter<std::vector<double>, vector_to_list<double> >();
...
However, if the goal is to provide functionality like the following, where getData() is declared in C++ as:
vector<double> getData() { return m_Data; }
where, for example, vector<double> m_Data = {1.0, 2.0, 3.0};
and you wanted in Python:
data = example.getData()
print (data)
[1.0, 2.0, 3.0]
You could implement it by creating a generic converter and registering each container type like this (courtesy of this answer):
/// @brief Type that allows for registration of conversions from
/// python iterable types.
struct iterable_converter
{
/// @note Registers converter from a python iterable type to the
/// provided type.
template <typename Container>
iterable_converter&
from_python()
{
boost::python::converter::registry::push_back(
&iterable_converter::convertible,
&iterable_converter::construct<Container>,
boost::python::type_id<Container>());
// Support chaining.
return *this;
}
/// #brief Check if PyObject is iterable.
static void* convertible(PyObject* object)
{
return PyObject_GetIter(object) ? object : NULL;
}
/// #brief Convert iterable PyObject to C++ container type.
///
/// Container Concept requirements:
///
/// * Container::value_type is CopyConstructable.
/// * Container can be constructed and populated with two iterators.
/// I.e. Container(begin, end)
template <typename Container>
static void construct(
PyObject* object,
boost::python::converter::rvalue_from_python_stage1_data* data)
{
namespace python = boost::python;
// Object is a borrowed reference, so create a handle indicating it is
// borrowed for proper reference counting.
python::handle<> handle(python::borrowed(object));
// Obtain a handle to the memory block that the converter has allocated
// for the C++ type.
typedef python::converter::rvalue_from_python_storage<Container>
storage_type;
void* storage = reinterpret_cast<storage_type*>(data)->storage.bytes;
typedef python::stl_input_iterator<typename Container::value_type>
iterator;
// Allocate the C++ type into the converter's memory block, and assign
// its handle to the converter's convertible variable. The C++
// container is populated by passing the begin and end iterators of
// the python object to the container's constructor.
new (storage) Container(
iterator(python::object(handle)), // begin
iterator()); // end
data->convertible = storage;
}
};
BOOST_PYTHON_MODULE(example)
{
// Register iterable conversions.
iterable_converter()
.from_python<std::vector<double> > ()
.from_python<std::vector<Eigen::VectorXd> >()
;
}
This allows for chaining and nested vectors, and gives an API that is more Pythonic than with vector_indexing_suite; instead of cases like:
data = example.doubleVector()
data[:] = example.getData()
you can simply use:
data = example.getData()
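As a sketch of the chaining and nesting claim, registering a nested container is just one more from_python() call on the same converter object (the nested type here is only illustrative):
iterable_converter()
    .from_python<std::vector<double> >()
    .from_python<std::vector<std::vector<double> > >()  // nested vectors
    ;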
I want to check if an object is an instance of a certain class. In Python I can do this with isinstance. In C/C++, I found a function named PyObject_IsInstance. But it seems not to work like isinstance.
In detail (also described as sample codes below):
In C++ I defined my custom type My. The type definition is MyType and the object definition is MyObject.
Add MyType to the exported module with name My.
In Python, create a new instance my = My(), and isinstance(my, My) returns True.
While in C++ we use PyObject_IsInstance(my, (PyObject*)&MyType) to check my, and this returns 0, which means my is not an instance of the class defined by MyType.
Full C++ code:
#define PY_SSIZE_T_CLEAN
#include <python3.6/Python.h>
#include <python3.6/structmember.h>
#include <stddef.h>
typedef struct {
PyObject_HEAD
int num;
} MyObject;
static PyTypeObject MyType = []{
PyTypeObject ret = {
PyVarObject_HEAD_INIT(NULL, 0)
};
ret.tp_name = "cpp.My";
ret.tp_doc = NULL;
ret.tp_basicsize = sizeof(MyObject);
ret.tp_itemsize = 0;
ret.tp_flags = Py_TPFLAGS_DEFAULT;
ret.tp_new = PyType_GenericNew;
return ret;
}();
// check if obj is an instance of MyType
static PyObject *Py_fn_checkMy(PyObject *obj) {
if (PyObject_IsInstance(obj, (PyObject *)&MyType)) Py_RETURN_TRUE;
else Py_RETURN_FALSE;
}
static PyMethodDef modmethodsdef[] = {
{ "checkMy", (PyCFunction)Py_fn_checkMy, METH_VARARGS, NULL },
{ NULL }
};
static PyModuleDef moddef = []{
PyModuleDef ret = {
PyModuleDef_HEAD_INIT
};
ret.m_name = "cpp";
ret.m_doc = NULL;
ret.m_size = -1;
return ret;
}();
PyMODINIT_FUNC
PyInit_cpp(void)
{
PyObject *mod;
if (PyType_Ready(&MyType) < 0)
return NULL;
mod = PyModule_Create(&moddef);
if (mod == NULL)
return NULL;
Py_INCREF(&MyType);
PyModule_AddObject(mod, "My", (PyObject *)&MyType);
PyModule_AddFunctions(mod, modmethodsdef);
return mod;
}
Compile this into cpp.so, and test it in Python:
>>> import cpp
>>> isinstance(cpp.My(), cpp.My)
True
>>> cpp.checkMy(cpp.My())
False
METH_VARARGS
This is the typical calling convention, where the methods have the type PyCFunction. The function expects two PyObject* values. The first one is the self object for methods; for module functions, it is the module object. The second parameter (often called args) is a tuple object representing all arguments. This parameter is typically processed using PyArg_ParseTuple() or PyArg_UnpackTuple().
The function signature of Py_fn_checkMy does not match this. It should take two arguments. The first is the module, and this is what you are checking against MyType. The second argument (which you don't actually accept) is a tuple containing the object you passed. You should extract the argument from the tuple and check the type of that, as sketched below.
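A sketch of that METH_VARARGS variant, unpacking the single argument with PyArg_ParseTuple:
static PyObject *Py_fn_checkMy(PyObject *self, PyObject *args) {
    PyObject *obj;
    /* extract the single positional argument from the args tuple */
    if (!PyArg_ParseTuple(args, "O", &obj))
        return NULL;
    if (PyObject_IsInstance(obj, (PyObject *)&MyType)) Py_RETURN_TRUE;
    else Py_RETURN_FALSE;
}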
You'd probably be better off using METH_O to receive a single argument directly instead of extracting arguments from a tuple:
static PyObject *Py_fn_checkMy(PyObject *self, PyObject *obj) {
if (PyObject_IsInstance(obj, (PyObject *)&MyType)) Py_RETURN_TRUE;
else Py_RETURN_FALSE;
}
static PyMethodDef modmethodsdef[] = {
{ "checkMy", (PyCFunction)Py_fn_checkMy, METH_O, NULL },
{ NULL }
};
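With that change, the earlier interactive check should agree with isinstance (a hypothetical session, after rebuilding the module):
>>> import cpp
>>> cpp.checkMy(cpp.My())
True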
I'm having an issue where a Python class, which is derived from a C++ base class using pybind11, is being immediately destructed (garbage collected). I would like C++ to take ownership of the dynamically allocated object, but I can't seem to make that happen. I've tried keep_alive, passing shared_ptr<> as a py::class_ template argument, and py::return_value_policy... nothing is working. I suspect this is just user error.
This is a simplification of the real issue I'm having with a much larger code base that is architected similarly. Changing the architecture is not an option, so making this example work is critical for me.
I have two C++ classes that I have created Python interfaces for using pybind11. Class A and class B both have virtual methods, so they have corresponding trampoline classes to support inheritance. The user calls the B::Run() function, which results in a dynamically allocated (via new) A object being created. When I create specializations of these two classes in Python, as shown below, I get a segmentation fault because B::aBase is destroyed immediately after B::Run is called.
Any Ideas how to fix this? Thanks in advance!
class A
{
public:
A(){};
virtual ~A()
{
std::cout << "In A::~A()\n";
};
virtual char* SayHello()
{
char* x = "\n\nHello from Class A\n\n";
return x;
}
};
class ATramploline : public A
{
public:
using A::A;
char* SayHello() override
{
PYBIND11_OVERLOAD( char*,A,SayHello,);
}
};
class B
{
public:
B()
{
std::cout << "In Class B Constructor\n";
}
void Run()
{
aBase = AllocateAnAClass();
std::cout << aBase->SayHello();
}
virtual ~B()
{
fprintf(stderr,"About to delete aBase");
delete aBase;
}
A* aBase;
virtual A* AllocateAnAClass()
{
return new A;
}
};
class BTramploline : public B
{
public:
using B::B;
A* AllocateAnAClass() override
{
PYBIND11_OVERLOAD( A*,B,AllocateAnAClass,);
}
};
PYBIND11_MODULE(TestModule,m)
{
py::class_<A,ATramploline>(m,"A")
.def(py::init<>(),py::return_value_policy::reference_internal)
.def("SayHello",&A::SayHello);
py::class_<B,BTramploline>(m,"B")
.def(py::init<>())
.def("Run",&B::Run)
.def("AllocateAnAClass",&B::AllocateAnAClass,py::return_value_policy::reference_internal);
}
#!/usr/bin/python3
from TestModule import A,B
class MyA(A):
def __init__(self):
super().__init__()
print("Done with MyA Constructor")
def SayHello(self):
return '\n\nHello from Class MyA\n\n'
class MyB(B):
def __init__(self):
super().__init__()
print("Done With MyB Constructor")
def AllocateAnAClass(self):
print("In MyB::AllocateAnAClass!!!")
return MyA()
#x = B()
#x.Run()
y = MyB()
y.Run()
print("done with test script\n")
The correct (I think) way to use std::shared_ptr<A> as the A holder is to add it to class_<A...> arguments.
You also want to replace every instance of A* with std::shared_ptr<A>, and new with std::make_shared. I think non-default return policies are not needed in this case, so I have removed them; YMMV.
Working module below (with minor errors corrected).
#include <pybind11/pybind11.h>
#include <memory>
#include <iostream>
namespace py = pybind11;
class A
{
public:
A(){};
A(const A&) { std::cout << "Copying A\n"; }
virtual ~A()
{
std::cout << "In A::~A()\n";
};
virtual const char* SayHello()
{
const char* x = "\n\nHello from Class A\n\n";
return x;
}
};
class ATrampoline : public A
{
public:
using A::A;
const char* SayHello() override
{
PYBIND11_OVERLOAD( const char*,A,SayHello,);
}
};
class B
{
public:
B()
{
std::cout << "In Class B Constructor\n";
}
B(const B&) { std::cout << "Copying B\n"; }
void Run()
{
aBase = AllocateAnAClass();
std::cout << aBase->SayHello();
}
virtual ~B()
{
}
std::shared_ptr<A> aBase;
virtual std::shared_ptr<A> AllocateAnAClass()
{
return std::make_shared<A>();
}
};
class BTrampoline : public B
{
public:
using B::B;
std::shared_ptr<A> AllocateAnAClass() override
{
PYBIND11_OVERLOAD(std::shared_ptr<A>,B,AllocateAnAClass,);
}
};
PYBIND11_MODULE(TestModule,m)
{
py::class_<A,std::shared_ptr<A>, ATrampoline>(m,"A")
.def(py::init<>())
.def("SayHello",&A::SayHello);
py::class_<B, BTrampoline>(m,"B")
.def(py::init<>())
.def("Run",&B::Run)
.def("AllocateAnAClass",&B::AllocateAnAClass);
}
py::nodelete was the solution. While n.m's answer DOES work, it would require going back and changing all of the pointers in an existing library to smart pointers, which isn't a viable option for me. Using py::nodelete allows me to do everything on the pybind11 side.
py::class_<A,ATramploline,std::unique_ptr<A,py::nodelete> >(m,"A")
.def(py::init<>())
.def("SayHello",&A::SayHello);