diff --git a/.gitignore b/.gitignore index 3cf4fbbda..43d5094c9 100644 --- a/.gitignore +++ b/.gitignore @@ -43,3 +43,4 @@ pybind11Targets.cmake /pybind11/share/* /docs/_build/* .ipynb_checkpoints/ +tests/main.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index ee0975bc1..3284e21eb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -120,6 +120,8 @@ set(PYBIND11_HEADERS include/pybind11/complex.h include/pybind11/options.h include/pybind11/eigen.h + include/pybind11/eigen/matrix.h + include/pybind11/eigen/tensor.h include/pybind11/embed.h include/pybind11/eval.h include/pybind11/gil.h diff --git a/include/pybind11/eigen.h b/include/pybind11/eigen.h index 831625229..273b9c930 100644 --- a/include/pybind11/eigen.h +++ b/include/pybind11/eigen.h @@ -9,705 +9,4 @@ #pragma once -/* HINT: To suppress warnings originating from the Eigen headers, use -isystem. - See also: - https://stackoverflow.com/questions/2579576/i-dir-vs-isystem-dir - https://stackoverflow.com/questions/1741816/isystem-for-ms-visual-studio-c-compiler -*/ - -#include "numpy.h" - -// The C4127 suppression was introduced for Eigen 3.4.0. In theory we could -// make it version specific, or even remove it later, but considering that -// 1. C4127 is generally far more distracting than useful for modern template code, and -// 2. we definitely want to ignore any MSVC warnings originating from Eigen code, -// it is probably best to keep this around indefinitely. -#if defined(_MSC_VER) -# pragma warning(push) -# pragma warning(disable : 4127) // C4127: conditional expression is constant -# pragma warning(disable : 5054) // https://github.com/pybind/pybind11/pull/3741 -// C5054: operator '&': deprecated between enumerations of different types -#elif defined(__MINGW32__) -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wmaybe-uninitialized" -#endif - -#include -#include - -#if defined(_MSC_VER) -# pragma warning(pop) -#elif defined(__MINGW32__) -# pragma GCC diagnostic pop -#endif - -// Eigen prior to 3.2.7 doesn't have proper move constructors--but worse, some classes get implicit -// move constructors that break things. We could detect this an explicitly copy, but an extra copy -// of matrices seems highly undesirable. -static_assert(EIGEN_VERSION_AT_LEAST(3, 2, 7), - "Eigen support in pybind11 requires Eigen >= 3.2.7"); - -PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) - -// Provide a convenience alias for easier pass-by-ref usage with fully dynamic strides: -using EigenDStride = Eigen::Stride; -template -using EigenDRef = Eigen::Ref; -template -using EigenDMap = Eigen::Map; - -PYBIND11_NAMESPACE_BEGIN(detail) - -#if EIGEN_VERSION_AT_LEAST(3, 3, 0) -using EigenIndex = Eigen::Index; -template -using EigenMapSparseMatrix = Eigen::Map>; -#else -using EigenIndex = EIGEN_DEFAULT_DENSE_INDEX_TYPE; -template -using EigenMapSparseMatrix = Eigen::MappedSparseMatrix; -#endif - -// Matches Eigen::Map, Eigen::Ref, blocks, etc: -template -using is_eigen_dense_map = all_of, - std::is_base_of, T>>; -template -using is_eigen_mutable_map = std::is_base_of, T>; -template -using is_eigen_dense_plain - = all_of>, is_template_base_of>; -template -using is_eigen_sparse = is_template_base_of; -// Test for objects inheriting from EigenBase that aren't captured by the above. This -// basically covers anything that can be assigned to a dense matrix but that don't have a typical -// matrix data layout that can be copied from their .data(). For example, DiagonalMatrix and -// SelfAdjointView fall into this category. 
-template -using is_eigen_other - = all_of, - negation, is_eigen_dense_plain, is_eigen_sparse>>>; - -// Captures numpy/eigen conformability status (returned by EigenProps::conformable()): -template -struct EigenConformable { - bool conformable = false; - EigenIndex rows = 0, cols = 0; - EigenDStride stride{0, 0}; // Only valid if negativestrides is false! - bool negativestrides = false; // If true, do not use stride! - - // NOLINTNEXTLINE(google-explicit-constructor) - EigenConformable(bool fits = false) : conformable{fits} {} - // Matrix type: - EigenConformable(EigenIndex r, EigenIndex c, EigenIndex rstride, EigenIndex cstride) - : conformable{true}, rows{r}, cols{c}, - // TODO: when Eigen bug #747 is fixed, remove the tests for non-negativity. - // http://eigen.tuxfamily.org/bz/show_bug.cgi?id=747 - stride{EigenRowMajor ? (rstride > 0 ? rstride : 0) - : (cstride > 0 ? cstride : 0) /* outer stride */, - EigenRowMajor ? (cstride > 0 ? cstride : 0) - : (rstride > 0 ? rstride : 0) /* inner stride */}, - negativestrides{rstride < 0 || cstride < 0} {} - // Vector type: - EigenConformable(EigenIndex r, EigenIndex c, EigenIndex stride) - : EigenConformable(r, c, r == 1 ? c * stride : stride, c == 1 ? r : r * stride) {} - - template - bool stride_compatible() const { - // To have compatible strides, we need (on both dimensions) one of fully dynamic strides, - // matching strides, or a dimension size of 1 (in which case the stride value is - // irrelevant). Alternatively, if any dimension size is 0, the strides are not relevant - // (and numpy ≥ 1.23 sets the strides to 0 in that case, so we need to check explicitly). - if (negativestrides) { - return false; - } - if (rows == 0 || cols == 0) { - return true; - } - return (props::inner_stride == Eigen::Dynamic || props::inner_stride == stride.inner() - || (EigenRowMajor ? cols : rows) == 1) - && (props::outer_stride == Eigen::Dynamic || props::outer_stride == stride.outer() - || (EigenRowMajor ? rows : cols) == 1); - } - // NOLINTNEXTLINE(google-explicit-constructor) - operator bool() const { return conformable; } -}; - -template -struct eigen_extract_stride { - using type = Type; -}; -template -struct eigen_extract_stride> { - using type = StrideType; -}; -template -struct eigen_extract_stride> { - using type = StrideType; -}; - -// Helper struct for extracting information from an Eigen type -template -struct EigenProps { - using Type = Type_; - using Scalar = typename Type::Scalar; - using StrideType = typename eigen_extract_stride::type; - static constexpr EigenIndex rows = Type::RowsAtCompileTime, cols = Type::ColsAtCompileTime, - size = Type::SizeAtCompileTime; - static constexpr bool row_major = Type::IsRowMajor, - vector - = Type::IsVectorAtCompileTime, // At least one dimension has fixed size 1 - fixed_rows = rows != Eigen::Dynamic, fixed_cols = cols != Eigen::Dynamic, - fixed = size != Eigen::Dynamic, // Fully-fixed size - dynamic = !fixed_rows && !fixed_cols; // Fully-dynamic size - - template - using if_zero = std::integral_constant; - static constexpr EigenIndex inner_stride - = if_zero::value, - outer_stride = if_zero < StrideType::OuterStrideAtCompileTime, - vector ? size - : row_major ? cols - : rows > ::value; - static constexpr bool dynamic_stride - = inner_stride == Eigen::Dynamic && outer_stride == Eigen::Dynamic; - static constexpr bool requires_row_major - = !dynamic_stride && !vector && (row_major ? inner_stride : outer_stride) == 1; - static constexpr bool requires_col_major - = !dynamic_stride && !vector && (row_major ? 
outer_stride : inner_stride) == 1; - - // Takes an input array and determines whether we can make it fit into the Eigen type. If - // the array is a vector, we attempt to fit it into either an Eigen 1xN or Nx1 vector - // (preferring the latter if it will fit in either, i.e. for a fully dynamic matrix type). - static EigenConformable conformable(const array &a) { - const auto dims = a.ndim(); - if (dims < 1 || dims > 2) { - return false; - } - - if (dims == 2) { // Matrix type: require exact match (or dynamic) - - EigenIndex np_rows = a.shape(0), np_cols = a.shape(1), - np_rstride = a.strides(0) / static_cast(sizeof(Scalar)), - np_cstride = a.strides(1) / static_cast(sizeof(Scalar)); - if ((PYBIND11_SILENCE_MSVC_C4127(fixed_rows) && np_rows != rows) - || (PYBIND11_SILENCE_MSVC_C4127(fixed_cols) && np_cols != cols)) { - return false; - } - - return {np_rows, np_cols, np_rstride, np_cstride}; - } - - // Otherwise we're storing an n-vector. Only one of the strides will be used, but - // whichever is used, we want the (single) numpy stride value. - const EigenIndex n = a.shape(0), - stride = a.strides(0) / static_cast(sizeof(Scalar)); - - if (vector) { // Eigen type is a compile-time vector - if (PYBIND11_SILENCE_MSVC_C4127(fixed) && size != n) { - return false; // Vector size mismatch - } - return {rows == 1 ? 1 : n, cols == 1 ? 1 : n, stride}; - } - if (fixed) { - // The type has a fixed size, but is not a vector: abort - return false; - } - if (fixed_cols) { - // Since this isn't a vector, cols must be != 1. We allow this only if it exactly - // equals the number of elements (rows is Dynamic, and so 1 row is allowed). - if (cols != n) { - return false; - } - return {1, n, stride}; - } // Otherwise it's either fully dynamic, or column dynamic; both become a column vector - if (PYBIND11_SILENCE_MSVC_C4127(fixed_rows) && rows != n) { - return false; - } - return {n, 1, stride}; - } - - static constexpr bool show_writeable - = is_eigen_dense_map::value && is_eigen_mutable_map::value; - static constexpr bool show_order = is_eigen_dense_map::value; - static constexpr bool show_c_contiguous = show_order && requires_row_major; - static constexpr bool show_f_contiguous - = !show_c_contiguous && show_order && requires_col_major; - - static constexpr auto descriptor - = const_name("numpy.ndarray[") + npy_format_descriptor::name + const_name("[") - + const_name(const_name<(size_t) rows>(), const_name("m")) + const_name(", ") - + const_name(const_name<(size_t) cols>(), const_name("n")) + const_name("]") - + - // For a reference type (e.g. Ref) we have other constraints that might need to - // be satisfied: writeable=True (for a mutable reference), and, depending on the map's - // stride options, possibly f_contiguous or c_contiguous. We include them in the - // descriptor output to provide some hint as to why a TypeError is occurring (otherwise - // it can be confusing to see that a function accepts a 'numpy.ndarray[float64[3,2]]' and - // an error message that you *gave* a numpy.ndarray of the right type and dimensions. - const_name(", flags.writeable", "") - + const_name(", flags.c_contiguous", "") - + const_name(", flags.f_contiguous", "") + const_name("]"); -}; - -// Casts an Eigen type to numpy array. If given a base, the numpy array references the src data, -// otherwise it'll make a copy. writeable lets you turn off the writeable flag for the array. 
-template -handle -eigen_array_cast(typename props::Type const &src, handle base = handle(), bool writeable = true) { - constexpr ssize_t elem_size = sizeof(typename props::Scalar); - array a; - if (props::vector) { - a = array({src.size()}, {elem_size * src.innerStride()}, src.data(), base); - } else { - a = array({src.rows(), src.cols()}, - {elem_size * src.rowStride(), elem_size * src.colStride()}, - src.data(), - base); - } - - if (!writeable) { - array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_; - } - - return a.release(); -} - -// Takes an lvalue ref to some Eigen type and a (python) base object, creating a numpy array that -// reference the Eigen object's data with `base` as the python-registered base class (if omitted, -// the base will be set to None, and lifetime management is up to the caller). The numpy array is -// non-writeable if the given type is const. -template -handle eigen_ref_array(Type &src, handle parent = none()) { - // none here is to get past array's should-we-copy detection, which currently always - // copies when there is no base. Setting the base to None should be harmless. - return eigen_array_cast(src, parent, !std::is_const::value); -} - -// Takes a pointer to some dense, plain Eigen type, builds a capsule around it, then returns a -// numpy array that references the encapsulated data with a python-side reference to the capsule to -// tie its destruction to that of any dependent python objects. Const-ness is determined by -// whether or not the Type of the pointer given is const. -template ::value>> -handle eigen_encapsulate(Type *src) { - capsule base(src, [](void *o) { delete static_cast(o); }); - return eigen_ref_array(*src, base); -} - -// Type caster for regular, dense matrix types (e.g. MatrixXd), but not maps/refs/etc. of dense -// types. -template -struct type_caster::value>> { - using Scalar = typename Type::Scalar; - using props = EigenProps; - - bool load(handle src, bool convert) { - // If we're in no-convert mode, only load if given an array of the correct type - if (!convert && !isinstance>(src)) { - return false; - } - - // Coerce into an array, but don't do type conversion yet; the copy below handles it. - auto buf = array::ensure(src); - - if (!buf) { - return false; - } - - auto dims = buf.ndim(); - if (dims < 1 || dims > 2) { - return false; - } - - auto fits = props::conformable(buf); - if (!fits) { - return false; - } - - // Allocate the new type, then build a numpy reference into it - value = Type(fits.rows, fits.cols); - auto ref = reinterpret_steal(eigen_ref_array(value)); - if (dims == 1) { - ref = ref.squeeze(); - } else if (ref.ndim() == 1) { - buf = buf.squeeze(); - } - - int result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr()); - - if (result < 0) { // Copy failed! 
- PyErr_Clear(); - return false; - } - - return true; - } - -private: - // Cast implementation - template - static handle cast_impl(CType *src, return_value_policy policy, handle parent) { - switch (policy) { - case return_value_policy::take_ownership: - case return_value_policy::automatic: - return eigen_encapsulate(src); - case return_value_policy::move: - return eigen_encapsulate(new CType(std::move(*src))); - case return_value_policy::copy: - return eigen_array_cast(*src); - case return_value_policy::reference: - case return_value_policy::automatic_reference: - return eigen_ref_array(*src); - case return_value_policy::reference_internal: - return eigen_ref_array(*src, parent); - default: - throw cast_error("unhandled return_value_policy: should not happen!"); - }; - } - -public: - // Normal returned non-reference, non-const value: - static handle cast(Type &&src, return_value_policy /* policy */, handle parent) { - return cast_impl(&src, return_value_policy::move, parent); - } - // If you return a non-reference const, we mark the numpy array readonly: - static handle cast(const Type &&src, return_value_policy /* policy */, handle parent) { - return cast_impl(&src, return_value_policy::move, parent); - } - // lvalue reference return; default (automatic) becomes copy - static handle cast(Type &src, return_value_policy policy, handle parent) { - if (policy == return_value_policy::automatic - || policy == return_value_policy::automatic_reference) { - policy = return_value_policy::copy; - } - return cast_impl(&src, policy, parent); - } - // const lvalue reference return; default (automatic) becomes copy - static handle cast(const Type &src, return_value_policy policy, handle parent) { - if (policy == return_value_policy::automatic - || policy == return_value_policy::automatic_reference) { - policy = return_value_policy::copy; - } - return cast(&src, policy, parent); - } - // non-const pointer return - static handle cast(Type *src, return_value_policy policy, handle parent) { - return cast_impl(src, policy, parent); - } - // const pointer return - static handle cast(const Type *src, return_value_policy policy, handle parent) { - return cast_impl(src, policy, parent); - } - - static constexpr auto name = props::descriptor; - - // NOLINTNEXTLINE(google-explicit-constructor) - operator Type *() { return &value; } - // NOLINTNEXTLINE(google-explicit-constructor) - operator Type &() { return value; } - // NOLINTNEXTLINE(google-explicit-constructor) - operator Type &&() && { return std::move(value); } - template - using cast_op_type = movable_cast_op_type; - -private: - Type value; -}; - -// Base class for casting reference/map/block/etc. objects back to python. -template -struct eigen_map_caster { -private: - using props = EigenProps; - -public: - // Directly referencing a ref/map's data is a bit dangerous (whatever the map/ref points to has - // to stay around), but we'll allow it under the assumption that you know what you're doing - // (and have an appropriate keep_alive in place). We return a numpy array pointing directly at - // the ref's data (The numpy array ends up read-only if the ref was to a const matrix type.) - // Note that this means you need to ensure you don't destroy the object in some other way (e.g. - // with an appropriate keep_alive, or with a reference to a statically allocated matrix). 
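// --------------------------------------------------------------------------------------------
// Illustrative aside, not part of this patch: a sketch of the situation the comment above
// describes, assuming <pybind11/eigen.h> is included and `namespace py = pybind11;` is in
// scope. Mapping statically allocated storage is one of the few cases where returning a Map
// with a bare `reference` policy is safe; shorter-lived data needs keep_alive/reference_internal.
void register_table_view(py::module_ &m) {
    static Eigen::MatrixXd lookup_table = Eigen::MatrixXd::Random(4, 4);
    m.def(
        "table_view",
        []() { return Eigen::Map<const Eigen::MatrixXd>(lookup_table.data(), 4, 4); },
        py::return_value_policy::reference); // read-only numpy view aliasing the static data
}
// --------------------------------------------------------------------------------------------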
- static handle cast(const MapType &src, return_value_policy policy, handle parent) { - switch (policy) { - case return_value_policy::copy: - return eigen_array_cast(src); - case return_value_policy::reference_internal: - return eigen_array_cast(src, parent, is_eigen_mutable_map::value); - case return_value_policy::reference: - case return_value_policy::automatic: - case return_value_policy::automatic_reference: - return eigen_array_cast(src, none(), is_eigen_mutable_map::value); - default: - // move, take_ownership don't make any sense for a ref/map: - pybind11_fail("Invalid return_value_policy for Eigen Map/Ref/Block type"); - } - } - - static constexpr auto name = props::descriptor; - - // Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return - // types but not bound arguments). We still provide them (with an explicitly delete) so that - // you end up here if you try anyway. - bool load(handle, bool) = delete; - operator MapType() = delete; - template - using cast_op_type = MapType; -}; - -// We can return any map-like object (but can only load Refs, specialized next): -template -struct type_caster::value>> : eigen_map_caster {}; - -// Loader for Ref<...> arguments. See the documentation for info on how to make this work without -// copying (it requires some extra effort in many cases). -template -struct type_caster< - Eigen::Ref, - enable_if_t>::value>> - : public eigen_map_caster> { -private: - using Type = Eigen::Ref; - using props = EigenProps; - using Scalar = typename props::Scalar; - using MapType = Eigen::Map; - using Array - = array_t; - static constexpr bool need_writeable = is_eigen_mutable_map::value; - // Delay construction (these have no default constructor) - std::unique_ptr map; - std::unique_ptr ref; - // Our array. When possible, this is just a numpy array pointing to the source data, but - // sometimes we can't avoid copying (e.g. input is not a numpy array at all, has an - // incompatible layout, or is an array of a type that needs to be converted). Using a numpy - // temporary (rather than an Eigen temporary) saves an extra copy when we need both type - // conversion and storage order conversion. (Note that we refuse to use this temporary copy - // when loading an argument for a Ref with M non-const, i.e. a read-write reference). - Array copy_or_ref; - -public: - bool load(handle src, bool convert) { - // First check whether what we have is already an array of the right type. If not, we - // can't avoid a copy (because the copy is also going to do type conversion). - bool need_copy = !isinstance(src); - - EigenConformable fits; - if (!need_copy) { - // We don't need a converting copy, but we also need to check whether the strides are - // compatible with the Ref's stride requirements - auto aref = reinterpret_borrow(src); - - if (aref && (!need_writeable || aref.writeable())) { - fits = props::conformable(aref); - if (!fits) { - return false; // Incompatible dimensions - } - if (!fits.template stride_compatible()) { - need_copy = true; - } else { - copy_or_ref = std::move(aref); - } - } else { - need_copy = true; - } - } - - if (need_copy) { - // We need to copy: If we need a mutable reference, or we're not supposed to convert - // (either because we're in the no-convert overload pass, or because we're explicitly - // instructed not to copy (via `py::arg().noconvert()`) we have to fail loading. 
- if (!convert || need_writeable) { - return false; - } - - Array copy = Array::ensure(src); - if (!copy) { - return false; - } - fits = props::conformable(copy); - if (!fits || !fits.template stride_compatible()) { - return false; - } - copy_or_ref = std::move(copy); - loader_life_support::add_patient(copy_or_ref); - } - - ref.reset(); - map.reset(new MapType(data(copy_or_ref), - fits.rows, - fits.cols, - make_stride(fits.stride.outer(), fits.stride.inner()))); - ref.reset(new Type(*map)); - - return true; - } - - // NOLINTNEXTLINE(google-explicit-constructor) - operator Type *() { return ref.get(); } - // NOLINTNEXTLINE(google-explicit-constructor) - operator Type &() { return *ref; } - template - using cast_op_type = pybind11::detail::cast_op_type<_T>; - -private: - template ::value, int> = 0> - Scalar *data(Array &a) { - return a.mutable_data(); - } - - template ::value, int> = 0> - const Scalar *data(Array &a) { - return a.data(); - } - - // Attempt to figure out a constructor of `Stride` that will work. - // If both strides are fixed, use a default constructor: - template - using stride_ctor_default = bool_constant::value>; - // Otherwise, if there is a two-index constructor, assume it is (outer,inner) like - // Eigen::Stride, and use it: - template - using stride_ctor_dual - = bool_constant::value - && std::is_constructible::value>; - // Otherwise, if there is a one-index constructor, and just one of the strides is dynamic, use - // it (passing whichever stride is dynamic). - template - using stride_ctor_outer - = bool_constant, stride_ctor_dual>::value - && S::OuterStrideAtCompileTime == Eigen::Dynamic - && S::InnerStrideAtCompileTime != Eigen::Dynamic - && std::is_constructible::value>; - template - using stride_ctor_inner - = bool_constant, stride_ctor_dual>::value - && S::InnerStrideAtCompileTime == Eigen::Dynamic - && S::OuterStrideAtCompileTime != Eigen::Dynamic - && std::is_constructible::value>; - - template ::value, int> = 0> - static S make_stride(EigenIndex, EigenIndex) { - return S(); - } - template ::value, int> = 0> - static S make_stride(EigenIndex outer, EigenIndex inner) { - return S(outer, inner); - } - template ::value, int> = 0> - static S make_stride(EigenIndex outer, EigenIndex) { - return S(outer); - } - template ::value, int> = 0> - static S make_stride(EigenIndex, EigenIndex inner) { - return S(inner); - } -}; - -// type_caster for special matrix types (e.g. DiagonalMatrix), which are EigenBase, but not -// EigenDense (i.e. they don't have a data(), at least not with the usual matrix layout). -// load() is not supported, but we can cast them into the python domain by first copying to a -// regular Eigen::Matrix, then casting that. -template -struct type_caster::value>> { -protected: - using Matrix - = Eigen::Matrix; - using props = EigenProps; - -public: - static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) { - handle h = eigen_encapsulate(new Matrix(src)); - return h; - } - static handle cast(const Type *src, return_value_policy policy, handle parent) { - return cast(*src, policy, parent); - } - - static constexpr auto name = props::descriptor; - - // Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return - // types but not bound arguments). We still provide them (with an explicitly delete) so that - // you end up here if you try anyway. 
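// --------------------------------------------------------------------------------------------
// Illustrative aside, not part of this patch (hypothetical bindings; assumes the usual
// <pybind11/eigen.h> include and `namespace py = pybind11;`): such "other" EigenBase types are
// return-only. They are copied into a dense Eigen::Matrix and shipped to Python as a 2-D
// array, but cannot appear as bound argument types.
void register_special_matrices(py::module_ &m) {
    m.def("make_diagonal", [](const Eigen::VectorXd &v) {
        Eigen::DiagonalMatrix<double, Eigen::Dynamic> d(v.size());
        d.diagonal() = v;
        return d; // arrives in Python as a dense (n, n) float64 array
    });
    m.def("symmetric_upper", [](const Eigen::MatrixXd &a) {
        return a.selfadjointView<Eigen::Upper>(); // likewise copied to a dense array
    });
}
// --------------------------------------------------------------------------------------------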
- bool load(handle, bool) = delete; - operator Type() = delete; - template - using cast_op_type = Type; -}; - -template -struct type_caster::value>> { - using Scalar = typename Type::Scalar; - using StorageIndex = remove_reference_t().outerIndexPtr())>; - using Index = typename Type::Index; - static constexpr bool rowMajor = Type::IsRowMajor; - - bool load(handle src, bool) { - if (!src) { - return false; - } - - auto obj = reinterpret_borrow(src); - object sparse_module = module_::import("scipy.sparse"); - object matrix_type = sparse_module.attr(rowMajor ? "csr_matrix" : "csc_matrix"); - - if (!type::handle_of(obj).is(matrix_type)) { - try { - obj = matrix_type(obj); - } catch (const error_already_set &) { - return false; - } - } - - auto values = array_t((object) obj.attr("data")); - auto innerIndices = array_t((object) obj.attr("indices")); - auto outerIndices = array_t((object) obj.attr("indptr")); - auto shape = pybind11::tuple((pybind11::object) obj.attr("shape")); - auto nnz = obj.attr("nnz").cast(); - - if (!values || !innerIndices || !outerIndices) { - return false; - } - - value = EigenMapSparseMatrix(shape[0].cast(), - shape[1].cast(), - std::move(nnz), - outerIndices.mutable_data(), - innerIndices.mutable_data(), - values.mutable_data()); - - return true; - } - - static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) { - const_cast(src).makeCompressed(); - - object matrix_type - = module_::import("scipy.sparse").attr(rowMajor ? "csr_matrix" : "csc_matrix"); - - array data(src.nonZeros(), src.valuePtr()); - array outerIndices((rowMajor ? src.rows() : src.cols()) + 1, src.outerIndexPtr()); - array innerIndices(src.nonZeros(), src.innerIndexPtr()); - - return matrix_type(pybind11::make_tuple( - std::move(data), std::move(innerIndices), std::move(outerIndices)), - pybind11::make_tuple(src.rows(), src.cols())) - .release(); - } - - PYBIND11_TYPE_CASTER(Type, - const_name<(Type::IsRowMajor) != 0>("scipy.sparse.csr_matrix[", - "scipy.sparse.csc_matrix[") - + npy_format_descriptor::name + const_name("]")); -}; - -PYBIND11_NAMESPACE_END(detail) -PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) +#include "eigen/matrix.h" diff --git a/include/pybind11/eigen/matrix.h b/include/pybind11/eigen/matrix.h new file mode 100644 index 000000000..5f5ad3867 --- /dev/null +++ b/include/pybind11/eigen/matrix.h @@ -0,0 +1,713 @@ +/* + pybind11/eigen/matrix.h: Transparent conversion for dense and sparse Eigen matrices + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "../numpy.h" + +// Similar to comments & pragma block in eigen_tensor.h. PLEASE KEEP IN SYNC. +/* HINT: To suppress warnings originating from the Eigen headers, use -isystem. + See also: + https://stackoverflow.com/questions/2579576/i-dir-vs-isystem-dir + https://stackoverflow.com/questions/1741816/isystem-for-ms-visual-studio-c-compiler +*/ +// The C4127 suppression was introduced for Eigen 3.4.0. In theory we could +// make it version specific, or even remove it later, but considering that +// 1. C4127 is generally far more distracting than useful for modern template code, and +// 2. we definitely want to ignore any MSVC warnings originating from Eigen code, +// it is probably best to keep this around indefinitely. 
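// --------------------------------------------------------------------------------------------
// Illustrative aside, not part of this patch: a minimal consumer of the new split header.
// Module and function names below are hypothetical. <pybind11/eigen.h> keeps working and now
// simply forwards to this file; adding Eigen's include directory with -isystem (see the hint
// above) keeps Eigen's own warnings out of user builds.
#include <pybind11/eigen/matrix.h> // instead of <pybind11/eigen.h>
#include <pybind11/pybind11.h>

namespace py = pybind11;

Eigen::MatrixXd scale(const Eigen::MatrixXd &xs, double f) { return xs * f; }

PYBIND11_MODULE(example, m) {
    m.def("scale", &scale); // float64 numpy arrays convert to/from Eigen::MatrixXd
}
// --------------------------------------------------------------------------------------------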
+#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4127) // C4127: conditional expression is constant +# pragma warning(disable : 5054) // https://github.com/pybind/pybind11/pull/3741 +// C5054: operator '&': deprecated between enumerations of different types +#elif defined(__MINGW32__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + +#include +#include + +#if defined(_MSC_VER) +# pragma warning(pop) +#elif defined(__MINGW32__) +# pragma GCC diagnostic pop +#endif + +// Eigen prior to 3.2.7 doesn't have proper move constructors--but worse, some classes get implicit +// move constructors that break things. We could detect this an explicitly copy, but an extra copy +// of matrices seems highly undesirable. +static_assert(EIGEN_VERSION_AT_LEAST(3, 2, 7), + "Eigen matrix support in pybind11 requires Eigen >= 3.2.7"); + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +// Provide a convenience alias for easier pass-by-ref usage with fully dynamic strides: +using EigenDStride = Eigen::Stride; +template +using EigenDRef = Eigen::Ref; +template +using EigenDMap = Eigen::Map; + +PYBIND11_NAMESPACE_BEGIN(detail) + +#if EIGEN_VERSION_AT_LEAST(3, 3, 0) +using EigenIndex = Eigen::Index; +template +using EigenMapSparseMatrix = Eigen::Map>; +#else +using EigenIndex = EIGEN_DEFAULT_DENSE_INDEX_TYPE; +template +using EigenMapSparseMatrix = Eigen::MappedSparseMatrix; +#endif + +// Matches Eigen::Map, Eigen::Ref, blocks, etc: +template +using is_eigen_dense_map = all_of, + std::is_base_of, T>>; +template +using is_eigen_mutable_map = std::is_base_of, T>; +template +using is_eigen_dense_plain + = all_of>, is_template_base_of>; +template +using is_eigen_sparse = is_template_base_of; +// Test for objects inheriting from EigenBase that aren't captured by the above. This +// basically covers anything that can be assigned to a dense matrix but that don't have a typical +// matrix data layout that can be copied from their .data(). For example, DiagonalMatrix and +// SelfAdjointView fall into this category. +template +using is_eigen_other + = all_of, + negation, is_eigen_dense_plain, is_eigen_sparse>>>; + +// Captures numpy/eigen conformability status (returned by EigenProps::conformable()): +template +struct EigenConformable { + bool conformable = false; + EigenIndex rows = 0, cols = 0; + EigenDStride stride{0, 0}; // Only valid if negativestrides is false! + bool negativestrides = false; // If true, do not use stride! + + // NOLINTNEXTLINE(google-explicit-constructor) + EigenConformable(bool fits = false) : conformable{fits} {} + // Matrix type: + EigenConformable(EigenIndex r, EigenIndex c, EigenIndex rstride, EigenIndex cstride) + : conformable{true}, rows{r}, cols{c}, + // TODO: when Eigen bug #747 is fixed, remove the tests for non-negativity. + // http://eigen.tuxfamily.org/bz/show_bug.cgi?id=747 + stride{EigenRowMajor ? (rstride > 0 ? rstride : 0) + : (cstride > 0 ? cstride : 0) /* outer stride */, + EigenRowMajor ? (cstride > 0 ? cstride : 0) + : (rstride > 0 ? rstride : 0) /* inner stride */}, + negativestrides{rstride < 0 || cstride < 0} {} + // Vector type: + EigenConformable(EigenIndex r, EigenIndex c, EigenIndex stride) + : EigenConformable(r, c, r == 1 ? c * stride : stride, c == 1 ? 
r : r * stride) {} + + template + bool stride_compatible() const { + // To have compatible strides, we need (on both dimensions) one of fully dynamic strides, + // matching strides, or a dimension size of 1 (in which case the stride value is + // irrelevant). Alternatively, if any dimension size is 0, the strides are not relevant + // (and numpy ≥ 1.23 sets the strides to 0 in that case, so we need to check explicitly). + if (negativestrides) { + return false; + } + if (rows == 0 || cols == 0) { + return true; + } + return (props::inner_stride == Eigen::Dynamic || props::inner_stride == stride.inner() + || (EigenRowMajor ? cols : rows) == 1) + && (props::outer_stride == Eigen::Dynamic || props::outer_stride == stride.outer() + || (EigenRowMajor ? rows : cols) == 1); + } + // NOLINTNEXTLINE(google-explicit-constructor) + operator bool() const { return conformable; } +}; + +template +struct eigen_extract_stride { + using type = Type; +}; +template +struct eigen_extract_stride> { + using type = StrideType; +}; +template +struct eigen_extract_stride> { + using type = StrideType; +}; + +// Helper struct for extracting information from an Eigen type +template +struct EigenProps { + using Type = Type_; + using Scalar = typename Type::Scalar; + using StrideType = typename eigen_extract_stride::type; + static constexpr EigenIndex rows = Type::RowsAtCompileTime, cols = Type::ColsAtCompileTime, + size = Type::SizeAtCompileTime; + static constexpr bool row_major = Type::IsRowMajor, + vector + = Type::IsVectorAtCompileTime, // At least one dimension has fixed size 1 + fixed_rows = rows != Eigen::Dynamic, fixed_cols = cols != Eigen::Dynamic, + fixed = size != Eigen::Dynamic, // Fully-fixed size + dynamic = !fixed_rows && !fixed_cols; // Fully-dynamic size + + template + using if_zero = std::integral_constant; + static constexpr EigenIndex inner_stride + = if_zero::value, + outer_stride = if_zero < StrideType::OuterStrideAtCompileTime, + vector ? size + : row_major ? cols + : rows > ::value; + static constexpr bool dynamic_stride + = inner_stride == Eigen::Dynamic && outer_stride == Eigen::Dynamic; + static constexpr bool requires_row_major + = !dynamic_stride && !vector && (row_major ? inner_stride : outer_stride) == 1; + static constexpr bool requires_col_major + = !dynamic_stride && !vector && (row_major ? outer_stride : inner_stride) == 1; + + // Takes an input array and determines whether we can make it fit into the Eigen type. If + // the array is a vector, we attempt to fit it into either an Eigen 1xN or Nx1 vector + // (preferring the latter if it will fit in either, i.e. for a fully dynamic matrix type). + static EigenConformable conformable(const array &a) { + const auto dims = a.ndim(); + if (dims < 1 || dims > 2) { + return false; + } + + if (dims == 2) { // Matrix type: require exact match (or dynamic) + + EigenIndex np_rows = a.shape(0), np_cols = a.shape(1), + np_rstride = a.strides(0) / static_cast(sizeof(Scalar)), + np_cstride = a.strides(1) / static_cast(sizeof(Scalar)); + if ((PYBIND11_SILENCE_MSVC_C4127(fixed_rows) && np_rows != rows) + || (PYBIND11_SILENCE_MSVC_C4127(fixed_cols) && np_cols != cols)) { + return false; + } + + return {np_rows, np_cols, np_rstride, np_cstride}; + } + + // Otherwise we're storing an n-vector. Only one of the strides will be used, but + // whichever is used, we want the (single) numpy stride value. 
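// --------------------------------------------------------------------------------------------
// Illustrative aside, not part of this patch (hypothetical bindings, usual py:: setup assumed):
// the vector-fitting rules above as seen from Python. A 1-D array of length n satisfies both
// column- and row-vector parameters; for a fully dynamic matrix it is taken as an n-by-1 column.
void register_vector_fit(py::module_ &m) {
    m.def("col_sum", [](const Eigen::VectorXd &v) { return v.sum(); });    // accepts shape (n,)
    m.def("row_sum", [](const Eigen::RowVectorXd &v) { return v.sum(); }); // also accepts (n,)
    m.def("mat_sum", [](const Eigen::MatrixXd &a) { return a.sum(); });    // (n,) loads as n x 1
}
// --------------------------------------------------------------------------------------------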
+ const EigenIndex n = a.shape(0), + stride = a.strides(0) / static_cast(sizeof(Scalar)); + + if (vector) { // Eigen type is a compile-time vector + if (PYBIND11_SILENCE_MSVC_C4127(fixed) && size != n) { + return false; // Vector size mismatch + } + return {rows == 1 ? 1 : n, cols == 1 ? 1 : n, stride}; + } + if (fixed) { + // The type has a fixed size, but is not a vector: abort + return false; + } + if (fixed_cols) { + // Since this isn't a vector, cols must be != 1. We allow this only if it exactly + // equals the number of elements (rows is Dynamic, and so 1 row is allowed). + if (cols != n) { + return false; + } + return {1, n, stride}; + } // Otherwise it's either fully dynamic, or column dynamic; both become a column vector + if (PYBIND11_SILENCE_MSVC_C4127(fixed_rows) && rows != n) { + return false; + } + return {n, 1, stride}; + } + + static constexpr bool show_writeable + = is_eigen_dense_map::value && is_eigen_mutable_map::value; + static constexpr bool show_order = is_eigen_dense_map::value; + static constexpr bool show_c_contiguous = show_order && requires_row_major; + static constexpr bool show_f_contiguous + = !show_c_contiguous && show_order && requires_col_major; + + static constexpr auto descriptor + = const_name("numpy.ndarray[") + npy_format_descriptor::name + const_name("[") + + const_name(const_name<(size_t) rows>(), const_name("m")) + const_name(", ") + + const_name(const_name<(size_t) cols>(), const_name("n")) + const_name("]") + + + // For a reference type (e.g. Ref) we have other constraints that might need to + // be satisfied: writeable=True (for a mutable reference), and, depending on the map's + // stride options, possibly f_contiguous or c_contiguous. We include them in the + // descriptor output to provide some hint as to why a TypeError is occurring (otherwise + // it can be confusing to see that a function accepts a 'numpy.ndarray[float64[3,2]]' and + // an error message that you *gave* a numpy.ndarray of the right type and dimensions. + const_name(", flags.writeable", "") + + const_name(", flags.c_contiguous", "") + + const_name(", flags.f_contiguous", "") + const_name("]"); +}; + +// Casts an Eigen type to numpy array. If given a base, the numpy array references the src data, +// otherwise it'll make a copy. writeable lets you turn off the writeable flag for the array. +template +handle +eigen_array_cast(typename props::Type const &src, handle base = handle(), bool writeable = true) { + constexpr ssize_t elem_size = sizeof(typename props::Scalar); + array a; + if (props::vector) { + a = array({src.size()}, {elem_size * src.innerStride()}, src.data(), base); + } else { + a = array({src.rows(), src.cols()}, + {elem_size * src.rowStride(), elem_size * src.colStride()}, + src.data(), + base); + } + + if (!writeable) { + array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_; + } + + return a.release(); +} + +// Takes an lvalue ref to some Eigen type and a (python) base object, creating a numpy array that +// reference the Eigen object's data with `base` as the python-registered base class (if omitted, +// the base will be set to None, and lifetime management is up to the caller). The numpy array is +// non-writeable if the given type is const. +template +handle eigen_ref_array(Type &src, handle parent = none()) { + // none here is to get past array's should-we-copy detection, which currently always + // copies when there is no base. Setting the base to None should be harmless. 
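// --------------------------------------------------------------------------------------------
// Illustrative aside, not part of this patch (hypothetical class, usual py:: setup assumed):
// this helper is what makes the classic "share a member matrix with numpy" pattern work.
// With reference_internal the returned array aliases the C++ data and keeps its owner alive;
// a const reference comes back as a read-only array.
struct Holder {
    Eigen::MatrixXd mat = Eigen::MatrixXd::Zero(100, 100);
    Eigen::MatrixXd &get() { return mat; }
    const Eigen::MatrixXd &view() const { return mat; }
};

void register_holder(py::module_ &m) {
    py::class_<Holder>(m, "Holder")
        .def(py::init<>())
        .def("copy", &Holder::get) // default policy for an lvalue ref: copy
        .def("get", &Holder::get, py::return_value_policy::reference_internal)    // writable view
        .def("view", &Holder::view, py::return_value_policy::reference_internal); // read-only view
}
// --------------------------------------------------------------------------------------------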
+ return eigen_array_cast(src, parent, !std::is_const::value); +} + +// Takes a pointer to some dense, plain Eigen type, builds a capsule around it, then returns a +// numpy array that references the encapsulated data with a python-side reference to the capsule to +// tie its destruction to that of any dependent python objects. Const-ness is determined by +// whether or not the Type of the pointer given is const. +template ::value>> +handle eigen_encapsulate(Type *src) { + capsule base(src, [](void *o) { delete static_cast(o); }); + return eigen_ref_array(*src, base); +} + +// Type caster for regular, dense matrix types (e.g. MatrixXd), but not maps/refs/etc. of dense +// types. +template +struct type_caster::value>> { + using Scalar = typename Type::Scalar; + using props = EigenProps; + + bool load(handle src, bool convert) { + // If we're in no-convert mode, only load if given an array of the correct type + if (!convert && !isinstance>(src)) { + return false; + } + + // Coerce into an array, but don't do type conversion yet; the copy below handles it. + auto buf = array::ensure(src); + + if (!buf) { + return false; + } + + auto dims = buf.ndim(); + if (dims < 1 || dims > 2) { + return false; + } + + auto fits = props::conformable(buf); + if (!fits) { + return false; + } + + // Allocate the new type, then build a numpy reference into it + value = Type(fits.rows, fits.cols); + auto ref = reinterpret_steal(eigen_ref_array(value)); + if (dims == 1) { + ref = ref.squeeze(); + } else if (ref.ndim() == 1) { + buf = buf.squeeze(); + } + + int result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr()); + + if (result < 0) { // Copy failed! + PyErr_Clear(); + return false; + } + + return true; + } + +private: + // Cast implementation + template + static handle cast_impl(CType *src, return_value_policy policy, handle parent) { + switch (policy) { + case return_value_policy::take_ownership: + case return_value_policy::automatic: + return eigen_encapsulate(src); + case return_value_policy::move: + return eigen_encapsulate(new CType(std::move(*src))); + case return_value_policy::copy: + return eigen_array_cast(*src); + case return_value_policy::reference: + case return_value_policy::automatic_reference: + return eigen_ref_array(*src); + case return_value_policy::reference_internal: + return eigen_ref_array(*src, parent); + default: + throw cast_error("unhandled return_value_policy: should not happen!"); + }; + } + +public: + // Normal returned non-reference, non-const value: + static handle cast(Type &&src, return_value_policy /* policy */, handle parent) { + return cast_impl(&src, return_value_policy::move, parent); + } + // If you return a non-reference const, we mark the numpy array readonly: + static handle cast(const Type &&src, return_value_policy /* policy */, handle parent) { + return cast_impl(&src, return_value_policy::move, parent); + } + // lvalue reference return; default (automatic) becomes copy + static handle cast(Type &src, return_value_policy policy, handle parent) { + if (policy == return_value_policy::automatic + || policy == return_value_policy::automatic_reference) { + policy = return_value_policy::copy; + } + return cast_impl(&src, policy, parent); + } + // const lvalue reference return; default (automatic) becomes copy + static handle cast(const Type &src, return_value_policy policy, handle parent) { + if (policy == return_value_policy::automatic + || policy == return_value_policy::automatic_reference) { + policy = return_value_policy::copy; + } + return cast(&src, 
policy, parent); + } + // non-const pointer return + static handle cast(Type *src, return_value_policy policy, handle parent) { + return cast_impl(src, policy, parent); + } + // const pointer return + static handle cast(const Type *src, return_value_policy policy, handle parent) { + return cast_impl(src, policy, parent); + } + + static constexpr auto name = props::descriptor; + + // NOLINTNEXTLINE(google-explicit-constructor) + operator Type *() { return &value; } + // NOLINTNEXTLINE(google-explicit-constructor) + operator Type &() { return value; } + // NOLINTNEXTLINE(google-explicit-constructor) + operator Type &&() && { return std::move(value); } + template + using cast_op_type = movable_cast_op_type; + +private: + Type value; +}; + +// Base class for casting reference/map/block/etc. objects back to python. +template +struct eigen_map_caster { +private: + using props = EigenProps; + +public: + // Directly referencing a ref/map's data is a bit dangerous (whatever the map/ref points to has + // to stay around), but we'll allow it under the assumption that you know what you're doing + // (and have an appropriate keep_alive in place). We return a numpy array pointing directly at + // the ref's data (The numpy array ends up read-only if the ref was to a const matrix type.) + // Note that this means you need to ensure you don't destroy the object in some other way (e.g. + // with an appropriate keep_alive, or with a reference to a statically allocated matrix). + static handle cast(const MapType &src, return_value_policy policy, handle parent) { + switch (policy) { + case return_value_policy::copy: + return eigen_array_cast(src); + case return_value_policy::reference_internal: + return eigen_array_cast(src, parent, is_eigen_mutable_map::value); + case return_value_policy::reference: + case return_value_policy::automatic: + case return_value_policy::automatic_reference: + return eigen_array_cast(src, none(), is_eigen_mutable_map::value); + default: + // move, take_ownership don't make any sense for a ref/map: + pybind11_fail("Invalid return_value_policy for Eigen Map/Ref/Block type"); + } + } + + static constexpr auto name = props::descriptor; + + // Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return + // types but not bound arguments). We still provide them (with an explicitly delete) so that + // you end up here if you try anyway. + bool load(handle, bool) = delete; + operator MapType() = delete; + template + using cast_op_type = MapType; +}; + +// We can return any map-like object (but can only load Refs, specialized next): +template +struct type_caster::value>> : eigen_map_caster {}; + +// Loader for Ref<...> arguments. See the documentation for info on how to make this work without +// copying (it requires some extra effort in many cases). +template +struct type_caster< + Eigen::Ref, + enable_if_t>::value>> + : public eigen_map_caster> { +private: + using Type = Eigen::Ref; + using props = EigenProps; + using Scalar = typename props::Scalar; + using MapType = Eigen::Map; + using Array + = array_t; + static constexpr bool need_writeable = is_eigen_mutable_map::value; + // Delay construction (these have no default constructor) + std::unique_ptr map; + std::unique_ptr ref; + // Our array. When possible, this is just a numpy array pointing to the source data, but + // sometimes we can't avoid copying (e.g. input is not a numpy array at all, has an + // incompatible layout, or is an array of a type that needs to be converted). 
Using a numpy + // temporary (rather than an Eigen temporary) saves an extra copy when we need both type + // conversion and storage order conversion. (Note that we refuse to use this temporary copy + // when loading an argument for a Ref with M non-const, i.e. a read-write reference). + Array copy_or_ref; + +public: + bool load(handle src, bool convert) { + // First check whether what we have is already an array of the right type. If not, we + // can't avoid a copy (because the copy is also going to do type conversion). + bool need_copy = !isinstance(src); + + EigenConformable fits; + if (!need_copy) { + // We don't need a converting copy, but we also need to check whether the strides are + // compatible with the Ref's stride requirements + auto aref = reinterpret_borrow(src); + + if (aref && (!need_writeable || aref.writeable())) { + fits = props::conformable(aref); + if (!fits) { + return false; // Incompatible dimensions + } + if (!fits.template stride_compatible()) { + need_copy = true; + } else { + copy_or_ref = std::move(aref); + } + } else { + need_copy = true; + } + } + + if (need_copy) { + // We need to copy: If we need a mutable reference, or we're not supposed to convert + // (either because we're in the no-convert overload pass, or because we're explicitly + // instructed not to copy (via `py::arg().noconvert()`) we have to fail loading. + if (!convert || need_writeable) { + return false; + } + + Array copy = Array::ensure(src); + if (!copy) { + return false; + } + fits = props::conformable(copy); + if (!fits || !fits.template stride_compatible()) { + return false; + } + copy_or_ref = std::move(copy); + loader_life_support::add_patient(copy_or_ref); + } + + ref.reset(); + map.reset(new MapType(data(copy_or_ref), + fits.rows, + fits.cols, + make_stride(fits.stride.outer(), fits.stride.inner()))); + ref.reset(new Type(*map)); + + return true; + } + + // NOLINTNEXTLINE(google-explicit-constructor) + operator Type *() { return ref.get(); } + // NOLINTNEXTLINE(google-explicit-constructor) + operator Type &() { return *ref; } + template + using cast_op_type = pybind11::detail::cast_op_type<_T>; + +private: + template ::value, int> = 0> + Scalar *data(Array &a) { + return a.mutable_data(); + } + + template ::value, int> = 0> + const Scalar *data(Array &a) { + return a.data(); + } + + // Attempt to figure out a constructor of `Stride` that will work. + // If both strides are fixed, use a default constructor: + template + using stride_ctor_default = bool_constant::value>; + // Otherwise, if there is a two-index constructor, assume it is (outer,inner) like + // Eigen::Stride, and use it: + template + using stride_ctor_dual + = bool_constant::value + && std::is_constructible::value>; + // Otherwise, if there is a one-index constructor, and just one of the strides is dynamic, use + // it (passing whichever stride is dynamic). 
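// --------------------------------------------------------------------------------------------
// Illustrative aside, not part of this patch (hypothetical bindings, usual py:: setup assumed):
// the loader above from the caller's point of view. A mutable Ref never uses the temporary
// copy, so it requires a writeable float64 array with compatible strides; a const Ref accepts
// anything convertible and copies only when needed, unless noconvert() forbids that copy.
void register_ref_examples(py::module_ &m) {
    m.def("scale_inplace", [](Eigen::Ref<Eigen::VectorXd> v, double f) { v *= f; });
    m.def("norm", [](const Eigen::Ref<const Eigen::VectorXd> &v) { return v.norm(); });
    m.def(
        "norm_nocopy",
        [](const Eigen::Ref<const Eigen::VectorXd> &v) { return v.norm(); },
        py::arg("v").noconvert()); // reject inputs that would require a converting copy
}
// --------------------------------------------------------------------------------------------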
+ template + using stride_ctor_outer + = bool_constant, stride_ctor_dual>::value + && S::OuterStrideAtCompileTime == Eigen::Dynamic + && S::InnerStrideAtCompileTime != Eigen::Dynamic + && std::is_constructible::value>; + template + using stride_ctor_inner + = bool_constant, stride_ctor_dual>::value + && S::InnerStrideAtCompileTime == Eigen::Dynamic + && S::OuterStrideAtCompileTime != Eigen::Dynamic + && std::is_constructible::value>; + + template ::value, int> = 0> + static S make_stride(EigenIndex, EigenIndex) { + return S(); + } + template ::value, int> = 0> + static S make_stride(EigenIndex outer, EigenIndex inner) { + return S(outer, inner); + } + template ::value, int> = 0> + static S make_stride(EigenIndex outer, EigenIndex) { + return S(outer); + } + template ::value, int> = 0> + static S make_stride(EigenIndex, EigenIndex inner) { + return S(inner); + } +}; + +// type_caster for special matrix types (e.g. DiagonalMatrix), which are EigenBase, but not +// EigenDense (i.e. they don't have a data(), at least not with the usual matrix layout). +// load() is not supported, but we can cast them into the python domain by first copying to a +// regular Eigen::Matrix, then casting that. +template +struct type_caster::value>> { +protected: + using Matrix + = Eigen::Matrix; + using props = EigenProps; + +public: + static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) { + handle h = eigen_encapsulate(new Matrix(src)); + return h; + } + static handle cast(const Type *src, return_value_policy policy, handle parent) { + return cast(*src, policy, parent); + } + + static constexpr auto name = props::descriptor; + + // Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return + // types but not bound arguments). We still provide them (with an explicitly delete) so that + // you end up here if you try anyway. + bool load(handle, bool) = delete; + operator Type() = delete; + template + using cast_op_type = Type; +}; + +template +struct type_caster::value>> { + using Scalar = typename Type::Scalar; + using StorageIndex = remove_reference_t().outerIndexPtr())>; + using Index = typename Type::Index; + static constexpr bool rowMajor = Type::IsRowMajor; + + bool load(handle src, bool) { + if (!src) { + return false; + } + + auto obj = reinterpret_borrow(src); + object sparse_module = module_::import("scipy.sparse"); + object matrix_type = sparse_module.attr(rowMajor ? "csr_matrix" : "csc_matrix"); + + if (!type::handle_of(obj).is(matrix_type)) { + try { + obj = matrix_type(obj); + } catch (const error_already_set &) { + return false; + } + } + + auto values = array_t((object) obj.attr("data")); + auto innerIndices = array_t((object) obj.attr("indices")); + auto outerIndices = array_t((object) obj.attr("indptr")); + auto shape = pybind11::tuple((pybind11::object) obj.attr("shape")); + auto nnz = obj.attr("nnz").cast(); + + if (!values || !innerIndices || !outerIndices) { + return false; + } + + value = EigenMapSparseMatrix(shape[0].cast(), + shape[1].cast(), + std::move(nnz), + outerIndices.mutable_data(), + innerIndices.mutable_data(), + values.mutable_data()); + + return true; + } + + static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) { + const_cast(src).makeCompressed(); + + object matrix_type + = module_::import("scipy.sparse").attr(rowMajor ? "csr_matrix" : "csc_matrix"); + + array data(src.nonZeros(), src.valuePtr()); + array outerIndices((rowMajor ? 
src.rows() : src.cols()) + 1, src.outerIndexPtr()); + array innerIndices(src.nonZeros(), src.innerIndexPtr()); + + return matrix_type(pybind11::make_tuple( + std::move(data), std::move(innerIndices), std::move(outerIndices)), + pybind11::make_tuple(src.rows(), src.cols())) + .release(); + } + + PYBIND11_TYPE_CASTER(Type, + const_name<(Type::IsRowMajor) != 0>("scipy.sparse.csr_matrix[", + "scipy.sparse.csc_matrix[") + + npy_format_descriptor::name + const_name("]")); +}; + +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/include/pybind11/eigen/tensor.h b/include/pybind11/eigen/tensor.h new file mode 100644 index 000000000..a823c0f39 --- /dev/null +++ b/include/pybind11/eigen/tensor.h @@ -0,0 +1,518 @@ +/* + pybind11/eigen/tensor.h: Transparent conversion for Eigen tensors + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "../numpy.h" + +#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) +static_assert(__GNUC__ > 5, "Eigen Tensor support in pybind11 requires GCC > 5.0"); +#endif + +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4554) // Tensor.h warning +# pragma warning(disable : 4127) // Tensor.h warning +#elif defined(__MINGW32__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + +#include + +#if defined(_MSC_VER) +# pragma warning(pop) +#elif defined(__MINGW32__) +# pragma GCC diagnostic pop +#endif + +static_assert(EIGEN_VERSION_AT_LEAST(3, 3, 0), + "Eigen Tensor support in pybind11 requires Eigen >= 3.3.0"); + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +PYBIND11_NAMESPACE_BEGIN(detail) + +inline bool is_tensor_aligned(const void *data) { + return (reinterpret_cast(data) % EIGEN_DEFAULT_ALIGN_BYTES) == 0; +} + +template +constexpr int compute_array_flag_from_tensor() { + static_assert((static_cast(T::Layout) == static_cast(Eigen::RowMajor)) + || (static_cast(T::Layout) == static_cast(Eigen::ColMajor)), + "Layout must be row or column major"); + return (static_cast(T::Layout) == static_cast(Eigen::RowMajor)) ? 
array::c_style + : array::f_style; +} + +template +struct eigen_tensor_helper {}; + +template +struct eigen_tensor_helper> { + using Type = Eigen::Tensor; + using ValidType = void; + + static Eigen::DSizes get_shape(const Type &f) { + return f.dimensions(); + } + + static constexpr bool + is_correct_shape(const Eigen::DSizes & /*shape*/) { + return true; + } + + template + struct helper {}; + + template + struct helper> { + static constexpr auto value = concat(const_name(((void) Is, "?"))...); + }; + + static constexpr auto dimensions_descriptor + = helper())>::value; + + template + static Type *alloc(Args &&...args) { + return new Type(std::forward(args)...); + } + + static void free(Type *tensor) { delete tensor; } +}; + +template +struct eigen_tensor_helper< + Eigen::TensorFixedSize, Options_, IndexType>> { + using Type = Eigen::TensorFixedSize, Options_, IndexType>; + using ValidType = void; + + static constexpr Eigen::DSizes + get_shape(const Type & /*f*/) { + return get_shape(); + } + + static constexpr Eigen::DSizes get_shape() { + return Eigen::DSizes(Indices...); + } + + static bool + is_correct_shape(const Eigen::DSizes &shape) { + return get_shape() == shape; + } + + static constexpr auto dimensions_descriptor = concat(const_name()...); + + template + static Type *alloc(Args &&...args) { + Eigen::aligned_allocator allocator; + return ::new (allocator.allocate(1)) Type(std::forward(args)...); + } + + static void free(Type *tensor) { + Eigen::aligned_allocator allocator; + tensor->~Type(); + allocator.deallocate(tensor, 1); + } +}; + +template +struct get_tensor_descriptor { + static constexpr auto details + = const_name(", flags.writeable", "") + + const_name(Type::Layout) == static_cast(Eigen::RowMajor)>( + ", flags.c_contiguous", ", flags.f_contiguous"); + static constexpr auto value + = const_name("numpy.ndarray[") + npy_format_descriptor::name + + const_name("[") + eigen_tensor_helper>::dimensions_descriptor + + const_name("]") + const_name(details, const_name("")) + const_name("]"); +}; + +// When EIGEN_AVOID_STL_ARRAY is defined, Eigen::DSizes does not have the begin() member +// function. Falling back to a simple loop works around this issue. +// +// We need to disable the type-limits warning for the inner loop when size = 0. 
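// --------------------------------------------------------------------------------------------
// Illustrative aside, not part of this patch (hypothetical bindings, assuming
// <pybind11/eigen/tensor.h> is included and `namespace py = pybind11;`): what the casters in
// this header enable. Rank-N tensors convert to/from N-dimensional numpy arrays; the default
// column-major Eigen::Tensor comes back as a Fortran-ordered array. Eigen::TensorMap is also
// supported for zero-copy views (see the map caster further below).
void register_tensor_examples(py::module_ &m) {
    m.def("ones_3d", []() {
        Eigen::Tensor<double, 3> t(2, 3, 4);
        t.setConstant(1.0);
        return t; // a (2, 3, 4) float64 ndarray
    });
    m.def("scale_3d", [](const Eigen::Tensor<double, 3> &t, double f) {
        Eigen::Tensor<double, 3> r = t * f;
        return r;
    });
}
// --------------------------------------------------------------------------------------------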
+ +#if defined(__GNUC__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wtype-limits" +#endif + +template +std::vector convert_dsizes_to_vector(const Eigen::DSizes &arr) { + std::vector result(size); + + for (size_t i = 0; i < size; i++) { + result[i] = arr[i]; + } + + return result; +} + +template +Eigen::DSizes get_shape_for_array(const array &arr) { + Eigen::DSizes result; + const T *shape = arr.shape(); + for (size_t i = 0; i < size; i++) { + result[i] = shape[i]; + } + + return result; +} + +#if defined(__GNUC__) +# pragma GCC diagnostic pop +#endif + +template +struct type_caster::ValidType> { + using Helper = eigen_tensor_helper; + static constexpr auto temp_name = get_tensor_descriptor::value; + PYBIND11_TYPE_CASTER(Type, temp_name); + + bool load(handle src, bool convert) { + if (!convert) { + if (!isinstance(src)) { + return false; + } + array temp = array::ensure(src); + if (!temp) { + return false; + } + + if (!convert && !temp.dtype().is(dtype::of())) { + return false; + } + } + + array_t()> arr( + reinterpret_borrow(src)); + + if (arr.ndim() != Type::NumIndices) { + return false; + } + auto shape = get_shape_for_array(arr); + + if (!Helper::is_correct_shape(shape)) { + return false; + } + +#if EIGEN_VERSION_AT_LEAST(3, 4, 0) + auto data_pointer = arr.data(); +#else + // Handle Eigen bug + auto data_pointer = const_cast(arr.data()); +#endif + + if (is_tensor_aligned(arr.data())) { + value = Eigen::TensorMap(data_pointer, shape); + } else { + value = Eigen::TensorMap(data_pointer, shape); + } + + return true; + } + + static handle cast(Type &&src, return_value_policy policy, handle parent) { + if (policy == return_value_policy::reference + || policy == return_value_policy::reference_internal) { + pybind11_fail("Cannot use a reference return value policy for an rvalue"); + } + return cast_impl(&src, return_value_policy::move, parent); + } + + static handle cast(const Type &&src, return_value_policy policy, handle parent) { + if (policy == return_value_policy::reference + || policy == return_value_policy::reference_internal) { + pybind11_fail("Cannot use a reference return value policy for an rvalue"); + } + return cast_impl(&src, return_value_policy::move, parent); + } + + static handle cast(Type &src, return_value_policy policy, handle parent) { + if (policy == return_value_policy::automatic + || policy == return_value_policy::automatic_reference) { + policy = return_value_policy::copy; + } + return cast_impl(&src, policy, parent); + } + + static handle cast(const Type &src, return_value_policy policy, handle parent) { + if (policy == return_value_policy::automatic + || policy == return_value_policy::automatic_reference) { + policy = return_value_policy::copy; + } + return cast(&src, policy, parent); + } + + static handle cast(Type *src, return_value_policy policy, handle parent) { + if (policy == return_value_policy::automatic) { + policy = return_value_policy::take_ownership; + } else if (policy == return_value_policy::automatic_reference) { + policy = return_value_policy::reference; + } + return cast_impl(src, policy, parent); + } + + static handle cast(const Type *src, return_value_policy policy, handle parent) { + if (policy == return_value_policy::automatic) { + policy = return_value_policy::take_ownership; + } else if (policy == return_value_policy::automatic_reference) { + policy = return_value_policy::reference; + } + return cast_impl(src, policy, parent); + } + + template + static handle cast_impl(C *src, return_value_policy policy, handle parent) { 
+ object parent_object; + bool writeable = false; + switch (policy) { + case return_value_policy::move: + if (std::is_const::value) { + pybind11_fail("Cannot move from a constant reference"); + } + + src = Helper::alloc(std::move(*src)); + + parent_object + = capsule(src, [](void *ptr) { Helper::free(reinterpret_cast(ptr)); }); + writeable = true; + break; + + case return_value_policy::take_ownership: + if (std::is_const::value) { + // This cast is ugly, and might be UB in some cases, but we don't have an + // alternative here as we must free that memory + Helper::free(const_cast(src)); + pybind11_fail("Cannot take ownership of a const reference"); + } + + parent_object + = capsule(src, [](void *ptr) { Helper::free(reinterpret_cast(ptr)); }); + writeable = true; + break; + + case return_value_policy::copy: + writeable = true; + break; + + case return_value_policy::reference: + parent_object = none(); + writeable = !std::is_const::value; + break; + + case return_value_policy::reference_internal: + // Default should do the right thing + if (!parent) { + pybind11_fail("Cannot use reference internal when there is no parent"); + } + parent_object = reinterpret_borrow(parent); + writeable = !std::is_const::value; + break; + + default: + pybind11_fail("pybind11 bug in eigen.h, please file a bug report"); + } + + auto result = array_t()>( + convert_dsizes_to_vector(Helper::get_shape(*src)), src->data(), parent_object); + + if (!writeable) { + array_proxy(result.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_; + } + + return result.release(); + } +}; + +template = true> +StoragePointerType get_array_data_for_type(array &arr) { +#if EIGEN_VERSION_AT_LEAST(3, 4, 0) + return reinterpret_cast(arr.data()); +#else + // Handle Eigen bug + return reinterpret_cast(const_cast(arr.data())); +#endif +} + +template = true> +StoragePointerType get_array_data_for_type(array &arr) { + return reinterpret_cast(arr.mutable_data()); +} + +template +struct get_storage_pointer_type; + +template +struct get_storage_pointer_type> { + using SPT = typename MapType::StoragePointerType; +}; + +template +struct get_storage_pointer_type> { + using SPT = typename MapType::PointerArgType; +}; + +template +struct type_caster, + typename eigen_tensor_helper>::ValidType> { + using MapType = Eigen::TensorMap; + using Helper = eigen_tensor_helper>; + + bool load(handle src, bool /*convert*/) { + // Note that we have a lot more checks here as we want to make sure to avoid copies + if (!isinstance(src)) { + return false; + } + auto arr = reinterpret_borrow(src); + if ((arr.flags() & compute_array_flag_from_tensor()) == 0) { + return false; + } + + if (!arr.dtype().is(dtype::of())) { + return false; + } + + if (arr.ndim() != Type::NumIndices) { + return false; + } + + constexpr bool is_aligned = (Options & Eigen::Aligned) != 0; + + if (PYBIND11_SILENCE_MSVC_C4127(is_aligned) && !is_tensor_aligned(arr.data())) { + return false; + } + + auto shape = get_shape_for_array(arr); + + if (!Helper::is_correct_shape(shape)) { + return false; + } + + if (PYBIND11_SILENCE_MSVC_C4127(needs_writeable) && !arr.writeable()) { + return false; + } + + auto result = get_array_data_for_type::SPT, + needs_writeable>(arr); + + value.reset(new MapType(std::move(result), std::move(shape))); + + return true; + } + + static handle cast(MapType &&src, return_value_policy policy, handle parent) { + return cast_impl(&src, policy, parent); + } + + static handle cast(const MapType &&src, return_value_policy policy, handle parent) { + return cast_impl(&src, 
policy, parent); + } + + static handle cast(MapType &src, return_value_policy policy, handle parent) { + if (policy == return_value_policy::automatic + || policy == return_value_policy::automatic_reference) { + policy = return_value_policy::copy; + } + return cast_impl(&src, policy, parent); + } + + static handle cast(const MapType &src, return_value_policy policy, handle parent) { + if (policy == return_value_policy::automatic + || policy == return_value_policy::automatic_reference) { + policy = return_value_policy::copy; + } + return cast(&src, policy, parent); + } + + static handle cast(MapType *src, return_value_policy policy, handle parent) { + if (policy == return_value_policy::automatic) { + policy = return_value_policy::take_ownership; + } else if (policy == return_value_policy::automatic_reference) { + policy = return_value_policy::reference; + } + return cast_impl(src, policy, parent); + } + + static handle cast(const MapType *src, return_value_policy policy, handle parent) { + if (policy == return_value_policy::automatic) { + policy = return_value_policy::take_ownership; + } else if (policy == return_value_policy::automatic_reference) { + policy = return_value_policy::reference; + } + return cast_impl(src, policy, parent); + } + + template + static handle cast_impl(C *src, return_value_policy policy, handle parent) { + object parent_object; + constexpr bool writeable = !std::is_const::value; + switch (policy) { + case return_value_policy::reference: + parent_object = none(); + break; + + case return_value_policy::reference_internal: + // Default should do the right thing + if (!parent) { + pybind11_fail("Cannot use reference internal when there is no parent"); + } + parent_object = reinterpret_borrow(parent); + break; + + case return_value_policy::take_ownership: + delete src; + // fallthrough + default: + // move, take_ownership don't make any sense for a ref/map: + pybind11_fail("Invalid return_value_policy for Eigen Map type, must be either " + "reference or reference_internal"); + } + + auto result = array_t()>( + convert_dsizes_to_vector(Helper::get_shape(*src)), + src->data(), + std::move(parent_object)); + + if (!writeable) { + array_proxy(result.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_; + } + + return result.release(); + } + +#if EIGEN_VERSION_AT_LEAST(3, 4, 0) + + static constexpr bool needs_writeable = !std::is_const::SPT>::type>::value; +#else + // Handle Eigen bug + static constexpr bool needs_writeable = !std::is_const::value; +#endif + +protected: + // TODO: Move to std::optional once std::optional has more support + std::unique_ptr value; + +public: + static constexpr auto name = get_tensor_descriptor::value; + explicit operator MapType *() { return value.get(); } + explicit operator MapType &() { return *value; } + explicit operator MapType &&() && { return std::move(*value); } + + template + using cast_op_type = ::pybind11::detail::movable_cast_op_type; +}; + +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 7296cd1b8..491f215ce 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -128,7 +128,9 @@ set(PYBIND11_TEST_FILES test_custom_type_casters test_custom_type_setup test_docstring_options - test_eigen + test_eigen_matrix + test_eigen_tensor + test_eigen_tensor_avoid_stl_array.cpp test_enum test_eval test_exceptions @@ -233,7 +235,10 @@ list(GET PYBIND11_EIGEN_VERSION_AND_HASH 1 PYBIND11_EIGEN_VERSION_HASH) # Check if Eigen is available; if not, 
remove from PYBIND11_TEST_FILES (but # keep it in PYBIND11_PYTEST_FILES, so that we get the "eigen is not installed" # skip message). -list(FIND PYBIND11_TEST_FILES test_eigen.cpp PYBIND11_TEST_FILES_EIGEN_I) +list(FIND PYBIND11_TEST_FILES test_eigen_matrix.cpp PYBIND11_TEST_FILES_EIGEN_I) +if(PYBIND11_TEST_FILES_EIGEN_I EQUAL -1) + list(FIND PYBIND11_TEST_FILES test_eigen_tensor.cpp PYBIND11_TEST_FILES_EIGEN_I) +endif() if(PYBIND11_TEST_FILES_EIGEN_I GREATER -1) # Try loading via newer Eigen's Eigen3Config first (bypassing tools/FindEigen3.cmake). # Eigen 3.3.1+ exports a cmake 3.0+ target for handling dependency requirements, but also @@ -289,12 +294,37 @@ if(PYBIND11_TEST_FILES_EIGEN_I GREATER -1) endif() message(STATUS "Building tests with Eigen v${EIGEN3_VERSION}") else() - list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_EIGEN_I}) + list(FIND PYBIND11_TEST_FILES test_eigen_matrix.cpp PYBIND11_TEST_FILES_EIGEN_I) + if(PYBIND11_TEST_FILES_EIGEN_I GREATER -1) + list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_EIGEN_I}) + endif() + + list(FIND PYBIND11_TEST_FILES test_eigen_tensor.cpp PYBIND11_TEST_FILES_EIGEN_I) + if(PYBIND11_TEST_FILES_EIGEN_I GREATER -1) + list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_EIGEN_I}) + endif() + list(FIND PYBIND11_TEST_FILES test_eigen_tensor_avoid_stl_array.cpp + PYBIND11_TEST_FILES_EIGEN_I) + if(PYBIND11_TEST_FILES_EIGEN_I GREATER -1) + list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_EIGEN_I}) + endif() message( STATUS "Building tests WITHOUT Eigen, use -DDOWNLOAD_EIGEN=ON on CMake 3.11+ to download") endif() endif() +# Some code doesn't support gcc 4 +if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0) + list(FIND PYBIND11_TEST_FILES test_eigen_tensor.cpp PYBIND11_TEST_FILES_EIGEN_I) + if(PYBIND11_TEST_FILES_EIGEN_I GREATER -1) + list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_EIGEN_I}) + endif() + list(FIND PYBIND11_TEST_FILES test_eigen_tensor_avoid_stl_array.cpp PYBIND11_TEST_FILES_EIGEN_I) + if(PYBIND11_TEST_FILES_EIGEN_I GREATER -1) + list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_EIGEN_I}) + endif() +endif() + # Optional dependency for some tests (boost::variant is only supported with version >= 1.56) find_package(Boost 1.56) diff --git a/tests/extra_python_package/test_files.py b/tests/extra_python_package/test_files.py index 8e1ddd850..9a9bb1556 100644 --- a/tests/extra_python_package/test_files.py +++ b/tests/extra_python_package/test_files.py @@ -55,6 +55,11 @@ detail_headers = { "include/pybind11/detail/typeid.h", } +eigen_headers = { + "include/pybind11/eigen/matrix.h", + "include/pybind11/eigen/tensor.h", +} + stl_headers = { "include/pybind11/stl/filesystem.h", } @@ -82,7 +87,7 @@ py_files = { "setup_helpers.py", } -headers = main_headers | detail_headers | stl_headers +headers = main_headers | detail_headers | eigen_headers | stl_headers src_files = headers | cmake_files | pkgconfig_files all_files = src_files | py_files @@ -92,6 +97,7 @@ sdist_files = { "pybind11/include", "pybind11/include/pybind11", "pybind11/include/pybind11/detail", + "pybind11/include/pybind11/eigen", "pybind11/include/pybind11/stl", "pybind11/share", "pybind11/share/cmake", diff --git a/tests/test_eigen.cpp b/tests/test_eigen_matrix.cpp similarity index 99% rename from tests/test_eigen.cpp rename to tests/test_eigen_matrix.cpp index b0c7bdb48..71e41d198 100644 --- a/tests/test_eigen.cpp +++ b/tests/test_eigen_matrix.cpp @@ -7,7 +7,7 @@ BSD-style license that can be found in 
the LICENSE file. */ -#include +#include #include #include "constructor_stats.h" @@ -81,7 +81,7 @@ struct CustomOperatorNew { EIGEN_MAKE_ALIGNED_OPERATOR_NEW; }; -TEST_SUBMODULE(eigen, m) { +TEST_SUBMODULE(eigen_matrix, m) { using FixedMatrixR = Eigen::Matrix; using FixedMatrixC = Eigen::Matrix; using DenseMatrixR = Eigen::Matrix; diff --git a/tests/test_eigen.py b/tests/test_eigen_matrix.py similarity index 99% rename from tests/test_eigen.py rename to tests/test_eigen_matrix.py index 713432a81..4407fa6ae 100644 --- a/tests/test_eigen.py +++ b/tests/test_eigen_matrix.py @@ -3,7 +3,7 @@ import pytest from pybind11_tests import ConstructorStats np = pytest.importorskip("numpy") -m = pytest.importorskip("pybind11_tests.eigen") +m = pytest.importorskip("pybind11_tests.eigen_matrix") ref = np.array( diff --git a/tests/test_eigen_tensor.cpp b/tests/test_eigen_tensor.cpp new file mode 100644 index 000000000..40b494005 --- /dev/null +++ b/tests/test_eigen_tensor.cpp @@ -0,0 +1,16 @@ +/* + tests/eigen_tensor.cpp -- automatic conversion of Eigen Tensor + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +constexpr const char *test_eigen_tensor_module_name = "eigen_tensor"; + +#define PYBIND11_TEST_EIGEN_TENSOR_NAMESPACE eigen_tensor + +#ifdef EIGEN_AVOID_STL_ARRAY +# undef EIGEN_AVOID_STL_ARRAY +#endif + +#include "test_eigen_tensor.inl" diff --git a/tests/test_eigen_tensor.inl b/tests/test_eigen_tensor.inl new file mode 100644 index 000000000..09b35fa13 --- /dev/null +++ b/tests/test_eigen_tensor.inl @@ -0,0 +1,333 @@ +/* + tests/eigen_tensor.cpp -- automatic conversion of Eigen Tensor + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include + +#include "pybind11_tests.h" + +namespace PYBIND11_TEST_EIGEN_TENSOR_NAMESPACE { + +template +void reset_tensor(M &x) { + for (int i = 0; i < x.dimension(0); i++) { + for (int j = 0; j < x.dimension(1); j++) { + for (int k = 0; k < x.dimension(2); k++) { + x(i, j, k) = i * (5 * 2) + j * 2 + k; + } + } + } +} + +template +bool check_tensor(M &x) { + for (int i = 0; i < x.dimension(0); i++) { + for (int j = 0; j < x.dimension(1); j++) { + for (int k = 0; k < x.dimension(2); k++) { + if (x(i, j, k) != (i * (5 * 2) + j * 2 + k)) { + return false; + } + } + } + } + return true; +} + +template +Eigen::Tensor &get_tensor() { + static Eigen::Tensor *x; + + if (!x) { + x = new Eigen::Tensor(3, 5, 2); + reset_tensor(*x); + } + + return *x; +} + +template +Eigen::TensorMap> &get_tensor_map() { + static Eigen::TensorMap> *x; + + if (!x) { + x = new Eigen::TensorMap>(get_tensor()); + } + + return *x; +} + +template +Eigen::TensorFixedSize, Options> &get_fixed_tensor() { + static Eigen::TensorFixedSize, Options> *x; + + if (!x) { + Eigen::aligned_allocator, Options>> + allocator; + x = new (allocator.allocate(1)) + Eigen::TensorFixedSize, Options>(); + reset_tensor(*x); + } + + return *x; +} + +template +const Eigen::Tensor &get_const_tensor() { + return get_tensor(); +} + +template +struct CustomExample { + CustomExample() : member(get_tensor()), view_member(member) {} + + Eigen::Tensor member; + Eigen::TensorMap> view_member; +}; + +template +void init_tensor_module(pybind11::module &m) { + const char *needed_options = ""; + if (PYBIND11_SILENCE_MSVC_C4127(Options == Eigen::ColMajor)) { + needed_options = "F"; + } else { + needed_options = "C"; + } + m.attr("needed_options") = needed_options; + + m.def("setup", []() { + reset_tensor(get_tensor()); + reset_tensor(get_fixed_tensor()); + }); + + m.def("is_ok", []() { + return check_tensor(get_tensor()) && check_tensor(get_fixed_tensor()); + }); + + py::class_>(m, "CustomExample") + .def(py::init<>()) + .def_readonly( + "member", &CustomExample::member, py::return_value_policy::reference_internal) + .def_readonly("member_view", + &CustomExample::view_member, + py::return_value_policy::reference_internal); + + m.def( + "copy_fixed_tensor", + []() { return &get_fixed_tensor(); }, + py::return_value_policy::copy); + + m.def( + "copy_tensor", []() { return &get_tensor(); }, py::return_value_policy::copy); + + m.def( + "copy_const_tensor", + []() { return &get_const_tensor(); }, + py::return_value_policy::copy); + + m.def( + "move_fixed_tensor_copy", + []() -> Eigen::TensorFixedSize, Options> { + return get_fixed_tensor(); + }, + py::return_value_policy::move); + + m.def( + "move_tensor_copy", + []() -> Eigen::Tensor { return get_tensor(); }, + py::return_value_policy::move); + + m.def( + "move_const_tensor", + []() -> const Eigen::Tensor & { return get_const_tensor(); }, + py::return_value_policy::move); + + m.def( + "take_fixed_tensor", + + []() { + Eigen::aligned_allocator< + Eigen::TensorFixedSize, Options>> + allocator; + return new (allocator.allocate(1)) + Eigen::TensorFixedSize, Options>( + get_fixed_tensor()); + }, + py::return_value_policy::take_ownership); + + m.def( + "take_tensor", + []() { return new Eigen::Tensor(get_tensor()); }, + py::return_value_policy::take_ownership); + + m.def( + "take_const_tensor", + []() -> const Eigen::Tensor * { + return new Eigen::Tensor(get_tensor()); + }, + py::return_value_policy::take_ownership); + + m.def( + "take_view_tensor", + []() -> const Eigen::TensorMap> * { + return new 
Eigen::TensorMap>(get_tensor()); + }, + py::return_value_policy::take_ownership); + + m.def( + "reference_tensor", + []() { return &get_tensor(); }, + py::return_value_policy::reference); + + m.def( + "reference_tensor_v2", + []() -> Eigen::Tensor & { return get_tensor(); }, + py::return_value_policy::reference); + + m.def( + "reference_tensor_internal", + []() { return &get_tensor(); }, + py::return_value_policy::reference_internal); + + m.def( + "reference_fixed_tensor", + []() { return &get_tensor(); }, + py::return_value_policy::reference); + + m.def( + "reference_const_tensor", + []() { return &get_const_tensor(); }, + py::return_value_policy::reference); + + m.def( + "reference_const_tensor_v2", + []() -> const Eigen::Tensor & { return get_const_tensor(); }, + py::return_value_policy::reference); + + m.def( + "reference_view_of_tensor", + []() -> Eigen::TensorMap> { + return get_tensor_map(); + }, + py::return_value_policy::reference); + + m.def( + "reference_view_of_tensor_v2", + // NOLINTNEXTLINE(readability-const-return-type) + []() -> const Eigen::TensorMap> { + return get_tensor_map(); // NOLINT(readability-const-return-type) + }, // NOLINT(readability-const-return-type) + py::return_value_policy::reference); + + m.def( + "reference_view_of_tensor_v3", + []() -> Eigen::TensorMap> * { + return &get_tensor_map(); + }, + py::return_value_policy::reference); + + m.def( + "reference_view_of_tensor_v4", + []() -> const Eigen::TensorMap> * { + return &get_tensor_map(); + }, + py::return_value_policy::reference); + + m.def( + "reference_view_of_tensor_v5", + []() -> Eigen::TensorMap> & { + return get_tensor_map(); + }, + py::return_value_policy::reference); + + m.def( + "reference_view_of_tensor_v6", + []() -> const Eigen::TensorMap> & { + return get_tensor_map(); + }, + py::return_value_policy::reference); + + m.def( + "reference_view_of_fixed_tensor", + []() { + return Eigen::TensorMap< + Eigen::TensorFixedSize, Options>>( + get_fixed_tensor()); + }, + py::return_value_policy::reference); + + m.def("round_trip_tensor", + [](const Eigen::Tensor &tensor) { return tensor; }); + + m.def( + "round_trip_tensor_noconvert", + [](const Eigen::Tensor &tensor) { return tensor; }, + py::arg("tensor").noconvert()); + + m.def("round_trip_tensor2", + [](const Eigen::Tensor &tensor) { return tensor; }); + + m.def("round_trip_fixed_tensor", + [](const Eigen::TensorFixedSize, Options> &tensor) { + return tensor; + }); + + m.def( + "round_trip_view_tensor", + [](Eigen::TensorMap> view) { return view; }, + py::return_value_policy::reference); + + m.def( + "round_trip_view_tensor_ref", + [](Eigen::TensorMap> &view) { return view; }, + py::return_value_policy::reference); + + m.def( + "round_trip_view_tensor_ptr", + [](Eigen::TensorMap> *view) { return view; }, + py::return_value_policy::reference); + + m.def( + "round_trip_aligned_view_tensor", + [](Eigen::TensorMap, Eigen::Aligned> view) { + return view; + }, + py::return_value_policy::reference); + + m.def( + "round_trip_const_view_tensor", + [](Eigen::TensorMap> view) { + return Eigen::Tensor(view); + }, + py::return_value_policy::move); + + m.def( + "round_trip_rank_0", + [](const Eigen::Tensor &tensor) { return tensor; }, + py::return_value_policy::move); + + m.def( + "round_trip_rank_0_noconvert", + [](const Eigen::Tensor &tensor) { return tensor; }, + py::arg("tensor").noconvert(), + py::return_value_policy::move); + + m.def( + "round_trip_rank_0_view", + [](Eigen::TensorMap> &tensor) { return tensor; }, + py::return_value_policy::reference); +} + 
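The bindings above deliberately cover every return_value_policy the new tensor caster has to handle. As a minimal sketch of the two producer patterns a typical extension would actually use with the new <pybind11/eigen/tensor.h> header — the module name producer_example and the storage() helper below are invented for illustration and are not part of this change:

#include <pybind11/pybind11.h>
#include <pybind11/eigen/tensor.h>

#include <unsupported/Eigen/CXX11/Tensor>

namespace py = pybind11;

// Long-lived tensor owned by the C++ side; allocated on the heap so that views handed out
// to Python never dangle.
Eigen::Tensor<double, 3> &storage() {
    static auto *t = [] {
        auto *p = new Eigen::Tensor<double, 3>(3, 5, 2);
        p->setZero();
        return p;
    }();
    return *t;
}

PYBIND11_MODULE(producer_example, m) {
    // Returned by value: the caster moves the data into a fresh, writeable numpy array.
    m.def("get_copy", []() { return storage(); });

    // Returned as a TensorMap under the reference policy: Python receives a numpy view of
    // the same buffer, much like reference_view_of_tensor above.
    m.def(
        "get_view",
        []() { return Eigen::TensorMap<Eigen::Tensor<double, 3>>(storage()); },
        py::return_value_policy::reference);
}

Any other policy on a TensorMap (move, take_ownership, and so on) is rejected at runtime with "Invalid return_value_policy for Eigen Map type", which is what test_bad_cpp_to_python_casts below asserts.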
+void test_module(py::module_ &); +test_initializer name(test_eigen_tensor_module_name, test_module); +void test_module(py::module_ &m) { + auto f_style = m.def_submodule("f_style"); + auto c_style = m.def_submodule("c_style"); + + init_tensor_module(f_style); + init_tensor_module(c_style); +} + +} // namespace PYBIND11_TEST_EIGEN_TENSOR_NAMESPACE diff --git a/tests/test_eigen_tensor.py b/tests/test_eigen_tensor.py new file mode 100644 index 000000000..5ee5fa01b --- /dev/null +++ b/tests/test_eigen_tensor.py @@ -0,0 +1,288 @@ +import sys + +import pytest + +np = pytest.importorskip("numpy") +eigen_tensor = pytest.importorskip("pybind11_tests.eigen_tensor") +submodules = [eigen_tensor.c_style, eigen_tensor.f_style] +try: + from pybind11_tests import eigen_tensor_avoid_stl_array as avoid + + submodules += [avoid.c_style, avoid.f_style] +except ImportError: + pass + +tensor_ref = np.empty((3, 5, 2), dtype=np.int64) + +for i in range(tensor_ref.shape[0]): + for j in range(tensor_ref.shape[1]): + for k in range(tensor_ref.shape[2]): + tensor_ref[i, j, k] = i * (5 * 2) + j * 2 + k + +indices = (2, 3, 1) + + +@pytest.fixture(autouse=True) +def cleanup(): + for module in submodules: + module.setup() + + yield + + for module in submodules: + assert module.is_ok() + + +def test_import_avoid_stl_array(): + pytest.importorskip("pybind11_tests.eigen_tensor_avoid_stl_array") + assert len(submodules) == 4 + + +def assert_equal_tensor_ref(mat, writeable=True, modified=None): + assert mat.flags.writeable == writeable + + copy = np.array(tensor_ref) + if modified is not None: + copy[indices] = modified + + np.testing.assert_array_equal(mat, copy) + + +@pytest.mark.parametrize("m", submodules) +@pytest.mark.parametrize("member_name", ["member", "member_view"]) +def test_reference_internal(m, member_name): + + if not hasattr(sys, "getrefcount"): + pytest.skip("No reference counting") + foo = m.CustomExample() + counts = sys.getrefcount(foo) + mem = getattr(foo, member_name) + assert_equal_tensor_ref(mem, writeable=False) + new_counts = sys.getrefcount(foo) + assert new_counts == counts + 1 + assert_equal_tensor_ref(mem, writeable=False) + del mem + assert sys.getrefcount(foo) == counts + + +assert_equal_funcs = [ + "copy_tensor", + "copy_fixed_tensor", + "copy_const_tensor", + "move_tensor_copy", + "move_fixed_tensor_copy", + "take_tensor", + "take_fixed_tensor", + "reference_tensor", + "reference_tensor_v2", + "reference_fixed_tensor", + "reference_view_of_tensor", + "reference_view_of_tensor_v3", + "reference_view_of_tensor_v5", + "reference_view_of_fixed_tensor", +] + +assert_equal_const_funcs = [ + "reference_view_of_tensor_v2", + "reference_view_of_tensor_v4", + "reference_view_of_tensor_v6", + "reference_const_tensor", + "reference_const_tensor_v2", +] + + +@pytest.mark.parametrize("m", submodules) +@pytest.mark.parametrize("func_name", assert_equal_funcs + assert_equal_const_funcs) +def test_convert_tensor_to_py(m, func_name): + writeable = func_name in assert_equal_funcs + assert_equal_tensor_ref(getattr(m, func_name)(), writeable=writeable) + + +@pytest.mark.parametrize("m", submodules) +def test_bad_cpp_to_python_casts(m): + + with pytest.raises( + RuntimeError, match="Cannot use reference internal when there is no parent" + ): + m.reference_tensor_internal() + + with pytest.raises(RuntimeError, match="Cannot move from a constant reference"): + m.move_const_tensor() + + with pytest.raises( + RuntimeError, match="Cannot take ownership of a const reference" + ): + m.take_const_tensor() + + with 
pytest.raises( + RuntimeError, + match="Invalid return_value_policy for Eigen Map type, must be either reference or reference_internal", + ): + m.take_view_tensor() + + +@pytest.mark.parametrize("m", submodules) +def test_bad_python_to_cpp_casts(m): + + with pytest.raises( + TypeError, match=r"^round_trip_tensor\(\): incompatible function arguments" + ): + m.round_trip_tensor(np.zeros((2, 3))) + + with pytest.raises(TypeError, match=r"^Cannot cast array data from dtype"): + m.round_trip_tensor(np.zeros(dtype=np.str_, shape=(2, 3, 1))) + + with pytest.raises( + TypeError, + match=r"^round_trip_tensor_noconvert\(\): incompatible function arguments", + ): + m.round_trip_tensor_noconvert(tensor_ref) + + assert_equal_tensor_ref( + m.round_trip_tensor_noconvert(tensor_ref.astype(np.float64)) + ) + + if m.needed_options == "F": + bad_options = "C" + else: + bad_options = "F" + # Shape, dtype and the order need to be correct for a TensorMap cast + with pytest.raises( + TypeError, match=r"^round_trip_view_tensor\(\): incompatible function arguments" + ): + m.round_trip_view_tensor( + np.zeros((3, 5, 2), dtype=np.float64, order=bad_options) + ) + + with pytest.raises( + TypeError, match=r"^round_trip_view_tensor\(\): incompatible function arguments" + ): + m.round_trip_view_tensor( + np.zeros((3, 5, 2), dtype=np.float32, order=m.needed_options) + ) + + with pytest.raises( + TypeError, match=r"^round_trip_view_tensor\(\): incompatible function arguments" + ): + m.round_trip_view_tensor( + np.zeros((3, 5), dtype=np.float64, order=m.needed_options) + ) + + with pytest.raises( + TypeError, match=r"^round_trip_view_tensor\(\): incompatible function arguments" + ): + temp = np.zeros((3, 5, 2), dtype=np.float64, order=m.needed_options) + m.round_trip_view_tensor( + temp[:, ::-1, :], + ) + + with pytest.raises( + TypeError, match=r"^round_trip_view_tensor\(\): incompatible function arguments" + ): + temp = np.zeros((3, 5, 2), dtype=np.float64, order=m.needed_options) + temp.setflags(write=False) + m.round_trip_view_tensor(temp) + + +@pytest.mark.parametrize("m", submodules) +def test_references_actually_refer(m): + + a = m.reference_tensor() + temp = a[indices] + a[indices] = 100 + assert_equal_tensor_ref(m.copy_const_tensor(), modified=100) + a[indices] = temp + assert_equal_tensor_ref(m.copy_const_tensor()) + + a = m.reference_view_of_tensor() + a[indices] = 100 + assert_equal_tensor_ref(m.copy_const_tensor(), modified=100) + a[indices] = temp + assert_equal_tensor_ref(m.copy_const_tensor()) + + +@pytest.mark.parametrize("m", submodules) +def test_round_trip(m): + + assert_equal_tensor_ref(m.round_trip_tensor(tensor_ref)) + + with pytest.raises(TypeError, match="^Cannot cast array data from"): + assert_equal_tensor_ref(m.round_trip_tensor2(tensor_ref)) + + assert_equal_tensor_ref(m.round_trip_tensor2(np.array(tensor_ref, dtype=np.int32))) + assert_equal_tensor_ref(m.round_trip_fixed_tensor(tensor_ref)) + assert_equal_tensor_ref(m.round_trip_aligned_view_tensor(m.reference_tensor())) + + copy = np.array(tensor_ref, dtype=np.float64, order=m.needed_options) + assert_equal_tensor_ref(m.round_trip_view_tensor(copy)) + assert_equal_tensor_ref(m.round_trip_view_tensor_ref(copy)) + assert_equal_tensor_ref(m.round_trip_view_tensor_ptr(copy)) + copy.setflags(write=False) + assert_equal_tensor_ref(m.round_trip_const_view_tensor(copy)) + + np.testing.assert_array_equal( + tensor_ref[:, ::-1, :], m.round_trip_tensor(tensor_ref[:, ::-1, :]) + ) + + assert m.round_trip_rank_0(np.float64(3.5)) == 3.5 + assert 
m.round_trip_rank_0(3.5) == 3.5 + + with pytest.raises( + TypeError, + match=r"^round_trip_rank_0_noconvert\(\): incompatible function arguments", + ): + m.round_trip_rank_0_noconvert(np.float64(3.5)) + + with pytest.raises( + TypeError, + match=r"^round_trip_rank_0_noconvert\(\): incompatible function arguments", + ): + m.round_trip_rank_0_noconvert(3.5) + + with pytest.raises( + TypeError, match=r"^round_trip_rank_0_view\(\): incompatible function arguments" + ): + m.round_trip_rank_0_view(np.float64(3.5)) + + with pytest.raises( + TypeError, match=r"^round_trip_rank_0_view\(\): incompatible function arguments" + ): + m.round_trip_rank_0_view(3.5) + + +@pytest.mark.parametrize("m", submodules) +def test_round_trip_references_actually_refer(m): + + # Need to create a copy that matches the type on the C side + copy = np.array(tensor_ref, dtype=np.float64, order=m.needed_options) + a = m.round_trip_view_tensor(copy) + temp = a[indices] + a[indices] = 100 + assert_equal_tensor_ref(copy, modified=100) + a[indices] = temp + assert_equal_tensor_ref(copy) + + +@pytest.mark.parametrize("m", submodules) +def test_doc_string(m, doc): + assert ( + doc(m.copy_tensor) == "copy_tensor() -> numpy.ndarray[numpy.float64[?, ?, ?]]" + ) + assert ( + doc(m.copy_fixed_tensor) + == "copy_fixed_tensor() -> numpy.ndarray[numpy.float64[3, 5, 2]]" + ) + assert ( + doc(m.reference_const_tensor) + == "reference_const_tensor() -> numpy.ndarray[numpy.float64[?, ?, ?]]" + ) + + order_flag = f"flags.{m.needed_options.lower()}_contiguous" + assert doc(m.round_trip_view_tensor) == ( + f"round_trip_view_tensor(arg0: numpy.ndarray[numpy.float64[?, ?, ?], flags.writeable, {order_flag}])" + + f" -> numpy.ndarray[numpy.float64[?, ?, ?], flags.writeable, {order_flag}]" + ) + assert doc(m.round_trip_const_view_tensor) == ( + f"round_trip_const_view_tensor(arg0: numpy.ndarray[numpy.float64[?, ?, ?], {order_flag}])" + + " -> numpy.ndarray[numpy.float64[?, ?, ?]]" + ) diff --git a/tests/test_eigen_tensor_avoid_stl_array.cpp b/tests/test_eigen_tensor_avoid_stl_array.cpp new file mode 100644 index 000000000..58bedf62d --- /dev/null +++ b/tests/test_eigen_tensor_avoid_stl_array.cpp @@ -0,0 +1,16 @@ +/* + tests/eigen_tensor.cpp -- automatic conversion of Eigen Tensor + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +constexpr const char *test_eigen_tensor_module_name = "eigen_tensor_avoid_stl_array"; + +#ifndef EIGEN_AVOID_STL_ARRAY +# define EIGEN_AVOID_STL_ARRAY +#endif + +#define PYBIND11_TEST_EIGEN_TENSOR_NAMESPACE eigen_tensor_avoid_stl_array + +#include "test_eigen_tensor.inl" diff --git a/tests/test_numpy_array.cpp b/tests/test_numpy_array.cpp index 69ddbe1ef..b118e2c6c 100644 --- a/tests/test_numpy_array.cpp +++ b/tests/test_numpy_array.cpp @@ -521,4 +521,6 @@ TEST_SUBMODULE(numpy_array, sm) { sm.def("test_fmt_desc_double", [](const py::array_t &) {}); sm.def("test_fmt_desc_const_float", [](const py::array_t &) {}); sm.def("test_fmt_desc_const_double", [](const py::array_t &) {}); + + sm.def("round_trip_float", [](double d) { return d; }); } diff --git a/tests/test_numpy_array.py b/tests/test_numpy_array.py index 504963b16..cdec9ad60 100644 --- a/tests/test_numpy_array.py +++ b/tests/test_numpy_array.py @@ -585,3 +585,9 @@ def test_dtype_refcount_leak(): m.ndim(a) after = getrefcount(dtype) assert after == before + + +def test_round_trip_float(): + arr = np.zeros((), np.float64) + arr[()] = 37.2 + assert m.round_trip_float(arr) == 37.2 diff --git a/tools/setup_global.py.in b/tools/setup_global.py.in index d91468c10..885ac5c72 100644 --- a/tools/setup_global.py.in +++ b/tools/setup_global.py.in @@ -27,10 +27,11 @@ class InstallHeadersNested(install_headers): main_headers = glob.glob("pybind11/include/pybind11/*.h") detail_headers = glob.glob("pybind11/include/pybind11/detail/*.h") +eigen_headers = glob.glob("pybind11/include/pybind11/eigen/*.h") stl_headers = glob.glob("pybind11/include/pybind11/stl/*.h") cmake_files = glob.glob("pybind11/share/cmake/pybind11/*.cmake") pkgconfig_files = glob.glob("pybind11/share/pkgconfig/*.pc") -headers = main_headers + detail_headers + stl_headers +headers = main_headers + detail_headers + stl_headers + eigen_headers cmdclass = {"install_headers": InstallHeadersNested} $extra_cmd @@ -55,6 +56,7 @@ setup( (base + "share/pkgconfig", pkgconfig_files), (base + "include/pybind11", main_headers), (base + "include/pybind11/detail", detail_headers), + (base + "include/pybind11/eigen", eigen_headers), (base + "include/pybind11/stl", stl_headers), ], cmdclass=cmdclass, diff --git a/tools/setup_main.py.in b/tools/setup_main.py.in index 65198bdb6..6358cc7b9 100644 --- a/tools/setup_main.py.in +++ b/tools/setup_main.py.in @@ -15,6 +15,7 @@ setup( "pybind11", "pybind11.include.pybind11", "pybind11.include.pybind11.detail", + "pybind11.include.pybind11.eigen", "pybind11.include.pybind11.stl", "pybind11.share.cmake.pybind11", "pybind11.share.pkgconfig", @@ -23,6 +24,7 @@ setup( "pybind11": ["py.typed"], "pybind11.include.pybind11": ["*.h"], "pybind11.include.pybind11.detail": ["*.h"], + "pybind11.include.pybind11.eigen": ["*.h"], "pybind11.include.pybind11.stl": ["*.h"], "pybind11.share.cmake.pybind11": ["*.cmake"], "pybind11.share.pkgconfig": ["*.pc"],
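Taken together with the matrix support, the tensor caster gives downstream code two ways to accept a tensor argument, both exercised by the round_trip_* functions above: a plain Eigen::Tensor parameter converts and copies, while an Eigen::TensorMap parameter wraps the numpy buffer in place. A hedged sketch under those assumptions — the module and function names here are hypothetical:

#include <pybind11/pybind11.h>
#include <pybind11/eigen/tensor.h>

#include <unsupported/Eigen/CXX11/Tensor>

namespace py = pybind11;

PYBIND11_MODULE(consumer_example, m) {
    // Plain tensor parameter: any array of compatible shape is accepted; the data is
    // converted and copied into a temporary Eigen::Tensor, as round_trip_tensor does.
    m.def("sum_elements", [](const Eigen::Tensor<double, 3> &t) {
        Eigen::Tensor<double, 0> total = t.sum();
        return total();
    });

    // TensorMap parameter: the numpy buffer is used in place, so no conversion happens and
    // no copy is made, as with round_trip_view_tensor.
    m.def("scale_in_place",
          [](Eigen::TensorMap<Eigen::Tensor<double, 3>> t, double factor) { t = t * factor; });
}

Because the map variant never copies, the incoming array must already match the dtype, rank, and memory order, and must be writeable for a non-const map; those are exactly the rejections test_bad_python_to_cpp_casts checks for.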