Merge branch 'master' into skylion007/classmethod

Aaron Gokaslan 2022-11-08 12:27:42 -05:00 committed by GitHub
commit 7eb5414a36
48 changed files with 2871 additions and 876 deletions


@@ -6,7 +6,8 @@ body:
- type: markdown
attributes:
value: |
- Maintainers will only make a best effort to triage PRs. Please do your best to make the issue as easy to act on as possible, and only open if clearly a problem with pybind11 (ask first if unsure).
+ Please do your best to make the issue as easy to act on as possible, and only submit here if there is clearly a problem with pybind11 (ask first if unsure). **Note that a reproducer in a PR is much more likely to get immediate attention.**
- type: checkboxes
id: steps
attributes:
@@ -20,6 +21,12 @@ body:
- label: Consider asking first in the [Gitter chat room](https://gitter.im/pybind/Lobby) or in a [Discussion](https://github.com/pybind/pybind11/discussions/new).
required: false
- type: Input
id: version
attributes:
label: What version (or hash if on master) of pybind11 are you using?
required: true
- type: textarea
id: description
attributes:
@@ -40,6 +47,14 @@ body:
The code should be minimal, have no external dependencies, isolate the
function(s) that cause breakage. Submit matched and complete C++ and
Python snippets that can be easily compiled and run to diagnose the
- issue. If possible, make a PR with a new, failing test to give us a
- starting point to work on!
+ issue. — Note that a reproducer in a PR is much more likely to get
+ immediate attention: failing tests in the pybind11 CI are the best
+ starting point for working out fixes.
render: text
- type: Input
id: regression
attributes:
label: Is this a regression? Put the last known working version here if it is.
description: Put the last known working version here if this is a regression.
value: Not a regression


@@ -30,7 +30,7 @@ jobs:
- '3.6'
- '3.9'
- '3.10'
- - '3.11-dev'
+ - '3.11'
- 'pypy-3.7'
- 'pypy-3.8'
- 'pypy-3.9'
@@ -102,10 +102,12 @@ jobs:
run: python -m pip install pytest-github-actions-annotate-failures
# First build - C++11 mode and inplace
+ # More-or-less randomly adding -DPYBIND11_SIMPLE_GIL_MANAGEMENT=ON here.
- name: Configure C++11 ${{ matrix.args }}
run: >
cmake -S . -B .
-DPYBIND11_WERROR=ON
+ -DPYBIND11_SIMPLE_GIL_MANAGEMENT=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=11
@@ -119,7 +121,7 @@ jobs:
- name: C++11 tests
# TODO: Figure out how to load the DLL on Python 3.8+
- if: "!(runner.os == 'Windows' && (matrix.python == 3.8 || matrix.python == 3.9 || matrix.python == '3.10' || matrix.python == '3.11-dev' || matrix.python == 'pypy-3.8'))"
+ if: "!(runner.os == 'Windows' && (matrix.python == 3.8 || matrix.python == 3.9 || matrix.python == '3.10' || matrix.python == '3.11' || matrix.python == 'pypy-3.8'))"
run: cmake --build . --target cpptest -j 2
- name: Interface test C++11
@@ -129,10 +131,12 @@ jobs:
run: git clean -fdx
# Second build - C++17 mode and in a build directory
+ # More-or-less randomly adding -DPYBIND11_SIMPLE_GIL_MANAGEMENT=OFF here.
- name: Configure C++17
run: >
cmake -S . -B build2
-DPYBIND11_WERROR=ON
+ -DPYBIND11_SIMPLE_GIL_MANAGEMENT=OFF
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=17
@@ -146,7 +150,7 @@ jobs:
- name: C++ tests
# TODO: Figure out how to load the DLL on Python 3.8+
- if: "!(runner.os == 'Windows' && (matrix.python == 3.8 || matrix.python == 3.9 || matrix.python == '3.10' || matrix.python == '3.11-dev' || matrix.python == 'pypy-3.8'))"
+ if: "!(runner.os == 'Windows' && (matrix.python == 3.8 || matrix.python == 3.9 || matrix.python == '3.10' || matrix.python == '3.11' || matrix.python == 'pypy-3.8'))"
run: cmake --build build2 --target cpptest
# Third build - C++17 mode with unstable ABI
@@ -186,7 +190,7 @@ jobs:
- python-version: "3.9"
python-debug: true
valgrind: true
- - python-version: "3.11-dev"
+ - python-version: "3.11"
python-debug: false
name: "🐍 ${{ matrix.python-version }}${{ matrix.python-debug && '-dbg' || '' }} (deadsnakes)${{ matrix.valgrind && ' • Valgrind' || '' }} • x64"
@@ -391,7 +395,7 @@ jobs:
# Testing on CentOS 7 + PGI compilers, which seems to require more workarounds
centos-nvhpc7:
runs-on: ubuntu-latest
- name: "🐍 3 • CentOS7 / PGI 22.3 • x64"
+ name: "🐍 3 • CentOS7 / PGI 22.9 • x64"
container: centos:7
steps:
@@ -401,7 +405,7 @@ jobs:
run: yum update -y && yum install -y epel-release && yum install -y git python3-devel make environment-modules cmake3 yum-utils
- name: Install NVidia HPC SDK
- run: yum-config-manager --add-repo https://developer.download.nvidia.com/hpc-sdk/rhel/nvhpc.repo && yum -y install nvhpc-22.3
+ run: yum-config-manager --add-repo https://developer.download.nvidia.com/hpc-sdk/rhel/nvhpc.repo && yum -y install nvhpc-22.9
# On CentOS 7, we have to filter a few tests (compiler internal error)
# and allow deeper template recursion (not needed on CentOS 8 with a newer
@@ -411,12 +415,12 @@ jobs:
shell: bash
run: |
source /etc/profile.d/modules.sh
- module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/22.3
+ module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/22.9
cmake3 -S . -B build -DDOWNLOAD_CATCH=ON \
-DCMAKE_CXX_STANDARD=11 \
-DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)") \
-DCMAKE_CXX_FLAGS="-Wc,--pending_instantiations=0" \
- -DPYBIND11_TEST_FILTER="test_smart_ptr.cpp;test_virtual_functions.cpp"
+ -DPYBIND11_TEST_FILTER="test_smart_ptr.cpp"
# Building before installing Pip should produce a warning but not an error
- name: Build
@@ -757,7 +761,7 @@ jobs:
uses: jwlawson/actions-setup-cmake@v1.13
- name: Prepare MSVC
- uses: ilammy/msvc-dev-cmd@v1.11.0
+ uses: ilammy/msvc-dev-cmd@v1.12.0
with:
arch: x86
@@ -810,7 +814,7 @@ jobs:
uses: jwlawson/actions-setup-cmake@v1.13
- name: Prepare MSVC
- uses: ilammy/msvc-dev-cmd@v1.11.0
+ uses: ilammy/msvc-dev-cmd@v1.12.0
with:
arch: x86


@@ -10,7 +10,11 @@ jobs:
steps:
- uses: actions/labeler@main
- if: github.event.pull_request.merged == true
+ if: >
+ github.event.pull_request.merged == true &&
+ !startsWith(github.event.pull_request.title, 'chore(deps):') &&
+ !startsWith(github.event.pull_request.title, 'ci(fix):') &&
+ !startsWith(github.event.pull_request.title, 'docs(changelog):')
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
configuration-path: .github/labeler_merged.yml

.gitignore

@@ -43,3 +43,4 @@ pybind11Targets.cmake
/pybind11/share/*
/docs/_build/*
.ipynb_checkpoints/
+ tests/main.cpp


@@ -41,7 +41,7 @@ repos:
# Upgrade old Python syntax
- repo: https://github.com/asottile/pyupgrade
- rev: "v2.38.2"
+ rev: "v3.2.0"
hooks:
- id: pyupgrade
args: [--py36-plus]
@@ -54,7 +54,7 @@ repos:
# Black, the code formatter, natively supports pre-commit
- repo: https://github.com/psf/black
- rev: "22.8.0" # Keep in sync with blacken-docs
+ rev: "22.10.0" # Keep in sync with blacken-docs
hooks:
- id: black
@@ -64,7 +64,7 @@ repos:
hooks:
- id: blacken-docs
additional_dependencies:
- - black==22.8.0 # keep in sync with black hook
+ - black==22.10.0 # keep in sync with black hook
# Changes tabs to spaces
- repo: https://github.com/Lucas-C/pre-commit-hooks
@@ -116,7 +116,7 @@ repos:
# PyLint has native support - not always usable, but works for us
- repo: https://github.com/PyCQA/pylint
- rev: "v2.15.3"
+ rev: "v2.15.5"
hooks:
- id: pylint
files: ^pybind11
@@ -132,7 +132,7 @@ repos:
# Check static types with mypy
- repo: https://github.com/pre-commit/mirrors-mypy
- rev: "v0.981"
+ rev: "v0.982"
hooks:
- id: mypy
args: []
@@ -152,7 +152,7 @@ repos:
# Use tools/codespell_ignore_lines_from_errors.py
# to rebuild .codespell-ignore-lines
- repo: https://github.com/codespell-project/codespell
- rev: "v2.2.1"
+ rev: "v2.2.2"
hooks:
- id: codespell
exclude: ".supp$"


@@ -91,10 +91,16 @@ endif()
option(PYBIND11_INSTALL "Install pybind11 header files?" ${PYBIND11_MASTER_PROJECT})
option(PYBIND11_TEST "Build pybind11 test suite?" ${PYBIND11_MASTER_PROJECT})
option(PYBIND11_NOPYTHON "Disable search for Python" OFF)
+ option(PYBIND11_SIMPLE_GIL_MANAGEMENT
+ "Use simpler GIL management logic that does not support disassociation" OFF)
set(PYBIND11_INTERNALS_VERSION
""
CACHE STRING "Override the ABI version, may be used to enable the unstable ABI.")
+ if(PYBIND11_SIMPLE_GIL_MANAGEMENT)
+ add_compile_definitions(PYBIND11_SIMPLE_GIL_MANAGEMENT)
+ endif()
cmake_dependent_option(
USE_PYTHON_INCLUDE_DIR
"Install pybind11 headers in Python include directory instead of default installation prefix"
@@ -120,6 +126,8 @@ set(PYBIND11_HEADERS
include/pybind11/complex.h
include/pybind11/options.h
include/pybind11/eigen.h
+ include/pybind11/eigen/matrix.h
+ include/pybind11/eigen/tensor.h
include/pybind11/embed.h
include/pybind11/eval.h
include/pybind11/gil.h
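For orientation, a minimal, hedged C++ sketch of the user-facing code that the new PYBIND11_SIMPLE_GIL_MANAGEMENT option affects (the module and function names are invented, not part of this diff). Enabling the option only swaps pybind11's internal GIL bookkeeping for the plain PyGILState-based logic; ordinary py::gil_scoped_release / py::gil_scoped_acquire usage like this still works, while the disassociation feature of gil_scoped_release is not supported in that mode.

#include <pybind11/pybind11.h>
namespace py = pybind11;

// Hypothetical example: release the GIL around pure C++ work so other Python
// threads can run. Behaves the same with either GIL management implementation.
double heavy_compute(double x) {
    py::gil_scoped_release release;  // no Python API calls below this point
    double y = x;
    for (int i = 0; i < 1000000; ++i) {
        y = y * 0.5 + 1.0;
    }
    return y;
}

PYBIND11_MODULE(gil_demo, m) {
    m.def("heavy_compute", &heavy_compute, "Runs a C++ loop with the GIL released");
}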


@@ -177,9 +177,12 @@ section.
may be explicitly (re-)thrown to delegate it to the other,
previously-declared existing exception translators.
- Note that ``libc++`` and ``libstdc++`` `behave differently <https://stackoverflow.com/questions/19496643/using-clang-fvisibility-hidden-and-typeinfo-and-type-erasure/28827430>`_
- with ``-fvisibility=hidden``. Therefore exceptions that are used across ABI boundaries need to be explicitly exported, as exercised in ``tests/test_exceptions.h``.
- See also: "Problems with C++ exceptions" under `GCC Wiki <https://gcc.gnu.org/wiki/Visibility>`_.
+ Note that ``libc++`` and ``libstdc++`` `behave differently under macOS
+ <https://stackoverflow.com/questions/19496643/using-clang-fvisibility-hidden-and-typeinfo-and-type-erasure/28827430>`_
+ with ``-fvisibility=hidden``. Therefore exceptions that are used across ABI
+ boundaries need to be explicitly exported, as exercised in
+ ``tests/test_exceptions.h``. See also:
+ "Problems with C++ exceptions" under `GCC Wiki <https://gcc.gnu.org/wiki/Visibility>`_.
Local vs Global Exception Translators Local vs Global Exception Translators


@@ -10,12 +10,147 @@ Changes will be added here periodically from the "Suggested changelog entry"
block in pull request descriptions.
IN DEVELOPMENT
--------------
Changes will be summarized here periodically.
Version 2.10.1 (Oct 31, 2022)
-----------------------------
This is the first version to fully support embedding the newly released Python 3.11.
Changes:
* Allow ``pybind11::capsule`` constructor to take null destructor pointers.
`#4221 <https://github.com/pybind/pybind11/pull/4221>`_
* ``embed.h`` was changed so that ``PYTHONPATH`` is used also with Python 3.11
(established behavior).
`#4119 <https://github.com/pybind/pybind11/pull/4119>`_
* A ``PYBIND11_SIMPLE_GIL_MANAGEMENT`` option was added (cmake, C++ define),
along with many additional tests in ``test_gil_scoped.py``. The option may be
useful to try when debugging GIL-related issues, to determine if the more
complex default implementation is or is not to blame. See #4216 for
background. WARNING: Please be careful to not create ODR violations when
using the option: everything that is linked together with mutual symbol
visibility needs to be rebuilt.
`#4216 <https://github.com/pybind/pybind11/pull/4216>`_
* ``PYBIND11_EXPORT_EXCEPTION`` was made non-empty only under macOS. This makes
Linux builds safer, and enables the removal of warning suppression pragmas for
Windows.
`#4298 <https://github.com/pybind/pybind11/pull/4298>`_
Bug fixes:
* Fixed a bug where ``UnicodeDecodeError`` was not propagated from various
``py::str`` ctors when decoding surrogate utf characters.
`#4294 <https://github.com/pybind/pybind11/pull/4294>`_
* Revert perfect forwarding for ``make_iterator``. This broke at least one
valid use case. May revisit later.
`#4234 <https://github.com/pybind/pybind11/pull/4234>`_
* Fix support for safe casts to ``void*`` (regression in 2.10.0).
`#4275 <https://github.com/pybind/pybind11/pull/4275>`_
* Fix ``char8_t`` support (regression in 2.9).
`#4278 <https://github.com/pybind/pybind11/pull/4278>`_
* Unicode surrogate character in Python exception message leads to process
termination in ``error_already_set::what()``.
`#4297 <https://github.com/pybind/pybind11/pull/4297>`_
* Fix MSVC 2019 v.1924 & C++14 mode error for ``overload_cast``.
`#4188 <https://github.com/pybind/pybind11/pull/4188>`_
* Make augmented assignment operators non-const for the object-api. Behavior
was previously broken for augmented assignment operators.
`#4065 <https://github.com/pybind/pybind11/pull/4065>`_
* Add proper error checking to C++ bindings for Python list append and insert.
`#4208 <https://github.com/pybind/pybind11/pull/4208>`_
* Work-around for Nvidia's CUDA nvcc compiler in versions 11.4.0 - 11.8.0.
`#4220 <https://github.com/pybind/pybind11/pull/4220>`_
* A workaround for PyPy was added in the ``py::error_already_set``
implementation, related to PR `#1895 <https://github.com/pybind/pybind11/pull/1895>`_
released with v2.10.0.
`#4079 <https://github.com/pybind/pybind11/pull/4079>`_
* Fixed compiler errors when C++23 ``std::forward_like`` is available.
`#4136 <https://github.com/pybind/pybind11/pull/4136>`_
* Properly raise exceptions in contains methods (like when an object is unhashable).
`#4209 <https://github.com/pybind/pybind11/pull/4209>`_
* Further improve another error in exception handling.
`#4232 <https://github.com/pybind/pybind11/pull/4232>`_
* ``get_local_internals()`` was made compatible with
``finalize_interpreter()``, fixing potential freezes during interpreter
finalization.
`#4192 <https://github.com/pybind/pybind11/pull/4192>`_
Performance and style:
* Reserve space in set and STL map casters if possible. This will prevent
unnecessary rehashing / resizing by knowing the number of keys ahead of time
for Python to C++ casting. This improvement will greatly speed up the casting
of large unordered maps and sets.
`#4194 <https://github.com/pybind/pybind11/pull/4194>`_
* GIL RAII scopes are non-copyable to avoid potential bugs.
`#4183 <https://github.com/pybind/pybind11/pull/4183>`_
* Explicitly default all relevant ctors for pytypes in the ``PYBIND11_OBJECT``
macros and enforce the clang-tidy checks ``modernize-use-equals-default`` in
macros as well.
`#4017 <https://github.com/pybind/pybind11/pull/4017>`_
* Optimize iterator advancement in C++ bindings.
`#4237 <https://github.com/pybind/pybind11/pull/4237>`_
* Use the modern ``PyObject_GenericGetDict`` and ``PyObject_GenericSetDict``
for handling dynamic attribute dictionaries.
`#4106 <https://github.com/pybind/pybind11/pull/4106>`_
* Document that users should use ``PYBIND11_NAMESPACE`` instead of using ``pybind11`` when
opening namespaces. Using namespace declarations and namespace qualification
remain the same as ``pybind11``. This is done to ensure consistent symbol
visibility.
`#4098 <https://github.com/pybind/pybind11/pull/4098>`_
* Mark ``detail::forward_like`` as constexpr.
`#4147 <https://github.com/pybind/pybind11/pull/4147>`_
* Optimize unpacking_collector when processing ``arg_v`` arguments.
`#4219 <https://github.com/pybind/pybind11/pull/4219>`_
* Optimize casting C++ object to ``None``.
`#4269 <https://github.com/pybind/pybind11/pull/4269>`_
Build system improvements:
* CMake: revert overwrite behavior, now opt-in with ``PYBIND11_PYTHONLIBS_OVERWRITE OFF``.
`#4195 <https://github.com/pybind/pybind11/pull/4195>`_
* Include a pkg-config file when installing pybind11, such as in the Python
package.
`#4077 <https://github.com/pybind/pybind11/pull/4077>`_
* Avoid stripping debug symbols when ``CMAKE_BUILD_TYPE`` is set to ``DEBUG``
instead of ``Debug``.
`#4078 <https://github.com/pybind/pybind11/pull/4078>`_
* Followup to `#3948 <https://github.com/pybind/pybind11/pull/3948>`_, fixing vcpkg again.
`#4123 <https://github.com/pybind/pybind11/pull/4123>`_
Version 2.10.0 (Jul 15, 2022)
-----------------------------


@@ -248,7 +248,7 @@ public:
return false;
}
static handle cast(T, return_value_policy /* policy */, handle /* parent */) {
- return none().inc_ref();
+ return none().release();
}
PYBIND11_TYPE_CASTER(T, const_name("None"));
};
@@ -291,7 +291,7 @@ public:
if (ptr) {
return capsule(ptr).release();
}
- return none().inc_ref();
+ return none().release();
}
template <typename T>
@@ -537,7 +537,7 @@ public:
static handle cast(const CharT *src, return_value_policy policy, handle parent) {
if (src == nullptr) {
- return pybind11::none().inc_ref();
+ return pybind11::none().release();
}
return StringCaster::cast(StringType(src), policy, parent);
}
@@ -1179,11 +1179,9 @@ enable_if_t<cast_is_temporary_value_reference<T>::value, T> cast_safe(object &&)
pybind11_fail("Internal error: cast_safe fallback invoked");
}
template <typename T>
- enable_if_t<std::is_same<void, intrinsic_t<T>>::value, void> cast_safe(object &&) {}
+ enable_if_t<std::is_void<T>::value, void> cast_safe(object &&) {}
template <typename T>
- enable_if_t<detail::none_of<cast_is_temporary_value_reference<T>,
-     std::is_same<void, intrinsic_t<T>>>::value,
-     T>
+ enable_if_t<detail::none_of<cast_is_temporary_value_reference<T>, std::is_void<T>>::value, T>
cast_safe(object &&o) {
return pybind11::cast<T>(std::move(o));
}
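For background on the inc_ref() to release() change above, a small, hedged sketch (the helper name is made up): py::none() already owns one reference to Python's None singleton, so release() simply hands that reference to the returned handle, while the previous none().inc_ref() achieved the same net result with an extra incref/decref pair, since the temporary py::none() object still dropped its own reference on destruction.

#include <pybind11/pybind11.h>
namespace py = pybind11;

// Hypothetical helper mirroring the caster pattern: return an owning handle to None.
py::handle make_none_handle() {
    // none() holds one reference; release() transfers ownership without another incref.
    return py::none().release();
}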


@@ -96,13 +96,10 @@
#endif
#if !defined(PYBIND11_EXPORT_EXCEPTION)
- # ifdef __MINGW32__
- // workaround for:
- // error: 'dllexport' implies default visibility, but xxx has already been declared with a
- // different visibility
- # define PYBIND11_EXPORT_EXCEPTION
- # else
+ # if defined(__apple_build_version__)
# define PYBIND11_EXPORT_EXCEPTION PYBIND11_EXPORT
+ # else
+ # define PYBIND11_EXPORT_EXCEPTION
# endif
#endif
@@ -205,11 +202,8 @@
# endif
#endif
- #if defined(__cpp_lib_char8_t) && __cpp_lib_char8_t >= 201811L
- # define PYBIND11_HAS_U8STRING
- #endif
#include <Python.h>
// Reminder: WITH_THREAD is always defined if PY_VERSION_HEX >= 0x03070000
#if PY_VERSION_HEX < 0x03060000
# error "PYTHON < 3.6 IS UNSUPPORTED. pybind11 v2.9 was the last to support Python 2 and 3.5."
#endif
@@ -233,6 +227,10 @@
# undef copysign
#endif
+ #if defined(PYPY_VERSION) && !defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
+ # define PYBIND11_SIMPLE_GIL_MANAGEMENT
+ #endif
#if defined(_MSC_VER)
# if defined(PYBIND11_DEBUG_MARKER)
# define _DEBUG
@@ -259,6 +257,11 @@
# endif
#endif
+ // Must be after including <version> or one of the other headers specified by the standard
+ #if defined(__cpp_lib_char8_t) && __cpp_lib_char8_t >= 201811L
+ # define PYBIND11_HAS_U8STRING
+ #endif
// #define PYBIND11_STR_LEGACY_PERMISSIVE
// If DEFINED, pybind11::str can hold PyUnicodeObject or PyBytesObject
// (probably surprising and never documented, but this was the
@@ -898,12 +901,6 @@ using expand_side_effects = bool[];
PYBIND11_NAMESPACE_END(detail)
- #if defined(_MSC_VER)
- # pragma warning(push)
- # pragma warning(disable : 4275)
- // warning C4275: An exported class was derived from a class that wasn't exported.
- // Can be ignored when derived from a STL class.
- #endif
/// C++ bindings of builtin Python exceptions
class PYBIND11_EXPORT_EXCEPTION builtin_exception : public std::runtime_error {
public:
@@ -911,9 +908,6 @@ public:
/// Set the error using the Python C API
virtual void set_error() const = 0;
};
- #if defined(_MSC_VER)
- # pragma warning(pop)
- #endif
#define PYBIND11_RUNTIME_EXCEPTION(name, type) \
class PYBIND11_EXPORT_EXCEPTION name : public builtin_exception { \
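As a hedged usage sketch of the macros above and of the exception-export advice in the documentation change earlier in this diff (the class, module, and function names here are invented, not taken from pybind11 or its tests): an exception type meant to cross module boundaries under -fvisibility=hidden can be marked with PYBIND11_EXPORT_EXCEPTION and mapped to a Python exception with py::register_exception.

#include <pybind11/pybind11.h>
#include <stdexcept>
namespace py = pybind11;

// Exported so that every extension module sees the same typeinfo for this type
// and can catch it reliably across shared-library boundaries.
class PYBIND11_EXPORT_EXCEPTION shared_error : public std::runtime_error {
public:
    using std::runtime_error::runtime_error;
};

PYBIND11_MODULE(shared_exc_demo, m) {
    py::register_exception<shared_error>(m, "SharedError");
    m.def("fail", [] { throw shared_error("raised from C++"); });
}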


@@ -9,6 +9,12 @@
#pragma once
+ #include "common.h"
+ #if defined(WITH_THREAD) && defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
+ # include "../gil.h"
+ #endif
#include "../pytypes.h"
#include <exception>
@@ -37,6 +43,8 @@ using ExceptionTranslator = void (*)(std::exception_ptr);
PYBIND11_NAMESPACE_BEGIN(detail)
+ constexpr const char *internals_function_record_capsule_name = "pybind11_function_record_capsule";
// Forward declarations
inline PyTypeObject *make_static_property_type();
inline PyTypeObject *make_default_metaclass();
@@ -49,7 +57,7 @@ inline PyObject *make_object_base_type(PyTypeObject *metaclass);
// `Py_LIMITED_API` anyway.
# if PYBIND11_INTERNALS_VERSION > 4
# define PYBIND11_TLS_KEY_REF Py_tss_t &
- # ifdef __GNUC__
+ # if defined(__GNUC__) && !defined(__INTEL_COMPILER)
// Clang on macOS warns due to `Py_tss_NEEDS_INIT` not specifying an initializer
// for every field.
# define PYBIND11_TLS_KEY_INIT(var) \
@@ -169,11 +177,23 @@ struct internals {
PyTypeObject *default_metaclass;
PyObject *instance_base;
#if defined(WITH_THREAD)
+ // Unused if PYBIND11_SIMPLE_GIL_MANAGEMENT is defined:
PYBIND11_TLS_KEY_INIT(tstate)
# if PYBIND11_INTERNALS_VERSION > 4
PYBIND11_TLS_KEY_INIT(loader_life_support_tls_key)
# endif // PYBIND11_INTERNALS_VERSION > 4
+ // Unused if PYBIND11_SIMPLE_GIL_MANAGEMENT is defined:
PyInterpreterState *istate = nullptr;
+ # if PYBIND11_INTERNALS_VERSION > 4
+ // Note that we have to use a std::string to allocate memory to ensure a unique address
+ // We want unique addresses since we use pointer equality to compare function records
+ std::string function_record_capsule_name = internals_function_record_capsule_name;
+ # endif
+ internals() = default;
+ internals(const internals &other) = delete;
+ internals &operator=(const internals &other) = delete;
~internals() {
# if PYBIND11_INTERNALS_VERSION > 4
PYBIND11_TLS_FREE(loader_life_support_tls_key);
@@ -408,6 +428,10 @@ PYBIND11_NOINLINE internals &get_internals() {
return **internals_pp;
}
+ #if defined(WITH_THREAD)
+ # if defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
+ gil_scoped_acquire gil;
+ # else
// Ensure that the GIL is held since we will need to make Python calls.
// Cannot use py::gil_scoped_acquire here since that constructor calls get_internals.
struct gil_scoped_acquire_local {
@@ -417,6 +441,8 @@ PYBIND11_NOINLINE internals &get_internals() {
~gil_scoped_acquire_local() { PyGILState_Release(state); }
const PyGILState_STATE state;
} gil;
+ # endif
+ #endif
error_scope err_scope;
PYBIND11_STR_TYPE id(PYBIND11_INTERNALS_ID);
@@ -534,6 +560,25 @@ const char *c_str(Args &&...args) {
return strings.front().c_str();
}
inline const char *get_function_record_capsule_name() {
#if PYBIND11_INTERNALS_VERSION > 4
return get_internals().function_record_capsule_name.c_str();
#else
return nullptr;
#endif
}
// Determine whether or not the following capsule contains a pybind11 function record.
// Note that we use `internals` to make sure that only ABI compatible records are touched.
//
// This check is currently used in two places:
// - An important optimization in functional.h to avoid overhead in C++ -> Python -> C++
// - The sibling feature of cpp_function to allow overloads
inline bool is_function_record_capsule(const capsule &cap) {
// Pointer equality as we rely on internals() to ensure unique pointers
return cap.name() == get_function_record_capsule_name();
}
PYBIND11_NAMESPACE_END(detail)
/// Returns a named pointer that is shared among all extension modules (using the same
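To make the pointer-equality trick used by is_function_record_capsule concrete, here is a small, hedged sketch that is not part of pybind11 itself (the tag and helper names are invented): a capsule's name() returns the exact const char* it was created with, so comparing pointers rather than string contents identifies capsules built against one specific name object.

#include <pybind11/pybind11.h>
#include <string>
namespace py = pybind11;

// A std::string guarantees a stable, unique character-array address to compare against.
static const std::string my_tag = "my_capsule_tag";

// Ownership of payload stays with the caller in this sketch (no destructor attached).
py::capsule make_tagged(int *payload) {
    return py::capsule(payload, my_tag.c_str());
}

bool is_tagged(const py::capsule &cap) {
    return cap.name() == my_tag.c_str();  // pointer comparison, not strcmp
}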


@@ -9,705 +9,4 @@
#pragma once
- /* HINT: To suppress warnings originating from the Eigen headers, use -isystem.
+ #include "eigen/matrix.h"
See also:
https://stackoverflow.com/questions/2579576/i-dir-vs-isystem-dir
https://stackoverflow.com/questions/1741816/isystem-for-ms-visual-studio-c-compiler
*/
#include "numpy.h"
// The C4127 suppression was introduced for Eigen 3.4.0. In theory we could
// make it version specific, or even remove it later, but considering that
// 1. C4127 is generally far more distracting than useful for modern template code, and
// 2. we definitely want to ignore any MSVC warnings originating from Eigen code,
// it is probably best to keep this around indefinitely.
#if defined(_MSC_VER)
# pragma warning(push)
# pragma warning(disable : 4127) // C4127: conditional expression is constant
# pragma warning(disable : 5054) // https://github.com/pybind/pybind11/pull/3741
// C5054: operator '&': deprecated between enumerations of different types
#elif defined(__MINGW32__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
#include <Eigen/Core>
#include <Eigen/SparseCore>
#if defined(_MSC_VER)
# pragma warning(pop)
#elif defined(__MINGW32__)
# pragma GCC diagnostic pop
#endif
// Eigen prior to 3.2.7 doesn't have proper move constructors--but worse, some classes get implicit
// move constructors that break things. We could detect this an explicitly copy, but an extra copy
// of matrices seems highly undesirable.
static_assert(EIGEN_VERSION_AT_LEAST(3, 2, 7),
"Eigen support in pybind11 requires Eigen >= 3.2.7");
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
// Provide a convenience alias for easier pass-by-ref usage with fully dynamic strides:
using EigenDStride = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
template <typename MatrixType>
using EigenDRef = Eigen::Ref<MatrixType, 0, EigenDStride>;
template <typename MatrixType>
using EigenDMap = Eigen::Map<MatrixType, 0, EigenDStride>;
PYBIND11_NAMESPACE_BEGIN(detail)
#if EIGEN_VERSION_AT_LEAST(3, 3, 0)
using EigenIndex = Eigen::Index;
template <typename Scalar, int Flags, typename StorageIndex>
using EigenMapSparseMatrix = Eigen::Map<Eigen::SparseMatrix<Scalar, Flags, StorageIndex>>;
#else
using EigenIndex = EIGEN_DEFAULT_DENSE_INDEX_TYPE;
template <typename Scalar, int Flags, typename StorageIndex>
using EigenMapSparseMatrix = Eigen::MappedSparseMatrix<Scalar, Flags, StorageIndex>;
#endif
// Matches Eigen::Map, Eigen::Ref, blocks, etc:
template <typename T>
using is_eigen_dense_map = all_of<is_template_base_of<Eigen::DenseBase, T>,
std::is_base_of<Eigen::MapBase<T, Eigen::ReadOnlyAccessors>, T>>;
template <typename T>
using is_eigen_mutable_map = std::is_base_of<Eigen::MapBase<T, Eigen::WriteAccessors>, T>;
template <typename T>
using is_eigen_dense_plain
= all_of<negation<is_eigen_dense_map<T>>, is_template_base_of<Eigen::PlainObjectBase, T>>;
template <typename T>
using is_eigen_sparse = is_template_base_of<Eigen::SparseMatrixBase, T>;
// Test for objects inheriting from EigenBase<Derived> that aren't captured by the above. This
// basically covers anything that can be assigned to a dense matrix but that don't have a typical
// matrix data layout that can be copied from their .data(). For example, DiagonalMatrix and
// SelfAdjointView fall into this category.
template <typename T>
using is_eigen_other
= all_of<is_template_base_of<Eigen::EigenBase, T>,
negation<any_of<is_eigen_dense_map<T>, is_eigen_dense_plain<T>, is_eigen_sparse<T>>>>;
// Captures numpy/eigen conformability status (returned by EigenProps::conformable()):
template <bool EigenRowMajor>
struct EigenConformable {
bool conformable = false;
EigenIndex rows = 0, cols = 0;
EigenDStride stride{0, 0}; // Only valid if negativestrides is false!
bool negativestrides = false; // If true, do not use stride!
// NOLINTNEXTLINE(google-explicit-constructor)
EigenConformable(bool fits = false) : conformable{fits} {}
// Matrix type:
EigenConformable(EigenIndex r, EigenIndex c, EigenIndex rstride, EigenIndex cstride)
: conformable{true}, rows{r}, cols{c},
// TODO: when Eigen bug #747 is fixed, remove the tests for non-negativity.
// http://eigen.tuxfamily.org/bz/show_bug.cgi?id=747
stride{EigenRowMajor ? (rstride > 0 ? rstride : 0)
: (cstride > 0 ? cstride : 0) /* outer stride */,
EigenRowMajor ? (cstride > 0 ? cstride : 0)
: (rstride > 0 ? rstride : 0) /* inner stride */},
negativestrides{rstride < 0 || cstride < 0} {}
// Vector type:
EigenConformable(EigenIndex r, EigenIndex c, EigenIndex stride)
: EigenConformable(r, c, r == 1 ? c * stride : stride, c == 1 ? r : r * stride) {}
template <typename props>
bool stride_compatible() const {
// To have compatible strides, we need (on both dimensions) one of fully dynamic strides,
// matching strides, or a dimension size of 1 (in which case the stride value is
// irrelevant). Alternatively, if any dimension size is 0, the strides are not relevant
// (and numpy ≥ 1.23 sets the strides to 0 in that case, so we need to check explicitly).
if (negativestrides) {
return false;
}
if (rows == 0 || cols == 0) {
return true;
}
return (props::inner_stride == Eigen::Dynamic || props::inner_stride == stride.inner()
|| (EigenRowMajor ? cols : rows) == 1)
&& (props::outer_stride == Eigen::Dynamic || props::outer_stride == stride.outer()
|| (EigenRowMajor ? rows : cols) == 1);
}
// NOLINTNEXTLINE(google-explicit-constructor)
operator bool() const { return conformable; }
};
template <typename Type>
struct eigen_extract_stride {
using type = Type;
};
template <typename PlainObjectType, int MapOptions, typename StrideType>
struct eigen_extract_stride<Eigen::Map<PlainObjectType, MapOptions, StrideType>> {
using type = StrideType;
};
template <typename PlainObjectType, int Options, typename StrideType>
struct eigen_extract_stride<Eigen::Ref<PlainObjectType, Options, StrideType>> {
using type = StrideType;
};
// Helper struct for extracting information from an Eigen type
template <typename Type_>
struct EigenProps {
using Type = Type_;
using Scalar = typename Type::Scalar;
using StrideType = typename eigen_extract_stride<Type>::type;
static constexpr EigenIndex rows = Type::RowsAtCompileTime, cols = Type::ColsAtCompileTime,
size = Type::SizeAtCompileTime;
static constexpr bool row_major = Type::IsRowMajor,
vector
= Type::IsVectorAtCompileTime, // At least one dimension has fixed size 1
fixed_rows = rows != Eigen::Dynamic, fixed_cols = cols != Eigen::Dynamic,
fixed = size != Eigen::Dynamic, // Fully-fixed size
dynamic = !fixed_rows && !fixed_cols; // Fully-dynamic size
template <EigenIndex i, EigenIndex ifzero>
using if_zero = std::integral_constant<EigenIndex, i == 0 ? ifzero : i>;
static constexpr EigenIndex inner_stride
= if_zero<StrideType::InnerStrideAtCompileTime, 1>::value,
outer_stride = if_zero < StrideType::OuterStrideAtCompileTime,
vector ? size
: row_major ? cols
: rows > ::value;
static constexpr bool dynamic_stride
= inner_stride == Eigen::Dynamic && outer_stride == Eigen::Dynamic;
static constexpr bool requires_row_major
= !dynamic_stride && !vector && (row_major ? inner_stride : outer_stride) == 1;
static constexpr bool requires_col_major
= !dynamic_stride && !vector && (row_major ? outer_stride : inner_stride) == 1;
// Takes an input array and determines whether we can make it fit into the Eigen type. If
// the array is a vector, we attempt to fit it into either an Eigen 1xN or Nx1 vector
// (preferring the latter if it will fit in either, i.e. for a fully dynamic matrix type).
static EigenConformable<row_major> conformable(const array &a) {
const auto dims = a.ndim();
if (dims < 1 || dims > 2) {
return false;
}
if (dims == 2) { // Matrix type: require exact match (or dynamic)
EigenIndex np_rows = a.shape(0), np_cols = a.shape(1),
np_rstride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar)),
np_cstride = a.strides(1) / static_cast<ssize_t>(sizeof(Scalar));
if ((PYBIND11_SILENCE_MSVC_C4127(fixed_rows) && np_rows != rows)
|| (PYBIND11_SILENCE_MSVC_C4127(fixed_cols) && np_cols != cols)) {
return false;
}
return {np_rows, np_cols, np_rstride, np_cstride};
}
// Otherwise we're storing an n-vector. Only one of the strides will be used, but
// whichever is used, we want the (single) numpy stride value.
const EigenIndex n = a.shape(0),
stride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar));
if (vector) { // Eigen type is a compile-time vector
if (PYBIND11_SILENCE_MSVC_C4127(fixed) && size != n) {
return false; // Vector size mismatch
}
return {rows == 1 ? 1 : n, cols == 1 ? 1 : n, stride};
}
if (fixed) {
// The type has a fixed size, but is not a vector: abort
return false;
}
if (fixed_cols) {
// Since this isn't a vector, cols must be != 1. We allow this only if it exactly
// equals the number of elements (rows is Dynamic, and so 1 row is allowed).
if (cols != n) {
return false;
}
return {1, n, stride};
} // Otherwise it's either fully dynamic, or column dynamic; both become a column vector
if (PYBIND11_SILENCE_MSVC_C4127(fixed_rows) && rows != n) {
return false;
}
return {n, 1, stride};
}
static constexpr bool show_writeable
= is_eigen_dense_map<Type>::value && is_eigen_mutable_map<Type>::value;
static constexpr bool show_order = is_eigen_dense_map<Type>::value;
static constexpr bool show_c_contiguous = show_order && requires_row_major;
static constexpr bool show_f_contiguous
= !show_c_contiguous && show_order && requires_col_major;
static constexpr auto descriptor
= const_name("numpy.ndarray[") + npy_format_descriptor<Scalar>::name + const_name("[")
+ const_name<fixed_rows>(const_name<(size_t) rows>(), const_name("m")) + const_name(", ")
+ const_name<fixed_cols>(const_name<(size_t) cols>(), const_name("n")) + const_name("]")
+
// For a reference type (e.g. Ref<MatrixXd>) we have other constraints that might need to
// be satisfied: writeable=True (for a mutable reference), and, depending on the map's
// stride options, possibly f_contiguous or c_contiguous. We include them in the
// descriptor output to provide some hint as to why a TypeError is occurring (otherwise
// it can be confusing to see that a function accepts a 'numpy.ndarray[float64[3,2]]' and
// an error message that you *gave* a numpy.ndarray of the right type and dimensions.
const_name<show_writeable>(", flags.writeable", "")
+ const_name<show_c_contiguous>(", flags.c_contiguous", "")
+ const_name<show_f_contiguous>(", flags.f_contiguous", "") + const_name("]");
};
// Casts an Eigen type to numpy array. If given a base, the numpy array references the src data,
// otherwise it'll make a copy. writeable lets you turn off the writeable flag for the array.
template <typename props>
handle
eigen_array_cast(typename props::Type const &src, handle base = handle(), bool writeable = true) {
constexpr ssize_t elem_size = sizeof(typename props::Scalar);
array a;
if (props::vector) {
a = array({src.size()}, {elem_size * src.innerStride()}, src.data(), base);
} else {
a = array({src.rows(), src.cols()},
{elem_size * src.rowStride(), elem_size * src.colStride()},
src.data(),
base);
}
if (!writeable) {
array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
}
return a.release();
}
// Takes an lvalue ref to some Eigen type and a (python) base object, creating a numpy array that
// reference the Eigen object's data with `base` as the python-registered base class (if omitted,
// the base will be set to None, and lifetime management is up to the caller). The numpy array is
// non-writeable if the given type is const.
template <typename props, typename Type>
handle eigen_ref_array(Type &src, handle parent = none()) {
// none here is to get past array's should-we-copy detection, which currently always
// copies when there is no base. Setting the base to None should be harmless.
return eigen_array_cast<props>(src, parent, !std::is_const<Type>::value);
}
// Takes a pointer to some dense, plain Eigen type, builds a capsule around it, then returns a
// numpy array that references the encapsulated data with a python-side reference to the capsule to
// tie its destruction to that of any dependent python objects. Const-ness is determined by
// whether or not the Type of the pointer given is const.
template <typename props, typename Type, typename = enable_if_t<is_eigen_dense_plain<Type>::value>>
handle eigen_encapsulate(Type *src) {
capsule base(src, [](void *o) { delete static_cast<Type *>(o); });
return eigen_ref_array<props>(*src, base);
}
// Type caster for regular, dense matrix types (e.g. MatrixXd), but not maps/refs/etc. of dense
// types.
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_dense_plain<Type>::value>> {
using Scalar = typename Type::Scalar;
using props = EigenProps<Type>;
bool load(handle src, bool convert) {
// If we're in no-convert mode, only load if given an array of the correct type
if (!convert && !isinstance<array_t<Scalar>>(src)) {
return false;
}
// Coerce into an array, but don't do type conversion yet; the copy below handles it.
auto buf = array::ensure(src);
if (!buf) {
return false;
}
auto dims = buf.ndim();
if (dims < 1 || dims > 2) {
return false;
}
auto fits = props::conformable(buf);
if (!fits) {
return false;
}
// Allocate the new type, then build a numpy reference into it
value = Type(fits.rows, fits.cols);
auto ref = reinterpret_steal<array>(eigen_ref_array<props>(value));
if (dims == 1) {
ref = ref.squeeze();
} else if (ref.ndim() == 1) {
buf = buf.squeeze();
}
int result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr());
if (result < 0) { // Copy failed!
PyErr_Clear();
return false;
}
return true;
}
private:
// Cast implementation
template <typename CType>
static handle cast_impl(CType *src, return_value_policy policy, handle parent) {
switch (policy) {
case return_value_policy::take_ownership:
case return_value_policy::automatic:
return eigen_encapsulate<props>(src);
case return_value_policy::move:
return eigen_encapsulate<props>(new CType(std::move(*src)));
case return_value_policy::copy:
return eigen_array_cast<props>(*src);
case return_value_policy::reference:
case return_value_policy::automatic_reference:
return eigen_ref_array<props>(*src);
case return_value_policy::reference_internal:
return eigen_ref_array<props>(*src, parent);
default:
throw cast_error("unhandled return_value_policy: should not happen!");
};
}
public:
// Normal returned non-reference, non-const value:
static handle cast(Type &&src, return_value_policy /* policy */, handle parent) {
return cast_impl(&src, return_value_policy::move, parent);
}
// If you return a non-reference const, we mark the numpy array readonly:
static handle cast(const Type &&src, return_value_policy /* policy */, handle parent) {
return cast_impl(&src, return_value_policy::move, parent);
}
// lvalue reference return; default (automatic) becomes copy
static handle cast(Type &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast_impl(&src, policy, parent);
}
// const lvalue reference return; default (automatic) becomes copy
static handle cast(const Type &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast(&src, policy, parent);
}
// non-const pointer return
static handle cast(Type *src, return_value_policy policy, handle parent) {
return cast_impl(src, policy, parent);
}
// const pointer return
static handle cast(const Type *src, return_value_policy policy, handle parent) {
return cast_impl(src, policy, parent);
}
static constexpr auto name = props::descriptor;
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type *() { return &value; }
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type &() { return value; }
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type &&() && { return std::move(value); }
template <typename T>
using cast_op_type = movable_cast_op_type<T>;
private:
Type value;
};
// Base class for casting reference/map/block/etc. objects back to python.
template <typename MapType>
struct eigen_map_caster {
private:
using props = EigenProps<MapType>;
public:
// Directly referencing a ref/map's data is a bit dangerous (whatever the map/ref points to has
// to stay around), but we'll allow it under the assumption that you know what you're doing
// (and have an appropriate keep_alive in place). We return a numpy array pointing directly at
// the ref's data (The numpy array ends up read-only if the ref was to a const matrix type.)
// Note that this means you need to ensure you don't destroy the object in some other way (e.g.
// with an appropriate keep_alive, or with a reference to a statically allocated matrix).
static handle cast(const MapType &src, return_value_policy policy, handle parent) {
switch (policy) {
case return_value_policy::copy:
return eigen_array_cast<props>(src);
case return_value_policy::reference_internal:
return eigen_array_cast<props>(src, parent, is_eigen_mutable_map<MapType>::value);
case return_value_policy::reference:
case return_value_policy::automatic:
case return_value_policy::automatic_reference:
return eigen_array_cast<props>(src, none(), is_eigen_mutable_map<MapType>::value);
default:
// move, take_ownership don't make any sense for a ref/map:
pybind11_fail("Invalid return_value_policy for Eigen Map/Ref/Block type");
}
}
static constexpr auto name = props::descriptor;
// Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return
// types but not bound arguments). We still provide them (with an explicitly delete) so that
// you end up here if you try anyway.
bool load(handle, bool) = delete;
operator MapType() = delete;
template <typename>
using cast_op_type = MapType;
};
// We can return any map-like object (but can only load Refs, specialized next):
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_dense_map<Type>::value>> : eigen_map_caster<Type> {};
// Loader for Ref<...> arguments. See the documentation for info on how to make this work without
// copying (it requires some extra effort in many cases).
template <typename PlainObjectType, typename StrideType>
struct type_caster<
Eigen::Ref<PlainObjectType, 0, StrideType>,
enable_if_t<is_eigen_dense_map<Eigen::Ref<PlainObjectType, 0, StrideType>>::value>>
: public eigen_map_caster<Eigen::Ref<PlainObjectType, 0, StrideType>> {
private:
using Type = Eigen::Ref<PlainObjectType, 0, StrideType>;
using props = EigenProps<Type>;
using Scalar = typename props::Scalar;
using MapType = Eigen::Map<PlainObjectType, 0, StrideType>;
using Array
= array_t<Scalar,
array::forcecast
| ((props::row_major ? props::inner_stride : props::outer_stride) == 1
? array::c_style
: (props::row_major ? props::outer_stride : props::inner_stride) == 1
? array::f_style
: 0)>;
static constexpr bool need_writeable = is_eigen_mutable_map<Type>::value;
// Delay construction (these have no default constructor)
std::unique_ptr<MapType> map;
std::unique_ptr<Type> ref;
// Our array. When possible, this is just a numpy array pointing to the source data, but
// sometimes we can't avoid copying (e.g. input is not a numpy array at all, has an
// incompatible layout, or is an array of a type that needs to be converted). Using a numpy
// temporary (rather than an Eigen temporary) saves an extra copy when we need both type
// conversion and storage order conversion. (Note that we refuse to use this temporary copy
// when loading an argument for a Ref<M> with M non-const, i.e. a read-write reference).
Array copy_or_ref;
public:
bool load(handle src, bool convert) {
// First check whether what we have is already an array of the right type. If not, we
// can't avoid a copy (because the copy is also going to do type conversion).
bool need_copy = !isinstance<Array>(src);
EigenConformable<props::row_major> fits;
if (!need_copy) {
// We don't need a converting copy, but we also need to check whether the strides are
// compatible with the Ref's stride requirements
auto aref = reinterpret_borrow<Array>(src);
if (aref && (!need_writeable || aref.writeable())) {
fits = props::conformable(aref);
if (!fits) {
return false; // Incompatible dimensions
}
if (!fits.template stride_compatible<props>()) {
need_copy = true;
} else {
copy_or_ref = std::move(aref);
}
} else {
need_copy = true;
}
}
if (need_copy) {
// We need to copy: If we need a mutable reference, or we're not supposed to convert
// (either because we're in the no-convert overload pass, or because we're explicitly
// instructed not to copy (via `py::arg().noconvert()`) we have to fail loading.
if (!convert || need_writeable) {
return false;
}
Array copy = Array::ensure(src);
if (!copy) {
return false;
}
fits = props::conformable(copy);
if (!fits || !fits.template stride_compatible<props>()) {
return false;
}
copy_or_ref = std::move(copy);
loader_life_support::add_patient(copy_or_ref);
}
ref.reset();
map.reset(new MapType(data(copy_or_ref),
fits.rows,
fits.cols,
make_stride(fits.stride.outer(), fits.stride.inner())));
ref.reset(new Type(*map));
return true;
}
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type *() { return ref.get(); }
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type &() { return *ref; }
template <typename _T>
using cast_op_type = pybind11::detail::cast_op_type<_T>;
private:
template <typename T = Type, enable_if_t<is_eigen_mutable_map<T>::value, int> = 0>
Scalar *data(Array &a) {
return a.mutable_data();
}
template <typename T = Type, enable_if_t<!is_eigen_mutable_map<T>::value, int> = 0>
const Scalar *data(Array &a) {
return a.data();
}
// Attempt to figure out a constructor of `Stride` that will work.
// If both strides are fixed, use a default constructor:
template <typename S>
using stride_ctor_default = bool_constant<S::InnerStrideAtCompileTime != Eigen::Dynamic
&& S::OuterStrideAtCompileTime != Eigen::Dynamic
&& std::is_default_constructible<S>::value>;
// Otherwise, if there is a two-index constructor, assume it is (outer,inner) like
// Eigen::Stride, and use it:
template <typename S>
using stride_ctor_dual
= bool_constant<!stride_ctor_default<S>::value
&& std::is_constructible<S, EigenIndex, EigenIndex>::value>;
// Otherwise, if there is a one-index constructor, and just one of the strides is dynamic, use
// it (passing whichever stride is dynamic).
template <typename S>
using stride_ctor_outer
= bool_constant<!any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value
&& S::OuterStrideAtCompileTime == Eigen::Dynamic
&& S::InnerStrideAtCompileTime != Eigen::Dynamic
&& std::is_constructible<S, EigenIndex>::value>;
template <typename S>
using stride_ctor_inner
= bool_constant<!any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value
&& S::InnerStrideAtCompileTime == Eigen::Dynamic
&& S::OuterStrideAtCompileTime != Eigen::Dynamic
&& std::is_constructible<S, EigenIndex>::value>;
template <typename S = StrideType, enable_if_t<stride_ctor_default<S>::value, int> = 0>
static S make_stride(EigenIndex, EigenIndex) {
return S();
}
template <typename S = StrideType, enable_if_t<stride_ctor_dual<S>::value, int> = 0>
static S make_stride(EigenIndex outer, EigenIndex inner) {
return S(outer, inner);
}
template <typename S = StrideType, enable_if_t<stride_ctor_outer<S>::value, int> = 0>
static S make_stride(EigenIndex outer, EigenIndex) {
return S(outer);
}
template <typename S = StrideType, enable_if_t<stride_ctor_inner<S>::value, int> = 0>
static S make_stride(EigenIndex, EigenIndex inner) {
return S(inner);
}
};
// type_caster for special matrix types (e.g. DiagonalMatrix), which are EigenBase, but not
// EigenDense (i.e. they don't have a data(), at least not with the usual matrix layout).
// load() is not supported, but we can cast them into the python domain by first copying to a
// regular Eigen::Matrix, then casting that.
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_other<Type>::value>> {
protected:
using Matrix
= Eigen::Matrix<typename Type::Scalar, Type::RowsAtCompileTime, Type::ColsAtCompileTime>;
using props = EigenProps<Matrix>;
public:
static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
handle h = eigen_encapsulate<props>(new Matrix(src));
return h;
}
static handle cast(const Type *src, return_value_policy policy, handle parent) {
return cast(*src, policy, parent);
}
static constexpr auto name = props::descriptor;
// Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return
// types but not bound arguments). We still provide them (with an explicitly delete) so that
// you end up here if you try anyway.
bool load(handle, bool) = delete;
operator Type() = delete;
template <typename>
using cast_op_type = Type;
};
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_sparse<Type>::value>> {
using Scalar = typename Type::Scalar;
using StorageIndex = remove_reference_t<decltype(*std::declval<Type>().outerIndexPtr())>;
using Index = typename Type::Index;
static constexpr bool rowMajor = Type::IsRowMajor;
bool load(handle src, bool) {
if (!src) {
return false;
}
auto obj = reinterpret_borrow<object>(src);
object sparse_module = module_::import("scipy.sparse");
object matrix_type = sparse_module.attr(rowMajor ? "csr_matrix" : "csc_matrix");
if (!type::handle_of(obj).is(matrix_type)) {
try {
obj = matrix_type(obj);
} catch (const error_already_set &) {
return false;
}
}
auto values = array_t<Scalar>((object) obj.attr("data"));
auto innerIndices = array_t<StorageIndex>((object) obj.attr("indices"));
auto outerIndices = array_t<StorageIndex>((object) obj.attr("indptr"));
auto shape = pybind11::tuple((pybind11::object) obj.attr("shape"));
auto nnz = obj.attr("nnz").cast<Index>();
if (!values || !innerIndices || !outerIndices) {
return false;
}
value = EigenMapSparseMatrix<Scalar,
Type::Flags &(Eigen::RowMajor | Eigen::ColMajor),
StorageIndex>(shape[0].cast<Index>(),
shape[1].cast<Index>(),
std::move(nnz),
outerIndices.mutable_data(),
innerIndices.mutable_data(),
values.mutable_data());
return true;
}
static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
const_cast<Type &>(src).makeCompressed();
object matrix_type
= module_::import("scipy.sparse").attr(rowMajor ? "csr_matrix" : "csc_matrix");
array data(src.nonZeros(), src.valuePtr());
array outerIndices((rowMajor ? src.rows() : src.cols()) + 1, src.outerIndexPtr());
array innerIndices(src.nonZeros(), src.innerIndexPtr());
return matrix_type(pybind11::make_tuple(
std::move(data), std::move(innerIndices), std::move(outerIndices)),
pybind11::make_tuple(src.rows(), src.cols()))
.release();
}
PYBIND11_TYPE_CASTER(Type,
const_name<(Type::IsRowMajor) != 0>("scipy.sparse.csr_matrix[",
"scipy.sparse.csc_matrix[")
+ npy_format_descriptor<Scalar>::name + const_name("]"));
};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
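Tying the Ref<...> loader described in the (re)moved caster code above to user-facing bindings, a hedged sketch follows (the module and function names are invented, not taken from the pybind11 tests): a writable Eigen::Ref argument can view the caller's numpy buffer in place, while a const Ref may fall back to a converting copy; py::arg().noconvert() forbids that silent copy for the writable overload.

#include <pybind11/eigen.h>
#include <pybind11/pybind11.h>
namespace py = pybind11;

// Mutates the caller's numpy array when its dtype and strides are compatible.
void scale_in_place(Eigen::Ref<Eigen::VectorXd> v, double factor) { v *= factor; }

// Read-only access; pybind11 may copy/convert the input if needed.
double sum_of(const Eigen::Ref<const Eigen::VectorXd> &v) { return v.sum(); }

PYBIND11_MODULE(eigen_demo, m) {
    m.def("scale_in_place", &scale_in_place, py::arg("v").noconvert(), py::arg("factor"));
    m.def("sum_of", &sum_of, py::arg("v"));
}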

View File

@ -0,0 +1,712 @@
/*
pybind11/eigen/matrix.h: Transparent conversion for dense and sparse Eigen matrices
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "../numpy.h"
/* HINT: To suppress warnings originating from the Eigen headers, use -isystem.
See also:
https://stackoverflow.com/questions/2579576/i-dir-vs-isystem-dir
https://stackoverflow.com/questions/1741816/isystem-for-ms-visual-studio-c-compiler
*/
// The C4127 suppression was introduced for Eigen 3.4.0. In theory we could
// make it version specific, or even remove it later, but considering that
// 1. C4127 is generally far more distracting than useful for modern template code, and
// 2. we definitely want to ignore any MSVC warnings originating from Eigen code,
// it is probably best to keep this around indefinitely.
#if defined(_MSC_VER)
# pragma warning(push)
# pragma warning(disable : 4127) // C4127: conditional expression is constant
# pragma warning(disable : 5054) // https://github.com/pybind/pybind11/pull/3741
// C5054: operator '&': deprecated between enumerations of different types
#elif defined(__MINGW32__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
#include <Eigen/Core>
#include <Eigen/SparseCore>
#if defined(_MSC_VER)
# pragma warning(pop)
#elif defined(__MINGW32__)
# pragma GCC diagnostic pop
#endif
// Eigen prior to 3.2.7 doesn't have proper move constructors--but worse, some classes get implicit
// move constructors that break things. We could detect this and explicitly copy, but an extra copy
// of matrices seems highly undesirable.
static_assert(EIGEN_VERSION_AT_LEAST(3, 2, 7),
"Eigen matrix support in pybind11 requires Eigen >= 3.2.7");
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
// Provide a convenience alias for easier pass-by-ref usage with fully dynamic strides:
using EigenDStride = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
template <typename MatrixType>
using EigenDRef = Eigen::Ref<MatrixType, 0, EigenDStride>;
template <typename MatrixType>
using EigenDMap = Eigen::Map<MatrixType, 0, EigenDStride>;
PYBIND11_NAMESPACE_BEGIN(detail)
#if EIGEN_VERSION_AT_LEAST(3, 3, 0)
using EigenIndex = Eigen::Index;
template <typename Scalar, int Flags, typename StorageIndex>
using EigenMapSparseMatrix = Eigen::Map<Eigen::SparseMatrix<Scalar, Flags, StorageIndex>>;
#else
using EigenIndex = EIGEN_DEFAULT_DENSE_INDEX_TYPE;
template <typename Scalar, int Flags, typename StorageIndex>
using EigenMapSparseMatrix = Eigen::MappedSparseMatrix<Scalar, Flags, StorageIndex>;
#endif
// Matches Eigen::Map, Eigen::Ref, blocks, etc:
template <typename T>
using is_eigen_dense_map = all_of<is_template_base_of<Eigen::DenseBase, T>,
std::is_base_of<Eigen::MapBase<T, Eigen::ReadOnlyAccessors>, T>>;
template <typename T>
using is_eigen_mutable_map = std::is_base_of<Eigen::MapBase<T, Eigen::WriteAccessors>, T>;
template <typename T>
using is_eigen_dense_plain
= all_of<negation<is_eigen_dense_map<T>>, is_template_base_of<Eigen::PlainObjectBase, T>>;
template <typename T>
using is_eigen_sparse = is_template_base_of<Eigen::SparseMatrixBase, T>;
// Test for objects inheriting from EigenBase<Derived> that aren't captured by the above. This
// basically covers anything that can be assigned to a dense matrix but that doesn't have a
// typical matrix data layout that can be copied from its .data(). For example, DiagonalMatrix and
// SelfAdjointView fall into this category.
template <typename T>
using is_eigen_other
= all_of<is_template_base_of<Eigen::EigenBase, T>,
negation<any_of<is_eigen_dense_map<T>, is_eigen_dense_plain<T>, is_eigen_sparse<T>>>>;
// Captures numpy/eigen conformability status (returned by EigenProps::conformable()):
template <bool EigenRowMajor>
struct EigenConformable {
bool conformable = false;
EigenIndex rows = 0, cols = 0;
EigenDStride stride{0, 0}; // Only valid if negativestrides is false!
bool negativestrides = false; // If true, do not use stride!
// NOLINTNEXTLINE(google-explicit-constructor)
EigenConformable(bool fits = false) : conformable{fits} {}
// Matrix type:
EigenConformable(EigenIndex r, EigenIndex c, EigenIndex rstride, EigenIndex cstride)
: conformable{true}, rows{r}, cols{c},
// TODO: when Eigen bug #747 is fixed, remove the tests for non-negativity.
// http://eigen.tuxfamily.org/bz/show_bug.cgi?id=747
stride{EigenRowMajor ? (rstride > 0 ? rstride : 0)
: (cstride > 0 ? cstride : 0) /* outer stride */,
EigenRowMajor ? (cstride > 0 ? cstride : 0)
: (rstride > 0 ? rstride : 0) /* inner stride */},
negativestrides{rstride < 0 || cstride < 0} {}
// Vector type:
EigenConformable(EigenIndex r, EigenIndex c, EigenIndex stride)
: EigenConformable(r, c, r == 1 ? c * stride : stride, c == 1 ? r : r * stride) {}
template <typename props>
bool stride_compatible() const {
// To have compatible strides, we need (on both dimensions) one of fully dynamic strides,
// matching strides, or a dimension size of 1 (in which case the stride value is
// irrelevant). Alternatively, if any dimension size is 0, the strides are not relevant
// (and numpy ≥ 1.23 sets the strides to 0 in that case, so we need to check explicitly).
if (negativestrides) {
return false;
}
if (rows == 0 || cols == 0) {
return true;
}
return (props::inner_stride == Eigen::Dynamic || props::inner_stride == stride.inner()
|| (EigenRowMajor ? cols : rows) == 1)
&& (props::outer_stride == Eigen::Dynamic || props::outer_stride == stride.outer()
|| (EigenRowMajor ? rows : cols) == 1);
}
// NOLINTNEXTLINE(google-explicit-constructor)
operator bool() const { return conformable; }
};
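As a worked example of the bookkeeping above (numbers are illustrative): a C-contiguous 3x4 float64 array has byte strides (32, 8), so EigenProps::conformable() passes element strides rstride = 32/8 = 4 and cstride = 8/8 = 1 into this constructor. For a row-major Eigen target this stores stride = (outer 4, inner 1); for a column-major target it stores (outer 1, inner 4), which stride_compatible() then rejects for, e.g., a default Eigen::Ref<const Eigen::MatrixXd> (compile-time inner stride of 1) unless a converting copy is allowed.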
template <typename Type>
struct eigen_extract_stride {
using type = Type;
};
template <typename PlainObjectType, int MapOptions, typename StrideType>
struct eigen_extract_stride<Eigen::Map<PlainObjectType, MapOptions, StrideType>> {
using type = StrideType;
};
template <typename PlainObjectType, int Options, typename StrideType>
struct eigen_extract_stride<Eigen::Ref<PlainObjectType, Options, StrideType>> {
using type = StrideType;
};
// Helper struct for extracting information from an Eigen type
template <typename Type_>
struct EigenProps {
using Type = Type_;
using Scalar = typename Type::Scalar;
using StrideType = typename eigen_extract_stride<Type>::type;
static constexpr EigenIndex rows = Type::RowsAtCompileTime, cols = Type::ColsAtCompileTime,
size = Type::SizeAtCompileTime;
static constexpr bool row_major = Type::IsRowMajor,
vector
= Type::IsVectorAtCompileTime, // At least one dimension has fixed size 1
fixed_rows = rows != Eigen::Dynamic, fixed_cols = cols != Eigen::Dynamic,
fixed = size != Eigen::Dynamic, // Fully-fixed size
dynamic = !fixed_rows && !fixed_cols; // Fully-dynamic size
template <EigenIndex i, EigenIndex ifzero>
using if_zero = std::integral_constant<EigenIndex, i == 0 ? ifzero : i>;
static constexpr EigenIndex inner_stride
= if_zero<StrideType::InnerStrideAtCompileTime, 1>::value,
outer_stride = if_zero < StrideType::OuterStrideAtCompileTime,
vector ? size
: row_major ? cols
: rows > ::value;
static constexpr bool dynamic_stride
= inner_stride == Eigen::Dynamic && outer_stride == Eigen::Dynamic;
static constexpr bool requires_row_major
= !dynamic_stride && !vector && (row_major ? inner_stride : outer_stride) == 1;
static constexpr bool requires_col_major
= !dynamic_stride && !vector && (row_major ? outer_stride : inner_stride) == 1;
// Takes an input array and determines whether we can make it fit into the Eigen type. If
// the array is a vector, we attempt to fit it into either an Eigen 1xN or Nx1 vector
// (preferring the latter if it will fit in either, i.e. for a fully dynamic matrix type).
static EigenConformable<row_major> conformable(const array &a) {
const auto dims = a.ndim();
if (dims < 1 || dims > 2) {
return false;
}
if (dims == 2) { // Matrix type: require exact match (or dynamic)
EigenIndex np_rows = a.shape(0), np_cols = a.shape(1),
np_rstride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar)),
np_cstride = a.strides(1) / static_cast<ssize_t>(sizeof(Scalar));
if ((PYBIND11_SILENCE_MSVC_C4127(fixed_rows) && np_rows != rows)
|| (PYBIND11_SILENCE_MSVC_C4127(fixed_cols) && np_cols != cols)) {
return false;
}
return {np_rows, np_cols, np_rstride, np_cstride};
}
// Otherwise we're storing an n-vector. Only one of the strides will be used, but
// whichever is used, we want the (single) numpy stride value.
const EigenIndex n = a.shape(0),
stride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar));
if (vector) { // Eigen type is a compile-time vector
if (PYBIND11_SILENCE_MSVC_C4127(fixed) && size != n) {
return false; // Vector size mismatch
}
return {rows == 1 ? 1 : n, cols == 1 ? 1 : n, stride};
}
if (fixed) {
// The type has a fixed size, but is not a vector: abort
return false;
}
if (fixed_cols) {
// Since this isn't a vector, cols must be != 1. We allow this only if it exactly
// equals the number of elements (rows is Dynamic, and so 1 row is allowed).
if (cols != n) {
return false;
}
return {1, n, stride};
} // Otherwise it's either fully dynamic, or column dynamic; both become a column vector
if (PYBIND11_SILENCE_MSVC_C4127(fixed_rows) && rows != n) {
return false;
}
return {n, 1, stride};
}
static constexpr bool show_writeable
= is_eigen_dense_map<Type>::value && is_eigen_mutable_map<Type>::value;
static constexpr bool show_order = is_eigen_dense_map<Type>::value;
static constexpr bool show_c_contiguous = show_order && requires_row_major;
static constexpr bool show_f_contiguous
= !show_c_contiguous && show_order && requires_col_major;
static constexpr auto descriptor
= const_name("numpy.ndarray[") + npy_format_descriptor<Scalar>::name + const_name("[")
+ const_name<fixed_rows>(const_name<(size_t) rows>(), const_name("m")) + const_name(", ")
+ const_name<fixed_cols>(const_name<(size_t) cols>(), const_name("n")) + const_name("]")
+
// For a reference type (e.g. Ref<MatrixXd>) we have other constraints that might need to
// be satisfied: writeable=True (for a mutable reference), and, depending on the map's
// stride options, possibly f_contiguous or c_contiguous. We include them in the
// descriptor output to provide some hint as to why a TypeError is occurring (otherwise
// it can be confusing to see that a function accepts a 'numpy.ndarray[float64[3,2]]' and
// an error message that you *gave* a numpy.ndarray of the right type and dimensions).
const_name<show_writeable>(", flags.writeable", "")
+ const_name<show_c_contiguous>(", flags.c_contiguous", "")
+ const_name<show_f_contiguous>(", flags.f_contiguous", "") + const_name("]");
};
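A hedged illustration of how this descriptor surfaces to users (the module and function names are invented, and the rendered text is approximate):

#include <pybind11/eigen.h>
#include <pybind11/pybind11.h>

namespace py = pybind11;

PYBIND11_MODULE(descr_demo, m) { // module name is illustrative
    // Doubles a matrix in place through a fully-dynamic-stride Ref.
    m.def("scale2", [](py::EigenDRef<Eigen::MatrixXd> a) { a *= 2.0; });
    // The generated docstring is built from EigenProps<...>::descriptor and
    // reads roughly:
    //   scale2(arg0: numpy.ndarray[numpy.float64[m, n], flags.writeable]) -> None
}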
// Casts an Eigen type to numpy array. If given a base, the numpy array references the src data,
// otherwise it'll make a copy. writeable lets you turn off the writeable flag for the array.
template <typename props>
handle
eigen_array_cast(typename props::Type const &src, handle base = handle(), bool writeable = true) {
constexpr ssize_t elem_size = sizeof(typename props::Scalar);
array a;
if (props::vector) {
a = array({src.size()}, {elem_size * src.innerStride()}, src.data(), base);
} else {
a = array({src.rows(), src.cols()},
{elem_size * src.rowStride(), elem_size * src.colStride()},
src.data(),
base);
}
if (!writeable) {
array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
}
return a.release();
}
// Takes an lvalue ref to some Eigen type and a (python) base object, creating a numpy array that
// references the Eigen object's data with `base` as the python-registered base class (if omitted,
// the base will be set to None, and lifetime management is up to the caller). The numpy array is
// non-writeable if the given type is const.
template <typename props, typename Type>
handle eigen_ref_array(Type &src, handle parent = none()) {
// none here is to get past array's should-we-copy detection, which currently always
// copies when there is no base. Setting the base to None should be harmless.
return eigen_array_cast<props>(src, parent, !std::is_const<Type>::value);
}
// Takes a pointer to some dense, plain Eigen type, builds a capsule around it, then returns a
// numpy array that references the encapsulated data with a python-side reference to the capsule to
// tie its destruction to that of any dependent python objects. Const-ness is determined by
// whether or not the Type of the pointer given is const.
template <typename props, typename Type, typename = enable_if_t<is_eigen_dense_plain<Type>::value>>
handle eigen_encapsulate(Type *src) {
capsule base(src, [](void *o) { delete static_cast<Type *>(o); });
return eigen_ref_array<props>(*src, base);
}
// Type caster for regular, dense matrix types (e.g. MatrixXd), but not maps/refs/etc. of dense
// types.
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_dense_plain<Type>::value>> {
using Scalar = typename Type::Scalar;
using props = EigenProps<Type>;
bool load(handle src, bool convert) {
// If we're in no-convert mode, only load if given an array of the correct type
if (!convert && !isinstance<array_t<Scalar>>(src)) {
return false;
}
// Coerce into an array, but don't do type conversion yet; the copy below handles it.
auto buf = array::ensure(src);
if (!buf) {
return false;
}
auto dims = buf.ndim();
if (dims < 1 || dims > 2) {
return false;
}
auto fits = props::conformable(buf);
if (!fits) {
return false;
}
// Allocate the new type, then build a numpy reference into it
value = Type(fits.rows, fits.cols);
auto ref = reinterpret_steal<array>(eigen_ref_array<props>(value));
if (dims == 1) {
ref = ref.squeeze();
} else if (ref.ndim() == 1) {
buf = buf.squeeze();
}
int result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr());
if (result < 0) { // Copy failed!
PyErr_Clear();
return false;
}
return true;
}
private:
// Cast implementation
template <typename CType>
static handle cast_impl(CType *src, return_value_policy policy, handle parent) {
switch (policy) {
case return_value_policy::take_ownership:
case return_value_policy::automatic:
return eigen_encapsulate<props>(src);
case return_value_policy::move:
return eigen_encapsulate<props>(new CType(std::move(*src)));
case return_value_policy::copy:
return eigen_array_cast<props>(*src);
case return_value_policy::reference:
case return_value_policy::automatic_reference:
return eigen_ref_array<props>(*src);
case return_value_policy::reference_internal:
return eigen_ref_array<props>(*src, parent);
default:
throw cast_error("unhandled return_value_policy: should not happen!");
};
}
public:
// Normal returned non-reference, non-const value:
static handle cast(Type &&src, return_value_policy /* policy */, handle parent) {
return cast_impl(&src, return_value_policy::move, parent);
}
// If you return a non-reference const, we mark the numpy array readonly:
static handle cast(const Type &&src, return_value_policy /* policy */, handle parent) {
return cast_impl(&src, return_value_policy::move, parent);
}
// lvalue reference return; default (automatic) becomes copy
static handle cast(Type &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast_impl(&src, policy, parent);
}
// const lvalue reference return; default (automatic) becomes copy
static handle cast(const Type &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast(&src, policy, parent);
}
// non-const pointer return
static handle cast(Type *src, return_value_policy policy, handle parent) {
return cast_impl(src, policy, parent);
}
// const pointer return
static handle cast(const Type *src, return_value_policy policy, handle parent) {
return cast_impl(src, policy, parent);
}
static constexpr auto name = props::descriptor;
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type *() { return &value; }
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type &() { return value; }
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type &&() && { return std::move(value); }
template <typename T>
using cast_op_type = movable_cast_op_type<T>;
private:
Type value;
};
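A sketch of how the policies above behave from the binding side (the type, module, and method names are invented):

#include <pybind11/eigen.h>
#include <pybind11/pybind11.h>

namespace py = pybind11;

struct Holder { // illustrative type
    Eigen::MatrixXd mat{Eigen::MatrixXd::Zero(2, 2)};
    Eigen::MatrixXd &ref() { return mat; }
};

PYBIND11_MODULE(dense_demo, m) { // module name is illustrative
    py::class_<Holder>(m, "Holder")
        .def(py::init<>())
        // automatic on an lvalue reference becomes copy (see cast(Type &, ...) above):
        // the returned ndarray owns its own buffer.
        .def("mat_copy", &Holder::ref)
        // reference_internal goes through eigen_ref_array(): the ndarray aliases
        // this->mat and keeps the Holder instance alive as its base.
        .def("mat_view", &Holder::ref, py::return_value_policy::reference_internal);
}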
// Base class for casting reference/map/block/etc. objects back to python.
template <typename MapType>
struct eigen_map_caster {
private:
using props = EigenProps<MapType>;
public:
// Directly referencing a ref/map's data is a bit dangerous (whatever the map/ref points to has
// to stay around), but we'll allow it under the assumption that you know what you're doing
// (and have an appropriate keep_alive in place). We return a numpy array pointing directly at
// the ref's data (The numpy array ends up read-only if the ref was to a const matrix type.)
// Note that this means you need to ensure you don't destroy the object in some other way (e.g.
// with an appropriate keep_alive, or with a reference to a statically allocated matrix).
static handle cast(const MapType &src, return_value_policy policy, handle parent) {
switch (policy) {
case return_value_policy::copy:
return eigen_array_cast<props>(src);
case return_value_policy::reference_internal:
return eigen_array_cast<props>(src, parent, is_eigen_mutable_map<MapType>::value);
case return_value_policy::reference:
case return_value_policy::automatic:
case return_value_policy::automatic_reference:
return eigen_array_cast<props>(src, none(), is_eigen_mutable_map<MapType>::value);
default:
// move, take_ownership don't make any sense for a ref/map:
pybind11_fail("Invalid return_value_policy for Eigen Map/Ref/Block type");
}
}
static constexpr auto name = props::descriptor;
// Explicitly delete these: we do not support python -> C++ conversion on these (i.e. they can
// be return types but not bound arguments). We still declare them (explicitly deleted) so that
// you end up here if you try anyway.
bool load(handle, bool) = delete;
operator MapType() = delete;
template <typename>
using cast_op_type = MapType;
};
// We can return any map-like object (but can only load Refs, specialized next):
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_dense_map<Type>::value>> : eigen_map_caster<Type> {};
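A sketch of returning a map-like object through eigen_map_caster (names are invented); note that only the copy / reference / reference_internal policies are accepted:

#include <pybind11/eigen.h>
#include <pybind11/pybind11.h>

namespace py = pybind11;

struct Grid { // illustrative type
    Eigen::MatrixXd data{Eigen::MatrixXd::Ones(4, 4)};
};

PYBIND11_MODULE(map_demo, m) { // module name is illustrative
    py::class_<Grid>(m, "Grid")
        .def(py::init<>())
        // The block is a map-like view: the returned ndarray aliases grid.data,
        // and reference_internal ties its lifetime to the Grid object.
        .def("top_left",
             [](Grid &g) { return g.data.block(0, 0, 2, 2); },
             py::return_value_policy::reference_internal);
}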
// Loader for Ref<...> arguments. See the documentation for info on how to make this work without
// copying (it requires some extra effort in many cases).
template <typename PlainObjectType, typename StrideType>
struct type_caster<
Eigen::Ref<PlainObjectType, 0, StrideType>,
enable_if_t<is_eigen_dense_map<Eigen::Ref<PlainObjectType, 0, StrideType>>::value>>
: public eigen_map_caster<Eigen::Ref<PlainObjectType, 0, StrideType>> {
private:
using Type = Eigen::Ref<PlainObjectType, 0, StrideType>;
using props = EigenProps<Type>;
using Scalar = typename props::Scalar;
using MapType = Eigen::Map<PlainObjectType, 0, StrideType>;
using Array
= array_t<Scalar,
array::forcecast
| ((props::row_major ? props::inner_stride : props::outer_stride) == 1
? array::c_style
: (props::row_major ? props::outer_stride : props::inner_stride) == 1
? array::f_style
: 0)>;
static constexpr bool need_writeable = is_eigen_mutable_map<Type>::value;
// Delay construction (these have no default constructor)
std::unique_ptr<MapType> map;
std::unique_ptr<Type> ref;
// Our array. When possible, this is just a numpy array pointing to the source data, but
// sometimes we can't avoid copying (e.g. input is not a numpy array at all, has an
// incompatible layout, or is an array of a type that needs to be converted). Using a numpy
// temporary (rather than an Eigen temporary) saves an extra copy when we need both type
// conversion and storage order conversion. (Note that we refuse to use this temporary copy
// when loading an argument for a Ref<M> with M non-const, i.e. a read-write reference).
Array copy_or_ref;
public:
bool load(handle src, bool convert) {
// First check whether what we have is already an array of the right type. If not, we
// can't avoid a copy (because the copy is also going to do type conversion).
bool need_copy = !isinstance<Array>(src);
EigenConformable<props::row_major> fits;
if (!need_copy) {
// We don't need a converting copy, but we also need to check whether the strides are
// compatible with the Ref's stride requirements
auto aref = reinterpret_borrow<Array>(src);
if (aref && (!need_writeable || aref.writeable())) {
fits = props::conformable(aref);
if (!fits) {
return false; // Incompatible dimensions
}
if (!fits.template stride_compatible<props>()) {
need_copy = true;
} else {
copy_or_ref = std::move(aref);
}
} else {
need_copy = true;
}
}
if (need_copy) {
// We need to copy: If we need a mutable reference, or we're not supposed to convert
// (either because we're in the no-convert overload pass, or because we're explicitly
// instructed not to copy via `py::arg().noconvert()`), we have to fail loading.
if (!convert || need_writeable) {
return false;
}
Array copy = Array::ensure(src);
if (!copy) {
return false;
}
fits = props::conformable(copy);
if (!fits || !fits.template stride_compatible<props>()) {
return false;
}
copy_or_ref = std::move(copy);
loader_life_support::add_patient(copy_or_ref);
}
ref.reset();
map.reset(new MapType(data(copy_or_ref),
fits.rows,
fits.cols,
make_stride(fits.stride.outer(), fits.stride.inner())));
ref.reset(new Type(*map));
return true;
}
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type *() { return ref.get(); }
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type &() { return *ref; }
template <typename _T>
using cast_op_type = pybind11::detail::cast_op_type<_T>;
private:
template <typename T = Type, enable_if_t<is_eigen_mutable_map<T>::value, int> = 0>
Scalar *data(Array &a) {
return a.mutable_data();
}
template <typename T = Type, enable_if_t<!is_eigen_mutable_map<T>::value, int> = 0>
const Scalar *data(Array &a) {
return a.data();
}
// Attempt to figure out a constructor of `Stride` that will work.
// If both strides are fixed, use a default constructor:
template <typename S>
using stride_ctor_default = bool_constant<S::InnerStrideAtCompileTime != Eigen::Dynamic
&& S::OuterStrideAtCompileTime != Eigen::Dynamic
&& std::is_default_constructible<S>::value>;
// Otherwise, if there is a two-index constructor, assume it is (outer,inner) like
// Eigen::Stride, and use it:
template <typename S>
using stride_ctor_dual
= bool_constant<!stride_ctor_default<S>::value
&& std::is_constructible<S, EigenIndex, EigenIndex>::value>;
// Otherwise, if there is a one-index constructor, and just one of the strides is dynamic, use
// it (passing whichever stride is dynamic).
template <typename S>
using stride_ctor_outer
= bool_constant<!any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value
&& S::OuterStrideAtCompileTime == Eigen::Dynamic
&& S::InnerStrideAtCompileTime != Eigen::Dynamic
&& std::is_constructible<S, EigenIndex>::value>;
template <typename S>
using stride_ctor_inner
= bool_constant<!any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value
&& S::InnerStrideAtCompileTime == Eigen::Dynamic
&& S::OuterStrideAtCompileTime != Eigen::Dynamic
&& std::is_constructible<S, EigenIndex>::value>;
template <typename S = StrideType, enable_if_t<stride_ctor_default<S>::value, int> = 0>
static S make_stride(EigenIndex, EigenIndex) {
return S();
}
template <typename S = StrideType, enable_if_t<stride_ctor_dual<S>::value, int> = 0>
static S make_stride(EigenIndex outer, EigenIndex inner) {
return S(outer, inner);
}
template <typename S = StrideType, enable_if_t<stride_ctor_outer<S>::value, int> = 0>
static S make_stride(EigenIndex outer, EigenIndex) {
return S(outer);
}
template <typename S = StrideType, enable_if_t<stride_ctor_inner<S>::value, int> = 0>
static S make_stride(EigenIndex, EigenIndex inner) {
return S(inner);
}
};
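A sketch of the two common Ref<> flavours this loader handles (function and module names are invented):

#include <pybind11/eigen.h>
#include <pybind11/pybind11.h>

namespace py = pybind11;

// Read-only: a conforming float64 array is mapped directly; anything else
// (wrong dtype, wrong ordering, a nested list, ...) is loaded through a
// converting temporary copy, as described above.
double first(const Eigen::Ref<const Eigen::MatrixXd> &a) { return a(0, 0); }

// Read-write: load() refuses to copy for a mutable Ref, so the caller must pass
// a writeable, float64, column-major (f_contiguous) array or get a TypeError.
void zero_first(Eigen::Ref<Eigen::MatrixXd> a) { a(0, 0) = 0.0; }

PYBIND11_MODULE(ref_demo, m) { // module name is illustrative
    m.def("first", &first);
    m.def("zero_first", &zero_first);
}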
// type_caster for special matrix types (e.g. DiagonalMatrix), which are EigenBase, but not
// EigenDense (i.e. they don't have a data(), at least not with the usual matrix layout).
// load() is not supported, but we can cast them into the python domain by first copying to a
// regular Eigen::Matrix, then casting that.
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_other<Type>::value>> {
protected:
using Matrix
= Eigen::Matrix<typename Type::Scalar, Type::RowsAtCompileTime, Type::ColsAtCompileTime>;
using props = EigenProps<Matrix>;
public:
static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
handle h = eigen_encapsulate<props>(new Matrix(src));
return h;
}
static handle cast(const Type *src, return_value_policy policy, handle parent) {
return cast(*src, policy, parent);
}
static constexpr auto name = props::descriptor;
// Explicitly delete these: we do not support python -> C++ conversion on these (i.e. they can
// be return types but not bound arguments). We still declare them (explicitly deleted) so that
// you end up here if you try anyway.
bool load(handle, bool) = delete;
operator Type() = delete;
template <typename>
using cast_op_type = Type;
};
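A sketch of this "other" category in use (names invented; DiagonalMatrix is one of the types mentioned above). The return value is first copied into a dense Eigen::Matrix, then handed to numpy; such types cannot appear as bound arguments:

#include <pybind11/eigen.h>
#include <pybind11/pybind11.h>

namespace py = pybind11;

// DiagonalMatrix has no data() in the usual layout, so the caster above first
// copies it into a dense 3x3 Eigen::Matrix.
Eigen::DiagonalMatrix<double, 3> make_scaling() {
    return Eigen::DiagonalMatrix<double, 3>(1.0, 2.0, 3.0);
}

PYBIND11_MODULE(other_demo, m) { // module name is illustrative
    m.def("make_scaling", &make_scaling); // returns a dense 3x3 ndarray
}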
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_sparse<Type>::value>> {
using Scalar = typename Type::Scalar;
using StorageIndex = remove_reference_t<decltype(*std::declval<Type>().outerIndexPtr())>;
using Index = typename Type::Index;
static constexpr bool rowMajor = Type::IsRowMajor;
bool load(handle src, bool) {
if (!src) {
return false;
}
auto obj = reinterpret_borrow<object>(src);
object sparse_module = module_::import("scipy.sparse");
object matrix_type = sparse_module.attr(rowMajor ? "csr_matrix" : "csc_matrix");
if (!type::handle_of(obj).is(matrix_type)) {
try {
obj = matrix_type(obj);
} catch (const error_already_set &) {
return false;
}
}
auto values = array_t<Scalar>((object) obj.attr("data"));
auto innerIndices = array_t<StorageIndex>((object) obj.attr("indices"));
auto outerIndices = array_t<StorageIndex>((object) obj.attr("indptr"));
auto shape = pybind11::tuple((pybind11::object) obj.attr("shape"));
auto nnz = obj.attr("nnz").cast<Index>();
if (!values || !innerIndices || !outerIndices) {
return false;
}
value = EigenMapSparseMatrix<Scalar,
Type::Flags &(Eigen::RowMajor | Eigen::ColMajor),
StorageIndex>(shape[0].cast<Index>(),
shape[1].cast<Index>(),
std::move(nnz),
outerIndices.mutable_data(),
innerIndices.mutable_data(),
values.mutable_data());
return true;
}
static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
const_cast<Type &>(src).makeCompressed();
object matrix_type
= module_::import("scipy.sparse").attr(rowMajor ? "csr_matrix" : "csc_matrix");
array data(src.nonZeros(), src.valuePtr());
array outerIndices((rowMajor ? src.rows() : src.cols()) + 1, src.outerIndexPtr());
array innerIndices(src.nonZeros(), src.innerIndexPtr());
return matrix_type(pybind11::make_tuple(
std::move(data), std::move(innerIndices), std::move(outerIndices)),
pybind11::make_tuple(src.rows(), src.cols()))
.release();
}
PYBIND11_TYPE_CASTER(Type,
const_name<(Type::IsRowMajor) != 0>("scipy.sparse.csr_matrix[",
"scipy.sparse.csc_matrix[")
+ npy_format_descriptor<Scalar>::name + const_name("]"));
};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)

View File

@ -0,0 +1,518 @@
/*
pybind11/eigen/tensor.h: Transparent conversion for Eigen tensors
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "../numpy.h"
#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
static_assert(__GNUC__ > 5, "Eigen Tensor support in pybind11 requires GCC > 5.0");
#endif
#if defined(_MSC_VER)
# pragma warning(push)
# pragma warning(disable : 4554) // Tensor.h warning
# pragma warning(disable : 4127) // Tensor.h warning
#elif defined(__MINGW32__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
#include <unsupported/Eigen/CXX11/Tensor>
#if defined(_MSC_VER)
# pragma warning(pop)
#elif defined(__MINGW32__)
# pragma GCC diagnostic pop
#endif
static_assert(EIGEN_VERSION_AT_LEAST(3, 3, 0),
"Eigen Tensor support in pybind11 requires Eigen >= 3.3.0");
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
inline bool is_tensor_aligned(const void *data) {
return (reinterpret_cast<std::size_t>(data) % EIGEN_DEFAULT_ALIGN_BYTES) == 0;
}
template <typename T>
constexpr int compute_array_flag_from_tensor() {
static_assert((static_cast<int>(T::Layout) == static_cast<int>(Eigen::RowMajor))
|| (static_cast<int>(T::Layout) == static_cast<int>(Eigen::ColMajor)),
"Layout must be row or column major");
return (static_cast<int>(T::Layout) == static_cast<int>(Eigen::RowMajor)) ? array::c_style
: array::f_style;
}
template <typename T>
struct eigen_tensor_helper {};
template <typename Scalar_, int NumIndices_, int Options_, typename IndexType>
struct eigen_tensor_helper<Eigen::Tensor<Scalar_, NumIndices_, Options_, IndexType>> {
using Type = Eigen::Tensor<Scalar_, NumIndices_, Options_, IndexType>;
using ValidType = void;
static Eigen::DSizes<typename Type::Index, Type::NumIndices> get_shape(const Type &f) {
return f.dimensions();
}
static constexpr bool
is_correct_shape(const Eigen::DSizes<typename Type::Index, Type::NumIndices> & /*shape*/) {
return true;
}
template <typename T>
struct helper {};
template <size_t... Is>
struct helper<index_sequence<Is...>> {
static constexpr auto value = concat(const_name(((void) Is, "?"))...);
};
static constexpr auto dimensions_descriptor
= helper<decltype(make_index_sequence<Type::NumIndices>())>::value;
template <typename... Args>
static Type *alloc(Args &&...args) {
return new Type(std::forward<Args>(args)...);
}
static void free(Type *tensor) { delete tensor; }
};
template <typename Scalar_, typename std::ptrdiff_t... Indices, int Options_, typename IndexType>
struct eigen_tensor_helper<
Eigen::TensorFixedSize<Scalar_, Eigen::Sizes<Indices...>, Options_, IndexType>> {
using Type = Eigen::TensorFixedSize<Scalar_, Eigen::Sizes<Indices...>, Options_, IndexType>;
using ValidType = void;
static constexpr Eigen::DSizes<typename Type::Index, Type::NumIndices>
get_shape(const Type & /*f*/) {
return get_shape();
}
static constexpr Eigen::DSizes<typename Type::Index, Type::NumIndices> get_shape() {
return Eigen::DSizes<typename Type::Index, Type::NumIndices>(Indices...);
}
static bool
is_correct_shape(const Eigen::DSizes<typename Type::Index, Type::NumIndices> &shape) {
return get_shape() == shape;
}
static constexpr auto dimensions_descriptor = concat(const_name<Indices>()...);
template <typename... Args>
static Type *alloc(Args &&...args) {
Eigen::aligned_allocator<Type> allocator;
return ::new (allocator.allocate(1)) Type(std::forward<Args>(args)...);
}
static void free(Type *tensor) {
Eigen::aligned_allocator<Type> allocator;
tensor->~Type();
allocator.deallocate(tensor, 1);
}
};
template <typename Type, bool ShowDetails, bool NeedsWriteable = false>
struct get_tensor_descriptor {
static constexpr auto details
= const_name<NeedsWriteable>(", flags.writeable", "")
+ const_name<static_cast<int>(Type::Layout) == static_cast<int>(Eigen::RowMajor)>(
", flags.c_contiguous", ", flags.f_contiguous");
static constexpr auto value
= const_name("numpy.ndarray[") + npy_format_descriptor<typename Type::Scalar>::name
+ const_name("[") + eigen_tensor_helper<remove_cv_t<Type>>::dimensions_descriptor
+ const_name("]") + const_name<ShowDetails>(details, const_name("")) + const_name("]");
};
// When EIGEN_AVOID_STL_ARRAY is defined, Eigen::DSizes<T, 0> does not have the begin() member
// function. Falling back to a simple loop works around this issue.
//
// We need to disable the type-limits warning for the inner loop when size = 0.
#if defined(__GNUC__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wtype-limits"
#endif
template <typename T, int size>
std::vector<T> convert_dsizes_to_vector(const Eigen::DSizes<T, size> &arr) {
std::vector<T> result(size);
for (size_t i = 0; i < size; i++) {
result[i] = arr[i];
}
return result;
}
template <typename T, int size>
Eigen::DSizes<T, size> get_shape_for_array(const array &arr) {
Eigen::DSizes<T, size> result;
const T *shape = arr.shape();
for (size_t i = 0; i < size; i++) {
result[i] = shape[i];
}
return result;
}
#if defined(__GNUC__)
# pragma GCC diagnostic pop
#endif
template <typename Type>
struct type_caster<Type, typename eigen_tensor_helper<Type>::ValidType> {
using Helper = eigen_tensor_helper<Type>;
static constexpr auto temp_name = get_tensor_descriptor<Type, false>::value;
PYBIND11_TYPE_CASTER(Type, temp_name);
bool load(handle src, bool convert) {
if (!convert) {
if (!isinstance<array>(src)) {
return false;
}
array temp = array::ensure(src);
if (!temp) {
return false;
}
if (!convert && !temp.dtype().is(dtype::of<typename Type::Scalar>())) {
return false;
}
}
array_t<typename Type::Scalar, compute_array_flag_from_tensor<Type>()> arr(
reinterpret_borrow<object>(src));
if (arr.ndim() != Type::NumIndices) {
return false;
}
auto shape = get_shape_for_array<typename Type::Index, Type::NumIndices>(arr);
if (!Helper::is_correct_shape(shape)) {
return false;
}
#if EIGEN_VERSION_AT_LEAST(3, 4, 0)
auto data_pointer = arr.data();
#else
// Handle Eigen bug
auto data_pointer = const_cast<typename Type::Scalar *>(arr.data());
#endif
if (is_tensor_aligned(arr.data())) {
value = Eigen::TensorMap<const Type, Eigen::Aligned>(data_pointer, shape);
} else {
value = Eigen::TensorMap<const Type>(data_pointer, shape);
}
return true;
}
static handle cast(Type &&src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::reference
|| policy == return_value_policy::reference_internal) {
pybind11_fail("Cannot use a reference return value policy for an rvalue");
}
return cast_impl(&src, return_value_policy::move, parent);
}
static handle cast(const Type &&src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::reference
|| policy == return_value_policy::reference_internal) {
pybind11_fail("Cannot use a reference return value policy for an rvalue");
}
return cast_impl(&src, return_value_policy::move, parent);
}
static handle cast(Type &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast_impl(&src, policy, parent);
}
static handle cast(const Type &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast(&src, policy, parent);
}
static handle cast(Type *src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic) {
policy = return_value_policy::take_ownership;
} else if (policy == return_value_policy::automatic_reference) {
policy = return_value_policy::reference;
}
return cast_impl(src, policy, parent);
}
static handle cast(const Type *src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic) {
policy = return_value_policy::take_ownership;
} else if (policy == return_value_policy::automatic_reference) {
policy = return_value_policy::reference;
}
return cast_impl(src, policy, parent);
}
template <typename C>
static handle cast_impl(C *src, return_value_policy policy, handle parent) {
object parent_object;
bool writeable = false;
switch (policy) {
case return_value_policy::move:
if (std::is_const<C>::value) {
pybind11_fail("Cannot move from a constant reference");
}
src = Helper::alloc(std::move(*src));
parent_object
= capsule(src, [](void *ptr) { Helper::free(reinterpret_cast<Type *>(ptr)); });
writeable = true;
break;
case return_value_policy::take_ownership:
if (std::is_const<C>::value) {
// This cast is ugly, and might be UB in some cases, but we don't have an
// alternative here as we must free that memory
Helper::free(const_cast<Type *>(src));
pybind11_fail("Cannot take ownership of a const reference");
}
parent_object
= capsule(src, [](void *ptr) { Helper::free(reinterpret_cast<Type *>(ptr)); });
writeable = true;
break;
case return_value_policy::copy:
writeable = true;
break;
case return_value_policy::reference:
parent_object = none();
writeable = !std::is_const<C>::value;
break;
case return_value_policy::reference_internal:
// Default should do the right thing
if (!parent) {
pybind11_fail("Cannot use reference internal when there is no parent");
}
parent_object = reinterpret_borrow<object>(parent);
writeable = !std::is_const<C>::value;
break;
default:
pybind11_fail("pybind11 bug in eigen.h, please file a bug report");
}
auto result = array_t<typename Type::Scalar, compute_array_flag_from_tensor<Type>()>(
convert_dsizes_to_vector(Helper::get_shape(*src)), src->data(), parent_object);
if (!writeable) {
array_proxy(result.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
}
return result.release();
}
};
template <typename StoragePointerType,
bool needs_writeable,
enable_if_t<!needs_writeable, bool> = true>
StoragePointerType get_array_data_for_type(array &arr) {
#if EIGEN_VERSION_AT_LEAST(3, 4, 0)
return reinterpret_cast<StoragePointerType>(arr.data());
#else
// Handle Eigen bug
return reinterpret_cast<StoragePointerType>(const_cast<void *>(arr.data()));
#endif
}
template <typename StoragePointerType,
bool needs_writeable,
enable_if_t<needs_writeable, bool> = true>
StoragePointerType get_array_data_for_type(array &arr) {
return reinterpret_cast<StoragePointerType>(arr.mutable_data());
}
template <typename T, typename = void>
struct get_storage_pointer_type;
template <typename MapType>
struct get_storage_pointer_type<MapType, void_t<typename MapType::StoragePointerType>> {
using SPT = typename MapType::StoragePointerType;
};
template <typename MapType>
struct get_storage_pointer_type<MapType, void_t<typename MapType::PointerArgType>> {
using SPT = typename MapType::PointerArgType;
};
template <typename Type, int Options>
struct type_caster<Eigen::TensorMap<Type, Options>,
typename eigen_tensor_helper<remove_cv_t<Type>>::ValidType> {
using MapType = Eigen::TensorMap<Type, Options>;
using Helper = eigen_tensor_helper<remove_cv_t<Type>>;
bool load(handle src, bool /*convert*/) {
// Note that we have a lot more checks here as we want to make sure to avoid copies
if (!isinstance<array>(src)) {
return false;
}
auto arr = reinterpret_borrow<array>(src);
if ((arr.flags() & compute_array_flag_from_tensor<Type>()) == 0) {
return false;
}
if (!arr.dtype().is(dtype::of<typename Type::Scalar>())) {
return false;
}
if (arr.ndim() != Type::NumIndices) {
return false;
}
constexpr bool is_aligned = (Options & Eigen::Aligned) != 0;
if (PYBIND11_SILENCE_MSVC_C4127(is_aligned) && !is_tensor_aligned(arr.data())) {
return false;
}
auto shape = get_shape_for_array<typename Type::Index, Type::NumIndices>(arr);
if (!Helper::is_correct_shape(shape)) {
return false;
}
if (PYBIND11_SILENCE_MSVC_C4127(needs_writeable) && !arr.writeable()) {
return false;
}
auto result = get_array_data_for_type<typename get_storage_pointer_type<MapType>::SPT,
needs_writeable>(arr);
value.reset(new MapType(std::move(result), std::move(shape)));
return true;
}
static handle cast(MapType &&src, return_value_policy policy, handle parent) {
return cast_impl(&src, policy, parent);
}
static handle cast(const MapType &&src, return_value_policy policy, handle parent) {
return cast_impl(&src, policy, parent);
}
static handle cast(MapType &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast_impl(&src, policy, parent);
}
static handle cast(const MapType &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast(&src, policy, parent);
}
static handle cast(MapType *src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic) {
policy = return_value_policy::take_ownership;
} else if (policy == return_value_policy::automatic_reference) {
policy = return_value_policy::reference;
}
return cast_impl(src, policy, parent);
}
static handle cast(const MapType *src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic) {
policy = return_value_policy::take_ownership;
} else if (policy == return_value_policy::automatic_reference) {
policy = return_value_policy::reference;
}
return cast_impl(src, policy, parent);
}
template <typename C>
static handle cast_impl(C *src, return_value_policy policy, handle parent) {
object parent_object;
constexpr bool writeable = !std::is_const<C>::value;
switch (policy) {
case return_value_policy::reference:
parent_object = none();
break;
case return_value_policy::reference_internal:
// Default should do the right thing
if (!parent) {
pybind11_fail("Cannot use reference internal when there is no parent");
}
parent_object = reinterpret_borrow<object>(parent);
break;
case return_value_policy::take_ownership:
delete src;
// fallthrough
default:
// move, take_ownership don't make any sense for a ref/map:
pybind11_fail("Invalid return_value_policy for Eigen Map type, must be either "
"reference or reference_internal");
}
auto result = array_t<typename Type::Scalar, compute_array_flag_from_tensor<Type>()>(
convert_dsizes_to_vector(Helper::get_shape(*src)),
src->data(),
std::move(parent_object));
if (!writeable) {
array_proxy(result.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
}
return result.release();
}
#if EIGEN_VERSION_AT_LEAST(3, 4, 0)
static constexpr bool needs_writeable = !std::is_const<typename std::remove_pointer<
typename get_storage_pointer_type<MapType>::SPT>::type>::value;
#else
// Handle Eigen bug
static constexpr bool needs_writeable = !std::is_const<Type>::value;
#endif
protected:
// TODO: Move to std::optional once std::optional has more support
std::unique_ptr<MapType> value;
public:
static constexpr auto name = get_tensor_descriptor<Type, true, needs_writeable>::value;
explicit operator MapType *() { return value.get(); }
explicit operator MapType &() { return *value; }
explicit operator MapType &&() && { return std::move(*value); }
template <typename T_>
using cast_op_type = ::pybind11::detail::movable_cast_op_type<T_>;
};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
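A sketch of the two tensor casters above from the binding side (module and function names are invented; the include path follows the new pybind11/eigen/ layout added in this commit):

#include <pybind11/eigen/tensor.h>
#include <pybind11/pybind11.h>

namespace py = pybind11;

// Returned by value: the ndarray owns a moved-in copy of the tensor.
Eigen::Tensor<double, 3> make_tensor() {
    Eigen::Tensor<double, 3> t(2, 3, 4);
    t.setZero();
    return t;
}

// Accepted as a TensorMap: no copy is made, so the argument must already be a
// writeable float64 array with the matching (column-major) layout.
double first_element(Eigen::TensorMap<Eigen::Tensor<double, 3>> t) { return t(0, 0, 0); }

PYBIND11_MODULE(tensor_demo, m) { // module name is illustrative
    m.def("make_tensor", &make_tensor);
    m.def("first_element", &first_element);
}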

View File

@ -48,9 +48,16 @@ public:
*/ */
if (auto cfunc = func.cpp_function()) { if (auto cfunc = func.cpp_function()) {
auto *cfunc_self = PyCFunction_GET_SELF(cfunc.ptr()); auto *cfunc_self = PyCFunction_GET_SELF(cfunc.ptr());
if (isinstance<capsule>(cfunc_self)) { if (cfunc_self == nullptr) {
PyErr_Clear();
} else if (isinstance<capsule>(cfunc_self)) {
auto c = reinterpret_borrow<capsule>(cfunc_self); auto c = reinterpret_borrow<capsule>(cfunc_self);
auto *rec = (function_record *) c;
function_record *rec = nullptr;
// Check that we can safely reinterpret the capsule into a function_record
if (detail::is_function_record_capsule(c)) {
rec = c.get_pointer<function_record>();
}
while (rec != nullptr) { while (rec != nullptr) {
if (rec->is_stateless if (rec->is_stateless
@ -110,7 +117,7 @@ public:
template <typename Func> template <typename Func>
static handle cast(Func &&f_, return_value_policy policy, handle /* parent */) { static handle cast(Func &&f_, return_value_policy policy, handle /* parent */) {
if (!f_) { if (!f_) {
return none().inc_ref(); return none().release();
} }
auto result = f_.template target<function_type>(); auto result = f_.template target<function_type>();

View File

@ -10,7 +10,10 @@
#pragma once #pragma once
#include "detail/common.h" #include "detail/common.h"
#include "detail/internals.h"
#if defined(WITH_THREAD) && !defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
# include "detail/internals.h"
#endif
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
@ -21,7 +24,9 @@ PyThreadState *get_thread_state_unchecked();
PYBIND11_NAMESPACE_END(detail) PYBIND11_NAMESPACE_END(detail)
#if defined(WITH_THREAD) && !defined(PYPY_VERSION) #if defined(WITH_THREAD)
# if !defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
/* The functions below essentially reproduce the PyGILState_* API using a RAII /* The functions below essentially reproduce the PyGILState_* API using a RAII
* pattern, but there are a few important differences: * pattern, but there are a few important differences:
@ -178,12 +183,14 @@ private:
bool disassoc; bool disassoc;
bool active = true; bool active = true;
}; };
#elif defined(PYPY_VERSION)
# else // PYBIND11_SIMPLE_GIL_MANAGEMENT
class gil_scoped_acquire { class gil_scoped_acquire {
PyGILState_STATE state; PyGILState_STATE state;
public: public:
gil_scoped_acquire() { state = PyGILState_Ensure(); } gil_scoped_acquire() : state{PyGILState_Ensure()} {}
gil_scoped_acquire(const gil_scoped_acquire &) = delete; gil_scoped_acquire(const gil_scoped_acquire &) = delete;
gil_scoped_acquire &operator=(const gil_scoped_acquire &) = delete; gil_scoped_acquire &operator=(const gil_scoped_acquire &) = delete;
~gil_scoped_acquire() { PyGILState_Release(state); } ~gil_scoped_acquire() { PyGILState_Release(state); }
@ -194,19 +201,39 @@ class gil_scoped_release {
PyThreadState *state; PyThreadState *state;
public: public:
gil_scoped_release() { state = PyEval_SaveThread(); } gil_scoped_release() : state{PyEval_SaveThread()} {}
gil_scoped_release(const gil_scoped_release &) = delete; gil_scoped_release(const gil_scoped_release &) = delete;
gil_scoped_release &operator=(const gil_scoped_acquire &) = delete; gil_scoped_release &operator=(const gil_scoped_acquire &) = delete;
~gil_scoped_release() { PyEval_RestoreThread(state); } ~gil_scoped_release() { PyEval_RestoreThread(state); }
void disarm() {} void disarm() {}
}; };
#else
# endif // PYBIND11_SIMPLE_GIL_MANAGEMENT
#else // WITH_THREAD
class gil_scoped_acquire { class gil_scoped_acquire {
public:
gil_scoped_acquire() {
// Trick to suppress `unused variable` error messages (at call sites).
(void) (this != (this + 1));
}
gil_scoped_acquire(const gil_scoped_acquire &) = delete;
gil_scoped_acquire &operator=(const gil_scoped_acquire &) = delete;
void disarm() {} void disarm() {}
}; };
class gil_scoped_release { class gil_scoped_release {
public:
gil_scoped_release() {
// Trick to suppress `unused variable` error messages (at call sites).
(void) (this != (this + 1));
}
gil_scoped_release(const gil_scoped_release &) = delete;
gil_scoped_release &operator=(const gil_scoped_acquire &) = delete;
void disarm() {} void disarm() {}
}; };
#endif
#endif // WITH_THREAD
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
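A sketch of how these RAII classes are typically used (function and module names are invented); defining PYBIND11_SIMPLE_GIL_MANAGEMENT before including pybind11 selects the plain PyGILState_* implementation above instead of the internals-based one:

#include <pybind11/pybind11.h>

#include <chrono>
#include <thread>

namespace py = pybind11;

// Long-running, pure-C++ work (illustrative): release the GIL so other Python
// threads can run; it is re-acquired when `release` goes out of scope.
double slow_sum(int n) {
    py::gil_scoped_release release;
    double s = 0;
    for (int i = 0; i < n; ++i) {
        s += i;
    }
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    return s;
}

PYBIND11_MODULE(gil_demo, m) { // module name is illustrative
    m.def("slow_sum", &slow_sum);
    // The same effect can also be had without touching the function body,
    // using py::call_guard<py::gil_scoped_release>() on the .def() call.
}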

View File

@ -468,14 +468,21 @@ protected:
if (rec->sibling) { if (rec->sibling) {
if (PyCFunction_Check(rec->sibling.ptr())) { if (PyCFunction_Check(rec->sibling.ptr())) {
auto *self = PyCFunction_GET_SELF(rec->sibling.ptr()); auto *self = PyCFunction_GET_SELF(rec->sibling.ptr());
capsule rec_capsule = isinstance<capsule>(self) ? reinterpret_borrow<capsule>(self) if (!isinstance<capsule>(self)) {
: capsule(self); chain = nullptr;
chain = (detail::function_record *) rec_capsule; } else {
auto rec_capsule = reinterpret_borrow<capsule>(self);
if (detail::is_function_record_capsule(rec_capsule)) {
chain = rec_capsule.get_pointer<detail::function_record>();
/* Never append a method to an overload chain of a parent class; /* Never append a method to an overload chain of a parent class;
instead, hide the parent's overloads in this case */ instead, hide the parent's overloads in this case */
if (!chain->scope.is(rec->scope)) { if (!chain->scope.is(rec->scope)) {
chain = nullptr; chain = nullptr;
} }
} else {
chain = nullptr;
}
}
} }
// Don't trigger for things like the default __init__, which are wrapper_descriptors // Don't trigger for things like the default __init__, which are wrapper_descriptors
// that we are intentionally replacing // that we are intentionally replacing
@ -496,6 +503,7 @@ protected:
capsule rec_capsule(unique_rec.release(), capsule rec_capsule(unique_rec.release(),
[](void *ptr) { destruct((detail::function_record *) ptr); }); [](void *ptr) { destruct((detail::function_record *) ptr); });
rec_capsule.set_name(detail::get_function_record_capsule_name());
guarded_strdup.release(); guarded_strdup.release();
object scope_module; object scope_module;
@ -661,10 +669,13 @@ protected:
/// Main dispatch logic for calls to functions bound using pybind11 /// Main dispatch logic for calls to functions bound using pybind11
static PyObject *dispatcher(PyObject *self, PyObject *args_in, PyObject *kwargs_in) { static PyObject *dispatcher(PyObject *self, PyObject *args_in, PyObject *kwargs_in) {
using namespace detail; using namespace detail;
assert(isinstance<capsule>(self));
/* Iterator over the list of potentially admissible overloads */ /* Iterator over the list of potentially admissible overloads */
const function_record *overloads = (function_record *) PyCapsule_GetPointer(self, nullptr), const function_record *overloads = reinterpret_cast<function_record *>(
PyCapsule_GetPointer(self, get_function_record_capsule_name())),
*it = overloads; *it = overloads;
assert(overloads != nullptr);
/* Need to know how many arguments + keyword arguments there are to pick the right /* Need to know how many arguments + keyword arguments there are to pick the right
overload */ overload */
@ -1883,9 +1894,22 @@ private:
static detail::function_record *get_function_record(handle h) { static detail::function_record *get_function_record(handle h) {
h = detail::get_function(h); h = detail::get_function(h);
return h ? (detail::function_record *) reinterpret_borrow<capsule>( if (!h) {
PyCFunction_GET_SELF(h.ptr())) return nullptr;
: nullptr; }
handle func_self = PyCFunction_GET_SELF(h.ptr());
if (!func_self) {
throw error_already_set();
}
if (!isinstance<capsule>(func_self)) {
return nullptr;
}
auto cap = reinterpret_borrow<capsule>(func_self);
if (!detail::is_function_record_capsule(cap)) {
return nullptr;
}
return cap.get_pointer<detail::function_record>();
} }
}; };
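The name check above guards against reinterpreting some other extension's capsule as a pybind11 function_record. A minimal sketch of the same technique with the public capsule API (the capsule name, payload type, and module name are invented):

#include <pybind11/pybind11.h>

#include <cstring>

namespace py = pybind11;

struct payload { int tag; }; // illustrative payload

py::capsule make_tagged() {
    static payload p{42};
    return py::capsule(&p, "my_module.payload"); // the name marks the payload type
}

int read_tagged(const py::capsule &c) {
    // Only trust the pointer if the capsule carries the expected name.
    const char *name = c.name();
    if (name == nullptr || std::strcmp(name, "my_module.payload") != 0) {
        throw py::value_error("not a my_module.payload capsule");
    }
    return c.get_pointer<payload>()->tag;
}

PYBIND11_MODULE(capsule_demo, m) { // module name is illustrative
    m.def("make_tagged", &make_tagged);
    m.def("read_tagged", &read_tagged);
}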
@ -2345,7 +2369,7 @@ template <typename Access,
typename Sentinel, typename Sentinel,
typename ValueType, typename ValueType,
typename... Extra> typename... Extra>
iterator make_iterator_impl(Iterator &&first, Sentinel &&last, Extra &&...extra) { iterator make_iterator_impl(Iterator first, Sentinel last, Extra &&...extra) {
using state = detail::iterator_state<Access, Policy, Iterator, Sentinel, ValueType, Extra...>; using state = detail::iterator_state<Access, Policy, Iterator, Sentinel, ValueType, Extra...>;
// TODO: state captures only the types of Extra, not the values // TODO: state captures only the types of Extra, not the values
@ -2371,7 +2395,7 @@ iterator make_iterator_impl(Iterator &&first, Sentinel &&last, Extra &&...extra)
Policy); Policy);
} }
return cast(state{std::forward<Iterator>(first), std::forward<Sentinel>(last), true}); return cast(state{first, last, true});
} }
PYBIND11_NAMESPACE_END(detail) PYBIND11_NAMESPACE_END(detail)
@ -2382,15 +2406,13 @@ template <return_value_policy Policy = return_value_policy::reference_internal,
typename Sentinel, typename Sentinel,
typename ValueType = typename detail::iterator_access<Iterator>::result_type, typename ValueType = typename detail::iterator_access<Iterator>::result_type,
typename... Extra> typename... Extra>
iterator make_iterator(Iterator &&first, Sentinel &&last, Extra &&...extra) { iterator make_iterator(Iterator first, Sentinel last, Extra &&...extra) {
return detail::make_iterator_impl<detail::iterator_access<Iterator>, return detail::make_iterator_impl<detail::iterator_access<Iterator>,
Policy, Policy,
Iterator, Iterator,
Sentinel, Sentinel,
ValueType, ValueType,
Extra...>(std::forward<Iterator>(first), Extra...>(first, last, std::forward<Extra>(extra)...);
std::forward<Sentinel>(last),
std::forward<Extra>(extra)...);
} }
/// Makes a python iterator over the keys (`.first`) of a iterator over pairs from a /// Makes a python iterator over the keys (`.first`) of a iterator over pairs from a
@ -2400,15 +2422,13 @@ template <return_value_policy Policy = return_value_policy::reference_internal,
typename Sentinel, typename Sentinel,
typename KeyType = typename detail::iterator_key_access<Iterator>::result_type, typename KeyType = typename detail::iterator_key_access<Iterator>::result_type,
typename... Extra> typename... Extra>
iterator make_key_iterator(Iterator &&first, Sentinel &&last, Extra &&...extra) { iterator make_key_iterator(Iterator first, Sentinel last, Extra &&...extra) {
return detail::make_iterator_impl<detail::iterator_key_access<Iterator>, return detail::make_iterator_impl<detail::iterator_key_access<Iterator>,
Policy, Policy,
Iterator, Iterator,
Sentinel, Sentinel,
KeyType, KeyType,
Extra...>(std::forward<Iterator>(first), Extra...>(first, last, std::forward<Extra>(extra)...);
std::forward<Sentinel>(last),
std::forward<Extra>(extra)...);
} }
/// Makes a python iterator over the values (`.second`) of a iterator over pairs from a /// Makes a python iterator over the values (`.second`) of a iterator over pairs from a
@ -2418,15 +2438,13 @@ template <return_value_policy Policy = return_value_policy::reference_internal,
typename Sentinel, typename Sentinel,
typename ValueType = typename detail::iterator_value_access<Iterator>::result_type, typename ValueType = typename detail::iterator_value_access<Iterator>::result_type,
typename... Extra> typename... Extra>
iterator make_value_iterator(Iterator &&first, Sentinel &&last, Extra &&...extra) { iterator make_value_iterator(Iterator first, Sentinel last, Extra &&...extra) {
return detail::make_iterator_impl<detail::iterator_value_access<Iterator>, return detail::make_iterator_impl<detail::iterator_value_access<Iterator>,
Policy, Policy,
Iterator, Iterator,
Sentinel, Sentinel,
ValueType, ValueType,
Extra...>(std::forward<Iterator>(first), Extra...>(first, last, std::forward<Extra>(extra)...);
std::forward<Sentinel>(last),
std::forward<Extra>(extra)...);
} }
/// Makes an iterator over values of an stl container or other container supporting /// Makes an iterator over values of an stl container or other container supporting
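A sketch of the make_iterator family in use (the class and module names are invented); with the change above, the begin/end iterators are taken by value and copied into the iterator state:

#include <pybind11/pybind11.h>

#include <vector>

namespace py = pybind11;

struct IntSequence { // illustrative container wrapper
    std::vector<int> data{1, 2, 3};
};

PYBIND11_MODULE(iter_demo, m) { // module name is illustrative
    py::class_<IntSequence>(m, "IntSequence")
        .def(py::init<>())
        .def("__iter__",
             [](IntSequence &s) { return py::make_iterator(s.data.begin(), s.data.end()); },
             // Keep the IntSequence alive while the returned iterator exists.
             py::keep_alive<0, 1>());
}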

View File

@ -501,11 +501,29 @@ struct error_fetch_and_normalize {
std::string message_error_string; std::string message_error_string;
if (m_value) { if (m_value) {
auto value_str = reinterpret_steal<object>(PyObject_Str(m_value.ptr())); auto value_str = reinterpret_steal<object>(PyObject_Str(m_value.ptr()));
constexpr const char *message_unavailable_exc
= "<MESSAGE UNAVAILABLE DUE TO ANOTHER EXCEPTION>";
if (!value_str) { if (!value_str) {
message_error_string = detail::error_string(); message_error_string = detail::error_string();
result = "<MESSAGE UNAVAILABLE DUE TO ANOTHER EXCEPTION>"; result = message_unavailable_exc;
} else { } else {
result = value_str.cast<std::string>(); // Not using `value_str.cast<std::string>()`, to not potentially throw a secondary
// error_already_set that will then result in process termination (#4288).
auto value_bytes = reinterpret_steal<object>(
PyUnicode_AsEncodedString(value_str.ptr(), "utf-8", "backslashreplace"));
if (!value_bytes) {
message_error_string = detail::error_string();
result = message_unavailable_exc;
} else {
char *buffer = nullptr;
Py_ssize_t length = 0;
if (PyBytes_AsStringAndSize(value_bytes.ptr(), &buffer, &length) == -1) {
message_error_string = detail::error_string();
result = message_unavailable_exc;
} else {
result = std::string(buffer, static_cast<std::size_t>(length));
}
}
} }
} else { } else {
result = "<MESSAGE UNAVAILABLE>"; result = "<MESSAGE UNAVAILABLE>";
@ -605,12 +623,6 @@ inline std::string error_string() {
PYBIND11_NAMESPACE_END(detail) PYBIND11_NAMESPACE_END(detail)
#if defined(_MSC_VER)
# pragma warning(push)
# pragma warning(disable : 4275 4251)
// warning C4275: An exported class was derived from a class that wasn't exported.
// Can be ignored when derived from a STL class.
#endif
/// Fetch and hold an error which was already set in Python. An instance of this is typically /// Fetch and hold an error which was already set in Python. An instance of this is typically
/// thrown to propagate python-side errors back through C++ which can either be caught manually or /// thrown to propagate python-side errors back through C++ which can either be caught manually or
/// else falls back to the function dispatcher (which then raises the captured error back to /// else falls back to the function dispatcher (which then raises the captured error back to
@ -670,9 +682,6 @@ private:
/// crashes (undefined behavior) if the Python interpreter is finalizing. /// crashes (undefined behavior) if the Python interpreter is finalizing.
static void m_fetched_error_deleter(detail::error_fetch_and_normalize *raw_ptr); static void m_fetched_error_deleter(detail::error_fetch_and_normalize *raw_ptr);
}; };
#if defined(_MSC_VER)
# pragma warning(pop)
#endif
/// Replaces the current Python error indicator with the chosen error, performing a /// Replaces the current Python error indicator with the chosen error, performing a
/// 'raise from' to indicate that the chosen error was caused by the original error. /// 'raise from' to indicate that the chosen error was caused by the original error.
@ -1383,7 +1392,7 @@ public:
private: private:
void advance() { void advance() {
value = reinterpret_steal<object>(PyIter_Next(m_ptr)); value = reinterpret_steal<object>(PyIter_Next(m_ptr));
if (PyErr_Occurred()) { if (value.ptr() == nullptr && PyErr_Occurred()) {
throw error_already_set(); throw error_already_set();
} }
} }
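For context, PyIter_Next() returns nullptr both when the iterator is exhausted and when an error occurred; only the latter leaves the Python error indicator set, so the PyErr_Occurred() check is only meaningful in the nullptr case. A small standalone sketch of that C-API contract (illustrative helper, not pybind11 API; assumes the GIL is held):

#include <Python.h>

// Counts the items produced by an iterable, distinguishing exhaustion from error.
long count_items(PyObject *iterable) {
    PyObject *iter = PyObject_GetIter(iterable);
    if (iter == nullptr) {
        return -1; // error indicator is set
    }
    long n = 0;
    PyObject *item = nullptr;
    while ((item = PyIter_Next(iter)) != nullptr) {
        ++n;
        Py_DECREF(item);
    }
    Py_DECREF(iter);
    // nullptr from PyIter_Next means either exhaustion or error;
    // only the error case leaves the error indicator set.
    return PyErr_Occurred() ? -1 : n;
}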
@ -1433,6 +1442,9 @@ public:
str(const char *c, const SzType &n) str(const char *c, const SzType &n)
: object(PyUnicode_FromStringAndSize(c, ssize_t_cast(n)), stolen_t{}) { : object(PyUnicode_FromStringAndSize(c, ssize_t_cast(n)), stolen_t{}) {
if (!m_ptr) { if (!m_ptr) {
if (PyErr_Occurred()) {
throw error_already_set();
}
pybind11_fail("Could not allocate string object!"); pybind11_fail("Could not allocate string object!");
} }
} }
@ -1442,6 +1454,9 @@ public:
// NOLINTNEXTLINE(google-explicit-constructor) // NOLINTNEXTLINE(google-explicit-constructor)
str(const char *c = "") : object(PyUnicode_FromString(c), stolen_t{}) { str(const char *c = "") : object(PyUnicode_FromString(c), stolen_t{}) {
if (!m_ptr) { if (!m_ptr) {
if (PyErr_Occurred()) {
throw error_already_set();
}
pybind11_fail("Could not allocate string object!"); pybind11_fail("Could not allocate string object!");
} }
} }
@ -1599,6 +1614,9 @@ inline str::str(const bytes &b) {
} }
auto obj = reinterpret_steal<object>(PyUnicode_FromStringAndSize(buffer, length)); auto obj = reinterpret_steal<object>(PyUnicode_FromStringAndSize(buffer, length));
if (!obj) { if (!obj) {
if (PyErr_Occurred()) {
throw error_already_set();
}
pybind11_fail("Could not allocate string object!"); pybind11_fail("Could not allocate string object!");
} }
m_ptr = obj.release().ptr(); m_ptr = obj.release().ptr();
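With these changes, the py::str constructors propagate the original Python error (typically a UnicodeDecodeError) instead of the generic "Could not allocate string object!" failure. A hedged usage sketch (the helper name decode_or_none is made up):

#include <pybind11/pybind11.h>

namespace py = pybind11;

// Returns the decoded text, or None if the bytes are not valid UTF-8.
py::object decode_or_none(const py::bytes &raw) {
    try {
        return py::str(raw); // may now throw error_already_set instead of pybind11_fail
    } catch (const py::error_already_set &e) {
        if (!e.matches(PyExc_UnicodeDecodeError)) {
            throw; // unrelated Python error: re-raise
        }
        return py::none();
    }
}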
@ -1810,16 +1828,16 @@ public:
explicit capsule(const void *value, explicit capsule(const void *value,
const char *name = nullptr, const char *name = nullptr,
void (*destructor)(PyObject *) = nullptr) PyCapsule_Destructor destructor = nullptr)
: object(PyCapsule_New(const_cast<void *>(value), name, destructor), stolen_t{}) { : object(PyCapsule_New(const_cast<void *>(value), name, destructor), stolen_t{}) {
if (!m_ptr) { if (!m_ptr) {
throw error_already_set(); throw error_already_set();
} }
} }
PYBIND11_DEPRECATED("Please pass a destructor that takes a void pointer as input") PYBIND11_DEPRECATED("Please use the ctor with value, name, destructor args")
capsule(const void *value, void (*destruct)(PyObject *)) capsule(const void *value, PyCapsule_Destructor destructor)
: object(PyCapsule_New(const_cast<void *>(value), nullptr, destruct), stolen_t{}) { : object(PyCapsule_New(const_cast<void *>(value), nullptr, destructor), stolen_t{}) {
if (!m_ptr) { if (!m_ptr) {
throw error_already_set(); throw error_already_set();
} }
@ -1830,7 +1848,7 @@ public:
// guard if destructor called while err indicator is set // guard if destructor called while err indicator is set
error_scope error_guard; error_scope error_guard;
auto destructor = reinterpret_cast<void (*)(void *)>(PyCapsule_GetContext(o)); auto destructor = reinterpret_cast<void (*)(void *)>(PyCapsule_GetContext(o));
if (PyErr_Occurred()) { if (destructor == nullptr && PyErr_Occurred()) {
throw error_already_set(); throw error_already_set();
} }
const char *name = get_name_in_error_scope(o); const char *name = get_name_in_error_scope(o);
@ -1844,7 +1862,7 @@ public:
} }
}); });
if (!m_ptr || PyCapsule_SetContext(m_ptr, (void *) destructor) != 0) { if (!m_ptr || PyCapsule_SetContext(m_ptr, reinterpret_cast<void *>(destructor)) != 0) {
throw error_already_set(); throw error_already_set();
} }
} }
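A brief usage sketch of the two destructor styles kept by this change (names are made up): the PyCapsule_Destructor form receives the capsule object, while the void (*)(void *) convenience form receives the raw pointer and is stored in the capsule's context as shown above.

#include <pybind11/pybind11.h>

#include <cstdlib>

namespace py = pybind11;

void make_capsules() {
    // 1) PyCapsule_Destructor form: the callback receives the capsule itself.
    void *payload1 = std::malloc(16);
    py::capsule c1(payload1, "my_payload", [](PyObject *o) {
        std::free(PyCapsule_GetPointer(o, "my_payload"));
    });

    // 2) Convenience form: the callback receives the raw pointer directly.
    void *payload2 = std::malloc(16);
    py::capsule c2(payload2, [](void *p) { std::free(p); });
}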
@ -1968,7 +1986,11 @@ public:
void clear() /* py-non-const */ { PyDict_Clear(ptr()); } void clear() /* py-non-const */ { PyDict_Clear(ptr()); }
template <typename T> template <typename T>
bool contains(T &&key) const { bool contains(T &&key) const {
return PyDict_Contains(m_ptr, detail::object_or_cast(std::forward<T>(key)).ptr()) == 1; auto result = PyDict_Contains(m_ptr, detail::object_or_cast(std::forward<T>(key)).ptr());
if (result == -1) {
throw error_already_set();
}
return result == 1;
} }
private: private:
@ -2054,7 +2076,11 @@ public:
bool empty() const { return size() == 0; } bool empty() const { return size() == 0; }
template <typename T> template <typename T>
bool contains(T &&val) const { bool contains(T &&val) const {
return PySet_Contains(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr()) == 1; auto result = PySet_Contains(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr());
if (result == -1) {
throw error_already_set();
}
return result == 1;
} }
}; };
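With the checks above, contains() surfaces Python-side failures (most commonly "TypeError: unhashable type") as error_already_set instead of silently reporting the key as absent. A minimal sketch of what callers may now want to handle (hypothetical helper name):

#include <pybind11/pybind11.h>

namespace py = pybind11;

bool contains_or_false(const py::dict &d, const py::object &key) {
    try {
        return d.contains(key);
    } catch (const py::error_already_set &) {
        // e.g. the key is unhashable; decide here whether to swallow or re-raise.
        return false;
    }
}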

View File

@ -311,7 +311,7 @@ struct optional_caster {
template <typename T> template <typename T>
static handle cast(T &&src, return_value_policy policy, handle parent) { static handle cast(T &&src, return_value_policy policy, handle parent) {
if (!src) { if (!src) {
return none().inc_ref(); return none().release();
} }
if (!std::is_lvalue_reference<T>::value) { if (!std::is_lvalue_reference<T>::value) {
policy = return_value_policy_override<Value>::policy(policy); policy = return_value_policy_override<Value>::policy(policy);
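As far as reference counting is concerned, both forms hand the caller one new reference to None: release() transfers the reference the temporary already owns, whereas inc_ref() adds an extra reference that the temporary's destructor immediately takes back. A tiny illustrative comparison (not part of the patch):

#include <pybind11/pybind11.h>

namespace py = pybind11;

py::handle none_via_release() { return py::none().release(); } // no redundant inc/dec pair
py::handle none_via_inc_ref() { return py::none().inc_ref(); } // same net result, extra refcount churn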

View File

@ -128,7 +128,9 @@ set(PYBIND11_TEST_FILES
test_custom_type_casters test_custom_type_casters
test_custom_type_setup test_custom_type_setup
test_docstring_options test_docstring_options
test_eigen test_eigen_matrix
test_eigen_tensor
test_eigen_tensor_avoid_stl_array.cpp
test_enum test_enum
test_eval test_eval
test_exceptions test_exceptions
@ -233,7 +235,10 @@ list(GET PYBIND11_EIGEN_VERSION_AND_HASH 1 PYBIND11_EIGEN_VERSION_HASH)
# Check if Eigen is available; if not, remove from PYBIND11_TEST_FILES (but # Check if Eigen is available; if not, remove from PYBIND11_TEST_FILES (but
# keep it in PYBIND11_PYTEST_FILES, so that we get the "eigen is not installed" # keep it in PYBIND11_PYTEST_FILES, so that we get the "eigen is not installed"
# skip message). # skip message).
list(FIND PYBIND11_TEST_FILES test_eigen.cpp PYBIND11_TEST_FILES_EIGEN_I) list(FIND PYBIND11_TEST_FILES test_eigen_matrix.cpp PYBIND11_TEST_FILES_EIGEN_I)
if(PYBIND11_TEST_FILES_EIGEN_I EQUAL -1)
list(FIND PYBIND11_TEST_FILES test_eigen_tensor.cpp PYBIND11_TEST_FILES_EIGEN_I)
endif()
if(PYBIND11_TEST_FILES_EIGEN_I GREATER -1) if(PYBIND11_TEST_FILES_EIGEN_I GREATER -1)
# Try loading via newer Eigen's Eigen3Config first (bypassing tools/FindEigen3.cmake). # Try loading via newer Eigen's Eigen3Config first (bypassing tools/FindEigen3.cmake).
# Eigen 3.3.1+ exports a cmake 3.0+ target for handling dependency requirements, but also # Eigen 3.3.1+ exports a cmake 3.0+ target for handling dependency requirements, but also
@ -289,12 +294,37 @@ if(PYBIND11_TEST_FILES_EIGEN_I GREATER -1)
endif() endif()
message(STATUS "Building tests with Eigen v${EIGEN3_VERSION}") message(STATUS "Building tests with Eigen v${EIGEN3_VERSION}")
else() else()
list(FIND PYBIND11_TEST_FILES test_eigen_matrix.cpp PYBIND11_TEST_FILES_EIGEN_I)
if(PYBIND11_TEST_FILES_EIGEN_I GREATER -1)
list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_EIGEN_I}) list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_EIGEN_I})
endif()
list(FIND PYBIND11_TEST_FILES test_eigen_tensor.cpp PYBIND11_TEST_FILES_EIGEN_I)
if(PYBIND11_TEST_FILES_EIGEN_I GREATER -1)
list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_EIGEN_I})
endif()
list(FIND PYBIND11_TEST_FILES test_eigen_tensor_avoid_stl_array.cpp
PYBIND11_TEST_FILES_EIGEN_I)
if(PYBIND11_TEST_FILES_EIGEN_I GREATER -1)
list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_EIGEN_I})
endif()
message( message(
STATUS "Building tests WITHOUT Eigen, use -DDOWNLOAD_EIGEN=ON on CMake 3.11+ to download") STATUS "Building tests WITHOUT Eigen, use -DDOWNLOAD_EIGEN=ON on CMake 3.11+ to download")
endif() endif()
endif() endif()
# Some code (the Eigen Tensor tests) doesn't build with GCC 4
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)
list(FIND PYBIND11_TEST_FILES test_eigen_tensor.cpp PYBIND11_TEST_FILES_EIGEN_I)
if(PYBIND11_TEST_FILES_EIGEN_I GREATER -1)
list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_EIGEN_I})
endif()
list(FIND PYBIND11_TEST_FILES test_eigen_tensor_avoid_stl_array.cpp PYBIND11_TEST_FILES_EIGEN_I)
if(PYBIND11_TEST_FILES_EIGEN_I GREATER -1)
list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_EIGEN_I})
endif()
endif()
# Optional dependency for some tests (boost::variant is only supported with version >= 1.56) # Optional dependency for some tests (boost::variant is only supported with version >= 1.56)
find_package(Boost 1.56) find_package(Boost 1.56)

View File

@ -210,4 +210,5 @@ def pytest_report_header(config):
f" {pybind11_tests.compiler_info}" f" {pybind11_tests.compiler_info}"
f" {pybind11_tests.cpp_std}" f" {pybind11_tests.cpp_std}"
f" {pybind11_tests.PYBIND11_INTERNALS_ID}" f" {pybind11_tests.PYBIND11_INTERNALS_ID}"
f" PYBIND11_SIMPLE_GIL_MANAGEMENT={pybind11_tests.PYBIND11_SIMPLE_GIL_MANAGEMENT}"
) )

View File

@ -6,9 +6,15 @@
All rights reserved. Use of this source code is governed by a All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file. BSD-style license that can be found in the LICENSE file.
*/ */
#if defined(PYBIND11_INTERNALS_VERSION)
# undef PYBIND11_INTERNALS_VERSION
#endif
#define PYBIND11_INTERNALS_VERSION 21814642 // Ensure this module has its own `internals` instance.
#include <pybind11/pybind11.h> #include <pybind11/pybind11.h>
#include <cstdint> #include <cstdint>
#include <string>
#include <thread>
// This file mimics a DSO that makes pybind11 calls but does not define a // This file mimics a DSO that makes pybind11 calls but does not define a
// PYBIND11_MODULE. The purpose is to test that such a DSO can create a // PYBIND11_MODULE. The purpose is to test that such a DSO can create a
@ -21,8 +27,54 @@
namespace { namespace {
namespace py = pybind11; namespace py = pybind11;
void gil_acquire() { py::gil_scoped_acquire gil; } void gil_acquire() { py::gil_scoped_acquire gil; }
std::string gil_multi_acquire_release(unsigned bits) {
if ((bits & 0x1u) != 0u) {
py::gil_scoped_acquire gil;
}
if ((bits & 0x2u) != 0u) {
py::gil_scoped_release gil;
}
if ((bits & 0x4u) != 0u) {
py::gil_scoped_acquire gil;
}
if ((bits & 0x8u) != 0u) {
py::gil_scoped_release gil;
}
return PYBIND11_INTERNALS_ID;
}
struct CustomAutoGIL {
CustomAutoGIL() : gstate(PyGILState_Ensure()) {}
~CustomAutoGIL() { PyGILState_Release(gstate); }
PyGILState_STATE gstate;
};
struct CustomAutoNoGIL {
CustomAutoNoGIL() : save(PyEval_SaveThread()) {}
~CustomAutoNoGIL() { PyEval_RestoreThread(save); }
PyThreadState *save;
};
template <typename Acquire, typename Release>
void gil_acquire_inner() {
Acquire acquire_outer;
Acquire acquire_inner;
Release release;
}
template <typename Acquire, typename Release>
void gil_acquire_nested() {
Acquire acquire_outer;
Acquire acquire_inner;
Release release;
auto thread = std::thread(&gil_acquire_inner<Acquire, Release>);
thread.join();
}
constexpr char kModuleName[] = "cross_module_gil_utils"; constexpr char kModuleName[] = "cross_module_gil_utils";
struct PyModuleDef moduledef = { struct PyModuleDef moduledef = {
@ -30,6 +82,9 @@ struct PyModuleDef moduledef = {
} // namespace } // namespace
#define ADD_FUNCTION(Name, ...) \
PyModule_AddObject(m, Name, PyLong_FromVoidPtr(reinterpret_cast<void *>(&__VA_ARGS__)));
extern "C" PYBIND11_EXPORT PyObject *PyInit_cross_module_gil_utils() { extern "C" PYBIND11_EXPORT PyObject *PyInit_cross_module_gil_utils() {
PyObject *m = PyModule_Create(&moduledef); PyObject *m = PyModule_Create(&moduledef);
@ -37,8 +92,16 @@ extern "C" PYBIND11_EXPORT PyObject *PyInit_cross_module_gil_utils() {
if (m != nullptr) { if (m != nullptr) {
static_assert(sizeof(&gil_acquire) == sizeof(void *), static_assert(sizeof(&gil_acquire) == sizeof(void *),
"Function pointer must have the same size as void*"); "Function pointer must have the same size as void*");
PyModule_AddObject( ADD_FUNCTION("gil_acquire_funcaddr", gil_acquire)
m, "gil_acquire_funcaddr", PyLong_FromVoidPtr(reinterpret_cast<void *>(&gil_acquire))); ADD_FUNCTION("gil_multi_acquire_release_funcaddr", gil_multi_acquire_release)
ADD_FUNCTION("gil_acquire_inner_custom_funcaddr",
gil_acquire_inner<CustomAutoGIL, CustomAutoNoGIL>)
ADD_FUNCTION("gil_acquire_nested_custom_funcaddr",
gil_acquire_nested<CustomAutoGIL, CustomAutoNoGIL>)
ADD_FUNCTION("gil_acquire_inner_pybind11_funcaddr",
gil_acquire_inner<py::gil_scoped_acquire, py::gil_scoped_release>)
ADD_FUNCTION("gil_acquire_nested_pybind11_funcaddr",
gil_acquire_nested<py::gil_scoped_acquire, py::gil_scoped_release>)
} }
return m; return m;
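For orientation, the addresses registered above are consumed from another DSO by converting the exported integers back into function pointers; this mirrors the CROSS_MODULE helper added to test_gil_scoped.cpp later in this diff. A minimal consumer-side sketch (illustrative function name):

#include <pybind11/pybind11.h>

namespace py = pybind11;

void call_gil_acquire_from_another_module() {
    auto cm = py::module_::import("cross_module_gil_utils");
    auto target
        = reinterpret_cast<void (*)()>(PyLong_AsVoidPtr(cm.attr("gil_acquire_funcaddr").ptr()));
    py::gil_scoped_release release; // call with the GIL released; the callee re-acquires it
    target();
}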

View File

@ -55,6 +55,11 @@ detail_headers = {
"include/pybind11/detail/typeid.h", "include/pybind11/detail/typeid.h",
} }
eigen_headers = {
"include/pybind11/eigen/matrix.h",
"include/pybind11/eigen/tensor.h",
}
stl_headers = { stl_headers = {
"include/pybind11/stl/filesystem.h", "include/pybind11/stl/filesystem.h",
} }
@ -82,7 +87,7 @@ py_files = {
"setup_helpers.py", "setup_helpers.py",
} }
headers = main_headers | detail_headers | stl_headers headers = main_headers | detail_headers | eigen_headers | stl_headers
src_files = headers | cmake_files | pkgconfig_files src_files = headers | cmake_files | pkgconfig_files
all_files = src_files | py_files all_files = src_files | py_files
@ -92,6 +97,7 @@ sdist_files = {
"pybind11/include", "pybind11/include",
"pybind11/include/pybind11", "pybind11/include/pybind11",
"pybind11/include/pybind11/detail", "pybind11/include/pybind11/detail",
"pybind11/include/pybind11/eigen",
"pybind11/include/pybind11/stl", "pybind11/include/pybind11/stl",
"pybind11/share", "pybind11/share",
"pybind11/share/cmake", "pybind11/share/cmake",

View File

@ -89,6 +89,12 @@ PYBIND11_MODULE(pybind11_tests, m) {
#endif #endif
m.attr("cpp_std") = cpp_std(); m.attr("cpp_std") = cpp_std();
m.attr("PYBIND11_INTERNALS_ID") = PYBIND11_INTERNALS_ID; m.attr("PYBIND11_INTERNALS_ID") = PYBIND11_INTERNALS_ID;
m.attr("PYBIND11_SIMPLE_GIL_MANAGEMENT") =
#if defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
true;
#else
false;
#endif
bind_ConstructorStats(m); bind_ConstructorStats(m);

View File

@ -240,4 +240,41 @@ TEST_SUBMODULE(callbacks, m) {
f(); f();
} }
}); });
auto *custom_def = []() {
static PyMethodDef def;
def.ml_name = "example_name";
def.ml_doc = "Example doc";
def.ml_meth = [](PyObject *, PyObject *args) -> PyObject * {
if (PyTuple_Size(args) != 1) {
throw std::runtime_error("Invalid number of arguments for example_name");
}
PyObject *first = PyTuple_GetItem(args, 0);
if (!PyLong_Check(first)) {
throw std::runtime_error("Invalid argument to example_name");
}
auto result = py::cast(PyLong_AsLong(first) * 9);
return result.release().ptr();
};
def.ml_flags = METH_VARARGS;
return &def;
}();
// rec_capsule with a name that has the same value (but not the same pointer) as our internal one.
// This capsule should be detected by our code as foreign and not inspected, because the
// name pointers won't match.
constexpr const char *rec_capsule_name
= pybind11::detail::internals_function_record_capsule_name;
py::capsule rec_capsule(std::malloc(1), [](void *data) { std::free(data); });
rec_capsule.set_name(rec_capsule_name);
m.add_object("custom_function", PyCFunction_New(custom_def, rec_capsule.ptr()));
// This test requires a new ABI version to pass
#if PYBIND11_INTERNALS_VERSION > 4
// rec_capsule with nullptr name
py::capsule rec_capsule2(std::malloc(1), [](void *data) { std::free(data); });
m.add_object("custom_function2", PyCFunction_New(custom_def, rec_capsule2.ptr()));
#else
m.add_object("custom_function2", py::none());
#endif
} }
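The comment above indicates that the foreign-capsule check compares the name pointer rather than the characters: a capsule whose name merely spells the same as internals_function_record_capsule_name is still treated as foreign. A small standalone illustration of pointer identity vs. value equality (hypothetical helper, not pybind11 API):

#include <cassert>
#include <cstring>
#include <string>

void name_identity_example(const char *internal_name) {
    std::string copy(internal_name);                        // same characters...
    assert(std::strcmp(copy.c_str(), internal_name) == 0);  // ...equal by value,
    assert(copy.c_str() != internal_name);                  // ...but a different address,
                                                            // so it reads as foreign.
}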

View File

@ -193,3 +193,16 @@ def test_callback_num_times():
if len(rates) > 1: if len(rates) > 1:
print("Min Mean Max") print("Min Mean Max")
print(f"{min(rates):6.3f} {sum(rates) / len(rates):6.3f} {max(rates):6.3f}") print(f"{min(rates):6.3f} {sum(rates) / len(rates):6.3f} {max(rates):6.3f}")
def test_custom_func():
assert m.custom_function(4) == 36
assert m.roundtrip(m.custom_function)(4) == 36
@pytest.mark.skipif(
m.custom_function2 is None, reason="Current PYBIND11_INTERNALS_VERSION too low"
)
def test_custom_func2():
assert m.custom_function2(3) == 27
assert m.roundtrip(m.custom_function2)(3) == 27

View File

@ -392,6 +392,8 @@ TEST_SUBMODULE(class_, m) {
protected: protected:
virtual int foo() const { return value; } virtual int foo() const { return value; }
virtual void *void_foo() { return static_cast<void *>(&value); }
virtual void *get_self() { return static_cast<void *>(this); }
private: private:
int value = 42; int value = 42;
@ -400,6 +402,8 @@ TEST_SUBMODULE(class_, m) {
class TrampolineB : public ProtectedB { class TrampolineB : public ProtectedB {
public: public:
int foo() const override { PYBIND11_OVERRIDE(int, ProtectedB, foo, ); } int foo() const override { PYBIND11_OVERRIDE(int, ProtectedB, foo, ); }
void *void_foo() override { PYBIND11_OVERRIDE(void *, ProtectedB, void_foo, ); }
void *get_self() override { PYBIND11_OVERRIDE(void *, ProtectedB, get_self, ); }
}; };
class PublicistB : public ProtectedB { class PublicistB : public ProtectedB {
@ -409,11 +413,23 @@ TEST_SUBMODULE(class_, m) {
// (in Debug builds only, tested with icpc (ICC) 2021.1 Beta 20200827) // (in Debug builds only, tested with icpc (ICC) 2021.1 Beta 20200827)
~PublicistB() override{}; // NOLINT(modernize-use-equals-default) ~PublicistB() override{}; // NOLINT(modernize-use-equals-default)
using ProtectedB::foo; using ProtectedB::foo;
using ProtectedB::get_self;
using ProtectedB::void_foo;
}; };
m.def("read_foo", [](const void *original) {
const int *ptr = reinterpret_cast<const int *>(original);
return *ptr;
});
m.def("pointers_equal",
[](const void *original, const void *comparison) { return original == comparison; });
py::class_<ProtectedB, TrampolineB>(m, "ProtectedB") py::class_<ProtectedB, TrampolineB>(m, "ProtectedB")
.def(py::init<>()) .def(py::init<>())
.def("foo", &PublicistB::foo); .def("foo", &PublicistB::foo)
.def("void_foo", &PublicistB::void_foo)
.def("get_self", &PublicistB::get_self);
// test_brace_initialization // test_brace_initialization
struct BraceInitialization { struct BraceInitialization {

View File

@ -320,6 +320,8 @@ def test_bind_protected_functions():
b = m.ProtectedB() b = m.ProtectedB()
assert b.foo() == 42 assert b.foo() == 42
assert m.read_foo(b.void_foo()) == 42
assert m.pointers_equal(b.get_self(), b)
class C(m.ProtectedB): class C(m.ProtectedB):
def __init__(self): def __init__(self):

View File

@ -7,7 +7,7 @@
BSD-style license that can be found in the LICENSE file. BSD-style license that can be found in the LICENSE file.
*/ */
#include <pybind11/eigen.h> #include <pybind11/eigen/matrix.h>
#include <pybind11/stl.h> #include <pybind11/stl.h>
#include "constructor_stats.h" #include "constructor_stats.h"
@ -81,7 +81,7 @@ struct CustomOperatorNew {
EIGEN_MAKE_ALIGNED_OPERATOR_NEW; EIGEN_MAKE_ALIGNED_OPERATOR_NEW;
}; };
TEST_SUBMODULE(eigen, m) { TEST_SUBMODULE(eigen_matrix, m) {
using FixedMatrixR = Eigen::Matrix<float, 5, 6, Eigen::RowMajor>; using FixedMatrixR = Eigen::Matrix<float, 5, 6, Eigen::RowMajor>;
using FixedMatrixC = Eigen::Matrix<float, 5, 6>; using FixedMatrixC = Eigen::Matrix<float, 5, 6>;
using DenseMatrixR = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>; using DenseMatrixR = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;

View File

@ -3,7 +3,7 @@ import pytest
from pybind11_tests import ConstructorStats from pybind11_tests import ConstructorStats
np = pytest.importorskip("numpy") np = pytest.importorskip("numpy")
m = pytest.importorskip("pybind11_tests.eigen") m = pytest.importorskip("pybind11_tests.eigen_matrix")
ref = np.array( ref = np.array(

View File

@ -0,0 +1,16 @@
/*
tests/test_eigen_tensor.cpp -- automatic conversion of Eigen Tensor
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
constexpr const char *test_eigen_tensor_module_name = "eigen_tensor";
#define PYBIND11_TEST_EIGEN_TENSOR_NAMESPACE eigen_tensor
#ifdef EIGEN_AVOID_STL_ARRAY
# undef EIGEN_AVOID_STL_ARRAY
#endif
#include "test_eigen_tensor.inl"

tests/test_eigen_tensor.inl (new file, 333 lines)
View File

@ -0,0 +1,333 @@
/*
tests/test_eigen_tensor.inl -- automatic conversion of Eigen Tensor
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#include <pybind11/eigen/tensor.h>
#include "pybind11_tests.h"
namespace PYBIND11_TEST_EIGEN_TENSOR_NAMESPACE {
template <typename M>
void reset_tensor(M &x) {
for (int i = 0; i < x.dimension(0); i++) {
for (int j = 0; j < x.dimension(1); j++) {
for (int k = 0; k < x.dimension(2); k++) {
x(i, j, k) = i * (5 * 2) + j * 2 + k;
}
}
}
}
template <typename M>
bool check_tensor(M &x) {
for (int i = 0; i < x.dimension(0); i++) {
for (int j = 0; j < x.dimension(1); j++) {
for (int k = 0; k < x.dimension(2); k++) {
if (x(i, j, k) != (i * (5 * 2) + j * 2 + k)) {
return false;
}
}
}
}
return true;
}
template <int Options>
Eigen::Tensor<double, 3, Options> &get_tensor() {
static Eigen::Tensor<double, 3, Options> *x;
if (!x) {
x = new Eigen::Tensor<double, 3, Options>(3, 5, 2);
reset_tensor(*x);
}
return *x;
}
template <int Options>
Eigen::TensorMap<Eigen::Tensor<double, 3, Options>> &get_tensor_map() {
static Eigen::TensorMap<Eigen::Tensor<double, 3, Options>> *x;
if (!x) {
x = new Eigen::TensorMap<Eigen::Tensor<double, 3, Options>>(get_tensor<Options>());
}
return *x;
}
template <int Options>
Eigen::TensorFixedSize<double, Eigen::Sizes<3, 5, 2>, Options> &get_fixed_tensor() {
static Eigen::TensorFixedSize<double, Eigen::Sizes<3, 5, 2>, Options> *x;
if (!x) {
Eigen::aligned_allocator<Eigen::TensorFixedSize<double, Eigen::Sizes<3, 5, 2>, Options>>
allocator;
x = new (allocator.allocate(1))
Eigen::TensorFixedSize<double, Eigen::Sizes<3, 5, 2>, Options>();
reset_tensor(*x);
}
return *x;
}
template <int Options>
const Eigen::Tensor<double, 3, Options> &get_const_tensor() {
return get_tensor<Options>();
}
template <int Options>
struct CustomExample {
CustomExample() : member(get_tensor<Options>()), view_member(member) {}
Eigen::Tensor<double, 3, Options> member;
Eigen::TensorMap<Eigen::Tensor<double, 3, Options>> view_member;
};
template <int Options>
void init_tensor_module(pybind11::module &m) {
const char *needed_options = "";
if (PYBIND11_SILENCE_MSVC_C4127(Options == Eigen::ColMajor)) {
needed_options = "F";
} else {
needed_options = "C";
}
m.attr("needed_options") = needed_options;
m.def("setup", []() {
reset_tensor(get_tensor<Options>());
reset_tensor(get_fixed_tensor<Options>());
});
m.def("is_ok", []() {
return check_tensor(get_tensor<Options>()) && check_tensor(get_fixed_tensor<Options>());
});
py::class_<CustomExample<Options>>(m, "CustomExample")
.def(py::init<>())
.def_readonly(
"member", &CustomExample<Options>::member, py::return_value_policy::reference_internal)
.def_readonly("member_view",
&CustomExample<Options>::view_member,
py::return_value_policy::reference_internal);
m.def(
"copy_fixed_tensor",
[]() { return &get_fixed_tensor<Options>(); },
py::return_value_policy::copy);
m.def(
"copy_tensor", []() { return &get_tensor<Options>(); }, py::return_value_policy::copy);
m.def(
"copy_const_tensor",
[]() { return &get_const_tensor<Options>(); },
py::return_value_policy::copy);
m.def(
"move_fixed_tensor_copy",
[]() -> Eigen::TensorFixedSize<double, Eigen::Sizes<3, 5, 2>, Options> {
return get_fixed_tensor<Options>();
},
py::return_value_policy::move);
m.def(
"move_tensor_copy",
[]() -> Eigen::Tensor<double, 3, Options> { return get_tensor<Options>(); },
py::return_value_policy::move);
m.def(
"move_const_tensor",
[]() -> const Eigen::Tensor<double, 3, Options> & { return get_const_tensor<Options>(); },
py::return_value_policy::move);
m.def(
"take_fixed_tensor",
[]() {
Eigen::aligned_allocator<
Eigen::TensorFixedSize<double, Eigen::Sizes<3, 5, 2>, Options>>
allocator;
return new (allocator.allocate(1))
Eigen::TensorFixedSize<double, Eigen::Sizes<3, 5, 2>, Options>(
get_fixed_tensor<Options>());
},
py::return_value_policy::take_ownership);
m.def(
"take_tensor",
[]() { return new Eigen::Tensor<double, 3, Options>(get_tensor<Options>()); },
py::return_value_policy::take_ownership);
m.def(
"take_const_tensor",
[]() -> const Eigen::Tensor<double, 3, Options> * {
return new Eigen::Tensor<double, 3, Options>(get_tensor<Options>());
},
py::return_value_policy::take_ownership);
m.def(
"take_view_tensor",
[]() -> const Eigen::TensorMap<Eigen::Tensor<double, 3, Options>> * {
return new Eigen::TensorMap<Eigen::Tensor<double, 3, Options>>(get_tensor<Options>());
},
py::return_value_policy::take_ownership);
m.def(
"reference_tensor",
[]() { return &get_tensor<Options>(); },
py::return_value_policy::reference);
m.def(
"reference_tensor_v2",
[]() -> Eigen::Tensor<double, 3, Options> & { return get_tensor<Options>(); },
py::return_value_policy::reference);
m.def(
"reference_tensor_internal",
[]() { return &get_tensor<Options>(); },
py::return_value_policy::reference_internal);
m.def(
"reference_fixed_tensor",
[]() { return &get_tensor<Options>(); },
py::return_value_policy::reference);
m.def(
"reference_const_tensor",
[]() { return &get_const_tensor<Options>(); },
py::return_value_policy::reference);
m.def(
"reference_const_tensor_v2",
[]() -> const Eigen::Tensor<double, 3, Options> & { return get_const_tensor<Options>(); },
py::return_value_policy::reference);
m.def(
"reference_view_of_tensor",
[]() -> Eigen::TensorMap<Eigen::Tensor<double, 3, Options>> {
return get_tensor_map<Options>();
},
py::return_value_policy::reference);
m.def(
"reference_view_of_tensor_v2",
// NOLINTNEXTLINE(readability-const-return-type)
[]() -> const Eigen::TensorMap<Eigen::Tensor<double, 3, Options>> {
return get_tensor_map<Options>(); // NOLINT(readability-const-return-type)
}, // NOLINT(readability-const-return-type)
py::return_value_policy::reference);
m.def(
"reference_view_of_tensor_v3",
[]() -> Eigen::TensorMap<Eigen::Tensor<double, 3, Options>> * {
return &get_tensor_map<Options>();
},
py::return_value_policy::reference);
m.def(
"reference_view_of_tensor_v4",
[]() -> const Eigen::TensorMap<Eigen::Tensor<double, 3, Options>> * {
return &get_tensor_map<Options>();
},
py::return_value_policy::reference);
m.def(
"reference_view_of_tensor_v5",
[]() -> Eigen::TensorMap<Eigen::Tensor<double, 3, Options>> & {
return get_tensor_map<Options>();
},
py::return_value_policy::reference);
m.def(
"reference_view_of_tensor_v6",
[]() -> const Eigen::TensorMap<Eigen::Tensor<double, 3, Options>> & {
return get_tensor_map<Options>();
},
py::return_value_policy::reference);
m.def(
"reference_view_of_fixed_tensor",
[]() {
return Eigen::TensorMap<
Eigen::TensorFixedSize<double, Eigen::Sizes<3, 5, 2>, Options>>(
get_fixed_tensor<Options>());
},
py::return_value_policy::reference);
m.def("round_trip_tensor",
[](const Eigen::Tensor<double, 3, Options> &tensor) { return tensor; });
m.def(
"round_trip_tensor_noconvert",
[](const Eigen::Tensor<double, 3, Options> &tensor) { return tensor; },
py::arg("tensor").noconvert());
m.def("round_trip_tensor2",
[](const Eigen::Tensor<int32_t, 3, Options> &tensor) { return tensor; });
m.def("round_trip_fixed_tensor",
[](const Eigen::TensorFixedSize<double, Eigen::Sizes<3, 5, 2>, Options> &tensor) {
return tensor;
});
m.def(
"round_trip_view_tensor",
[](Eigen::TensorMap<Eigen::Tensor<double, 3, Options>> view) { return view; },
py::return_value_policy::reference);
m.def(
"round_trip_view_tensor_ref",
[](Eigen::TensorMap<Eigen::Tensor<double, 3, Options>> &view) { return view; },
py::return_value_policy::reference);
m.def(
"round_trip_view_tensor_ptr",
[](Eigen::TensorMap<Eigen::Tensor<double, 3, Options>> *view) { return view; },
py::return_value_policy::reference);
m.def(
"round_trip_aligned_view_tensor",
[](Eigen::TensorMap<Eigen::Tensor<double, 3, Options>, Eigen::Aligned> view) {
return view;
},
py::return_value_policy::reference);
m.def(
"round_trip_const_view_tensor",
[](Eigen::TensorMap<const Eigen::Tensor<double, 3, Options>> view) {
return Eigen::Tensor<double, 3, Options>(view);
},
py::return_value_policy::move);
m.def(
"round_trip_rank_0",
[](const Eigen::Tensor<double, 0, Options> &tensor) { return tensor; },
py::return_value_policy::move);
m.def(
"round_trip_rank_0_noconvert",
[](const Eigen::Tensor<double, 0, Options> &tensor) { return tensor; },
py::arg("tensor").noconvert(),
py::return_value_policy::move);
m.def(
"round_trip_rank_0_view",
[](Eigen::TensorMap<Eigen::Tensor<double, 0, Options>> &tensor) { return tensor; },
py::return_value_policy::reference);
}
void test_module(py::module_ &);
test_initializer name(test_eigen_tensor_module_name, test_module);
void test_module(py::module_ &m) {
auto f_style = m.def_submodule("f_style");
auto c_style = m.def_submodule("c_style");
init_tensor_module<Eigen::ColMajor>(f_style);
init_tensor_module<Eigen::RowMajor>(c_style);
}
} // namespace PYBIND11_TEST_EIGEN_TENSOR_NAMESPACE

tests/test_eigen_tensor.py (new file, 296 lines)
View File

@ -0,0 +1,296 @@
import sys
import pytest
np = pytest.importorskip("numpy")
eigen_tensor = pytest.importorskip("pybind11_tests.eigen_tensor")
submodules = [eigen_tensor.c_style, eigen_tensor.f_style]
try:
from pybind11_tests import eigen_tensor_avoid_stl_array as avoid
submodules += [avoid.c_style, avoid.f_style]
except ImportError as e:
# Ensure config, build, toolchain, etc. issues are not masked here:
raise RuntimeError(
"import pybind11_tests.eigen_tensor_avoid_stl_array FAILED, while "
"import pybind11_tests.eigen_tensor succeeded. "
"Please ensure that "
"test_eigen_tensor.cpp & "
"test_eigen_tensor_avoid_stl_array.cpp "
"are built together (or both are not built if Eigen is not available)."
) from e
tensor_ref = np.empty((3, 5, 2), dtype=np.int64)
for i in range(tensor_ref.shape[0]):
for j in range(tensor_ref.shape[1]):
for k in range(tensor_ref.shape[2]):
tensor_ref[i, j, k] = i * (5 * 2) + j * 2 + k
indices = (2, 3, 1)
@pytest.fixture(autouse=True)
def cleanup():
for module in submodules:
module.setup()
yield
for module in submodules:
assert module.is_ok()
def test_import_avoid_stl_array():
pytest.importorskip("pybind11_tests.eigen_tensor_avoid_stl_array")
assert len(submodules) == 4
def assert_equal_tensor_ref(mat, writeable=True, modified=None):
assert mat.flags.writeable == writeable
copy = np.array(tensor_ref)
if modified is not None:
copy[indices] = modified
np.testing.assert_array_equal(mat, copy)
@pytest.mark.parametrize("m", submodules)
@pytest.mark.parametrize("member_name", ["member", "member_view"])
def test_reference_internal(m, member_name):
if not hasattr(sys, "getrefcount"):
pytest.skip("No reference counting")
foo = m.CustomExample()
counts = sys.getrefcount(foo)
mem = getattr(foo, member_name)
assert_equal_tensor_ref(mem, writeable=False)
new_counts = sys.getrefcount(foo)
assert new_counts == counts + 1
assert_equal_tensor_ref(mem, writeable=False)
del mem
assert sys.getrefcount(foo) == counts
assert_equal_funcs = [
"copy_tensor",
"copy_fixed_tensor",
"copy_const_tensor",
"move_tensor_copy",
"move_fixed_tensor_copy",
"take_tensor",
"take_fixed_tensor",
"reference_tensor",
"reference_tensor_v2",
"reference_fixed_tensor",
"reference_view_of_tensor",
"reference_view_of_tensor_v3",
"reference_view_of_tensor_v5",
"reference_view_of_fixed_tensor",
]
assert_equal_const_funcs = [
"reference_view_of_tensor_v2",
"reference_view_of_tensor_v4",
"reference_view_of_tensor_v6",
"reference_const_tensor",
"reference_const_tensor_v2",
]
@pytest.mark.parametrize("m", submodules)
@pytest.mark.parametrize("func_name", assert_equal_funcs + assert_equal_const_funcs)
def test_convert_tensor_to_py(m, func_name):
writeable = func_name in assert_equal_funcs
assert_equal_tensor_ref(getattr(m, func_name)(), writeable=writeable)
@pytest.mark.parametrize("m", submodules)
def test_bad_cpp_to_python_casts(m):
with pytest.raises(
RuntimeError, match="Cannot use reference internal when there is no parent"
):
m.reference_tensor_internal()
with pytest.raises(RuntimeError, match="Cannot move from a constant reference"):
m.move_const_tensor()
with pytest.raises(
RuntimeError, match="Cannot take ownership of a const reference"
):
m.take_const_tensor()
with pytest.raises(
RuntimeError,
match="Invalid return_value_policy for Eigen Map type, must be either reference or reference_internal",
):
m.take_view_tensor()
@pytest.mark.parametrize("m", submodules)
def test_bad_python_to_cpp_casts(m):
with pytest.raises(
TypeError, match=r"^round_trip_tensor\(\): incompatible function arguments"
):
m.round_trip_tensor(np.zeros((2, 3)))
with pytest.raises(TypeError, match=r"^Cannot cast array data from dtype"):
m.round_trip_tensor(np.zeros(dtype=np.str_, shape=(2, 3, 1)))
with pytest.raises(
TypeError,
match=r"^round_trip_tensor_noconvert\(\): incompatible function arguments",
):
m.round_trip_tensor_noconvert(tensor_ref)
assert_equal_tensor_ref(
m.round_trip_tensor_noconvert(tensor_ref.astype(np.float64))
)
if m.needed_options == "F":
bad_options = "C"
else:
bad_options = "F"
# Shape, dtype and the order need to be correct for a TensorMap cast
with pytest.raises(
TypeError, match=r"^round_trip_view_tensor\(\): incompatible function arguments"
):
m.round_trip_view_tensor(
np.zeros((3, 5, 2), dtype=np.float64, order=bad_options)
)
with pytest.raises(
TypeError, match=r"^round_trip_view_tensor\(\): incompatible function arguments"
):
m.round_trip_view_tensor(
np.zeros((3, 5, 2), dtype=np.float32, order=m.needed_options)
)
with pytest.raises(
TypeError, match=r"^round_trip_view_tensor\(\): incompatible function arguments"
):
m.round_trip_view_tensor(
np.zeros((3, 5), dtype=np.float64, order=m.needed_options)
)
with pytest.raises(
TypeError, match=r"^round_trip_view_tensor\(\): incompatible function arguments"
):
temp = np.zeros((3, 5, 2), dtype=np.float64, order=m.needed_options)
m.round_trip_view_tensor(
temp[:, ::-1, :],
)
with pytest.raises(
TypeError, match=r"^round_trip_view_tensor\(\): incompatible function arguments"
):
temp = np.zeros((3, 5, 2), dtype=np.float64, order=m.needed_options)
temp.setflags(write=False)
m.round_trip_view_tensor(temp)
@pytest.mark.parametrize("m", submodules)
def test_references_actually_refer(m):
a = m.reference_tensor()
temp = a[indices]
a[indices] = 100
assert_equal_tensor_ref(m.copy_const_tensor(), modified=100)
a[indices] = temp
assert_equal_tensor_ref(m.copy_const_tensor())
a = m.reference_view_of_tensor()
a[indices] = 100
assert_equal_tensor_ref(m.copy_const_tensor(), modified=100)
a[indices] = temp
assert_equal_tensor_ref(m.copy_const_tensor())
@pytest.mark.parametrize("m", submodules)
def test_round_trip(m):
assert_equal_tensor_ref(m.round_trip_tensor(tensor_ref))
with pytest.raises(TypeError, match="^Cannot cast array data from"):
assert_equal_tensor_ref(m.round_trip_tensor2(tensor_ref))
assert_equal_tensor_ref(m.round_trip_tensor2(np.array(tensor_ref, dtype=np.int32)))
assert_equal_tensor_ref(m.round_trip_fixed_tensor(tensor_ref))
assert_equal_tensor_ref(m.round_trip_aligned_view_tensor(m.reference_tensor()))
copy = np.array(tensor_ref, dtype=np.float64, order=m.needed_options)
assert_equal_tensor_ref(m.round_trip_view_tensor(copy))
assert_equal_tensor_ref(m.round_trip_view_tensor_ref(copy))
assert_equal_tensor_ref(m.round_trip_view_tensor_ptr(copy))
copy.setflags(write=False)
assert_equal_tensor_ref(m.round_trip_const_view_tensor(copy))
np.testing.assert_array_equal(
tensor_ref[:, ::-1, :], m.round_trip_tensor(tensor_ref[:, ::-1, :])
)
assert m.round_trip_rank_0(np.float64(3.5)) == 3.5
assert m.round_trip_rank_0(3.5) == 3.5
with pytest.raises(
TypeError,
match=r"^round_trip_rank_0_noconvert\(\): incompatible function arguments",
):
m.round_trip_rank_0_noconvert(np.float64(3.5))
with pytest.raises(
TypeError,
match=r"^round_trip_rank_0_noconvert\(\): incompatible function arguments",
):
m.round_trip_rank_0_noconvert(3.5)
with pytest.raises(
TypeError, match=r"^round_trip_rank_0_view\(\): incompatible function arguments"
):
m.round_trip_rank_0_view(np.float64(3.5))
with pytest.raises(
TypeError, match=r"^round_trip_rank_0_view\(\): incompatible function arguments"
):
m.round_trip_rank_0_view(3.5)
@pytest.mark.parametrize("m", submodules)
def test_round_trip_references_actually_refer(m):
# Need to create a copy that matches the type on the C side
copy = np.array(tensor_ref, dtype=np.float64, order=m.needed_options)
a = m.round_trip_view_tensor(copy)
temp = a[indices]
a[indices] = 100
assert_equal_tensor_ref(copy, modified=100)
a[indices] = temp
assert_equal_tensor_ref(copy)
@pytest.mark.parametrize("m", submodules)
def test_doc_string(m, doc):
assert (
doc(m.copy_tensor) == "copy_tensor() -> numpy.ndarray[numpy.float64[?, ?, ?]]"
)
assert (
doc(m.copy_fixed_tensor)
== "copy_fixed_tensor() -> numpy.ndarray[numpy.float64[3, 5, 2]]"
)
assert (
doc(m.reference_const_tensor)
== "reference_const_tensor() -> numpy.ndarray[numpy.float64[?, ?, ?]]"
)
order_flag = f"flags.{m.needed_options.lower()}_contiguous"
assert doc(m.round_trip_view_tensor) == (
f"round_trip_view_tensor(arg0: numpy.ndarray[numpy.float64[?, ?, ?], flags.writeable, {order_flag}])"
+ f" -> numpy.ndarray[numpy.float64[?, ?, ?], flags.writeable, {order_flag}]"
)
assert doc(m.round_trip_const_view_tensor) == (
f"round_trip_const_view_tensor(arg0: numpy.ndarray[numpy.float64[?, ?, ?], {order_flag}])"
+ " -> numpy.ndarray[numpy.float64[?, ?, ?]]"
)

View File

@ -0,0 +1,16 @@
/*
tests/test_eigen_tensor_avoid_stl_array.cpp -- automatic conversion of Eigen Tensor
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
constexpr const char *test_eigen_tensor_module_name = "eigen_tensor_avoid_stl_array";
#ifndef EIGEN_AVOID_STL_ARRAY
# define EIGEN_AVOID_STL_ARRAY
#endif
#define PYBIND11_TEST_EIGEN_TENSOR_NAMESPACE eigen_tensor_avoid_stl_array
#include "test_eigen_tensor.inl"

View File

@ -293,7 +293,6 @@ TEST_CASE("Threads") {
{ {
py::gil_scoped_release gil_release{}; py::gil_scoped_release gil_release{};
REQUIRE(has_pybind11_internals_static());
auto threads = std::vector<std::thread>(); auto threads = std::vector<std::thread>();
for (auto i = 0; i < num_threads; ++i) { for (auto i = 0; i < num_threads; ++i) {

View File

@ -105,11 +105,6 @@ struct PythonAlreadySetInDestructor {
py::str s; py::str s;
}; };
std::string error_already_set_what(const py::object &exc_type, const py::object &exc_value) {
PyErr_SetObject(exc_type.ptr(), exc_value.ptr());
return py::error_already_set().what();
}
TEST_SUBMODULE(exceptions, m) { TEST_SUBMODULE(exceptions, m) {
m.def("throw_std_exception", m.def("throw_std_exception",
[]() { throw std::runtime_error("This exception was intentionally thrown."); }); []() { throw std::runtime_error("This exception was intentionally thrown."); });

View File

@ -275,6 +275,20 @@ def test_local_translator(msg):
assert msg(excinfo.value) == "this mod" assert msg(excinfo.value) == "this mod"
def test_error_already_set_message_with_unicode_surrogate(): # Issue #4288
assert m.error_already_set_what(RuntimeError, "\ud927") == (
"RuntimeError: \\ud927",
False,
)
def test_error_already_set_message_with_malformed_utf8():
assert m.error_already_set_what(RuntimeError, b"\x80") == (
"RuntimeError: b'\\x80'",
False,
)
class FlakyException(Exception): class FlakyException(Exception):
def __init__(self, failure_point): def __init__(self, failure_point):
if failure_point == "failure_point_init": if failure_point == "failure_point_init":

View File

@ -11,6 +11,13 @@
#include "pybind11_tests.h" #include "pybind11_tests.h"
#include <string>
#include <thread>
#define CROSS_MODULE(Function) \
auto cm = py::module_::import("cross_module_gil_utils"); \
auto target = reinterpret_cast<void (*)()>(PyLong_AsVoidPtr(cm.attr(Function).ptr()));
class VirtClass { class VirtClass {
public: public:
virtual ~VirtClass() = default; virtual ~VirtClass() = default;
@ -28,6 +35,16 @@ class PyVirtClass : public VirtClass {
}; };
TEST_SUBMODULE(gil_scoped, m) { TEST_SUBMODULE(gil_scoped, m) {
m.attr("defined_THREAD_SANITIZER") =
#if defined(THREAD_SANITIZER)
true;
#else
false;
#endif
m.def("intentional_deadlock",
[]() { std::thread([]() { py::gil_scoped_acquire gil_acquired; }).join(); });
py::class_<VirtClass, PyVirtClass>(m, "VirtClass") py::class_<VirtClass, PyVirtClass>(m, "VirtClass")
.def(py::init<>()) .def(py::init<>())
.def("virtual_func", &VirtClass::virtual_func) .def("virtual_func", &VirtClass::virtual_func)
@ -37,11 +54,91 @@ TEST_SUBMODULE(gil_scoped, m) {
m.def("test_callback_std_func", [](const std::function<void()> &func) { func(); }); m.def("test_callback_std_func", [](const std::function<void()> &func) { func(); });
m.def("test_callback_virtual_func", [](VirtClass &virt) { virt.virtual_func(); }); m.def("test_callback_virtual_func", [](VirtClass &virt) { virt.virtual_func(); });
m.def("test_callback_pure_virtual_func", [](VirtClass &virt) { virt.pure_virtual_func(); }); m.def("test_callback_pure_virtual_func", [](VirtClass &virt) { virt.pure_virtual_func(); });
m.def("test_cross_module_gil", []() { m.def("test_cross_module_gil_released", []() {
auto cm = py::module_::import("cross_module_gil_utils"); CROSS_MODULE("gil_acquire_funcaddr")
auto gil_acquire = reinterpret_cast<void (*)()>(
PyLong_AsVoidPtr(cm.attr("gil_acquire_funcaddr").ptr()));
py::gil_scoped_release gil_release; py::gil_scoped_release gil_release;
gil_acquire(); target();
});
m.def("test_cross_module_gil_acquired", []() {
CROSS_MODULE("gil_acquire_funcaddr")
py::gil_scoped_acquire gil_acquire;
target();
});
m.def("test_cross_module_gil_inner_custom_released", []() {
CROSS_MODULE("gil_acquire_inner_custom_funcaddr")
py::gil_scoped_release gil_release;
target();
});
m.def("test_cross_module_gil_inner_custom_acquired", []() {
CROSS_MODULE("gil_acquire_inner_custom_funcaddr")
py::gil_scoped_acquire gil_acquire;
target();
});
m.def("test_cross_module_gil_inner_pybind11_released", []() {
CROSS_MODULE("gil_acquire_inner_pybind11_funcaddr")
py::gil_scoped_release gil_release;
target();
});
m.def("test_cross_module_gil_inner_pybind11_acquired", []() {
CROSS_MODULE("gil_acquire_inner_pybind11_funcaddr")
py::gil_scoped_acquire gil_acquire;
target();
});
m.def("test_cross_module_gil_nested_custom_released", []() {
CROSS_MODULE("gil_acquire_nested_custom_funcaddr")
py::gil_scoped_release gil_release;
target();
});
m.def("test_cross_module_gil_nested_custom_acquired", []() {
CROSS_MODULE("gil_acquire_nested_custom_funcaddr")
py::gil_scoped_acquire gil_acquire;
target();
});
m.def("test_cross_module_gil_nested_pybind11_released", []() {
CROSS_MODULE("gil_acquire_nested_pybind11_funcaddr")
py::gil_scoped_release gil_release;
target();
});
m.def("test_cross_module_gil_nested_pybind11_acquired", []() {
CROSS_MODULE("gil_acquire_nested_pybind11_funcaddr")
py::gil_scoped_acquire gil_acquire;
target();
});
m.def("test_release_acquire", [](const py::object &obj) {
py::gil_scoped_release gil_released;
py::gil_scoped_acquire gil_acquired;
return py::str(obj);
});
m.def("test_nested_acquire", [](const py::object &obj) {
py::gil_scoped_release gil_released;
py::gil_scoped_acquire gil_acquired_outer;
py::gil_scoped_acquire gil_acquired_inner;
return py::str(obj);
});
m.def("test_multi_acquire_release_cross_module", [](unsigned bits) {
py::set internals_ids;
internals_ids.add(PYBIND11_INTERNALS_ID);
{
py::gil_scoped_release gil_released;
auto thread_f = [bits, &internals_ids]() {
py::gil_scoped_acquire gil_acquired;
auto cm = py::module_::import("cross_module_gil_utils");
auto target = reinterpret_cast<std::string (*)(unsigned)>(
PyLong_AsVoidPtr(cm.attr("gil_multi_acquire_release_funcaddr").ptr()));
std::string cm_internals_id = target(bits >> 3);
internals_ids.add(cm_internals_id);
};
if ((bits & 0x1u) != 0u) {
thread_f();
}
if ((bits & 0x2u) != 0u) {
std::thread non_python_thread(thread_f);
non_python_thread.join();
}
if ((bits & 0x4u) != 0u) {
thread_f();
}
}
return internals_ids;
}); });
} }

View File

@ -1,45 +1,199 @@
import multiprocessing import multiprocessing
import sys
import threading import threading
import time
import pytest
from pybind11_tests import gil_scoped as m from pybind11_tests import gil_scoped as m
def _run_in_process(target, *args, **kwargs): class ExtendedVirtClass(m.VirtClass):
"""Runs target in process and returns its exitcode after 10s (None if still alive)."""
process = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
process.daemon = True
try:
process.start()
# Do not need to wait much, 10s should be more than enough.
process.join(timeout=10)
return process.exitcode
finally:
if process.is_alive():
process.terminate()
def _python_to_cpp_to_python():
"""Calls different C++ functions that come back to Python."""
class ExtendedVirtClass(m.VirtClass):
def virtual_func(self): def virtual_func(self):
pass pass
def pure_virtual_func(self): def pure_virtual_func(self):
pass pass
extended = ExtendedVirtClass()
def test_callback_py_obj():
m.test_callback_py_obj(lambda: None) m.test_callback_py_obj(lambda: None)
def test_callback_std_func():
m.test_callback_std_func(lambda: None) m.test_callback_std_func(lambda: None)
def test_callback_virtual_func():
extended = ExtendedVirtClass()
m.test_callback_virtual_func(extended) m.test_callback_virtual_func(extended)
def test_callback_pure_virtual_func():
extended = ExtendedVirtClass()
m.test_callback_pure_virtual_func(extended) m.test_callback_pure_virtual_func(extended)
def _python_to_cpp_to_python_from_threads(num_threads, parallel=False): def test_cross_module_gil_released():
"""Calls different C++ functions that come back to Python, from Python threads.""" """Makes sure that the GIL can be acquired by another module from a GIL-released state."""
m.test_cross_module_gil_released() # Should not raise a SIGSEGV
def test_cross_module_gil_acquired():
"""Makes sure that the GIL can be acquired by another module from a GIL-acquired state."""
m.test_cross_module_gil_acquired() # Should not raise a SIGSEGV
def test_cross_module_gil_inner_custom_released():
"""Makes sure that the GIL can be acquired/released by another module
from a GIL-released state using custom locking logic."""
m.test_cross_module_gil_inner_custom_released()
def test_cross_module_gil_inner_custom_acquired():
"""Makes sure that the GIL can be acquired/acquired by another module
from a GIL-acquired state using custom locking logic."""
m.test_cross_module_gil_inner_custom_acquired()
def test_cross_module_gil_inner_pybind11_released():
"""Makes sure that the GIL can be acquired/released by another module
from a GIL-released state using pybind11 locking logic."""
m.test_cross_module_gil_inner_pybind11_released()
def test_cross_module_gil_inner_pybind11_acquired():
"""Makes sure that the GIL can be acquired/acquired by another module
from a GIL-acquired state using pybind11 locking logic."""
m.test_cross_module_gil_inner_pybind11_acquired()
def test_cross_module_gil_nested_custom_released():
"""Makes sure that the GIL can be nested acquired/released by another module
from a GIL-released state using custom locking logic."""
m.test_cross_module_gil_nested_custom_released()
def test_cross_module_gil_nested_custom_acquired():
"""Makes sure that the GIL can be nested acquired/acquired by another module
from a GIL-acquired state using custom locking logic."""
m.test_cross_module_gil_nested_custom_acquired()
def test_cross_module_gil_nested_pybind11_released():
"""Makes sure that the GIL can be nested acquired/released by another module
from a GIL-released state using pybind11 locking logic."""
m.test_cross_module_gil_nested_pybind11_released()
def test_cross_module_gil_nested_pybind11_acquired():
"""Makes sure that the GIL can be nested acquired/acquired by another module
from a GIL-acquired state using pybind11 locking logic."""
m.test_cross_module_gil_nested_pybind11_acquired()
def test_release_acquire():
assert m.test_release_acquire(0xAB) == "171"
def test_nested_acquire():
assert m.test_nested_acquire(0xAB) == "171"
def test_multi_acquire_release_cross_module():
for bits in range(16 * 8):
internals_ids = m.test_multi_acquire_release_cross_module(bits)
assert len(internals_ids) == (2 if bits % 8 else 1)
# Intentionally putting human review in the loop here, to guard against accidents.
VARS_BEFORE_ALL_BASIC_TESTS = dict(vars()) # Make a copy of the dict (critical).
ALL_BASIC_TESTS = (
test_callback_py_obj,
test_callback_std_func,
test_callback_virtual_func,
test_callback_pure_virtual_func,
test_cross_module_gil_released,
test_cross_module_gil_acquired,
test_cross_module_gil_inner_custom_released,
test_cross_module_gil_inner_custom_acquired,
test_cross_module_gil_inner_pybind11_released,
test_cross_module_gil_inner_pybind11_acquired,
test_cross_module_gil_nested_custom_released,
test_cross_module_gil_nested_custom_acquired,
test_cross_module_gil_nested_pybind11_released,
test_cross_module_gil_nested_pybind11_acquired,
test_release_acquire,
test_nested_acquire,
test_multi_acquire_release_cross_module,
)
def test_all_basic_tests_completeness():
num_found = 0
for key, value in VARS_BEFORE_ALL_BASIC_TESTS.items():
if not key.startswith("test_"):
continue
assert value in ALL_BASIC_TESTS
num_found += 1
assert len(ALL_BASIC_TESTS) == num_found
def _intentional_deadlock():
m.intentional_deadlock()
ALL_BASIC_TESTS_PLUS_INTENTIONAL_DEADLOCK = ALL_BASIC_TESTS + (_intentional_deadlock,)
SKIP_IF_DEADLOCK = True # See PR #4216
def _run_in_process(target, *args, **kwargs):
if len(args) == 0:
test_fn = target
else:
test_fn = args[0]
# Do not need to wait much, 10s should be more than enough.
timeout = 0.1 if test_fn is _intentional_deadlock else 10
process = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
process.daemon = True
try:
t_start = time.time()
process.start()
if timeout >= 100: # For debugging.
print(
"\nprocess.pid STARTED", process.pid, (sys.argv, target, args, kwargs)
)
print(f"COPY-PASTE-THIS: gdb {sys.argv[0]} -p {process.pid}", flush=True)
process.join(timeout=timeout)
if timeout >= 100:
print("\nprocess.pid JOINED", process.pid, flush=True)
t_delta = time.time() - t_start
if process.exitcode == 66 and m.defined_THREAD_SANITIZER: # Issue #2754
# WOULD-BE-NICE-TO-HAVE: Check that the message below is actually in the output.
# Maybe this could work:
# https://gist.github.com/alexeygrigorev/01ce847f2e721b513b42ea4a6c96905e
pytest.skip(
"ThreadSanitizer: starting new threads after multi-threaded fork is not supported."
)
elif test_fn is _intentional_deadlock:
assert process.exitcode is None
return 0
elif process.exitcode is None:
assert t_delta > 0.9 * timeout
msg = "DEADLOCK, most likely, exactly what this test is meant to detect."
if SKIP_IF_DEADLOCK:
pytest.skip(msg)
raise RuntimeError(msg)
return process.exitcode
finally:
if process.is_alive():
process.terminate()
def _run_in_threads(test_fn, num_threads, parallel):
threads = [] threads = []
for _ in range(num_threads): for _ in range(num_threads):
thread = threading.Thread(target=_python_to_cpp_to_python) thread = threading.Thread(target=test_fn)
thread.daemon = True thread.daemon = True
thread.start() thread.start()
if parallel: if parallel:
@ -51,43 +205,40 @@ def _python_to_cpp_to_python_from_threads(num_threads, parallel=False):
# TODO: FIXME, sometimes returns -11 (segfault) instead of 0 on macOS Python 3.9 # TODO: FIXME, sometimes returns -11 (segfault) instead of 0 on macOS Python 3.9
def test_python_to_cpp_to_python_from_thread(): @pytest.mark.parametrize("test_fn", ALL_BASIC_TESTS_PLUS_INTENTIONAL_DEADLOCK)
def test_run_in_process_one_thread(test_fn):
"""Makes sure there is no GIL deadlock when running in a thread. """Makes sure there is no GIL deadlock when running in a thread.
It runs in a separate process to be able to stop and assert if it deadlocks. It runs in a separate process to be able to stop and assert if it deadlocks.
""" """
assert _run_in_process(_python_to_cpp_to_python_from_threads, 1) == 0 assert _run_in_process(_run_in_threads, test_fn, num_threads=1, parallel=False) == 0
# TODO: FIXME on macOS Python 3.9 # TODO: FIXME on macOS Python 3.9
def test_python_to_cpp_to_python_from_thread_multiple_parallel(): @pytest.mark.parametrize("test_fn", ALL_BASIC_TESTS_PLUS_INTENTIONAL_DEADLOCK)
def test_run_in_process_multiple_threads_parallel(test_fn):
"""Makes sure there is no GIL deadlock when running in a thread multiple times in parallel. """Makes sure there is no GIL deadlock when running in a thread multiple times in parallel.
It runs in a separate process to be able to stop and assert if it deadlocks. It runs in a separate process to be able to stop and assert if it deadlocks.
""" """
assert _run_in_process(_python_to_cpp_to_python_from_threads, 8, parallel=True) == 0 assert _run_in_process(_run_in_threads, test_fn, num_threads=8, parallel=True) == 0
# TODO: FIXME on macOS Python 3.9 # TODO: FIXME on macOS Python 3.9
def test_python_to_cpp_to_python_from_thread_multiple_sequential(): @pytest.mark.parametrize("test_fn", ALL_BASIC_TESTS_PLUS_INTENTIONAL_DEADLOCK)
def test_run_in_process_multiple_threads_sequential(test_fn):
"""Makes sure there is no GIL deadlock when running in a thread multiple times sequentially. """Makes sure there is no GIL deadlock when running in a thread multiple times sequentially.
It runs in a separate process to be able to stop and assert if it deadlocks. It runs in a separate process to be able to stop and assert if it deadlocks.
""" """
assert ( assert _run_in_process(_run_in_threads, test_fn, num_threads=8, parallel=False) == 0
_run_in_process(_python_to_cpp_to_python_from_threads, 8, parallel=False) == 0
)
# TODO: FIXME on macOS Python 3.9 # TODO: FIXME on macOS Python 3.9
def test_python_to_cpp_to_python_from_process(): @pytest.mark.parametrize("test_fn", ALL_BASIC_TESTS_PLUS_INTENTIONAL_DEADLOCK)
def test_run_in_process_direct(test_fn):
"""Makes sure there is no GIL deadlock when using processes. """Makes sure there is no GIL deadlock when using processes.
This test is for completeness, but it was never an issue. This test is for completeness, but it was never an issue.
""" """
assert _run_in_process(_python_to_cpp_to_python) == 0 assert _run_in_process(test_fn) == 0
def test_cross_module_gil():
"""Makes sure that the GIL can be acquired by another module from a GIL-released state."""
m.test_cross_module_gil() # Should not raise a SIGSEGV

View File

@ -521,4 +521,6 @@ TEST_SUBMODULE(numpy_array, sm) {
sm.def("test_fmt_desc_double", [](const py::array_t<double> &) {}); sm.def("test_fmt_desc_double", [](const py::array_t<double> &) {});
sm.def("test_fmt_desc_const_float", [](const py::array_t<const float> &) {}); sm.def("test_fmt_desc_const_float", [](const py::array_t<const float> &) {});
sm.def("test_fmt_desc_const_double", [](const py::array_t<const double> &) {}); sm.def("test_fmt_desc_const_double", [](const py::array_t<const double> &) {});
sm.def("round_trip_float", [](double d) { return d; });
} }

View File

@ -585,3 +585,9 @@ def test_dtype_refcount_leak():
m.ndim(a) m.ndim(a)
after = getrefcount(dtype) after = getrefcount(dtype)
assert after == before assert after == before
def test_round_trip_float():
arr = np.zeros((), np.float64)
arr[()] = 37.2
assert m.round_trip_float(arr) == 37.2

View File

@ -183,7 +183,7 @@ TEST_SUBMODULE(pytypes, m) {
return d2; return d2;
}); });
m.def("dict_contains", m.def("dict_contains",
[](const py::dict &dict, py::object val) { return dict.contains(val); }); [](const py::dict &dict, const py::object &val) { return dict.contains(val); });
m.def("dict_contains", m.def("dict_contains",
[](const py::dict &dict, const char *val) { return dict.contains(val); }); [](const py::dict &dict, const char *val) { return dict.contains(val); });
@ -206,7 +206,12 @@ TEST_SUBMODULE(pytypes, m) {
m.def("str_from_char_ssize_t", []() { return py::str{"red", (py::ssize_t) 3}; }); m.def("str_from_char_ssize_t", []() { return py::str{"red", (py::ssize_t) 3}; });
m.def("str_from_char_size_t", []() { return py::str{"blue", (py::size_t) 4}; }); m.def("str_from_char_size_t", []() { return py::str{"blue", (py::size_t) 4}; });
m.def("str_from_string", []() { return py::str(std::string("baz")); }); m.def("str_from_string", []() { return py::str(std::string("baz")); });
m.def("str_from_std_string_input", [](const std::string &stri) { return py::str(stri); });
m.def("str_from_cstr_input", [](const char *c_str) { return py::str(c_str); });
m.def("str_from_bytes", []() { return py::str(py::bytes("boo", 3)); }); m.def("str_from_bytes", []() { return py::str(py::bytes("boo", 3)); });
m.def("str_from_bytes_input",
[](const py::bytes &encoded_str) { return py::str(encoded_str); });
m.def("str_from_object", [](const py::object &obj) { return py::str(obj); }); m.def("str_from_object", [](const py::object &obj) { return py::str(obj); });
m.def("repr_from_object", [](const py::object &obj) { return py::repr(obj); }); m.def("repr_from_object", [](const py::object &obj) { return py::repr(obj); });
m.def("str_from_handle", [](py::handle h) { return py::str(h); }); m.def("str_from_handle", [](py::handle h) { return py::str(h); });
@ -538,6 +543,9 @@ TEST_SUBMODULE(pytypes, m) {
m.def("hash_function", [](py::object obj) { return py::hash(std::move(obj)); }); m.def("hash_function", [](py::object obj) { return py::hash(std::move(obj)); });
m.def("obj_contains",
[](py::object &obj, const py::object &key) { return obj.contains(key); });
m.def("test_number_protocol", [](const py::object &a, const py::object &b) { m.def("test_number_protocol", [](const py::object &a, const py::object &b) {
py::list l; py::list l;
l.append(a.equal(b)); l.append(a.equal(b));

View File

@ -168,6 +168,31 @@ def test_dict(capture, doc):
assert m.dict_keyword_constructor() == {"x": 1, "y": 2, "z": 3} assert m.dict_keyword_constructor() == {"x": 1, "y": 2, "z": 3}
class CustomContains:
d = {"key": None}
def __contains__(self, m):
return m in self.d
@pytest.mark.parametrize(
"arg,func",
[
(set(), m.anyset_contains),
(dict(), m.dict_contains),
(CustomContains(), m.obj_contains),
],
)
@pytest.mark.xfail("env.PYPY and sys.pypy_version_info < (7, 3, 10)", strict=False)
def test_unhashable_exceptions(arg, func):
class Unhashable:
__hash__ = None
with pytest.raises(TypeError) as exc_info:
func(arg, Unhashable())
assert "unhashable type:" in str(exc_info.value)
def test_tuple(): def test_tuple():
assert m.tuple_no_args() == () assert m.tuple_no_args() == ()
assert m.tuple_ssize_t() == () assert m.tuple_ssize_t() == ()
@ -219,6 +244,20 @@ def test_str(doc):
m.str_from_string_from_str(ucs_surrogates_str) m.str_from_string_from_str(ucs_surrogates_str)
@pytest.mark.parametrize(
"func",
[
m.str_from_bytes_input,
m.str_from_cstr_input,
m.str_from_std_string_input,
],
)
def test_surrogate_pairs_unicode_error(func):
input_str = "\ud83d\ude4f".encode("utf-8", "surrogatepass")
with pytest.raises(UnicodeDecodeError):
func(input_str)
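A plain-Python sketch of why these inputs must fail: "surrogatepass" lets lone surrogates survive encoding, but a strict UTF-8 decode, which py::str appears to apply to bytes and char* input, rejects them. Nothing here comes from the diff beyond the test's input value.

encoded = "\ud83d\ude4f".encode("utf-8", "surrogatepass")  # surrogate pair kept as raw bytes
try:
    encoded.decode("utf-8")  # strict decode, mirroring what the bound functions are expected to do
except UnicodeDecodeError:
    pass  # the same error class each bound function is expected to raise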
def test_bytes(doc): def test_bytes(doc):
assert m.bytes_from_char_ssize_t().decode() == "green" assert m.bytes_from_char_ssize_t().decode() == "green"
assert m.bytes_from_char_size_t().decode() == "purple" assert m.bytes_from_char_size_t().decode() == "purple"

View File

@ -559,4 +559,23 @@ TEST_SUBMODULE(sequences_and_iterators, m) {
[]() { return py::make_iterator<py::return_value_policy::copy>(list); }); []() { return py::make_iterator<py::return_value_policy::copy>(list); });
m.def("make_iterator_2", m.def("make_iterator_2",
[]() { return py::make_iterator<py::return_value_policy::automatic>(list); }); []() { return py::make_iterator<py::return_value_policy::automatic>(list); });
// test_iterator on c arrays
// #4100: avoid the "lvalue required as increment operand" error when iterating C arrays
class CArrayHolder {
public:
CArrayHolder(double x, double y, double z) {
values[0] = x;
values[1] = y;
values[2] = z;
};
double values[3];
};
py::class_<CArrayHolder>(m, "CArrayHolder")
.def(py::init<double, double, double>())
.def(
"__iter__",
[](const CArrayHolder &v) { return py::make_iterator(v.values, v.values + 3); },
py::keep_alive<0, 1>());
} }

View File

@ -241,3 +241,11 @@ def test_iterator_rvp():
assert list(m.make_iterator_1()) == [1, 2, 3] assert list(m.make_iterator_1()) == [1, 2, 3]
assert list(m.make_iterator_2()) == [1, 2, 3] assert list(m.make_iterator_2()) == [1, 2, 3]
assert not isinstance(m.make_iterator_1(), type(m.make_iterator_2())) assert not isinstance(m.make_iterator_1(), type(m.make_iterator_2()))
def test_carray_iterator():
"""#4100: Check for proper iterator overload with C-Arrays"""
args_gt = list(float(i) for i in range(3))
arr_h = m.CArrayHolder(*args_gt)
args = list(arr_h)
assert args_gt == args
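A hedged usage sketch of what the keep_alive<0, 1> policy on __iter__ buys at the Python level: the returned iterator keeps the CArrayHolder (argument 1) alive, so iterating a temporary holder is safe. Only the CArrayHolder name and the m module fixture come from the diff; the values are illustrative.

it = iter(m.CArrayHolder(0.0, 1.0, 2.0))  # no other reference to the holder is kept
assert list(it) == [0.0, 1.0, 2.0]        # still valid: the iterator pins the holder's lifetime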

View File

@ -151,9 +151,13 @@ if(NOT _PYTHON_SUCCESS MATCHES 0)
return() return()
endif() endif()
option(
PYBIND11_PYTHONLIBS_OVERWRITE
"Overwrite cached values read from Python library (classic search). Turn off if cross-compiling and manually setting these values."
ON)
# Can manually set values when cross-compiling # Can manually set values when cross-compiling
macro(_PYBIND11_GET_IF_UNDEF lst index name) macro(_PYBIND11_GET_IF_UNDEF lst index name)
if(NOT DEFINED "${name}") if(PYBIND11_PYTHONLIBS_OVERWRITE OR NOT DEFINED "${name}")
list(GET "${lst}" "${index}" "${name}") list(GET "${lst}" "${index}" "${name}")
endif() endif()
endmacro() endmacro()

View File

@ -27,10 +27,11 @@ class InstallHeadersNested(install_headers):
main_headers = glob.glob("pybind11/include/pybind11/*.h") main_headers = glob.glob("pybind11/include/pybind11/*.h")
detail_headers = glob.glob("pybind11/include/pybind11/detail/*.h") detail_headers = glob.glob("pybind11/include/pybind11/detail/*.h")
eigen_headers = glob.glob("pybind11/include/pybind11/eigen/*.h")
stl_headers = glob.glob("pybind11/include/pybind11/stl/*.h") stl_headers = glob.glob("pybind11/include/pybind11/stl/*.h")
cmake_files = glob.glob("pybind11/share/cmake/pybind11/*.cmake") cmake_files = glob.glob("pybind11/share/cmake/pybind11/*.cmake")
pkgconfig_files = glob.glob("pybind11/share/pkgconfig/*.pc") pkgconfig_files = glob.glob("pybind11/share/pkgconfig/*.pc")
headers = main_headers + detail_headers + stl_headers headers = main_headers + detail_headers + stl_headers + eigen_headers
cmdclass = {"install_headers": InstallHeadersNested} cmdclass = {"install_headers": InstallHeadersNested}
$extra_cmd $extra_cmd
@ -55,6 +56,7 @@ setup(
(base + "share/pkgconfig", pkgconfig_files), (base + "share/pkgconfig", pkgconfig_files),
(base + "include/pybind11", main_headers), (base + "include/pybind11", main_headers),
(base + "include/pybind11/detail", detail_headers), (base + "include/pybind11/detail", detail_headers),
(base + "include/pybind11/eigen", eigen_headers),
(base + "include/pybind11/stl", stl_headers), (base + "include/pybind11/stl", stl_headers),
], ],
cmdclass=cmdclass, cmdclass=cmdclass,

View File

@ -15,6 +15,7 @@ setup(
"pybind11", "pybind11",
"pybind11.include.pybind11", "pybind11.include.pybind11",
"pybind11.include.pybind11.detail", "pybind11.include.pybind11.detail",
"pybind11.include.pybind11.eigen",
"pybind11.include.pybind11.stl", "pybind11.include.pybind11.stl",
"pybind11.share.cmake.pybind11", "pybind11.share.cmake.pybind11",
"pybind11.share.pkgconfig", "pybind11.share.pkgconfig",
@ -23,6 +24,7 @@ setup(
"pybind11": ["py.typed"], "pybind11": ["py.typed"],
"pybind11.include.pybind11": ["*.h"], "pybind11.include.pybind11": ["*.h"],
"pybind11.include.pybind11.detail": ["*.h"], "pybind11.include.pybind11.detail": ["*.h"],
"pybind11.include.pybind11.eigen": ["*.h"],
"pybind11.include.pybind11.stl": ["*.h"], "pybind11.include.pybind11.stl": ["*.h"],
"pybind11.share.cmake.pybind11": ["*.cmake"], "pybind11.share.cmake.pybind11": ["*.cmake"],
"pybind11.share.pkgconfig": ["*.pc"], "pybind11.share.pkgconfig": ["*.pc"],