Merge branch 'master' into smart_holder

Ralf W. Grosse-Kunstleve 2022-10-09 21:55:40 -07:00
commit 67561bc6d2
16 changed files with 171 additions and 38 deletions

View File

@ -325,8 +325,8 @@ jobs:
# Testing NVCC; forces sources to behave like .cu files
cuda:
runs-on: ubuntu-latest
name: "🐍 3.8 • CUDA 11.2 • Ubuntu 20.04"
container: nvidia/cuda:11.2.2-devel-ubuntu20.04
name: "🐍 3.10 • CUDA 11.7 • Ubuntu 22.04"
container: nvidia/cuda:11.7.0-devel-ubuntu22.04
steps:
- uses: actions/checkout@v3
@ -738,6 +738,9 @@ jobs:
args: -DCMAKE_CXX_STANDARD=20
- python: 3.8
args: -DCMAKE_CXX_STANDARD=17
- python: 3.7
args: -DCMAKE_CXX_STANDARD=14
name: "🐍 ${{ matrix.python }} • MSVC 2019 • x86 ${{ matrix.args }}"
runs-on: windows-2019

View File

@ -12,6 +12,12 @@
#
# See https://github.com/pre-commit/pre-commit
ci:
autoupdate_commit_msg: "chore(deps): update pre-commit hooks"
autofix_commit_msg: "style: pre-commit fixes"
autoupdate_schedule: monthly
# third-party content
exclude: ^tools/JoinPaths.cmake$
@ -36,7 +42,7 @@ repos:
# Upgrade old Python syntax
- repo: https://github.com/asottile/pyupgrade
rev: "v2.37.3"
rev: "v2.38.2"
hooks:
- id: pyupgrade
args: [--py36-plus]
@ -113,7 +119,7 @@ repos:
# PyLint has native support - not always usable, but works for us
- repo: https://github.com/PyCQA/pylint
rev: "v2.15.2"
rev: "v2.15.3"
hooks:
- id: pylint
files: ^pybind11
@ -129,7 +135,7 @@ repos:
# Check static types with mypy
- repo: https://github.com/pre-commit/mirrors-mypy
rev: "v0.971"
rev: "v0.981"
hooks:
- id: mypy
args: []

View File

@ -1586,7 +1586,7 @@ private:
throw cast_error_unable_to_convert_call_arg(a.name, a.type);
#endif
}
m_kwargs[a.name] = a.value;
m_kwargs[a.name] = std::move(a.value);
}
void process(list & /*args_list*/, detail::kwargs_proxy kp) {
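The change above moves the keyword argument's value into the kwargs dict instead of copying it. For a py::object, a copy costs an extra Python reference-count increment/decrement pair, whereas a move simply transfers the handle. A standalone illustration of the difference (not the library code above):

    py::object a = py::int_(42);
    py::object b;
    b = a;            // copy assignment: increments the refcount of the Python int
    b = std::move(a); // move assignment: steals the handle, no refcount traffic
    // `a` is now an empty handle and must not be used except to destroy or reassign it.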

View File

@ -1045,12 +1045,7 @@ PYBIND11_NAMESPACE_END(detail)
/// - regular: static_cast<Return (Class::*)(Arg0, Arg1, Arg2)>(&Class::func)
/// - sweet: overload_cast<Arg0, Arg1, Arg2>(&Class::func)
template <typename... Args>
# if (defined(_MSC_VER) && _MSC_VER < 1920) /* MSVC 2017 */ \
|| (defined(__clang__) && __clang_major__ == 5)
static constexpr detail::overload_cast_impl<Args...> overload_cast = {};
# else
static constexpr detail::overload_cast_impl<Args...> overload_cast;
# endif
static constexpr detail::overload_cast_impl<Args...> overload_cast{};
#endif
/// Const member function selector for overload_cast
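The removed #if block worked around constexpr-initialization bugs in MSVC 2017 and clang 5; with those compilers no longer supported, the single brace-initialized definition suffices. For context, overload_cast is the helper documented by the surrounding comments: it selects one overload of a (member) function when binding it. A usage sketch, assuming a hypothetical Widget type with overloaded members:

    #include <pybind11/pybind11.h>
    #include <string>
    namespace py = pybind11;

    struct Widget {
        void set(int) {}
        void set(const std::string &) {}
        int get() const { return 0; }
    };

    PYBIND11_MODULE(demo, m) {
        py::class_<Widget>(m, "Widget")
            .def(py::init<>())
            .def("set_int", py::overload_cast<int>(&Widget::set))
            .def("set_str", py::overload_cast<const std::string &>(&Widget::set))
            // py::const_ (the selector mentioned just above) picks const-qualified overloads:
            .def("get", py::overload_cast<>(&Widget::get, py::const_));
    }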

View File

@ -423,6 +423,8 @@ PYBIND11_NOINLINE internals &get_internals() {
// Cannot use py::gil_scoped_acquire here since that constructor calls get_internals.
struct gil_scoped_acquire_local {
gil_scoped_acquire_local() : state(PyGILState_Ensure()) {}
gil_scoped_acquire_local(const gil_scoped_acquire_local &) = delete;
gil_scoped_acquire_local &operator=(const gil_scoped_acquire_local &) = delete;
~gil_scoped_acquire_local() { PyGILState_Release(state); }
const PyGILState_STATE state;
} gil;
@ -523,8 +525,13 @@ struct local_internals {
/// Works like `get_internals`, but for things which are locally registered.
inline local_internals &get_local_internals() {
static local_internals locals;
return locals;
// This static can be created during interpreter finalization. If it were an ordinary static that
// gets destroyed alongside other static variables, creating it at that point would trigger the
// static deinitialization fiasco, so the local_internals static is intentionally never destroyed.
// More about the problem and the current solution:
// https://google.github.io/styleguide/cppguide.html#Static_and_Global_Variables
static auto *locals = new local_internals();
return *locals;
}
/// Constructs a std::string with the given arguments, stores it in `internals`, and returns its
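Returning a reference to a never-deleted heap allocation is the "construct on first use, never destroy" idiom from the style-guide link above: since no destructor can ever run, code executing during interpreter finalization can still safely obtain and use the object. A minimal standalone sketch of the idiom (registry is a hypothetical name):

    registry &get_registry() {
        // Intentionally leaked: the destructor never runs, so there is no
        // static deinitialization ordering problem.
        static auto *instance = new registry();
        return *instance;
    }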

View File

@ -80,6 +80,9 @@ public:
inc_ref();
}
gil_scoped_acquire(const gil_scoped_acquire &) = delete;
gil_scoped_acquire &operator=(const gil_scoped_acquire &) = delete;
void inc_ref() { ++tstate->gilstate_counter; }
PYBIND11_NOINLINE void dec_ref() {
@ -144,6 +147,9 @@ public:
}
}
gil_scoped_release(const gil_scoped_release &) = delete;
gil_scoped_release &operator=(const gil_scoped_release &) = delete;
/// This method will disable the PyThreadState_DeleteCurrent call and the
/// GIL won't be acquired. This method should be used if the interpreter
/// could be shutting down when this is called, as thread deletion is not
@ -178,6 +184,8 @@ class gil_scoped_acquire {
public:
gil_scoped_acquire() { state = PyGILState_Ensure(); }
gil_scoped_acquire(const gil_scoped_acquire &) = delete;
gil_scoped_acquire &operator=(const gil_scoped_acquire &) = delete;
~gil_scoped_acquire() { PyGILState_Release(state); }
void disarm() {}
};
@ -187,6 +195,8 @@ class gil_scoped_release {
public:
gil_scoped_release() { state = PyEval_SaveThread(); }
gil_scoped_release(const gil_scoped_release &) = delete;
gil_scoped_release &operator=(const gil_scoped_release &) = delete;
~gil_scoped_release() { PyEval_RestoreThread(state); }
void disarm() {}
};
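With the copy constructor and copy assignment operator deleted, an accidental copy of either RAII guard becomes a compile-time error; a copied guard would otherwise release or restore GIL state twice. Intended usage is unchanged, roughly as follows (heavy_cpp_work is a hypothetical helper):

    void compute(py::object progress_cb) {
        py::gil_scoped_release release;  // drop the GIL around pure C++ work
        heavy_cpp_work();
        py::gil_scoped_acquire acquire;  // re-acquire before touching Python objects
        progress_cb(100);
    }                                    // guards unwind in reverse order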

View File

@ -84,6 +84,7 @@ struct op_impl {};
/// Operator implementation generator
template <op_id id, op_type ot, typename L, typename R>
struct op_ {
static constexpr bool op_enable_if_hook = true;
template <typename Class, typename... Extra>
void execute(Class &cl, const Extra &...extra) const {
using Base = typename Class::type;
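op_enable_if_hook is a marker that the def overloads in class_ (see the following file) use to recognize operator expressions via SFINAE rather than by matching op_'s template parameters exactly. Binding code is unaffected; assuming a hypothetical Vec2 type with the usual C++ operators defined:

    #include <pybind11/operators.h>

    py::class_<Vec2>(m, "Vec2")
        .def(py::init<double, double>())
        .def(py::self + py::self)    // op_::execute registers __add__
        .def(py::self *= double())   // ... __imul__ here
        .def(-py::self);             // ... and __neg__ here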

View File

@ -1808,14 +1808,14 @@ public:
return *this;
}
template <detail::op_id id, detail::op_type ot, typename L, typename R, typename... Extra>
class_ &def(const detail::op_<id, ot, L, R> &op, const Extra &...extra) {
template <typename T, typename... Extra, detail::enable_if_t<T::op_enable_if_hook, int> = 0>
class_ &def(const T &op, const Extra &...extra) {
op.execute(*this, extra...);
return *this;
}
template <detail::op_id id, detail::op_type ot, typename L, typename R, typename... Extra>
class_ &def_cast(const detail::op_<id, ot, L, R> &op, const Extra &...extra) {
template <typename T, typename... Extra, detail::enable_if_t<T::op_enable_if_hook, int> = 0>
class_ &def_cast(const T &op, const Extra &...extra) {
op.execute_cast(*this, extra...);
return *this;
}
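Previously these overloads accepted only detail::op_<id, ot, L, R>; now any type exposing a truthy op_enable_if_hook (plus an execute/execute_cast member) is accepted, so class_ no longer depends on the operator template's exact signature. A stripped-down sketch of the SFINAE dispatch, with hypothetical names:

    #include <type_traits>

    struct my_op {
        static constexpr bool op_enable_if_hook = true;
        // would also provide: template <typename Class> void execute(Class &, ...) const;
    };

    // Participates in overload resolution only for types that expose the hook;
    // other argument types fall through to the remaining def(...) overloads.
    template <typename T, std::enable_if_t<T::op_enable_if_hook, int> = 0>
    void def_like(const T & /*op*/) {}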

View File

@ -1829,18 +1829,18 @@ public:
// guard if destructor called while err indicator is set
error_scope error_guard;
auto destructor = reinterpret_cast<void (*)(void *)>(PyCapsule_GetContext(o));
if (destructor == nullptr) {
if (PyErr_Occurred()) {
throw error_already_set();
}
pybind11_fail("Unable to get capsule context");
}
const char *name = get_name_in_error_scope(o);
void *ptr = PyCapsule_GetPointer(o, name);
if (ptr == nullptr) {
throw error_already_set();
}
if (destructor != nullptr) {
destructor(ptr);
}
});
if (!m_ptr || PyCapsule_SetContext(m_ptr, (void *) destructor) != 0) {
@ -2022,14 +2022,20 @@ public:
detail::list_iterator end() const { return {*this, PyList_GET_SIZE(m_ptr)}; }
template <typename T>
void append(T &&val) /* py-non-const */ {
PyList_Append(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr());
if (PyList_Append(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr()) != 0) {
throw error_already_set();
}
}
template <typename IdxType,
typename ValType,
detail::enable_if_t<std::is_integral<IdxType>::value, int> = 0>
void insert(const IdxType &index, ValType &&val) /* py-non-const */ {
PyList_Insert(
m_ptr, ssize_t_cast(index), detail::object_or_cast(std::forward<ValType>(val)).ptr());
if (PyList_Insert(m_ptr,
ssize_t_cast(index),
detail::object_or_cast(std::forward<ValType>(val)).ptr())
!= 0) {
throw error_already_set();
}
}
};
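Both mutators now check the CPython return code and surface failures (for example, out-of-memory) as error_already_set instead of silently dropping them. The same pattern applies to any C-API call that reports errors through its return value; an illustrative sketch with hypothetical handles fn and args:

    // fn and args are assumed to be valid PyObject* handles.
    PyObject *res = PyObject_CallObject(fn, args);
    if (res == nullptr) {
        // CPython has already set the exception; rethrow it as a C++ exception.
        throw py::error_already_set();
    }
    py::object result = py::reinterpret_steal<py::object>(res);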

View File

@ -49,17 +49,31 @@ constexpr forwarded_type<T, U> forward_like(U &&u) {
return std::forward<detail::forwarded_type<T, U>>(std::forward<U>(u));
}
// Checks if a container has an STL-style reserve method.
// This will only return true for a `reserve()` with a `void` return.
template <typename C>
using has_reserve_method = std::is_same<decltype(std::declval<C>().reserve(0)), void>;
template <typename Type, typename Key>
struct set_caster {
using type = Type;
using key_conv = make_caster<Key>;
private:
template <typename T = Type, enable_if_t<has_reserve_method<T>::value, int> = 0>
void reserve_maybe(const anyset &s, Type *) {
value.reserve(s.size());
}
void reserve_maybe(const anyset &, void *) {}
public:
bool load(handle src, bool convert) {
if (!isinstance<anyset>(src)) {
return false;
}
auto s = reinterpret_borrow<anyset>(src);
value.clear();
reserve_maybe(s, &value);
for (auto entry : s) {
key_conv conv;
if (!conv.load(entry, convert)) {
@ -94,12 +108,21 @@ struct map_caster {
using key_conv = make_caster<Key>;
using value_conv = make_caster<Value>;
private:
template <typename T = Type, enable_if_t<has_reserve_method<T>::value, int> = 0>
void reserve_maybe(const dict &d, Type *) {
value.reserve(d.size());
}
void reserve_maybe(const dict &, void *) {}
public:
bool load(handle src, bool convert) {
if (!isinstance<dict>(src)) {
return false;
}
auto d = reinterpret_borrow<dict>(src);
value.clear();
reserve_maybe(d, &value);
for (auto it : d) {
key_conv kconv;
value_conv vconv;
@ -160,9 +183,7 @@ struct list_caster {
}
private:
template <
typename T = Type,
enable_if_t<std::is_same<decltype(std::declval<T>().reserve(0)), void>::value, int> = 0>
template <typename T = Type, enable_if_t<has_reserve_method<T>::value, int> = 0>
void reserve_maybe(const sequence &s, Type *) {
value.reserve(s.size());
}
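has_reserve_method is the detection trait: decltype(std::declval<C>().reserve(0)) is only well-formed for containers with a suitable reserve(), and the Type*/void* second parameter makes the reserving overload of reserve_maybe the better match whenever the trait holds. The same idiom in standalone form, with generic names:

    #include <cstddef>
    #include <set>
    #include <type_traits>
    #include <vector>

    template <typename C>
    using has_reserve = std::is_same<decltype(std::declval<C>().reserve(0)), void>;

    // Preferred overload: exact int* match, enabled only when C::reserve exists.
    template <typename C, std::enable_if_t<has_reserve<C>::value, int> = 0>
    void reserve_if_possible(C &c, std::size_t n, int *) { c.reserve(n); }

    // Fallback: int* converts to void*, so this is chosen otherwise.
    template <typename C>
    void reserve_if_possible(C &, std::size_t, void *) {}

    template <typename C>
    void prepare(C &c, std::size_t n) {
        reserve_if_possible(c, n, static_cast<int *>(nullptr));
    }
    // std::vector<int> v; prepare(v, 8);  -> calls v.reserve(8)
    // std::set<int> s;    prepare(s, 8);  -> no-op: std::set has no reserve()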

View File

@ -49,6 +49,26 @@ struct SamePointer {};
} // namespace
namespace test_class {
namespace pr4220_tripped_over_this { // PR #4227
template <int>
struct SoEmpty {};
template <typename T>
std::string get_msg(const T &) {
return "This is really only meant to exercise successful compilation.";
}
using Empty0 = SoEmpty<0x0>;
void bind_empty0(py::module_ &m) {
py::class_<Empty0>(m, "Empty0").def(py::init<>()).def("get_msg", get_msg<Empty0>);
}
} // namespace pr4220_tripped_over_this
} // namespace test_class
PYBIND11_TYPE_CASTER_BASE_HOLDER(MismatchBase1, std::shared_ptr<MismatchBase1>)
PYBIND11_TYPE_CASTER_BASE_HOLDER(MismatchDerived1, std::unique_ptr<MismatchDerived1>)
PYBIND11_TYPE_CASTER_BASE_HOLDER(MismatchBase2, std::unique_ptr<MismatchBase2>)
@ -530,6 +550,8 @@ TEST_SUBMODULE(class_, m) {
py::class_<OtherDuplicateNested>(gt, "OtherDuplicateNested");
py::class_<OtherDuplicateNested>(gt, "YetAnotherDuplicateNested");
});
test_class::pr4220_tripped_over_this::bind_empty0(m);
}
template <int N>

View File

@ -469,3 +469,10 @@ def test_register_duplicate_class():
m.register_duplicate_nested_class_type(ClassScope)
expected = 'generic_type: type "YetAnotherDuplicateNested" is already registered!'
assert str(exc_info.value) == expected
def test_pr4220_tripped_over_this():
assert (
m.Empty0().get_msg()
== "This is really only meant to exercise successful compilation."
)

View File

@ -197,11 +197,40 @@ TEST_SUBMODULE(eigen, m) {
// Return a block of a matrix (gives non-standard strides)
m.def("block",
[](const Eigen::Ref<const Eigen::MatrixXd> &x,
[m](const py::object &x_obj,
int start_row,
int start_col,
int block_rows,
int block_cols) { return x.block(start_row, start_col, block_rows, block_cols); });
int block_cols) {
return m.attr("_block")(x_obj, x_obj, start_row, start_col, block_rows, block_cols);
});
m.def(
"_block",
[](const py::object &x_obj,
const Eigen::Ref<const Eigen::MatrixXd> &x,
int start_row,
int start_col,
int block_rows,
int block_cols) {
// See PR #4217 for background. This test is a bit over the top, but might be useful
// as a concrete example to point to when explaining the dangling reference trap.
auto i0 = py::make_tuple(0, 0);
auto x0_orig = x_obj[*i0].cast<double>();
if (x(0, 0) != x0_orig) {
throw std::runtime_error(
"Something in the type_caster for Eigen::Ref is terribly wrong.");
}
double x0_mod = x0_orig + 1;
x_obj[*i0] = x0_mod;
auto copy_detected = (x(0, 0) != x0_mod);
x_obj[*i0] = x0_orig;
if (copy_detected) {
throw std::runtime_error("type_caster for Eigen::Ref made a copy.");
}
return x.block(start_row, start_col, block_rows, block_cols);
},
py::keep_alive<0, 1>());
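The reworked test passes the NumPy array twice: once untouched as a py::object and once through the Eigen::Ref caster. Temporarily mutating an element via x_obj and checking whether x observes the change reveals whether the caster silently copied; if it did, keep_alive<0, 1> keeps the wrong object alive and the returned block would dangle. A condensed illustration of the trap (hypothetical binding, same assumptions as the diff above):

    // If the caster has to copy (e.g. a row-major / C-ordered array passed where
    // Eigen::Ref<const Eigen::MatrixXd> wants column-major data), the returned block
    // references that temporary copy; keep_alive<0, 1> only ties the *Python argument*
    // to the result, so the copy dies with the call and the block dangles.
    m.def(
        "top_left",
        [](const Eigen::Ref<const Eigen::MatrixXd> &x) { return x.block(0, 0, 2, 2); },
        py::keep_alive<0, 1>());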
// test_eigen_return_references, test_eigen_keepalive
// return value referencing/copying tests:

View File

@ -217,15 +217,24 @@ def test_negative_stride_from_python(msg):
)
def test_block_runtime_error_type_caster_eigen_ref_made_a_copy():
with pytest.raises(RuntimeError) as excinfo:
m.block(ref, 0, 0, 0, 0)
assert str(excinfo.value) == "type_caster for Eigen::Ref made a copy."
def test_nonunit_stride_to_python():
assert np.all(m.diagonal(ref) == ref.diagonal())
assert np.all(m.diagonal_1(ref) == ref.diagonal(1))
for i in range(-5, 7):
assert np.all(m.diagonal_n(ref, i) == ref.diagonal(i)), f"m.diagonal_n({i})"
assert np.all(m.block(ref, 2, 1, 3, 3) == ref[2:5, 1:4])
assert np.all(m.block(ref, 1, 4, 4, 2) == ref[1:, 4:])
assert np.all(m.block(ref, 1, 4, 3, 2) == ref[1:4, 4:])
# Must be order="F", otherwise the type_caster will make a copy and
# m.block() will return a dangling reference (heap-use-after-free).
rof = np.asarray(ref, order="F")
assert np.all(m.block(rof, 2, 1, 3, 3) == rof[2:5, 1:4])
assert np.all(m.block(rof, 1, 4, 4, 2) == rof[1:, 4:])
assert np.all(m.block(rof, 1, 4, 3, 2) == rof[1:4, 4:])
def test_eigen_ref_to_python():

View File

@ -289,6 +289,12 @@ TEST_SUBMODULE(pytypes, m) {
return capsule;
});
m.def("return_capsule_with_explicit_nullptr_dtor", []() {
py::print("creating capsule with explicit nullptr dtor");
return py::capsule(reinterpret_cast<void *>(1234),
static_cast<void (*)(void *)>(nullptr)); // PR #4221
});
// test_accessors
m.def("accessor_api", [](const py::object &o) {
auto d = py::dict();

View File

@ -299,6 +299,17 @@ def test_capsule(capture):
"""
)
with capture:
a = m.return_capsule_with_explicit_nullptr_dtor()
del a
pytest.gc_collect()
assert (
capture.unordered
== """
creating capsule with explicit nullptr dtor
"""
)
def test_accessors():
class SubTestObject: