.. _program_listing_file__tmp_ws_src_eigenpy_include_eigenpy_eigen-allocator.hpp:

Program Listing for File eigen-allocator.hpp
============================================

|exhale_lsh| :ref:`Return to documentation for file <file__tmp_ws_src_eigenpy_include_eigenpy_eigen-allocator.hpp>` (``/tmp/ws/src/eigenpy/include/eigenpy/eigen-allocator.hpp``)

.. |exhale_lsh| unicode:: U+021B0 .. UPWARDS ARROW WITH TIP LEFTWARDS

.. code-block:: cpp

   //
   // Copyright (c) 2014-2023 CNRS INRIA
   //

   #ifndef __eigenpy_eigen_allocator_hpp__
   #define __eigenpy_eigen_allocator_hpp__

   #include "eigenpy/fwd.hpp"
   #include "eigenpy/numpy-map.hpp"
   #include "eigenpy/register.hpp"
   #include "eigenpy/scalar-conversion.hpp"
   #include "eigenpy/utils/is-aligned.hpp"

   namespace eigenpy {

   namespace details {
   template <typename MatType,
             bool IsVectorAtCompileTime = MatType::IsVectorAtCompileTime>
   struct init_matrix_or_array {
     static MatType *run(int rows, int cols, void *storage) {
       if (storage)
         return new (storage) MatType(rows, cols);
       else
         return new MatType(rows, cols);
     }

     static MatType *run(PyArrayObject *pyArray, void *storage = NULL) {
       assert(PyArray_NDIM(pyArray) == 1 || PyArray_NDIM(pyArray) == 2);

       int rows = -1, cols = -1;
       const int ndim = PyArray_NDIM(pyArray);
       if (ndim == 2) {
         rows = (int)PyArray_DIMS(pyArray)[0];
         cols = (int)PyArray_DIMS(pyArray)[1];
       } else if (ndim == 1) {
         rows = (int)PyArray_DIMS(pyArray)[0];
         cols = 1;
       }

       return run(rows, cols, storage);
     }
   };

   template <typename MatType>
   struct init_matrix_or_array<MatType, true> {
     static MatType *run(int rows, int cols, void *storage) {
       if (storage)
         return new (storage) MatType(rows, cols);
       else
         return new MatType(rows, cols);
     }

     static MatType *run(int size, void *storage) {
       if (storage)
         return new (storage) MatType(size);
       else
         return new MatType(size);
     }

     static MatType *run(PyArrayObject *pyArray, void *storage = NULL) {
       const int ndim = PyArray_NDIM(pyArray);
       if (ndim == 1) {
         const int size = (int)PyArray_DIMS(pyArray)[0];
         return run(size, storage);
       } else {
         const int rows = (int)PyArray_DIMS(pyArray)[0];
         const int cols = (int)PyArray_DIMS(pyArray)[1];
         return run(rows, cols, storage);
       }
     }
   };

   #ifdef EIGENPY_WITH_TENSOR_SUPPORT
   template <typename Tensor>
   struct init_tensor {
     static Tensor *run(PyArrayObject *pyArray, void *storage = NULL) {
       enum { Rank = Tensor::NumDimensions };
       assert(PyArray_NDIM(pyArray) == Rank);
       typedef typename Tensor::Index Index;

       Eigen::array<Index, Rank> dimensions;
       for (int k = 0; k < PyArray_NDIM(pyArray); ++k)
         dimensions[k] = PyArray_DIMS(pyArray)[k];

       if (storage)
         return new (storage) Tensor(dimensions);
       else
         return new Tensor(dimensions);
     }
   };
   #endif

   template <typename MatType>
   struct check_swap_impl_matrix;

   template <typename EigenType,
             typename BaseType = typename get_eigen_base_type<EigenType>::type>
   struct check_swap_impl;

   template <typename MatType>
   struct check_swap_impl<MatType, Eigen::MatrixBase<MatType> >
       : check_swap_impl_matrix<MatType> {};

   template <typename MatType>
   struct check_swap_impl_matrix {
     static bool run(PyArrayObject *pyArray,
                     const Eigen::MatrixBase<MatType> &mat) {
       if (PyArray_NDIM(pyArray) == 0) return false;
       if (mat.rows() == PyArray_DIMS(pyArray)[0])
         return false;
       else
         return true;
     }
   };

   template <typename EigenType>
   bool check_swap(PyArrayObject *pyArray, const EigenType &mat) {
     return check_swap_impl<EigenType>::run(pyArray, mat);
   }

   #ifdef EIGENPY_WITH_TENSOR_SUPPORT
   template <typename TensorType>
   struct check_swap_impl_tensor {
     static bool run(PyArrayObject * /*pyArray*/,
                     const TensorType & /*tensor*/) {
       return false;
     }
   };

   template <typename TensorType>
   struct check_swap_impl<TensorType, Eigen::TensorBase<TensorType> >
       : check_swap_impl_tensor<TensorType> {};
   #endif

   // template <typename Scalar, typename NewScalar,
   //           bool cast_is_valid = FromTypeToType<Scalar, NewScalar>::value>
   // struct cast_impl_matrix;
   //
   // template <typename EigenType,
   //           typename BaseType = typename get_eigen_base_type<EigenType>::type>
   // struct cast_impl;
   //
   // template <typename MatType>
   // struct cast_impl<MatType, Eigen::MatrixBase<MatType> >
   //     : cast_impl_matrix<MatType> {};
   //
   // template <typename Scalar, typename NewScalar, bool cast_is_valid>
   // struct cast_impl_matrix
   //{
   //  template <typename MatrixIn, typename MatrixOut>
   //  static void run(const Eigen::MatrixBase<MatrixIn> &input,
   //                  const Eigen::MatrixBase<MatrixOut> &dest) {
   //    dest.const_cast_derived() = input.template cast<NewScalar>();
   //  }
   // };
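
   // details::cast routes a scalar conversion between two Eigen
   // expressions: valid conversions (FromTypeToType<Scalar, NewScalar>)
   // go through Eigen's cast<NewScalar>(), while invalid ones fall into a
   // specialization that only asserts. The EigenBase template template
   // parameter selects the expression family (Eigen::MatrixBase by
   // default, Eigen::TensorRef when tensor support is enabled).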
   template <typename Scalar, typename NewScalar,
             template <typename D> class EigenBase = Eigen::MatrixBase,
             bool cast_is_valid = FromTypeToType<Scalar, NewScalar>::value>
   struct cast {
     template <typename MatrixIn, typename MatrixOut>
     static void run(const Eigen::MatrixBase<MatrixIn> &input,
                     const Eigen::MatrixBase<MatrixOut> &dest) {
       dest.const_cast_derived() = input.template cast<NewScalar>();
     }
   };

   #ifdef EIGENPY_WITH_TENSOR_SUPPORT
   template <typename Scalar, typename NewScalar>
   struct cast<Scalar, NewScalar, Eigen::TensorRef, true> {
     template <typename TensorIn, typename TensorOut>
     static void run(const TensorIn &input, TensorOut &dest) {
       dest = input.template cast<NewScalar>();
     }
   };
   #endif

   template <typename Scalar, typename NewScalar,
             template <typename D> class EigenBase>
   struct cast<Scalar, NewScalar, EigenBase, false> {
     template <typename MatrixIn, typename MatrixOut>
     static void run(const MatrixIn /*input*/, const MatrixOut /*dest*/) {
       // do nothing
       assert(false && "Must never happen");
     }
   };

   }  // namespace details

   #define EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, Scalar,        \
                                                     NewScalar, pyArray,     \
                                                     mat)                    \
     details::cast<Scalar, NewScalar>::run(                                  \
         NumpyMap<MatType, Scalar>::map(pyArray,                             \
                                        details::check_swap(pyArray, mat)),  \
         mat)

   #define EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(MatType, Scalar,        \
                                                     NewScalar, mat,         \
                                                     pyArray)                \
     details::cast<Scalar, NewScalar>::run(                                  \
         mat, NumpyMap<MatType, NewScalar>::map(                             \
                  pyArray, details::check_swap(pyArray, mat)))

   // Define specific cast for Windows and Mac
   #if defined _WIN32 || defined __CYGWIN__
   // Manage NPY_INT on Windows (NPY_INT32 is NPY_LONG).
   // See https://github.com/stack-of-tasks/eigenpy/pull/455
   #define EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH_OS_SPECIFIC( \
       MatType, Scalar, pyArray, mat, CAST_MACRO)               \
     case NPY_INT:                                              \
       CAST_MACRO(MatType, int32_t, Scalar, pyArray, mat);      \
       break;                                                   \
     case NPY_UINT:                                             \
       CAST_MACRO(MatType, uint32_t, Scalar, pyArray, mat);     \
       break;
   #elif defined __APPLE__
   // Manage NPY_LONGLONG on Mac (NPY_INT64 is NPY_LONG).
   // long long and long are both the same type,
   // but NPY_LONGLONG and NPY_LONG are different dtypes.
   // See https://github.com/stack-of-tasks/eigenpy/pull/455
   #define EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH_OS_SPECIFIC( \
       MatType, Scalar, pyArray, mat, CAST_MACRO)               \
     case NPY_LONGLONG:                                         \
       CAST_MACRO(MatType, int64_t, Scalar, pyArray, mat);      \
       break;                                                   \
     case NPY_ULONGLONG:                                        \
       CAST_MACRO(MatType, uint64_t, Scalar, pyArray, mat);     \
       break;
   #else
   #define EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH_OS_SPECIFIC( \
       MatType, Scalar, pyArray, mat, CAST_MACRO)
   #endif

   #define EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH(                           \
       pyArray_type_code, MatType, Scalar, pyArray, mat, CAST_MACRO)          \
     switch (pyArray_type_code) {                                             \
       case NPY_BOOL:                                                         \
         CAST_MACRO(MatType, bool, Scalar, pyArray, mat);                     \
         break;                                                               \
       case NPY_INT8:                                                         \
         CAST_MACRO(MatType, int8_t, Scalar, pyArray, mat);                   \
         break;                                                               \
       case NPY_INT16:                                                        \
         CAST_MACRO(MatType, int16_t, Scalar, pyArray, mat);                  \
         break;                                                               \
       case NPY_INT32:                                                        \
         CAST_MACRO(MatType, int32_t, Scalar, pyArray, mat);                  \
         break;                                                               \
       case NPY_INT64:                                                        \
         CAST_MACRO(MatType, int64_t, Scalar, pyArray, mat);                  \
         break;                                                               \
       case NPY_UINT8:                                                        \
         CAST_MACRO(MatType, uint8_t, Scalar, pyArray, mat);                  \
         break;                                                               \
       case NPY_UINT16:                                                       \
         CAST_MACRO(MatType, uint16_t, Scalar, pyArray, mat);                 \
         break;                                                               \
       case NPY_UINT32:                                                       \
         CAST_MACRO(MatType, uint32_t, Scalar, pyArray, mat);                 \
         break;                                                               \
       case NPY_UINT64:                                                       \
         CAST_MACRO(MatType, uint64_t, Scalar, pyArray, mat);                 \
         break;                                                               \
       case NPY_FLOAT:                                                        \
         CAST_MACRO(MatType, float, Scalar, pyArray, mat);                    \
         break;                                                               \
       case NPY_CFLOAT:                                                       \
         CAST_MACRO(MatType, std::complex<float>, Scalar, pyArray, mat);      \
         break;                                                               \
       case NPY_DOUBLE:                                                       \
         CAST_MACRO(MatType, double, Scalar, pyArray, mat);                   \
         break;                                                               \
       case NPY_CDOUBLE:                                                      \
         CAST_MACRO(MatType, std::complex<double>, Scalar, pyArray, mat);     \
         break;                                                               \
       case NPY_LONGDOUBLE:                                                   \
         CAST_MACRO(MatType, long double, Scalar, pyArray, mat);              \
         break;                                                               \
       case NPY_CLONGDOUBLE:                                                  \
         CAST_MACRO(MatType, std::complex<long double>, Scalar, pyArray,     \
                    mat);                                                     \
         break;                                                               \
       EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH_OS_SPECIFIC(                   \
           MatType, Scalar, pyArray, mat, CAST_MACRO)                         \
       default:                                                               \
         throw Exception(                                                     \
             "You asked for a conversion which is not implemented.");         \
     }
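
   // eigen_allocator_impl is the core of this header. It is specialized on
   // the Eigen base class of EigenType (Eigen::MatrixBase, or
   // Eigen::TensorBase when tensor support is enabled). allocate()
   // placement-constructs the C++ object inside the Boost.Python
   // rvalue_from_python storage, while copy() transfers data between a
   // NumPy array and the Eigen object, going through the cast switch above
   // whenever the NumPy dtype differs from Scalar.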
   template <typename EigenType>
   struct EigenAllocator;

   template <typename EigenType,
             typename BaseType = typename get_eigen_base_type<EigenType>::type>
   struct eigen_allocator_impl;

   template <typename MatType>
   struct eigen_allocator_impl_matrix;

   template <typename MatType>
   struct eigen_allocator_impl<MatType, Eigen::MatrixBase<MatType> >
       : eigen_allocator_impl_matrix<MatType> {};

   template <typename MatType>
   struct eigen_allocator_impl<const MatType, const Eigen::MatrixBase<MatType> >
       : eigen_allocator_impl_matrix<const MatType> {};

   template <typename MatType>
   struct eigen_allocator_impl_matrix {
     typedef MatType Type;
     typedef typename MatType::Scalar Scalar;

     static void allocate(
         PyArrayObject *pyArray,
         boost::python::converter::rvalue_from_python_storage<MatType>
             *storage) {
       void *raw_ptr = storage->storage.bytes;
       assert(is_aligned(raw_ptr, EIGENPY_DEFAULT_ALIGN_BYTES) &&
              "The pointer is not aligned.");

       Type *mat_ptr = details::init_matrix_or_array<Type>::run(pyArray,
                                                                raw_ptr);
       Type &mat = *mat_ptr;

       copy(pyArray, mat);
     }

     template <typename MatrixDerived>
     static void copy(PyArrayObject *pyArray,
                      const Eigen::MatrixBase<MatrixDerived> &mat_) {
       MatrixDerived &mat = mat_.const_cast_derived();
       const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
       const int Scalar_type_code = Register::getTypeCode<Scalar>();

       if (pyArray_type_code == Scalar_type_code) {
         mat = NumpyMap<MatType, Scalar>::map(
             pyArray, details::check_swap(pyArray, mat));  // avoid useless cast
         return;
       }

       EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH(
           pyArray_type_code, MatType, Scalar, pyArray, mat,
           EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX);
     }

     template <typename MatrixDerived>
     static void copy(const Eigen::MatrixBase<MatrixDerived> &mat_,
                      PyArrayObject *pyArray) {
       const MatrixDerived &mat =
           const_cast<const MatrixDerived &>(mat_.derived());
       const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
       const int Scalar_type_code = Register::getTypeCode<Scalar>();

       if (pyArray_type_code == Scalar_type_code)  // no cast needed
       {
         NumpyMap<MatType, Scalar>::map(
             pyArray, details::check_swap(pyArray, mat)) = mat;
         return;
       }

       throw Exception(
           "Scalar conversion from Eigen to Numpy is not implemented.");
     }
   };

   #ifdef EIGENPY_WITH_TENSOR_SUPPORT
   template <typename TensorType>
   struct eigen_allocator_impl_tensor;

   template <typename TensorType>
   struct eigen_allocator_impl<TensorType, Eigen::TensorBase<TensorType> >
       : eigen_allocator_impl_tensor<TensorType> {};

   template <typename TensorType>
   struct eigen_allocator_impl<const TensorType,
                               const Eigen::TensorBase<TensorType> >
       : eigen_allocator_impl_tensor<const TensorType> {};

   template <typename TensorType>
   struct eigen_allocator_impl_tensor {
     typedef typename TensorType::Scalar Scalar;
     static void allocate(
         PyArrayObject *pyArray,
         boost::python::converter::rvalue_from_python_storage<TensorType>
             *storage) {
       void *raw_ptr = storage->storage.bytes;
       assert(is_aligned(raw_ptr, EIGENPY_DEFAULT_ALIGN_BYTES) &&
              "The pointer is not aligned.");

       TensorType *tensor_ptr =
           details::init_tensor<TensorType>::run(pyArray, raw_ptr);
       TensorType &tensor = *tensor_ptr;

       copy(pyArray, tensor);
     }

   #define EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR(TensorType, Scalar,     \
                                                     NewScalar, pyArray,     \
                                                     tensor)                 \
     {                                                                       \
       typename NumpyMap<TensorType, Scalar>::EigenMap pyArray_map =         \
           NumpyMap<TensorType, Scalar>::map(                                \
               pyArray, details::check_swap(pyArray, tensor));               \
       details::cast<Scalar, NewScalar, Eigen::TensorRef>::run(pyArray_map,  \
                                                               tensor);      \
     }

     template <typename TensorDerived>
     static void copy(PyArrayObject *pyArray, TensorDerived &tensor) {
       const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
       const int Scalar_type_code = Register::getTypeCode<Scalar>();

       if (pyArray_type_code == Scalar_type_code) {
         tensor = NumpyMap<TensorType, Scalar>::map(
             pyArray,
             details::check_swap(pyArray, tensor));  // avoid useless cast
         return;
       }

       EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH(
           pyArray_type_code, TensorType, Scalar, pyArray, tensor,
           EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR);
     }

   #define EIGENPY_CAST_FROM_EIGEN_TENSOR_TO_PYARRAY(TensorType, Scalar,     \
                                                     NewScalar, tensor,      \
                                                     pyArray)                \
     {                                                                       \
       typename NumpyMap<TensorType, NewScalar>::EigenMap pyArray_map =      \
           NumpyMap<TensorType, NewScalar>::map(                             \
               pyArray, details::check_swap(pyArray, tensor));               \
       details::cast<Scalar, NewScalar, Eigen::TensorRef>::run(tensor,       \
                                                               pyArray_map); \
     }

     static void copy(const TensorType &tensor, PyArrayObject *pyArray) {
       const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
       const int Scalar_type_code = Register::getTypeCode<Scalar>();

       if (pyArray_type_code == Scalar_type_code)  // no cast needed
       {
         NumpyMap<TensorType, Scalar>::map(
             pyArray, details::check_swap(pyArray, tensor)) = tensor;
         return;
       }

       throw Exception(
           "Scalar conversion from Eigen to Numpy is not implemented.");
     }
   };
   #endif
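
   // The Eigen::Ref specializations below avoid copying when possible:
   // the NumPy array is mapped in place, and a fresh Eigen object is
   // allocated (then filled from the array) only when the dtype differs
   // from Scalar, the storage order is incompatible with MatType, or the
   // alignment/contiguity constraints implied by Options are not met.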
   #if EIGEN_VERSION_AT_LEAST(3, 2, 0)
   template <typename MatType>
   inline bool is_arr_layout_compatible_with_mat_type(PyArrayObject *pyArray) {
     bool is_array_C_cont = PyArray_IS_C_CONTIGUOUS(pyArray);
     bool is_array_F_cont = PyArray_IS_F_CONTIGUOUS(pyArray);

     return (MatType::IsRowMajor && is_array_C_cont) ||
            (!MatType::IsRowMajor && is_array_F_cont) ||
            (MatType::IsVectorAtCompileTime &&
             (is_array_C_cont || is_array_F_cont));
   }

   template <typename MatType, int Options, typename Stride>
   struct eigen_allocator_impl_matrix<Eigen::Ref<MatType, Options, Stride> > {
     typedef Eigen::Ref<MatType, Options, Stride> RefType;
     typedef typename MatType::Scalar Scalar;

     typedef typename ::boost::python::detail::referent_storage<
         RefType &>::StorageType StorageType;

     static void allocate(
         PyArrayObject *pyArray,
         ::boost::python::converter::rvalue_from_python_storage<RefType>
             *storage) {
       typedef typename StrideType<
           MatType,
           Eigen::internal::traits<RefType>::StrideType::
               InnerStrideAtCompileTime,
           Eigen::internal::traits<RefType>::StrideType::
               OuterStrideAtCompileTime>::type NumpyMapStride;

       bool need_to_allocate = false;
       const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
       const int Scalar_type_code = Register::getTypeCode<Scalar>();

       if (pyArray_type_code != Scalar_type_code) need_to_allocate |= true;

       bool incompatible_layout =
           !is_arr_layout_compatible_with_mat_type<MatType>(pyArray);
       need_to_allocate |= incompatible_layout;

       if (Options !=
           Eigen::Unaligned)  // we need to check whether the memory is
                              // correctly aligned and composed of a
                              // continuous segment
       {
         void *data_ptr = PyArray_DATA(pyArray);
         if (!PyArray_ISONESEGMENT(pyArray) || !is_aligned(data_ptr, Options))
           need_to_allocate |= true;
       }

       void *raw_ptr = storage->storage.bytes;
       if (need_to_allocate) {
         MatType *mat_ptr;
         mat_ptr = details::init_matrix_or_array<MatType>::run(pyArray);
         RefType mat_ref(*mat_ptr);

         new (raw_ptr) StorageType(mat_ref, pyArray, mat_ptr);

         RefType &mat = *reinterpret_cast<RefType *>(raw_ptr);
         EigenAllocator<MatType>::copy(pyArray, mat);
       } else {
         assert(pyArray_type_code == Scalar_type_code);
         typename NumpyMap<MatType, Scalar, Options, NumpyMapStride>::EigenMap
             numpyMap =
                 NumpyMap<MatType, Scalar, Options, NumpyMapStride>::map(
                     pyArray);
         RefType mat_ref(numpyMap);
         new (raw_ptr) StorageType(mat_ref, pyArray);
       }
     }

     static void copy(RefType const &ref, PyArrayObject *pyArray) {
       EigenAllocator<MatType>::copy(ref, pyArray);
     }
   };
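
   // Same scheme for const Eigen::Ref<const MatType>: when a copy is
   // needed, the data is written into the freshly allocated MatType
   // (through a non-const reference) before the read-only ref is exposed.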
   template <typename MatType, int Options, typename Stride>
   struct eigen_allocator_impl_matrix<
       const Eigen::Ref<const MatType, Options, Stride> > {
     typedef const Eigen::Ref<const MatType, Options, Stride> RefType;
     typedef typename MatType::Scalar Scalar;

     typedef typename ::boost::python::detail::referent_storage<
         RefType &>::StorageType StorageType;

     static void allocate(
         PyArrayObject *pyArray,
         ::boost::python::converter::rvalue_from_python_storage<RefType>
             *storage) {
       typedef typename StrideType<
           MatType,
           Eigen::internal::traits<RefType>::StrideType::
               InnerStrideAtCompileTime,
           Eigen::internal::traits<RefType>::StrideType::
               OuterStrideAtCompileTime>::type NumpyMapStride;

       bool need_to_allocate = false;
       const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
       const int Scalar_type_code = Register::getTypeCode<Scalar>();

       if (pyArray_type_code != Scalar_type_code) need_to_allocate |= true;

       bool incompatible_layout =
           !is_arr_layout_compatible_with_mat_type<MatType>(pyArray);
       need_to_allocate |= incompatible_layout;

       if (Options !=
           Eigen::Unaligned)  // we need to check whether the memory is
                              // correctly aligned and composed of a
                              // continuous segment
       {
         void *data_ptr = PyArray_DATA(pyArray);
         if (!PyArray_ISONESEGMENT(pyArray) || !is_aligned(data_ptr, Options))
           need_to_allocate |= true;
       }

       void *raw_ptr = storage->storage.bytes;
       if (need_to_allocate) {
         MatType *mat_ptr;
         mat_ptr = details::init_matrix_or_array<MatType>::run(pyArray);
         RefType mat_ref(*mat_ptr);

         new (raw_ptr) StorageType(mat_ref, pyArray, mat_ptr);

         MatType &mat = *mat_ptr;
         EigenAllocator<MatType>::copy(pyArray, mat);
       } else {
         assert(pyArray_type_code == Scalar_type_code);
         typename NumpyMap<MatType, Scalar, Options, NumpyMapStride>::EigenMap
             numpyMap =
                 NumpyMap<MatType, Scalar, Options, NumpyMapStride>::map(
                     pyArray);
         RefType mat_ref(numpyMap);
         new (raw_ptr) StorageType(mat_ref, pyArray);
       }
     }

     static void copy(RefType const &ref, PyArrayObject *pyArray) {
       EigenAllocator<MatType>::copy(ref, pyArray);
     }
   };
   #endif

   #ifdef EIGENPY_WITH_TENSOR_SUPPORT
   template <typename TensorType, typename TensorRef>
   struct eigen_allocator_impl_tensor_ref;

   template <typename TensorType>
   struct eigen_allocator_impl_tensor<Eigen::TensorRef<TensorType> >
       : eigen_allocator_impl_tensor_ref<TensorType,
                                         Eigen::TensorRef<TensorType> > {};

   template <typename TensorType>
   struct eigen_allocator_impl_tensor<const Eigen::TensorRef<const TensorType> >
       : eigen_allocator_impl_tensor_ref<
             const TensorType, const Eigen::TensorRef<const TensorType> > {};

   template <typename TensorType, typename RefType>
   struct eigen_allocator_impl_tensor_ref {
     typedef typename TensorType::Scalar Scalar;

     typedef typename ::boost::python::detail::referent_storage<
         RefType &>::StorageType StorageType;

     static void allocate(
         PyArrayObject *pyArray,
         ::boost::python::converter::rvalue_from_python_storage<RefType>
             *storage) {
       //    typedef typename StrideType<
       //        MatType,
       //        Eigen::internal::traits<RefType>::StrideType::
       //            InnerStrideAtCompileTime,
       //        Eigen::internal::traits<RefType>::StrideType::
       //            OuterStrideAtCompileTime>::type NumpyMapStride;

       static const int Options = Eigen::internal::traits<TensorType>::Options;

       bool need_to_allocate = false;
       const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
       const int Scalar_type_code = Register::getTypeCode<Scalar>();
       if (pyArray_type_code != Scalar_type_code) need_to_allocate |= true;
       //    bool incompatible_layout =
       //        !is_arr_layout_compatible_with_mat_type<MatType>(pyArray);
       //    need_to_allocate |= incompatible_layout;
       //    if (Options !=
       //        Eigen::Unaligned)  // we need to check whether the memory is
       //                           // correctly aligned and composed of a
       //                           // continuous segment
       //    {
       //      void *data_ptr = PyArray_DATA(pyArray);
       //      if (!PyArray_ISONESEGMENT(pyArray) ||
       //          !is_aligned(data_ptr, Options))
       //        need_to_allocate |= true;
       //    }

       void *raw_ptr = storage->storage.bytes;
       if (need_to_allocate) {
         typedef typename boost::remove_const<TensorType>::type
             TensorTypeNonConst;
         TensorTypeNonConst *tensor_ptr;
         tensor_ptr = details::init_tensor<TensorTypeNonConst>::run(pyArray);
         RefType tensor_ref(*tensor_ptr);

         new (raw_ptr) StorageType(tensor_ref, pyArray, tensor_ptr);

         TensorTypeNonConst &tensor = *tensor_ptr;
         EigenAllocator<TensorTypeNonConst>::copy(pyArray, tensor);
       } else {
         assert(pyArray_type_code == Scalar_type_code);
         typename NumpyMap<TensorType, Scalar, Options>::EigenMap numpyMap =
             NumpyMap<TensorType, Scalar, Options>::map(pyArray);
         RefType tensor_ref(numpyMap);
         new (raw_ptr) StorageType(tensor_ref, pyArray);
       }
     }

     static void copy(RefType const &ref, PyArrayObject *pyArray) {
       EigenAllocator<TensorType>::copy(ref, pyArray);
     }
   };
   #endif

   template <typename EigenType>
   struct EigenAllocator : eigen_allocator_impl<EigenType> {};

   }  // namespace eigenpy

   #endif  // __eigenpy_eigen_allocator_hpp__
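
The allocator is not meant to be called by end users directly; eigenpy's from-Python converters drive it. As a rough illustration, the sketch below shows how a Boost.Python rvalue converter typically uses ``EigenAllocator`` in its construct step; the function name ``example_eigen_from_py_construct`` is hypothetical and not part of this header:

.. code-block:: cpp

   #include <eigenpy/eigen-allocator.hpp>

   namespace bp = boost::python;

   // Hypothetical construct step of an rvalue from-python converter.
   template <typename MatType>
   void example_eigen_from_py_construct(
       PyObject *pyObj, bp::converter::rvalue_from_python_stage1_data *memory) {
     PyArrayObject *pyArray = reinterpret_cast<PyArrayObject *>(pyObj);
     bp::converter::rvalue_from_python_storage<MatType> *storage =
         reinterpret_cast<bp::converter::rvalue_from_python_storage<MatType> *>(
             reinterpret_cast<void *>(memory));

     // Placement-construct MatType inside the converter storage and copy the
     // NumPy data into it, casting scalars when the dtypes differ.
     eigenpy::EigenAllocator<MatType>::allocate(pyArray, storage);

     // Tell Boost.Python where the constructed object lives.
     memory->convertible = storage->storage.bytes;
   }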