.. _program_listing_file__tmp_ws_src_eigenpy_include_eigenpy_numpy-allocator.hpp:

Program Listing for File numpy-allocator.hpp
============================================

|exhale_lsh| :ref:`Return to documentation for file <file__tmp_ws_src_eigenpy_include_eigenpy_numpy-allocator.hpp>` (``/tmp/ws/src/eigenpy/include/eigenpy/numpy-allocator.hpp``)

.. |exhale_lsh| unicode:: U+021B0 .. UPWARDS ARROW WITH TIP LEFTWARDS

.. code-block:: cpp

   /*
    * Copyright 2020-2023 INRIA
    */

   #ifndef __eigenpy_numpy_allocator_hpp__
   #define __eigenpy_numpy_allocator_hpp__

   #include "eigenpy/fwd.hpp"
   #include "eigenpy/eigen-allocator.hpp"
   #include "eigenpy/numpy-type.hpp"
   #include "eigenpy/register.hpp"

   namespace eigenpy {

   template <typename EigenType, typename BaseType>
   struct numpy_allocator_impl;

   template <typename MatType>
   struct numpy_allocator_impl_matrix;

   template <typename MatType>
   struct numpy_allocator_impl<
       MatType,
       Eigen::MatrixBase<typename remove_const_reference<MatType>::type> >
       : numpy_allocator_impl_matrix<MatType> {};

   template <typename MatType>
   struct numpy_allocator_impl<
       const MatType,
       const Eigen::MatrixBase<typename remove_const_reference<MatType>::type> >
       : numpy_allocator_impl_matrix<const MatType> {};

   // template <typename MatType>
   // struct numpy_allocator_impl<MatType &, Eigen::MatrixBase<MatType> > :
   // numpy_allocator_impl_matrix<MatType &>
   //{};

   template <typename MatType>
   struct numpy_allocator_impl<const MatType &,
                               const Eigen::MatrixBase<MatType> >
       : numpy_allocator_impl_matrix<const MatType &> {};

   template <typename EigenType,
             typename BaseType = typename get_eigen_base_type<EigenType>::type>
   struct NumpyAllocator : numpy_allocator_impl<EigenType, BaseType> {};

   template <typename MatType>
   struct numpy_allocator_impl_matrix {
     template <typename SimilarMatrixType>
     static PyArrayObject *allocate(
         const Eigen::MatrixBase<SimilarMatrixType> &mat, npy_intp nd,
         npy_intp *shape) {
       typedef typename SimilarMatrixType::Scalar Scalar;
       const int code = Register::getTypeCode<Scalar>();
       PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_SimpleNew(
           static_cast<int>(nd), shape, code);

       // Copy data
       EigenAllocator<SimilarMatrixType>::copy(mat, pyArray);

       return pyArray;
     }
   };

   #ifdef EIGENPY_WITH_TENSOR_SUPPORT
   template <typename TensorType>
   struct numpy_allocator_impl_tensor;

   template <typename TensorType>
   struct numpy_allocator_impl<TensorType, Eigen::TensorBase<TensorType> >
       : numpy_allocator_impl_tensor<TensorType> {};

   template <typename TensorType>
   struct numpy_allocator_impl<const TensorType,
                               const Eigen::TensorBase<TensorType> >
       : numpy_allocator_impl_tensor<const TensorType> {};

   template <typename TensorType>
   struct numpy_allocator_impl_tensor {
     template <typename TensorDerived>
     static PyArrayObject *allocate(const TensorDerived &tensor, npy_intp nd,
                                    npy_intp *shape) {
       const int code = Register::getTypeCode<typename TensorDerived::Scalar>();
       PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_SimpleNew(
           static_cast<int>(nd), shape, code);

       // Copy data
       EigenAllocator<TensorDerived>::copy(
           static_cast<const TensorDerived &>(tensor), pyArray);

       return pyArray;
     }
   };
   #endif

   template <typename MatType>
   struct numpy_allocator_impl_matrix<MatType &> {
     template <typename SimilarMatrixType>
     static PyArrayObject *allocate(
         Eigen::PlainObjectBase<SimilarMatrixType> &mat, npy_intp nd,
         npy_intp *shape) {
       typedef typename SimilarMatrixType::Scalar Scalar;
       enum {
         NPY_ARRAY_MEMORY_CONTIGUOUS =
             SimilarMatrixType::IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
       };

       if (NumpyType::sharedMemory()) {
         const int Scalar_type_code = Register::getTypeCode<Scalar>();
         PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
             getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
             mat.data(), NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);

         return pyArray;
       } else {
         return NumpyAllocator<MatType>::allocate(mat, nd, shape);
       }
     }
   };

   #if EIGEN_VERSION_AT_LEAST(3, 2, 0)
   template <typename MatType, int Options, typename Stride>
   struct numpy_allocator_impl_matrix<Eigen::Ref<MatType, Options, Stride> > {
     typedef Eigen::Ref<MatType, Options, Stride> RefType;

     static PyArrayObject *allocate(RefType &mat, npy_intp nd,
                                    npy_intp *shape) {
       typedef typename RefType::Scalar Scalar;
       enum {
         NPY_ARRAY_MEMORY_CONTIGUOUS =
             RefType::IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
       };

       if (NumpyType::sharedMemory()) {
         const int Scalar_type_code = Register::getTypeCode<Scalar>();
         const bool reverse_strides = MatType::IsRowMajor || (mat.rows() == 1);
         Eigen::DenseIndex inner_stride = reverse_strides ? mat.outerStride()
                                                          : mat.innerStride(),
                           outer_stride = reverse_strides ? mat.innerStride()
                                                          : mat.outerStride();

   #if NPY_ABI_VERSION < 0x02000000
         const int elsize =
             call_PyArray_DescrFromType(Scalar_type_code)->elsize;
   #else
         const int elsize =
             PyDataType_ELSIZE(call_PyArray_DescrFromType(Scalar_type_code));
   #endif
         npy_intp strides[2] = {elsize * inner_stride, elsize * outer_stride};

         PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
             getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
             strides, mat.data(),
             NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);

         return pyArray;
       } else {
         return NumpyAllocator<MatType>::allocate(mat, nd, shape);
       }
     }
   };
   #endif

   template <typename MatType>
   struct numpy_allocator_impl_matrix<const MatType &> {
     template <typename SimilarMatrixType>
     static PyArrayObject *allocate(
         const Eigen::PlainObjectBase<SimilarMatrixType> &mat, npy_intp nd,
         npy_intp *shape) {
       typedef typename SimilarMatrixType::Scalar Scalar;
       enum {
         NPY_ARRAY_MEMORY_CONTIGUOUS_RO = SimilarMatrixType::IsRowMajor
                                              ? NPY_ARRAY_CARRAY_RO
                                              : NPY_ARRAY_FARRAY_RO
       };

       if (NumpyType::sharedMemory()) {
         const int Scalar_type_code = Register::getTypeCode<Scalar>();
         PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
             getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
             const_cast<Scalar *>(mat.data()),
             NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);

         return pyArray;
       } else {
         return NumpyAllocator<MatType>::allocate(mat, nd, shape);
       }
     }
   };

   #if EIGEN_VERSION_AT_LEAST(3, 2, 0)
   template <typename MatType, int Options, typename Stride>
   struct numpy_allocator_impl_matrix<
       const Eigen::Ref<const MatType, Options, Stride> > {
     typedef const Eigen::Ref<const MatType, Options, Stride> RefType;

     static PyArrayObject *allocate(RefType &mat, npy_intp nd,
                                    npy_intp *shape) {
       typedef typename RefType::Scalar Scalar;
       enum {
         NPY_ARRAY_MEMORY_CONTIGUOUS_RO =
             RefType::IsRowMajor ? NPY_ARRAY_CARRAY_RO : NPY_ARRAY_FARRAY_RO
       };

       if (NumpyType::sharedMemory()) {
         const int Scalar_type_code = Register::getTypeCode<Scalar>();

         const bool reverse_strides = MatType::IsRowMajor || (mat.rows() == 1);
         Eigen::DenseIndex inner_stride = reverse_strides ? mat.outerStride()
                                                          : mat.innerStride(),
                           outer_stride = reverse_strides ? mat.innerStride()
                                                          : mat.outerStride();

   #if NPY_ABI_VERSION < 0x02000000
         const int elsize =
             call_PyArray_DescrFromType(Scalar_type_code)->elsize;
   #else
         const int elsize =
             PyDataType_ELSIZE(call_PyArray_DescrFromType(Scalar_type_code));
   #endif
         npy_intp strides[2] = {elsize * inner_stride, elsize * outer_stride};

         PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
             getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
             strides, const_cast<Scalar *>(mat.data()),
             NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);

         return pyArray;
       } else {
         return NumpyAllocator<MatType>::allocate(mat, nd, shape);
       }
     }
   };
   #endif

   #ifdef EIGENPY_WITH_TENSOR_SUPPORT
   template <typename TensorType>
   struct numpy_allocator_impl_tensor<Eigen::TensorRef<TensorType> > {
     typedef Eigen::TensorRef<TensorType> RefType;

     static PyArrayObject *allocate(RefType &tensor, npy_intp nd,
                                    npy_intp *shape) {
       typedef typename RefType::Scalar Scalar;
       static const bool IsRowMajor = TensorType::Options & Eigen::RowMajorBit;
       enum {
         NPY_ARRAY_MEMORY_CONTIGUOUS =
             IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
       };
       if (NumpyType::sharedMemory()) {
         const int Scalar_type_code = Register::getTypeCode<Scalar>();

         // static const Index NumIndices = TensorType::NumIndices;
         // const int elsize =
         //     call_PyArray_DescrFromType(Scalar_type_code)->elsize;
         // npy_intp strides[NumIndices];

         PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
             getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
             NULL, const_cast<Scalar *>(tensor.data()),
             NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);

         return pyArray;
       } else {
         return NumpyAllocator<TensorType>::allocate(tensor, nd, shape);
       }
     }
   };

   template <typename TensorType>
   struct numpy_allocator_impl_tensor<
       const Eigen::TensorRef<const TensorType> > {
     typedef const Eigen::TensorRef<const TensorType> RefType;

     static PyArrayObject *allocate(RefType &tensor, npy_intp nd,
                                    npy_intp *shape) {
       typedef typename RefType::Scalar Scalar;
       static const bool IsRowMajor = TensorType::Options & Eigen::RowMajorBit;
       enum {
         NPY_ARRAY_MEMORY_CONTIGUOUS_RO =
             IsRowMajor ? NPY_ARRAY_CARRAY_RO : NPY_ARRAY_FARRAY_RO
       };
       if (NumpyType::sharedMemory()) {
         const int Scalar_type_code = Register::getTypeCode<Scalar>();

         PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
             getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
             NULL, const_cast<Scalar *>(tensor.data()),
             NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);

         return pyArray;
       } else {
         return NumpyAllocator<TensorType>::allocate(tensor, nd, shape);
       }
     }
   };
   #endif
   }  // namespace eigenpy

   #endif  // ifndef __eigenpy_numpy_allocator_hpp__
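``NumpyAllocator`` is normally driven by eigenpy's internal Eigen-to-Python converters rather than called by hand. For orientation, the sketch below shows how the ``allocate`` entry point could be exercised directly for a two-dimensional matrix. The helper ``matrix_to_numpy`` is hypothetical (it is not part of eigenpy's API), and the snippet assumes the Python interpreter and the NumPy C API have already been initialized, e.g. through ``eigenpy::enableEigenPy()``.

.. code-block:: cpp

   #include <eigenpy/eigenpy.hpp>
   #include <eigenpy/numpy-allocator.hpp>

   // Hypothetical helper, not part of eigenpy: expose an Eigen matrix as a
   // NumPy array. Instantiating NumpyAllocator with a plain MatType selects
   // the copying specialization above; the `MatType &` / `const MatType &`
   // specializations may instead share memory with `mat` when
   // NumpyType::sharedMemory() is true.
   template <typename MatType>
   PyArrayObject *matrix_to_numpy(const Eigen::MatrixBase<MatType> &mat) {
     npy_intp shape[2] = {static_cast<npy_intp>(mat.rows()),
                          static_cast<npy_intp>(mat.cols())};
     return eigenpy::NumpyAllocator<MatType>::allocate(mat, 2, shape);
   }

   // Usage, assuming eigenpy::enableEigenPy() has been called beforehand:
   //   Eigen::MatrixXd M = Eigen::MatrixXd::Random(3, 4);
   //   PyArrayObject *array = matrix_to_numpy(M);  // freshly allocated copy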