5 #ifndef __eigenpy_numpy_allocator_hpp__
6 #define __eigenpy_numpy_allocator_hpp__
15 template <
typename EigenType,
typename BaseType>
/// Forward declaration of the matrix-specific allocator implementation,
/// specialized below for plain matrices, references and Eigen::Ref.
template <typename EigenType>
struct numpy_allocator_impl_matrix;
21 template <
typename MatType>
23 MatType,
Eigen::MatrixBase<typename remove_const_reference<MatType>::type> >
26 template <
typename MatType>
29 const
Eigen::MatrixBase<typename remove_const_reference<MatType>::type> >
37 template <
typename MatType>
41 template <
typename EigenType,
45 template <
typename MatType>
47 template <
typename SimilarMatrixType>
49 const Eigen::MatrixBase<SimilarMatrixType> &
mat, npy_intp nd,
51 typedef typename SimilarMatrixType::Scalar Scalar;
53 const int code = Register::getTypeCode<Scalar>();
55 static_cast<int>(nd), shape, code);
64 #ifdef EIGENPY_WITH_TENSOR_SUPPORT
66 template <
typename TensorType>
67 struct numpy_allocator_impl_tensor;
69 template <
typename TensorType>
70 struct numpy_allocator_impl<TensorType,
Eigen::TensorBase<TensorType> >
71 : numpy_allocator_impl_tensor<TensorType> {};
73 template <
typename TensorType>
74 struct numpy_allocator_impl<const TensorType,
75 const
Eigen::TensorBase<TensorType> >
76 : numpy_allocator_impl_tensor<const TensorType> {};
78 template <
typename TensorType>
79 struct numpy_allocator_impl_tensor {
80 template <
typename TensorDerived>
81 static PyArrayObject *allocate(
const TensorDerived &tensor, npy_intp nd,
83 const int code = Register::getTypeCode<typename TensorDerived::Scalar>();
85 static_cast<int>(nd), shape, code);
89 static_cast<const TensorDerived &
>(tensor), pyArray);
96 template <
typename MatType>
98 template <
typename SimilarMatrixType>
99 static PyArrayObject *
allocate(Eigen::PlainObjectBase<SimilarMatrixType> &
mat,
100 npy_intp nd, npy_intp *shape) {
101 typedef typename SimilarMatrixType::Scalar Scalar;
103 NPY_ARRAY_MEMORY_CONTIGUOUS =
104 SimilarMatrixType::IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
108 const int Scalar_type_code = Register::getTypeCode<Scalar>();
111 mat.data(), NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);
120 #if EIGEN_VERSION_AT_LEAST(3, 2, 0)
122 template <
typename MatType,
int Options,
typename Str
ide>
123 struct numpy_allocator_impl_matrix<
Eigen::Ref<MatType, Options, Stride> > {
124 typedef Eigen::Ref<MatType, Options, Stride> RefType;
126 static PyArrayObject *
allocate(RefType &
mat, npy_intp nd, npy_intp *shape) {
127 typedef typename RefType::Scalar Scalar;
129 NPY_ARRAY_MEMORY_CONTIGUOUS =
130 RefType::IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
134 const int Scalar_type_code = Register::getTypeCode<Scalar>();
135 const bool reverse_strides = MatType::IsRowMajor || (
mat.rows() == 1);
136 Eigen::DenseIndex inner_stride = reverse_strides ?
mat.outerStride()
138 outer_stride = reverse_strides ?
mat.innerStride()
142 npy_intp strides[2] = {elsize * inner_stride, elsize * outer_stride};
146 strides,
mat.data(), NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);
150 return NumpyAllocator<MatType>::allocate(
mat, nd, shape);
157 template <
typename MatType>
159 template <
typename SimilarMatrixType>
161 const Eigen::PlainObjectBase<SimilarMatrixType> &
mat, npy_intp nd,
163 typedef typename SimilarMatrixType::Scalar Scalar;
165 NPY_ARRAY_MEMORY_CONTIGUOUS_RO = SimilarMatrixType::IsRowMajor
166 ? NPY_ARRAY_CARRAY_RO
167 : NPY_ARRAY_FARRAY_RO
171 const int Scalar_type_code = Register::getTypeCode<Scalar>();
174 const_cast<Scalar *
>(
mat.data()),
175 NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);
184 #if EIGEN_VERSION_AT_LEAST(3, 2, 0)
186 template <
typename MatType,
int Options,
typename Str
ide>
187 struct numpy_allocator_impl_matrix<
188 const
Eigen::Ref<const MatType, Options, Stride> > {
189 typedef const Eigen::Ref<const MatType, Options, Stride> RefType;
191 static PyArrayObject *
allocate(RefType &
mat, npy_intp nd, npy_intp *shape) {
192 typedef typename RefType::Scalar Scalar;
194 NPY_ARRAY_MEMORY_CONTIGUOUS_RO =
195 RefType::IsRowMajor ? NPY_ARRAY_CARRAY_RO : NPY_ARRAY_FARRAY_RO
199 const int Scalar_type_code = Register::getTypeCode<Scalar>();
201 const bool reverse_strides = MatType::IsRowMajor || (
mat.rows() == 1);
202 Eigen::DenseIndex inner_stride = reverse_strides ?
mat.outerStride()
204 outer_stride = reverse_strides ?
mat.innerStride()
208 npy_intp strides[2] = {elsize * inner_stride, elsize * outer_stride};
212 strides,
const_cast<Scalar *
>(
mat.data()),
213 NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);
217 return NumpyAllocator<MatType>::allocate(
mat, nd, shape);
224 #ifdef EIGENPY_WITH_TENSOR_SUPPORT
225 template <
typename TensorType>
226 struct numpy_allocator_impl_tensor<
Eigen::TensorRef<TensorType> > {
227 typedef Eigen::TensorRef<TensorType> RefType;
229 static PyArrayObject *allocate(RefType &tensor, npy_intp nd,
231 typedef typename RefType::Scalar Scalar;
232 static const bool IsRowMajor = TensorType::Options & Eigen::RowMajorBit;
234 NPY_ARRAY_MEMORY_CONTIGUOUS =
235 IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
239 const int Scalar_type_code = Register::getTypeCode<Scalar>();
247 getPyArrayType(),
static_cast<int>(nd), shape, Scalar_type_code, NULL,
248 const_cast<Scalar *
>(tensor.data()),
249 NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);
253 return NumpyAllocator<TensorType>::allocate(tensor, nd, shape);
258 template <
typename TensorType>
259 struct numpy_allocator_impl_tensor<const
Eigen::TensorRef<const TensorType> > {
260 typedef const Eigen::TensorRef<const TensorType> RefType;
262 static PyArrayObject *allocate(RefType &tensor, npy_intp nd,
264 typedef typename RefType::Scalar Scalar;
265 static const bool IsRowMajor = TensorType::Options & Eigen::RowMajorBit;
267 NPY_ARRAY_MEMORY_CONTIGUOUS_RO =
268 IsRowMajor ? NPY_ARRAY_CARRAY_RO : NPY_ARRAY_FARRAY_RO
272 const int Scalar_type_code = Register::getTypeCode<Scalar>();
275 getPyArrayType(),
static_cast<int>(nd), shape, Scalar_type_code, NULL,
276 const_cast<Scalar *
>(tensor.data()),
277 NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);
281 return NumpyAllocator<TensorType>::allocate(tensor, nd, shape);
289 #endif // ifndef __eigenpy_numpy_allocator_hpp__