numpy-allocator.hpp
Go to the documentation of this file.
1 /*
2  * Copyright 2020-2023 INRIA
3  */
4 
5 #ifndef __eigenpy_numpy_allocator_hpp__
6 #define __eigenpy_numpy_allocator_hpp__
7 
8 #include "eigenpy/fwd.hpp"
10 #include "eigenpy/numpy-type.hpp"
11 #include "eigenpy/register.hpp"
12 
13 namespace eigenpy {
14 
// ---------------------------------------------------------------------------
// Trait machinery: numpy_allocator_impl<EigenType, BaseType> dispatches to a
// concrete allocator implementation depending on whether EigenType is a
// matrix or a tensor, and on its const/reference qualification.
// NOTE(review): this listing was extracted from Doxygen output; the
// hyperlinked declaration lines (e.g. "struct numpy_allocator_impl;" and
// "struct numpy_allocator_impl_matrix;", original lines 16/19/22/27/39) were
// dropped by the extraction -- compare against the upstream header before
// editing this region.
// ---------------------------------------------------------------------------
15 template <typename EigenType, typename BaseType>
17 
18 template <typename EigenType>
20 
// Non-const MatrixBase-derived types map to the matrix allocator.
21 template <typename MatType>
23  MatType, Eigen::MatrixBase<typename remove_const_reference<MatType>::type> >
24  : numpy_allocator_impl_matrix<MatType> {};
25 
// Const MatrixBase-derived types map to the const matrix allocator.
26 template <typename MatType>
28  const MatType,
29  const Eigen::MatrixBase<typename remove_const_reference<MatType>::type> >
30  : numpy_allocator_impl_matrix<const MatType> {};
31 
32 // template <typename MatType>
33 // struct numpy_allocator_impl<MatType &, Eigen::MatrixBase<MatType> > :
34 // numpy_allocator_impl_matrix<MatType &>
35 //{};
36 
// Const lvalue references to matrices get their own specialization.
// NOTE(review): the inheritance line (original line 39) is missing here.
37 template <typename MatType>
38 struct numpy_allocator_impl<const MatType &, const Eigen::MatrixBase<MatType> >
40 
// Public entry point: NumpyAllocator<EigenType> resolves BaseType via
// get_eigen_base_type (see fwd.hpp) and inherits the matching implementation.
41 template <typename EigenType,
42  typename BaseType = typename get_eigen_base_type<EigenType>::type>
43 struct NumpyAllocator : numpy_allocator_impl<EigenType, BaseType> {};
44 
// Generic (copying) allocator for plain matrices: allocates a fresh NumPy
// array with the registered scalar dtype and copies the matrix content in.
// NOTE(review): two lines were dropped by the Doxygen extraction -- the
// struct header (original line 46, "struct numpy_allocator_impl_matrix {")
// and the copy call (original line 58, presumably
// "EigenAllocator<...>::copy(mat, pyArray);") -- restore from upstream.
45 template <typename MatType>
47  template <typename SimilarMatrixType>
48  static PyArrayObject *allocate(
49  const Eigen::MatrixBase<SimilarMatrixType> &mat, npy_intp nd,
50  npy_intp *shape) {
51  typedef typename SimilarMatrixType::Scalar Scalar;
52 
// Look up the NumPy dtype code registered for this scalar type.
53  const int code = Register::getTypeCode<Scalar>();
54  PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_SimpleNew(
55  static_cast<int>(nd), shape, code);
56 
57  // Copy data
59 
60  return pyArray;
61  }
62 };
63 
64 #ifdef EIGENPY_WITH_TENSOR_SUPPORT
65 
66 template <typename TensorType>
67 struct numpy_allocator_impl_tensor;
68 
69 template <typename TensorType>
70 struct numpy_allocator_impl<TensorType, Eigen::TensorBase<TensorType> >
71  : numpy_allocator_impl_tensor<TensorType> {};
72 
73 template <typename TensorType>
74 struct numpy_allocator_impl<const TensorType,
75  const Eigen::TensorBase<TensorType> >
76  : numpy_allocator_impl_tensor<const TensorType> {};
77 
// Copying allocator for tensor expressions: allocates a new NumPy array with
// the registered scalar dtype and copies the tensor content into it.
78 template <typename TensorType>
79 struct numpy_allocator_impl_tensor {
80  template <typename TensorDerived>
81  static PyArrayObject *allocate(const TensorDerived &tensor, npy_intp nd,
82  npy_intp *shape) {
// Look up the NumPy dtype code registered for this scalar type.
83  const int code = Register::getTypeCode<typename TensorDerived::Scalar>();
84  PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_SimpleNew(
85  static_cast<int>(nd), shape, code);
86 
87  // Copy data
// NOTE(review): the call opener (original line 88, presumably
// "EigenAllocator<TensorType>::copy(") was dropped by the Doxygen
// extraction; only its continuation line below survives.
89  static_cast<const TensorDerived &>(tensor), pyArray);
90 
91  return pyArray;
92  }
93 };
94 #endif
95 
96 template <typename MatType>
97 struct numpy_allocator_impl_matrix<MatType &> {
98  template <typename SimilarMatrixType>
99  static PyArrayObject *allocate(Eigen::PlainObjectBase<SimilarMatrixType> &mat,
100  npy_intp nd, npy_intp *shape) {
101  typedef typename SimilarMatrixType::Scalar Scalar;
102  enum {
103  NPY_ARRAY_MEMORY_CONTIGUOUS =
104  SimilarMatrixType::IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
105  };
106 
107  if (NumpyType::sharedMemory()) {
108  const int Scalar_type_code = Register::getTypeCode<Scalar>();
109  PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
110  getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
111  mat.data(), NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);
112 
113  return pyArray;
114  } else {
115  return NumpyAllocator<MatType>::allocate(mat, nd, shape);
116  }
117  }
118 };
119 
120 #if EIGEN_VERSION_AT_LEAST(3, 2, 0)
121 
122 template <typename MatType, int Options, typename Stride>
123 struct numpy_allocator_impl_matrix<Eigen::Ref<MatType, Options, Stride> > {
124  typedef Eigen::Ref<MatType, Options, Stride> RefType;
125 
126  static PyArrayObject *allocate(RefType &mat, npy_intp nd, npy_intp *shape) {
127  typedef typename RefType::Scalar Scalar;
128  enum {
129  NPY_ARRAY_MEMORY_CONTIGUOUS =
130  RefType::IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
131  };
132 
133  if (NumpyType::sharedMemory()) {
134  const int Scalar_type_code = Register::getTypeCode<Scalar>();
135  const bool reverse_strides = MatType::IsRowMajor || (mat.rows() == 1);
136  Eigen::DenseIndex inner_stride = reverse_strides ? mat.outerStride()
137  : mat.innerStride(),
138  outer_stride = reverse_strides ? mat.innerStride()
139  : mat.outerStride();
140 
141 #if NPY_ABI_VERSION < 0x02000000
142  const int elsize = call_PyArray_DescrFromType(Scalar_type_code)->elsize;
143 #else
144  const int elsize =
145  PyDataType_ELSIZE(call_PyArray_DescrFromType(Scalar_type_code));
146 #endif
147  npy_intp strides[2] = {elsize * inner_stride, elsize * outer_stride};
148 
149  PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
150  getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
151  strides, mat.data(), NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);
152 
153  return pyArray;
154  } else {
155  return NumpyAllocator<MatType>::allocate(mat, nd, shape);
156  }
157  }
158 };
159 
160 #endif
161 
162 template <typename MatType>
163 struct numpy_allocator_impl_matrix<const MatType &> {
164  template <typename SimilarMatrixType>
165  static PyArrayObject *allocate(
166  const Eigen::PlainObjectBase<SimilarMatrixType> &mat, npy_intp nd,
167  npy_intp *shape) {
168  typedef typename SimilarMatrixType::Scalar Scalar;
169  enum {
170  NPY_ARRAY_MEMORY_CONTIGUOUS_RO = SimilarMatrixType::IsRowMajor
171  ? NPY_ARRAY_CARRAY_RO
172  : NPY_ARRAY_FARRAY_RO
173  };
174 
175  if (NumpyType::sharedMemory()) {
176  const int Scalar_type_code = Register::getTypeCode<Scalar>();
177  PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
178  getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
179  const_cast<Scalar *>(mat.data()),
180  NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);
181 
182  return pyArray;
183  } else {
184  return NumpyAllocator<MatType>::allocate(mat, nd, shape);
185  }
186  }
187 };
188 
189 #if EIGEN_VERSION_AT_LEAST(3, 2, 0)
190 
191 template <typename MatType, int Options, typename Stride>
192 struct numpy_allocator_impl_matrix<
193  const Eigen::Ref<const MatType, Options, Stride> > {
194  typedef const Eigen::Ref<const MatType, Options, Stride> RefType;
195 
196  static PyArrayObject *allocate(RefType &mat, npy_intp nd, npy_intp *shape) {
197  typedef typename RefType::Scalar Scalar;
198  enum {
199  NPY_ARRAY_MEMORY_CONTIGUOUS_RO =
200  RefType::IsRowMajor ? NPY_ARRAY_CARRAY_RO : NPY_ARRAY_FARRAY_RO
201  };
202 
203  if (NumpyType::sharedMemory()) {
204  const int Scalar_type_code = Register::getTypeCode<Scalar>();
205 
206  const bool reverse_strides = MatType::IsRowMajor || (mat.rows() == 1);
207  Eigen::DenseIndex inner_stride = reverse_strides ? mat.outerStride()
208  : mat.innerStride(),
209  outer_stride = reverse_strides ? mat.innerStride()
210  : mat.outerStride();
211 
212 #if NPY_ABI_VERSION < 0x02000000
213  const int elsize = call_PyArray_DescrFromType(Scalar_type_code)->elsize;
214 #else
215  const int elsize =
216  PyDataType_ELSIZE(call_PyArray_DescrFromType(Scalar_type_code));
217 #endif
218  npy_intp strides[2] = {elsize * inner_stride, elsize * outer_stride};
219 
220  PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
221  getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
222  strides, const_cast<Scalar *>(mat.data()),
223  NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);
224 
225  return pyArray;
226  } else {
227  return NumpyAllocator<MatType>::allocate(mat, nd, shape);
228  }
229  }
230 };
231 
232 #endif
233 
234 #ifdef EIGENPY_WITH_TENSOR_SUPPORT
235 template <typename TensorType>
236 struct numpy_allocator_impl_tensor<Eigen::TensorRef<TensorType> > {
237  typedef Eigen::TensorRef<TensorType> RefType;
238 
239  static PyArrayObject *allocate(RefType &tensor, npy_intp nd,
240  npy_intp *shape) {
241  typedef typename RefType::Scalar Scalar;
242  static const bool IsRowMajor = TensorType::Options & Eigen::RowMajorBit;
243  enum {
244  NPY_ARRAY_MEMORY_CONTIGUOUS =
245  IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
246  };
247 
248  if (NumpyType::sharedMemory()) {
249  const int Scalar_type_code = Register::getTypeCode<Scalar>();
250  // static const Index NumIndices = TensorType::NumIndices;
251 
252  // const int elsize =
253  // call_PyArray_DescrFromType(Scalar_type_code)->elsize; npy_intp
254  // strides[NumIndices];
255 
256  PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
257  getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code, NULL,
258  const_cast<Scalar *>(tensor.data()),
259  NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);
260 
261  return pyArray;
262  } else {
263  return NumpyAllocator<TensorType>::allocate(tensor, nd, shape);
264  }
265  }
266 };
267 
268 template <typename TensorType>
269 struct numpy_allocator_impl_tensor<const Eigen::TensorRef<const TensorType> > {
270  typedef const Eigen::TensorRef<const TensorType> RefType;
271 
272  static PyArrayObject *allocate(RefType &tensor, npy_intp nd,
273  npy_intp *shape) {
274  typedef typename RefType::Scalar Scalar;
275  static const bool IsRowMajor = TensorType::Options & Eigen::RowMajorBit;
276  enum {
277  NPY_ARRAY_MEMORY_CONTIGUOUS_RO =
278  IsRowMajor ? NPY_ARRAY_CARRAY_RO : NPY_ARRAY_FARRAY_RO
279  };
280 
281  if (NumpyType::sharedMemory()) {
282  const int Scalar_type_code = Register::getTypeCode<Scalar>();
283 
284  PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
285  getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code, NULL,
286  const_cast<Scalar *>(tensor.data()),
287  NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);
288 
289  return pyArray;
290  } else {
291  return NumpyAllocator<TensorType>::allocate(tensor, nd, shape);
292  }
293  }
294 };
295 
296 #endif
297 } // namespace eigenpy
298 
299 #endif // ifndef __eigenpy_numpy_allocator_hpp__
eigenpy::getPyArrayType
PyTypeObject * getPyArrayType()
Definition: numpy.hpp:264
Eigen
Definition: complex.cpp:7
register.hpp
eigenpy::numpy_allocator_impl_matrix
Definition: numpy-allocator.hpp:19
eigenpy::call_PyArray_DescrFromType
PyArray_Descr * call_PyArray_DescrFromType(int typenum)
Definition: numpy.hpp:266
fwd.hpp
eigenpy::numpy_allocator_impl_matrix< MatType & >::allocate
static PyArrayObject * allocate(Eigen::PlainObjectBase< SimilarMatrixType > &mat, npy_intp nd, npy_intp *shape)
Definition: numpy-allocator.hpp:99
eigenpy::numpy_allocator_impl
Definition: numpy-allocator.hpp:16
eigenpy::get_eigen_base_type::type
boost::mpl::if_< boost::is_const< typename boost::remove_reference< EigenType >::type >, const _type, _type >::type type
Definition: fwd.hpp:165
eigenpy::NumpyType::sharedMemory
static bool sharedMemory()
Definition: numpy-type.cpp:32
eigenpy
Definition: alignment.hpp:14
eigen-allocator.hpp
copy
ReturnMatrix copy(const Eigen::MatrixBase< Matrix > &mat)
Definition: matrix.cpp:131
eigenpy::call_PyArray_SimpleNew
PyObject * call_PyArray_SimpleNew(int nd, npy_intp *shape, int np_type)
Definition: numpy.hpp:241
eigenpy::NumpyAllocator
Definition: numpy-allocator.hpp:43
test_eigen_ref.mat
mat
Definition: test_eigen_ref.py:137
eigenpy::numpy_allocator_impl_matrix::allocate
static PyArrayObject * allocate(const Eigen::MatrixBase< SimilarMatrixType > &mat, npy_intp nd, npy_intp *shape)
Definition: numpy-allocator.hpp:48
eigenpy::call_PyArray_New
PyObject * call_PyArray_New(PyTypeObject *py_type_ptr, int nd, npy_intp *shape, int np_type, void *data_ptr, int options)
Definition: numpy.hpp:245
numpy-type.hpp
eigenpy::numpy_allocator_impl_matrix< const MatType & >
Definition: numpy-allocator.hpp:163
eigenpy::numpy_allocator_impl_matrix< const MatType & >::allocate
static PyArrayObject * allocate(const Eigen::PlainObjectBase< SimilarMatrixType > &mat, npy_intp nd, npy_intp *shape)
Definition: numpy-allocator.hpp:165


eigenpy
Author(s): Justin Carpentier, Nicolas Mansard
autogenerated on Sat Nov 2 2024 02:14:45