eigen-allocator.hpp
//
// Copyright (c) 2014-2023 CNRS INRIA
//

#ifndef __eigenpy_eigen_allocator_hpp__
#define __eigenpy_eigen_allocator_hpp__

#include "eigenpy/fwd.hpp"
#include "eigenpy/numpy-map.hpp"
#include "eigenpy/register.hpp"

namespace eigenpy {

namespace details {
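// init_matrix_or_array allocates an Eigen matrix or array (optionally with
// placement new into user-provided storage) whose dimensions are deduced from
// a 1-D or 2-D NumPy array; the partial specialization below handles
// compile-time vector types through the single-argument size constructor.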
template <typename MatType,
          bool IsVectorAtCompileTime = MatType::IsVectorAtCompileTime>
struct init_matrix_or_array {
  static MatType *run(int rows, int cols, void *storage) {
    if (storage)
      return new (storage) MatType(rows, cols);
    else
      return new MatType(rows, cols);
  }

  static MatType *run(PyArrayObject *pyArray, void *storage = NULL) {
    assert(PyArray_NDIM(pyArray) == 1 || PyArray_NDIM(pyArray) == 2);

    int rows = -1, cols = -1;
    const int ndim = PyArray_NDIM(pyArray);
    if (ndim == 2) {
      rows = (int)PyArray_DIMS(pyArray)[0];
      cols = (int)PyArray_DIMS(pyArray)[1];
    } else if (ndim == 1) {
      rows = (int)PyArray_DIMS(pyArray)[0];
      cols = 1;
    }

    return run(rows, cols, storage);
  }
};

template <typename MatType>
struct init_matrix_or_array<MatType, true> {
  static MatType *run(int rows, int cols, void *storage) {
    if (storage)
      return new (storage) MatType(rows, cols);
    else
      return new MatType(rows, cols);
  }

  static MatType *run(int size, void *storage) {
    if (storage)
      return new (storage) MatType(size);
    else
      return new MatType(size);
  }

  static MatType *run(PyArrayObject *pyArray, void *storage = NULL) {
    const int ndim = PyArray_NDIM(pyArray);
    if (ndim == 1) {
      const int size = (int)PyArray_DIMS(pyArray)[0];
      return run(size, storage);
    } else {
      const int rows = (int)PyArray_DIMS(pyArray)[0];
      const int cols = (int)PyArray_DIMS(pyArray)[1];
      return run(rows, cols, storage);
    }
  }
};

#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename Tensor>
struct init_tensor {
  static Tensor *run(PyArrayObject *pyArray, void *storage = NULL) {
    enum { Rank = Tensor::NumDimensions };
    assert(PyArray_NDIM(pyArray) == Rank);
    typedef typename Tensor::Index Index;

    Eigen::array<Index, Rank> dimensions;
    for (int k = 0; k < PyArray_NDIM(pyArray); ++k)
      dimensions[k] = PyArray_DIMS(pyArray)[k];

    if (storage)
      return new (storage) Tensor(dimensions);
    else
      return new Tensor(dimensions);
  }
};
#endif

template <typename MatType>
struct check_swap_impl_matrix;

template <typename EigenType,
          typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct check_swap_impl;

template <typename MatType>
struct check_swap_impl<MatType, Eigen::MatrixBase<MatType> >
    : check_swap_impl_matrix<MatType> {};
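
// check_swap reports whether the NumPy array must be mapped with swapped
// dimensions: the matrix implementation below returns true whenever the
// leading NumPy dimension differs from the Eigen row count, and the result is
// forwarded as the swap flag of NumpyMap<...>::map in the cast macros further
// down.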
template <typename MatType>
struct check_swap_impl_matrix {
  static bool run(PyArrayObject *pyArray,
                  const Eigen::MatrixBase<MatType> &mat) {
    if (PyArray_NDIM(pyArray) == 0) return false;
    if (mat.rows() == PyArray_DIMS(pyArray)[0])
      return false;
    else
      return true;
  }
};

template <typename EigenType>
bool check_swap(PyArrayObject *pyArray, const EigenType &mat) {
  return check_swap_impl<EigenType>::run(pyArray, mat);
}

#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType>
struct check_swap_impl_tensor {
  static bool run(PyArrayObject * /*pyArray*/, const TensorType & /*tensor*/) {
    return false;
  }
};

template <typename TensorType>
struct check_swap_impl<TensorType, Eigen::TensorBase<TensorType> >
    : check_swap_impl_tensor<TensorType> {};
#endif

// template <typename MatType>
// struct cast_impl_matrix;
//
// template <typename EigenType,
//           typename BaseType = typename get_eigen_base_type<EigenType>::type>
// struct cast_impl;
//
// template <typename MatType>
// struct cast_impl<MatType, Eigen::MatrixBase<MatType> >
//     : cast_impl_matrix<MatType> {};
//
// template <typename MatType>
// struct cast_impl_matrix
// {
//   template <typename NewScalar, typename MatrixIn, typename MatrixOut>
//   static void run(const Eigen::MatrixBase<MatrixIn> &input,
//                   const Eigen::MatrixBase<MatrixOut> &dest) {
//     dest.const_cast_derived() = input.template cast<NewScalar>();
//   }
// };

template <typename Scalar, typename NewScalar,
          template <typename D> class EigenBase = Eigen::MatrixBase,
          bool cast_is_valid = FromTypeToType<Scalar, NewScalar>::value>
struct cast {
  template <typename MatrixIn, typename MatrixOut>
  static void run(const Eigen::MatrixBase<MatrixIn> &input,
                  const Eigen::MatrixBase<MatrixOut> &dest) {
    dest.const_cast_derived() = input.template cast<NewScalar>();
  }
};

#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename Scalar, typename NewScalar>
struct cast<Scalar, NewScalar, Eigen::TensorRef, true> {
  template <typename TensorIn, typename TensorOut>
  static void run(const TensorIn &input, TensorOut &dest) {
    dest = input.template cast<NewScalar>();
  }
};
#endif

template <typename Scalar, typename NewScalar,
          template <typename D> class EigenBase>
struct cast<Scalar, NewScalar, EigenBase, false> {
  template <typename MatrixIn, typename MatrixOut>
  static void run(const MatrixIn /*input*/, const MatrixOut /*dest*/) {
    // do nothing
    assert(false && "Must never happen");
  }
};

}  // namespace details

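// Both helper macros below wrap the NumPy buffer with NumpyMap<...>::map,
// using details::check_swap to decide whether the mapping must be transposed,
// and then perform the element-wise scalar conversion through
// details::cast<Scalar, NewScalar>.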
#define EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, Scalar, NewScalar, \
                                                  pyArray, mat)               \
  details::cast<Scalar, NewScalar>::run(                                      \
      NumpyMap<MatType, Scalar>::map(pyArray,                                 \
                                     details::check_swap(pyArray, mat)),      \
      mat)

#define EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(MatType, Scalar, NewScalar, \
                                                  mat, pyArray)               \
  details::cast<Scalar, NewScalar>::run(                                      \
      mat, NumpyMap<MatType, NewScalar>::map(                                 \
               pyArray, details::check_swap(pyArray, mat)))
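
// eigen_allocator_impl dispatches on the Eigen base type (matrix/array,
// tensor, Eigen::Ref) and exposes two static functions used by the Boost
// Python converters: allocate(), which constructs the C++ object inside the
// converter storage from a NumPy array, and copy(), which copies data between
// a NumPy array and an Eigen object, casting element-wise when the scalar
// types differ.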

template <typename EigenType>
struct EigenAllocator;

template <typename EigenType,
          typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct eigen_allocator_impl;

template <typename MatType>
struct eigen_allocator_impl_matrix;

template <typename MatType>
struct eigen_allocator_impl<MatType, Eigen::MatrixBase<MatType> >
    : eigen_allocator_impl_matrix<MatType> {};

template <typename MatType>
struct eigen_allocator_impl<const MatType, const Eigen::MatrixBase<MatType> >
    : eigen_allocator_impl_matrix<const MatType> {};

template <typename MatType>
struct eigen_allocator_impl_matrix {
  typedef MatType Type;
  typedef typename MatType::Scalar Scalar;

  static void allocate(
      PyArrayObject *pyArray,
      boost::python::converter::rvalue_from_python_storage<MatType> *storage) {
    void *raw_ptr = storage->storage.bytes;
    assert(is_aligned(raw_ptr, EIGENPY_DEFAULT_ALIGN_BYTES) &&
           "The pointer is not aligned.");

    Type *mat_ptr = details::init_matrix_or_array<Type>::run(pyArray, raw_ptr);
    Type &mat = *mat_ptr;

    copy(pyArray, mat);
  }

  /// \brief Copy the Python array into the input matrix mat.
  template <typename MatrixDerived>
  static void copy(PyArrayObject *pyArray,
                   const Eigen::MatrixBase<MatrixDerived> &mat_) {
    MatrixDerived &mat = mat_.const_cast_derived();
    const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
    const int Scalar_type_code = Register::getTypeCode<Scalar>();

    if (pyArray_type_code == Scalar_type_code) {
      mat = NumpyMap<MatType, Scalar>::map(
          pyArray, details::check_swap(pyArray, mat));  // avoid useless cast
      return;
    }

    switch (pyArray_type_code) {
      case NPY_INT:
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, int, Scalar,
                                                  pyArray, mat);
        break;
      case NPY_LONG:
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, long, Scalar,
                                                  pyArray, mat);
        break;
      case NPY_FLOAT:
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, float, Scalar,
                                                  pyArray, mat);
        break;
      case NPY_CFLOAT:
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, std::complex<float>,
                                                  Scalar, pyArray, mat);
        break;
      case NPY_DOUBLE:
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, double, Scalar,
                                                  pyArray, mat);
        break;
      case NPY_CDOUBLE:
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(
            MatType, std::complex<double>, Scalar, pyArray, mat);
        break;
      case NPY_LONGDOUBLE:
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, long double, Scalar,
                                                  pyArray, mat);
        break;
      case NPY_CLONGDOUBLE:
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(
            MatType, std::complex<long double>, Scalar, pyArray, mat);
        break;
      default:
        throw Exception("You asked for a conversion which is not implemented.");
    }
  }

  /// \brief Copy mat into the Python array using Eigen::Map.
  template <typename MatrixDerived>
  static void copy(const Eigen::MatrixBase<MatrixDerived> &mat_,
                   PyArrayObject *pyArray) {
    const MatrixDerived &mat =
        const_cast<const MatrixDerived &>(mat_.derived());
    const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
    const int Scalar_type_code = Register::getTypeCode<Scalar>();

    if (pyArray_type_code == Scalar_type_code)  // no cast needed
    {
      NumpyMap<MatType, Scalar>::map(pyArray,
                                     details::check_swap(pyArray, mat)) = mat;
      return;
    }

    switch (pyArray_type_code) {
      case NPY_INT:
        EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(MatType, Scalar, int, mat,
                                                  pyArray);
        break;
      case NPY_LONG:
        EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(MatType, Scalar, long, mat,
                                                  pyArray);
        break;
      case NPY_FLOAT:
        EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(MatType, Scalar, float, mat,
                                                  pyArray);
        break;
      case NPY_CFLOAT:
        EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(
            MatType, Scalar, std::complex<float>, mat, pyArray);
        break;
      case NPY_DOUBLE:
        EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(MatType, Scalar, double, mat,
                                                  pyArray);
        break;
      case NPY_CDOUBLE:
        EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(
            MatType, Scalar, std::complex<double>, mat, pyArray);
        break;
      case NPY_LONGDOUBLE:
        EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(MatType, Scalar, long double,
                                                  mat, pyArray);
        break;
      case NPY_CLONGDOUBLE:
        EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(
            MatType, Scalar, std::complex<long double>, mat, pyArray);
        break;
      default:
        throw Exception("You asked for a conversion which is not implemented.");
    }
  }
};

#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType>
struct eigen_allocator_impl_tensor;

template <typename TensorType>
struct eigen_allocator_impl<TensorType, Eigen::TensorBase<TensorType> >
    : eigen_allocator_impl_tensor<TensorType> {};

template <typename TensorType>
struct eigen_allocator_impl<const TensorType,
                            const Eigen::TensorBase<TensorType> >
    : eigen_allocator_impl_tensor<const TensorType> {};

template <typename TensorType>
struct eigen_allocator_impl_tensor {
  typedef typename TensorType::Scalar Scalar;
  static void allocate(
      PyArrayObject *pyArray,
      boost::python::converter::rvalue_from_python_storage<TensorType>
          *storage) {
    void *raw_ptr = storage->storage.bytes;
    assert(is_aligned(raw_ptr, EIGENPY_DEFAULT_ALIGN_BYTES) &&
           "The pointer is not aligned.");

    TensorType *tensor_ptr =
        details::init_tensor<TensorType>::run(pyArray, raw_ptr);
    TensorType &tensor = *tensor_ptr;

    copy(pyArray, tensor);
  }

#define EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR(TensorType, Scalar,         \
                                                  NewScalar, pyArray, tensor) \
  {                                                                            \
    typename NumpyMap<TensorType, Scalar>::EigenMap pyArray_map =             \
        NumpyMap<TensorType, Scalar>::map(                                     \
            pyArray, details::check_swap(pyArray, tensor));                    \
    details::cast<Scalar, NewScalar, Eigen::TensorRef>::run(pyArray_map,       \
                                                            tensor);           \
  }

  template <typename TensorDerived>
  static void copy(PyArrayObject *pyArray, TensorDerived &tensor) {
    const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
    const int Scalar_type_code = Register::getTypeCode<Scalar>();

    if (pyArray_type_code == Scalar_type_code) {
      tensor = NumpyMap<TensorType, Scalar>::map(
          pyArray, details::check_swap(pyArray, tensor));  // avoid useless cast
      return;
    }

    switch (pyArray_type_code) {
      case NPY_INT:
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR(TensorType, int, Scalar,
                                                  pyArray, tensor);
        break;
      case NPY_LONG:
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR(TensorType, long, Scalar,
                                                  pyArray, tensor);
        break;
      case NPY_FLOAT:
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR(TensorType, float, Scalar,
                                                  pyArray, tensor);
        break;
      case NPY_CFLOAT:
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR(
            TensorType, std::complex<float>, Scalar, pyArray, tensor);
        break;
      case NPY_DOUBLE:
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR(TensorType, double, Scalar,
                                                  pyArray, tensor);
        break;
      case NPY_CDOUBLE:
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR(
            TensorType, std::complex<double>, Scalar, pyArray, tensor);
        break;
      case NPY_LONGDOUBLE:
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR(TensorType, long double,
                                                  Scalar, pyArray, tensor);
        break;
      case NPY_CLONGDOUBLE:
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR(
            TensorType, std::complex<long double>, Scalar, pyArray, tensor);
        break;
      default:
        throw Exception("You asked for a conversion which is not implemented.");
    }
  }

#define EIGENPY_CAST_FROM_EIGEN_TENSOR_TO_PYARRAY(TensorType, Scalar,         \
                                                  NewScalar, tensor, pyArray) \
  {                                                                            \
    typename NumpyMap<TensorType, NewScalar>::EigenMap pyArray_map =          \
        NumpyMap<TensorType, NewScalar>::map(                                  \
            pyArray, details::check_swap(pyArray, tensor));                    \
    details::cast<Scalar, NewScalar, Eigen::TensorRef>::run(tensor,            \
                                                            pyArray_map);      \
  }

  static void copy(const TensorType &tensor, PyArrayObject *pyArray) {
    const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
    const int Scalar_type_code = Register::getTypeCode<Scalar>();

    if (pyArray_type_code == Scalar_type_code)  // no cast needed
    {
      NumpyMap<TensorType, Scalar>::map(
          pyArray, details::check_swap(pyArray, tensor)) = tensor;
      return;
    }

    switch (pyArray_type_code) {
      case NPY_INT:
        EIGENPY_CAST_FROM_EIGEN_TENSOR_TO_PYARRAY(TensorType, Scalar, int,
                                                  tensor, pyArray);
        break;
      case NPY_LONG:
        EIGENPY_CAST_FROM_EIGEN_TENSOR_TO_PYARRAY(TensorType, Scalar, long,
                                                  tensor, pyArray);
        break;
      case NPY_FLOAT:
        EIGENPY_CAST_FROM_EIGEN_TENSOR_TO_PYARRAY(TensorType, Scalar, float,
                                                  tensor, pyArray);
        break;
      case NPY_CFLOAT:
        EIGENPY_CAST_FROM_EIGEN_TENSOR_TO_PYARRAY(
            TensorType, Scalar, std::complex<float>, tensor, pyArray);
        break;
      case NPY_DOUBLE:
        EIGENPY_CAST_FROM_EIGEN_TENSOR_TO_PYARRAY(TensorType, Scalar, double,
                                                  tensor, pyArray);
        break;
      case NPY_CDOUBLE:
        EIGENPY_CAST_FROM_EIGEN_TENSOR_TO_PYARRAY(
            TensorType, Scalar, std::complex<double>, tensor, pyArray);
        break;
      case NPY_LONGDOUBLE:
        EIGENPY_CAST_FROM_EIGEN_TENSOR_TO_PYARRAY(TensorType, Scalar,
                                                  long double, tensor, pyArray);
        break;
      case NPY_CLONGDOUBLE:
        EIGENPY_CAST_FROM_EIGEN_TENSOR_TO_PYARRAY(
            TensorType, Scalar, std::complex<long double>, tensor, pyArray);
        break;
      default:
        throw Exception("You asked for a conversion which is not implemented.");
    }
  }
};
#endif

#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
template <typename MatType>
inline bool is_arr_layout_compatible_with_mat_type(PyArrayObject *pyArray) {
  bool is_array_C_cont = PyArray_IS_C_CONTIGUOUS(pyArray);
  bool is_array_F_cont = PyArray_IS_F_CONTIGUOUS(pyArray);
  return (MatType::IsRowMajor && is_array_C_cont) ||
         (!MatType::IsRowMajor && is_array_F_cont) ||
         (MatType::IsVectorAtCompileTime &&
          (is_array_C_cont || is_array_F_cont));
}
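
// The Eigen::Ref specializations below try to map the NumPy memory directly;
// a temporary matrix is allocated (and the data copied) only when the scalar
// types differ, the storage order is incompatible with MatType, or the buffer
// is not a single segment or not sufficiently aligned for the requested
// Options.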

template <typename MatType, int Options, typename Stride>
struct eigen_allocator_impl_matrix<Eigen::Ref<MatType, Options, Stride> > {
  typedef Eigen::Ref<MatType, Options, Stride> RefType;
  typedef typename MatType::Scalar Scalar;

  typedef
      typename ::boost::python::detail::referent_storage<RefType &>::StorageType
          StorageType;

  static void allocate(
      PyArrayObject *pyArray,
      ::boost::python::converter::rvalue_from_python_storage<RefType>
          *storage) {
    typedef typename StrideType<
        MatType,
        Eigen::internal::traits<RefType>::StrideType::InnerStrideAtCompileTime,
        Eigen::internal::traits<RefType>::StrideType::
            OuterStrideAtCompileTime>::type NumpyMapStride;

    bool need_to_allocate = false;
    const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
    const int Scalar_type_code = Register::getTypeCode<Scalar>();
    if (pyArray_type_code != Scalar_type_code) need_to_allocate |= true;
    bool incompatible_layout =
        !is_arr_layout_compatible_with_mat_type<MatType>(pyArray);
    need_to_allocate |= incompatible_layout;
    if (Options !=
        Eigen::Unaligned)  // we need to check whether the memory is correctly
                           // aligned and composed of a contiguous segment
    {
      void *data_ptr = PyArray_DATA(pyArray);
      if (!PyArray_ISONESEGMENT(pyArray) || !is_aligned(data_ptr, Options))
        need_to_allocate |= true;
    }

    void *raw_ptr = storage->storage.bytes;
    if (need_to_allocate) {
      MatType *mat_ptr;
      mat_ptr = details::init_matrix_or_array<MatType>::run(pyArray);
      RefType mat_ref(*mat_ptr);

      new (raw_ptr) StorageType(mat_ref, pyArray, mat_ptr);

      RefType &mat = *reinterpret_cast<RefType *>(raw_ptr);
      EigenAllocator<MatType>::copy(pyArray, mat);
    } else {
      assert(pyArray_type_code == Scalar_type_code);
      typename NumpyMap<MatType, Scalar, NumpyMapStride>::EigenMap numpyMap =
          NumpyMap<MatType, Scalar, NumpyMapStride>::map(pyArray);
      RefType mat_ref(numpyMap);
      new (raw_ptr) StorageType(mat_ref, pyArray);
    }
  }

  static void copy(RefType const &ref, PyArrayObject *pyArray) {
    EigenAllocator<MatType>::copy(ref, pyArray);
  }
};

template <typename MatType, int Options, typename Stride>
struct eigen_allocator_impl_matrix<
    const Eigen::Ref<const MatType, Options, Stride> > {
  typedef const Eigen::Ref<const MatType, Options, Stride> RefType;
  typedef typename MatType::Scalar Scalar;

  typedef
      typename ::boost::python::detail::referent_storage<RefType &>::StorageType
          StorageType;

  static void allocate(
      PyArrayObject *pyArray,
      ::boost::python::converter::rvalue_from_python_storage<RefType>
          *storage) {
    typedef typename StrideType<
        MatType,
        Eigen::internal::traits<RefType>::StrideType::InnerStrideAtCompileTime,
        Eigen::internal::traits<RefType>::StrideType::
            OuterStrideAtCompileTime>::type NumpyMapStride;

    bool need_to_allocate = false;
    const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
    const int Scalar_type_code = Register::getTypeCode<Scalar>();

    if (pyArray_type_code != Scalar_type_code) need_to_allocate |= true;
    bool incompatible_layout =
        !is_arr_layout_compatible_with_mat_type<MatType>(pyArray);
    need_to_allocate |= incompatible_layout;
    if (Options !=
        Eigen::Unaligned)  // we need to check whether the memory is correctly
                           // aligned and composed of a contiguous segment
    {
      void *data_ptr = PyArray_DATA(pyArray);
      if (!PyArray_ISONESEGMENT(pyArray) || !is_aligned(data_ptr, Options))
        need_to_allocate |= true;
    }

    void *raw_ptr = storage->storage.bytes;
    if (need_to_allocate) {
      MatType *mat_ptr;
      mat_ptr = details::init_matrix_or_array<MatType>::run(pyArray);
      RefType mat_ref(*mat_ptr);

      new (raw_ptr) StorageType(mat_ref, pyArray, mat_ptr);

      MatType &mat = *mat_ptr;
      EigenAllocator<MatType>::copy(pyArray, mat);
    } else {
      assert(pyArray_type_code == Scalar_type_code);
      typename NumpyMap<MatType, Scalar, NumpyMapStride>::EigenMap numpyMap =
          NumpyMap<MatType, Scalar, NumpyMapStride>::map(pyArray);
      RefType mat_ref(numpyMap);
      new (raw_ptr) StorageType(mat_ref, pyArray);
    }
  }

  static void copy(RefType const &ref, PyArrayObject *pyArray) {
    EigenAllocator<MatType>::copy(ref, pyArray);
  }
};
#endif

#ifdef EIGENPY_WITH_TENSOR_SUPPORT

template <typename TensorType, typename TensorRef>
struct eigen_allocator_impl_tensor_ref;

template <typename TensorType>
struct eigen_allocator_impl_tensor<Eigen::TensorRef<TensorType> >
    : eigen_allocator_impl_tensor_ref<TensorType,
                                      Eigen::TensorRef<TensorType> > {};

template <typename TensorType>
struct eigen_allocator_impl_tensor<const Eigen::TensorRef<const TensorType> >
    : eigen_allocator_impl_tensor_ref<
          const TensorType, const Eigen::TensorRef<const TensorType> > {};

template <typename TensorType, typename RefType>
struct eigen_allocator_impl_tensor_ref {
  typedef typename TensorType::Scalar Scalar;

  typedef
      typename ::boost::python::detail::referent_storage<RefType &>::StorageType
          StorageType;

  static void allocate(
      PyArrayObject *pyArray,
      ::boost::python::converter::rvalue_from_python_storage<RefType>
          *storage) {
    // typedef typename StrideType<
    //     MatType,
    //     Eigen::internal::traits<RefType>::StrideType::InnerStrideAtCompileTime,
    //     Eigen::internal::traits<RefType>::StrideType::
    //         OuterStrideAtCompileTime>::type NumpyMapStride;

    static const int Options = Eigen::internal::traits<TensorType>::Options;

    bool need_to_allocate = false;
    const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
    const int Scalar_type_code = Register::getTypeCode<Scalar>();
    if (pyArray_type_code != Scalar_type_code) need_to_allocate |= true;
    // bool incompatible_layout =
    //     !is_arr_layout_compatible_with_mat_type<MatType>(pyArray);
    // need_to_allocate |= incompatible_layout;
    // if (Options != Eigen::Unaligned) {
    //   // we need to check whether the memory is correctly aligned and
    //   // composed of a contiguous segment
    //   void *data_ptr = PyArray_DATA(pyArray);
    //   if (!PyArray_ISONESEGMENT(pyArray) || !is_aligned(data_ptr, Options))
    //     need_to_allocate |= true;
    // }

    void *raw_ptr = storage->storage.bytes;
    if (need_to_allocate) {
      typedef typename boost::remove_const<TensorType>::type TensorTypeNonConst;
      TensorTypeNonConst *tensor_ptr;
      tensor_ptr = details::init_tensor<TensorTypeNonConst>::run(pyArray);
      RefType tensor_ref(*tensor_ptr);

      new (raw_ptr) StorageType(tensor_ref, pyArray, tensor_ptr);

      TensorTypeNonConst &tensor = *tensor_ptr;
      EigenAllocator<TensorTypeNonConst>::copy(pyArray, tensor);
    } else {
      assert(pyArray_type_code == Scalar_type_code);
      typename NumpyMap<TensorType, Scalar>::EigenMap numpyMap =
          NumpyMap<TensorType, Scalar>::map(pyArray);
      RefType tensor_ref(numpyMap);
      new (raw_ptr) StorageType(tensor_ref, pyArray);
    }
  }

  static void copy(RefType const &ref, PyArrayObject *pyArray) {
    EigenAllocator<TensorType>::copy(ref, pyArray);
  }
};

#endif

template <typename EigenType>
struct EigenAllocator : eigen_allocator_impl<EigenType> {};

}  // namespace eigenpy

#endif  // __eigenpy_eigen_allocator_hpp__
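
For context, the allocators above back eigenpy's NumPy-to-Eigen converters and are not normally called directly by user code. A minimal sketch of the typical user-level entry point, assuming the usual eigenpy::enableEigenPy() initialization and a Boost.Python module, looks like this:

#include <boost/python.hpp>
#include <Eigen/Core>
#include <eigenpy/eigenpy.hpp>

// Returns the transpose of the input matrix; the converters registered by
// enableEigenPy() rely on the allocators defined in this header to build the
// Eigen matrix from the NumPy argument and to map the result back to NumPy.
Eigen::MatrixXd transpose(const Eigen::MatrixXd &M) { return M.transpose(); }

BOOST_PYTHON_MODULE(example_module) {
  eigenpy::enableEigenPy();  // registers the NumPy <-> Eigen converters
  boost::python::def("transpose", transpose);
}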