CoreEvaluators.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
5 // Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
6 // Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
7 //
8 // This Source Code Form is subject to the terms of the Mozilla
9 // Public License v. 2.0. If a copy of the MPL was not distributed
10 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
11 
12 
13 #ifndef EIGEN_COREEVALUATORS_H
14 #define EIGEN_COREEVALUATORS_H
15 
16 namespace Eigen {
17 
18 namespace internal {
19 
20 // This class returns the evaluator kind from the expression storage kind.
21 // Default assumes index-based accessors
22 template<typename StorageKind>
23 struct storage_kind_to_evaluator_kind {
24  typedef IndexBased Kind;
25 };
26 
27 // This class returns the evaluator shape from the expression storage kind.
28 // It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc.
29 template<typename StorageKind> struct storage_kind_to_shape;
30 
31 template<> struct storage_kind_to_shape<Dense> { typedef DenseShape Shape; };
32 template<> struct storage_kind_to_shape<SolverStorage> { typedef SolverShape Shape; };
35 
36 // Evaluators have to be specialized with respect to various criteria such as:
37 // - storage/structure/shape
38 // - scalar type
39 // - etc.
40 // Therefore, we need specializations of evaluator providing additional template arguments for each kind of evaluator.
41 // We currently distinguish the following kinds of evaluators (a usage sketch follows this list):
42 // - unary_evaluator for expressions taking only one argument (CwiseUnaryOp, CwiseUnaryView, Transpose, MatrixWrapper, ArrayWrapper, Reverse, Replicate)
43 // - binary_evaluator for expressions taking two arguments (CwiseBinaryOp)
44 // - ternary_evaluator for expressions taking three arguments (CwiseTernaryOp)
45 // - product_evaluator for linear algebra products (Product); special case of binary_evaluator because it requires additional tags for dispatching.
46 // - mapbase_evaluator for Map, Block, Ref
47 // - block_evaluator for Block (special dispatching to a mapbase_evaluator or unary_evaluator)
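// Whatever its kind, an evaluator is always used the same way: it is constructed once from an
// expression and then queried through coeff()/coeffRef() and, when vectorization applies,
// packet()/writePacket(). A minimal usage sketch, assuming only the public dense API and read as
// statements inside a function with <Eigen/Core> included (illustration, not part of this file):
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(3,3), B = Eigen::MatrixXd::Random(3,3);
//   typedef Eigen::CwiseBinaryOp<Eigen::internal::scalar_sum_op<double,double>,
//                                const Eigen::MatrixXd, const Eigen::MatrixXd> Sum;
//   Sum sum = A + B;                            // expression only, nothing is computed yet
//   Eigen::internal::evaluator<Sum> eval(sum);  // resolves to a binary_evaluator
//   double x = eval.coeff(0,0);                 // computes A(0,0) + B(0,0) on the fly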
48 
49 template< typename T,
50  typename Arg1Kind = typename evaluator_traits<typename T::Arg1>::Kind,
51  typename Arg2Kind = typename evaluator_traits<typename T::Arg2>::Kind,
52  typename Arg3Kind = typename evaluator_traits<typename T::Arg3>::Kind,
53  typename Arg1Scalar = typename traits<typename T::Arg1>::Scalar,
54  typename Arg2Scalar = typename traits<typename T::Arg2>::Scalar,
55  typename Arg3Scalar = typename traits<typename T::Arg3>::Scalar> struct ternary_evaluator;
56 
57 template< typename T,
58  typename LhsKind = typename evaluator_traits<typename T::Lhs>::Kind,
59  typename RhsKind = typename evaluator_traits<typename T::Rhs>::Kind,
60  typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
61  typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct binary_evaluator;
62 
63 template< typename T,
64  typename Kind = typename evaluator_traits<typename T::NestedExpression>::Kind,
65  typename Scalar = typename T::Scalar> struct unary_evaluator;
66 
67 // evaluator_traits<T> contains traits for evaluator<T>
68 
69 template<typename T>
70 struct evaluator_traits_base
71 {
72  // by default, get evaluator kind and shape from storage
73  typedef typename storage_kind_to_evaluator_kind<typename traits<T>::StorageKind>::Kind Kind;
74  typedef typename storage_kind_to_shape<typename traits<T>::StorageKind>::Shape Shape;
75 };
76 
77 // Default evaluator traits
78 template<typename T>
79 struct evaluator_traits : public evaluator_traits_base<T>
80 {
81 };
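// For a plain dense object the two traits above resolve to index-based evaluation with a dense
// shape. A compile-time sketch of what gets selected (illustration only):
//   typedef evaluator_traits<Eigen::MatrixXd> Traits;
//   // Traits::Kind  is IndexBased -> coefficients are accessed by (row,col) or by linear index
//   // Traits::Shape is DenseShape -> shape tag used later for dispatching, e.g. in product evaluators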
82 
83 template<typename T, typename Shape = typename evaluator_traits<T>::Shape >
84 struct evaluator_assume_aliasing {
85  static const bool value = false;
86 };
87 
88 // By default, we assume a unary expression:
89 template<typename T>
90 struct evaluator : public unary_evaluator<T>
91 {
92  typedef unary_evaluator<T> Base;
93  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
94  explicit evaluator(const T& xpr) : Base(xpr) {}
95 };
96 
97 
98 // TODO: Think about const-correctness
99 template<typename T>
100 struct evaluator<const T>
101  : evaluator<T>
102 {
104  explicit evaluator(const T& xpr) : evaluator<T>(xpr) {}
105 };
106 
107 // ---------- base class for all evaluators ----------
108 
109 template<typename ExpressionType>
110 struct evaluator_base
111 {
112  // TODO: it is not very nice to have to propagate all these traits. They are currently only needed to handle outer/inner indices.
113  typedef traits<ExpressionType> ExpressionTraits;
114 
115  enum {
116  Alignment = 0
117  };
118  // noncopyable:
119  // Don't make this class inherit noncopyable as this kills EBO (Empty Base Optimization)
120  // and makes complex evaluators much larger than they should be.
122  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ~evaluator_base() {}
123 private:
124  EIGEN_DEVICE_FUNC evaluator_base(const evaluator_base&);
125  EIGEN_DEVICE_FUNC const evaluator_base& operator=(const evaluator_base&);
126 };
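// Background for the noncopyable comment above: evaluators of compound expressions embed one
// sub-evaluator per operand, so they rely on the Empty Base Optimization to stay as small as the
// data they actually hold. A standalone illustration of EBO (plain C++, not Eigen code):
//   struct Empty {};
//   struct AsBase   : Empty { double d; };   // typically sizeof(double): the empty base is folded away
//   struct AsMember { Empty e; double d; };  // larger: the empty member still needs its own storage
// Inheriting every evaluator from a common noncopyable helper would defeat this folding in deeply
// nested evaluators, hence the private, never-defined copy operations above instead.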
127 
128 // -------------------- Matrix and Array --------------------
129 //
130 // evaluator<PlainObjectBase> is a common base class for the
131 // Matrix and Array evaluators.
132 // Here we directly specialize evaluator. This is not really a unary expression, and it is, by definition, dense,
133 // so no need for more sophisticated dispatching.
134 
135 // This helper permits completely eliminating m_outerStride if it is known at compile time.
136 template<typename Scalar,int OuterStride> class plainobjectbase_evaluator_data {
137 public:
139  plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr)
140  {
141 #ifndef EIGEN_INTERNAL_DEBUGGING
142  EIGEN_UNUSED_VARIABLE(outerStride);
143 #endif
144  eigen_internal_assert(outerStride==OuterStride);
145  }
148  const Scalar *data;
149 };
150 
151 template<typename Scalar> class plainobjectbase_evaluator_data<Scalar,Dynamic> {
152 public:
154  plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr), m_outerStride(outerStride) {}
156  Index outerStride() const { return m_outerStride; }
157  const Scalar *data;
158 protected:
159  Index m_outerStride;
160 };
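// Net effect of the two classes above: for fixed-size objects the outer stride is a compile-time
// constant, so the evaluator stores nothing but the data pointer; only dynamic-size objects pay
// for an extra Index member. Roughly (actual sizes are platform dependent):
//   sizeof(evaluator<Eigen::Matrix4f>) ~ sizeof(const float*)                  // stride elided
//   sizeof(evaluator<Eigen::MatrixXf>) ~ sizeof(const float*) + sizeof(Index)  // stride stored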
161 
162 template<typename Derived>
163 struct evaluator<PlainObjectBase<Derived> >
164  : evaluator_base<Derived>
165 {
166  typedef PlainObjectBase<Derived> PlainObjectType;
167  typedef typename PlainObjectType::Scalar Scalar;
168  typedef typename PlainObjectType::CoeffReturnType CoeffReturnType;
169 
170  enum {
171  IsRowMajor = PlainObjectType::IsRowMajor,
172  IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime,
173  RowsAtCompileTime = PlainObjectType::RowsAtCompileTime,
174  ColsAtCompileTime = PlainObjectType::ColsAtCompileTime,
175 
179  };
180  enum {
181  // We do not need to know the outer stride for vectors
182  OuterStrideAtCompileTime = IsVectorAtCompileTime ? 0
183  : int(IsRowMajor) ? ColsAtCompileTime
184  : RowsAtCompileTime
185  };
186 
189  : m_d(0,OuterStrideAtCompileTime)
190  {
191  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
192  }
193 
195  explicit evaluator(const PlainObjectType& m)
196  : m_d(m.data(),IsVectorAtCompileTime ? 0 : m.outerStride())
197  {
198  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
199  }
200 
202  CoeffReturnType coeff(Index row, Index col) const
203  {
204  if (IsRowMajor)
205  return m_d.data[row * m_d.outerStride() + col];
206  else
207  return m_d.data[row + col * m_d.outerStride()];
208  }
209 
211  CoeffReturnType coeff(Index index) const
212  {
213  return m_d.data[index];
214  }
215 
218  {
219  if (IsRowMajor)
220  return const_cast<Scalar*>(m_d.data)[row * m_d.outerStride() + col];
221  else
222  return const_cast<Scalar*>(m_d.data)[row + col * m_d.outerStride()];
223  }
224 
226  Scalar& coeffRef(Index index)
227  {
228  return const_cast<Scalar*>(m_d.data)[index];
229  }
230 
231  template<int LoadMode, typename PacketType>
234  {
235  if (IsRowMajor)
236  return ploadt<PacketType, LoadMode>(m_d.data + row * m_d.outerStride() + col);
237  else
238  return ploadt<PacketType, LoadMode>(m_d.data + row + col * m_d.outerStride());
239  }
240 
241  template<int LoadMode, typename PacketType>
243  PacketType packet(Index index) const
244  {
245  return ploadt<PacketType, LoadMode>(m_d.data + index);
246  }
247 
248  template<int StoreMode,typename PacketType>
251  {
252  if (IsRowMajor)
253  return pstoret<Scalar, PacketType, StoreMode>
254  (const_cast<Scalar*>(m_d.data) + row * m_d.outerStride() + col, x);
255  else
256  return pstoret<Scalar, PacketType, StoreMode>
257  (const_cast<Scalar*>(m_d.data) + row + col * m_d.outerStride(), x);
258  }
259 
260  template<int StoreMode, typename PacketType>
262  void writePacket(Index index, const PacketType& x)
263  {
264  return pstoret<Scalar, PacketType, StoreMode>(const_cast<Scalar*>(m_d.data) + index, x);
265  }
266 
267 protected:
268 
270 };
271 
272 template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
273 struct evaluator<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
274  : evaluator<PlainObjectBase<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
275 {
277 
280 
282  explicit evaluator(const XprType& m)
283  : evaluator<PlainObjectBase<XprType> >(m)
284  { }
285 };
286 
287 template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
288 struct evaluator<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
289  : evaluator<PlainObjectBase<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
290 {
292 
295 
297  explicit evaluator(const XprType& m)
298  : evaluator<PlainObjectBase<XprType> >(m)
299  { }
300 };
301 
302 // -------------------- Transpose --------------------
303 
304 template<typename ArgType>
306  : evaluator_base<Transpose<ArgType> >
307 {
309 
310  enum {
314  };
315 
317  explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}
318 
319  typedef typename XprType::Scalar Scalar;
320  typedef typename XprType::CoeffReturnType CoeffReturnType;
321 
323  CoeffReturnType coeff(Index row, Index col) const
324  {
325  return m_argImpl.coeff(col, row);
326  }
327 
329  CoeffReturnType coeff(Index index) const
330  {
331  return m_argImpl.coeff(index);
332  }
333 
336  {
337  return m_argImpl.coeffRef(col, row);
338  }
339 
341  typename XprType::Scalar& coeffRef(Index index)
342  {
343  return m_argImpl.coeffRef(index);
344  }
345 
346  template<int LoadMode, typename PacketType>
349  {
350  return m_argImpl.template packet<LoadMode,PacketType>(col, row);
351  }
352 
353  template<int LoadMode, typename PacketType>
355  PacketType packet(Index index) const
356  {
357  return m_argImpl.template packet<LoadMode,PacketType>(index);
358  }
359 
360  template<int StoreMode, typename PacketType>
363  {
364  m_argImpl.template writePacket<StoreMode,PacketType>(col, row, x);
365  }
366 
367  template<int StoreMode, typename PacketType>
369  void writePacket(Index index, const PacketType& x)
370  {
371  m_argImpl.template writePacket<StoreMode,PacketType>(index, x);
372  }
373 
374 protected:
376 };
377 
378 // -------------------- CwiseNullaryOp --------------------
379 // Like Matrix and Array, this is not really a unary expression, so we directly specialize evaluator.
380 // Likewise, there is no need for more sophisticated dispatching here.
381 
382 template<typename Scalar,typename NullaryOp,
383  bool has_nullary = has_nullary_operator<NullaryOp>::value,
384  bool has_unary = has_unary_operator<NullaryOp>::value,
385  bool has_binary = has_binary_operator<NullaryOp>::value>
387 {
388  template <typename IndexType>
389  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return op(i,j); }
390  template <typename IndexType>
391  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
392 
393  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return op.template packetOp<T>(i,j); }
394  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
395 };
396 
397 template<typename Scalar,typename NullaryOp>
398 struct nullary_wrapper<Scalar,NullaryOp,true,false,false>
399 {
400  template <typename IndexType>
401  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType=0, IndexType=0) const { return op(); }
402  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType=0, IndexType=0) const { return op.template packetOp<T>(); }
403 };
404 
405 template<typename Scalar,typename NullaryOp>
406 struct nullary_wrapper<Scalar,NullaryOp,false,false,true>
407 {
408  template <typename IndexType>
409  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j=0) const { return op(i,j); }
410  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j=0) const { return op.template packetOp<T>(i,j); }
411 };
412 
413 // We need the following specialization for vector-only functors assigned to a runtime vector,
414 // for instance, using linspace and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd.
415 // In this case, i==0 and j is used for the actual iteration.
416 template<typename Scalar,typename NullaryOp>
417 struct nullary_wrapper<Scalar,NullaryOp,false,true,false>
418 {
419  template <typename IndexType>
420  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
421  eigen_assert(i==0 || j==0);
422  return op(i+j);
423  }
424  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
425  eigen_assert(i==0 || j==0);
426  return op.template packetOp<T>(i+j);
427  }
428 
429  template <typename IndexType>
430  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
431  template <typename T, typename IndexType>
432  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
433 };
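// Concretely, the (i,j) -> op(i+j) forwarding above is what allows a vector-only functor such as
// the one behind LinSpaced to be written through a two-dimensional destination, e.g. (public API
// only, illustration):
//   Eigen::MatrixXd M(4,10);
//   M.row(1) = Eigen::RowVectorXd::LinSpaced(10, 0.0, 1.0);  // here i==0 and j sweeps the columns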
434 
435 template<typename Scalar,typename NullaryOp>
436 struct nullary_wrapper<Scalar,NullaryOp,false,false,false> {};
437 
438 #if 0 && EIGEN_COMP_MSVC>0
439 // Disable this ugly workaround. This is now handled in traits<Ref>::match,
440 // but this piece of code might still come in handy if some other weird compilation
441 // errors pop up again.
442 
443 // MSVC exhibits a weird compilation error when
444 // compiling:
445 // Eigen::MatrixXf A = MatrixXf::Random(3,3);
446 // Ref<const MatrixXf> R = 2.f*A;
447 // and that has_*ary_operator<scalar_constant_op<float>> have not been instantiated yet.
448 // The "problem" is that evaluator<2.f*A> is instantiated by traits<Ref>::match<2.f*A>
449 // and at that time has_*ary_operator<T> returns true regardless of T.
450 // Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>.
451 // The trick is thus to defer the proper instantiation of nullary_wrapper when coeff(),
452 // and packet() are really instantiated as implemented below:
453 
454 // This is a simple wrapper around Index to enforce the re-instantiation of
455 // has_*ary_operator when needed.
456 template<typename T> struct nullary_wrapper_workaround_msvc {
457  nullary_wrapper_workaround_msvc(const T&);
458  operator T()const;
459 };
460 
461 template<typename Scalar,typename NullaryOp>
462 struct nullary_wrapper<Scalar,NullaryOp,true,true,true>
463 {
464  template <typename IndexType>
465  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
466  return nullary_wrapper<Scalar,NullaryOp,
470  }
471  template <typename IndexType>
472  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const {
473  return nullary_wrapper<Scalar,NullaryOp,
477  }
478 
479  template <typename T, typename IndexType>
480  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
481  return nullary_wrapper<Scalar,NullaryOp,
484  has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i,j);
485  }
486  template <typename T, typename IndexType>
487  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const {
488  return nullary_wrapper<Scalar,NullaryOp,
492  }
493 };
494 #endif // MSVC workaround
495 
496 template<typename NullaryOp, typename PlainObjectType>
497 struct evaluator<CwiseNullaryOp<NullaryOp,PlainObjectType> >
498  : evaluator_base<CwiseNullaryOp<NullaryOp,PlainObjectType> >
499 {
502 
503  enum {
505 
507  & ( HereditaryBits
511  Alignment = AlignedMax
512  };
513 
514  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n)
515  : m_functor(n.functor()), m_wrapper()
516  {
517  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
518  }
519 
520  typedef typename XprType::CoeffReturnType CoeffReturnType;
521 
522  template <typename IndexType>
524  CoeffReturnType coeff(IndexType row, IndexType col) const
525  {
526  return m_wrapper(m_functor, row, col);
527  }
528 
529  template <typename IndexType>
531  CoeffReturnType coeff(IndexType index) const
532  {
533  return m_wrapper(m_functor,index);
534  }
535 
536  template<int LoadMode, typename PacketType, typename IndexType>
538  PacketType packet(IndexType row, IndexType col) const
539  {
540  return m_wrapper.template packetOp<PacketType>(m_functor, row, col);
541  }
542 
543  template<int LoadMode, typename PacketType, typename IndexType>
545  PacketType packet(IndexType index) const
546  {
547  return m_wrapper.template packetOp<PacketType>(m_functor, index);
548  }
549 
550 protected:
551  const NullaryOp m_functor;
553 };
554 
555 // -------------------- CwiseUnaryOp --------------------
556 
557 template<typename UnaryOp, typename ArgType>
558 struct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased >
559  : evaluator_base<CwiseUnaryOp<UnaryOp, ArgType> >
560 {
562 
563  enum {
565 
569  };
570 
572  explicit unary_evaluator(const XprType& op) : m_d(op)
573  {
575  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
576  }
577 
578  typedef typename XprType::CoeffReturnType CoeffReturnType;
579 
581  CoeffReturnType coeff(Index row, Index col) const
582  {
583  return m_d.func()(m_d.argImpl.coeff(row, col));
584  }
585 
587  CoeffReturnType coeff(Index index) const
588  {
589  return m_d.func()(m_d.argImpl.coeff(index));
590  }
591 
592  template<int LoadMode, typename PacketType>
595  {
596  return m_d.func().packetOp(m_d.argImpl.template packet<LoadMode, PacketType>(row, col));
597  }
598 
599  template<int LoadMode, typename PacketType>
601  PacketType packet(Index index) const
602  {
603  return m_d.func().packetOp(m_d.argImpl.template packet<LoadMode, PacketType>(index));
604  }
605 
606 protected:
607 
608  // This helper permits completely eliminating the functor if it is empty.
609  struct Data
610  {
612  Data(const XprType& xpr) : op(xpr.functor()), argImpl(xpr.nestedExpression()) {}
614  const UnaryOp& func() const { return op; }
615  UnaryOp op;
617  };
618 
619  Data m_d;
620 };
621 
622 // -------------------- CwiseTernaryOp --------------------
623 
624 // this is a ternary expression
625 template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
626 struct evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
627  : public ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
628 {
631 
632  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
633 };
634 
635 template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
636 struct ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>, IndexBased, IndexBased>
637  : evaluator_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
638 {
640 
641  enum {
643 
648  StorageOrdersAgree = (int(Arg1Flags)&RowMajorBit)==(int(Arg2Flags)&RowMajorBit) && (int(Arg1Flags)&RowMajorBit)==(int(Arg3Flags)&RowMajorBit),
649  Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) & (
651  | (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) &
652  ( (StorageOrdersAgree ? LinearAccessBit : 0)
653  | (functor_traits<TernaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
654  )
655  )
656  ),
657  Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit),
658  Alignment = EIGEN_PLAIN_ENUM_MIN(
661  };
662 
663  EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr) : m_d(xpr)
664  {
666  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
667  }
668 
669  typedef typename XprType::CoeffReturnType CoeffReturnType;
670 
672  CoeffReturnType coeff(Index row, Index col) const
673  {
674  return m_d.func()(m_d.arg1Impl.coeff(row, col), m_d.arg2Impl.coeff(row, col), m_d.arg3Impl.coeff(row, col));
675  }
676 
678  CoeffReturnType coeff(Index index) const
679  {
680  return m_d.func()(m_d.arg1Impl.coeff(index), m_d.arg2Impl.coeff(index), m_d.arg3Impl.coeff(index));
681  }
682 
683  template<int LoadMode, typename PacketType>
686  {
687  return m_d.func().packetOp(m_d.arg1Impl.template packet<LoadMode,PacketType>(row, col),
688  m_d.arg2Impl.template packet<LoadMode,PacketType>(row, col),
689  m_d.arg3Impl.template packet<LoadMode,PacketType>(row, col));
690  }
691 
692  template<int LoadMode, typename PacketType>
694  PacketType packet(Index index) const
695  {
696  return m_d.func().packetOp(m_d.arg1Impl.template packet<LoadMode,PacketType>(index),
697  m_d.arg2Impl.template packet<LoadMode,PacketType>(index),
698  m_d.arg3Impl.template packet<LoadMode,PacketType>(index));
699  }
700 
701 protected:
702  // This helper permits completely eliminating the functor if it is empty.
703  struct Data
704  {
706  Data(const XprType& xpr) : op(xpr.functor()), arg1Impl(xpr.arg1()), arg2Impl(xpr.arg2()), arg3Impl(xpr.arg3()) {}
708  const TernaryOp& func() const { return op; }
709  TernaryOp op;
713  };
714 
715  Data m_d;
716 };
717 
718 // -------------------- CwiseBinaryOp --------------------
719 
720 // this is a binary expression
721 template<typename BinaryOp, typename Lhs, typename Rhs>
722 struct evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
723  : public binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
724 {
727 
729  explicit evaluator(const XprType& xpr) : Base(xpr) {}
730 };
731 
732 template<typename BinaryOp, typename Lhs, typename Rhs>
734  : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
735 {
737 
738  enum {
740 
744  StorageOrdersAgree = (int(LhsFlags)&RowMajorBit)==(int(RhsFlags)&RowMajorBit),
745  Flags0 = (int(LhsFlags) | int(RhsFlags)) & (
747  | (int(LhsFlags) & int(RhsFlags) &
748  ( (StorageOrdersAgree ? LinearAccessBit : 0)
749  | (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
750  )
751  )
752  ),
753  Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
755  };
756 
758  explicit binary_evaluator(const XprType& xpr) : m_d(xpr)
759  {
761  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
762  }
763 
764  typedef typename XprType::CoeffReturnType CoeffReturnType;
765 
767  CoeffReturnType coeff(Index row, Index col) const
768  {
769  return m_d.func()(m_d.lhsImpl.coeff(row, col), m_d.rhsImpl.coeff(row, col));
770  }
771 
773  CoeffReturnType coeff(Index index) const
774  {
775  return m_d.func()(m_d.lhsImpl.coeff(index), m_d.rhsImpl.coeff(index));
776  }
777 
778  template<int LoadMode, typename PacketType>
781  {
782  return m_d.func().packetOp(m_d.lhsImpl.template packet<LoadMode,PacketType>(row, col),
783  m_d.rhsImpl.template packet<LoadMode,PacketType>(row, col));
784  }
785 
786  template<int LoadMode, typename PacketType>
788  PacketType packet(Index index) const
789  {
790  return m_d.func().packetOp(m_d.lhsImpl.template packet<LoadMode,PacketType>(index),
791  m_d.rhsImpl.template packet<LoadMode,PacketType>(index));
792  }
793 
794 protected:
795 
797  // This helper permits completely eliminating the functor if it is empty.
797  struct Data
798  {
800  Data(const XprType& xpr) : op(xpr.functor()), lhsImpl(xpr.lhs()), rhsImpl(xpr.rhs()) {}
802  const BinaryOp& func() const { return op; }
803  BinaryOp op;
806  };
807 
808  Data m_d;
809 };
810 
811 // -------------------- CwiseUnaryView --------------------
812 
813 template<typename UnaryOp, typename ArgType>
814 struct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType>, IndexBased>
815  : evaluator_base<CwiseUnaryView<UnaryOp, ArgType> >
816 {
818 
819  enum {
821 
822  Flags = (evaluator<ArgType>::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)),
823 
824  Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost...
825  };
826 
827  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op) : m_d(op)
828  {
830  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
831  }
832 
833  typedef typename XprType::Scalar Scalar;
834  typedef typename XprType::CoeffReturnType CoeffReturnType;
835 
837  CoeffReturnType coeff(Index row, Index col) const
838  {
839  return m_d.func()(m_d.argImpl.coeff(row, col));
840  }
841 
843  CoeffReturnType coeff(Index index) const
844  {
845  return m_d.func()(m_d.argImpl.coeff(index));
846  }
847 
850  {
851  return m_d.func()(m_d.argImpl.coeffRef(row, col));
852  }
853 
855  Scalar& coeffRef(Index index)
856  {
857  return m_d.func()(m_d.argImpl.coeffRef(index));
858  }
859 
860 protected:
861 
863  // This helper permits completely eliminating the functor if it is empty.
863  struct Data
864  {
866  Data(const XprType& xpr) : op(xpr.functor()), argImpl(xpr.nestedExpression()) {}
868  const UnaryOp& func() const { return op; }
869  UnaryOp op;
871  };
872 
873  Data m_d;
874 };
875 
876 // -------------------- Map --------------------
877 
878 // FIXME perhaps the PlainObjectType could be provided by Derived::PlainObject ?
879 // but that might complicate template specialization
880 template<typename Derived, typename PlainObjectType>
882 
883 template<typename Derived, typename PlainObjectType>
884 struct mapbase_evaluator : evaluator_base<Derived>
885 {
886  typedef Derived XprType;
887  typedef typename XprType::PointerType PointerType;
888  typedef typename XprType::Scalar Scalar;
889  typedef typename XprType::CoeffReturnType CoeffReturnType;
890 
891  enum {
892  IsRowMajor = XprType::RowsAtCompileTime,
893  ColsAtCompileTime = XprType::ColsAtCompileTime,
895  };
896 
898  explicit mapbase_evaluator(const XprType& map)
899  : m_data(const_cast<PointerType>(map.data())),
900  m_innerStride(map.innerStride()),
901  m_outerStride(map.outerStride())
902  {
904  PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
905  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
906  }
907 
909  CoeffReturnType coeff(Index row, Index col) const
910  {
911  return m_data[col * colStride() + row * rowStride()];
912  }
913 
915  CoeffReturnType coeff(Index index) const
916  {
917  return m_data[index * m_innerStride.value()];
918  }
919 
922  {
923  return m_data[col * colStride() + row * rowStride()];
924  }
925 
927  Scalar& coeffRef(Index index)
928  {
929  return m_data[index * m_innerStride.value()];
930  }
931 
932  template<int LoadMode, typename PacketType>
935  {
936  PointerType ptr = m_data + row * rowStride() + col * colStride();
937  return internal::ploadt<PacketType, LoadMode>(ptr);
938  }
939 
940  template<int LoadMode, typename PacketType>
942  PacketType packet(Index index) const
943  {
944  return internal::ploadt<PacketType, LoadMode>(m_data + index * m_innerStride.value());
945  }
946 
947  template<int StoreMode, typename PacketType>
950  {
951  PointerType ptr = m_data + row * rowStride() + col * colStride();
952  return internal::pstoret<Scalar, PacketType, StoreMode>(ptr, x);
953  }
954 
955  template<int StoreMode, typename PacketType>
957  void writePacket(Index index, const PacketType& x)
958  {
959  internal::pstoret<Scalar, PacketType, StoreMode>(m_data + index * m_innerStride.value(), x);
960  }
961 protected:
964  return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value();
965  }
968  return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value();
969  }
970 
971  PointerType m_data;
974 };
975 
976 template<typename PlainObjectType, int MapOptions, typename StrideType>
977 struct evaluator<Map<PlainObjectType, MapOptions, StrideType> >
978  : public mapbase_evaluator<Map<PlainObjectType, MapOptions, StrideType>, PlainObjectType>
979 {
981  typedef typename XprType::Scalar Scalar;
982  // TODO: should check for smaller packet types once we can handle multi-sized packet types
984 
985  enum {
986  InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
987  ? int(PlainObjectType::InnerStrideAtCompileTime)
988  : int(StrideType::InnerStrideAtCompileTime),
989  OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
990  ? int(PlainObjectType::OuterStrideAtCompileTime)
991  : int(StrideType::OuterStrideAtCompileTime),
992  HasNoInnerStride = InnerStrideAtCompileTime == 1,
993  HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,
994  HasNoStride = HasNoInnerStride && HasNoOuterStride,
995  IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic,
996 
997  PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit),
998  LinearAccessMask = bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? ~int(0) : ~int(LinearAccessBit),
999  Flags = int( evaluator<PlainObjectType>::Flags) & (LinearAccessMask&PacketAccessMask),
1000 
1001  Alignment = int(MapOptions)&int(AlignedMask)
1002  };
1003 
1004  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map)
1005  : mapbase_evaluator<XprType, PlainObjectType>(map)
1006  { }
1007 };
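// Consequence of the enum logic above: an inner stride that is not known to be 1 masks out
// PacketAccessBit (the coefficients are not contiguous), and any explicit stride masks out
// LinearAccessBit unless the mapped object is a vector. For example (public Map/Stride API only):
//   float buf[64];
//   Eigen::Map<Eigen::MatrixXf> contiguous(buf, 8, 8);                           // vectorizable
//   Eigen::Map<Eigen::MatrixXf, 0, Eigen::InnerStride<2> > strided(buf, 4, 8);   // packet access masked out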
1008 
1009 // -------------------- Ref --------------------
1010 
1011 template<typename PlainObjectType, int RefOptions, typename StrideType>
1012 struct evaluator<Ref<PlainObjectType, RefOptions, StrideType> >
1013  : public mapbase_evaluator<Ref<PlainObjectType, RefOptions, StrideType>, PlainObjectType>
1014 {
1016 
1017  enum {
1020  };
1021 
1023  explicit evaluator(const XprType& ref)
1024  : mapbase_evaluator<XprType, PlainObjectType>(ref)
1025  { }
1026 };
1027 
1028 // -------------------- Block --------------------
1029 
1030 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel,
1032 
1033 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1034 struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
1035  : block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel>
1036 {
1038  typedef typename XprType::Scalar Scalar;
1039  // TODO: should check for smaller packet types once we can handle multi-sized packet types
1041 
1042  enum {
1044 
1049 
1050  ArgTypeIsRowMajor = (int(evaluator<ArgType>::Flags)&RowMajorBit) != 0,
1051  IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1
1052  : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0
1053  : ArgTypeIsRowMajor,
1054  HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor),
1055  InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
1056  InnerStrideAtCompileTime = HasSameStorageOrderAsArgType
1057  ? int(inner_stride_at_compile_time<ArgType>::ret)
1058  : int(outer_stride_at_compile_time<ArgType>::ret),
1059  OuterStrideAtCompileTime = HasSameStorageOrderAsArgType
1060  ? int(outer_stride_at_compile_time<ArgType>::ret)
1061  : int(inner_stride_at_compile_time<ArgType>::ret),
1062  MaskPacketAccessBit = (InnerStrideAtCompileTime == 1 || HasSameStorageOrderAsArgType) ? PacketAccessBit : 0,
1063 
1064  FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator<ArgType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
1065  FlagsRowMajorBit = XprType::Flags&RowMajorBit,
1066  Flags0 = evaluator<ArgType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
1067  DirectAccessBit |
1068  MaskPacketAccessBit),
1069  Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit,
1070 
1071  PacketAlignment = unpacket_traits<PacketScalar>::alignment,
1072  Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic)
1073  && (OuterStrideAtCompileTime!=0)
1074  && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0,
1075  Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ArgType>::Alignment, Alignment0)
1076  };
1079  explicit evaluator(const XprType& block) : block_evaluator_type(block)
1080  {
1081  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
1082  }
1083 };
1084 
1085 // no direct-access => dispatch to a unary evaluator
1086 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1087 struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /*HasDirectAccess*/ false>
1088  : unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
1089 {
1091 
1093  explicit block_evaluator(const XprType& block)
1094  : unary_evaluator<XprType>(block)
1095  {}
1096 };
1097 
1098 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1099 struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBased>
1100  : evaluator_base<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
1101 {
1103 
1105  explicit unary_evaluator(const XprType& block)
1106  : m_argImpl(block.nestedExpression()),
1107  m_startRow(block.startRow()),
1108  m_startCol(block.startCol()),
1109  m_linear_offset(ForwardLinearAccess?(ArgType::IsRowMajor ? block.startRow()*block.nestedExpression().cols() + block.startCol() : block.startCol()*block.nestedExpression().rows() + block.startRow()):0)
1110  { }
1111 
1112  typedef typename XprType::Scalar Scalar;
1113  typedef typename XprType::CoeffReturnType CoeffReturnType;
1114 
1115  enum {
1116  RowsAtCompileTime = XprType::RowsAtCompileTime,
1117  ForwardLinearAccess = (InnerPanel || int(XprType::IsRowMajor)==int(ArgType::IsRowMajor)) && bool(evaluator<ArgType>::Flags&LinearAccessBit)
1118  };
1119 
1121  CoeffReturnType coeff(Index row, Index col) const
1122  {
1123  return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col);
1124  }
1125 
1127  CoeffReturnType coeff(Index index) const
1128  {
1129  return linear_coeff_impl(index, bool_constant<ForwardLinearAccess>());
1130  }
1131 
1134  {
1135  return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col);
1136  }
1137 
1139  Scalar& coeffRef(Index index)
1140  {
1141  return linear_coeffRef_impl(index, bool_constant<ForwardLinearAccess>());
1142  }
1143 
1144  template<int LoadMode, typename PacketType>
1147  {
1148  return m_argImpl.template packet<LoadMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col);
1149  }
1150 
1151  template<int LoadMode, typename PacketType>
1153  PacketType packet(Index index) const
1154  {
1155  if (ForwardLinearAccess)
1156  return m_argImpl.template packet<LoadMode,PacketType>(m_linear_offset.value() + index);
1157  else
1158  return packet<LoadMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
1159  RowsAtCompileTime == 1 ? index : 0);
1160  }
1161 
1162  template<int StoreMode, typename PacketType>
1165  {
1166  return m_argImpl.template writePacket<StoreMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col, x);
1167  }
1168 
1169  template<int StoreMode, typename PacketType>
1171  void writePacket(Index index, const PacketType& x)
1172  {
1173  if (ForwardLinearAccess)
1174  return m_argImpl.template writePacket<StoreMode,PacketType>(m_linear_offset.value() + index, x);
1175  else
1176  return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
1177  RowsAtCompileTime == 1 ? index : 0,
1178  x);
1179  }
1180 
1181 protected:
1183  CoeffReturnType linear_coeff_impl(Index index, internal::true_type /* ForwardLinearAccess */) const
1184  {
1185  return m_argImpl.coeff(m_linear_offset.value() + index);
1186  }
1188  CoeffReturnType linear_coeff_impl(Index index, internal::false_type /* not ForwardLinearAccess */) const
1189  {
1190  return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
1191  }
1192 
1194  Scalar& linear_coeffRef_impl(Index index, internal::true_type /* ForwardLinearAccess */)
1195  {
1196  return m_argImpl.coeffRef(m_linear_offset.value() + index);
1197  }
1199  Scalar& linear_coeffRef_impl(Index index, internal::false_type /* not ForwardLinearAccess */)
1200  {
1201  return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
1202  }
1203 
1208 };
1209 
1210 // TODO: This evaluator does not actually use the child evaluator;
1211 // all action is via the data() as returned by the Block expression.
1212 
1213 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1214 struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAccess */ true>
1215  : mapbase_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>,
1216  typename Block<ArgType, BlockRows, BlockCols, InnerPanel>::PlainObject>
1217 {
1219  typedef typename XprType::Scalar Scalar;
1220 
1222  explicit block_evaluator(const XprType& block)
1223  : mapbase_evaluator<XprType, typename XprType::PlainObject>(block)
1224  {
1225  // TODO: for the 3.3 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime
1226  eigen_assert(((internal::UIntPtr(block.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator<XprType>::Alignment)) == 0) && "data is not aligned");
1227  }
1228 };
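// Summary of the block dispatch: when the underlying expression exposes its data directly (plain
// objects, Map, ...), the evaluator above works through a raw pointer plus runtime strides via
// mapbase_evaluator; otherwise the index-based unary_evaluator further up simply shifts every
// access by (startRow, startCol). For example (public dense API only):
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(10,10);
//   A.block(2,3,4,4).setZero();      // direct access: pointer + strides
//   ((A+A).block(2,3,4,4)).eval();   // no direct access: offset coefficient forwarding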
1229 
1230 
1231 // -------------------- Select --------------------
1232 // NOTE shall we introduce a ternary_evaluator?
1233 
1234 // TODO enable vectorization for Select
1235 template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
1236 struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
1237  : evaluator_base<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
1238 {
1240  enum {
1244 
1246 
1248  };
1249 
1251  explicit evaluator(const XprType& select)
1252  : m_conditionImpl(select.conditionMatrix()),
1253  m_thenImpl(select.thenMatrix()),
1254  m_elseImpl(select.elseMatrix())
1255  {
1256  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
1257  }
1258 
1259  typedef typename XprType::CoeffReturnType CoeffReturnType;
1260 
1262  CoeffReturnType coeff(Index row, Index col) const
1263  {
1264  if (m_conditionImpl.coeff(row, col))
1265  return m_thenImpl.coeff(row, col);
1266  else
1267  return m_elseImpl.coeff(row, col);
1268  }
1269 
1271  CoeffReturnType coeff(Index index) const
1272  {
1273  if (m_conditionImpl.coeff(index))
1274  return m_thenImpl.coeff(index);
1275  else
1276  return m_elseImpl.coeff(index);
1277  }
1278 
1279 protected:
1283 };
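// The evaluator above backs DenseBase::select(), which picks between the "then" and "else"
// expressions coefficient by coefficient without materializing a temporary. For example
// (public Array API only):
//   Eigen::ArrayXd x = Eigen::ArrayXd::LinSpaced(5, -2.0, 2.0);
//   Eigen::ArrayXd clamped = (x > 0.0).select(x, 0.0);   // per coefficient: cond ? then : else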
1284 
1285 
1286 // -------------------- Replicate --------------------
1287 
1288 template<typename ArgType, int RowFactor, int ColFactor>
1289 struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
1290  : evaluator_base<Replicate<ArgType, RowFactor, ColFactor> >
1291 {
1293  typedef typename XprType::CoeffReturnType CoeffReturnType;
1294  enum {
1295  Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor
1296  };
1299 
1300  enum {
1302  LinearAccessMask = XprType::IsVectorAtCompileTime ? LinearAccessBit : 0,
1304 
1306  };
1307 
1309  explicit unary_evaluator(const XprType& replicate)
1310  : m_arg(replicate.nestedExpression()),
1311  m_argImpl(m_arg),
1312  m_rows(replicate.nestedExpression().rows()),
1313  m_cols(replicate.nestedExpression().cols())
1314  {}
1315 
1317  CoeffReturnType coeff(Index row, Index col) const
1318  {
1319  // try to avoid using modulo; this is a pure optimization strategy
1320  const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
1321  : RowFactor==1 ? row
1322  : row % m_rows.value();
1323  const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
1324  : ColFactor==1 ? col
1325  : col % m_cols.value();
1326 
1327  return m_argImpl.coeff(actual_row, actual_col);
1328  }
1329 
1331  CoeffReturnType coeff(Index index) const
1332  {
1333  // try to avoid using modulo; this is a pure optimization strategy
1334  const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
1335  ? (ColFactor==1 ? index : index%m_cols.value())
1336  : (RowFactor==1 ? index : index%m_rows.value());
1337 
1338  return m_argImpl.coeff(actual_index);
1339  }
1340 
1341  template<int LoadMode, typename PacketType>
1344  {
1345  const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
1346  : RowFactor==1 ? row
1347  : row % m_rows.value();
1348  const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
1349  : ColFactor==1 ? col
1350  : col % m_cols.value();
1351 
1352  return m_argImpl.template packet<LoadMode,PacketType>(actual_row, actual_col);
1353  }
1354 
1355  template<int LoadMode, typename PacketType>
1357  PacketType packet(Index index) const
1358  {
1359  const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
1360  ? (ColFactor==1 ? index : index%m_cols.value())
1361  : (RowFactor==1 ? index : index%m_rows.value());
1362 
1363  return m_argImpl.template packet<LoadMode,PacketType>(actual_index);
1364  }
1365 
1366 protected:
1367  const ArgTypeNested m_arg;
1371 };
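// The branches in coeff()/packet() above skip the modulo whenever a compile-time size or factor
// makes it unnecessary, and fall back to the wrap-around only in the replicated direction.
// For example (public dense API only):
//   Eigen::RowVector3d r(1,2,3);
//   auto rep = r.replicate<1,3>();   // 1 x 9 result: actual_row is simply 0 (RowsAtCompileTime==1),
//                                    // only the column index needs the wrap-around col % 3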
1372 
1373 // -------------------- MatrixWrapper and ArrayWrapper --------------------
1374 //
1375 // evaluator_wrapper_base<T> is a common base class for the
1376 // MatrixWrapper and ArrayWrapper evaluators.
1377 
1378 template<typename XprType>
1380  : evaluator_base<XprType>
1381 {
1383  enum {
1387  };
1388 
1390  explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}
1391 
1392  typedef typename ArgType::Scalar Scalar;
1393  typedef typename ArgType::CoeffReturnType CoeffReturnType;
1394 
1396  CoeffReturnType coeff(Index row, Index col) const
1397  {
1398  return m_argImpl.coeff(row, col);
1399  }
1400 
1402  CoeffReturnType coeff(Index index) const
1403  {
1404  return m_argImpl.coeff(index);
1405  }
1406 
1409  {
1410  return m_argImpl.coeffRef(row, col);
1411  }
1412 
1414  Scalar& coeffRef(Index index)
1415  {
1416  return m_argImpl.coeffRef(index);
1417  }
1418 
1419  template<int LoadMode, typename PacketType>
1422  {
1423  return m_argImpl.template packet<LoadMode,PacketType>(row, col);
1424  }
1425 
1426  template<int LoadMode, typename PacketType>
1428  PacketType packet(Index index) const
1429  {
1430  return m_argImpl.template packet<LoadMode,PacketType>(index);
1431  }
1432 
1433  template<int StoreMode, typename PacketType>
1436  {
1437  m_argImpl.template writePacket<StoreMode>(row, col, x);
1438  }
1439 
1440  template<int StoreMode, typename PacketType>
1442  void writePacket(Index index, const PacketType& x)
1443  {
1444  m_argImpl.template writePacket<StoreMode>(index, x);
1445  }
1446 
1447 protected:
1449 };
1450 
1451 template<typename TArgType>
1452 struct unary_evaluator<MatrixWrapper<TArgType> >
1453  : evaluator_wrapper_base<MatrixWrapper<TArgType> >
1454 {
1456 
1458  explicit unary_evaluator(const XprType& wrapper)
1459  : evaluator_wrapper_base<MatrixWrapper<TArgType> >(wrapper.nestedExpression())
1460  { }
1461 };
1462 
1463 template<typename TArgType>
1464 struct unary_evaluator<ArrayWrapper<TArgType> >
1465  : evaluator_wrapper_base<ArrayWrapper<TArgType> >
1466 {
1468 
1470  explicit unary_evaluator(const XprType& wrapper)
1471  : evaluator_wrapper_base<ArrayWrapper<TArgType> >(wrapper.nestedExpression())
1472  { }
1473 };
1474 
1475 
1476 // -------------------- Reverse --------------------
1477 
1478 // defined in Reverse.h:
1479 template<typename PacketType, bool ReversePacket> struct reverse_packet_cond;
1480 
1481 template<typename ArgType, int Direction>
1482 struct unary_evaluator<Reverse<ArgType, Direction> >
1483  : evaluator_base<Reverse<ArgType, Direction> >
1484 {
1486  typedef typename XprType::Scalar Scalar;
1487  typedef typename XprType::CoeffReturnType CoeffReturnType;
1488 
1489  enum {
1490  IsRowMajor = XprType::IsRowMajor,
1491  IsColMajor = !IsRowMajor,
1492  ReverseRow = (Direction == Vertical) || (Direction == BothDirections),
1493  ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),
1494  ReversePacket = (Direction == BothDirections)
1495  || ((Direction == Vertical) && IsColMajor)
1496  || ((Direction == Horizontal) && IsRowMajor),
1497 
1499 
1500  // let's enable LinearAccess only with vectorization because of the product overhead
1501  // FIXME enable DirectAccess with negative strides?
1503  LinearAccess = ( (Direction==BothDirections) && (int(Flags0)&PacketAccessBit) )
1504  || ((ReverseRow && XprType::ColsAtCompileTime==1) || (ReverseCol && XprType::RowsAtCompileTime==1))
1505  ? LinearAccessBit : 0,
1506 
1507  Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess),
1508 
1509  Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f.
1510  };
1511 
1513  explicit unary_evaluator(const XprType& reverse)
1514  : m_argImpl(reverse.nestedExpression()),
1515  m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1),
1516  m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1)
1517  { }
1518 
1520  CoeffReturnType coeff(Index row, Index col) const
1521  {
1522  return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row,
1523  ReverseCol ? m_cols.value() - col - 1 : col);
1524  }
1525 
1527  CoeffReturnType coeff(Index index) const
1528  {
1529  return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1);
1530  }
1531 
1534  {
1535  return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row,
1536  ReverseCol ? m_cols.value() - col - 1 : col);
1537  }
1538 
1540  Scalar& coeffRef(Index index)
1541  {
1542  return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1);
1543  }
1544 
1545  template<int LoadMode, typename PacketType>
1548  {
1549  enum {
1550  PacketSize = unpacket_traits<PacketType>::size,
1551  OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
1552  OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1
1553  };
1555  return reverse_packet::run(m_argImpl.template packet<LoadMode,PacketType>(
1556  ReverseRow ? m_rows.value() - row - OffsetRow : row,
1557  ReverseCol ? m_cols.value() - col - OffsetCol : col));
1558  }
1559 
1560  template<int LoadMode, typename PacketType>
1562  PacketType packet(Index index) const
1563  {
1564  enum { PacketSize = unpacket_traits<PacketType>::size };
1565  return preverse(m_argImpl.template packet<LoadMode,PacketType>(m_rows.value() * m_cols.value() - index - PacketSize));
1566  }
1567 
1568  template<int LoadMode, typename PacketType>
1571  {
1572  // FIXME we could factorize some code with packet(i,j)
1573  enum {
1574  PacketSize = unpacket_traits<PacketType>::size,
1575  OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
1576  OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1
1577  };
1579  m_argImpl.template writePacket<LoadMode>(
1580  ReverseRow ? m_rows.value() - row - OffsetRow : row,
1581  ReverseCol ? m_cols.value() - col - OffsetCol : col,
1582  reverse_packet::run(x));
1583  }
1584 
1585  template<int LoadMode, typename PacketType>
1587  void writePacket(Index index, const PacketType& x)
1588  {
1589  enum { PacketSize = unpacket_traits<PacketType>::size };
1590  m_argImpl.template writePacket<LoadMode>
1591  (m_rows.value() * m_cols.value() - index - PacketSize, preverse(x));
1592  }
1593 
1594 protected:
1596 
1597  // If we do not reverse rows, then we do not need to know the number of rows; same for columns
1598  // Nonetheless, in this case it is important to set it to 1 so that the coeff(index) method works fine for vectors.
1601 };
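// Note on the OffsetRow/OffsetCol adjustment in packet() above: a reversed packet load must start
// PacketSize-1 coefficients earlier so that, once the loaded packet is itself reversed, it lines
// up with the requested coefficients. For instance, with PacketSize==4, m_rows==10 and a
// row-reversed column-major argument, packet(0,c) loads argument rows 6,7,8,9 of column c and
// returns them as 9,8,7,6, which are exactly coefficients 0..3 of the reversed expression.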
1602 
1603 
1604 // -------------------- Diagonal --------------------
1605 
1606 template<typename ArgType, int DiagIndex>
1607 struct evaluator<Diagonal<ArgType, DiagIndex> >
1608  : evaluator_base<Diagonal<ArgType, DiagIndex> >
1609 {
1611 
1612  enum {
1614 
1615  Flags = (unsigned int)(evaluator<ArgType>::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit,
1616 
1617  Alignment = 0
1618  };
1619 
1621  explicit evaluator(const XprType& diagonal)
1622  : m_argImpl(diagonal.nestedExpression()),
1623  m_index(diagonal.index())
1624  { }
1625 
1626  typedef typename XprType::Scalar Scalar;
1627  typedef typename XprType::CoeffReturnType CoeffReturnType;
1628 
1630  CoeffReturnType coeff(Index row, Index) const
1631  {
1632  return m_argImpl.coeff(row + rowOffset(), row + colOffset());
1633  }
1634 
1636  CoeffReturnType coeff(Index index) const
1637  {
1638  return m_argImpl.coeff(index + rowOffset(), index + colOffset());
1639  }
1640 
1643  {
1644  return m_argImpl.coeffRef(row + rowOffset(), row + colOffset());
1645  }
1646 
1648  Scalar& coeffRef(Index index)
1649  {
1650  return m_argImpl.coeffRef(index + rowOffset(), index + colOffset());
1651  }
1652 
1653 protected:
1656 
1657 private:
1659  Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); }
1661  Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; }
1662 };
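// The two offset helpers above encode the usual diagonal convention: for DiagIndex k, coefficient
// i of the diagonal lives at (i, i+k) when k>=0 and at (i-k, i) when k<0. For example
// (public dense API only):
//   Eigen::Matrix4d M = Eigen::Matrix4d::Random();
//   double d0 = M.diagonal()(1);      // M(1,1)
//   double d1 = M.diagonal<1>()(1);   // M(1,2): rowOffset()==0, colOffset()==1
//   double dm = M.diagonal<-1>()(1);  // M(2,1): rowOffset()==1, colOffset()==0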
1663 
1664 
1665 //----------------------------------------------------------------------
1666 // deprecated code
1667 //----------------------------------------------------------------------
1668 
1669 // -------------------- EvalToTemp --------------------
1670 
1671 // expression class for evaluating a nested expression into a temporary
1672 
1673 template<typename ArgType> class EvalToTemp;
1674 
1675 template<typename ArgType>
1676 struct traits<EvalToTemp<ArgType> >
1677  : public traits<ArgType>
1678 { };
1679 
1680 template<typename ArgType>
1681 class EvalToTemp
1682  : public dense_xpr_base<EvalToTemp<ArgType> >::type
1683 {
1684  public:
1685 
1688 
1689  explicit EvalToTemp(const ArgType& arg)
1690  : m_arg(arg)
1691  { }
1692 
1693  const ArgType& arg() const
1694  {
1695  return m_arg;
1696  }
1697 
1699  {
1700  return m_arg.rows();
1701  }
1702 
1704  {
1705  return m_arg.cols();
1706  }
1707 
1708  private:
1709  const ArgType& m_arg;
1710 };
1711 
1712 template<typename ArgType>
1713 struct evaluator<EvalToTemp<ArgType> >
1714  : public evaluator<typename ArgType::PlainObject>
1715 {
1717  typedef typename ArgType::PlainObject PlainObject;
1719 
1720  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
1721  : m_result(xpr.arg())
1722  {
1723  ::new (static_cast<Base*>(this)) Base(m_result);
1724  }
1725 
1726  // This constructor is used when nesting an EvalTo evaluator in another evaluator
1728  : m_result(arg)
1729  {
1730  ::new (static_cast<Base*>(this)) Base(m_result);
1731  }
1732 
1733 protected:
1734  PlainObject m_result;
1735 };
1736 
1737 } // namespace internal
1738 
1739 } // end namespace Eigen
1740 
1741 #endif // EIGEN_COREEVALUATORS_H
EIGEN_STRONG_INLINE PacketType packet(Index index) const
A matrix or vector expression mapping an existing expression.
Definition: Ref.h:281
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp &op, IndexType i, IndexType j) const
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
binary_evaluator< CwiseBinaryOp< BinaryOp, Lhs, Rhs > > Base
ternary_evaluator< CwiseTernaryOp< TernaryOp, Arg1, Arg2, Arg3 > > Base
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp &op, IndexType i) const
const internal::variable_if_dynamic< Index, XprType::OuterStrideAtCompileTime > m_outerStride
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE evaluator_base()
#define EIGEN_DEVICE_FUNC
Definition: Macros.h:976
EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType &x)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar & coeffRef(Index row, Index col)
EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType &x)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE evaluator()
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar & coeffRef(Index row, Index col)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE unary_evaluator(const XprType &reverse)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType linear_coeff_impl(Index index, internal::true_type) const
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE unary_evaluator(const XprType &wrapper)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
Expression of a fixed-size or dynamic-size block.
Definition: Block.h:103
void reverse(const MatrixType &m)
#define EIGEN_PLAIN_ENUM_MIN(a, b)
Definition: Macros.h:1288
storage_kind_to_shape< typename traits< BandMatrixWrapper< _CoefficientsType, _Rows, _Cols, _Supers, _Subs, _Options > >::StorageKind >::Shape Shape
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp &op, IndexType=0, IndexType=0) const
General-purpose arrays with easy API for coefficient-wise operations.
Definition: Array.h:45
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const UnaryOp & func() const
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE mapbase_evaluator(const XprType &map)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp &op, IndexType i, IndexType j) const
m col(1)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(IndexType index) const
remove_all< typename XprType::NestedExpressionType >::type ArgType
EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType &x)
EIGEN_STRONG_INLINE PacketType packet(Index index) const
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp &op, IndexType i, IndexType j=0) const
const variable_if_dynamic< Index,(ArgType::ColsAtCompileTime==1 &&BlockCols==1) ? 0 :Dynamic > m_startCol
Expression of a diagonal/subdiagonal/superdiagonal in a matrix.
Definition: Diagonal.h:63
EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType &x)
const int Dynamic
Definition: Constants.h:22
const unsigned int EvalBeforeNestingBit
Definition: Constants.h:70
#define eigen_internal_assert(x)
Definition: Macros.h:1043
const variable_if_dynamic< Index, ReverseCol ? ArgType::ColsAtCompileTime :1 > m_cols
Generic expression where a coefficient-wise unary operator is applied to an expression.
Definition: CwiseUnaryOp.h:55
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar & coeffRef(Index row, Index col)
EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType &x)
EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const
The matrix class, also used for vectors and row-vectors.
const ArgType & arg() const
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE evaluator(const T &xpr)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const
Convenience specialization of Stride to specify only an outer stride See class Map for some examples...
Definition: Stride.h:106
Expression of the reverse of a vector or matrix.
Definition: Reverse.h:63
set noclip points set clip one set noclip two set bar set border lt lw set xdata set ydata set zdata set x2data set y2data set boxwidth set dummy x
internal::enable_if< internal::valid_indexed_view_overload< RowIndices, ColIndices >::value &&internal::traits< typename EIGEN_INDEXED_VIEW_METHOD_TYPE< RowIndices, ColIndices >::type >::ReturnAsIndexedView, typename EIGEN_INDEXED_VIEW_METHOD_TYPE< RowIndices, ColIndices >::type >::type operator()(const RowIndices &rowIndices, const ColIndices &colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST
EIGEN_DEVICE_FUNC evaluator(const ArgType &arg)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar & linear_coeffRef_impl(Index index, internal::false_type)
const unsigned int LinearAccessBit
Definition: Constants.h:130
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE plainobjectbase_evaluator_data(const Scalar *ptr, Index outerStride)
ArgType::CoeffReturnType CoeffReturnType
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE evaluator(const XprType &xpr)
std::ptrdiff_t j
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp &op, IndexType i, IndexType j) const
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp &op, IndexType i) const
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar & coeffRef(Index index)
#define EIGEN_UNUSED_VARIABLE(var)
Definition: Macros.h:1076
Select< ConditionMatrixType, ThenMatrixType, ElseMatrixType > XprType
Point2 t(10, 10)
Definition: pytypes.h:1370
EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp &op, IndexType i, IndexType j=0) const
EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf &a)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar & coeffRef(Index index)
Expression of a coefficient wise version of the C++ ternary operator ?:
Definition: Select.h:52
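For orientation, the following is a minimal sketch (not part of this header) of how the evaluator machinery declared here is typically exercised: an internal::evaluator is constructed for an expression, and coefficients are read through its coeff() accessor on demand. Note that internal::evaluator is an internal, unsupported API, so the exact interface may differ between Eigen versions.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d a = Eigen::Matrix3d::Random();
  Eigen::Matrix3d b = Eigen::Matrix3d::Random();

  // a + b is a CwiseBinaryOp expression; no temporary is materialized here.
  auto sum = a + b;

  // evaluator<CwiseBinaryOp<...>> dispatches to binary_evaluator internally.
  Eigen::internal::evaluator<decltype(sum)> eval(sum);

  // coeff(row, col) computes a single coefficient on demand.
  std::cout << eval.coeff(1, 2) << " == " << a(1, 2) + b(1, 2) << std::endl;
  return 0;
}

Eigen's dense assignment kernels drive the same coeff()/packet() interface to copy expression results into plain matrices, which is why the evaluators in this file expose both scalar and packet accessors.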

