Assign.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2007 Michael Olbrich <michael.olbrich@gmx.net>
// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_ASSIGN_H
#define EIGEN_ASSIGN_H

namespace Eigen {

namespace internal {

/***************************************************************************
* Part 1 : the logic deciding a strategy for traversal and unrolling       *
***************************************************************************/

template <typename Derived, typename OtherDerived>
struct assign_traits
{
public:
  enum {
    DstIsAligned = Derived::Flags & AlignedBit,
    DstHasDirectAccess = Derived::Flags & DirectAccessBit,
    SrcIsAligned = OtherDerived::Flags & AlignedBit,
    JointAlignment = bool(DstIsAligned) && bool(SrcIsAligned) ? Aligned : Unaligned
  };

private:
  enum {
    InnerSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::SizeAtCompileTime)
              : int(Derived::Flags)&RowMajorBit ? int(Derived::ColsAtCompileTime)
              : int(Derived::RowsAtCompileTime),
    InnerMaxSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::MaxSizeAtCompileTime)
              : int(Derived::Flags)&RowMajorBit ? int(Derived::MaxColsAtCompileTime)
              : int(Derived::MaxRowsAtCompileTime),
    MaxSizeAtCompileTime = Derived::SizeAtCompileTime,
    PacketSize = packet_traits<typename Derived::Scalar>::size
  };

  enum {
    StorageOrdersAgree = (int(Derived::IsRowMajor) == int(OtherDerived::IsRowMajor)),
    MightVectorize = StorageOrdersAgree
                  && (int(Derived::Flags) & int(OtherDerived::Flags) & ActualPacketAccessBit),
    MayInnerVectorize = MightVectorize && int(InnerSize)!=Dynamic && int(InnerSize)%int(PacketSize)==0
                  && int(DstIsAligned) && int(SrcIsAligned),
    MayLinearize = StorageOrdersAgree && (int(Derived::Flags) & int(OtherDerived::Flags) & LinearAccessBit),
    MayLinearVectorize = MightVectorize && MayLinearize && DstHasDirectAccess
                  && (DstIsAligned || MaxSizeAtCompileTime == Dynamic),
      /* If the destination isn't aligned, we have to do runtime checks and we don't unroll,
         so it's only good for large enough sizes. */
    MaySliceVectorize = MightVectorize && DstHasDirectAccess
                  && (int(InnerMaxSize)==Dynamic || int(InnerMaxSize)>=3*PacketSize)
      /* slice vectorization can be slow, so we only want it if the slices are big, which is
         indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block
         in a fixed-size matrix */
  };

public:
  enum {
    Traversal = int(MayInnerVectorize)  ? int(InnerVectorizedTraversal)
              : int(MayLinearVectorize) ? int(LinearVectorizedTraversal)
              : int(MaySliceVectorize)  ? int(SliceVectorizedTraversal)
              : int(MayLinearize)       ? int(LinearTraversal)
              : int(DefaultTraversal),
    Vectorized = int(Traversal) == InnerVectorizedTraversal
              || int(Traversal) == LinearVectorizedTraversal
              || int(Traversal) == SliceVectorizedTraversal
  };

private:
  enum {
    UnrollingLimit      = EIGEN_UNROLLING_LIMIT * (Vectorized ? int(PacketSize) : 1),
    MayUnrollCompletely = int(Derived::SizeAtCompileTime) != Dynamic
                       && int(OtherDerived::CoeffReadCost) != Dynamic
                       && int(Derived::SizeAtCompileTime) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit),
    MayUnrollInner      = int(InnerSize) != Dynamic
                       && int(OtherDerived::CoeffReadCost) != Dynamic
                       && int(InnerSize) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit)
  };

public:
  enum {
    Unrolling = (int(Traversal) == int(InnerVectorizedTraversal) || int(Traversal) == int(DefaultTraversal))
                ? (
                    int(MayUnrollCompletely) ? int(CompleteUnrolling)
                  : int(MayUnrollInner)      ? int(InnerUnrolling)
                  : int(NoUnrolling)
                  )
              : int(Traversal) == int(LinearVectorizedTraversal)
                ? ( bool(MayUnrollCompletely) && bool(DstIsAligned) ? int(CompleteUnrolling) : int(NoUnrolling) )
              : int(Traversal) == int(LinearTraversal)
                ? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling) : int(NoUnrolling) )
              : int(NoUnrolling)
  };

#ifdef EIGEN_DEBUG_ASSIGN
  static void debug()
  {
    EIGEN_DEBUG_VAR(DstIsAligned)
    EIGEN_DEBUG_VAR(SrcIsAligned)
    EIGEN_DEBUG_VAR(JointAlignment)
    EIGEN_DEBUG_VAR(InnerSize)
    EIGEN_DEBUG_VAR(InnerMaxSize)
    EIGEN_DEBUG_VAR(PacketSize)
    EIGEN_DEBUG_VAR(StorageOrdersAgree)
    EIGEN_DEBUG_VAR(MightVectorize)
    EIGEN_DEBUG_VAR(MayLinearize)
    EIGEN_DEBUG_VAR(MayInnerVectorize)
    EIGEN_DEBUG_VAR(MayLinearVectorize)
    EIGEN_DEBUG_VAR(MaySliceVectorize)
    EIGEN_DEBUG_VAR(Traversal)
    EIGEN_DEBUG_VAR(UnrollingLimit)
    EIGEN_DEBUG_VAR(MayUnrollCompletely)
    EIGEN_DEBUG_VAR(MayUnrollInner)
    EIGEN_DEBUG_VAR(Unrolling)
  }
#endif
};
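
// As an illustration of how the above traits resolve (assuming SSE is enabled, so
// PacketSize == 4 for float, and default alignment): assigning one Matrix4f to another
// typically yields Traversal == InnerVectorizedTraversal with CompleteUnrolling, since
// InnerSize (4) is a multiple of PacketSize and the 16 coefficients fall under the
// unrolling limit; assigning large dynamic-size MatrixXf objects typically yields
// LinearVectorizedTraversal with NoUnrolling. The exact outcome depends on the enabled
// SIMD instruction set and on EIGEN_UNROLLING_LIMIT.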

/***************************************************************************
* Part 2 : meta-unrollers
***************************************************************************/

/************************
*** Default traversal ***
************************/

template<typename Derived1, typename Derived2, int Index, int Stop>
struct assign_DefaultTraversal_CompleteUnrolling
{
  enum {
    outer = Index / Derived1::InnerSizeAtCompileTime,
    inner = Index % Derived1::InnerSizeAtCompileTime
  };

  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    dst.copyCoeffByOuterInner(outer, inner, src);
    assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src);
  }
};

template<typename Derived1, typename Derived2, int Stop>
struct assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
{
  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {}
};

template<typename Derived1, typename Derived2, int Index, int Stop>
struct assign_DefaultTraversal_InnerUnrolling
{
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, typename Derived1::Index outer)
  {
    dst.copyCoeffByOuterInner(outer, Index, src);
    assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src, outer);
  }
};

template<typename Derived1, typename Derived2, int Stop>
struct assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, Stop, Stop>
{
  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, typename Derived1::Index) {}
};

/***********************
*** Linear traversal ***
***********************/

template<typename Derived1, typename Derived2, int Index, int Stop>
struct assign_LinearTraversal_CompleteUnrolling
{
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    dst.copyCoeff(Index, src);
    assign_LinearTraversal_CompleteUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src);
  }
};

template<typename Derived1, typename Derived2, int Stop>
struct assign_LinearTraversal_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
{
  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {}
};

/**************************
*** Inner vectorization ***
**************************/

template<typename Derived1, typename Derived2, int Index, int Stop>
struct assign_innervec_CompleteUnrolling
{
  enum {
    outer = Index / Derived1::InnerSizeAtCompileTime,
    inner = Index % Derived1::InnerSizeAtCompileTime,
    JointAlignment = assign_traits<Derived1,Derived2>::JointAlignment
  };

  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    dst.template copyPacketByOuterInner<Derived2, Aligned, JointAlignment>(outer, inner, src);
    assign_innervec_CompleteUnrolling<Derived1, Derived2,
      Index+packet_traits<typename Derived1::Scalar>::size, Stop>::run(dst, src);
  }
};

template<typename Derived1, typename Derived2, int Stop>
struct assign_innervec_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
{
  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {}
};

template<typename Derived1, typename Derived2, int Index, int Stop>
struct assign_innervec_InnerUnrolling
{
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, typename Derived1::Index outer)
  {
    dst.template copyPacketByOuterInner<Derived2, Aligned, Aligned>(outer, Index, src);
    assign_innervec_InnerUnrolling<Derived1, Derived2,
      Index+packet_traits<typename Derived1::Scalar>::size, Stop>::run(dst, src, outer);
  }
};

template<typename Derived1, typename Derived2, int Stop>
struct assign_innervec_InnerUnrolling<Derived1, Derived2, Stop, Stop>
{
  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, typename Derived1::Index) {}
};

/***************************************************************************
* Part 3 : implementation of all cases
***************************************************************************/

template<typename Derived1, typename Derived2,
         int Traversal = assign_traits<Derived1, Derived2>::Traversal,
         int Unrolling = assign_traits<Derived1, Derived2>::Unrolling,
         int Version = Specialized>
struct assign_impl;

/************************
*** Default traversal ***
************************/

template<typename Derived1, typename Derived2, int Unrolling, int Version>
struct assign_impl<Derived1, Derived2, InvalidTraversal, Unrolling, Version>
{
  static inline void run(Derived1 &, const Derived2 &) { }
};

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, DefaultTraversal, NoUnrolling, Version>
{
  typedef typename Derived1::Index Index;
  static inline void run(Derived1 &dst, const Derived2 &src)
  {
    const Index innerSize = dst.innerSize();
    const Index outerSize = dst.outerSize();
    for(Index outer = 0; outer < outerSize; ++outer)
      for(Index inner = 0; inner < innerSize; ++inner)
        dst.copyCoeffByOuterInner(outer, inner, src);
  }
};

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, DefaultTraversal, CompleteUnrolling, Version>
{
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
      ::run(dst, src);
  }
};

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, DefaultTraversal, InnerUnrolling, Version>
{
  typedef typename Derived1::Index Index;
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    const Index outerSize = dst.outerSize();
    for(Index outer = 0; outer < outerSize; ++outer)
      assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, 0, Derived1::InnerSizeAtCompileTime>
        ::run(dst, src, outer);
  }
};

/***********************
*** Linear traversal ***
***********************/

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, LinearTraversal, NoUnrolling, Version>
{
  typedef typename Derived1::Index Index;
  static inline void run(Derived1 &dst, const Derived2 &src)
  {
    const Index size = dst.size();
    for(Index i = 0; i < size; ++i)
      dst.copyCoeff(i, src);
  }
};

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, LinearTraversal, CompleteUnrolling, Version>
{
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    assign_LinearTraversal_CompleteUnrolling<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
      ::run(dst, src);
  }
};

/**************************
*** Inner vectorization ***
**************************/

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, InnerVectorizedTraversal, NoUnrolling, Version>
{
  typedef typename Derived1::Index Index;
  static inline void run(Derived1 &dst, const Derived2 &src)
  {
    const Index innerSize = dst.innerSize();
    const Index outerSize = dst.outerSize();
    const Index packetSize = packet_traits<typename Derived1::Scalar>::size;
    for(Index outer = 0; outer < outerSize; ++outer)
      for(Index inner = 0; inner < innerSize; inner+=packetSize)
        dst.template copyPacketByOuterInner<Derived2, Aligned, Aligned>(outer, inner, src);
  }
};

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, InnerVectorizedTraversal, CompleteUnrolling, Version>
{
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    assign_innervec_CompleteUnrolling<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
      ::run(dst, src);
  }
};

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, InnerVectorizedTraversal, InnerUnrolling, Version>
{
  typedef typename Derived1::Index Index;
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    const Index outerSize = dst.outerSize();
    for(Index outer = 0; outer < outerSize; ++outer)
      assign_innervec_InnerUnrolling<Derived1, Derived2, 0, Derived1::InnerSizeAtCompileTime>
        ::run(dst, src, outer);
  }
};

/***************************
*** Linear vectorization ***
***************************/

template <bool IsAligned = false>
struct unaligned_assign_impl
{
  template <typename Derived, typename OtherDerived>
  static EIGEN_STRONG_INLINE void run(const Derived&, OtherDerived&, typename Derived::Index, typename Derived::Index) {}
};

template <>
struct unaligned_assign_impl<false>
{
  // MSVC must not inline this function. If it does, it fails to optimize the
  // packet access path.
#ifdef _MSC_VER
  template <typename Derived, typename OtherDerived>
  static EIGEN_DONT_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end)
#else
  template <typename Derived, typename OtherDerived>
  static EIGEN_STRONG_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end)
#endif
  {
    for (typename Derived::Index index = start; index < end; ++index)
      dst.copyCoeff(index, src);
  }
};

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, LinearVectorizedTraversal, NoUnrolling, Version>
{
  typedef typename Derived1::Index Index;
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    const Index size = dst.size();
    typedef packet_traits<typename Derived1::Scalar> PacketTraits;
    enum {
      packetSize = PacketTraits::size,
      dstAlignment = PacketTraits::AlignedOnScalar ? Aligned : int(assign_traits<Derived1,Derived2>::DstIsAligned) ,
      srcAlignment = assign_traits<Derived1,Derived2>::JointAlignment
    };
    const Index alignedStart = assign_traits<Derived1,Derived2>::DstIsAligned ? 0
                             : internal::first_aligned(&dst.coeffRef(0), size);
    const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;

    unaligned_assign_impl<assign_traits<Derived1,Derived2>::DstIsAligned!=0>::run(src,dst,0,alignedStart);

    for(Index index = alignedStart; index < alignedEnd; index += packetSize)
    {
      dst.template copyPacket<Derived2, dstAlignment, srcAlignment>(index, src);
    }

    unaligned_assign_impl<>::run(src,dst,alignedEnd,size);
  }
};

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, LinearVectorizedTraversal, CompleteUnrolling, Version>
{
  typedef typename Derived1::Index Index;
  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
  {
    enum { size = Derived1::SizeAtCompileTime,
           packetSize = packet_traits<typename Derived1::Scalar>::size,
           alignedSize = (size/packetSize)*packetSize };

    assign_innervec_CompleteUnrolling<Derived1, Derived2, 0, alignedSize>::run(dst, src);
    assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, alignedSize, size>::run(dst, src);
  }
};

/**************************
*** Slice vectorization ***
**************************/

template<typename Derived1, typename Derived2, int Version>
struct assign_impl<Derived1, Derived2, SliceVectorizedTraversal, NoUnrolling, Version>
{
  typedef typename Derived1::Index Index;
  static inline void run(Derived1 &dst, const Derived2 &src)
  {
    typedef packet_traits<typename Derived1::Scalar> PacketTraits;
    enum {
      packetSize = PacketTraits::size,
      alignable = PacketTraits::AlignedOnScalar,
      dstAlignment = alignable ? Aligned : int(assign_traits<Derived1,Derived2>::DstIsAligned) ,
      srcAlignment = assign_traits<Derived1,Derived2>::JointAlignment
    };
    const Index packetAlignedMask = packetSize - 1;
    const Index innerSize = dst.innerSize();
    const Index outerSize = dst.outerSize();
    const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0;
    Index alignedStart = ((!alignable) || assign_traits<Derived1,Derived2>::DstIsAligned) ? 0
                       : internal::first_aligned(&dst.coeffRef(0,0), innerSize);

    for(Index outer = 0; outer < outerSize; ++outer)
    {
      const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask);
      // do the non-vectorizable part of the assignment
      for(Index inner = 0; inner<alignedStart ; ++inner)
        dst.copyCoeffByOuterInner(outer, inner, src);

      // do the vectorizable part of the assignment
      for(Index inner = alignedStart; inner<alignedEnd; inner+=packetSize)
        dst.template copyPacketByOuterInner<Derived2, dstAlignment, Unaligned>(outer, inner, src);

      // do the non-vectorizable part of the assignment
      for(Index inner = alignedEnd; inner<innerSize ; ++inner)
        dst.copyCoeffByOuterInner(outer, inner, src);

      alignedStart = std::min<Index>((alignedStart+alignedStep)%packetSize, innerSize);
    }
  }
};

} // end namespace internal

/***************************************************************************
* Part 4 : implementation of DenseBase methods
***************************************************************************/

template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived& DenseBase<Derived>
  ::lazyAssign(const DenseBase<OtherDerived>& other)
{
  enum{
    SameType = internal::is_same<typename Derived::Scalar,typename OtherDerived::Scalar>::value
  };

  EIGEN_STATIC_ASSERT_LVALUE(Derived)
  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived)
  EIGEN_STATIC_ASSERT(SameType,YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

#ifdef EIGEN_DEBUG_ASSIGN
  internal::assign_traits<Derived, OtherDerived>::debug();
#endif
  eigen_assert(rows() == other.rows() && cols() == other.cols());
  internal::assign_impl<Derived, OtherDerived, int(SameType) ? int(internal::assign_traits<Derived, OtherDerived>::Traversal)
                                                             : int(InvalidTraversal)>::run(derived(),other.derived());
#ifndef EIGEN_NO_DEBUG
  checkTransposeAliasing(other.derived());
#endif
  return derived();
}

namespace internal {

template<typename Derived, typename OtherDerived,
         bool EvalBeforeAssigning = (int(internal::traits<OtherDerived>::Flags) & EvalBeforeAssigningBit) != 0,
         bool NeedToTranspose = ((int(Derived::RowsAtCompileTime) == 1 && int(OtherDerived::ColsAtCompileTime) == 1)
                              |  // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
                                 // revert to || as soon as not needed anymore.
                                 (int(Derived::ColsAtCompileTime) == 1 && int(OtherDerived::RowsAtCompileTime) == 1))
                              && int(Derived::SizeAtCompileTime) != 1>
struct assign_selector;

template<typename Derived, typename OtherDerived>
struct assign_selector<Derived,OtherDerived,false,false> {
  static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.derived()); }
  template<typename ActualDerived, typename ActualOtherDerived>
  static EIGEN_STRONG_INLINE Derived& evalTo(ActualDerived& dst, const ActualOtherDerived& other) { other.evalTo(dst); return dst; }
};
template<typename Derived, typename OtherDerived>
struct assign_selector<Derived,OtherDerived,true,false> {
  static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.eval()); }
};
template<typename Derived, typename OtherDerived>
struct assign_selector<Derived,OtherDerived,false,true> {
  static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose()); }
  template<typename ActualDerived, typename ActualOtherDerived>
  static EIGEN_STRONG_INLINE Derived& evalTo(ActualDerived& dst, const ActualOtherDerived& other) { Transpose<ActualDerived> dstTrans(dst); other.evalTo(dstTrans); return dst; }
};
template<typename Derived, typename OtherDerived>
struct assign_selector<Derived,OtherDerived,true,true> {
  static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose().eval()); }
};
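
// The NeedToTranspose parameter above makes assignments between vectors of transposed
// shapes work: for a column vector col and a row vector row of equal length, "col = row;"
// selects assign_selector<...,...,false,true> (assuming no EvalBeforeAssigningBit is set on
// the source) and is forwarded to lazyAssign(other.transpose()). Matrices, by contrast,
// must already have matching shapes; no implicit transposition is performed for them.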

} // end namespace internal

template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase<OtherDerived>& other)
{
  return internal::assign_selector<Derived,OtherDerived>::run(derived(), other.derived());
}

template<typename Derived>
EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase& other)
{
  return internal::assign_selector<Derived,Derived>::run(derived(), other.derived());
}

template<typename Derived>
EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const MatrixBase& other)
{
  return internal::assign_selector<Derived,Derived>::run(derived(), other.derived());
}

template<typename Derived>
template <typename OtherDerived>
EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const DenseBase<OtherDerived>& other)
{
  return internal::assign_selector<Derived,OtherDerived>::run(derived(), other.derived());
}

template<typename Derived>
template <typename OtherDerived>
Derived& MatrixBase<Derived>::operator=(const EigenBase<OtherDerived>& other)
{
  return internal::assign_selector<Derived,OtherDerived,false>::evalTo(derived(), other.derived());
}

template<typename Derived>
template<typename OtherDerived>
Derived& MatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
{
  return internal::assign_selector<Derived,OtherDerived,false>::evalTo(derived(), other.derived());
}

} // end namespace Eigen

#endif // EIGEN_ASSIGN_H
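
The strategy chosen by internal::assign_traits can be observed directly: when EIGEN_DEBUG_ASSIGN is defined before including Eigen, lazyAssign() calls assign_traits<...>::debug(), which prints the enums listed in Part 1. Below is a minimal sketch of such a probe; the matrix types and sizes are illustrative assumptions, and the reported traversal and unrolling values depend on the enabled SIMD instruction set and on EIGEN_UNROLLING_LIMIT.

// Probe which traversal/unrolling strategy Eigen selects for two assignments.
// EIGEN_DEBUG_ASSIGN must be defined before any Eigen header is included.
#define EIGEN_DEBUG_ASSIGN
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix4f a, b;              // small fixed-size matrices (illustrative choice)
  b.setRandom();
  a = b;                             // prints the assign_traits enums to stderr

  Eigen::MatrixXf c(64, 64), d(64, 64);
  d.setRandom();
  c = d;                             // large dynamic matrices typically take a different path

  std::cout << a.sum() + c.sum() << std::endl;   // use the results so nothing is optimized away
  return 0;
}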