GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
  int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,ResInnerStride>
{
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resIncr, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor,ResInnerStride>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resIncr,resStride,alpha,blocking,info);
  }
};
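
// The reduction above rests on the identity (A*B)^T = B^T * A^T: a row-major
// C is bit-for-bit a column-major C^T, so evaluating the column-major product
// B^T * A^T (operands swapped, storage orders flipped, rows/cols exchanged)
// writes exactly the row-major A*B. No data is copied; only the template
// parameters and the order of the size/pointer arguments change.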

/* Specialization for a col-major destination matrix
 *    => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
  int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride>
{

typedef gebp_traits<LhsScalar,RhsScalar> Traits;

typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
  const LhsScalar* _lhs, Index lhsStride,
  const RhsScalar* _rhs, Index rhsStride,
  ResScalar* _res, Index resIncr, Index resStride,
  ResScalar alpha,
  level3_blocking<LhsScalar,RhsScalar>& blocking,
  GemmParallelInfo<Index>* info = 0)
{
  typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
  typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
  typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor, Unaligned, ResInnerStride> ResMapper;
  LhsMapper lhs(_lhs, lhsStride);
  RhsMapper rhs(_rhs, rhsStride);
  ResMapper res(_res, resStride, resIncr);

  Index kc = blocking.kc();                   // cache block size along the K direction
  Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
  Index nc = (std::min)(cols,blocking.nc());  // cache block size along the N direction

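  // The three sizes drive a classic Goto-style decomposition: A is consumed
  // in mc x kc blocks and B in kc x nc blocks, each copied ("packed") once
  // into a contiguous buffer and then reused by many kernel calls, so the
  // packing cost is amortized while the working set stays cache resident.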
  gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
  gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
  gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
  if(info)
  {
    // this is the parallel version!
    int tid = omp_get_thread_num();
    int threads = omp_get_num_threads();

    LhsScalar* blockA = blocking.blockA();
    eigen_internal_assert(blockA!=0);

    std::size_t sizeB = kc*nc;
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);

    // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
    for(Index k=0; k<depth; k+=kc)
    {
      const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

      // In order to reduce the chance that a thread has to wait for the other,
      // let's start by packing B'.
      pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);

      // Pack A_k to A' in a parallel fashion:
      // each thread packs the sub block A_k,i to A'_i where i is the thread id.

      // However, before copying to A'_i, we have to make sure that no other thread is still using it,
      // i.e., we test that info[tid].users equals 0.
      // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
      while(info[tid].users!=0) {}
      info[tid].users = threads;

      pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);

      // Notify the other threads that the part A'_i is ready to go.
      info[tid].sync = k;

      // Computes C_i += A' * B' per A'_i
      for(int shift=0; shift<threads; ++shift)
      {
        int i = (tid+shift)%threads;

        // At this point we have to make sure that A'_i has been updated by the thread i,
        // we use testAndSetOrdered to mimic a volatile access.
        // However, no need to wait for the B' part which has been updated by the current thread!
        if (shift>0) {
          while(info[i].sync!=k) {
          }
        }

        gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
      }

      // Then keep going as usual with the remaining B'
      for(Index j=nc; j<cols; j+=nc)
      {
        const Index actual_nc = (std::min)(j+nc,cols)-j;

        // pack B_k,j to B'
        pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);

        // C_j += A' * B'
        gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
      }

      // Release all the sub blocks A'_i of A' for the current thread,
      // i.e., we simply decrement the number of users by 1
      for(Index i=0; i<threads; ++i)
#if !EIGEN_HAS_CXX11_ATOMIC
        #pragma omp atomic
#endif
        info[i].users -= 1;
    }
  }
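  // Note on the hand-shake above: the spin loops on info[i].users and
  // info[i].sync only work if one thread's writes become visible to the
  // others. When EIGEN_HAS_CXX11_ATOMIC is available these fields are
  // std::atomic (see GemmParallelInfo in Parallelizer.h), which is why the
  // "#pragma omp atomic" guarding the shared decrement is only needed in
  // the pre-C++11 branch.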
  else
#endif // EIGEN_HAS_OPENMP
  {
    EIGEN_UNUSED_VARIABLE(info);

    // this is the sequential version!
    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*nc;

    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());

    const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;

    // For each horizontal panel of the rhs, and corresponding panel of the lhs...
    for(Index i2=0; i2<rows; i2+=mc)
    {
      const Index actual_mc = (std::min)(i2+mc,rows)-i2;

      for(Index k2=0; k2<depth; k2+=kc)
      {
        const Index actual_kc = (std::min)(k2+kc,depth)-k2;

        // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
        // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
        // Note that this panel will be read as many times as the number of blocks in the rhs's
        // horizontal panel which is, in practice, a very low number.
        pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);

        // For each kc x nc block of the rhs's horizontal panel...
        for(Index j2=0; j2<cols; j2+=nc)
        {
          const Index actual_nc = (std::min)(j2+nc,cols)-j2;

          // We pack the rhs's block into a sequential chunk of memory (L2 caching)
          // Note that this block will be read a very high number of times, which is equal to the number of
          // micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
          if((!pack_rhs_once) || i2==0)
            pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);

          // Everything is packed, we can now call the panel * block kernel:
          gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
        }
      }
    }
  }
}

};
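
// A rough, illustrative walk through the sequential path (the actual kc/mc/nc
// come from computeProductBlockingSizes): with rows=cols=depth=1000 and, say,
// mc=128, kc=256, nc=1000, the i2 loop runs 8 times and the k2 loop 4 times,
// so A is packed 32 times as a 128x256 block; since nc==cols the j2 loop has a
// single iteration, B is packed as a 256x1000 panel per (i2,k2) pair, and each
// of the 32 gebp calls accumulates into a 128x1000 slice of C.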

/*********************************************************************************
*  Specialization of generic_product_impl for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  void initParallelSession(Index num_threads) const
  {
    m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
    m_blocking.allocateA();
  }

  void operator() (Index row, Index col, Index rows, Index cols, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.innerStride(), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }
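
  // Call convention (a sketch of how parallelize_gemm drives this functor):
  // (row,col) is the top-left corner of the destination block, (rows,cols)
  // its size, and cols==-1 is shorthand for "all columns of the rhs"; in the
  // multi-threaded case each worker invokes the functor on its own row range
  // and passes the shared GemmParallelInfo array through to Gemm::run.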

  typedef typename Gemm::Traits Traits;

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;

    Index m_mc;
    Index m_nc;
    Index m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline Index mc() const { return m_mc; }
    inline Index nc() const { return m_nc; }
    inline Index kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
};
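
// level3_blocking deliberately erases the compile-time blocking policy: the
// product kernels above only see this minimal interface (block sizes plus raw
// buffer pointers), while the gemm_blocking_space specializations below decide
// whether those buffers are static member arrays or heap allocations.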

template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth
    };

#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
    EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
    EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
    EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif

  public:

    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
#else
      // round the raw char buffers up to the next EIGEN_DEFAULT_ALIGN_BYTES boundary
      this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
      this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
    }

    void initParallel(Index, Index, Index, Index)
    {}

    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateAll() {}
};
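
// This specialization kicks in when all three dimensions are bounded at
// compile time, e.g. for Matrix<float,32,32> c = a * b (a 4x4 product would
// instead stay on the coefficient-based path, see evalTo below). The packed
// blocks then live inside the blocking object itself, so such fixed-size
// products never touch the heap.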

template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index m_sizeA;
    Index m_sizeB;

  public:

    gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      if(l3_blocking)
      {
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
      }
      else  // no l3 blocking
      {
        Index n = this->m_nc;
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
      }

      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void initParallel(Index rows, Index cols, Index depth, Index num_threads)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
      Index m = this->m_mc;
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
    }
};
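
// Lifecycle sketch for the dynamic case: in the single-threaded path the
// blocks are usually stack-constructed inside run() (the blockA()/blockB()
// pointers stay null, so ei_declare_aligned_stack_constructed_variable
// allocates them itself), while the parallel path calls initParallel() with
// the actual thread count and then allocateA() so that all threads share one
// packed lhs; the destructor releases whatever was heap-allocated here.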

} // end namespace internal

namespace internal {

template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  typedef typename Lhs::Scalar LhsScalar;
  typedef typename Rhs::Scalar RhsScalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum {
    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
  };

  typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;

  template<typename Dst>
  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=404 for a discussion and helper program
    // to determine the following heuristic.
    // EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h,
    // unless it has been specialized by the user or for a given architecture.
    // Note that the condition rhs.rows()>0 was required because lazy product is (was?) not happy with empty inputs.
    // I'm not sure it is still required.
    if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
      lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>());
    else
    {
      dst.setZero();
      scaleAndAddTo(dst, lhs, rhs, Scalar(1));
    }
  }
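
  // Concretely, with the default threshold of 20 a 5x5 times 5x5 product has
  // rhs.rows()+dst.rows()+dst.cols() == 15 and is evaluated coefficient-wise,
  // while e.g. a 100x100 product falls through to the blocked GEMM path.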

  template<typename Dst>
  static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
      lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>());
    else
      scaleAndAddTo(dst,lhs, rhs, Scalar(1));
  }

  template<typename Dst>
  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
      lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>());
    else
      scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
  }

  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
  {
    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
    if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
      return;

    if (dst.cols() == 1)
    {
      // Fallback to GEMV if either the lhs or rhs is a runtime vector
      typename Dest::ColXpr dst_vec(dst.col(0));
      return internal::generic_product_impl<Lhs,typename Rhs::ConstColXpr,DenseShape,DenseShape,GemvProduct>
        ::scaleAndAddTo(dst_vec, a_lhs, a_rhs.col(0), alpha);
    }
    else if (dst.rows() == 1)
    {
      // Fallback to GEMV if either the lhs or rhs is a runtime vector
      typename Dest::RowXpr dst_vec(dst.row(0));
      return internal::generic_product_impl<typename Lhs::ConstRowXpr,Rhs,DenseShape,DenseShape,GemvProduct>
        ::scaleAndAddTo(dst_vec, a_lhs.row(0), a_rhs, alpha);
    }

    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    Scalar actualAlpha = combine_scalar_factors(alpha, a_lhs, a_rhs);

    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
            Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

    typedef internal::gemm_functor<
      Scalar, Index,
      internal::general_matrix_matrix_product<
        Index,
        LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
        RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
        (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,
        Dest::InnerStrideAtCompileTime>,
      ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;

    BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
    internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
        (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
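
// A minimal usage sketch (hypothetical user code, not part of this header):
// any large enough dense product is routed through the GemmProduct path above,
// with scalar factors folded into a single alpha by combine_scalar_factors.
//
//   #include <Eigen/Dense>
//
//   int main()
//   {
//     Eigen::MatrixXd A = Eigen::MatrixXd::Random(512, 384);
//     Eigen::MatrixXd B = Eigen::MatrixXd::Random(384, 256);
//     Eigen::MatrixXd C = Eigen::MatrixXd::Zero(512, 256);
//     C.noalias() += 2.0 * A * B;   // ends up in scaleAndAddTo with alpha == 2
//     return 0;
//   }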