TriangularMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_H
#define EIGEN_TRIANGULAR_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

// template<typename Scalar, int mr, int StorageOrder, bool Conjugate, int Mode>
// struct gemm_pack_lhs_triangular
// {
//   Matrix<Scalar,mr,mr,
//   void operator()(Scalar* blockA, const EIGEN_RESTRICT Scalar* _lhs, int lhsStride, int depth, int rows)
//   {
//     conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
//     const_blas_data_mapper<Scalar, StorageOrder> lhs(_lhs,lhsStride);
//     int count = 0;
//     const int peeled_mc = (rows/mr)*mr;
//     for(int i=0; i<peeled_mc; i+=mr)
//     {
//       for(int k=0; k<depth; k++)
//         for(int w=0; w<mr; w++)
//           blockA[count++] = cj(lhs(i+w, k));
//     }
//     for(int i=peeled_mc; i<rows; i++)
//     {
//       for(int k=0; k<depth; k++)
//         blockA[count++] = cj(lhs(i, k));
//     }
//   }
// };

/* Optimized triangular matrix * matrix (_TRMM++) product built on top of
 * the general matrix matrix product.
 */
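// For reference, this kernel is what ultimately runs behind triangular-view products.
// A sketch of the user-level entry point (illustration only, not part of this file):
//
//   #include <Eigen/Dense>
//   using namespace Eigen;
//   MatrixXd A = MatrixXd::Random(100,100), B = MatrixXd::Random(100,20), C = MatrixXd::Zero(100,20);
//   C.noalias() += A.triangularView<Lower>() * B;  // ends up in product_triangular_matrix_matrix
//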
template <typename Scalar, typename Index,
          int Mode, bool LhsIsTriangular,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs,
          int ResStorageOrder, int Version = Specialized>
struct product_triangular_matrix_matrix;

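// When the destination is row-major, the product C = op(A)*op(B) is evaluated as its
// transpose C^T = op(B)^T * op(A)^T with a col-major destination: lhs and rhs are swapped,
// Upper and Lower are swapped, and the storage orders are flipped, so only the col-major
// specializations below need a real implementation.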
template <typename Scalar, typename Index,
          int Mode, bool LhsIsTriangular,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs, int Version>
struct product_triangular_matrix_matrix<Scalar,Index,Mode,LhsIsTriangular,
                                        LhsStorageOrder,ConjugateLhs,
                                        RhsStorageOrder,ConjugateRhs,RowMajor,Version>
{
58 {
59  static EIGEN_STRONG_INLINE void run(
60  Index rows, Index cols, Index depth,
61  const Scalar* lhs, Index lhsStride,
62  const Scalar* rhs, Index rhsStride,
63  Scalar* res, Index resStride,
65  {
67  (Mode&(UnitDiag|ZeroDiag)) | ((Mode&Upper) ? Lower : Upper),
68  (!LhsIsTriangular),
69  RhsStorageOrder==RowMajor ? ColMajor : RowMajor,
70  ConjugateRhs,
71  LhsStorageOrder==RowMajor ? ColMajor : RowMajor,
72  ConjugateLhs,
73  ColMajor>
74  ::run(cols, rows, depth, rhs, rhsStride, lhs, lhsStride, res, resStride, alpha, blocking);
75  }
76 };
77 
// implements col-major += alpha * op(triangular) * op(general)
template <typename Scalar, typename Index, int Mode,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs, int Version>
struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
                                        LhsStorageOrder,ConjugateLhs,
                                        RhsStorageOrder,ConjugateRhs,ColMajor,Version>
{

  typedef gebp_traits<Scalar,Scalar> Traits;
  enum {
    SmallPanelWidth = 2 * EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
    IsLower = (Mode&Lower) == Lower,
    SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1
  };
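  // Note: SetDiag==0 means the diagonal of the triangular operand is never read from
  // memory; for ZeroDiag/UnitDiag modes it is synthesized in the small triangularBuffer
  // allocated in run() below.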

  static EIGEN_DONT_INLINE void run(
    Index _rows, Index _cols, Index _depth,
    const Scalar* _lhs, Index lhsStride,
    const Scalar* _rhs, Index rhsStride,
    Scalar* res, Index resStride,
    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
};

template <typename Scalar, typename Index, int Mode,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs, int Version>
EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
                                                        LhsStorageOrder,ConjugateLhs,
                                                        RhsStorageOrder,ConjugateRhs,ColMajor,Version>::run(
    Index _rows, Index _cols, Index _depth,
    const Scalar* _lhs, Index lhsStride,
    const Scalar* _rhs, Index rhsStride,
    Scalar* _res, Index resStride,
    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
  {
    // strip zeros
    Index diagSize = (std::min)(_rows,_depth);
    Index rows = IsLower ? _rows : diagSize;
    Index depth = IsLower ? diagSize : _depth;
    Index cols = _cols;

    typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;
    typedef const_blas_data_mapper<Scalar, Index, RhsStorageOrder> RhsMapper;
    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
    LhsMapper lhs(_lhs,lhsStride);
    RhsMapper rhs(_rhs,rhsStride);
    ResMapper res(_res, resStride);

    Index kc = blocking.kc();                   // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
    // The small panel size must not be larger than the blocking size.
    // Usually this should never be the case because SmallPanelWidth^2 is very small
    // compared to L2 cache size, but let's be safe:
    Index panelWidth = (std::min)(Index(SmallPanelWidth),(std::min)(kc,mc));

    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*cols;

    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());

    // To work around an "error: member reference base type 'Matrix<...>
    // (Eigen::internal::constructor_without_unaligned_array_assert (*)())' is
    // not a structure or union" compilation error in nvcc (tested V8.0.61),
    // create a dummy internal::constructor_without_unaligned_array_assert
    // object to pass to the Matrix constructor.
    internal::constructor_without_unaligned_array_assert a;
    Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,LhsStorageOrder> triangularBuffer(a);
    triangularBuffer.setZero();
    if((Mode&ZeroDiag)==ZeroDiag)
      triangularBuffer.diagonal().setZero();
    else
      triangularBuffer.diagonal().setOnes();

    gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
    gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;

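    // Walk the depth (K) dimension by cache-sized chunks of at most kc scalars, starting
    // from the far end of the triangular part in the lower case (k2 decreasing) and from
    // the beginning in the upper case (k2 increasing).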
    for(Index k2=IsLower ? depth : 0;
        IsLower ? k2>0 : k2<depth;
        IsLower ? k2-=kc : k2+=kc)
    {
      Index actual_kc = (std::min)(IsLower ? k2 : depth-k2, kc);
      Index actual_k2 = IsLower ? k2-actual_kc : k2;

      // align blocks with the end of the triangular part for trapezoidal lhs
      if((!IsLower)&&(k2<rows)&&(k2+actual_kc>rows))
      {
        actual_kc = rows-k2;
        k2 = k2+actual_kc-kc;
      }

      pack_rhs(blockB, rhs.getSubMapper(actual_k2,0), actual_kc, cols);

      // the selected panel of the lhs has to be split into three different parts:
      //  1 - the part which is zero => skip it
      //  2 - the diagonal block => special kernel
      //  3 - the dense panel below (lower case) or above (upper case) the diagonal block => GEPP
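      //
      // For instance, in the lower-triangular case the lhs columns selected by the current
      // k-chunk look like this (x = stored coefficient, . = structural zero):
      //
      //        [ . . . . ]   <- 1: rows above the diagonal block are zero => skipped
      //        [ x . . . ]   <- 2: the diagonal block itself => packed via triangularBuffer
      //        [ x x . . ]
      //        [ x x x . ]
      //        [ x x x x ]   <- 3: dense rows below the diagonal block => regular GEPP
      //        [ x x x x ]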

      // the block diagonal, if any:
      if(IsLower || actual_k2<rows)
      {
        // for each small vertical panel of the lhs
        for (Index k1=0; k1<actual_kc; k1+=panelWidth)
        {
          Index actualPanelWidth = std::min<Index>(actual_kc-k1, panelWidth);
          Index lengthTarget = IsLower ? actual_kc-k1-actualPanelWidth : k1;
          Index startBlock   = actual_k2+k1;
          Index blockBOffset = k1;

          // => GEBP with the micro triangular block
          // The trick is to pack this micro block while filling the opposite triangular part with zeros.
          // To this end we do an extra triangular copy to a small temporary buffer.
          for (Index k=0;k<actualPanelWidth;++k)
          {
            if (SetDiag)
              triangularBuffer.coeffRef(k,k) = lhs(startBlock+k,startBlock+k);
            for (Index i=IsLower ? k+1 : 0; IsLower ? i<actualPanelWidth : i<k; ++i)
              triangularBuffer.coeffRef(i,k) = lhs(startBlock+i,startBlock+k);
          }
          pack_lhs(blockA, LhsMapper(triangularBuffer.data(), triangularBuffer.outerStride()), actualPanelWidth, actualPanelWidth);

          gebp_kernel(res.getSubMapper(startBlock, 0), blockA, blockB,
                      actualPanelWidth, actualPanelWidth, cols, alpha,
                      actualPanelWidth, actual_kc, 0, blockBOffset);

          // GEBP with remaining micro panel
          if (lengthTarget>0)
          {
            Index startTarget = IsLower ? actual_k2+k1+actualPanelWidth : actual_k2;

            pack_lhs(blockA, lhs.getSubMapper(startTarget,startBlock), actualPanelWidth, lengthTarget);

            gebp_kernel(res.getSubMapper(startTarget, 0), blockA, blockB,
                        lengthTarget, actualPanelWidth, cols, alpha,
                        actualPanelWidth, actual_kc, 0, blockBOffset);
          }
        }
      }
      // the part below (lower case) or above (upper case) the diagonal => GEPP
      {
        Index start = IsLower ? k2 : 0;
        Index end   = IsLower ? rows : (std::min)(actual_k2,rows);
        for(Index i2=start; i2<end; i2+=mc)
        {
          const Index actual_mc = (std::min)(i2+mc,end)-i2;
          gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder, false>()
            (blockA, lhs.getSubMapper(i2, actual_k2), actual_kc, actual_mc);

          gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc,
                      actual_kc, cols, alpha, -1, -1, 0, 0);
        }
      }
    }
  }

// implements col-major += alpha * op(general) * op(triangular)
template <typename Scalar, typename Index, int Mode,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs, int Version>
struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
                                        LhsStorageOrder,ConjugateLhs,
                                        RhsStorageOrder,ConjugateRhs,ColMajor,Version>
{
  typedef gebp_traits<Scalar,Scalar> Traits;
  enum {
    SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
    IsLower = (Mode&Lower) == Lower,
    SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1
  };

  static EIGEN_DONT_INLINE void run(
    Index _rows, Index _cols, Index _depth,
    const Scalar* _lhs, Index lhsStride,
    const Scalar* _rhs, Index rhsStride,
    Scalar* res, Index resStride,
    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
};

template <typename Scalar, typename Index, int Mode,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs, int Version>
EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,false,
                                                        LhsStorageOrder,ConjugateLhs,
                                                        RhsStorageOrder,ConjugateRhs,ColMajor,Version>::run(
    Index _rows, Index _cols, Index _depth,
    const Scalar* _lhs, Index lhsStride,
    const Scalar* _rhs, Index rhsStride,
    Scalar* _res, Index resStride,
    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
  {
    const Index PacketBytes = packet_traits<Scalar>::size*sizeof(Scalar);
    // strip zeros
    Index diagSize = (std::min)(_cols,_depth);
    Index rows = _rows;
    Index depth = IsLower ? _depth : diagSize;
    Index cols = IsLower ? diagSize : _cols;

    typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;
    typedef const_blas_data_mapper<Scalar, Index, RhsStorageOrder> RhsMapper;
    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
    LhsMapper lhs(_lhs,lhsStride);
    RhsMapper rhs(_rhs,rhsStride);
    ResMapper res(_res, resStride);

    Index kc = blocking.kc();                   // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction

    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*cols+EIGEN_MAX_ALIGN_BYTES/sizeof(Scalar);

    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());

    internal::constructor_without_unaligned_array_assert a;
    Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,RhsStorageOrder> triangularBuffer(a);
    triangularBuffer.setZero();
    if((Mode&ZeroDiag)==ZeroDiag)
      triangularBuffer.diagonal().setZero();
    else
      triangularBuffer.diagonal().setOnes();

    gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
    gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
    gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr, RhsStorageOrder,false,true> pack_rhs_panel;

    for(Index k2=IsLower ? 0 : depth;
        IsLower ? k2<depth : k2>0;
        IsLower ? k2+=kc : k2-=kc)
    {
      Index actual_kc = (std::min)(IsLower ? depth-k2 : k2, kc);
      Index actual_k2 = IsLower ? k2 : k2-actual_kc;

      // align blocks with the end of the triangular part for trapezoidal rhs
      if(IsLower && (k2<cols) && (actual_k2+actual_kc>cols))
      {
        actual_kc = cols-k2;
        k2 = actual_k2 + actual_kc - kc;
      }

      // remaining size
      Index rs = IsLower ? (std::min)(cols,actual_k2) : cols - k2;
      // size of the triangular part
      Index ts = (IsLower && actual_k2>=cols) ? 0 : actual_kc;

      Scalar* geb = blockB+ts*ts;
      geb = geb + internal::first_aligned<PacketBytes>(geb,PacketBytes/sizeof(Scalar));
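      // blockB is thus laid out as [ packed triangular panels (ts x ts) | general part 'geb' ]:
      // the rs remaining dense columns of the rhs are packed into geb (rounded up to a packet
      // boundary), while the triangular columns are packed panel by panel just below.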

      pack_rhs(geb, rhs.getSubMapper(actual_k2,IsLower ? 0 : k2), actual_kc, rs);

      // pack the triangular part of the rhs padding the unrolled blocks with zeros
      if(ts>0)
      {
        for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
        {
          Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
          Index actual_j2 = actual_k2 + j2;
          Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
          Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;
          // general part
          pack_rhs_panel(blockB+j2*actual_kc,
                         rhs.getSubMapper(actual_k2+panelOffset, actual_j2),
                         panelLength, actualPanelWidth,
                         actual_kc, panelOffset);

          // append the triangular part via a temporary buffer
          for (Index j=0;j<actualPanelWidth;++j)
          {
            if (SetDiag)
              triangularBuffer.coeffRef(j,j) = rhs(actual_j2+j,actual_j2+j);
            for (Index k=IsLower ? j+1 : 0; IsLower ? k<actualPanelWidth : k<j; ++k)
              triangularBuffer.coeffRef(k,j) = rhs(actual_j2+k,actual_j2+j);
          }

          pack_rhs_panel(blockB+j2*actual_kc,
                         RhsMapper(triangularBuffer.data(), triangularBuffer.outerStride()),
                         actualPanelWidth, actualPanelWidth,
                         actual_kc, j2);
        }
      }

      for (Index i2=0; i2<rows; i2+=mc)
      {
        const Index actual_mc = (std::min)(mc,rows-i2);
        pack_lhs(blockA, lhs.getSubMapper(i2, actual_k2), actual_kc, actual_mc);

        // triangular kernel
        if(ts>0)
        {
          for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
          {
            Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
            Index panelLength = IsLower ? actual_kc-j2 : j2+actualPanelWidth;
            Index blockOffset = IsLower ? j2 : 0;

            gebp_kernel(res.getSubMapper(i2, actual_k2 + j2),
                        blockA, blockB+j2*actual_kc,
                        actual_mc, panelLength, actualPanelWidth,
                        alpha,
                        actual_kc, actual_kc,      // strides
                        blockOffset, blockOffset); // offsets
          }
        }
        gebp_kernel(res.getSubMapper(i2, IsLower ? 0 : k2),
                    blockA, geb, actual_mc, actual_kc, rs,
                    alpha,
                    -1, -1, 0, 0);
      }
    }
  }

/***************************************************************************
* Wrapper to product_triangular_matrix_matrix
***************************************************************************/

} // end namespace internal

namespace internal {
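// Evaluator-level entry point: it extracts the scalar factors and the actual (possibly
// conjugated/transposed) operands nested in the expressions, picks the compile-time
// storage orders, sets up the blocking, and forwards everything to
// product_triangular_matrix_matrix::run().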
template<int Mode, bool LhsIsTriangular, typename Lhs, typename Rhs>
struct triangular_product_impl<Mode,LhsIsTriangular,Lhs,false,Rhs,false>
{
  template<typename Dest> static void run(Dest& dst, const Lhs &a_lhs, const Rhs &a_rhs, const typename Dest::Scalar& alpha)
  {
    typedef typename Lhs::Scalar  LhsScalar;
    typedef typename Rhs::Scalar  RhsScalar;
    typedef typename Dest::Scalar Scalar;

    typedef internal::blas_traits<Lhs> LhsBlasTraits;
    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
    typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;
    typedef internal::blas_traits<Rhs> RhsBlasTraits;
    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
    typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    LhsScalar lhs_alpha = LhsBlasTraits::extractScalarFactor(a_lhs);
    RhsScalar rhs_alpha = RhsBlasTraits::extractScalarFactor(a_rhs);
    Scalar actualAlpha = alpha * lhs_alpha * rhs_alpha;

    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar,
              Lhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxColsAtCompileTime,4> BlockingType;

    enum { IsLower = (Mode&Lower) == Lower };
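    // Strip the part of the product that is structurally zero: e.g. for an upper-triangular
    // lhs with more rows than columns, only the first min(rows,cols) rows of the triangular
    // factor can be non-zero, so the row/col/depth extents are clamped before blocking.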
    Index stripedRows  = ((!LhsIsTriangular) || (IsLower))  ? lhs.rows() : (std::min)(lhs.rows(),lhs.cols());
    Index stripedCols  = ((LhsIsTriangular)  || (!IsLower)) ? rhs.cols() : (std::min)(rhs.cols(),rhs.rows());
    Index stripedDepth = LhsIsTriangular ? ((!IsLower) ? lhs.cols() : (std::min)(lhs.cols(),lhs.rows()))
                                         : ((IsLower)  ? rhs.rows() : (std::min)(rhs.rows(),rhs.cols()));

    BlockingType blocking(stripedRows, stripedCols, stripedDepth, 1, false);

    internal::product_triangular_matrix_matrix<Scalar, Index,
      Mode, LhsIsTriangular,
      (internal::traits<ActualLhsTypeCleaned>::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
      (internal::traits<ActualRhsTypeCleaned>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,
      (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>
      ::run(
        stripedRows, stripedCols, stripedDepth,  // sizes
        &lhs.coeffRef(0,0), lhs.outerStride(),   // lhs info
        &rhs.coeffRef(0,0), rhs.outerStride(),   // rhs info
        &dst.coeffRef(0,0), dst.outerStride(),   // result info
        actualAlpha, blocking
      );

    // Apply correction if the diagonal is unit and a scalar factor was nested:
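    // The kernel above used a diagonal of exactly 1 while the scalar factor extracted from
    // the triangular operand (lhs_alpha / rhs_alpha) was folded into actualAlpha, i.e. it
    // was also applied to that unit diagonal; the code below subtracts the excess
    // (factor - 1) contribution over the rows/columns covered by the diagonal.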
    if ((Mode&UnitDiag)==UnitDiag)
    {
      if (LhsIsTriangular && lhs_alpha!=LhsScalar(1))
      {
        Index diagSize = (std::min)(lhs.rows(),lhs.cols());
        dst.topRows(diagSize) -= ((lhs_alpha-LhsScalar(1))*a_rhs).topRows(diagSize);
      }
      else if ((!LhsIsTriangular) && rhs_alpha!=RhsScalar(1))
      {
        Index diagSize = (std::min)(rhs.rows(),rhs.cols());
        dst.leftCols(diagSize) -= (rhs_alpha-RhsScalar(1))*a_lhs.leftCols(diagSize);
      }
    }
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_H