#ifndef EIGEN_SELFADJOINT_MATRIX_MATRIX_H
#define EIGEN_SELFADJOINT_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

// Pack a selfadjoint block into a sequential block of memory.
template<typename Scalar, typename Index, int Pack1, int Pack2_dummy, int StorageOrder>
struct symm_pack_lhs
{
  template<int BlockRows> inline
  void pack(Scalar* blockA, const const_blas_data_mapper<Scalar,Index,StorageOrder>& lhs, Index cols, Index i, Index& count)
  {
    // normal copy: the columns left of the diagonal block
    for(Index k=0; k<i; k++)
      for(Index w=0; w<BlockRows; w++)
        blockA[count++] = lhs(i+w,k);           // normal

    // symmetric copy: the diagonal block itself
    Index h = 0;
    for(Index k=i; k<i+BlockRows; k++)
    {
      for(Index w=0; w<h; w++)
        blockA[count++] = numext::conj(lhs(k, i+w)); // transposed

      blockA[count++] = numext::real(lhs(k,k));      // real (diagonal)

      for(Index w=h+1; w<BlockRows; w++)
        blockA[count++] = lhs(i+w, k);               // normal
      ++h;
    }

    // transposed copy: the columns right of the diagonal block,
    // read as conjugates from the stored triangle
    for(Index k=i+BlockRows; k<cols; k++)
      for(Index w=0; w<BlockRows; w++)
        blockA[count++] = numext::conj(lhs(k, i+w)); // transposed
  }
  void operator()(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows)
  {
    enum { PacketSize = packet_traits<Scalar>::size,
           HalfPacketSize = unpacket_traits<typename packet_traits<Scalar>::half>::size,
           QuarterPacketSize = unpacket_traits<typename unpacket_traits<typename packet_traits<Scalar>::half>::half>::size,
           HasHalf = (int)HalfPacketSize < (int)PacketSize,
           HasQuarter = (int)QuarterPacketSize < (int)HalfPacketSize };

    const_blas_data_mapper<Scalar,Index,StorageOrder> lhs(_lhs,lhsStride);
    Index count = 0;
    const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
    const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
    const Index peeled_mc1 = Pack1>=1*PacketSize ? peeled_mc2+((rows-peeled_mc2)/(1*PacketSize))*(1*PacketSize) : 0;
    const Index peeled_mc_half = Pack1>=HalfPacketSize ? peeled_mc1+((rows-peeled_mc1)/(HalfPacketSize))*(HalfPacketSize) : 0;
    const Index peeled_mc_quarter = Pack1>=QuarterPacketSize ? peeled_mc_half+((rows-peeled_mc_half)/(QuarterPacketSize))*(QuarterPacketSize) : 0;
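
    // Worked example of the peeling cascade (illustrative numbers, not from
    // the source): with PacketSize==4, HalfPacketSize==2, QuarterPacketSize==1,
    // Pack1>=12 and rows==23, we get peeled_mc3==12, peeled_mc2==20,
    // peeled_mc1==20, peeled_mc_half==22 and peeled_mc_quarter==23: one 12-row
    // block, one 8-row block, no 4-row block, one 2-row block, and a final
    // scalar row handled by the mr==1 loop below.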
    if(Pack1>=3*PacketSize)
      for(Index i=0; i<peeled_mc3; i+=3*PacketSize)
        pack<3*PacketSize>(blockA, lhs, cols, i, count);

    if(Pack1>=2*PacketSize)
      for(Index i=peeled_mc3; i<peeled_mc2; i+=2*PacketSize)
        pack<2*PacketSize>(blockA, lhs, cols, i, count);

    if(Pack1>=1*PacketSize)
      for(Index i=peeled_mc2; i<peeled_mc1; i+=1*PacketSize)
        pack<1*PacketSize>(blockA, lhs, cols, i, count);

    if(HasHalf && Pack1>=HalfPacketSize)
      for(Index i=peeled_mc1; i<peeled_mc_half; i+=HalfPacketSize)
        pack<HalfPacketSize>(blockA, lhs, cols, i, count);

    if(HasQuarter && Pack1>=QuarterPacketSize)
      for(Index i=peeled_mc_half; i<peeled_mc_quarter; i+=QuarterPacketSize)
        pack<QuarterPacketSize>(blockA, lhs, cols, i, count);
    // do the same with mr==1
    for(Index i=peeled_mc_quarter; i<rows; i++)
    {
      for(Index k=0; k<i; k++)
        blockA[count++] = lhs(i, k);                   // normal

      blockA[count++] = numext::real(lhs(i, i));       // real (diagonal)

      for(Index k=i+1; k<cols; k++)
        blockA[count++] = numext::conj(lhs(k, i));     // transposed
    }
  }
};
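
// Illustrative walk-through (values assumed for illustration, not from the
// source): calling pack<2>(blockA, lhs, cols, /*i=*/2, count) on a lower-
// stored selfadjoint lhs emits, for each column k,
//   k < 2        : lhs(2,k), lhs(3,k)               -- stored coefficients
//   k == 2, 3    : numext::real on the diagonal, numext::conj on the entry
//                  mirrored across it
//   4 <= k < cols: numext::conj(lhs(k,2)), numext::conj(lhs(k,3))
// so blockA presents a fully populated panel to the gebp kernel even though
// only one triangle of lhs is actually referenced.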
template<typename Scalar, typename Index, int nr, int StorageOrder>
struct symm_pack_rhs
{
  enum { PacketSize = packet_traits<Scalar>::size };
  void operator()(Scalar* blockB, const Scalar* _rhs, Index rhsStride, Index rows, Index cols, Index k2)
  {
    Index end_k = k2 + rows;
    Index count = 0;
    const_blas_data_mapper<Scalar,Index,StorageOrder> rhs(_rhs,rhsStride);
    Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
    Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
    // first part: standard case
    for(Index j2=0; j2<k2; j2+=nr)
    {
      for(Index k=k2; k<end_k; k++)
      {
        blockB[count+0] = rhs(k,j2+0);
        blockB[count+1] = rhs(k,j2+1);
        if(nr>=4)
        {
          blockB[count+2] = rhs(k,j2+2);
          blockB[count+3] = rhs(k,j2+3);
        }
        if(nr>=8)
        {
          blockB[count+4] = rhs(k,j2+4);
          blockB[count+5] = rhs(k,j2+5);
          blockB[count+6] = rhs(k,j2+6);
          blockB[count+7] = rhs(k,j2+7);
        }
        count += nr;
      }
    }
    // second part: diagonal block
    Index end8 = nr>=8 ? (std::min)(k2+rows,packet_cols8) : k2;
    if(nr>=8)
    {
      for(Index j2=k2; j2<end8; j2+=8)
      {
        // again we can split vertically in three different parts:
        // transpose
        for(Index k=k2; k<j2; k++)
        {
          blockB[count+0] = numext::conj(rhs(j2+0,k));
          blockB[count+1] = numext::conj(rhs(j2+1,k));
          blockB[count+2] = numext::conj(rhs(j2+2,k));
          blockB[count+3] = numext::conj(rhs(j2+3,k));
          blockB[count+4] = numext::conj(rhs(j2+4,k));
          blockB[count+5] = numext::conj(rhs(j2+5,k));
          blockB[count+6] = numext::conj(rhs(j2+6,k));
          blockB[count+7] = numext::conj(rhs(j2+7,k));
          count += 8;
        }
        // symmetric
        Index h = 0;
        for(Index k=j2; k<j2+8; k++)
        {
          // normal
          for(Index w=0; w<h; ++w)
            blockB[count+w] = rhs(k,j2+w);

          blockB[count+h] = numext::real(rhs(k,k));

          // transpose
          for(Index w=h+1; w<8; ++w)
            blockB[count+w] = numext::conj(rhs(j2+w,k));
          count += 8;
          ++h;
        }
        // normal
        for(Index k=j2+8; k<end_k; k++)
        {
          blockB[count+0] = rhs(k,j2+0);
          blockB[count+1] = rhs(k,j2+1);
          blockB[count+2] = rhs(k,j2+2);
          blockB[count+3] = rhs(k,j2+3);
          blockB[count+4] = rhs(k,j2+4);
          blockB[count+5] = rhs(k,j2+5);
          blockB[count+6] = rhs(k,j2+6);
          blockB[count+7] = rhs(k,j2+7);
          count += 8;
        }
      }
    }
    if(nr>=4)
    {
      for(Index j2=end8; j2<(std::min)(k2+rows,packet_cols4); j2+=4)
      {
        // again we can split vertically in three different parts:
        // transpose
        for(Index k=k2; k<j2; k++)
        {
          blockB[count+0] = numext::conj(rhs(j2+0,k));
          blockB[count+1] = numext::conj(rhs(j2+1,k));
          blockB[count+2] = numext::conj(rhs(j2+2,k));
          blockB[count+3] = numext::conj(rhs(j2+3,k));
          count += 4;
        }
        // symmetric
        Index h = 0;
        for(Index k=j2; k<j2+4; k++)
        {
          // normal
          for(Index w=0; w<h; ++w)
            blockB[count+w] = rhs(k,j2+w);

          blockB[count+h] = numext::real(rhs(k,k));

          // transpose
          for(Index w=h+1; w<4; ++w)
            blockB[count+w] = numext::conj(rhs(j2+w,k));
          count += 4;
          ++h;
        }
        // normal
        for(Index k=j2+4; k<end_k; k++)
        {
          blockB[count+0] = rhs(k,j2+0);
          blockB[count+1] = rhs(k,j2+1);
          blockB[count+2] = rhs(k,j2+2);
          blockB[count+3] = rhs(k,j2+3);
          count += 4;
        }
      }
    }
    // third part: transposed
    if(nr>=8)
    {
      for(Index j2=k2+rows; j2<packet_cols8; j2+=8)
      {
        for(Index k=k2; k<end_k; k++)
        {
          blockB[count+0] = numext::conj(rhs(j2+0,k));
          blockB[count+1] = numext::conj(rhs(j2+1,k));
          blockB[count+2] = numext::conj(rhs(j2+2,k));
          blockB[count+3] = numext::conj(rhs(j2+3,k));
          blockB[count+4] = numext::conj(rhs(j2+4,k));
          blockB[count+5] = numext::conj(rhs(j2+5,k));
          blockB[count+6] = numext::conj(rhs(j2+6,k));
          blockB[count+7] = numext::conj(rhs(j2+7,k));
          count += 8;
        }
      }
    }
    if(nr>=4)
    {
      for(Index j2=(std::max)(packet_cols8,k2+rows); j2<packet_cols4; j2+=4)
      {
        for(Index k=k2; k<end_k; k++)
        {
          blockB[count+0] = numext::conj(rhs(j2+0,k));
          blockB[count+1] = numext::conj(rhs(j2+1,k));
          blockB[count+2] = numext::conj(rhs(j2+2,k));
          blockB[count+3] = numext::conj(rhs(j2+3,k));
          count += 4;
        }
      }
    }
    // copy the remaining columns one at a time (i.e. the same with nr==1)
    for(Index j2=packet_cols4; j2<cols; ++j2)
    {
      // transpose
      Index half = (std::min)(end_k,j2);
      for(Index k=k2; k<half; k++)
      {
        blockB[count] = numext::conj(rhs(j2,k));
        count += 1;
      }

      if(half==j2 && half<k2+rows)
      {
        blockB[count] = numext::real(rhs(j2,j2));
        count += 1;
      }
      else
        half--;

      // normal
      for(Index k=half+1; k<k2+rows; k++)
      {
        blockB[count] = rhs(k,j2);
        count += 1;
      }
    }
  }
};
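
// Reading aid (summary comment, not part of the original source): relative to
// the horizontal rhs panel [k2, k2+rows), the column index j2 falls into one
// of three zones:
//   j2 <  k2           -> plain copy of stored coefficients (first part);
//   k2 <= j2 < k2+rows -> the strip crosses the diagonal, mixing stored,
//                         real-diagonal and conjugate-mirrored entries
//                         (second part);
//   j2 >= k2+rows      -> every entry is read from the opposite triangle via
//                         numext::conj(rhs(j2+w,k)) (third part).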
/* Optimized selfadjoint matrix * matrix (_SYMM) product built on top of
 * the general matrix matrix product.
 */
template <typename Scalar, typename Index,
          int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,
          int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs,
          int ResStorageOrder, int ResInnerStride>
struct product_selfadjoint_matrix;
template <typename Scalar, typename Index,
          int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,
          int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs,
          int ResInnerStride>
struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,LhsSelfAdjoint,ConjugateLhs,
                                  RhsStorageOrder,RhsSelfAdjoint,ConjugateRhs,RowMajor,ResInnerStride>
{
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols,
    const Scalar* lhs, Index lhsStride,
    const Scalar* rhs, Index rhsStride,
    Scalar* res,       Index resIncr, Index resStride,
    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
  {
    product_selfadjoint_matrix<Scalar, Index,
      EIGEN_LOGICAL_XOR(RhsSelfAdjoint,RhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
      RhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsSelfAdjoint,ConjugateRhs),
      EIGEN_LOGICAL_XOR(LhsSelfAdjoint,LhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
      LhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsSelfAdjoint,ConjugateLhs),
      ColMajor,ResInnerStride>
      ::run(cols, rows, rhs, rhsStride, lhs, lhsStride, res, resIncr, resStride, alpha, blocking);
  }
};
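
// Why the swap above is valid (explanatory note, not in the original source):
// for any matrices, (A*B)^T = B^T * A^T. Writing A*B into a row-major result
// is equivalent to writing B^T * A^T into a column-major view of the same
// memory, so run() forwards to the ColMajor specialization with rows/cols and
// lhs/rhs exchanged and the operands' storage orders adjusted (a selfadjoint
// operand can absorb the transpose via conjugation instead, hence the
// EIGEN_LOGICAL_XOR terms).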
// selfadjoint lhs * dense rhs
template <typename Scalar, typename Index,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs,
          int ResInnerStride>
struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs,
                                  RhsStorageOrder,false,ConjugateRhs,ColMajor,ResInnerStride>
{
  static EIGEN_DONT_INLINE void run(
    Index rows, Index cols,
    const Scalar* _lhs, Index lhsStride,
    const Scalar* _rhs, Index rhsStride,
    Scalar* res,        Index resIncr, Index resStride,
    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
};
template <typename Scalar, typename Index,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs,
          int ResInnerStride>
EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs,
                                                  RhsStorageOrder,false,ConjugateRhs,ColMajor,ResInnerStride>::run(
    Index rows, Index cols,
    const Scalar* _lhs, Index lhsStride,
    const Scalar* _rhs, Index rhsStride,
    Scalar* _res,       Index resIncr, Index resStride,
    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
  {
    Index size = rows;

    typedef gebp_traits<Scalar,Scalar> Traits;

    typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;
    typedef const_blas_data_mapper<Scalar, Index, (LhsStorageOrder == RowMajor) ? ColMajor : RowMajor> LhsTransposeMapper;
    typedef const_blas_data_mapper<Scalar, Index, RhsStorageOrder> RhsMapper;
    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor, Unaligned, ResInnerStride> ResMapper;
    LhsMapper lhs(_lhs,lhsStride);
    LhsTransposeMapper lhs_transpose(_lhs,lhsStride);
    RhsMapper rhs(_rhs,rhsStride);
    ResMapper res(_res, resStride, resIncr);
    Index kc = blocking.kc();                   // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
    // kc must be smaller than mc
    kc = (std::min)(kc,mc);
    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*cols;
    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());

    gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
    symm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
    gemm_pack_lhs<Scalar, Index, LhsTransposeMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing,
                  LhsStorageOrder==RowMajor?ColMajor:RowMajor, true> pack_lhs_transposed;
    for(Index k2=0; k2<size; k2+=kc)
    {
      const Index actual_kc = (std::min)(k2+kc,size)-k2;

      // we have selected one row panel of rhs and one column panel of lhs;
      // pack rhs's panel into a sequential chunk of memory
      pack_rhs(blockB, rhs.getSubMapper(k2,0), actual_kc, cols);

      // the selected lhs's panel has to be split in three different parts:
      //  1 - the transposed panel above the diagonal block => transposed packed copy
      //  2 - the diagonal block => special packed copy
      //  3 - the panel below the diagonal block => generic packed copy
      for(Index i2=0; i2<k2; i2+=mc)
      {
        const Index actual_mc = (std::min)(i2+mc,k2)-i2;
        // transposed packed copy
        pack_lhs_transposed(blockA, lhs_transpose.getSubMapper(i2, k2), actual_kc, actual_mc);

        gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);
      }
      // the block diagonal
      {
        const Index actual_mc = (std::min)(k2+kc,size)-k2;
        // symmetric packed copy
        pack_lhs(blockA, &lhs(k2,k2), lhsStride, actual_kc, actual_mc);

        gebp_kernel(res.getSubMapper(k2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);
      }
      for(Index i2=k2+kc; i2<size; i2+=mc)
      {
        const Index actual_mc = (std::min)(i2+mc,size)-i2;
        gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder, false>()
          (blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);

        gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);
      }
    }
  }
// dense lhs * selfadjoint rhs
template <typename Scalar, typename Index,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs,
          int ResInnerStride>
struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs,
                                  RhsStorageOrder,true,ConjugateRhs,ColMajor,ResInnerStride>
{
  static EIGEN_DONT_INLINE void run(
    Index rows, Index cols,
    const Scalar* _lhs, Index lhsStride,
    const Scalar* _rhs, Index rhsStride,
    Scalar* res,        Index resIncr, Index resStride,
    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
};
template <typename Scalar, typename Index,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs,
          int ResInnerStride>
EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs,
                                                  RhsStorageOrder,true,ConjugateRhs,ColMajor,ResInnerStride>::run(
    Index rows, Index cols,
    const Scalar* _lhs, Index lhsStride,
    const Scalar* _rhs, Index rhsStride,
    Scalar* _res,       Index resIncr, Index resStride,
    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
  {
    Index size = cols;

    typedef gebp_traits<Scalar,Scalar> Traits;

    typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;
    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor, Unaligned, ResInnerStride> ResMapper;
    LhsMapper lhs(_lhs,lhsStride);
    ResMapper res(_res,resStride, resIncr);

    Index kc = blocking.kc();                   // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*cols;
    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());

    gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
    gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
    symm_pack_rhs<Scalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
    for(Index k2=0; k2<size; k2+=kc)
    {
      const Index actual_kc = (std::min)(k2+kc,size)-k2;

      // symmetric packed copy of the rhs panel
      pack_rhs(blockB, _rhs, rhsStride, actual_kc, cols, k2);

      // => GEPP
      for(Index i2=0; i2<rows; i2+=mc)
      {
        const Index actual_mc = (std::min)(i2+mc,rows)-i2;
        pack_lhs(blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);

        gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);
      }
    }
  }
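
// Note (summary, not in the original source): in this rhs-selfadjoint case
// all of the symmetry handling is confined to symm_pack_rhs; once blockB is
// packed, the i2 loop is an ordinary GEPP sweep, identical in shape to the
// general matrix*matrix product.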
} // end namespace internal

/***************************************************************************
* Wrapper to product_selfadjoint_matrix
***************************************************************************/

namespace internal {

template<typename Lhs, int LhsMode, typename Rhs, int RhsMode>
struct selfadjoint_product_impl<Lhs,LhsMode,false,Rhs,RhsMode,false>
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;

  enum {
    LhsIsUpper = (LhsMode&(Upper|Lower))==Upper,
    LhsIsSelfAdjoint = (LhsMode&SelfAdjoint)==SelfAdjoint,
    RhsIsUpper = (RhsMode&(Upper|Lower))==Upper,
    RhsIsSelfAdjoint = (RhsMode&SelfAdjoint)==SelfAdjoint
  };
  template<typename Dest>
  static void run(Dest &dst, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)
  {
    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);
    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar,
              Lhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxColsAtCompileTime,1> BlockingType;

    BlockingType blocking(lhs.rows(), rhs.cols(), lhs.cols(), 1, false);
    internal::product_selfadjoint_matrix<Scalar, Index,
      EIGEN_LOGICAL_XOR(LhsIsUpper, internal::traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsIsSelfAdjoint,
      NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsIsUpper, bool(LhsBlasTraits::NeedToConjugate)),
      EIGEN_LOGICAL_XOR(RhsIsUpper, internal::traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsIsSelfAdjoint,
      NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsIsUpper, bool(RhsBlasTraits::NeedToConjugate)),
      internal::traits<Dest>::Flags&RowMajorBit ? RowMajor : ColMajor,
      Dest::InnerStrideAtCompileTime>
      ::run(
        lhs.rows(), rhs.cols(),                                    // sizes
        &lhs.coeffRef(0,0), lhs.outerStride(),                     // lhs info
        &rhs.coeffRef(0,0), rhs.outerStride(),                     // rhs info
        &dst.coeffRef(0,0), dst.innerStride(), dst.outerStride(),  // result info
        actualAlpha, blocking                                      // scaling factor and blocking
      );
  }
};

} // end namespace internal
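
// Typical entry point (illustrative usage; n, m and the matrices are assumed,
// not taken from this file):
//
//   Eigen::MatrixXd A(n,n), B(n,m), C(n,m);
//   C.noalias() = A.selfadjointView<Eigen::Lower>() * B;  // reads only A's lower triangle
//
// The SelfAdjointView * dense expression dispatches to selfadjoint_product_impl
// above, which in turn calls product_selfadjoint_matrix::run().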
} // end namespace Eigen

#endif // EIGEN_SELFADJOINT_MATRIX_MATRIX_H