#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

/* Specialization for a row-major destination matrix => simple transposition of the product */
template<typename Index,
         typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
         typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,
                                     RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(Index rows, Index cols, Index depth,
                                      const LhsScalar* lhs, Index lhsStride,
                                      const RhsScalar* rhs, Index rhsStride,
                                      ResScalar* res, Index resStride,
                                      ResScalar alpha,
                                      level3_blocking<RhsScalar,LhsScalar>& blocking,
                                      GemmParallelInfo<Index>* info = 0)
  {
    // Transpose the product such that the result is column major:
    // the col-major kernel computes C^T = B^T * A^T, so lhs/rhs and rows/cols swap.
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};
/* Specialization for a col-major destination matrix
 *    => Blocking algorithm following Goto's paper */
template<typename Index,
         typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
         typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,
                                     RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
  typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
                const LhsScalar* _lhs, Index lhsStride,
                const RhsScalar* _rhs, Index rhsStride,
                ResScalar* res, Index resStride,
                ResScalar alpha,
                level3_blocking<LhsScalar,RhsScalar>& blocking,
                GemmParallelInfo<Index>* info = 0)
{
  // Wrap the raw pointers so that lhs(i,j)/rhs(i,j) honor the storage order.
  const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
  const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);

  typedef gebp_traits<LhsScalar,RhsScalar> Traits;
  Index kc = blocking.kc();                   // cache block size along the K (depth) direction
  Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M (rows) direction
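  // Worker functors (declared here as in the full source):
  //  - gemm_pack_lhs copies an mc-by-kc block of the lhs into a packed, contiguous buffer (A');
  //  - gemm_pack_rhs copies a kc-by-nc panel of the rhs likewise (B');
  //  - gebp_kernel computes C_block += alpha * A' * B' on the packed operands.
  gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
  gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
  gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;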
#ifdef EIGEN_HAS_OPENMP
  if(info)
  {
    // this is the parallel version!
    Index tid = omp_get_thread_num();
    Index threads = omp_get_num_threads();
    std::size_t sizeA = kc*mc;
    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
    ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);
    RhsScalar* blockB = blocking.blockB();
    eigen_internal_assert(blockB!=0);
    // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
    for(Index k=0; k<depth; k+=kc)
    {
      const Index actual_kc = (std::min)(k+kc,depth)-k;
      // In order to reduce the chance that a thread has to wait for the others,
      // start by packing the first block A' of the lhs.
      pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);
      // Pack B_k to B' in parallel: each thread packs the sub-block B_k,j to B'_j,
      // where j is the thread id. Before overwriting B'_j we must wait until no
      // other thread is still reading it (info[tid].users==0), then mark it as
      // in use by all threads.
      while(info[tid].users!=0) {}
      info[tid].users += threads;
      pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);
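      // Notify the other threads that this part B'_j is ready to go.
      info[tid].sync = k;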
      // Compute C_i += A' * B'_j for each thread's panel, starting with our own.
      for(Index shift=0; shift<threads; ++shift)
      {
        Index j = (tid+shift)%threads;
        // Make sure B'_j has been fully packed by thread j before using it
        // (no need to wait for the part packed by the current thread).
        if(shift>0)
          while(info[j].sync!=k) {}
        gebp(res+info[j].rhs_start*resStride, resStride, blockA,
             blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length,
             alpha, -1,-1,0,0, w);
      }
      // Then keep going as usual with the remaining blocks A' of the lhs.
      for(Index i=mc; i<rows; i+=mc)
      {
        const Index actual_mc = (std::min)(i+mc,rows)-i;
        // pack A_i,k to A'
        pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
        // C_i += A' * B'
        gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
      }
      // Release all the sub-blocks B'_j of B' for the current thread,
      // i.e., decrement their number of users by one.
      for(Index j=0; j<threads; ++j)
        #pragma omp atomic
        --(info[j].users);
    }
  }
  else
#endif // EIGEN_HAS_OPENMP
  {
    EIGEN_UNUSED_VARIABLE(info);

    // this is the sequential version!
    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*cols;
    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
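    // Reuse the blocking object's buffers when available; otherwise the macro
    // falls back to aligned stack/heap allocation.
    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());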
    // For each horizontal panel of the rhs, and corresponding panel of the lhs... (GEMM_VAR1)
    for(Index k2=0; k2<depth; k2+=kc)
    {
      const Index actual_kc = (std::min)(k2+kc,depth)-k2;
      // Pack the rhs's horizontal panel into a sequential chunk of memory (L2 caching).
      // This panel will be read as many times as the number of blocks in the lhs's
      // vertical panel, which is a very low number in practice.
      pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);
      // For each mc x kc block of the lhs's vertical panel... (GEPP_VAR1)
      for(Index i2=0; i2<rows; i2+=mc)
      {
        const Index actual_mc = (std::min)(i2+mc,rows)-i2;
        // Pack the lhs's block into a sequential chunk of memory (L1 caching).
        // This block is read a very high number of times: once per micro vertical
        // panel of the large rhs panel (e.g., cols/4 times).
        pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);
        // Everything is packed, we can now call the block x panel kernel:
        gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
      }
    }
  }
}
};
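/*********************************************************************************
*  Specialization of GeneralProduct<> for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/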
template<typename Lhs, typename Rhs>
struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
  : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};
template<typename Scalar, typename Index, typename Gemm,
         typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha,
               BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}
  void initParallelSession() const
  {
    // Allocate the shared B panel buffer up-front, before entering the parallel section.
    m_blocking.allocateB();
  }
  void operator() (Index row, Index rows, Index col=0, Index cols=-1,
                   GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }
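  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};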
// The last template parameter selects between statically allocated blocking
// buffers (small fixed sizes) and dynamically allocated ones.
template<int StorageOrder, typename LhsScalar, typename RhsScalar,
         int MaxRows, int MaxCols, int MaxDepth, int KcFactor = 1,
         bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth!=Dynamic>
class gemm_blocking_space;
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;
    RhsScalar* m_blockW;
    DenseIndex m_mc, m_nc, m_kc;

  public:
    level3_blocking()
      : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
    {}
    inline DenseIndex mc() const { return m_mc; }
    inline DenseIndex nc() const { return m_nc; }
    inline DenseIndex kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
    inline RhsScalar* blockW() { return m_blockW; }
};
// Specialization for small, fixed sizes: the blocking buffers are static
// arrays inside the object. Note that for a row-major destination the lhs
// and rhs scalar types are swapped, matching the transposition trick above.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar,
         int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows,MaxCols,MaxDepth,KcFactor,true>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose  = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth,
      SizeW = MaxDepth * Traits::WorkSpaceFactor
    };

    EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
    EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];

  public:
    gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
      this->m_blockW = m_staticW;
    }
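    // Nothing to allocate at run time: the buffers are fixed-size members.
    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateW() {}
    inline void allocateAll() {}
};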
// Specialization for dynamic or large sizes: the blocking buffers are
// allocated on the heap, on demand.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar,
         int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows,MaxCols,MaxDepth,KcFactor,false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum { Transpose = StorageOrder==RowMajor };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    DenseIndex m_sizeA, m_sizeB, m_sizeW;

  public:
    gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      // Shrink kc/mc/nc to cache-friendly values for the given problem size.
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
      m_sizeW = this->m_kc * Traits::WorkSpaceFactor;
    }
    // Lazily allocate each aligned buffer on first use.
    void allocateA() { if(this->m_blockA==0) this->m_blockA = aligned_new<LhsScalar>(m_sizeA); }
    void allocateB() { if(this->m_blockB==0) this->m_blockB = aligned_new<RhsScalar>(m_sizeB); }
    void allocateW() { if(this->m_blockW==0) this->m_blockW = aligned_new<RhsScalar>(m_sizeW); }
    void allocateAll() { allocateA(); allocateB(); allocateW(); }
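    // Free with the matching aligned deallocator (a no-op on null pointers).
    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
      aligned_delete(this->m_blockW, m_sizeW);
    }
};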
template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
  : public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs>
{
    enum {
      MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
    };
  public:
    template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
    {
      eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());

      // Strip any scalar multiples and conjugations from the operands.
      typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
      typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);
      Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
                                 * RhsBlasTraits::extractScalarFactor(m_rhs);
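      // E.g., for dst.noalias() += alpha * (s1*A) * (s2*B), the factors s1 and s2
      // are peeled off above, so the kernel runs on the bare matrices with
      // actualAlpha = alpha*s1*s2.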
      typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,
              LhsScalar, RhsScalar,
              Dest::MaxRowsAtCompileTime, Dest::MaxColsAtCompileTime, MaxDepthAtCompileTime> BlockingType;
      BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());
      // Only parallelize products that are large enough (or dynamically sized).
      internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
        (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
    }
};
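// Usage sketch (for orientation; GemmFunctor is the internal::gemm_functor
// instantiation defined in the full source): an expression such as
//   MatrixXd A(m,k), B(k,n), C(m,n);
//   C.noalias() += 2.0 * A * B;
// reaches GeneralProduct<Lhs,Rhs,GemmProduct>::scaleAndAddTo(), which dispatches
// to general_matrix_matrix_product::run() above.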
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H