Go to the documentation of this file.
// Pull all Eigen symbols into scope for this benchmark translation unit.
19 using namespace Eigen;
// Scalar types of the A and B operands; both default to SCALAR, which is
// expected to be set on the compiler command line.
27 #define SCALARA SCALAR
31 #define SCALARB SCALAR
// File-scope scalar constants: Fortran-style BLAS routines take alpha/beta
// by address, so these exist to have stable addresses to pass as &fone etc.
59 static float fone = 1;
60 static float fzero = 0;
61 static double done = 1;
// NOTE(review): holds the double-precision zero but is named 'szero' --
// presumably a historical typo for 'dzero'. Kept as-is: unseen parts of the
// file may reference it by this name.
62 static double szero = 0;
63 static std::complex<float> cfone = 1;
64 static std::complex<float> cfzero = 0;
65 static std::complex<double> cdone = 1;
66 static std::complex<double> cdzero = 0;
// Character flags passed by address to BLAS: transposition, triangle
// selection, and side selection respectively.
68 static char trans =
'T';
70 static char lower =
'L';
71 static char right =
'R';
// Operand transposition flags used by the blas_gemm wrappers below.
// NOTE(review): the listing skips lines around here; in the full source
// these may be chosen via preprocessor switches -- confirm before relying
// on both always aliasing 'trans'.
75 const char transA =
trans;
81 const char transB =
trans;
86 template<
typename A,
typename B>
87 void blas_gemm(
const A&
a,
const B&
b, MatrixXf&
c)
89 int M =
c.rows();
int N =
c.cols();
int K =
a.cols();
90 int lda =
a.outerStride();
int ldb =
b.outerStride();
int ldc =
c.rows();
92 sgemm_(&transA,&transB,&
M,&
N,&
K,&fone,
93 const_cast<float*
>(
a.data()),&
lda,
94 const_cast<float*
>(
b.data()),&ldb,&fone,
98 template<
typename A,
typename B>
99 void blas_gemm(
const A&
a,
const B&
b, MatrixXd&
c)
101 int M =
c.rows();
int N =
c.cols();
int K =
a.cols();
102 int lda =
a.outerStride();
int ldb =
b.outerStride();
int ldc =
c.rows();
104 dgemm_(&transA,&transB,&
M,&
N,&
K,&done,
105 const_cast<double*
>(
a.data()),&
lda,
106 const_cast<double*
>(
b.data()),&ldb,&done,
110 template<
typename A,
typename B>
111 void blas_gemm(
const A&
a,
const B&
b, MatrixXcf&
c)
113 int M =
c.rows();
int N =
c.cols();
int K =
a.cols();
114 int lda =
a.outerStride();
int ldb =
b.outerStride();
int ldc =
c.rows();
116 cgemm_(&transA,&transB,&
M,&
N,&
K,(
float*)&cfone,
117 const_cast<float*
>((
const float*)
a.data()),&
lda,
118 const_cast<float*
>((
const float*)
b.data()),&ldb,(
float*)&cfone,
119 (
float*)
c.data(),&ldc);
122 template<
typename A,
typename B>
123 void blas_gemm(
const A&
a,
const B&
b, MatrixXcd&
c)
125 int M =
c.rows();
int N =
c.cols();
int K =
a.cols();
126 int lda =
a.outerStride();
int ldb =
b.outerStride();
int ldc =
c.rows();
128 zgemm_(&transA,&transB,&
M,&
N,&
K,(
double*)&cdone,
129 const_cast<double*
>((
const double*)
a.data()),&
lda,
130 const_cast<double*
>((
const double*)
b.data()),&ldb,(
double*)&cdone,
131 (
double*)
c.data(),&ldc);
140 cr.noalias() += ar * br;
141 cr.noalias() -= ai * bi;
142 ci.noalias() += ar * bi;
143 ci.noalias() += ai * br;
149 cr.noalias() +=
a * br;
150 ci.noalias() +=
a * bi;
155 cr.noalias() += ar *
b;
156 ci.noalias() += ai *
b;
161 template<
typename A,
typename B,
typename C>
164 c.noalias() +=
a *
b;
// Benchmark driver. NOTE(review): this listing is gappy -- the docs
// extractor dropped many source lines (braces, declarations of r/tries/rep,
// parts of the argument parser), so the comments below describe only what
// is visible and hedge everything else.
167 int main(
int argc,
char ** argv)
// Report detected CPU cache sizes (l1/l2 presumably queried just above, in
// lines not shown); -1 is printed when a size could not be determined.
171 std::cout <<
"L1 cache size = " << (
l1>0 ?
l1/1024 : -1) <<
" KB\n";
172 std::cout <<
"L2/L3 cache size = " << (
l2>0 ?
l2/1024 : -1) <<
" KB\n";
// Eigen's product-kernel traits expose the register-blocking factors mr x nr.
173 typedef internal::gebp_traits<Scalar,Scalar> Traits;
174 std::cout <<
"Register blocking = " << Traits::mr <<
" x " << Traits::nr <<
"\n";
// Cache-size overrides; cache_size2 defaults to the detected L2/L3 size.
183 int cache_size1=-1, cache_size2=
l2, cache_size3 = 0;
// Command-line parsing: visible branches handle -c <cache sizes>,
// -t <nb tries>, -p <nb repeats>; -s <matrix sizes> appears in the usage
// text but its branch is in lines not shown here.
185 bool need_help =
false;
186 for (
int i=1;
i<argc;)
201 else if(argv[
i][1]==
'c')
204 cache_size1 = atoi(argv[
i++]);
207 cache_size2 = atoi(argv[
i++]);
209 cache_size3 = atoi(argv[
i++]);
212 else if(argv[
i][1]==
't')
214 tries = atoi(argv[++
i]);
217 else if(argv[
i][1]==
'p')
220 rep = atoi(argv[
i++]);
// Usage text (printed when need_help is set, presumably).
232 std::cout << argv[0] <<
" -s <matrix sizes> -c <cache sizes> -t <nb tries> -p <nb repeats>\n";
233 std::cout <<
" <matrix sizes> : size\n";
234 std::cout <<
" <matrix sizes> : rows columns depth\n";
// Cache sizes are fed to Eigen through a version-dependent API.
238 #if EIGEN_VERSION_AT_LEAST(3,2,90)
// Operands: a is m x p, b is p x n, accumulator c is m x n. c starts at
// ones so the += products below have a well-defined baseline.
243 A a(
m,
p);
a.setRandom();
244 B b(
p,
n);
b.setRandom();
245 C c(
m,
n);
c.setOnes();
248 std::cout <<
"Matrix sizes = " <<
m <<
"x" <<
p <<
" * " <<
p <<
"x" <<
n <<
"\n";
// Query Eigen's cache-blocking panel sizes for this problem shape.
249 std::ptrdiff_t mc(
m),
nc(
n), kc(
p);
250 internal::computeProductBlockingSizes<Scalar,Scalar>(kc, mc,
nc);
251 std::cout <<
"blocking size (mc x kc) = " << mc <<
" x " << kc <<
" x " <<
nc <<
"\n";
// Correctness check before timing: compare the OpenMP-parallel (or BLAS)
// product against a reference result r computed with Eigen.
256 #if defined EIGEN_HAS_OPENMP
265 r.noalias() +=
a *
b;
268 c.noalias() +=
a *
b;
269 if(!r.isApprox(
c)) std::cerr <<
"Warning, your parallel product is crap!\n\n";
271 #elif defined HAVE_BLAS
273 c.noalias() +=
a *
b;
275 std::cout << (r -
c).norm()/r.norm() <<
"\n";
276 std::cerr <<
"Warning, your product is crap!\n\n";
// Exhaustive verification only for problems small enough to check quickly.
279 if(1.*
m*
n*
p<2000.*2000*2000)
284 std::cout << (r -
c).norm()/r.norm() <<
"\n";
285 std::cerr <<
"Warning, your product is crap!\n\n";
// Time the BLAS wrapper defined earlier in this file.
293 BENCH(tblas, tries, rep, blas_gemm(
a,
b,
c));
// Artificial use of the results: the comparison is never expected to hold,
// it only prevents the optimizer from discarding the benchmarked work.
299 if(
b.norm()+
a.norm()==123.554) std::cout <<
"\n";
307 #ifdef EIGEN_HAS_OPENMP
// The lazy (non-blocked) product is only timed for tiny problems.
321 if(1.*
m*
n*
p<30*30*30)
325 BENCH(tmt, tries, rep,
c.noalias()+=
a.lazyProduct(
b));
// Operand setup for the split real/imaginary "matlab" benchmarks:
// complex * complex ...
333 M ar(
m,
p); ar.setRandom();
334 M ai(
m,
p); ai.setRandom();
335 M br(
p,
n); br.setRandom();
336 M bi(
p,
n); bi.setRandom();
337 M cr(
m,
n); cr.setRandom();
338 M ci(
m,
n); ci.setRandom();
// ... real * complex ...
347 M a(
m,
p);
a.setRandom();
348 M br(
p,
n); br.setRandom();
349 M bi(
p,
n); bi.setRandom();
350 M cr(
m,
n); cr.setRandom();
351 M ci(
m,
n); ci.setRandom();
// ... and complex * real (each group presumably lives in its own scope,
// since the names a/ar/br/cr repeat -- braces were dropped by the extractor).
360 M ar(
m,
p); ar.setRandom();
361 M ai(
m,
p); ai.setRandom();
362 M b(
p,
n);
b.setRandom();
363 M cr(
m,
n); cr.setRandom();
364 M ci(
m,
n); ci.setRandom();
Namespace containing all symbols from the Eigen library.
Matrix< SCALARB, Dynamic, Dynamic, opt_B > B
Array< double, 1, 3 > e(1./3., 0.5, 2.)
void omp_set_num_threads(int num_threads)
void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2, std::ptrdiff_t l3)
Matrix< SCALARA, Dynamic, Dynamic, opt_A > A
double total(int TIMER=CPU_TIMER) const
#define BENCH(TIMER, TRIES, REP, CODE)
EIGEN_DONT_INLINE void gemm(const A &a, const B &b, C &c)
int queryTopLevelCacheSize()
int main(int argc, char **argv)
void matlab_real_cplx(const M &a, const M &br, const M &bi, M &cr, M &ci)
void matlab_cplx_cplx(const M &ar, const M &ai, const M &br, const M &bi, M &cr, M &ci)
NumTraits< Scalar >::Real RealScalar
Matrix< Scalar, Dynamic, Dynamic > C
int omp_get_max_threads(void)
double best(int TIMER=CPU_TIMER) const
The matrix class, also used for vectors and row-vectors.
Holds information about the various numeric (i.e. scalar) types allowed by Eigen.
#define EIGEN_DONT_INLINE
void matlab_cplx_real(const M &ar, const M &ai, const M &b, M &cr, M &ci)
Matrix< RealScalar, Dynamic, Dynamic > M
gtsam
Author(s):
autogenerated on Sat Nov 16 2024 04:01:54