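// Dense matrix-matrix product benchmark: it times Eigen's GEMM kernel, optionally a
// reference BLAS (when HAVE_BLAS is defined), and, when DECOUPLED is defined, a
// "matlab"-style product computed from separately stored real and imaginary parts.
// Example build (compiler, paths and flags are only a suggestion):
//   g++ -O2 -DNDEBUG -I.. -fopenmp bench_gemm.cpp -o bench_gemm
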
#include <iostream>
#include <Eigen/Core>
#include <bench/BenchTimer.h>

using namespace std;
using namespace Eigen;

#ifndef SCALAR
#define SCALAR float
#endif

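// The scalar type can be selected at compile time with -DSCALAR=<type> (float by
// default). A and M use the real part of that type, while B and C use the scalar
// type itself, so that mixed real/complex products can be exercised as well.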
typedef SCALAR Scalar;
typedef NumTraits<Scalar>::Real RealScalar;
typedef Matrix<RealScalar,Dynamic,Dynamic> A;
typedef Matrix<Scalar,Dynamic,Dynamic> B;
typedef Matrix<Scalar,Dynamic,Dynamic> C;
typedef Matrix<RealScalar,Dynamic,Dynamic> M;

#ifdef HAVE_BLAS

extern "C" {
#include <bench/btl/libs/C_BLAS/blas.h>
}

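// The Fortran BLAS interface takes every argument by address, hence the named
// constants below (only a subset is used by the gemm wrappers in this file).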
static float fone = 1;
static float fzero = 0;
static double done = 1;
static double dzero = 0;
static std::complex<float> cfone = 1;
static std::complex<float> cfzero = 0;
static std::complex<double> cdone = 1;
static std::complex<double> cdzero = 0;
static char notrans = 'N';
static char trans = 'T';
static char nonunit = 'N';
static char lower = 'L';
static char right = 'R';
static int intone = 1;

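// Thin wrappers around {s,d,c,z}gemm_. Both alpha and beta are set to one, so each
// call computes c += a*b, matching the Eigen expression benchmarked below.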
void blas_gemm(const MatrixXf& a, const MatrixXf& b, MatrixXf& c)
{
  int M = c.rows(); int N = c.cols(); int K = a.cols();
  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();

  sgemm_(&notrans,&notrans,&M,&N,&K,&fone,
         const_cast<float*>(a.data()),&lda,
         const_cast<float*>(b.data()),&ldb,&fone,
         c.data(),&ldc);
}

EIGEN_DONT_INLINE void blas_gemm(const MatrixXd& a, const MatrixXd& b, MatrixXd& c)
{
  int M = c.rows(); int N = c.cols(); int K = a.cols();
  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();

  dgemm_(&notrans,&notrans,&M,&N,&K,&done,
         const_cast<double*>(a.data()),&lda,
         const_cast<double*>(b.data()),&ldb,&done,
         c.data(),&ldc);
}

void blas_gemm(const MatrixXcf& a, const MatrixXcf& b, MatrixXcf& c)
{
  int M = c.rows(); int N = c.cols(); int K = a.cols();
  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();

  cgemm_(&notrans,&notrans,&M,&N,&K,(float*)&cfone,
         const_cast<float*>((const float*)a.data()),&lda,
         const_cast<float*>((const float*)b.data()),&ldb,(float*)&cfone,
         (float*)c.data(),&ldc);
}

void blas_gemm(const MatrixXcd& a, const MatrixXcd& b, MatrixXcd& c)
{
  int M = c.rows(); int N = c.cols(); int K = a.cols();
  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();

  zgemm_(&notrans,&notrans,&M,&N,&K,(double*)&cdone,
         const_cast<double*>((const double*)a.data()),&lda,
         const_cast<double*>((const double*)b.data()),&ldb,(double*)&cdone,
         (double*)c.data(),&ldc);
}

#endif

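// The "matlab" routines below mimic code operating on separately stored real and
// imaginary parts: a full complex*complex product costs four real GEMMs, while the
// mixed real/complex cases only need two.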
void matlab_cplx_cplx(const M& ar, const M& ai, const M& br, const M& bi, M& cr, M& ci)
{
  cr.noalias() += ar * br;
  cr.noalias() -= ai * bi;
  ci.noalias() += ar * bi;
  ci.noalias() += ai * br;
}

void matlab_real_cplx(const M& a, const M& br, const M& bi, M& cr, M& ci)
{
  cr.noalias() += a * br;
  ci.noalias() += a * bi;
}

void matlab_cplx_real(const M& ar, const M& ai, const M& b, M& cr, M& ci)
{
  cr.noalias() += ar * b;
  ci.noalias() += ai * b;
}

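// Eigen product kernel under test. EIGEN_DONT_INLINE prevents the call from being
// inlined into the benchmark loop, so repeated evaluations cannot be optimized away.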
template<typename A, typename B, typename C>
EIGEN_DONT_INLINE void gemm(const A& a, const B& b, C& c)
{
  c.noalias() += a * b;
}

int main(int argc, char ** argv)
{
  std::ptrdiff_t l1 = internal::queryL1CacheSize();
  std::ptrdiff_t l2 = internal::queryTopLevelCacheSize();
  std::cout << "L1 cache size = " << (l1>0 ? l1/1024 : -1) << " KB\n";
  std::cout << "L2/L3 cache size = " << (l2>0 ? l2/1024 : -1) << " KB\n";
  typedef internal::gebp_traits<Scalar,Scalar> Traits;
  std::cout << "Register blocking = " << Traits::mr << " x " << Traits::nr << "\n";

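  // Benchmark parameters; each one can be overridden on the command line
  // (see the usage string printed below).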
  int rep = 1;    // number of repetitions per measurement
  int tries = 2;  // number of tries, the best one is reported

  int s = 2048;
  int cache_size = -1;

  bool need_help = false;
  for (int i=1; i<argc; ++i)
  {
    if(argv[i][0]=='s')
      s = atoi(argv[i]+1);
    else if(argv[i][0]=='c')
      cache_size = atoi(argv[i]+1);
    else if(argv[i][0]=='t')
      tries = atoi(argv[i]+1);
    else if(argv[i][0]=='p')
      rep = atoi(argv[i]+1);
    else
      need_help = true;
  }

  if(need_help)
  {
    std::cout << argv[0] << " s<matrix size> c<cache size> t<nb tries> p<nb repeats>\n";
    return 1;
  }

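  // Optionally override Eigen's runtime cache-size detection; the top-level cache is
  // simply assumed to be 96 times the given first-level size.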
  if(cache_size>0)
    setCpuCacheSizes(cache_size,96*cache_size);

  int m = s;
  int n = s;
  int p = s;
  A a(m,p); a.setRandom();
  B b(p,n); b.setRandom();
  C c(m,n); c.setOnes();

  std::cout << "Matrix sizes = " << m << "x" << p << " * " << p << "x" << n << "\n";
  std::ptrdiff_t mc(m), nc(n), kc(p);
  internal::computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc);
  std::cout << "blocking size (mc x kc) = " << mc << " x " << kc << "\n";

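  // Keep a copy of c: it is used below to check the benchmarked product against a
  // reference computation.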
  C r = c;

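  // Sanity check: compute the same product once through a reference path (BLAS, or a
  // single-threaded Eigen run when OpenMP is enabled) and compare it with the
  // benchmarked product.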
  #if defined EIGEN_HAS_OPENMP
  int procs = omp_get_max_threads();
  if(procs>1)
  {
    #ifdef HAVE_BLAS
    blas_gemm(a,b,r);
    #else
    omp_set_num_threads(1);
    r.noalias() += a * b;
    omp_set_num_threads(procs);
    #endif
    c.noalias() += a * b;
    if(!r.isApprox(c)) std::cerr << "Warning, your parallel product is crap!\n\n";
  }
  #elif defined HAVE_BLAS
  blas_gemm(a,b,r);
  c.noalias() += a * b;
  if(!r.isApprox(c)) std::cerr << "Warning, your product is crap!\n\n";
  #else
  gemm(a,b,c);
  r.noalias() += a.cast<Scalar>() * b.cast<Scalar>();
  if(!r.isApprox(c)) std::cerr << "Warning, your product is crap!\n\n";
  #endif

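  // Each product performs 2*m*n*p floating point operations; the GFLOPS figures below
  // divide that count (times the number of repetitions) by the best CPU or wall-clock
  // time measured over the requested number of tries.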
  #ifdef HAVE_BLAS
  BenchTimer tblas;
  BENCH(tblas, tries, rep, blas_gemm(a,b,c));
  std::cout << "blas cpu " << tblas.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tblas.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tblas.total(CPU_TIMER) << "s)\n";
  std::cout << "blas real " << tblas.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tblas.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tblas.total(REAL_TIMER) << "s)\n";
  #endif

  BenchTimer tmt;
  BENCH(tmt, tries, rep, gemm(a,b,c));
  std::cout << "eigen cpu " << tmt.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(CPU_TIMER) << "s)\n";
  std::cout << "eigen real " << tmt.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(REAL_TIMER) << "s)\n";

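  // With OpenMP enabled, measure the single-threaded Eigen product as well and report
  // the multi-threading speed-up and parallel efficiency.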
  #ifdef EIGEN_HAS_OPENMP
  if(procs>1)
  {
    BenchTimer tmono;

    Eigen::setNbThreads(1);
    BENCH(tmono, tries, rep, gemm(a,b,c));
    std::cout << "eigen mono cpu " << tmono.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmono.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmono.total(CPU_TIMER) << "s)\n";
    std::cout << "eigen mono real " << tmono.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmono.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmono.total(REAL_TIMER) << "s)\n";
    std::cout << "mt speed up x" << tmono.best(CPU_TIMER) / tmt.best(REAL_TIMER) << " => " << (100.0*tmono.best(CPU_TIMER) / tmt.best(REAL_TIMER))/procs << "%\n";
  }
  #endif

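  // When DECOUPLED is defined, also benchmark the equivalent product expressed on
  // separately stored real and imaginary parts (see the matlab_* routines above).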
  #ifdef DECOUPLED
  if((NumTraits<A::Scalar>::IsComplex) && (NumTraits<B::Scalar>::IsComplex))
  {
    M ar(m,p); ar.setRandom();
    M ai(m,p); ai.setRandom();
    M br(p,n); br.setRandom();
    M bi(p,n); bi.setRandom();
    M cr(m,n); cr.setRandom();
    M ci(m,n); ci.setRandom();

    BenchTimer t;
    BENCH(t, tries, rep, matlab_cplx_cplx(ar,ai,br,bi,cr,ci));
    std::cout << "\"matlab\" cpu " << t.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << t.total(CPU_TIMER) << "s)\n";
    std::cout << "\"matlab\" real " << t.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
  }
  if((!NumTraits<A::Scalar>::IsComplex) && (NumTraits<B::Scalar>::IsComplex))
  {
    M a(m,p); a.setRandom();
    M br(p,n); br.setRandom();
    M bi(p,n); bi.setRandom();
    M cr(m,n); cr.setRandom();
    M ci(m,n); ci.setRandom();

    BenchTimer t;
    BENCH(t, tries, rep, matlab_real_cplx(a,br,bi,cr,ci));
    std::cout << "\"matlab\" cpu " << t.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << t.total(CPU_TIMER) << "s)\n";
    std::cout << "\"matlab\" real " << t.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
  }
  if((NumTraits<A::Scalar>::IsComplex) && (!NumTraits<B::Scalar>::IsComplex))
  {
    M ar(m,p); ar.setRandom();
    M ai(m,p); ai.setRandom();
    M b(p,n); b.setRandom();
    M cr(m,n); cr.setRandom();
    M ci(m,n); ci.setRandom();

    BenchTimer t;
    BENCH(t, tries, rep, matlab_cplx_real(ar,ai,b,cr,ci));
    std::cout << "\"matlab\" cpu " << t.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << t.total(CPU_TIMER) << "s)\n";
    std::cout << "\"matlab\" real " << t.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
  }
  #endif

  return 0;
}