MatrixLogarithm.h
Go to the documentation of this file.
00001 // This file is part of Eigen, a lightweight C++ template library
00002 // for linear algebra.
00003 //
00004 // Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
00005 // Copyright (C) 2011 Chen-Pang He <jdh8@ms63.hinet.net>
00006 //
00007 // This Source Code Form is subject to the terms of the Mozilla
00008 // Public License v. 2.0. If a copy of the MPL was not distributed
00009 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
00010 
00011 #ifndef EIGEN_MATRIX_LOGARITHM
00012 #define EIGEN_MATRIX_LOGARITHM
00013 
00014 #ifndef M_PI
00015 #define M_PI 3.141592653589793238462643383279503L
00016 #endif
00017 
00018 namespace Eigen { 
00019 
/** \ingroup MatrixFunctions_Module
  * \class MatrixLogarithmAtomic
  * \brief Helper class for computing the matrix logarithm of an "atomic"
  * (upper triangular) block; used by MatrixFunction via the
  * MatrixLogarithmReturnValue glue below.
  */
template <typename MatrixType>
class MatrixLogarithmAtomic
{
public:

  typedef typename MatrixType::Scalar Scalar;
  // typedef typename MatrixType::Index Index;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  // typedef typename internal::stem_function<Scalar>::type StemFunction;
  // typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;

  /** \brief Constructor. */
  MatrixLogarithmAtomic() { }

  /** \brief Compute matrix logarithm of atomic matrix.
    * \param[in] A  argument; expected upper triangular (the Pade routines
    *               solve with triangularView&lt;Upper&gt;).
    * \return  The logarithm of \p A.
    */
  MatrixType compute(const MatrixType& A);

private:

  void compute2x2(const MatrixType& A, MatrixType& result);  // closed-form 2x2 case
  void computeBig(const MatrixType& A, MatrixType& result);  // inverse scaling-and-squaring, size > 2
  static Scalar atanh(Scalar x);                             // scalar atanh helper for compute2x2
  // Smallest Pade degree achieving full accuracy for the given ||T - I||;
  // one overload per supported RealScalar precision.
  int getPadeDegree(float normTminusI);
  int getPadeDegree(double normTminusI);
  int getPadeDegree(long double normTminusI);
  void computePade(MatrixType& result, const MatrixType& T, int degree);
  void computePade3(MatrixType& result, const MatrixType& T);
  void computePade4(MatrixType& result, const MatrixType& T);
  void computePade5(MatrixType& result, const MatrixType& T);
  void computePade6(MatrixType& result, const MatrixType& T);
  void computePade7(MatrixType& result, const MatrixType& T);
  void computePade8(MatrixType& result, const MatrixType& T);
  void computePade9(MatrixType& result, const MatrixType& T);
  void computePade10(MatrixType& result, const MatrixType& T);
  void computePade11(MatrixType& result, const MatrixType& T);

  static const int minPadeDegree = 3;
  // Highest degree that can ever be requested depends on the precision of RealScalar.
  static const int maxPadeDegree = std::numeric_limits<RealScalar>::digits<= 24?  5:      // single precision
                                   std::numeric_limits<RealScalar>::digits<= 53?  7:      // double precision
                                   std::numeric_limits<RealScalar>::digits<= 64?  8:      // extended precision
                                   std::numeric_limits<RealScalar>::digits<=106? 10: 11;  // double-double or quadruple precision

  // Prevent copying
  MatrixLogarithmAtomic(const MatrixLogarithmAtomic&);
  MatrixLogarithmAtomic& operator=(const MatrixLogarithmAtomic&);
};
00079 
00081 template <typename MatrixType>
00082 MatrixType MatrixLogarithmAtomic<MatrixType>::compute(const MatrixType& A)
00083 {
00084   using std::log;
00085   MatrixType result(A.rows(), A.rows());
00086   if (A.rows() == 1)
00087     result(0,0) = log(A(0,0));
00088   else if (A.rows() == 2)
00089     compute2x2(A, result);
00090   else
00091     computeBig(A, result);
00092   return result;
00093 }
00094 
00096 template <typename MatrixType>
00097 typename MatrixType::Scalar MatrixLogarithmAtomic<MatrixType>::atanh(typename MatrixType::Scalar x)
00098 {
00099   using std::abs;
00100   using std::sqrt;
00101   if (abs(x) > sqrt(NumTraits<Scalar>::epsilon()))
00102     return Scalar(0.5) * log((Scalar(1) + x) / (Scalar(1) - x));
00103   else
00104     return x + x*x*x / Scalar(3);
00105 }
00106 
/** \brief Compute logarithm of a 2x2 upper triangular matrix. */
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::compute2x2(const MatrixType& A, MatrixType& result)
{
  using std::abs;
  using std::ceil;
  using std::imag;
  using std::log;

  Scalar logA00 = log(A(0,0));
  Scalar logA11 = log(A(1,1));

  // Diagonal of log(A) is the log of the diagonal; (1,0) stays zero
  // since A is upper triangular.
  result(0,0) = logA00;
  result(1,0) = Scalar(0);
  result(1,1) = logA11;

  if (A(0,0) == A(1,1)) {
    // Equal diagonal entries: limit of the divided difference is 1/A(0,0).
    result(0,1) = A(0,1) / A(0,0);
  } else if ((abs(A(0,0)) < 0.5*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1)))) {
    // Well-separated diagonal entries: plain divided-difference formula.
    result(0,1) = A(0,1) * (logA11 - logA00) / (A(1,1) - A(0,0));
  } else {
    // computation in previous branch is inaccurate if A(1,1) \approx A(0,0)
    // Use the atanh-based formula; the unwinding number selects the correct
    // branch of the logarithm when imag(logA11 - logA00) crosses +-pi.
    // NOTE(review): Scalar(0, 2*M_PI*unwindingNumber) constructs a purely
    // imaginary value, so this branch assumes Scalar is complex — confirm.
    int unwindingNumber = static_cast<int>(ceil((imag(logA11 - logA00) - M_PI) / (2*M_PI)));
    Scalar z = (A(1,1) - A(0,0)) / (A(1,1) + A(0,0));
    result(0,1) = A(0,1) * (Scalar(2) * atanh(z) + Scalar(0,2*M_PI*unwindingNumber)) / (A(1,1) - A(0,0));
  }
}
00134 
/** \brief Compute logarithm of triangular matrices with size > 2.
  * \details Inverse scaling-and-squaring: repeatedly replace T by its
  * (triangular) square root until ||T - I|| is small enough for a Pade
  * approximant, evaluate the approximant, then scale the result back by
  * 2^(number of square roots taken).
  */
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computeBig(const MatrixType& A, MatrixType& result)
{
  int numberOfSquareRoots = 0;
  int numberOfExtraSquareRoots = 0;
  int degree;
  MatrixType T = A;
  // Largest ||T - I|| for which the highest Pade degree available at this
  // precision still attains full accuracy (matches the last entry of the
  // corresponding maxNormForPade table in getPadeDegree).
  const RealScalar maxNormForPade = maxPadeDegree<= 5? 5.3149729967117310e-1:                     // single precision
                                    maxPadeDegree<= 7? 2.6429608311114350e-1:                     // double precision
                                    maxPadeDegree<= 8? 2.32777776523703892094e-1L:                // extended precision
                                    maxPadeDegree<=10? 1.05026503471351080481093652651105e-1L:    // double-double
                                                       1.1880960220216759245467951592883642e-1L;  // quadruple precision

  while (true) {
    // 1-norm (maximum absolute column sum) of T - I.
    RealScalar normTminusI = (T - MatrixType::Identity(T.rows(), T.rows())).cwiseAbs().colwise().sum().maxCoeff();
    if (normTminusI < maxNormForPade) {
      degree = getPadeDegree(normTminusI);
      int degree2 = getPadeDegree(normTminusI / RealScalar(2));
      // Stop when one more square root would reduce the Pade degree by at
      // most one (not worth its cost), or we already took one extra root.
      if ((degree - degree2 <= 1) || (numberOfExtraSquareRoots == 1)) 
        break;
      ++numberOfExtraSquareRoots;
    }
    MatrixType sqrtT;
    MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT);
    T = sqrtT;
    ++numberOfSquareRoots;
  }

  computePade(result, T, degree);
  // Undo the square roots: log(A) = 2^k * log(A^(1/2^k)).
  result *= pow(RealScalar(2), numberOfSquareRoots);
}
00168 
00169 /* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = float) */
00170 template <typename MatrixType>
00171 int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(float normTminusI)
00172 {
00173   const float maxNormForPade[] = { 2.5111573934555054e-1 /* degree = 3 */ , 4.0535837411880493e-1,
00174             5.3149729967117310e-1 };
00175   for (int degree = 3; degree <= maxPadeDegree; ++degree) 
00176     if (normTminusI <= maxNormForPade[degree - minPadeDegree])
00177       return degree;
00178   assert(false); // this line should never be reached
00179 }
00180 
00181 /* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = double) */
00182 template <typename MatrixType>
00183 int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(double normTminusI)
00184 {
00185   const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2,
00186             1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 };
00187   for (int degree = 3; degree <= maxPadeDegree; ++degree)
00188     if (normTminusI <= maxNormForPade[degree - minPadeDegree])
00189       return degree;
00190   assert(false); // this line should never be reached
00191 }
00192 
00193 /* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = long double) */
00194 template <typename MatrixType>
00195 int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(long double normTminusI)
00196 {
00197 #if   LDBL_MANT_DIG == 53         // double precision
00198   const long double maxNormForPade[] = { 1.6206284795015624e-2L /* degree = 3 */ , 5.3873532631381171e-2L,
00199             1.1352802267628681e-1L, 1.8662860613541288e-1L, 2.642960831111435e-1L };
00200 #elif LDBL_MANT_DIG <= 64         // extended precision
00201   const long double maxNormForPade[] = { 5.48256690357782863103e-3L /* degree = 3 */, 2.34559162387971167321e-2L,
00202             5.84603923897347449857e-2L, 1.08486423756725170223e-1L, 1.68385767881294446649e-1L,
00203             2.32777776523703892094e-1L };
00204 #elif LDBL_MANT_DIG <= 106        // double-double
00205   const long double maxNormForPade[] = { 8.58970550342939562202529664318890e-5L /* degree = 3 */,
00206             9.34074328446359654039446552677759e-4L, 4.26117194647672175773064114582860e-3L,
00207             1.21546224740281848743149666560464e-2L, 2.61100544998339436713088248557444e-2L,
00208             4.66170074627052749243018566390567e-2L, 7.32585144444135027565872014932387e-2L,
00209             1.05026503471351080481093652651105e-1L };
00210 #else                             // quadruple precision
00211   const long double maxNormForPade[] = { 4.7419931187193005048501568167858103e-5L /* degree = 3 */,
00212             5.8853168473544560470387769480192666e-4L, 2.9216120366601315391789493628113520e-3L,
00213             8.8415758124319434347116734705174308e-3L, 1.9850836029449446668518049562565291e-2L,
00214             3.6688019729653446926585242192447447e-2L, 5.9290962294020186998954055264528393e-2L,
00215             8.6998436081634343903250580992127677e-2L, 1.1880960220216759245467951592883642e-1L };
00216 #endif
00217   for (int degree = 3; degree <= maxPadeDegree; ++degree)
00218     if (normTminusI <= maxNormForPade[degree - minPadeDegree])
00219       return degree;
00220   assert(false); // this line should never be reached
00221 }
00222 
00223 /* \brief Compute Pade approximation to matrix logarithm */
00224 template <typename MatrixType>
00225 void MatrixLogarithmAtomic<MatrixType>::computePade(MatrixType& result, const MatrixType& T, int degree)
00226 {
00227   switch (degree) {
00228     case 3:  computePade3(result, T);  break;
00229     case 4:  computePade4(result, T);  break;
00230     case 5:  computePade5(result, T);  break;
00231     case 6:  computePade6(result, T);  break;
00232     case 7:  computePade7(result, T);  break;
00233     case 8:  computePade8(result, T);  break;
00234     case 9:  computePade9(result, T);  break;
00235     case 10: computePade10(result, T); break;
00236     case 11: computePade11(result, T); break;
00237     default: assert(false); // should never happen
00238   }
00239 } 
00240 
00241 template <typename MatrixType>
00242 void MatrixLogarithmAtomic<MatrixType>::computePade3(MatrixType& result, const MatrixType& T)
00243 {
00244   const int degree = 3;
00245   const RealScalar nodes[]   = { 0.1127016653792583114820734600217600L, 0.5000000000000000000000000000000000L,
00246             0.8872983346207416885179265399782400L };
00247   const RealScalar weights[] = { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L,
00248             0.2777777777777777777777777777777778L };
00249   assert(degree <= maxPadeDegree);
00250   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
00251   result.setZero(T.rows(), T.rows());
00252   for (int k = 0; k < degree; ++k)
00253     result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
00254                            .template triangularView<Upper>().solve(TminusI);
00255 }
00256 
00257 template <typename MatrixType>
00258 void MatrixLogarithmAtomic<MatrixType>::computePade4(MatrixType& result, const MatrixType& T)
00259 {
00260   const int degree = 4;
00261   const RealScalar nodes[]   = { 0.0694318442029737123880267555535953L, 0.3300094782075718675986671204483777L,
00262             0.6699905217924281324013328795516223L, 0.9305681557970262876119732444464048L };
00263   const RealScalar weights[] = { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L,
00264             0.3260725774312730713134680253890003L, 0.1739274225687269286865319746109997L };
00265   assert(degree <= maxPadeDegree);
00266   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
00267   result.setZero(T.rows(), T.rows());
00268   for (int k = 0; k < degree; ++k)
00269     result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
00270                            .template triangularView<Upper>().solve(TminusI);
00271 }
00272 
00273 template <typename MatrixType>
00274 void MatrixLogarithmAtomic<MatrixType>::computePade5(MatrixType& result, const MatrixType& T)
00275 {
00276   const int degree = 5;
00277   const RealScalar nodes[]   = { 0.0469100770306680036011865608503035L, 0.2307653449471584544818427896498956L,
00278             0.5000000000000000000000000000000000L, 0.7692346550528415455181572103501044L,
00279             0.9530899229693319963988134391496965L };
00280   const RealScalar weights[] = { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L,
00281             0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L,
00282             0.1184634425280945437571320203599587L };
00283   assert(degree <= maxPadeDegree);
00284   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
00285   result.setZero(T.rows(), T.rows());
00286   for (int k = 0; k < degree; ++k)
00287     result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
00288                            .template triangularView<Upper>().solve(TminusI);
00289 }
00290 
00291 template <typename MatrixType>
00292 void MatrixLogarithmAtomic<MatrixType>::computePade6(MatrixType& result, const MatrixType& T)
00293 {
00294   const int degree = 6;
00295   const RealScalar nodes[]   = { 0.0337652428984239860938492227530027L, 0.1693953067668677431693002024900473L,
00296             0.3806904069584015456847491391596440L, 0.6193095930415984543152508608403560L,
00297                         0.8306046932331322568306997975099527L, 0.9662347571015760139061507772469973L };
00298   const RealScalar weights[] = { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L,
00299             0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L,
00300                         0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L };
00301   assert(degree <= maxPadeDegree);
00302   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
00303   result.setZero(T.rows(), T.rows());
00304   for (int k = 0; k < degree; ++k)
00305     result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
00306                            .template triangularView<Upper>().solve(TminusI);
00307 }
00308 
00309 template <typename MatrixType>
00310 void MatrixLogarithmAtomic<MatrixType>::computePade7(MatrixType& result, const MatrixType& T)
00311 {
00312   const int degree = 7;
00313   const RealScalar nodes[]   = { 0.0254460438286207377369051579760744L, 0.1292344072003027800680676133596058L,
00314             0.2970774243113014165466967939615193L, 0.5000000000000000000000000000000000L,
00315             0.7029225756886985834533032060384807L, 0.8707655927996972199319323866403942L,
00316             0.9745539561713792622630948420239256L };
00317   const RealScalar weights[] = { 0.0647424830844348466353057163395410L, 0.1398526957446383339507338857118898L,
00318             0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L,
00319             0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L,
00320             0.0647424830844348466353057163395410L };
00321   assert(degree <= maxPadeDegree);
00322   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
00323   result.setZero(T.rows(), T.rows());
00324   for (int k = 0; k < degree; ++k)
00325     result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
00326                            .template triangularView<Upper>().solve(TminusI);
00327 }
00328 
00329 template <typename MatrixType>
00330 void MatrixLogarithmAtomic<MatrixType>::computePade8(MatrixType& result, const MatrixType& T)
00331 {
00332   const int degree = 8;
00333   const RealScalar nodes[]   = { 0.0198550717512318841582195657152635L, 0.1016667612931866302042230317620848L,
00334             0.2372337950418355070911304754053768L, 0.4082826787521750975302619288199080L,
00335             0.5917173212478249024697380711800920L, 0.7627662049581644929088695245946232L,
00336             0.8983332387068133697957769682379152L, 0.9801449282487681158417804342847365L };
00337   const RealScalar weights[] = { 0.0506142681451881295762656771549811L, 0.1111905172266872352721779972131204L,
00338             0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L,
00339             0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L,
00340             0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L };
00341   assert(degree <= maxPadeDegree);
00342   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
00343   result.setZero(T.rows(), T.rows());
00344   for (int k = 0; k < degree; ++k)
00345     result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
00346                            .template triangularView<Upper>().solve(TminusI);
00347 }
00348 
00349 template <typename MatrixType>
00350 void MatrixLogarithmAtomic<MatrixType>::computePade9(MatrixType& result, const MatrixType& T)
00351 {
00352   const int degree = 9;
00353   const RealScalar nodes[]   = { 0.0159198802461869550822118985481636L, 0.0819844463366821028502851059651326L,
00354             0.1933142836497048013456489803292629L, 0.3378732882980955354807309926783317L,
00355             0.5000000000000000000000000000000000L, 0.6621267117019044645192690073216683L,
00356             0.8066857163502951986543510196707371L, 0.9180155536633178971497148940348674L,
00357             0.9840801197538130449177881014518364L };
00358   const RealScalar weights[] = { 0.0406371941807872059859460790552618L, 0.0903240803474287020292360156214564L,
00359             0.1303053482014677311593714347093164L, 0.1561735385200014200343152032922218L,
00360             0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L,
00361             0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L,
00362             0.0406371941807872059859460790552618L };
00363   assert(degree <= maxPadeDegree);
00364   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
00365   result.setZero(T.rows(), T.rows());
00366   for (int k = 0; k < degree; ++k)
00367     result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
00368                            .template triangularView<Upper>().solve(TminusI);
00369 }
00370 
00371 template <typename MatrixType>
00372 void MatrixLogarithmAtomic<MatrixType>::computePade10(MatrixType& result, const MatrixType& T)
00373 {
00374   const int degree = 10;
00375   const RealScalar nodes[]   = { 0.0130467357414141399610179939577740L, 0.0674683166555077446339516557882535L,
00376             0.1602952158504877968828363174425632L, 0.2833023029353764046003670284171079L,
00377             0.4255628305091843945575869994351400L, 0.5744371694908156054424130005648600L,
00378             0.7166976970646235953996329715828921L, 0.8397047841495122031171636825574368L,
00379             0.9325316833444922553660483442117465L, 0.9869532642585858600389820060422260L };
00380   const RealScalar weights[] = { 0.0333356721543440687967844049466659L, 0.0747256745752902965728881698288487L,
00381             0.1095431812579910219977674671140816L, 0.1346333596549981775456134607847347L,
00382             0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L,
00383             0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L,
00384             0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L };
00385   assert(degree <= maxPadeDegree);
00386   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
00387   result.setZero(T.rows(), T.rows());
00388   for (int k = 0; k < degree; ++k)
00389     result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
00390                            .template triangularView<Upper>().solve(TminusI);
00391 }
00392 
00393 template <typename MatrixType>
00394 void MatrixLogarithmAtomic<MatrixType>::computePade11(MatrixType& result, const MatrixType& T)
00395 {
00396   const int degree = 11;
00397   const RealScalar nodes[]   = { 0.0108856709269715035980309994385713L, 0.0564687001159523504624211153480364L,
00398             0.1349239972129753379532918739844233L, 0.2404519353965940920371371652706952L,
00399             0.3652284220238275138342340072995692L, 0.5000000000000000000000000000000000L,
00400             0.6347715779761724861657659927004308L, 0.7595480646034059079628628347293048L,
00401             0.8650760027870246620467081260155767L, 0.9435312998840476495375788846519636L,
00402             0.9891143290730284964019690005614287L };
00403   const RealScalar weights[] = { 0.0278342835580868332413768602212743L, 0.0627901847324523123173471496119701L,
00404             0.0931451054638671257130488207158280L, 0.1165968822959952399592618524215876L,
00405             0.1314022722551233310903444349452546L, 0.1364625433889503153572417641681711L,
00406             0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L,
00407             0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L,
00408             0.0278342835580868332413768602212743L };
00409   assert(degree <= maxPadeDegree);
00410   MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
00411   result.setZero(T.rows(), T.rows());
00412   for (int k = 0; k < degree; ++k)
00413     result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
00414                            .template triangularView<Upper>().solve(TminusI);
00415 }
00416 
/** \ingroup MatrixFunctions_Module
  * \class MatrixLogarithmReturnValue
  * \brief Proxy returned by MatrixBase::log(); evaluates the matrix
  * logarithm lazily when assigned to a destination (ReturnByValue idiom).
  */
template<typename Derived> class MatrixLogarithmReturnValue
: public ReturnByValue<MatrixLogarithmReturnValue<Derived> >
{
public:

  typedef typename Derived::Scalar Scalar;
  typedef typename Derived::Index Index;

  /** \brief Constructor.
    * \param[in] A  matrix (expression) whose logarithm is to be computed.
    */
  MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { }
  
  /** \brief Compute the matrix logarithm and store it in \p result.
    * Evaluation runs in complex arithmetic (ComplexScalar) via the generic
    * MatrixFunction machinery with MatrixLogarithmAtomic as kernel.
    */
  template <typename ResultType>
  inline void evalTo(ResultType& result) const
  {
    typedef typename Derived::PlainObject PlainObject;
    typedef internal::traits<PlainObject> Traits;
    static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
    static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
    static const int Options = PlainObject::Options;
    typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
    typedef Matrix<ComplexScalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
    typedef MatrixLogarithmAtomic<DynMatrixType> AtomicType;
    AtomicType atomic;
    
    // Force evaluation of the (possibly lazy) argument before handing it
    // to MatrixFunction.
    const PlainObject Aevaluated = m_A.eval();
    MatrixFunction<PlainObject, AtomicType> mf(Aevaluated, atomic);
    mf.compute(result);
  }

  Index rows() const { return m_A.rows(); }
  Index cols() const { return m_A.cols(); }
  
private:
  // Nested copy/reference of the argument expression.
  typename internal::nested<Derived>::type m_A;
  
  MatrixLogarithmReturnValue& operator=(const MatrixLogarithmReturnValue&);
};
00473 
namespace internal {
  // Tells the ReturnByValue machinery that evaluating a
  // MatrixLogarithmReturnValue<Derived> yields Derived's plain matrix type.
  template<typename Derived>
  struct traits<MatrixLogarithmReturnValue<Derived> >
  {
    typedef typename Derived::PlainObject ReturnType;
  };
}
00481 
00482 
00483 /********** MatrixBase method **********/
00484 
00485 
/** \brief Matrix logarithm: returns a lazy proxy that evaluates to log(*this).
  * The matrix must be square (asserted); evaluation happens on assignment.
  */
template <typename Derived>
const MatrixLogarithmReturnValue<Derived> MatrixBase<Derived>::log() const
{
  eigen_assert(rows() == cols());
  return MatrixLogarithmReturnValue<Derived>(derived());
}
00492 
00493 } // end namespace Eigen
00494 
00495 #endif // EIGEN_MATRIX_LOGARITHM


win_eigen
Author(s): Daniel Stonier
autogenerated on Wed Sep 16 2015 07:11:16