// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
// Copyright (C) 2011 Chen-Pang He <jdh8@ms63.hinet.net>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_MATRIX_LOGARITHM
#define EIGEN_MATRIX_LOGARITHM

#ifndef M_PI
#define M_PI 3.141592653589793238462643383279503L
#endif

namespace Eigen {

/** \ingroup MatrixFunctions_Module
  * \class MatrixLogarithmAtomic
  * \brief Helper class for computing matrix logarithm of atomic matrices.
  *
  * \internal
  * Here, an atomic matrix is a triangular matrix whose diagonal
  * entries are close to each other.
  *
  * \sa class MatrixFunctionAtomic, MatrixBase::log()
  */
template <typename MatrixType>
class MatrixLogarithmAtomic
{
public:

  typedef typename MatrixType::Scalar Scalar;
  // typedef typename MatrixType::Index Index;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  // typedef typename internal::stem_function<Scalar>::type StemFunction;
  // typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;

  /** \brief Constructor. */
  MatrixLogarithmAtomic() { }

  /** \brief Compute matrix logarithm of atomic matrix
    * \param[in]  A  argument of matrix logarithm, should be upper triangular and atomic
    * \returns  The logarithm of \p A.
    */
  MatrixType compute(const MatrixType& A);

private:

  void compute2x2(const MatrixType& A, MatrixType& result);
  void computeBig(const MatrixType& A, MatrixType& result);
  static Scalar atanh(Scalar x);
  int getPadeDegree(float normTminusI);
  int getPadeDegree(double normTminusI);
  int getPadeDegree(long double normTminusI);
  void computePade(MatrixType& result, const MatrixType& T, int degree);
  void computePade3(MatrixType& result, const MatrixType& T);
  void computePade4(MatrixType& result, const MatrixType& T);
  void computePade5(MatrixType& result, const MatrixType& T);
  void computePade6(MatrixType& result, const MatrixType& T);
  void computePade7(MatrixType& result, const MatrixType& T);
  void computePade8(MatrixType& result, const MatrixType& T);
  void computePade9(MatrixType& result, const MatrixType& T);
  void computePade10(MatrixType& result, const MatrixType& T);
  void computePade11(MatrixType& result, const MatrixType& T);

  static const int minPadeDegree = 3;
  static const int maxPadeDegree = std::numeric_limits<RealScalar>::digits<= 24?  5:      // single precision
                                   std::numeric_limits<RealScalar>::digits<= 53?  7:      // double precision
                                   std::numeric_limits<RealScalar>::digits<= 64?  8:      // extended precision
                                   std::numeric_limits<RealScalar>::digits<=106? 10: 11;  // double-double or quadruple precision

  // Prevent copying
  MatrixLogarithmAtomic(const MatrixLogarithmAtomic&);
  MatrixLogarithmAtomic& operator=(const MatrixLogarithmAtomic&);
};

/** \brief Compute logarithm of triangular matrix with clustered eigenvalues. */
template <typename MatrixType>
MatrixType MatrixLogarithmAtomic<MatrixType>::compute(const MatrixType& A)
{
  using std::log;
  MatrixType result(A.rows(), A.rows());
  if (A.rows() == 1)
    result(0,0) = log(A(0,0));
  else if (A.rows() == 2)
    compute2x2(A, result);
  else
    computeBig(A, result);
  return result;
}

/** \brief Compute atanh (inverse hyperbolic tangent). */
template <typename MatrixType>
typename MatrixType::Scalar MatrixLogarithmAtomic<MatrixType>::atanh(typename MatrixType::Scalar x)
{
  using std::abs;
  using std::log;
  using std::sqrt;
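  // For |x| below sqrt(epsilon), the truncated series x + x^3/3 already agrees with atanh(x)
  // to machine precision, and it avoids evaluating log((1+x)/(1-x)) for tiny arguments.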
  if (abs(x) > sqrt(NumTraits<Scalar>::epsilon()))
    return Scalar(0.5) * log((Scalar(1) + x) / (Scalar(1) - x));
  else
    return x + x*x*x / Scalar(3);
}

/** \brief Compute logarithm of 2x2 triangular matrix. */
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::compute2x2(const MatrixType& A, MatrixType& result)
{
  using std::abs;
  using std::ceil;
  using std::imag;
  using std::log;

  Scalar logA00 = log(A(0,0));
  Scalar logA11 = log(A(1,1));

  result(0,0) = logA00;
  result(1,0) = Scalar(0);
  result(1,1) = logA11;

  if (A(0,0) == A(1,1)) {
    result(0,1) = A(0,1) / A(0,0);
  } else if ((abs(A(0,0)) < 0.5*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1)))) {
    result(0,1) = A(0,1) * (logA11 - logA00) / (A(1,1) - A(0,0));
  } else {
    // computation in previous branch is inaccurate if A(1,1) \approx A(0,0)
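    // Instead, use the identity log(A(1,1)) - log(A(0,0)) = 2*atanh(z) + 2*pi*i*unwindingNumber
    // with z = (A(1,1) - A(0,0)) / (A(1,1) + A(0,0)); the unwinding number accounts for the
    // branch cut of the complex logarithm (cf. Higham, "Functions of Matrices").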
    int unwindingNumber = static_cast<int>(ceil((imag(logA11 - logA00) - M_PI) / (2*M_PI)));
    Scalar z = (A(1,1) - A(0,0)) / (A(1,1) + A(0,0));
    result(0,1) = A(0,1) * (Scalar(2) * atanh(z) + Scalar(0,2*M_PI*unwindingNumber)) / (A(1,1) - A(0,0));
  }
}

/** \brief Compute logarithm of triangular matrices with size > 2.
  * \details This uses an inverse scaling-and-squaring algorithm. */
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computeBig(const MatrixType& A, MatrixType& result)
{
  int numberOfSquareRoots = 0;
  int numberOfExtraSquareRoots = 0;
  int degree;
  MatrixType T = A;
  const RealScalar maxNormForPade = maxPadeDegree<= 5? 5.3149729967117310e-1:                     // single precision
                                    maxPadeDegree<= 7? 2.6429608311114350e-1:                     // double precision
                                    maxPadeDegree<= 8? 2.32777776523703892094e-1L:                // extended precision
                                    maxPadeDegree<=10? 1.05026503471351080481093652651105e-1L:    // double-double
                                                       1.1880960220216759245467951592883642e-1L;  // quadruple precision

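  // Inverse scaling and squaring: take repeated (triangular) square roots of T until
  // ||T - I|| is small enough for a Pade approximant of degree at most maxPadeDegree,
  // evaluate that approximant, and finally undo the square roots below via
  // log(A) = 2^numberOfSquareRoots * log(A^(1 / 2^numberOfSquareRoots)).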
  while (true) {
    RealScalar normTminusI = (T - MatrixType::Identity(T.rows(), T.rows())).cwiseAbs().colwise().sum().maxCoeff();
    if (normTminusI < maxNormForPade) {
      degree = getPadeDegree(normTminusI);
      int degree2 = getPadeDegree(normTminusI / RealScalar(2));
      if ((degree - degree2 <= 1) || (numberOfExtraSquareRoots == 1))
        break;
      ++numberOfExtraSquareRoots;
    }
    MatrixType sqrtT;
    MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT);
    T = sqrtT;
    ++numberOfSquareRoots;
  }

  computePade(result, T, degree);
  result *= pow(RealScalar(2), numberOfSquareRoots);
}

/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = float) */
template <typename MatrixType>
int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(float normTminusI)
{
  const float maxNormForPade[] = { 2.5111573934555054e-1 /* degree = 3 */ , 4.0535837411880493e-1,
            5.3149729967117310e-1 };
  for (int degree = 3; degree <= maxPadeDegree; ++degree)
    if (normTminusI <= maxNormForPade[degree - minPadeDegree])
      return degree;
  assert(false); // this line should never be reached
}

/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = double) */
template <typename MatrixType>
int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(double normTminusI)
{
  const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2,
            1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 };
  for (int degree = 3; degree <= maxPadeDegree; ++degree)
    if (normTminusI <= maxNormForPade[degree - minPadeDegree])
      return degree;
  assert(false); // this line should never be reached
}

/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = long double) */
template <typename MatrixType>
int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(long double normTminusI)
{
#if   LDBL_MANT_DIG == 53         // double precision
  const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2,
            1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 };
#elif LDBL_MANT_DIG <= 64         // extended precision
  const double maxNormForPade[] = { 5.48256690357782863103e-3 /* degree = 3 */, 2.34559162387971167321e-2,
            5.84603923897347449857e-2, 1.08486423756725170223e-1, 1.68385767881294446649e-1,
            2.32777776523703892094e-1 };
#elif LDBL_MANT_DIG <= 106        // double-double
  const double maxNormForPade[] = { 8.58970550342939562202529664318890e-5 /* degree = 3 */,
            9.34074328446359654039446552677759e-4, 4.26117194647672175773064114582860e-3,
            1.21546224740281848743149666560464e-2, 2.61100544998339436713088248557444e-2,
            4.66170074627052749243018566390567e-2, 7.32585144444135027565872014932387e-2,
            1.05026503471351080481093652651105e-1 };
#else                             // quadruple precision
  const double maxNormForPade[] = { 4.7419931187193005048501568167858103e-5 /* degree = 3 */,
            5.8853168473544560470387769480192666e-4, 2.9216120366601315391789493628113520e-3,
            8.8415758124319434347116734705174308e-3, 1.9850836029449446668518049562565291e-2,
            3.6688019729653446926585242192447447e-2, 5.9290962294020186998954055264528393e-2,
            8.6998436081634343903250580992127677e-2, 1.1880960220216759245467951592883642e-1 };
#endif
  for (int degree = 3; degree <= maxPadeDegree; ++degree)
    if (normTminusI <= maxNormForPade[degree - minPadeDegree])
      return degree;
  assert(false); // this line should never be reached
}

/* \brief Compute Pade approximation to matrix logarithm */
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade(MatrixType& result, const MatrixType& T, int degree)
{
  switch (degree) {
    case 3:  computePade3(result, T);  break;
    case 4:  computePade4(result, T);  break;
    case 5:  computePade5(result, T);  break;
    case 6:  computePade6(result, T);  break;
    case 7:  computePade7(result, T);  break;
    case 8:  computePade8(result, T);  break;
    case 9:  computePade9(result, T);  break;
    case 10: computePade10(result, T); break;
    case 11: computePade11(result, T); break;
    default: assert(false); // should never happen
  }
}

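// The computePadeN helpers below all evaluate the same partial-fraction form of the Pade
// approximant: log(T) is approximated by the sum over k of
//   weights[k] * (I + nodes[k] * (T - I))^{-1} * (T - I),
// where nodes[] and weights[] are the N-point Gauss-Legendre quadrature rule on [0,1].
// Only the degree, and hence the tabulated nodes and weights, differs between them.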
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade3(MatrixType& result, const MatrixType& T)
{
  const int degree = 3;
  const RealScalar nodes[]   = { 0.1127016653792583114820734600217600L, 0.5000000000000000000000000000000000L,
            0.8872983346207416885179265399782400L };
  const RealScalar weights[] = { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L,
            0.2777777777777777777777777777777778L };
  assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade4(MatrixType& result, const MatrixType& T)
{
  const int degree = 4;
  const RealScalar nodes[]   = { 0.0694318442029737123880267555535953L, 0.3300094782075718675986671204483777L,
            0.6699905217924281324013328795516223L, 0.9305681557970262876119732444464048L };
  const RealScalar weights[] = { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L,
            0.3260725774312730713134680253890003L, 0.1739274225687269286865319746109997L };
  assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade5(MatrixType& result, const MatrixType& T)
{
  const int degree = 5;
  const RealScalar nodes[]   = { 0.0469100770306680036011865608503035L, 0.2307653449471584544818427896498956L,
            0.5000000000000000000000000000000000L, 0.7692346550528415455181572103501044L,
            0.9530899229693319963988134391496965L };
  const RealScalar weights[] = { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L,
            0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L,
            0.1184634425280945437571320203599587L };
  assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade6(MatrixType& result, const MatrixType& T)
{
  const int degree = 6;
  const RealScalar nodes[]   = { 0.0337652428984239860938492227530027L, 0.1693953067668677431693002024900473L,
            0.3806904069584015456847491391596440L, 0.6193095930415984543152508608403560L,
            0.8306046932331322568306997975099527L, 0.9662347571015760139061507772469973L };
  const RealScalar weights[] = { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L,
            0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L,
            0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L };
  assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade7(MatrixType& result, const MatrixType& T)
{
  const int degree = 7;
  const RealScalar nodes[]   = { 0.0254460438286207377369051579760744L, 0.1292344072003027800680676133596058L,
            0.2970774243113014165466967939615193L, 0.5000000000000000000000000000000000L,
            0.7029225756886985834533032060384807L, 0.8707655927996972199319323866403942L,
            0.9745539561713792622630948420239256L };
  const RealScalar weights[] = { 0.0647424830844348466353057163395410L, 0.1398526957446383339507338857118898L,
            0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L,
            0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L,
            0.0647424830844348466353057163395410L };
  assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade8(MatrixType& result, const MatrixType& T)
{
  const int degree = 8;
  const RealScalar nodes[]   = { 0.0198550717512318841582195657152635L, 0.1016667612931866302042230317620848L,
            0.2372337950418355070911304754053768L, 0.4082826787521750975302619288199080L,
            0.5917173212478249024697380711800920L, 0.7627662049581644929088695245946232L,
            0.8983332387068133697957769682379152L, 0.9801449282487681158417804342847365L };
  const RealScalar weights[] = { 0.0506142681451881295762656771549811L, 0.1111905172266872352721779972131204L,
            0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L,
            0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L,
            0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L };
  assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade9(MatrixType& result, const MatrixType& T)
{
  const int degree = 9;
  const RealScalar nodes[]   = { 0.0159198802461869550822118985481636L, 0.0819844463366821028502851059651326L,
            0.1933142836497048013456489803292629L, 0.3378732882980955354807309926783317L,
            0.5000000000000000000000000000000000L, 0.6621267117019044645192690073216683L,
            0.8066857163502951986543510196707371L, 0.9180155536633178971497148940348674L,
            0.9840801197538130449177881014518364L };
  const RealScalar weights[] = { 0.0406371941807872059859460790552618L, 0.0903240803474287020292360156214564L,
            0.1303053482014677311593714347093164L, 0.1561735385200014200343152032922218L,
            0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L,
            0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L,
            0.0406371941807872059859460790552618L };
  assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade10(MatrixType& result, const MatrixType& T)
{
  const int degree = 10;
  const RealScalar nodes[]   = { 0.0130467357414141399610179939577740L, 0.0674683166555077446339516557882535L,
            0.1602952158504877968828363174425632L, 0.2833023029353764046003670284171079L,
            0.4255628305091843945575869994351400L, 0.5744371694908156054424130005648600L,
            0.7166976970646235953996329715828921L, 0.8397047841495122031171636825574368L,
            0.9325316833444922553660483442117465L, 0.9869532642585858600389820060422260L };
  const RealScalar weights[] = { 0.0333356721543440687967844049466659L, 0.0747256745752902965728881698288487L,
            0.1095431812579910219977674671140816L, 0.1346333596549981775456134607847347L,
            0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L,
            0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L,
            0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L };
  assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade11(MatrixType& result, const MatrixType& T)
{
  const int degree = 11;
  const RealScalar nodes[]   = { 0.0108856709269715035980309994385713L, 0.0564687001159523504624211153480364L,
            0.1349239972129753379532918739844233L, 0.2404519353965940920371371652706952L,
            0.3652284220238275138342340072995692L, 0.5000000000000000000000000000000000L,
            0.6347715779761724861657659927004308L, 0.7595480646034059079628628347293048L,
            0.8650760027870246620467081260155767L, 0.9435312998840476495375788846519636L,
            0.9891143290730284964019690005614287L };
  const RealScalar weights[] = { 0.0278342835580868332413768602212743L, 0.0627901847324523123173471496119701L,
            0.0931451054638671257130488207158280L, 0.1165968822959952399592618524215876L,
            0.1314022722551233310903444349452546L, 0.1364625433889503153572417641681711L,
            0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L,
            0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L,
            0.0278342835580868332413768602212743L };
  assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}

/** \ingroup MatrixFunctions_Module
  *
  * \brief Proxy for the matrix logarithm of some matrix (expression).
  *
  * \tparam Derived  Type of the argument to the matrix function.
  *
  * This class holds the argument to the matrix function until it is
  * assigned or evaluated for some other reason (so the argument
  * should not be changed in the meantime). It is the return type of
  * MatrixBase::log() and most of the time this is the
  * only way it is used.
  */
template<typename Derived> class MatrixLogarithmReturnValue
: public ReturnByValue<MatrixLogarithmReturnValue<Derived> >
{
public:

  typedef typename Derived::Scalar Scalar;
  typedef typename Derived::Index Index;

  /** \brief Constructor.
    *
    * \param[in]  A  %Matrix (expression) forming the argument of the matrix logarithm.
    */
  MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { }

  /** \brief Compute the matrix logarithm.
    *
    * \param[out]  result  Logarithm of \p A, where \p A is as specified in the constructor.
    */
  template <typename ResultType>
  inline void evalTo(ResultType& result) const
  {
    typedef typename Derived::PlainObject PlainObject;
    typedef internal::traits<PlainObject> Traits;
    static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
    static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
    static const int Options = PlainObject::Options;
    typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
    typedef Matrix<ComplexScalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
    typedef MatrixLogarithmAtomic<DynMatrixType> AtomicType;
    AtomicType atomic;

    const PlainObject Aevaluated = m_A.eval();
    MatrixFunction<PlainObject, AtomicType> mf(Aevaluated, atomic);
    mf.compute(result);
  }

  Index rows() const { return m_A.rows(); }
  Index cols() const { return m_A.cols(); }

private:
  typename internal::nested<Derived>::type m_A;

  MatrixLogarithmReturnValue& operator=(const MatrixLogarithmReturnValue&);
};

namespace internal {
  template<typename Derived>
  struct traits<MatrixLogarithmReturnValue<Derived> >
  {
    typedef typename Derived::PlainObject ReturnType;
  };
}


/********** MatrixBase method **********/


template <typename Derived>
const MatrixLogarithmReturnValue<Derived> MatrixBase<Derived>::log() const
{
  eigen_assert(rows() == cols());
  return MatrixLogarithmReturnValue<Derived>(derived());
}
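// Example usage (a sketch; assumes this header is pulled in via the unsupported
// MatrixFunctions module and that the matrix has no eigenvalues on the closed
// negative real axis, so the principal logarithm exists):
//
//   #include <unsupported/Eigen/MatrixFunctions>
//
//   Eigen::MatrixXd A(2, 2);
//   A << 2, 1,
//        0, 2;
//   Eigen::MatrixXd logA = A.log();  // the MatrixLogarithmReturnValue proxy is evaluated on assignment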

} // end namespace Eigen

#endif // EIGEN_MATRIX_LOGARITHM